diff --git a/.github/workflows/payload-after-push-amd64.yaml b/.github/workflows/build-kata-static-tarball-amd64.yaml similarity index 55% rename from .github/workflows/payload-after-push-amd64.yaml rename to .github/workflows/build-kata-static-tarball-amd64.yaml index 2b4814b84..5942a5d79 100644 --- a/.github/workflows/payload-after-push-amd64.yaml +++ b/.github/workflows/build-kata-static-tarball-amd64.yaml @@ -1,10 +1,14 @@ -name: CI | Publish kata-deploy payload for amd64 +name: CI | Build kata-static tarball for amd64 on: workflow_call: inputs: - target-arch: - required: true + tarball-suffix: + required: false type: string + push-to-registry: + required: false + type: string + default: no jobs: build-asset: @@ -15,21 +19,23 @@ jobs: - cloud-hypervisor - firecracker - kernel + - kernel-dragonball-experimental + - kernel-tdx-experimental + - kernel-gpu + - kernel-gpu-snp + - kernel-gpu-tdx-experimental - nydus - qemu + - qemu-tdx-experimental - rootfs-image - rootfs-initrd + - shim-v2 + - tdvf - virtiofsd steps: - - name: Login to Kata Containers quay.io - uses: docker/login-action@v2 - with: - registry: quay.io - username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} - password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - - uses: actions/checkout@v3 with: + ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # This is needed in order to keep the commit ids history - name: Build ${{ matrix.asset }} run: | @@ -40,12 +46,12 @@ jobs: env: KATA_ASSET: ${{ matrix.asset }} TAR_OUTPUT: ${{ matrix.asset }}.tar.gz - PUSH_TO_REGISTRY: yes + PUSH_TO_REGISTRY: ${{ inputs.push-to-registry }} - name: store-artifact ${{ matrix.asset }} uses: actions/upload-artifact@v3 with: - name: kata-artifacts-amd64 + name: kata-artifacts-amd64${{ inputs.tarball-suffix }} path: kata-build/kata-static-${{ matrix.asset }}.tar.xz retention-days: 1 if-no-files-found: error @@ -55,10 +61,12 @@ jobs: needs: build-asset steps: - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} - name: get-artifacts uses: actions/download-artifact@v3 with: - name: kata-artifacts-amd64 + name: kata-artifacts-amd64${{ inputs.tarball-suffix }} path: kata-artifacts - name: merge-artifacts run: | @@ -66,31 +74,7 @@ jobs: - name: store-artifacts uses: actions/upload-artifact@v3 with: - name: kata-static-tarball-amd64 + name: kata-static-tarball-amd64${{ inputs.tarball-suffix }} path: kata-static.tar.xz retention-days: 1 if-no-files-found: error - - kata-payload: - needs: create-kata-tarball - runs-on: ubuntu-latest - steps: - - name: Login to Kata Containers quay.io - uses: docker/login-action@v2 - with: - registry: quay.io - username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} - password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - - - uses: actions/checkout@v3 - - name: get-kata-tarball - uses: actions/download-artifact@v3 - with: - name: kata-static-tarball-amd64 - - - name: build-and-push-kata-payload - id: build-and-push-kata-payload - run: | - ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ - $(pwd)/kata-static.tar.xz "quay.io/kata-containers/kata-deploy-ci" \ - "kata-containers-${{ inputs.target-arch }}" diff --git a/.github/workflows/payload-after-push-arm64.yaml b/.github/workflows/build-kata-static-tarball-arm64.yaml similarity index 56% rename from .github/workflows/payload-after-push-arm64.yaml rename to .github/workflows/build-kata-static-tarball-arm64.yaml index c7315bab0..753bcf13a 100644 --- a/.github/workflows/payload-after-push-arm64.yaml +++ 
b/.github/workflows/build-kata-static-tarball-arm64.yaml @@ -1,10 +1,14 @@ -name: CI | Publish kata-deploy payload for arm64 +name: CI | Build kata-static tarball for arm64 on: workflow_call: inputs: - target-arch: - required: true + tarball-suffix: + required: false type: string + push-to-registry: + required: false + type: string + default: no jobs: build-asset: @@ -15,25 +19,21 @@ jobs: - cloud-hypervisor - firecracker - kernel + - kernel-dragonball-experimental - nydus - qemu - rootfs-image - rootfs-initrd + - shim-v2 - virtiofsd steps: - - name: Login to Kata Containers quay.io - uses: docker/login-action@v2 - with: - registry: quay.io - username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} - password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - - name: Adjust a permission for repo run: | sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v3 with: + ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # This is needed in order to keep the commit ids history - name: Build ${{ matrix.asset }} run: | @@ -44,12 +44,12 @@ jobs: env: KATA_ASSET: ${{ matrix.asset }} TAR_OUTPUT: ${{ matrix.asset }}.tar.gz - PUSH_TO_REGISTRY: yes + PUSH_TO_REGISTRY: ${{ inputs.push-to-registry }} - name: store-artifact ${{ matrix.asset }} uses: actions/upload-artifact@v3 with: - name: kata-artifacts-arm64 + name: kata-artifacts-arm64${{ inputs.tarball-suffix }} path: kata-build/kata-static-${{ matrix.asset }}.tar.xz retention-days: 1 if-no-files-found: error @@ -63,10 +63,12 @@ jobs: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} - name: get-artifacts uses: actions/download-artifact@v3 with: - name: kata-artifacts-arm64 + name: kata-artifacts-arm64${{ inputs.tarball-suffix }} path: kata-artifacts - name: merge-artifacts run: | @@ -74,35 +76,7 @@ jobs: - name: store-artifacts uses: actions/upload-artifact@v3 with: - name: kata-static-tarball-arm64 + name: kata-static-tarball-arm64${{ inputs.tarball-suffix }} path: kata-static.tar.xz retention-days: 1 if-no-files-found: error - - kata-payload: - needs: create-kata-tarball - runs-on: arm64 - steps: - - name: Login to Kata Containers quay.io - uses: docker/login-action@v2 - with: - registry: quay.io - username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} - password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - - - name: Adjust a permission for repo - run: | - sudo chown -R $USER:$USER $GITHUB_WORKSPACE - - - uses: actions/checkout@v3 - - name: get-kata-tarball - uses: actions/download-artifact@v3 - with: - name: kata-static-tarball-arm64 - - - name: build-and-push-kata-payload - id: build-and-push-kata-payload - run: | - ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ - $(pwd)/kata-static.tar.xz "quay.io/kata-containers/kata-deploy-ci" \ - "kata-containers-${{ inputs.target-arch }}" diff --git a/.github/workflows/payload-after-push-s390x.yaml b/.github/workflows/build-kata-static-tarball-s390x.yaml similarity index 56% rename from .github/workflows/payload-after-push-s390x.yaml rename to .github/workflows/build-kata-static-tarball-s390x.yaml index 4fa147205..95e4a5ff5 100644 --- a/.github/workflows/payload-after-push-s390x.yaml +++ b/.github/workflows/build-kata-static-tarball-s390x.yaml @@ -1,10 +1,14 @@ -name: CI | Publish kata-deploy payload for s390x +name: CI | Build kata-static tarball for s390x on: workflow_call: inputs: - target-arch: - required: true + tarball-suffix: + required: false type: string + push-to-registry: + required: 
false + type: string + default: no jobs: build-asset: @@ -13,25 +17,19 @@ jobs: matrix: asset: - kernel - - shim-v2 - qemu - rootfs-image - rootfs-initrd + - shim-v2 - virtiofsd steps: - - name: Login to Kata Containers quay.io - uses: docker/login-action@v2 - with: - registry: quay.io - username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} - password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - - name: Adjust a permission for repo run: | sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v3 with: + ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # This is needed in order to keep the commit ids history - name: Build ${{ matrix.asset }} run: | @@ -43,12 +41,12 @@ jobs: env: KATA_ASSET: ${{ matrix.asset }} TAR_OUTPUT: ${{ matrix.asset }}.tar.gz - PUSH_TO_REGISTRY: yes + PUSH_TO_REGISTRY: ${{ inputs.push-to-registry }} - name: store-artifact ${{ matrix.asset }} uses: actions/upload-artifact@v3 with: - name: kata-artifacts-s390x + name: kata-artifacts-s390x${{ inputs.tarball-suffix }} path: kata-build/kata-static-${{ matrix.asset }}.tar.xz retention-days: 1 if-no-files-found: error @@ -62,10 +60,12 @@ jobs: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} - name: get-artifacts uses: actions/download-artifact@v3 with: - name: kata-artifacts-s390x + name: kata-artifacts-s390x${{ inputs.tarball-suffix }} path: kata-artifacts - name: merge-artifacts run: | @@ -73,35 +73,7 @@ jobs: - name: store-artifacts uses: actions/upload-artifact@v3 with: - name: kata-static-tarball-s390x + name: kata-static-tarball-s390x${{ inputs.tarball-suffix }} path: kata-static.tar.xz retention-days: 1 if-no-files-found: error - - kata-payload: - needs: create-kata-tarball - runs-on: s390x - steps: - - name: Login to Kata Containers quay.io - uses: docker/login-action@v2 - with: - registry: quay.io - username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} - password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - - - name: Adjust a permission for repo - run: | - sudo chown -R $USER:$USER $GITHUB_WORKSPACE - - - uses: actions/checkout@v3 - - name: get-kata-tarball - uses: actions/download-artifact@v3 - with: - name: kata-static-tarball-s390x - - - name: build-and-push-kata-payload - id: build-and-push-kata-payload - run: | - ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ - $(pwd)/kata-static.tar.xz "quay.io/kata-containers/kata-deploy-ci" \ - "kata-containers-${{ inputs.target-arch }}" diff --git a/.github/workflows/ci-on-push.yaml b/.github/workflows/ci-on-push.yaml new file mode 100644 index 000000000..6db1cda72 --- /dev/null +++ b/.github/workflows/ci-on-push.yaml @@ -0,0 +1,38 @@ +name: Kata Containers CI +on: + pull_request_target: + branches: + - 'main' + +jobs: + build-kata-static-tarball-amd64: + uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml + with: + tarball-suffix: -${{ github.event.pull_request.number}}-${{ github.event.pull_request.head.sha }} + + publish-kata-deploy-payload-amd64: + needs: build-kata-static-tarball-amd64 + uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml + with: + tarball-suffix: -${{ github.event.pull_request.number}}-${{ github.event.pull_request.head.sha }} + registry: ghcr.io + repo: ${{ github.repository_owner }}/kata-deploy-ci + tag: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-amd64 + secrets: inherit + + run-k8s-tests-on-aks: + needs: publish-kata-deploy-payload-amd64 + uses: 
./.github/workflows/run-k8s-tests-on-aks.yaml + with: + registry: ghcr.io + repo: ${{ github.repository_owner }}/kata-deploy-ci + tag: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-amd64 + secrets: inherit + + run-k8s-tests-on-tdx: + needs: publish-kata-deploy-payload-amd64 + uses: ./.github/workflows/run-k8s-tests-on-tdx.yaml + with: + registry: ghcr.io + repo: ${{ github.repository_owner }}/kata-deploy-ci + tag: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-amd64 diff --git a/.github/workflows/kata-deploy-push.yaml b/.github/workflows/kata-deploy-push.yaml deleted file mode 100644 index 9b5fb4c0d..000000000 --- a/.github/workflows/kata-deploy-push.yaml +++ /dev/null @@ -1,84 +0,0 @@ -name: kata deploy build - -on: - pull_request: - types: - - opened - - edited - - reopened - - synchronize - paths: - - tools/** - - versions.yaml - -jobs: - build-asset: - runs-on: ubuntu-latest - strategy: - matrix: - asset: - - kernel - - kernel-dragonball-experimental - - shim-v2 - - qemu - - cloud-hypervisor - - firecracker - - rootfs-image - - rootfs-initrd - - virtiofsd - - nydus - steps: - - uses: actions/checkout@v2 - - name: Build ${{ matrix.asset }} - if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} - run: | - make "${KATA_ASSET}-tarball" - build_dir=$(readlink -f build) - # store-artifact does not work with symlink - sudo cp -r --preserve=all "${build_dir}" "kata-build" - env: - KATA_ASSET: ${{ matrix.asset }} - - - name: store-artifact ${{ matrix.asset }} - if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} - uses: actions/upload-artifact@v2 - with: - name: kata-artifacts - path: kata-build/kata-static-${{ matrix.asset }}.tar.xz - if-no-files-found: error - - create-kata-tarball: - runs-on: ubuntu-latest - needs: build-asset - steps: - - uses: actions/checkout@v2 - - name: get-artifacts - if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} - uses: actions/download-artifact@v2 - with: - name: kata-artifacts - path: build - - name: merge-artifacts - if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} - run: | - make merge-builds - - name: store-artifacts - if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} - uses: actions/upload-artifact@v2 - with: - name: kata-static-tarball - path: kata-static.tar.xz - - make-kata-tarball: - runs-on: ubuntu-latest - steps: - - name: Free disk space - run: | - sudo rm -rf /usr/share/dotnet - sudo rm -rf "$AGENT_TOOLSDIRECTORY" - - uses: actions/checkout@v2 - - name: make kata-tarball - if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} - run: | - make kata-tarball - sudo make install-tarball diff --git a/.github/workflows/kata-deploy-test.yaml b/.github/workflows/kata-deploy-test.yaml deleted file mode 100644 index 6b30109c2..000000000 --- a/.github/workflows/kata-deploy-test.yaml +++ /dev/null @@ -1,164 +0,0 @@ -on: - workflow_dispatch: # this is used to trigger the workflow on non-main branches - inputs: - pr: - description: 'PR number from the selected branch to test' - type: string - required: true - issue_comment: - types: [created, edited] - -name: test-kata-deploy - -jobs: - check-comment-and-membership: - runs-on: ubuntu-latest - if: | - github.event.issue.pull_request - && github.event_name == 'issue_comment' - && github.event.action == 'created' - && startsWith(github.event.comment.body, '/test_kata_deploy') - || 
github.event_name == 'workflow_dispatch' - steps: - - name: Check membership on comment or dispatch - uses: kata-containers/is-organization-member@1.0.1 - id: is_organization_member - with: - organization: kata-containers - username: ${{ github.event.comment.user.login || github.event.sender.login }} - token: ${{ secrets.GITHUB_TOKEN }} - - name: Fail if not member - run: | - result=${{ steps.is_organization_member.outputs.result }} - if [ $result == false ]; then - user=${{ github.event.comment.user.login || github.event.sender.login }} - echo Either ${user} is not part of the kata-containers organization - echo or ${user} has its Organization Visibility set to Private at - echo https://github.com/orgs/kata-containers/people?query=${user} - echo - echo Ensure you change your Organization Visibility to Public and - echo trigger the test again. - exit 1 - fi - - build-asset: - runs-on: ubuntu-latest - needs: check-comment-and-membership - strategy: - matrix: - asset: - - cloud-hypervisor - - firecracker - - kernel - - kernel-dragonball-experimental - - nydus - - qemu - - rootfs-image - - rootfs-initrd - - shim-v2 - - virtiofsd - steps: - - name: get-PR-ref - id: get-PR-ref - run: | - if [ ${{ github.event_name }} == 'issue_comment' ]; then - ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#') - else # workflow_dispatch - ref="refs/pull/${{ github.event.inputs.pr }}/merge" - fi - echo "reference for PR: " ${ref} "event:" ${{ github.event_name }} - echo "pr-ref=${ref}" >> $GITHUB_OUTPUT - - uses: actions/checkout@v2 - with: - ref: ${{ steps.get-PR-ref.outputs.pr-ref }} - - - name: Build ${{ matrix.asset }} - run: | - make "${KATA_ASSET}-tarball" - build_dir=$(readlink -f build) - # store-artifact does not work with symlink - sudo cp -r "${build_dir}" "kata-build" - env: - KATA_ASSET: ${{ matrix.asset }} - TAR_OUTPUT: ${{ matrix.asset }}.tar.gz - - - name: store-artifact ${{ matrix.asset }} - uses: actions/upload-artifact@v2 - with: - name: kata-artifacts - path: kata-build/kata-static-${{ matrix.asset }}.tar.xz - if-no-files-found: error - - create-kata-tarball: - runs-on: ubuntu-latest - needs: build-asset - steps: - - name: get-PR-ref - id: get-PR-ref - run: | - if [ ${{ github.event_name }} == 'issue_comment' ]; then - ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#') - else # workflow_dispatch - ref="refs/pull/${{ github.event.inputs.pr }}/merge" - fi - echo "reference for PR: " ${ref} "event:" ${{ github.event_name }} - echo "pr-ref=${ref}" >> $GITHUB_OUTPUT - - uses: actions/checkout@v2 - with: - ref: ${{ steps.get-PR-ref.outputs.pr-ref }} - - name: get-artifacts - uses: actions/download-artifact@v2 - with: - name: kata-artifacts - path: kata-artifacts - - name: merge-artifacts - run: | - ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts - - name: store-artifacts - uses: actions/upload-artifact@v2 - with: - name: kata-static-tarball - path: kata-static.tar.xz - - kata-deploy: - needs: create-kata-tarball - runs-on: ubuntu-latest - steps: - - name: get-PR-ref - id: get-PR-ref - run: | - if [ ${{ github.event_name }} == 'issue_comment' ]; then - ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#') - else # workflow_dispatch - ref="refs/pull/${{ github.event.inputs.pr }}/merge" - fi - echo "reference for PR: " ${ref} "event:" ${{ github.event_name }} - echo 
"pr-ref=${ref}" >> $GITHUB_OUTPUT - - uses: actions/checkout@v2 - with: - ref: ${{ steps.get-PR-ref.outputs.pr-ref }} - - name: get-kata-tarball - uses: actions/download-artifact@v2 - with: - name: kata-static-tarball - - name: build-and-push-kata-deploy-ci - id: build-and-push-kata-deploy-ci - run: | - PR_SHA=$(git log --format=format:%H -n1) - mv kata-static.tar.xz $GITHUB_WORKSPACE/tools/packaging/kata-deploy/kata-static.tar.xz - docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t quay.io/kata-containers/kata-deploy-ci:$PR_SHA $GITHUB_WORKSPACE/tools/packaging/kata-deploy - docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io - docker push quay.io/kata-containers/kata-deploy-ci:$PR_SHA - mkdir -p packaging/kata-deploy - ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action packaging/kata-deploy/action - echo "PKG_SHA=${PR_SHA}" >> $GITHUB_OUTPUT - - name: test-kata-deploy-ci-in-aks - uses: ./packaging/kata-deploy/action - with: - packaging-sha: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} - env: - PKG_SHA: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} - AZ_APPID: ${{ secrets.AZ_APPID }} - AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }} - AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }} - AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }} diff --git a/.github/workflows/payload-after-push.yaml b/.github/workflows/payload-after-push.yaml index f07c8859c..25a7a18c2 100644 --- a/.github/workflows/payload-after-push.yaml +++ b/.github/workflows/payload-after-push.yaml @@ -7,26 +7,50 @@ on: jobs: build-assets-amd64: - uses: ./.github/workflows/payload-after-push-amd64.yaml + uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml with: - target-arch: amd64 - secrets: inherit + push-to-registry: yes build-assets-arm64: - uses: ./.github/workflows/payload-after-push-arm64.yaml + uses: ./.github/workflows/build-kata-static-tarball-arm64.yaml with: - target-arch: arm64 - secrets: inherit + push-to-registry: yes build-assets-s390x: - uses: ./.github/workflows/payload-after-push-s390x.yaml + uses: ./.github/workflows/build-kata-static-tarball-s390x.yaml with: - target-arch: s390x + push-to-registry: yes + + publish-kata-deploy-payload-amd64: + needs: build-assets-amd64 + uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml + with: + registry: quay.io + repo: kata-containers/kata-deploy-ci + tag: kata-containers-amd64 secrets: inherit - publish: + publish-kata-deploy-payload-arm64: + needs: build-assets-arm64 + uses: ./.github/workflows/publish-kata-deploy-payload-arm64.yaml + with: + registry: quay.io + repo: kata-containers/kata-deploy-ci + tag: kata-containers-arm64 + secrets: inherit + + publish-kata-deploy-payload-s390x: + needs: build-assets-s390x + uses: ./.github/workflows/publish-kata-deploy-payload-s390x.yaml + with: + registry: quay.io + repo: kata-containers/kata-deploy-ci + tag: kata-containers-s390x + secrets: inherit + + publish-manifest: runs-on: ubuntu-latest - needs: [build-assets-amd64, build-assets-arm64, build-assets-s390x] + needs: [publish-kata-deploy-payload-amd64, publish-kata-deploy-payload-arm64, publish-kata-deploy-payload-s390x] steps: - name: Checkout repository uses: actions/checkout@v3 diff --git a/.github/workflows/publish-kata-deploy-payload-amd64.yaml b/.github/workflows/publish-kata-deploy-payload-amd64.yaml new file mode 100644 index 000000000..91c7a0612 --- /dev/null +++ b/.github/workflows/publish-kata-deploy-payload-amd64.yaml @@ -0,0 +1,52 @@ +name: CI | Publish 
kata-deploy payload for amd64 +on: + workflow_call: + inputs: + tarball-suffix: + required: false + type: string + registry: + required: true + type: string + repo: + required: true + type: string + tag: + required: true + type: string + +jobs: + kata-payload: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: get-kata-tarball + uses: actions/download-artifact@v3 + with: + name: kata-static-tarball-amd64${{ inputs.tarball-suffix }} + + - name: Login to Kata Containers quay.io + if: ${{ inputs.registry == 'quay.io' }} + uses: docker/login-action@v2 + with: + registry: quay.io + username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} + password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} + + - name: Login to Kata Containers ghcr.io + if: ${{ inputs.registry == 'ghcr.io' }} + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: build-and-push-kata-payload + id: build-and-push-kata-payload + run: | + ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ + $(pwd)/kata-static.tar.xz \ + ${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }} diff --git a/.github/workflows/publish-kata-deploy-payload-arm64.yaml b/.github/workflows/publish-kata-deploy-payload-arm64.yaml new file mode 100644 index 000000000..c4fd32477 --- /dev/null +++ b/.github/workflows/publish-kata-deploy-payload-arm64.yaml @@ -0,0 +1,57 @@ +name: CI | Publish kata-deploy payload for arm64 +on: + workflow_call: + inputs: + tarball-suffix: + required: false + type: string + registry: + required: true + type: string + repo: + required: true + type: string + tag: + required: true + type: string + +jobs: + kata-payload: + runs-on: arm64 + steps: + - name: Adjust a permission for repo + run: | + sudo chown -R $USER:$USER $GITHUB_WORKSPACE + + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: get-kata-tarball + uses: actions/download-artifact@v3 + with: + name: kata-static-tarball-arm64${{ inputs.tarball-suffix }} + + - name: Login to Kata Containers quay.io + if: ${{ inputs.registry == 'quay.io' }} + uses: docker/login-action@v2 + with: + registry: quay.io + username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} + password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} + + - name: Login to Kata Containers ghcr.io + if: ${{ inputs.registry == 'ghcr.io' }} + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: build-and-push-kata-payload + id: build-and-push-kata-payload + run: | + ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ + $(pwd)/kata-static.tar.xz \ + ${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }} + diff --git a/.github/workflows/publish-kata-deploy-payload-s390x.yaml b/.github/workflows/publish-kata-deploy-payload-s390x.yaml new file mode 100644 index 000000000..2a0ea8071 --- /dev/null +++ b/.github/workflows/publish-kata-deploy-payload-s390x.yaml @@ -0,0 +1,56 @@ +name: CI | Publish kata-deploy payload for s390x +on: + workflow_call: + inputs: + tarball-suffix: + required: false + type: string + registry: + required: true + type: string + repo: + required: true + type: string + tag: + required: true + type: string + +jobs: + kata-payload: + runs-on: s390x + steps: + - name: Adjust a permission for repo + run: | + sudo chown -R $USER:$USER $GITHUB_WORKSPACE + + - uses: 
actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: get-kata-tarball + uses: actions/download-artifact@v3 + with: + name: kata-static-tarball-s390x${{ inputs.tarball-suffix }} + + - name: Login to Kata Containers quay.io + if: ${{ inputs.registry == 'quay.io' }} + uses: docker/login-action@v2 + with: + registry: quay.io + username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} + password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} + + - name: Login to Kata Containers ghcr.io + if: ${{ inputs.registry == 'ghcr.io' }} + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: build-and-push-kata-payload + id: build-and-push-kata-payload + run: | + ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ + $(pwd)/kata-static.tar.xz \ + ${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 87a5992c1..a642fa36f 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -5,69 +5,18 @@ on: - '[0-9]+.[0-9]+.[0-9]+*' jobs: - build-asset: - runs-on: ubuntu-latest - strategy: - matrix: - asset: - - cloud-hypervisor - - firecracker - - kernel - - kernel-dragonball-experimental - - nydus - - qemu - - rootfs-image - - rootfs-initrd - - shim-v2 - - virtiofsd - steps: - - uses: actions/checkout@v2 - - name: Build ${{ matrix.asset }} - run: | - ./tools/packaging/kata-deploy/local-build/kata-deploy-copy-yq-installer.sh - ./tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh --build="${KATA_ASSET}" - build_dir=$(readlink -f build) - # store-artifact does not work with symlink - sudo cp -r "${build_dir}" "kata-build" - env: - KATA_ASSET: ${{ matrix.asset }} - TAR_OUTPUT: ${{ matrix.asset }}.tar.gz - - - name: store-artifact ${{ matrix.asset }} - uses: actions/upload-artifact@v2 - with: - name: kata-artifacts - path: kata-build/kata-static-${{ matrix.asset }}.tar.xz - if-no-files-found: error - - create-kata-tarball: - runs-on: ubuntu-latest - needs: build-asset - steps: - - uses: actions/checkout@v2 - - name: get-artifacts - uses: actions/download-artifact@v2 - with: - name: kata-artifacts - path: kata-artifacts - - name: merge-artifacts - run: | - ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts - - name: store-artifacts - uses: actions/upload-artifact@v2 - with: - name: kata-static-tarball - path: kata-static.tar.xz + build-kata-static-tarball-amd64: + uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml kata-deploy: - needs: create-kata-tarball + needs: build-kata-static-tarball-amd64 runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: get-kata-tarball - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: - name: kata-static-tarball + name: kata-static-tarball-amd64 - name: build-and-push-kata-deploy-ci id: build-and-push-kata-deploy-ci run: | @@ -112,9 +61,9 @@ jobs: needs: kata-deploy runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: download-artifacts - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: kata-static-tarball - name: install hub @@ -136,7 +85,7 @@ jobs: needs: upload-static-tarball runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: generate-and-upload-tarball run: | tag=$(echo 
$GITHUB_REF | cut -d/ -f3-) @@ -150,7 +99,7 @@ jobs: needs: upload-cargo-vendored-tarball runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: download-and-upload-tarball env: GITHUB_TOKEN: ${{ secrets.GIT_UPLOAD_TOKEN }} diff --git a/.github/workflows/run-k8s-tests-on-aks.yaml b/.github/workflows/run-k8s-tests-on-aks.yaml new file mode 100644 index 000000000..f9a26debb --- /dev/null +++ b/.github/workflows/run-k8s-tests-on-aks.yaml @@ -0,0 +1,92 @@ +name: CI | Run kubernetes tests on AKS +on: + workflow_call: + inputs: + registry: + required: true + type: string + repo: + required: true + type: string + tag: + required: true + type: string + +jobs: + run-k8s-tests: + strategy: + fail-fast: false + matrix: + vmm: + - clh + - dragonball + - qemu + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: Download Azure CLI + run: | + curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + + - name: Log into the Azure account + run: | + az login \ + --service-principal \ + -u "${{ secrets.AZ_APPID }}" \ + -p "${{ secrets.AZ_PASSWORD }}" \ + --tenant "${{ secrets.AZ_TENANT_ID }}" + + - name: Create AKS cluster + run: | + az aks create \ + -g "kataCI" \ + -n "${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-${{ matrix.vmm }}-amd64" \ + -s "Standard_D4s_v5" \ + --node-count 1 \ + --generate-ssh-keys + + - name: Install `bats` + run: | + sudo apt-get update + sudo apt-get -y install bats + + - name: Install `kubectl` + run: | + sudo az aks install-cli + + - name: Download credentials for the Kubernetes CLI to use them + run: | + az aks get-credentials -g "kataCI" -n ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-${{ matrix.vmm }}-amd64 + + - name: Deploy kata-deploy + run: | + sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}|g" tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + cat tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + cat tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml | grep "${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}" || die "Failed to setup the tests image" + + kubectl apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml + kubectl apply -f tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod + kubectl apply -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml + + - name: Run tests + timeout-minutes: 30 + run: | + pushd tests/integration/kubernetes + sed -i -e 's|runtimeClassName: kata|runtimeClassName: kata-${{ matrix.vmm }}|' runtimeclass_workloads/*.yaml + bash run_kubernetes_tests.sh + popd + env: + KATA_HYPERVISOR: ${{ matrix.vmm }} + + - name: Delete AKS cluster + if: always() + run: | + az aks delete \ + -g "kataCI" \ + -n "${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-${{ matrix.vmm }}-amd64" \ + --yes \ + --no-wait diff --git a/.github/workflows/run-k8s-tests-on-tdx.yaml b/.github/workflows/run-k8s-tests-on-tdx.yaml new file mode 100644 index 000000000..1777a16c8 --- /dev/null +++ b/.github/workflows/run-k8s-tests-on-tdx.yaml @@ -0,0 +1,65 @@ +name: CI | Run kubernetes tests on TDX +on: + workflow_call: + inputs: + registry: + required: true + type: string + repo: + required: true + type: string + tag: 
+ required: true + type: string + +jobs: + run-k8s-tests: + strategy: + fail-fast: false + matrix: + vmm: + - qemu-tdx + runs-on: tdx + env: + KUBECONFIG: /etc/rancher/k3s/k3s.yaml + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: Deploy kata-deploy + run: | + sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}|g" tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + cat tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + cat tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml | grep "${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}" || die "Failed to setup the tests image" + + kubectl apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml + kubectl apply -k tools/packaging/kata-deploy/kata-deploy/overlays/k3s + kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod + kubectl apply -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml + + - name: Run tests + timeout-minutes: 30 + run: | + pushd tests/integration/kubernetes + sed -i -e 's|runtimeClassName: kata|runtimeClassName: kata-${{ matrix.vmm }}|' runtimeclass_workloads/*.yaml + bash run_kubernetes_tests.sh + popd + env: + KATA_HYPERVISOR: ${{ matrix.vmm }} + + - name: Delete kata-deploy + if: always() + run: | + kubectl delete -k tools/packaging/kata-deploy/kata-deploy/overlays/k3s + kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod + + sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}|g" tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml + cat tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml + cat tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml | grep "${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}" || die "Failed to setup the tests image" + kubectl apply -f tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml + sleep 180s + + kubectl delete -f tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml + kubectl delete -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml + kubectl delete -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml diff --git a/VERSION b/VERSION index a36373c3b..2f81ab203 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.1.0-rc0 +3.2.0-alpha0 diff --git a/docs/design/architecture/networking.md b/docs/design/architecture/networking.md index ab056849c..1550f0ad0 100644 --- a/docs/design/architecture/networking.md +++ b/docs/design/architecture/networking.md @@ -36,7 +36,7 @@ compatibility, and performance on par with MACVTAP. Kata Containers has deprecated support for bridge due to lacking performance relative to TC-filter and MACVTAP. Kata Containers supports both -[CNM](https://github.com/docker/libnetwork/blob/master/docs/design.md#the-container-network-model) +[CNM](https://github.com/moby/libnetwork/blob/master/docs/design.md#the-container-network-model) and [CNI](https://github.com/containernetworking/cni) for networking management. 
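For context on the TC-filter and MACVTAP models referenced just above: the inter-networking model is selected in the runtime configuration rather than in the CNM/CNI plugin itself. A minimal sketch of the relevant `configuration.toml` stanza follows; the section and option names mirror Kata's shipped configuration, but treat the listed values and default as an assumption rather than something stated by this patch:

    [runtime]
    # Accepted values are typically "tcfilter", "macvtap" and "none";
    # the deprecated "bridge" model is the one the paragraph above refers to.
    internetworking_model = "tcfilter"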
 ## Network Hotplug
diff --git a/docs/how-to/how-to-run-rootless-vmm.md b/docs/how-to/how-to-run-rootless-vmm.md
index 3986de252..7711c1325 100644
--- a/docs/how-to/how-to-run-rootless-vmm.md
+++ b/docs/how-to/how-to-run-rootless-vmm.md
@@ -1,5 +1,5 @@
 ## Introduction
-To improve security, Kata Container supports running the VMM process (currently only QEMU) as a non-`root` user.
+To improve security, Kata Containers supports running the VMM process (QEMU and Cloud Hypervisor) as a non-`root` user.
 This document describes how to enable the rootless VMM mode and its limitations.
 
 ## Pre-requisites
@@ -27,7 +27,7 @@ Another necessary change is to move the hypervisor runtime files (e.g. `vhost-fs
 
 ## Limitations
 1. Only the VMM process is running as a non-root user. Other processes such as Kata Container shimv2 and `virtiofsd` still run as the root user.
-2. Currently, this feature is only supported in QEMU. Still need to bring it to Firecracker and Cloud Hypervisor (see https://github.com/kata-containers/kata-containers/issues/2567).
+2. Currently, this feature is only supported in QEMU and Cloud Hypervisor. For Firecracker, you can use the jailer to run the VMM process as a non-`root` user.
 3. Certain features will not work when rootless VMM is enabled, including:
    1. Passing devices to the guest (`virtio-blk`, `virtio-scsi`) will not work if the non-privileged user does not have permission to access it (leading to a permission denied error). A more permissive permission (e.g. 666) may overcome this issue. However, you need to be aware of the potential security implications of reducing the security on such devices.
    2. `vfio` device will also not work because of permission denied error.
\ No newline at end of file
diff --git a/docs/install/kata-containers-3.0-rust-runtime-installation-guide.md b/docs/install/kata-containers-3.0-rust-runtime-installation-guide.md
index d8150bf3d..f83e4ea02 100644
--- a/docs/install/kata-containers-3.0-rust-runtime-installation-guide.md
+++ b/docs/install/kata-containers-3.0-rust-runtime-installation-guide.md
@@ -49,7 +49,7 @@ Follow the [`kata-deploy`](../../tools/packaging/kata-deploy/README.md).
 
 * Download `Rustup` and install `Rust`
   > **Notes:**
-  > Rust version 1.62.0 is needed
+  > For the required Rust version, see the `rust` section of the [`versions.yaml`](../../versions.yaml) file.
 
 Example for `x86_64`
 ```
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index 2b68a7e87..4bbb8e0f1 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -34,54 +34,6 @@ parts:
       mkdir -p $(dirname ${kata_dir})
       ln -sf $(realpath "${SNAPCRAFT_STAGE}/..") ${kata_dir}
 
-  godeps:
-    after: [metadata]
-    plugin: nil
-    prime:
-      - -*
-    build-packages:
-      - curl
-    override-build: |
-      source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh"
-
-      # put everything in stage
-      cd "${SNAPCRAFT_STAGE}"
-
-      version="$(${yq} r ${kata_dir}/versions.yaml languages.golang.meta.newest-version)"
-      tarfile="go${version}.${goos}-${goarch}.tar.gz"
-      curl -LO https://golang.org/dl/${tarfile}
-      tar -xf ${tarfile} --strip-components=1
-
-  rustdeps:
-    after: [metadata]
-    plugin: nil
-    prime:
-      - -*
-    build-packages:
-      - curl
-    override-build: |
-      source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh"
-
-      # put everything in stage
-      cd "${SNAPCRAFT_STAGE}"
-
-      version="$(${yq} r ${kata_dir}/versions.yaml languages.rust.meta.newest-version)"
-      if !
command -v rustup > /dev/null; then - curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain ${version} - fi - - export PATH=${PATH}:${HOME}/.cargo/bin - rustup toolchain install ${version} - rustup default ${version} - if [ "${arch}" == "ppc64le" ] || [ "${arch}" == "s390x" ] ; then - [ "${arch}" == "ppc64le" ] && arch="powerpc64le" - rustup target add ${arch}-unknown-linux-gnu - else - rustup target add ${arch}-unknown-linux-musl - $([ "$(whoami)" != "root" ] && echo sudo) ln -sf /usr/bin/g++ /bin/musl-g++ - fi - rustup component add rustfmt - docker: after: [metadata] plugin: nil @@ -111,240 +63,92 @@ parts: echo "Adding $USER into docker group" sudo -E gpasswd -a $USER docker echo "Starting docker" + # docker may fail to start using "fd://" in docker.service + sudo sed -i 's/fd:\/\//unix:\/\//g' /lib/systemd/system/docker.service + sudo systemctl daemon-reload sudo -E systemctl start docker || true image: - after: [godeps, docker, qemu, kernel] + after: [docker] plugin: nil - build-packages: - - docker.io - - cpio - - git - - iptables - - software-properties-common - - uidmap - - gnupg2 override-build: | source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh" - [ "${arch}" = "ppc64le" ] || [ "${arch}" = "s390x" ] && sudo apt-get --no-install-recommends install -y protobuf-compiler + cd "${SNAPCRAFT_PROJECT_DIR}" + sudo -E NO_TTY=true make rootfs-image-tarball - if [ -n "$http_proxy" ]; then - echo "Setting proxy $http_proxy" - sudo -E systemctl set-environment http_proxy="$http_proxy" || true - sudo -E systemctl set-environment https_proxy="$https_proxy" || true - fi + tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-rootfs-image.tar.xz" - # Copy yq binary. It's used in the container - cp -a "${yq}" "${GOPATH}/bin/" + tar -xvJpf "${tarfile}" -C "${SNAPCRAFT_PART_INSTALL}" - cd "${kata_dir}/tools/osbuilder" - # build image - export AGENT_INIT=yes - export USE_DOCKER=1 - export DEBUG=1 - initrd_distro=$(${yq} r -X ${kata_dir}/versions.yaml assets.initrd.architecture.${arch}.name) - image_distro=$(${yq} r -X ${kata_dir}/versions.yaml assets.image.architecture.${arch}.name) - case "$arch" in - x86_64) - # In some build systems it's impossible to build a rootfs image, try with the initrd image - sudo -E PATH=$PATH make image DISTRO="${image_distro}" || sudo -E PATH="$PATH" make initrd DISTRO="${initrd_distro}" - ;; + sudo -E NO_TTY=true make rootfs-initrd-tarball - aarch64|ppc64le|s390x) - sudo -E PATH="$PATH" make initrd DISTRO="${initrd_distro}" - ;; + tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-rootfs-initrd.tar.xz" - *) die "unsupported architecture: ${arch}" ;; - esac + tar -xvJpf "${tarfile}" -C "${SNAPCRAFT_PART_INSTALL}" - # Install image - kata_image_dir="${SNAPCRAFT_PART_INSTALL}/usr/share/kata-containers" - mkdir -p "${kata_image_dir}" - cp kata-containers*.img "${kata_image_dir}" runtime: - after: [godeps, image, cloud-hypervisor] + after: [docker] plugin: nil - build-attributes: [no-patchelf] override-build: | source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh" - cd "${kata_dir}/src/runtime" + cd "${SNAPCRAFT_PROJECT_DIR}" + sudo -E NO_TTY=true make shim-v2-tarball - qemu_cmd="qemu-system-${qemu_arch}" + tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-shim-v2.tar.xz" - # build and install runtime - make \ - PREFIX="/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr" \ - SKIP_GO_VERSION_CHECK=1 \ - QEMUCMD="${qemu_cmd}" + tar 
-xvJpf "${tarfile}" -C "${SNAPCRAFT_PART_INSTALL}" - make install \ - PREFIX=/usr \ - DESTDIR="${SNAPCRAFT_PART_INSTALL}" \ - SKIP_GO_VERSION_CHECK=1 \ - QEMUCMD="${qemu_cmd}" - - if [ ! -f ${SNAPCRAFT_PART_INSTALL}/../../image/install/usr/share/kata-containers/kata-containers.img ]; then - sed -i -e "s|^image =.*|initrd = \"/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr/share/kata-containers/kata-containers-initrd.img\"|" \ - ${SNAPCRAFT_PART_INSTALL}/usr/share/defaults/${SNAPCRAFT_PROJECT_NAME}/configuration.toml - fi + mkdir -p "${SNAPCRAFT_PART_INSTALL}/usr/bin" + ln -sf "${SNAPCRAFT_PART_INSTALL}/opt/kata/bin/containerd-shim-kata-v2" "${SNAPCRAFT_PART_INSTALL}/usr/bin/containerd-shim-kata-v2" + ln -sf "${SNAPCRAFT_PART_INSTALL}/opt/kata/bin/kata-runtime" "${SNAPCRAFT_PART_INSTALL}/usr/bin/kata-runtime" + ln -sf "${SNAPCRAFT_PART_INSTALL}/opt/kata/bin/kata-collect-data.sh" "${SNAPCRAFT_PART_INSTALL}/usr/bin/kata-collect-data.sh" kernel: - after: [godeps] + after: [docker] plugin: nil - build-packages: - - libelf-dev - - curl - - build-essential - - bison - - flex override-build: | source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh" - kernel_version="$(${yq} r $versions_file assets.kernel.version)" - #Remove extra 'v' - kernel_version="${kernel_version#v}" + cd "${SNAPCRAFT_PROJECT_DIR}" + sudo -E NO_TTY=true make kernel-tarball - [ "${arch}" = "s390x" ] && sudo apt-get --no-install-recommends install -y libssl-dev + tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-kernel.tar.xz" - cd "${kata_dir}/tools/packaging/kernel" - kernel_dir_prefix="kata-linux-" - - # Setup and build kernel - ./build-kernel.sh -v "${kernel_version}" -d setup - cd ${kernel_dir_prefix}* - make -j $(nproc ${CI:+--ignore 1}) EXTRAVERSION=".container" - - kernel_suffix="${kernel_version}.container" - kata_kernel_dir="${SNAPCRAFT_PART_INSTALL}/usr/share/kata-containers" - mkdir -p "${kata_kernel_dir}" - - # Install bz kernel - make install INSTALL_PATH="${kata_kernel_dir}" EXTRAVERSION=".container" || true - vmlinuz_name="vmlinuz-${kernel_suffix}" - ln -sf "${vmlinuz_name}" "${kata_kernel_dir}/vmlinuz.container" - - # Install raw kernel - vmlinux_path="vmlinux" - [ "${arch}" = "s390x" ] && vmlinux_path="arch/s390/boot/vmlinux" - vmlinux_name="vmlinux-${kernel_suffix}" - cp "${vmlinux_path}" "${kata_kernel_dir}/${vmlinux_name}" - ln -sf "${vmlinux_name}" "${kata_kernel_dir}/vmlinux.container" + tar -xvJpf "${tarfile}" -C "${SNAPCRAFT_PART_INSTALL}" qemu: plugin: make - after: [godeps] - build-packages: - - gcc - - python3 - - zlib1g-dev - - libcap-ng-dev - - libglib2.0-dev - - libpixman-1-dev - - libnuma-dev - - libltdl-dev - - libcap-dev - - libattr1-dev - - libfdt-dev - - curl - - libcapstone-dev - - bc - - libblkid-dev - - libffi-dev - - libmount-dev - - libseccomp-dev - - libselinux1-dev - - ninja-build + after: [docker] override-build: | source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh" - branch="$(${yq} r ${versions_file} assets.hypervisor.qemu.version)" - url="$(${yq} r ${versions_file} assets.hypervisor.qemu.url)" - commit="" - patches_dir="${kata_dir}/tools/packaging/qemu/patches/$(echo ${branch} | sed -e 's/.[[:digit:]]*$//' -e 's/^v//').x" - patches_version_dir="${kata_dir}/tools/packaging/qemu/patches/tag_patches/${branch}" + cd "${SNAPCRAFT_PROJECT_DIR}" + sudo -E NO_TTY=true make qemu-tarball - # download source - qemu_dir="${SNAPCRAFT_STAGE}/qemu" - rm -rf "${qemu_dir}" - git clone --depth 1 --branch ${branch} --single-branch ${url} "${qemu_dir}" 
- cd "${qemu_dir}" - [ -z "${commit}" ] || git checkout "${commit}" + tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-qemu.tar.xz" - [ -n "$(ls -A ui/keycodemapdb)" ] || git clone --depth 1 https://github.com/qemu/keycodemapdb ui/keycodemapdb/ - [ -n "$(ls -A capstone)" ] || git clone --depth 1 https://github.com/qemu/capstone capstone - - # Apply branch patches - [ -d "${patches_version_dir}" ] || mkdir "${patches_version_dir}" - ${kata_dir}/tools/packaging/scripts/apply_patches.sh "${patches_dir}" - ${kata_dir}/tools/packaging/scripts/apply_patches.sh "${patches_version_dir}" - - # Only x86_64 supports libpmem - [ "${arch}" = "x86_64" ] && sudo apt-get --no-install-recommends install -y apt-utils ca-certificates libpmem-dev - - configure_hypervisor="${kata_dir}/tools/packaging/scripts/configure-hypervisor.sh" - chmod +x "${configure_hypervisor}" - # static build. The --prefix, --libdir, --libexecdir, --datadir arguments are - # based on PREFIX and set by configure-hypervisor.sh - echo "$(PREFIX=/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr ${configure_hypervisor} -s kata-qemu) \ - --disable-rbd " \ - | xargs ./configure - - # Copy QEMU configurations (Kconfigs) - case "${branch}" in - "v5.1.0") - cp -a "${kata_dir}"/tools/packaging/qemu/default-configs/* default-configs - ;; - - *) - cp -a "${kata_dir}"/tools/packaging/qemu/default-configs/* configs/devices/ - ;; - esac - - # build and install - make -j $(nproc ${CI:+--ignore 1}) - make install DESTDIR="${SNAPCRAFT_PART_INSTALL}" - prime: - - -snap/ - - -usr/bin/qemu-ga - - -usr/bin/qemu-pr-helper - - -usr/bin/virtfs-proxy-helper - - -usr/include/ - - -usr/share/applications/ - - -usr/share/icons/ - - -usr/var/ - - usr/* - - lib/* - organize: - # Hack: move qemu to / - "snap/kata-containers/current/": "./" + tar -xvJpf "${tarfile}" -C "${SNAPCRAFT_PART_INSTALL}" virtiofsd: plugin: nil - after: [godeps, rustdeps, docker] + after: [docker] override-build: | source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh" - echo "INFO: Building rust version of virtiofsd" - cd "${SNAPCRAFT_PROJECT_DIR}" - # Clean-up build dir in case it already exists sudo -E NO_TTY=true make virtiofsd-tarball - sudo install \ - --owner='root' \ - --group='root' \ - --mode=0755 \ - -D \ - --target-directory="${SNAPCRAFT_PART_INSTALL}/usr/libexec/" \ - build/virtiofsd/builddir/virtiofsd/virtiofsd + tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-virtiofsd.tar.xz" + + tar -xvJpf "${tarfile}" -C "${SNAPCRAFT_PART_INSTALL}" cloud-hypervisor: plugin: nil - after: [godeps, docker] + after: [docker] override-build: | source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh" @@ -353,13 +157,8 @@ parts: sudo -E NO_TTY=true make cloud-hypervisor-tarball tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-cloud-hypervisor.tar.xz" - tmpdir=$(mktemp -d) - tar -xvJpf "${tarfile}" -C "${tmpdir}" - - install -D "${tmpdir}/opt/kata/bin/cloud-hypervisor" "${SNAPCRAFT_PART_INSTALL}/usr/bin/cloud-hypervisor" - - rm -rf "${tmpdir}" + tar -xvJpf "${tarfile}" -C "${SNAPCRAFT_PART_INSTALL}" fi apps: diff --git a/src/agent/Cargo.lock b/src/agent/Cargo.lock index 282bfc785..eaec36ffb 100644 --- a/src/agent/Cargo.lock +++ b/src/agent/Cargo.lock @@ -35,7 +35,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241" dependencies = [ "cfg-if 1.0.0", - "cipher 0.4.3", + "cipher 
0.4.4", "cpufeatures", ] @@ -68,6 +68,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "aho-corasick" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" +dependencies = [ + "memchr", +] + [[package]] name = "android_system_properties" version = "0.1.5" @@ -88,9 +97,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.68" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" +checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" [[package]] name = "arc-swap" @@ -109,9 +118,9 @@ dependencies = [ [[package]] name = "asn1-rs" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf6690c370453db30743b373a60ba498fc0d6d83b11f4abfd87a84a075db5dd4" +checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" dependencies = [ "asn1-rs-derive", "asn1-rs-impl", @@ -120,7 +129,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -131,7 +140,7 @@ checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] @@ -143,7 +152,7 @@ checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -186,9 +195,9 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17adb73da160dfb475c183343c8cccd80721ea5a605d3eb57125f0a7b7a92d0b" +checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" dependencies = [ "async-lock", "async-task", @@ -200,32 +209,31 @@ dependencies = [ [[package]] name = "async-io" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ "async-lock", "autocfg 1.1.0", + "cfg-if 1.0.0", "concurrent-queue", "futures-lite", - "libc", "log", "parking", "polling", + "rustix 0.37.15", "slab", "socket2", "waker-fn", - "windows-sys 0.42.0", ] [[package]] name = "async-lock" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", - "futures-lite", ] [[package]] @@ -236,24 +244,24 @@ checksum = "d7d78656ba01f1b93024b7c3a0467f1608e4be67d725749fdcd7d2c7678fd7a2" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "async-task" -version = "4.3.0" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" +checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-trait" -version = "0.1.66" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -288,27 +296,39 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + [[package]] name = "base64" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" + [[package]] name = "base64-serde" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e964e3e0a930303c7c0bdb28ebf691dd98d9eee4b8b68019d2c995710b58a18" dependencies = [ - "base64", + "base64 0.13.1", "serde", ] [[package]] name = "base64ct" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bincode" @@ -347,7 +367,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd9e32d7420c85055e8107e5b2463c4eeefeaac18b52359fe9f9c08a18f342b2" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -373,9 +393,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] @@ -409,29 +429,31 @@ dependencies = [ [[package]] name = "bstr" -version = "0.2.17" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" +checksum = "c3d4260bcc2e8fc9df1eac4919a720effeb63a3f0952f5bf4944adfa18897f09" dependencies = [ "memchr", + "serde", ] [[package]] name = "buffered-reader" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9f82920285502602088677aeb65df0909b39c347b38565e553ba0363c242f65" +checksum = "d0dd286184b392a1ce6b3deecd073f0330df194bf935b87f852147d50d0d2d18" dependencies = [ "bzip2", "flate2", + "lazy_static", "libc", ] [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "9b1ce199063694f33ffb7dd4e0ee620741495c32833cde5aa08f02a0bf96f0c8" [[package]] name = "byte-unit" @@ -457,15 +479,15 @@ dependencies = [ [[package]] name = "bytes" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" 
[[package]] name = "bzip2" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6afcd980b5f3a45017c57e57a2fcccbb351cc43a356ce117ef760ef8052b89b0" +checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8" dependencies = [ "bzip2-sys", "libc", @@ -510,7 +532,7 @@ dependencies = [ "cached_proc_macro_types", "darling 0.13.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -553,9 +575,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" dependencies = [ "jobserver", ] @@ -587,9 +609,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "js-sys", @@ -612,9 +634,9 @@ dependencies = [ [[package]] name = "cipher" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1873270f8f7942c191139cb8a40fd228da6c3fd2fc376d7e92d47aa14aeb59e" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", @@ -622,9 +644,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.23" +version = "3.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" +checksum = "eef2b3ded6a26dfaec672a742c93c8cf6b689220324da509ec5caa20de55dc83" dependencies = [ "atty", "bitflags", @@ -639,15 +661,15 @@ dependencies = [ [[package]] name = "clap_derive" -version = "3.2.18" +version = "3.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65" +checksum = "d756c5824fc5c0c1ee8e36000f576968dbcb2081def956c83fad6f40acd46f96" dependencies = [ - "heck 0.4.0", + "heck 0.4.1", "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -687,9 +709,9 @@ checksum = "2382f75942f4b3be3690fe4f86365e9c853c1587d6ee58212cebf6e2a9ccd101" [[package]] name = "concurrent-queue" -version = "2.0.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7bef69dc86e3c610e4e7aed41035e2a7ed12e72dd7530f61327a6579a4390b" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ "crossbeam-utils", ] @@ -706,6 +728,12 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" +[[package]] +name = "const-oid" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" + [[package]] name = "core-foundation" version = "0.9.3" @@ -718,15 +746,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = 
"e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" dependencies = [ "libc", ] @@ -742,9 +770,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -752,9 +780,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if 1.0.0", ] @@ -775,6 +803,18 @@ dependencies = [ "subtle", ] +[[package]] +name = "crypto-bigint" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -821,7 +861,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" dependencies = [ - "cipher 0.4.3", + "cipher 0.4.4", ] [[package]] @@ -839,9 +879,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.85" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5add3fc1717409d029b20c5b6903fc0c0b02fa6741d820054f4a2efa5e5816fd" +checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" dependencies = [ "cc", "cxxbridge-flags", @@ -851,9 +891,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.85" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c87959ba14bc6fbc61df77c3fcfe180fc32b93538c4f1031dd802ccb5f2ff0" +checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" dependencies = [ "cc", "codespan-reporting", @@ -861,24 +901,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn", + "syn 2.0.15", ] [[package]] name = "cxxbridge-flags" -version = "1.0.85" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69a3e162fde4e594ed2b07d0f83c6c67b745e7f28ce58c6df5e6b6bef99dfb59" +checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" [[package]] name = "cxxbridge-macro" -version = "1.0.85" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e7e2adeb6a0d4a282e581096b06e1791532b7d576dcde5ccd9382acf55db8e6" +checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -893,12 +933,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.2" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ - "darling_core 0.14.2", - "darling_macro 0.14.2", + "darling_core 0.14.4", + "darling_macro 0.14.4", ] [[package]] @@ -912,21 +952,21 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn", + "syn 1.0.109", ] [[package]] name = "darling_core" -version = "0.14.2" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim", - "syn", + "syn 1.0.109", ] [[package]] @@ -937,18 +977,18 @@ checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "darling_macro" -version = "0.14.2" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ - "darling_core 0.14.2", + "darling_core 0.14.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -983,15 +1023,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" dependencies = [ "const-oid 0.7.1", - "crypto-bigint", - "pem-rfc7468", + "crypto-bigint 0.3.2", + "pem-rfc7468 0.3.1", +] + +[[package]] +name = "der" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +dependencies = [ + "const-oid 0.9.2", + "pem-rfc7468 0.6.0", + "zeroize", ] [[package]] name = "der-parser" -version = "8.1.0" +version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d4bc9b0db0a0df9ae64634ac5bdefb7afcb534e182275ca0beadbe486701c1" +checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" dependencies = [ "asn1-rs", "displaydoc", @@ -1009,7 +1060,7 @@ checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1020,7 +1071,7 @@ checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1038,10 +1089,10 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" dependencies = [ - "darling 0.14.2", + "darling 0.14.4", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1051,7 +1102,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" dependencies = [ "derive_builder_core", - "syn", + "syn 1.0.109", ] [[package]] @@ -1086,7 +1137,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", "subtle", ] @@ -1140,7 +1191,7 @@ checksum = 
"3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1151,9 +1202,9 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "dyn-clone" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9b0705efd4599c15a38151f4721f7bc388306f61084d3bfd50bd07fbca5cb60" +checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30" [[package]] name = "eax" @@ -1175,18 +1226,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34d33b390ab82f2e1481e331dbd0530895640179d2128ef9a79cc690b78d1eba" dependencies = [ "der 0.3.5", - "elliptic-curve", + "elliptic-curve 0.9.12", "hmac 0.11.0", - "signature", + "signature 1.3.2", +] + +[[package]] +name = "ecdsa" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12844141594ad74185a926d030f3b605f6a903b4e3fec351f3ea338ac5b7637e" +dependencies = [ + "der 0.6.1", + "elliptic-curve 0.12.3", + "rfc6979", + "signature 2.0.0", ] [[package]] name = "ed25519" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" dependencies = [ - "signature", + "signature 1.3.2", ] [[package]] @@ -1204,9 +1267,9 @@ dependencies = [ [[package]] name = "either" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "elliptic-curve" @@ -1215,9 +1278,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c13e9b0c3c4170dcc2a12783746c4205d98e18957f57854251eea3f9750fe005" dependencies = [ "bitvec", - "ff", + "ff 0.9.0", "generic-array", - "group", + "group 0.9.0", "pkcs8 0.6.1", "rand_core 0.6.4", "subtle", @@ -1225,28 +1288,50 @@ dependencies = [ ] [[package]] -name = "ena" -version = "0.14.0" +name = "elliptic-curve" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7402b94a93c24e742487327a7cd839dc9d36fec9de9fb25b09f2dae459f36c3" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +dependencies = [ + "base16ct", + "crypto-bigint 0.4.9", + "der 0.6.1", + "digest 0.10.6", + "ff 0.12.1", + "generic-array", + "group 0.12.1", + "hkdf", + "pem-rfc7468 0.6.0", + "pkcs8 0.9.0", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + +[[package]] +name = "ena" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c533630cf40e9caa44bd91aadc88a75d75a4c3a12b4cfde353cbed41daa1e1f1" dependencies = [ "log", ] [[package]] name = "encoding_rs" -version = "0.8.31" +version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "enumflags2" -version = "0.7.5" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e75d4cd21b95383444831539909fbb14b9dc3fdceb2a6f5d36577329a1f55ccb" 
+checksum = "c041f5090df68b32bcd905365fd51769c8b9d553fe87fde0b683534f10c01bd2" dependencies = [ "enumflags2_derive", "serde", @@ -1254,24 +1339,24 @@ dependencies = [ [[package]] name = "enumflags2_derive" -version = "0.7.4" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f58dc3c5e468259f19f2d46304a6b28f1c3d034442e14b322d2b850e36f6d5ae" +checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "errno" -version = "0.2.8" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -1303,9 +1388,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -1322,15 +1407,25 @@ dependencies = [ ] [[package]] -name = "filetime" -version = "0.2.19" +name = "ff" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e884668cd0c7480504233e951174ddc3b382f7c2666e3b7310b5c4e7b0c37f9" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "filetime" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cbc844cecaee9d4443931972e1289c8ff485cb4cc2767cb03ca139ed6885153" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", - "windows-sys 0.42.0", + "redox_syscall 0.2.16", + "windows-sys 0.48.0", ] [[package]] @@ -1365,7 +1460,7 @@ dependencies = [ "futures-sink", "nanorand", "pin-project", - "spin 0.9.4", + "spin 0.9.8", ] [[package]] @@ -1406,9 +1501,9 @@ checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" [[package]] name = "futures" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -1421,9 +1516,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -1431,15 +1526,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" 
+checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -1448,15 +1543,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ "fastrand", "futures-core", @@ -1469,32 +1564,32 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "futures-sink" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-util" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -1510,9 +1605,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -1533,9 +1628,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -1553,22 +1648,22 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "glob" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a1e17342619edbc21a964c2afbeb6c820c6a2560032872f397bb97ea127bd0a" +checksum = 
"029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" dependencies = [ - "aho-corasick", + "aho-corasick 0.7.20", "bstr", "fnv", "log", @@ -1581,18 +1676,29 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61b3c1e8b4f1ca07e6605ea1be903a5f6956aec5c8a67fd44d56076631675ed8" dependencies = [ - "ff", + "ff 0.9.0", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff 0.12.1", "rand_core 0.6.4", "subtle", ] [[package]] name = "h2" -version = "0.3.15" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "fnv", "futures-core", "futures-sink", @@ -1601,7 +1707,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.8", "tracing", ] @@ -1622,9 +1728,9 @@ dependencies = [ [[package]] name = "heck" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" @@ -1644,12 +1750,27 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + [[package]] name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hkdf" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +dependencies = [ + "hmac 0.12.1", +] + [[package]] name = "hmac" version = "0.11.0" @@ -1671,20 +1792,20 @@ dependencies = [ [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "fnv", "itoa", ] [[package]] name = "http-auth" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0b40b39d66c28829a0cf4d09f7e139ff8201f7500a5083732848ed3b4b4d850" +checksum = "5430cacd7a1f9a02fbeb350dfc81a0e5ed42d81f3398cb0ba184017f85bdcfbc" dependencies = [ "memchr", ] @@ -1695,7 +1816,7 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "http", "pin-project-lite", ] @@ -1714,11 +1835,11 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.23" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" +checksum = 
"ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "futures-channel", "futures-core", "futures-util", @@ -1742,7 +1863,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "hyper", "native-tls", "tokio", @@ -1751,16 +1872,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows", ] [[package]] @@ -1807,7 +1928,7 @@ dependencies = [ "anyhow", "async-compression", "async-trait", - "base64", + "base64 0.13.1", "cfg-if 1.0.0", "flate2", "flume", @@ -1831,8 +1952,8 @@ dependencies = [ "strum_macros", "tar", "tokio", - "ttrpc 0.7.1", - "ttrpc-codegen 0.4.1", + "ttrpc", + "ttrpc-codegen", "url", "walkdir", "zstd", @@ -1840,9 +1961,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg 1.1.0", "hashbrown", @@ -1890,12 +2011,13 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.3" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46112a93252b123d31a119a8d1a1ac19deac4fac6e0e8b0df58f0d4e5870e63c" +checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" dependencies = [ + "hermit-abi 0.3.1", "libc", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -1909,9 +2031,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.7.0" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11b0d96e660696543b251e58030cf9787df56da39dab19ad60eae7353040917e" +checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "ipnetwork" @@ -1922,6 +2044,18 @@ dependencies = [ "serde", ] +[[package]] +name = "is-terminal" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" +dependencies = [ + "hermit-abi 0.3.1", + "io-lifetimes", + "rustix 0.37.15", + "windows-sys 0.48.0", +] + [[package]] name = "itertools" version = "0.10.5" @@ -1933,27 +2067,27 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "jobserver" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" dependencies = [ "libc", ] [[package]] name = "josekit" -version = "0.8.1" +version = 
"0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee6af62ad98bdf699ad2ecc8323479a1fdc7aa5faa6043d93119d83f6c5fca8" +checksum = "33a96c4f2128a6f44ecf7c36df2b03dddf5a07b060a4d5ebc0a81e9821f7c60e" dependencies = [ "anyhow", - "base64", + "base64 0.21.0", "flate2", "once_cell", "openssl", @@ -1961,14 +2095,14 @@ dependencies = [ "serde", "serde_json", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] name = "js-sys" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" dependencies = [ "wasm-bindgen", ] @@ -1979,7 +2113,7 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6204285f77fe7d9784db3fdc449ecce1a0114927a51d5a41c4c7a292011c015f" dependencies = [ - "base64", + "base64 0.13.1", "crypto-common", "digest 0.10.6", "hmac 0.12.1", @@ -2016,7 +2150,7 @@ dependencies = [ "opentelemetry", "procfs 0.12.0", "prometheus", - "protobuf 2.28.0", + "protobuf 3.2.0", "protocols", "regex", "rtnetlink", @@ -2038,7 +2172,7 @@ dependencies = [ "tracing", "tracing-opentelemetry", "tracing-subscriber", - "ttrpc 0.6.1", + "ttrpc", "url", "vsock-exporter", "which", @@ -2072,7 +2206,7 @@ name = "kata-types" version = "0.1.0" dependencies = [ "anyhow", - "base64", + "base64 0.13.1", "bitmask-enum", "byte-unit", "glob", @@ -2099,20 +2233,20 @@ dependencies = [ [[package]] name = "lalrpop" -version = "0.19.8" +version = "0.19.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30455341b0e18f276fa64540aff54deafb54c589de6aca68659c63dd2d5d823" +checksum = "06ada7ece1f5bc6d36eec2a4dc204135f14888209b3773df8fefcfe990fd4cbc" dependencies = [ "ascii-canvas", - "atty", "bit-set", "diff", "ena", + "is-terminal", "itertools", "lalrpop-util", - "petgraph 0.6.2", + "petgraph 0.6.3", "regex", - "regex-syntax", + "regex-syntax 0.6.29", "string_cache", "term", "tiny-keccak", @@ -2121,9 +2255,9 @@ dependencies = [ [[package]] name = "lalrpop-util" -version = "0.19.8" +version = "0.19.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf796c978e9b4d983414f4caedc9273aa33ee214c5b887bd55fde84c85d2dc4" +checksum = "3d3b45d694c8074f77bc24fc26e47633c862a9cd3b48dd51209c02ba4c434d68" [[package]] name = "lazy_static" @@ -2136,9 +2270,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.139" +version = "0.2.142" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "6a987beff54b60ffa6d51982e1aa1146bc42f19bd26be28b0586f252fccf5317" [[package]] name = "libm" @@ -2185,6 +2319,12 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" +[[package]] +name = "linux-raw-sys" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36eb31c1778188ae1e64398743890d0877fef36d11521ac60406b42016e8c2cf" + [[package]] name = "lock_api" version = "0.4.9" @@ -2270,15 +2410,15 @@ dependencies = [ [[package]] name = "memsec" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ac78937f19a0c7807e45a931eac41f766f210173ec664ec046d58e6d388a5cb" +checksum = 
"0fa0916b001582d253822171bd23f4a0229d32b9507fae236f5da8cad515ba7c" [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "minimal-lexical" @@ -2297,14 +2437,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -2319,7 +2459,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -2384,7 +2524,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddd06e90449ae973fe3888c1ff85949604ef5189b4ac9a2ae39518da1e00762d" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "futures", "log", "netlink-packet-core", @@ -2477,9 +2617,9 @@ dependencies = [ [[package]] name = "nom" -version = "7.1.2" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5507769c4919c998e69e49c839d9dc6e693ede4cc4290d6ad8b41d4f09c548c" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", @@ -2591,9 +2731,9 @@ version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eeaf26a72311c087f8c5ba617c96fac67a5c04f430e716ac8d8ab2de62e23368" dependencies = [ - "base64", + "base64 0.13.1", "chrono", - "getrandom 0.2.8", + "getrandom 0.2.9", "http", "rand 0.8.5", "reqwest", @@ -2635,7 +2775,7 @@ dependencies = [ "sha2 0.10.6", "thiserror", "tokio", - "tokio-util 0.7.4", + "tokio-util 0.7.8", "tracing", "unicase", ] @@ -2661,7 +2801,7 @@ dependencies = [ "aes 0.8.2", "anyhow", "async-trait", - "base64", + "base64 0.13.1", "base64-serde", "ctr 0.9.2", "hmac 0.12.1", @@ -2674,7 +2814,7 @@ dependencies = [ "serde_json", "sha2 0.10.6", "tokio", - "ttrpc 0.7.1", + "ttrpc", ] [[package]] @@ -2697,9 +2837,9 @@ dependencies = [ [[package]] name = "olpc-cjson" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87dc75cf72208cd853671c1abccc5d5d1e43b1e378dde67340ef933219a8c13c" +checksum = "d637c9c15b639ccff597da8f4fa968300651ad2f1e968aefc3b4927a6fb2027a" dependencies = [ "serde", "serde_json", @@ -2708,9 +2848,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "opaque-debug" @@ -2730,11 +2870,11 @@ dependencies = [ [[package]] name = "openidconnect" -version = "2.4.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87af7097640fedbe64718ac1c9b0549d72da747a3f527cd089215f96c6f691d5" +checksum = 
"98dd5b7049bac4fdd2233b8c9767d42c05da8006fdb79cc903258556d2b18009" dependencies = [ - "base64", + "base64 0.13.1", "chrono", "http", "itertools", @@ -2748,15 +2888,18 @@ dependencies = [ "serde_derive", "serde_json", "serde_path_to_error", + "serde_plain", + "serde_with", + "subtle", "thiserror", "url", ] [[package]] name = "openssl" -version = "0.10.45" +version = "0.10.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" +checksum = "01b8574602df80f7b85fdfc5392fa884a4e3b3f4f35402c070ab34c3d3f78d56" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -2769,13 +2912,13 @@ dependencies = [ [[package]] name = "openssl-macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -2786,20 +2929,19 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.24.0+1.1.1s" +version = "111.25.3+1.1.1t" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3498f259dab01178c6228c6b00dcef0ed2a2d5e20d648c017861227773ea4abd" +checksum = "924757a6a226bf60da5f7dd0311a34d2b52283dd82ddeb103208ddc66362f80c" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.80" +version = "0.9.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +checksum = "8e17f59264b2809d77ae94f0e1ebabc434773f370d6ca667bd223ea10e06cc7e" dependencies = [ - "autocfg 1.1.0", "cc", "libc", "openssl-src", @@ -2848,9 +2990,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.4.1" +version = "6.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" +checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" [[package]] name = "p256" @@ -2858,16 +3000,40 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f05f5287453297c4c16af5e2b04df8fd2a3008d70f252729650bc6d7ace5844" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.11.1", + "elliptic-curve 0.9.12", "sha2 0.9.9", ] [[package]] -name = "parking" -version = "2.0.0" +name = "p256" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +checksum = "49c124b3cbce43bcbac68c58ec181d98ed6cc7e6d0aa7c3ba97b2563410b0e55" +dependencies = [ + "ecdsa 0.15.1", + "elliptic-curve 0.12.3", + "primeorder", + "sha2 0.10.6", +] + +[[package]] +name = "p384" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "630a4a9b2618348ececfae61a4905f564b817063bf2d66cdfc2ced523fe1d2d4" +dependencies = [ + "ecdsa 0.15.1", + "elliptic-curve 0.12.3", + "primeorder", + "sha2 0.10.6", +] + +[[package]] +name = "parking" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" [[package]] name = "parking_lot" @@ -2887,7 +3053,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.5", + "parking_lot_core 0.9.7", ] [[package]] @@ -2899,29 +3065,29 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "winapi", ] [[package]] name = "parking_lot_core" -version = "0.9.5" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] name = "paste" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "path-absolutize" @@ -2972,18 +3138,18 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" dependencies = [ - "base64", + "base64 0.13.1", "once_cell", "regex", ] [[package]] name = "pem" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" dependencies = [ - "base64", + "base64 0.13.1", ] [[package]] @@ -2995,6 +3161,15 @@ dependencies = [ "base64ct", ] +[[package]] +name = "pem-rfc7468" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.2.0" @@ -3013,9 +3188,9 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" +checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ "fixedbitset 0.4.2", "indexmap", @@ -3032,20 +3207,21 @@ dependencies = [ [[package]] name = "picky" -version = "7.0.0-rc.3" +version = "7.0.0-rc.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b467d8082dcc552d4ca8c9aecdc94a09b0e092b961c542bb78b6feff8f1b3ea" +checksum = "72ac7d98dfb5e53cdea76b70df8d5e8dd7717a2d685a12f54c547e03b5afd76a" dependencies = [ - "base64", + "base64 0.13.1", "digest 0.10.6", "md-5 0.10.5", "num-bigint-dig 0.8.2", "oid", - "picky-asn1 0.6.0", + "p256 0.12.0", + "p384", + "picky-asn1", "picky-asn1-der", "picky-asn1-x509", "rand 0.8.5", - "ring", "rsa 0.6.1", "serde", "sha-1 0.10.1", @@ -3056,20 +3232,9 @@ dependencies = [ [[package]] name = "picky-asn1" -version = "0.5.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1088a7f82ee21e534da0f62b074b559d2a0717b0d5104ba7a47c1f5bc6c83f69" -dependencies = [ - "oid", - "serde", - "serde_bytes", -] - -[[package]] -name = "picky-asn1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b7a3f07db0e5b22727979a992df18c78170c7c30279ab4149a395c0c3843832" +checksum = 
"6f338f1fd4f3e13e75e986ca29f2a3c62528d88d3cbadf4afdcefb6b087f2d32" dependencies = [ "oid", "serde", @@ -3079,25 +3244,25 @@ dependencies = [ [[package]] name = "picky-asn1-der" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de76bf631e2f2064f78d7f1ea8a57cb0445d83138cd5fac67274d50b0f6053c2" +checksum = "e47267a46f4ea246b772381970b8ed3f15963dd3e15ffc2c3f4ac3bc2d77384b" dependencies = [ - "picky-asn1 0.5.0", + "picky-asn1", "serde", "serde_bytes", ] [[package]] name = "picky-asn1-x509" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25ffcd92e3f788f0f76506f3b86310876cc0014ade835d68a6365ee0fd1009dc" +checksum = "fdb51541f90aa99f2fa7191c8daebc224d500cd5963c6ca3e6cede9645a1b2e1" dependencies = [ - "base64", + "base64 0.13.1", "num-bigint-dig 0.8.2", "oid", - "picky-asn1 0.6.0", + "picky-asn1", "picky-asn1-der", "serde", "zeroize", @@ -3120,7 +3285,7 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3167,6 +3332,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "pkcs8" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der 0.6.1", + "spki 0.6.0", +] + [[package]] name = "pkg-config" version = "0.3.26" @@ -3175,16 +3350,18 @@ checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "polling" -version = "2.5.2" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg 1.1.0", + "bitflags", "cfg-if 1.0.0", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "windows-sys 0.42.0", + "pin-project-lite", + "windows-sys 0.48.0", ] [[package]] @@ -3200,14 +3377,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] -name = "proc-macro-crate" -version = "1.2.1" +name = "primeorder" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" +checksum = "0b54f7131b3dba65a2f414cf5bd25b66d4682e4608610668eae785750ba4c5b2" +dependencies = [ + "elliptic-curve 0.12.3", +] + +[[package]] +name = "proc-macro-crate" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "thiserror", - "toml", + "toml_edit", ] [[package]] @@ -3219,7 +3404,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -3236,9 +3421,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.49" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] @@ -3268,7 +3453,7 @@ dependencies = [ "byteorder", "hex", "lazy_static", - "rustix", + "rustix 0.36.13", ] [[package]] @@ 
-3294,7 +3479,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "prost-derive", ] @@ -3304,7 +3489,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "heck 0.3.3", "itertools", "log", @@ -3326,7 +3511,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3335,7 +3520,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "prost", ] @@ -3344,10 +3529,6 @@ name = "protobuf" version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" -dependencies = [ - "serde", - "serde_derive", -] [[package]] name = "protobuf" @@ -3384,26 +3565,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "protobuf-codegen-pure" -version = "2.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a29399fc94bcd3eeaa951c715f7bea69409b2445356b00519740bcd6ddd865" -dependencies = [ - "protobuf 2.28.0", - "protobuf-codegen 2.28.0", -] - -[[package]] -name = "protobuf-codegen-pure3" -version = "2.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0a3cf0a7de7570cb67bfb9a9a585b5841b49790a1be0ef104340a2110b91135" -dependencies = [ - "protobuf 2.28.0", - "protobuf-codegen 2.28.0", -] - [[package]] name = "protobuf-parse" version = "3.2.0" @@ -3435,16 +3596,16 @@ version = "0.1.0" dependencies = [ "async-trait", "oci", - "protobuf 2.28.0", - "ttrpc 0.6.1", - "ttrpc-codegen 0.2.2", + "protobuf 3.2.0", + "ttrpc", + "ttrpc-codegen", ] [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -3514,7 +3675,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -3535,26 +3696,35 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.8", - "redox_syscall", + "getrandom 0.2.9", + "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" dependencies = [ - "aho-corasick", + 
"aho-corasick 1.0.1", "memchr", - "regex-syntax", + "regex-syntax 0.7.1", ] [[package]] @@ -3563,32 +3733,29 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", ] [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] -name = "remove_dir_all" -version = "0.5.3" +name = "regex-syntax" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] +checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" [[package]] name = "reqwest" -version = "0.11.13" +version = "0.11.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" +checksum = "27b71749df584b7f4cac2c426c127a7c785a5106cc98f7a8feb044115f0fa254" dependencies = [ - "base64", - "bytes 1.3.0", + "base64 0.21.0", + "bytes 1.4.0", "encoding_rs", "futures-core", "futures-util", @@ -3610,15 +3777,27 @@ dependencies = [ "serde_urlencoded", "tokio", "tokio-native-tls", - "tokio-util 0.7.4", + "tokio-util 0.7.8", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", + "wasm-streams", "web-sys", "winreg", ] +[[package]] +name = "rfc6979" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" +dependencies = [ + "crypto-bigint 0.4.9", + "hmac 0.12.1", + "zeroize", +] + [[package]] name = "ring" version = "0.16.20" @@ -3722,16 +3901,30 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.6" +version = "0.36.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4feacf7db682c6c329c4ede12649cd36ecab0f3be5b7d74e6a20304725db4549" +checksum = "3a38f9520be93aba504e8ca974197f46158de5dcaa9fa04b57c57cd6a679d658" dependencies = [ "bitflags", "errno", "io-lifetimes", "libc", - "linux-raw-sys", - "windows-sys 0.42.0", + "linux-raw-sys 0.1.4", + "windows-sys 0.45.0", +] + +[[package]] +name = "rustix" +version = "0.37.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0661814f891c57c930a610266415528da53c4933e6dea5fb350cbfe048a9ece" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.4", + "windows-sys 0.48.0", ] [[package]] @@ -3754,7 +3947,7 @@ dependencies = [ "nix 0.24.3", "oci", "path-absolutize 1.2.1", - "protobuf 2.28.0", + "protobuf 3.2.0", "protocols", "regex", "rlimit", @@ -3775,15 +3968,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = 
"f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "same-file" @@ -3805,12 +3998,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" +checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "lazy_static", - "windows-sys 0.36.1", + "windows-sys 0.42.0", ] [[package]] @@ -3821,15 +4013,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" + +[[package]] +name = "sec1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +dependencies = [ + "base16ct", + "der 0.6.1", + "generic-array", + "pkcs8 0.9.0", + "subtle", + "zeroize", +] [[package]] name = "security-framework" -version = "2.7.0" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" +checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ "bitflags", "core-foundation", @@ -3840,9 +4046,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" dependencies = [ "core-foundation-sys", "libc", @@ -3850,13 +4056,13 @@ dependencies = [ [[package]] name = "sequoia-openpgp" -version = "1.12.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aab653aad177cc40f34dbbc119127715f609d7e28481595506f1645bc67cd53" +checksum = "e5f5aa2f8ea2ab9690c2839a16f7bf335f404fa97fad627c8e828b302b1a5c18" dependencies = [ "aes 0.6.0", "anyhow", - "base64", + "base64 0.13.1", "block-modes", "block-padding", "blowfish", @@ -3869,11 +4075,11 @@ dependencies = [ "digest 0.9.0", "dyn-clone", "eax", - "ecdsa", + "ecdsa 0.11.1", "ed25519-dalek", "flate2", "generic-array", - "getrandom 0.2.8", + "getrandom 0.2.9", "idea", "idna", "lalrpop", @@ -3883,11 +4089,12 @@ dependencies = [ "md-5 0.9.1", "memsec", "num-bigint-dig 0.6.1", - "p256", + "once_cell", + "p256 0.8.1", "rand 0.7.3", "rand_core 0.6.4", "regex", - "regex-syntax", + "regex-syntax 0.6.29", "ripemd160", "rsa 0.3.0", "sha-1 0.9.8", @@ -3902,9 +4109,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.152" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" dependencies = [ "serde_derive", ] @@ -3921,29 +4128,29 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718dc5fff5b36f99093fc49b280cfc96ce6fc824317783bff5a1fed0c7a64819" +checksum = 
"416bda436f9aab92e02c8e10d49a15ddd339cea90b6e340fe51ed97abb548294" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "serde_json" -version = "1.0.91" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" +checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "indexmap", "itoa", @@ -3953,9 +4160,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b04f22b563c91331a10074bda3dd5492e3cc39d56bd557e91c0af42b6c7341" +checksum = "f7f05c1d5476066defcdfacce1f52fc3cae3af1d3089727100c02ae92e5abbe0" dependencies = [ "serde", ] @@ -3971,13 +4178,13 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a5ec9fa74a20ebbe5d9ac23dac1fc96ba0ecfe9f50f2843b52e537b10fbcb4e" +checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -3992,6 +4199,28 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_with" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" +dependencies = [ + "serde", + "serde_with_macros", +] + +[[package]] +name = "serde_with_macros" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" +dependencies = [ + "darling 0.13.4", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "serde_yaml" version = "0.8.26" @@ -4023,7 +4252,7 @@ checksum = "b2acd6defeddb41eb60bb468f8825d0cfd0c2a76bc03bfd235b6a1dc4f6a1ad5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -4101,9 +4330,9 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" +checksum = "54c2bb1a323307527314a36bfb73f24febb08ce2b8a554bf4ffd6f51ad15198c" dependencies = [ "digest 0.10.6", "keccak", @@ -4120,9 +4349,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -4137,6 +4366,16 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "signature" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fe458c98333f9c8152221191a77e2a44e8325d0193484af2e9421a53019e57d" +dependencies = [ + "digest 0.10.6", + "rand_core 0.6.4", +] + [[package]] name = "sigstore" version = "0.3.3" @@ -4144,14 +4383,14 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9fffeeee5fc70b65f62e2b75b4bf5a7fbb54867a949c32088be67032d6eded74" dependencies = [ "async-trait", - "base64", + "base64 0.13.1", "cached", "lazy_static", "oci-distribution", "olpc-cjson", "open", "openidconnect", - "pem 1.1.0", + "pem 1.1.1", "picky", "regex", "ring", @@ -4185,9 +4424,9 @@ checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg 1.1.0", ] @@ -4225,7 +4464,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -4272,17 +4511,17 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "475b3bbe5245c26f2d8a6f62d67c1f30eb9fffeccee721c45d162c3ebbdf81b2" dependencies = [ - "heck 0.4.0", + "heck 0.4.1", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", @@ -4296,9 +4535,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spin" -version = "0.9.4" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" dependencies = [ "lock_api", ] @@ -4322,6 +4561,16 @@ dependencies = [ "der 0.5.1", ] +[[package]] +name = "spki" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +dependencies = [ + "base64ct", + "der 0.6.1", +] + [[package]] name = "static_assertions" version = "1.1.0" @@ -4330,9 +4579,9 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "string_cache" -version = "0.8.4" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213494b7a2b503146286049378ce02b482200519accc31872ee8be91fa820a08" +checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" dependencies = [ "new_debug_unreachable", "once_cell", @@ -4362,11 +4611,11 @@ version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ - "heck 0.4.0", + "heck 0.4.1", "proc-macro2", "quote", "rustversion", - "syn", + "syn 1.0.109", ] [[package]] @@ -4387,9 +4636,20 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.15" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" dependencies = [ "proc-macro2", "quote", @@ -4404,7 +4664,7 @@ checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "unicode-xid", ] @@ -4433,16 +4693,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.3.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if 1.0.0", "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", + "redox_syscall 0.3.5", + "rustix 0.37.15", + "windows-sys 0.45.0", ] [[package]] @@ -4458,9 +4717,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] @@ -4480,30 +4739,31 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if 1.0.0", "once_cell", ] @@ -4520,9 +4780,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.17" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ "itoa", "serde", @@ -4538,9 +4798,9 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" dependencies = [ "time-core", ] @@ -4565,20 +4825,19 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = 
"1.23.0" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eab6d665857cc6ca78d6e80303a02cea7a7851e85dfbd77cbdc09bd129f1ef46" +checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" dependencies = [ "autocfg 1.1.0", - "bytes 1.3.0", + "bytes 1.4.0", "libc", - "memchr", "mio", "num_cpus", "parking_lot 0.12.1", @@ -4586,25 +4845,25 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "1.8.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "tokio-native-tls" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", "tokio", @@ -4612,9 +4871,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite", @@ -4627,7 +4886,7 @@ version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "futures-core", "futures-sink", "log", @@ -4637,11 +4896,11 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.4" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ - "bytes 1.3.0", + "bytes 1.4.0", "futures-core", "futures-io", "futures-sink", @@ -4652,9 +4911,9 @@ dependencies = [ [[package]] name = "tokio-vsock" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d2ad075b54bbb450ae2e3770211d7954362a488fcd386085c9fbb6d787ade8b" +checksum = "9b33556828911d16e24d8b5d336446b0bf6b4b9bfda52cbdc2fa35b7a2862ebc" dependencies = [ "bytes 0.4.12", "futures", @@ -4665,13 +4924,30 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] +[[package]] +name = "toml_datetime" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" + +[[package]] +name = "toml_edit" +version = "0.19.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" +dependencies = [ + "indexmap", + "toml_datetime", + "winnow", +] + [[package]] name = "tough" 
version = "0.12.5" @@ -4685,7 +4961,7 @@ dependencies = [ "log", "olpc-cjson", "path-absolutize 3.0.14", - "pem 1.1.0", + "pem 1.1.1", "percent-encoding", "reqwest", "ring", @@ -4707,11 +4983,10 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "cf9cf6a813d3f40c88b0b6b6f29a5c95c6cdbf97c1f9cc53fb820200f5ad814d" dependencies = [ - "cfg-if 1.0.0", "log", "pin-project-lite", "tracing-attributes", @@ -4720,13 +4995,13 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] @@ -4797,28 +5072,9 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" - -[[package]] -name = "ttrpc" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ecfff459a859c6ba6668ff72b34c2f1d94d9d58f7088414c2674ad0f31cc7d8" -dependencies = [ - "async-trait", - "byteorder", - "futures", - "libc", - "log", - "nix 0.23.2", - "protobuf 2.28.0", - "protobuf-codegen-pure", - "thiserror", - "tokio", - "tokio-vsock", -] +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "ttrpc" @@ -4841,41 +5097,14 @@ dependencies = [ [[package]] name = "ttrpc-codegen" -version = "0.2.2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df80affc2cf8c589172b05ba2b8e8a88722ebf4e28b86604615497a8b6fb78c0" -dependencies = [ - "protobuf 2.28.0", - "protobuf-codegen 2.28.0", - "protobuf-codegen-pure3", - "ttrpc-compiler 0.4.2", -] - -[[package]] -name = "ttrpc-codegen" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3eea6e6c3db96d8d6c5513de779b9ef1a0ae8371fd2adfe5f03eef1cc6b8320" +checksum = "94d7f7631d7a9ebed715a47cd4cb6072cbc7ae1d4ec01598971bbec0024340c2" dependencies = [ "protobuf 2.28.0", "protobuf-codegen 3.2.0", "protobuf-support", - "ttrpc-compiler 0.6.1", -] - -[[package]] -name = "ttrpc-compiler" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8db19ce6af25713061dd805d6733b6f0c45904bd63526ce5d2568c858b7edc71" -dependencies = [ - "derive-new", - "prost", - "prost-build", - "prost-types", - "protobuf 2.28.0", - "protobuf-codegen 2.28.0", - "tempfile", + "ttrpc-compiler", ] [[package]] @@ -4931,15 +5160,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = 
"e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -4952,9 +5181,9 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" [[package]] name = "unicode-width" @@ -5039,12 +5268,11 @@ checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi", "winapi-util", ] @@ -5078,9 +5306,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -5088,24 +5316,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.33" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -5115,9 +5343,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5125,47 +5353,51 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" +checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" + +[[package]] +name = "wasm-streams" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6bbae3363c08332cadccd13b67db371814cd214c2524020932f0804b8cf7c078" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] [[package]] name = "web-sys" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" dependencies = [ "js-sys", "wasm-bindgen", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "which" -version = "4.3.0" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" dependencies = [ "either", "libc", @@ -5204,16 +5436,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "windows-sys" -version = "0.36.1" +name = "windows" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", + "windows-targets 0.48.0", ] [[package]] @@ -5222,86 +5450,155 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_msvc" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_msvc" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" +checksum = 
"26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" -version = "0.36.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + +[[package]] +name = "winnow" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae8970b36c66498d8ff1d66685dc86b91b29db0c7739899012f63a63814b4b28" +dependencies = [ + "memchr", +] [[package]] name = "winreg" @@ -5336,7 +5633,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" dependencies = [ "asn1-rs", - "base64", + "base64 0.13.1", "data-encoding", "der-parser", "lazy_static", @@ -5345,7 +5642,7 @@ dependencies = [ "ring", "rusticata-macros", "thiserror", - "time 0.3.17", + "time 0.3.20", ] [[package]] @@ -5422,14 +5719,14 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn", + "syn 1.0.109", ] [[package]] name = "zbus_names" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c737644108627748a660d038974160e0cbb62605536091bdfa28fd7f64d43c8" +checksum = "f34f314916bd89bdb9934154627fab152f4f28acdda03e7c4c68181b214fe7e3" dependencies = [ "serde", "static_assertions", @@ -5438,23 +5735,22 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.3.3" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.15", ] [[package]] @@ -5478,19 +5774,20 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.4+zstd.1.5.2" +version = "2.0.8+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fa202f2ef00074143e219d15b62ffc317d17cc33909feac471c044087cad7b0" +checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" dependencies = [ "cc", "libc", + "pkg-config", ] [[package]] name = "zvariant" -version = "3.9.0" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f8c89c183461e11867ded456db252eae90874bc6769b7adbea464caa777e51" +checksum = "46fe4914a985446d6fd287019b5fceccce38303d71407d9e6e711d44954a05d8" dependencies = [ "byteorder", "enumflags2", @@ -5502,12 +5799,24 @@ dependencies = [ [[package]] name = 
"zvariant_derive" -version = "3.9.0" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "155247a5d1ab55e335421c104ccd95d64f17cebbd02f50cdbc1c33385f9c4d81" +checksum = "34c20260af4b28b3275d6676c7e2a6be0d4332e8e0aba4616d34007fd84e462a" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", + "zvariant_utils", +] + +[[package]] +name = "zvariant_utils" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53b22993dbc4d128a17a3b6c92f1c63872dd67198537ee728d8b5d7c40640a8b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", ] diff --git a/src/agent/Cargo.toml b/src/agent/Cargo.toml index 3ae604eed..25627e46e 100644 --- a/src/agent/Cargo.toml +++ b/src/agent/Cargo.toml @@ -10,8 +10,8 @@ oci = { path = "../libs/oci" } rustjail = { path = "rustjail" } protocols = { path = "../libs/protocols", features = ["async"] } lazy_static = "1.3.0" -ttrpc = { version = "0.6.0", features = ["async"], default-features = false } -protobuf = "2.27.0" +ttrpc = { version = "0.7.1", features = ["async"], default-features = false } +protobuf = "3.2.0" libc = "0.2.58" nix = "0.24.2" capctl = "0.2.0" diff --git a/src/agent/rustjail/Cargo.toml b/src/agent/rustjail/Cargo.toml index 8c9c2230d..19602bee2 100644 --- a/src/agent/rustjail/Cargo.toml +++ b/src/agent/rustjail/Cargo.toml @@ -18,7 +18,7 @@ scopeguard = "1.0.0" capctl = "0.2.0" lazy_static = "1.3.0" libc = "0.2.58" -protobuf = "2.27.0" +protobuf = "3.2.0" slog = "2.5.2" slog-scope = "4.1.2" scan_fmt = "0.2.6" diff --git a/src/agent/rustjail/src/cgroups/fs/mod.rs b/src/agent/rustjail/src/cgroups/fs/mod.rs index 4d7c7d6dc..80c31b617 100644 --- a/src/agent/rustjail/src/cgroups/fs/mod.rs +++ b/src/agent/rustjail/src/cgroups/fs/mod.rs @@ -27,7 +27,7 @@ use oci::{ LinuxNetwork, LinuxPids, LinuxResources, }; -use protobuf::{CachedSize, RepeatedField, SingularPtrField, UnknownFields}; +use protobuf::MessageField; use protocols::agent::{ BlkioStats, BlkioStatsEntry, CgroupStats, CpuStats, CpuUsage, HugetlbStats, MemoryData, MemoryStats, PidsStats, ThrottlingData, @@ -50,7 +50,7 @@ macro_rules! 
get_controller_or_return_singular_none { ($cg:ident) => { match $cg.controller_of() { Some(c) => c, - None => return SingularPtrField::none(), + None => return MessageField::none(), } }; } @@ -134,11 +134,10 @@ impl CgroupManager for Manager { let throttling_data = get_cpu_stats(&self.cgroup); - let cpu_stats = SingularPtrField::some(CpuStats { + let cpu_stats = MessageField::some(CpuStats { cpu_usage, throttling_data, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }); // Memorystats @@ -160,8 +159,7 @@ impl CgroupManager for Manager { pids_stats, blkio_stats, hugetlb_stats, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }) } @@ -446,14 +444,14 @@ fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool let memstat = get_memory_stats(cg) .into_option() .ok_or_else(|| anyhow!("failed to get the cgroup memory stats"))?; - let memusage = memstat.get_usage(); + let memusage = memstat.usage(); // When update memory limit, the kernel would check the current memory limit // set against the new swap setting, if the current memory limit is large than // the new swap, then set limit first, otherwise the kernel would complain and // refused to set; on the other hand, if the current memory limit is smaller than // the new swap, then we should set the swap first and then set the memor limit. - if swap == -1 || memusage.get_limit() < swap as u64 { + if swap == -1 || memusage.limit() < swap as u64 { mem_controller.set_memswap_limit(swap)?; set_resource!(mem_controller, set_limit, memory, limit); } else { @@ -660,21 +658,20 @@ lazy_static! { }; } -fn get_cpu_stats(cg: &cgroups::Cgroup) -> SingularPtrField { +fn get_cpu_stats(cg: &cgroups::Cgroup) -> MessageField { let cpu_controller: &CpuController = get_controller_or_return_singular_none!(cg); let stat = cpu_controller.cpu().stat; let h = lines_to_map(&stat); - SingularPtrField::some(ThrottlingData { + MessageField::some(ThrottlingData { periods: *h.get("nr_periods").unwrap_or(&0), throttled_periods: *h.get("nr_throttled").unwrap_or(&0), throttled_time: *h.get("throttled_time").unwrap_or(&0), - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }) } -fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField { +fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> MessageField { if let Some(cpuacct_controller) = cg.controller_of::() { let cpuacct = cpuacct_controller.cpuacct(); @@ -688,13 +685,12 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField { let percpu_usage = line_to_vec(&cpuacct.usage_percpu); - return SingularPtrField::some(CpuUsage { + return MessageField::some(CpuUsage { total_usage, percpu_usage, usage_in_kernelmode, usage_in_usermode, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }); } @@ -707,17 +703,16 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField { let total_usage = *h.get("usage_usec").unwrap_or(&0); let percpu_usage = vec![]; - SingularPtrField::some(CpuUsage { + MessageField::some(CpuUsage { total_usage, percpu_usage, usage_in_kernelmode, usage_in_usermode, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }) } -fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField { +fn get_memory_stats(cg: &cgroups::Cgroup) -> MessageField { let memory_controller: &MemController = 
get_controller_or_return_singular_none!(cg); // cache from memory stat @@ -729,52 +724,48 @@ fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField { let use_hierarchy = value == 1; // get memory data - let usage = SingularPtrField::some(MemoryData { + let usage = MessageField::some(MemoryData { usage: memory.usage_in_bytes, max_usage: memory.max_usage_in_bytes, failcnt: memory.fail_cnt, limit: memory.limit_in_bytes as u64, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }); // get swap usage let memswap = memory_controller.memswap(); - let swap_usage = SingularPtrField::some(MemoryData { + let swap_usage = MessageField::some(MemoryData { usage: memswap.usage_in_bytes, max_usage: memswap.max_usage_in_bytes, failcnt: memswap.fail_cnt, limit: memswap.limit_in_bytes as u64, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }); // get kernel usage let kmem_stat = memory_controller.kmem_stat(); - let kernel_usage = SingularPtrField::some(MemoryData { + let kernel_usage = MessageField::some(MemoryData { usage: kmem_stat.usage_in_bytes, max_usage: kmem_stat.max_usage_in_bytes, failcnt: kmem_stat.fail_cnt, limit: kmem_stat.limit_in_bytes as u64, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }); - SingularPtrField::some(MemoryStats { + MessageField::some(MemoryStats { cache, usage, swap_usage, kernel_usage, use_hierarchy, stats: memory.stat.raw, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }) } -fn get_pids_stats(cg: &cgroups::Cgroup) -> SingularPtrField { +fn get_pids_stats(cg: &cgroups::Cgroup) -> MessageField { let pid_controller: &PidController = get_controller_or_return_singular_none!(cg); let current = pid_controller.get_pid_current().unwrap_or(0); @@ -788,11 +779,10 @@ fn get_pids_stats(cg: &cgroups::Cgroup) -> SingularPtrField { }, } as u64; - SingularPtrField::some(PidsStats { + MessageField::some(PidsStats { current, limit, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }) } @@ -828,8 +818,8 @@ https://github.com/opencontainers/runc/blob/a5847db387ae28c0ca4ebe4beee1a76900c8 Total 0 */ -fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> RepeatedField { - let mut m = RepeatedField::new(); +fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> Vec { + let mut m = Vec::new(); if blkiodata.is_empty() { return m; } @@ -842,16 +832,15 @@ fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> RepeatedField RepeatedField { - let mut m = RepeatedField::new(); +fn get_blkio_stat_ioservice(services: &[IoService]) -> Vec { + let mut m = Vec::new(); if services.is_empty() { return m; @@ -875,17 +864,16 @@ fn build_blkio_stats_entry(major: i16, minor: i16, op: &str, value: u64) -> Blki minor: minor as u64, op: op.to_string(), value, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() } } -fn get_blkio_stats_v2(cg: &cgroups::Cgroup) -> SingularPtrField { +fn get_blkio_stats_v2(cg: &cgroups::Cgroup) -> MessageField { let blkio_controller: &BlkIoController = get_controller_or_return_singular_none!(cg); let blkio = blkio_controller.blkio(); let mut resp = BlkioStats::new(); - let mut blkio_stats = RepeatedField::new(); + let mut blkio_stats = Vec::new(); let stat = blkio.io_stat; for s in stat { @@ -901,10 +889,10 @@ fn get_blkio_stats_v2(cg: 
&cgroups::Cgroup) -> SingularPtrField { resp.io_service_bytes_recursive = blkio_stats; - SingularPtrField::some(resp) + MessageField::some(resp) } -fn get_blkio_stats(cg: &cgroups::Cgroup) -> SingularPtrField { +fn get_blkio_stats(cg: &cgroups::Cgroup) -> MessageField { if cg.v2() { return get_blkio_stats_v2(cg); } @@ -937,7 +925,7 @@ fn get_blkio_stats(cg: &cgroups::Cgroup) -> SingularPtrField { m.sectors_recursive = get_blkio_stat_blkiodata(&blkio.sectors_recursive); } - SingularPtrField::some(m) + MessageField::some(m) } fn get_hugetlb_stats(cg: &cgroups::Cgroup) -> HashMap { @@ -961,8 +949,7 @@ fn get_hugetlb_stats(cg: &cgroups::Cgroup) -> HashMap { usage, max_usage, failcnt, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }, ); } diff --git a/src/agent/rustjail/src/cgroups/mock.rs b/src/agent/rustjail/src/cgroups/mock.rs index 3bcc99955..8ac77c63b 100644 --- a/src/agent/rustjail/src/cgroups/mock.rs +++ b/src/agent/rustjail/src/cgroups/mock.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 // -use protobuf::{CachedSize, SingularPtrField, UnknownFields}; +use protobuf::MessageField; use crate::cgroups::Manager as CgroupManager; use crate::protocols::agent::{BlkioStats, CgroupStats, CpuStats, MemoryStats, PidsStats}; @@ -33,13 +33,12 @@ impl CgroupManager for Manager { fn get_stats(&self) -> Result { Ok(CgroupStats { - cpu_stats: SingularPtrField::some(CpuStats::default()), - memory_stats: SingularPtrField::some(MemoryStats::new()), - pids_stats: SingularPtrField::some(PidsStats::new()), - blkio_stats: SingularPtrField::some(BlkioStats::new()), + cpu_stats: MessageField::some(CpuStats::default()), + memory_stats: MessageField::some(MemoryStats::new()), + pids_stats: MessageField::some(PidsStats::new()), + blkio_stats: MessageField::some(BlkioStats::new()), hugetlb_stats: HashMap::new(), - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }) } diff --git a/src/agent/rustjail/src/cgroups/systemd/dbus_client.rs b/src/agent/rustjail/src/cgroups/systemd/dbus_client.rs index 09c46f24d..0ff606930 100644 --- a/src/agent/rustjail/src/cgroups/systemd/dbus_client.rs +++ b/src/agent/rustjail/src/cgroups/systemd/dbus_client.rs @@ -26,7 +26,7 @@ pub trait SystemdInterface { fn get_version(&self) -> Result; - fn unit_exist(&self, unit_name: &str) -> Result; + fn unit_exists(&self, unit_name: &str) -> Result; fn add_process(&self, pid: i32, unit_name: &str) -> Result<()>; } @@ -36,8 +36,9 @@ pub struct DBusClient {} impl DBusClient { fn build_proxy(&self) -> Result> { - let connection = zbus::blocking::Connection::system()?; - let proxy = SystemManager::new(&connection)?; + let connection = + zbus::blocking::Connection::system().context("Establishing a D-Bus connection")?; + let proxy = SystemManager::new(&connection).context("Building a D-Bus proxy manager")?; Ok(proxy) } } @@ -108,8 +109,10 @@ impl SystemdInterface for DBusClient { Ok(systemd_version) } - fn unit_exist(&self, unit_name: &str) -> Result { - let proxy = self.build_proxy()?; + fn unit_exists(&self, unit_name: &str) -> Result { + let proxy = self + .build_proxy() + .with_context(|| format!("Checking if systemd unit {} exists", unit_name))?; Ok(proxy.get_unit(unit_name).is_ok()) } diff --git a/src/agent/rustjail/src/cgroups/systemd/manager.rs b/src/agent/rustjail/src/cgroups/systemd/manager.rs index c52e727e4..dcbc65a2c 100644 --- a/src/agent/rustjail/src/cgroups/systemd/manager.rs +++ 
b/src/agent/rustjail/src/cgroups/systemd/manager.rs @@ -41,7 +41,7 @@ pub struct Manager { impl CgroupManager for Manager { fn apply(&self, pid: pid_t) -> Result<()> { let unit_name = self.unit_name.as_str(); - if self.dbus_client.unit_exist(unit_name).unwrap() { + if self.dbus_client.unit_exists(unit_name)? { self.dbus_client.add_process(pid, self.unit_name.as_str())?; } else { self.dbus_client.start_unit( diff --git a/src/agent/rustjail/src/container.rs b/src/agent/rustjail/src/container.rs index 60b936557..b1d7499cd 100644 --- a/src/agent/rustjail/src/container.rs +++ b/src/agent/rustjail/src/container.rs @@ -48,7 +48,7 @@ use nix::unistd::{self, fork, ForkResult, Gid, Pid, Uid, User}; use std::os::unix::fs::MetadataExt; use std::os::unix::io::AsRawFd; -use protobuf::SingularPtrField; +use protobuf::MessageField; use oci::State as OCIState; use regex::Regex; @@ -875,7 +875,7 @@ impl BaseContainer for LinuxContainer { // what about network interface stats? Ok(StatsContainerResponse { - cgroup_stats: SingularPtrField::some(self.cgroup_manager.as_ref().get_stats()?), + cgroup_stats: MessageField::some(self.cgroup_manager.as_ref().get_stats()?), ..Default::default() }) } diff --git a/src/agent/rustjail/src/lib.rs b/src/agent/rustjail/src/lib.rs index 223b2e407..0570646bc 100644 --- a/src/agent/rustjail/src/lib.rs +++ b/src/agent/rustjail/src/lib.rs @@ -82,11 +82,11 @@ pub fn process_grpc_to_oci(p: &grpc::Process) -> oci::Process { let cap = p.Capabilities.as_ref().unwrap(); Some(oci::LinuxCapabilities { - bounding: cap.Bounding.clone().into_vec(), - effective: cap.Effective.clone().into_vec(), - inheritable: cap.Inheritable.clone().into_vec(), - permitted: cap.Permitted.clone().into_vec(), - ambient: cap.Ambient.clone().into_vec(), + bounding: cap.Bounding.clone(), + effective: cap.Effective.clone(), + inheritable: cap.Inheritable.clone(), + permitted: cap.Permitted.clone(), + ambient: cap.Ambient.clone(), }) } else { None @@ -108,8 +108,8 @@ pub fn process_grpc_to_oci(p: &grpc::Process) -> oci::Process { terminal: p.Terminal, console_size, user, - args: p.Args.clone().into_vec(), - env: p.Env.clone().into_vec(), + args: p.Args.clone(), + env: p.Env.clone(), cwd: p.Cwd.clone(), capabilities, rlimits, @@ -130,9 +130,9 @@ fn root_grpc_to_oci(root: &grpc::Root) -> oci::Root { fn mount_grpc_to_oci(m: &grpc::Mount) -> oci::Mount { oci::Mount { destination: m.destination.clone(), - r#type: m.field_type.clone(), + r#type: m.type_.clone(), source: m.source.clone(), - options: m.options.clone().into_vec(), + options: m.options.clone(), } } @@ -143,8 +143,8 @@ fn hook_grpc_to_oci(h: &[grpcHook]) -> Vec { for e in h.iter() { r.push(oci::Hook { path: e.Path.clone(), - args: e.Args.clone().into_vec(), - env: e.Env.clone().into_vec(), + args: e.Args.clone(), + env: e.Env.clone(), timeout: Some(e.Timeout as i32), }); } @@ -365,7 +365,7 @@ fn seccomp_grpc_to_oci(sec: &grpc::LinuxSeccomp) -> oci::LinuxSeccomp { let mut args = Vec::new(); let errno_ret: u32 = if sys.has_errnoret() { - sys.get_errnoret() + sys.errnoret() } else { libc::EPERM as u32 }; @@ -380,7 +380,7 @@ fn seccomp_grpc_to_oci(sec: &grpc::LinuxSeccomp) -> oci::LinuxSeccomp { } r.push(oci::LinuxSyscall { - names: sys.Names.clone().into_vec(), + names: sys.Names.clone(), action: sys.Action.clone(), errno_ret, args, @@ -391,8 +391,8 @@ fn seccomp_grpc_to_oci(sec: &grpc::LinuxSeccomp) -> oci::LinuxSeccomp { oci::LinuxSeccomp { default_action: sec.DefaultAction.clone(), - architectures: sec.Architectures.clone().into_vec(), - flags: 
sec.Flags.clone().into_vec(), + architectures: sec.Architectures.clone(), + flags: sec.Flags.clone(), syscalls, } } @@ -462,8 +462,8 @@ fn linux_grpc_to_oci(l: &grpc::Linux) -> oci::Linux { devices, seccomp, rootfs_propagation: l.RootfsPropagation.clone(), - masked_paths: l.MaskedPaths.clone().into_vec(), - readonly_paths: l.ReadonlyPaths.clone().into_vec(), + masked_paths: l.MaskedPaths.clone(), + readonly_paths: l.ReadonlyPaths.clone(), mount_label: l.MountLabel.clone(), intel_rdt, } @@ -564,35 +564,30 @@ mod tests { // All fields specified grpcproc: grpc::Process { Terminal: true, - ConsoleSize: protobuf::SingularPtrField::::some(grpc::Box { + ConsoleSize: protobuf::MessageField::::some(grpc::Box { Height: 123, Width: 456, ..Default::default() }), - User: protobuf::SingularPtrField::::some(grpc::User { + User: protobuf::MessageField::::some(grpc::User { UID: 1234, GID: 5678, AdditionalGids: Vec::from([910, 1112]), Username: String::from("username"), ..Default::default() }), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([String::from("env")])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env")]), Cwd: String::from("cwd"), - Capabilities: protobuf::SingularPtrField::some(grpc::LinuxCapabilities { - Bounding: protobuf::RepeatedField::from(Vec::from([String::from("bnd")])), - Effective: protobuf::RepeatedField::from(Vec::from([String::from("eff")])), - Inheritable: protobuf::RepeatedField::from(Vec::from([String::from( - "inher", - )])), - Permitted: protobuf::RepeatedField::from(Vec::from([String::from("perm")])), - Ambient: protobuf::RepeatedField::from(Vec::from([String::from("amb")])), + Capabilities: protobuf::MessageField::some(grpc::LinuxCapabilities { + Bounding: Vec::from([String::from("bnd")]), + Effective: Vec::from([String::from("eff")]), + Inheritable: Vec::from([String::from("inher")]), + Permitted: Vec::from([String::from("perm")]), + Ambient: Vec::from([String::from("amb")]), ..Default::default() }), - Rlimits: protobuf::RepeatedField::from(Vec::from([ + Rlimits: Vec::from([ grpc::POSIXRlimit { Type: String::from("r#type"), Hard: 123, @@ -605,7 +600,7 @@ mod tests { Soft: 1011, ..Default::default() }, - ])), + ]), NoNewPrivileges: true, ApparmorProfile: String::from("apparmor profile"), OOMScoreAdj: 123456, @@ -655,7 +650,7 @@ mod tests { TestData { // None ConsoleSize grpcproc: grpc::Process { - ConsoleSize: protobuf::SingularPtrField::::none(), + ConsoleSize: protobuf::MessageField::::none(), OOMScoreAdj: 0, ..Default::default() }, @@ -668,7 +663,7 @@ mod tests { TestData { // None User grpcproc: grpc::Process { - User: protobuf::SingularPtrField::::none(), + User: protobuf::MessageField::::none(), OOMScoreAdj: 0, ..Default::default() }, @@ -686,7 +681,7 @@ mod tests { TestData { // None Capabilities grpcproc: grpc::Process { - Capabilities: protobuf::SingularPtrField::none(), + Capabilities: protobuf::MessageField::none(), OOMScoreAdj: 0, ..Default::default() }, @@ -787,99 +782,57 @@ mod tests { TestData { // All specified grpchooks: grpc::Hooks { - Prestart: protobuf::RepeatedField::from(Vec::from([ + Prestart: Vec::from([ grpc::Hook { Path: String::from("prestartpath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: 
Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() }, grpc::Hook { Path: String::from("prestartpath2"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg3"), - String::from("arg4"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env3"), - String::from("env4"), - ])), + Args: Vec::from([String::from("arg3"), String::from("arg4")]), + Env: Vec::from([String::from("env3"), String::from("env4")]), Timeout: 25, ..Default::default() }, - ])), - Poststart: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + ]), + Poststart: Vec::from([grpc::Hook { Path: String::from("poststartpath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - Poststop: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + Poststop: Vec::from([grpc::Hook { Path: String::from("poststoppath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - CreateRuntime: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + CreateRuntime: Vec::from([grpc::Hook { Path: String::from("createruntimepath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - CreateContainer: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + CreateContainer: Vec::from([grpc::Hook { Path: String::from("createcontainerpath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - StartContainer: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + StartContainer: Vec::from([grpc::Hook { Path: String::from("startcontainerpath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), + }]), ..Default::default() }, result: oci::Hooks { @@ -932,72 +885,42 @@ mod tests { TestData { // Prestart empty grpchooks: grpc::Hooks { - Prestart: protobuf::RepeatedField::from(Vec::from([])), - Poststart: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + Prestart: 
Vec::from([]), + Poststart: Vec::from([grpc::Hook { Path: String::from("poststartpath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - Poststop: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + Poststop: Vec::from([grpc::Hook { Path: String::from("poststoppath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - CreateRuntime: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + CreateRuntime: Vec::from([grpc::Hook { Path: String::from("createruntimepath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - CreateContainer: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + CreateContainer: Vec::from([grpc::Hook { Path: String::from("createcontainerpath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - StartContainer: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + StartContainer: Vec::from([grpc::Hook { Path: String::from("startcontainerpath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), + }]), ..Default::default() }, result: oci::Hooks { @@ -1069,11 +992,8 @@ mod tests { grpcmount: grpc::Mount { destination: String::from("destination"), source: String::from("source"), - field_type: String::from("fieldtype"), - options: protobuf::RepeatedField::from(Vec::from([ - String::from("option1"), - String::from("option2"), - ])), + type_: String::from("fieldtype"), + options: Vec::from([String::from("option1"), String::from("option2")]), ..Default::default() }, result: oci::Mount { @@ -1087,8 +1007,8 @@ mod tests { grpcmount: grpc::Mount { destination: String::from("destination"), source: String::from("source"), - field_type: String::from("fieldtype"), - options: protobuf::RepeatedField::from(Vec::new()), + type_: String::from("fieldtype"), + options: Vec::new(), ..Default::default() }, result: oci::Mount { @@ -1102,8 +1022,8 @@ mod tests { grpcmount: grpc::Mount { destination: String::new(), source: 
String::from("source"), - field_type: String::from("fieldtype"), - options: protobuf::RepeatedField::from(Vec::from([String::from("option1")])), + type_: String::from("fieldtype"), + options: Vec::from([String::from("option1")]), ..Default::default() }, result: oci::Mount { @@ -1117,8 +1037,8 @@ mod tests { grpcmount: grpc::Mount { destination: String::from("destination"), source: String::from("source"), - field_type: String::new(), - options: protobuf::RepeatedField::from(Vec::from([String::from("option1")])), + type_: String::new(), + options: Vec::from([String::from("option1")]), ..Default::default() }, result: oci::Mount { @@ -1178,27 +1098,15 @@ mod tests { grpchook: &[ grpc::Hook { Path: String::from("path"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() }, grpc::Hook { Path: String::from("path2"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg3"), - String::from("arg4"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env3"), - String::from("env4"), - ])), + Args: Vec::from([String::from("arg3"), String::from("arg4")]), + Env: Vec::from([String::from("env3"), String::from("env4")]), Timeout: 20, ..Default::default() }, diff --git a/src/agent/src/device.rs b/src/agent/src/device.rs index dd8185bb6..8331ecb53 100644 --- a/src/agent/src/device.rs +++ b/src/agent/src/device.rs @@ -761,7 +761,7 @@ async fn vfio_pci_device_handler( device: &Device, sandbox: &Arc>, ) -> Result { - let vfio_in_guest = device.field_type != DRIVER_VFIO_PCI_GK_TYPE; + let vfio_in_guest = device.type_ != DRIVER_VFIO_PCI_GK_TYPE; let mut pci_fixups = Vec::<(pci::Address, pci::Address)>::new(); let mut group = None; @@ -876,9 +876,9 @@ pub async fn add_devices( async fn add_device(device: &Device, sandbox: &Arc>) -> Result { // log before validation to help with debugging gRPC protocol version differences. 
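The hunks above repeat the same mechanical rust-protobuf 2.x → 3.x moves: repeated fields become plain `Vec<T>` (so the `.clone().into_vec()` calls disappear), optional sub-messages move from `SingularPtrField` to `MessageField`, and the generated bookkeeping fields (`unknown_fields`, `cached_size`) are replaced with struct-update syntax. The dependency-free sketch below mirrors the plain-Rust half of that pattern; `Hook` and `Process` are illustrative stand-ins, not the generated Kata types, and a plain `Option` stands in for `MessageField` so the sketch needs no protobuf dependency.

```rust
// Sketch of the construction patterns this migration relies on.
// `Hook` and `Process` are hypothetical stand-ins for protobuf-generated types.

#[derive(Default, Debug, Clone)]
struct Hook {
    path: String,
    args: Vec<String>, // rust-protobuf 3 generates plain Vec<T> for repeated fields,
    env: Vec<String>,  // so no `.into_vec()` conversion is needed any more
    timeout: i64,
}

#[derive(Default, Debug)]
struct Process {
    cwd: String,
    args: Vec<String>,
    prestart: Vec<Hook>,
    console_size: Option<(u32, u32)>, // a MessageField<...> in the generated code
}

fn main() {
    // With protobuf 2.x every generated bookkeeping field (unknown_fields,
    // cached_size) had to be spelled out; the diff replaces that with plain
    // struct-update syntax:
    let hook = Hook {
        path: "/usr/local/bin/prestart".into(),
        args: vec!["arg1".into(), "arg2".into()],
        env: vec!["KEY=value".into()],
        ..Default::default()
    };

    // Repeated fields are ordinary vectors, so a clone is just a clone:
    let args: Vec<String> = hook.args.clone();

    let process = Process {
        cwd: "/".into(),
        args,
        prestart: vec![hook],
        ..Default::default()
    };
    println!("{:?}", process);
}
```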
info!(sl!(), "device-id: {}, device-type: {}, device-vm-path: {}, device-container-path: {}, device-options: {:?}", - device.id, device.field_type, device.vm_path, device.container_path, device.options); + device.id, device.type_, device.vm_path, device.container_path, device.options); - if device.field_type.is_empty() { + if device.type_.is_empty() { return Err(anyhow!("invalid type for device {:?}", device)); } @@ -890,7 +890,7 @@ async fn add_device(device: &Device, sandbox: &Arc>) -> Result virtio_blk_device_handler(device, sandbox).await, DRIVER_BLK_CCW_TYPE => virtio_blk_ccw_device_handler(device, sandbox).await, DRIVER_MMIO_BLK_TYPE => virtiommio_blk_device_handler(device, sandbox).await, @@ -900,7 +900,7 @@ async fn add_device(device: &Device, sandbox: &Arc>) -> Result vfio_ap_device_handler(device, sandbox).await, - _ => Err(anyhow!("Unknown device type {}", device.field_type)), + _ => Err(anyhow!("Unknown device type {}", device.type_)), } } diff --git a/src/agent/src/main.rs b/src/agent/src/main.rs index 5c4c7d576..598ce21a3 100644 --- a/src/agent/src/main.rs +++ b/src/agent/src/main.rs @@ -443,9 +443,8 @@ mod tests { let msg = format!("test[{}]: {:?}", i, d); let (rfd, wfd) = unistd::pipe2(OFlag::O_CLOEXEC).unwrap(); defer!({ - // rfd is closed by the use of PipeStream in the crate_logger_task function, - // but we will attempt to close in case of a failure - let _ = unistd::close(rfd); + // XXX: Never try to close rfd, because it will be closed by PipeStream in + // create_logger_task() and it's not safe to close the same fd twice time. unistd::close(wfd).unwrap(); }); diff --git a/src/agent/src/mount.rs b/src/agent/src/mount.rs index bc13a6896..0eff266f2 100644 --- a/src/agent/src/mount.rs +++ b/src/agent/src/mount.rs @@ -211,10 +211,10 @@ async fn ephemeral_storage_handler( // By now we only support one option field: "fsGroup" which // isn't an valid mount option, thus we should remove it when // do mount. - if storage.options.len() > 0 { + if !storage.options.is_empty() { // ephemeral_storage didn't support mount options except fsGroup. 
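The ephemeral-storage handler above clears the request's option list before calling the common mount path, because `fsGroup` travels alongside the options but is not something `mount(2)` understands; ownership is applied in a separate pass from the request's dedicated `fs_group` field. The sketch below only illustrates that separation of kernel mount options from agent-level directives: `split_mount_options` and the `fsGroup=<gid>` string encoding are hypothetical, not agent APIs.

```rust
/// Hypothetical helper: keep "fsGroup" metadata out of the mount(2) call and
/// hand it to a later ownership pass. The real handler simply clears the whole
/// option list and reads the group id from the request's fs_group field.
fn split_mount_options(options: &[String]) -> (Vec<String>, Option<u32>) {
    let mut mount_opts = Vec::new();
    let mut fs_group = None;
    for opt in options {
        if let Some(gid) = opt.strip_prefix("fsGroup=") {
            // Not a kernel mount option: remember it, keep it out of the mount call.
            fs_group = gid.parse::<u32>().ok();
        } else {
            mount_opts.push(opt.clone());
        }
    }
    (mount_opts, fs_group)
}

fn main() {
    let opts = vec!["rw".to_string(), "fsGroup=3000".to_string()];
    let (mount_opts, fs_group) = split_mount_options(&opts);
    assert_eq!(mount_opts, vec!["rw".to_string()]);
    assert_eq!(fs_group, Some(3000));
    println!("mount options: {:?}, fsGroup: {:?}", mount_opts, fs_group);
}
```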
let mut new_storage = storage.clone(); - new_storage.options = protobuf::RepeatedField::default(); + new_storage.options = Default::default(); common_storage_handler(logger, &new_storage)?; let opts_vec: Vec = storage.options.to_vec(); @@ -654,7 +654,7 @@ pub fn set_ownership(logger: &Logger, storage: &Storage) -> Result<()> { if storage.fs_group.is_none() { return Ok(()); } - let fs_group = storage.get_fs_group(); + let fs_group = storage.fs_group(); let mut read_only = false; let opts_vec: Vec = storage.options.to_vec(); @@ -671,7 +671,7 @@ pub fn set_ownership(logger: &Logger, storage: &Storage) -> Result<()> { err })?; - if fs_group.group_change_policy == FSGroupChangePolicy::OnRootMismatch + if fs_group.group_change_policy == FSGroupChangePolicy::OnRootMismatch.into() && metadata.gid() == fs_group.group_id { let mut mask = if read_only { RO_MASK } else { RW_MASK }; @@ -1094,7 +1094,6 @@ fn parse_options(option_list: Vec) -> HashMap { #[cfg(test)] mod tests { use super::*; - use protobuf::RepeatedField; use protocols::agent::FSGroup; use std::fs::File; use std::fs::OpenOptions; @@ -2015,9 +2014,8 @@ mod tests { mount_path: "rw_mount", fs_group: Some(FSGroup { group_id: 3000, - group_change_policy: FSGroupChangePolicy::Always, - unknown_fields: Default::default(), - cached_size: Default::default(), + group_change_policy: FSGroupChangePolicy::Always.into(), + ..Default::default() }), read_only: false, expected_group_id: 3000, @@ -2027,9 +2025,8 @@ mod tests { mount_path: "ro_mount", fs_group: Some(FSGroup { group_id: 3000, - group_change_policy: FSGroupChangePolicy::OnRootMismatch, - unknown_fields: Default::default(), - cached_size: Default::default(), + group_change_policy: FSGroupChangePolicy::OnRootMismatch.into(), + ..Default::default() }), read_only: true, expected_group_id: 3000, @@ -2049,10 +2046,7 @@ mod tests { let directory_mode = mount_dir.as_path().metadata().unwrap().permissions().mode(); let mut storage_data = Storage::new(); if d.read_only { - storage_data.set_options(RepeatedField::from_slice(&[ - "foo".to_string(), - "ro".to_string(), - ])); + storage_data.set_options(vec!["foo".to_string(), "ro".to_string()]); } if let Some(fs_group) = d.fs_group.clone() { storage_data.set_fs_group(fs_group); diff --git a/src/agent/src/netlink.rs b/src/agent/src/netlink.rs index 29785fc43..f5e9d271b 100644 --- a/src/agent/src/netlink.rs +++ b/src/agent/src/netlink.rs @@ -7,7 +7,6 @@ use anyhow::{anyhow, Context, Result}; use futures::{future, StreamExt, TryStreamExt}; use ipnetwork::{IpNetwork, Ipv4Network, Ipv6Network}; use nix::errno::Errno; -use protobuf::RepeatedField; use protocols::types::{ARPNeighbor, IPAddress, IPFamily, Interface, Route}; use rtnetlink::{new_connection, packet, IpVersion}; use std::convert::{TryFrom, TryInto}; @@ -83,8 +82,8 @@ impl Handle { // Add new ip addresses from request for ip_address in &iface.IPAddresses { - let ip = IpAddr::from_str(ip_address.get_address())?; - let mask = ip_address.get_mask().parse::()?; + let ip = IpAddr::from_str(ip_address.address())?; + let mask = ip_address.mask().parse::()?; self.add_addresses(link.index(), std::iter::once(IpNetwork::new(ip, mask)?)) .await?; @@ -152,7 +151,7 @@ impl Handle { .map(|p| p.try_into()) .collect::>>()?; - iface.IPAddresses = RepeatedField::from_vec(ips); + iface.IPAddresses = ips; list.push(iface); } @@ -334,7 +333,7 @@ impl Handle { // `rtnetlink` offers a separate request builders for different IP versions (IP v4 and v6). // This if branch is a bit clumsy because it does almost the same. 
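The comment above concedes that the IPv4 and IPv6 route branches are nearly identical: `rtnetlink` exposes a separate request builder per IP version, so the handler must pick one up front (in the agent, by checking the route's protobuf `family` field). The sketch below shows the same family dispatch driven instead by parsing the destination with the `ipnetwork` crate this module already depends on; `RouteSpec` and `is_ipv6_route` are hypothetical names used only for illustration.

```rust
use ipnetwork::IpNetwork;
use std::net::IpAddr;
use std::str::FromStr;

/// Hypothetical, simplified stand-in for the protobuf Route message.
struct RouteSpec {
    dest: String,    // e.g. "10.4.0.0/16" or "fd00::/64"; empty means default route
    gateway: String, // may be empty
}

/// Decide which rtnetlink request builder a route needs by parsing its
/// destination, falling back to the gateway for default routes.
fn is_ipv6_route(route: &RouteSpec) -> Result<bool, Box<dyn std::error::Error>> {
    if !route.dest.is_empty() {
        // IpNetwork parses both families, so one parse covers the v4/v6 split.
        return Ok(matches!(IpNetwork::from_str(&route.dest)?, IpNetwork::V6(_)));
    }
    if !route.gateway.is_empty() {
        return Ok(route.gateway.parse::<IpAddr>()?.is_ipv6());
    }
    // No destination and no gateway: fall back to IPv4 as a conservative default.
    Ok(false)
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let v6 = RouteSpec { dest: "fd00::/64".into(), gateway: String::new() };
    let v4 = RouteSpec { dest: String::new(), gateway: "192.168.1.1".into() };
    assert!(is_ipv6_route(&v6)?);
    assert!(!is_ipv6_route(&v4)?);
    Ok(())
}
```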
- if route.get_family() == IPFamily::v6 { + if route.family() == IPFamily::v6 { let dest_addr = if !route.dest.is_empty() { Ipv6Network::from_str(&route.dest)? } else { @@ -368,9 +367,9 @@ impl Handle { if Errno::from_i32(message.code.abs()) != Errno::EEXIST { return Err(anyhow!( "Failed to add IP v6 route (src: {}, dst: {}, gtw: {},Err: {})", - route.get_source(), - route.get_dest(), - route.get_gateway(), + route.source(), + route.dest(), + route.gateway(), message )); } @@ -409,9 +408,9 @@ impl Handle { if Errno::from_i32(message.code.abs()) != Errno::EEXIST { return Err(anyhow!( "Failed to add IP v4 route (src: {}, dst: {}, gtw: {},Err: {})", - route.get_source(), - route.get_dest(), - route.get_gateway(), + route.source(), + route.dest(), + route.gateway(), message )); } @@ -506,7 +505,7 @@ impl Handle { self.add_arp_neighbor(&neigh).await.map_err(|err| { anyhow!( "Failed to add ARP neighbor {}: {:?}", - neigh.get_toIPAddress().get_address(), + neigh.toIPAddress().address(), err ) })?; @@ -725,7 +724,7 @@ impl TryFrom
for IPAddress { let mask = format!("{}", value.0.header.prefix_len); Ok(IPAddress { - family, + family: family.into(), address, mask, ..Default::default() diff --git a/src/agent/src/rpc.rs b/src/agent/src/rpc.rs index a30ecb3d8..b99ac8950 100644 --- a/src/agent/src/rpc.rs +++ b/src/agent/src/rpc.rs @@ -21,17 +21,20 @@ use ttrpc::{ use anyhow::{anyhow, Context, Result}; use cgroups::freezer::FreezerState; use oci::{LinuxNamespace, Root, Spec}; -use protobuf::{Message, RepeatedField, SingularPtrField}; +use protobuf::{MessageDyn, MessageField}; use protocols::agent::{ AddSwapRequest, AgentDetails, CopyFileRequest, GetIPTablesRequest, GetIPTablesResponse, GuestDetailsResponse, Interfaces, Metrics, OOMEvent, ReadStreamResponse, Routes, SetIPTablesRequest, SetIPTablesResponse, StatsContainerResponse, VolumeStatsRequest, WaitProcessResponse, WriteStreamResponse, }; -use protocols::csi::{VolumeCondition, VolumeStatsResponse, VolumeUsage, VolumeUsage_Unit}; +use protocols::csi::{ + volume_usage::Unit as VolumeUsage_Unit, VolumeCondition, VolumeStatsResponse, VolumeUsage, +}; use protocols::empty::Empty; use protocols::health::{ - HealthCheckResponse, HealthCheckResponse_ServingStatus, VersionCheckResponse, + health_check_response::ServingStatus as HealthCheckResponse_ServingStatus, HealthCheckResponse, + VersionCheckResponse, }; use protocols::types::Interface; use protocols::{ @@ -132,11 +135,11 @@ macro_rules! is_allowed { if !AGENT_CONFIG .read() .await - .is_allowed_endpoint($req.descriptor().name()) + .is_allowed_endpoint($req.descriptor_dyn().name()) { return Err(ttrpc_error!( ttrpc::Code::UNIMPLEMENTED, - format!("{} is blocked", $req.descriptor().name()), + format!("{} is blocked", $req.descriptor_dyn().name()), )); } }; @@ -194,7 +197,7 @@ impl AgentService { kata_sys_util::validate::verify_id(&cid)?; let mut oci_spec = req.OCI.clone(); - let use_sandbox_pidns = req.get_sandbox_pidns(); + let use_sandbox_pidns = req.sandbox_pidns(); let sandbox; let mut s; @@ -903,7 +906,7 @@ impl agent_ttrpc::AgentService for AgentService { ) -> ttrpc::Result { trace_rpc_call!(ctx, "pause_container", req); is_allowed!(req); - let cid = req.get_container_id(); + let cid = req.container_id(); let s = Arc::clone(&self.sandbox); let mut sandbox = s.lock().await; @@ -927,7 +930,7 @@ impl agent_ttrpc::AgentService for AgentService { ) -> ttrpc::Result { trace_rpc_call!(ctx, "resume_container", req); is_allowed!(req); - let cid = req.get_container_id(); + let cid = req.container_id(); let s = Arc::clone(&self.sandbox); let mut sandbox = s.lock().await; @@ -1082,16 +1085,12 @@ impl agent_ttrpc::AgentService for AgentService { trace_rpc_call!(ctx, "update_routes", req); is_allowed!(req); - let new_routes = req - .routes - .into_option() - .map(|r| r.Routes.into_vec()) - .ok_or_else(|| { - ttrpc_error!( - ttrpc::Code::INVALID_ARGUMENT, - "empty update routes request".to_string(), - ) - })?; + let new_routes = req.routes.into_option().map(|r| r.Routes).ok_or_else(|| { + ttrpc_error!( + ttrpc::Code::INVALID_ARGUMENT, + "empty update routes request".to_string(), + ) + })?; let mut sandbox = self.sandbox.lock().await; @@ -1110,7 +1109,7 @@ impl agent_ttrpc::AgentService for AgentService { })?; Ok(protocols::agent::Routes { - Routes: RepeatedField::from_vec(list), + Routes: list, ..Default::default() }) } @@ -1309,7 +1308,7 @@ impl agent_ttrpc::AgentService for AgentService { })?; Ok(protocols::agent::Interfaces { - Interfaces: RepeatedField::from_vec(list), + Interfaces: list, ..Default::default() }) } @@ -1332,7 
+1331,7 @@ impl agent_ttrpc::AgentService for AgentService { .map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, format!("list routes: {:?}", e)))?; Ok(protocols::agent::Routes { - Routes: RepeatedField::from_vec(list), + Routes: list, ..Default::default() }) } @@ -1448,7 +1447,7 @@ impl agent_ttrpc::AgentService for AgentService { let neighs = req .neighbors .into_option() - .map(|n| n.ARPNeighbors.into_vec()) + .map(|n| n.ARPNeighbors) .ok_or_else(|| { ttrpc_error!( ttrpc::Code::INVALID_ARGUMENT, @@ -1532,7 +1531,7 @@ impl agent_ttrpc::AgentService for AgentService { // to get agent details let detail = get_agent_details(); - resp.agent_details = SingularPtrField::some(detail); + resp.agent_details = MessageField::some(detail); Ok(resp) } @@ -1657,8 +1656,8 @@ impl agent_ttrpc::AgentService for AgentService { .map(|u| usage_vec.push(u)) .map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))?; - resp.usage = RepeatedField::from_vec(usage_vec); - resp.volume_condition = SingularPtrField::some(condition); + resp.usage = usage_vec; + resp.volume_condition = MessageField::some(condition); Ok(resp) } @@ -1762,7 +1761,7 @@ fn get_volume_capacity_stats(path: &str) -> Result { usage.total = stat.blocks() * block_size; usage.available = stat.blocks_free() * block_size; usage.used = usage.total - usage.available; - usage.unit = VolumeUsage_Unit::BYTES; + usage.unit = VolumeUsage_Unit::BYTES.into(); Ok(usage) } @@ -1774,7 +1773,7 @@ fn get_volume_inode_stats(path: &str) -> Result { usage.total = stat.files(); usage.available = stat.files_free(); usage.used = usage.total - usage.available; - usage.unit = VolumeUsage_Unit::INODES; + usage.unit = VolumeUsage_Unit::INODES.into(); Ok(usage) } @@ -1794,14 +1793,12 @@ fn get_agent_details() -> AgentDetails { detail.set_supports_seccomp(have_seccomp()); detail.init_daemon = unistd::getpid() == Pid::from_raw(1); - detail.device_handlers = RepeatedField::new(); - detail.storage_handlers = RepeatedField::from_vec( - STORAGE_HANDLER_LIST - .to_vec() - .iter() - .map(|x| x.to_string()) - .collect(), - ); + detail.device_handlers = Vec::new(); + detail.storage_handlers = STORAGE_HANDLER_LIST + .to_vec() + .iter() + .map(|x| x.to_string()) + .collect(); detail } @@ -2201,7 +2198,7 @@ fn load_kernel_module(module: &protocols::agent::KernelModule) -> Result<()> { let mut args = vec!["-v".to_string(), module.name.clone()]; - if module.parameters.len() > 0 { + if !module.parameters.is_empty() { args.extend(module.parameters.to_vec()) } diff --git a/src/libs/Cargo.lock b/src/libs/Cargo.lock index 16bdfda6f..2f03109f8 100644 --- a/src/libs/Cargo.lock +++ b/src/libs/Cargo.lock @@ -703,9 +703,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.9.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "parking_lot" @@ -845,9 +845,16 @@ name = "protobuf" version = "2.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" + +[[package]] +name = "protobuf" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55bad9126f378a853655831eb7363b7b01b81d19f8cb1218861086ca4a1a61e" dependencies = [ - "serde", - "serde_derive", + "once_cell", + "protobuf-support", + "thiserror", ] [[package]] @@ -856,17 +863,47 @@ version = "2.27.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aec1632b7c8f2e620343439a7dfd1f3c47b18906c4be58982079911482b5d707" dependencies = [ - "protobuf", + "protobuf 2.27.1", ] [[package]] -name = "protobuf-codegen-pure" -version = "2.27.1" +name = "protobuf-codegen" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f8122fdb18e55190c796b088a16bdb70cd7acdcd48f7a8b796b58c62e532cc6" +checksum = "0dd418ac3c91caa4032d37cb80ff0d44e2ebe637b2fb243b6234bf89cdac4901" dependencies = [ - "protobuf", - "protobuf-codegen", + "anyhow", + "once_cell", + "protobuf 3.2.0", + "protobuf-parse", + "regex", + "tempfile", + "thiserror", +] + +[[package]] +name = "protobuf-parse" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d39b14605eaa1f6a340aec7f320b34064feb26c93aec35d6a9a2272a8ddfa49" +dependencies = [ + "anyhow", + "indexmap", + "log", + "protobuf 3.2.0", + "protobuf-support", + "tempfile", + "thiserror", + "which", +] + +[[package]] +name = "protobuf-support" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d4d7b8601c814cfb36bcebb79f0e61e45e1e93640cf778837833bbed05c372" +dependencies = [ + "thiserror", ] [[package]] @@ -875,7 +912,7 @@ version = "0.1.0" dependencies = [ "async-trait", "oci", - "protobuf", + "protobuf 3.2.0", "serde", "serde_json", "ttrpc", @@ -1314,9 +1351,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "ttrpc" -version = "0.6.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ecfff459a859c6ba6668ff72b34c2f1d94d9d58f7088414c2674ad0f31cc7d8" +checksum = "a35f22a2964bea14afee161665bb260b83cb48e665e0260ca06ec0e775c8b06c" dependencies = [ "async-trait", "byteorder", @@ -1324,8 +1361,8 @@ dependencies = [ "libc", "log", "nix 0.23.1", - "protobuf", - "protobuf-codegen-pure", + "protobuf 3.2.0", + "protobuf-codegen 3.2.0", "thiserror", "tokio", "tokio-vsock", @@ -1333,28 +1370,28 @@ dependencies = [ [[package]] name = "ttrpc-codegen" -version = "0.2.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809eda4e459820237104e4b61d6b41bbe6c9e1ce6adf4057955e6e6722a90408" +checksum = "94d7f7631d7a9ebed715a47cd4cb6072cbc7ae1d4ec01598971bbec0024340c2" dependencies = [ - "protobuf", - "protobuf-codegen", - "protobuf-codegen-pure", + "protobuf 2.27.1", + "protobuf-codegen 3.2.0", + "protobuf-support", "ttrpc-compiler", ] [[package]] name = "ttrpc-compiler" -version = "0.4.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2978ed3fa047d8fd55cbeb4d4a61d461fb3021a90c9618519c73ce7e5bb66c15" +checksum = "ec3cb5dbf1f0865a34fe3f722290fe776cacb16f50428610b779467b76ddf647" dependencies = [ "derive-new", "prost", "prost-build", "prost-types", - "protobuf", - "protobuf-codegen", + "protobuf 2.27.1", + "protobuf-codegen 2.27.1", "tempfile", ] diff --git a/src/libs/kata-types/src/annotations/mod.rs b/src/libs/kata-types/src/annotations/mod.rs index c8d63125b..16af5ab28 100644 --- a/src/libs/kata-types/src/annotations/mod.rs +++ b/src/libs/kata-types/src/annotations/mod.rs @@ -308,6 +308,14 @@ pub const KATA_ANNO_CFG_DISABLE_NEW_NETNS: &str = /// A sandbox annotation to specify how attached VFIO devices should be treated. 
pub const KATA_ANNO_CFG_VFIO_MODE: &str = "io.katacontainers.config.runtime.vfio_mode"; +/// A sandbox annotation used to specify the host path of the prefetch_files.list for the +/// container image being used; +/// the runtime will pass it to the hypervisor to search for the corresponding prefetch list file. +/// "io.katacontainers.config.hypervisor.prefetch_files.list" +/// = "/path/to//xyz.com/fedora:36/prefetch_file.list" +pub const KATA_ANNO_CFG_HYPERVISOR_PREFETCH_FILES_LIST: &str = + "io.katacontainers.config.hypervisor.prefetch_files.list"; + /// A helper structure to query configuration information by check annotations. #[derive(Debug, Default, Deserialize)] pub struct Annotation { @@ -673,6 +681,9 @@ impl Annotation { hv.machine_info.validate_entropy_source(value)?; hv.machine_info.entropy_source = value.to_string(); } + KATA_ANNO_CFG_HYPERVISOR_PREFETCH_FILES_LIST => { + hv.prefetch_list_path = value.to_string(); + } // Hypervisor Memory related annotations KATA_ANNO_CFG_HYPERVISOR_DEFAULT_MEMORY => { match byte_unit::Byte::from_str(value) { diff --git a/src/libs/kata-types/src/config/hypervisor/mod.rs b/src/libs/kata-types/src/config/hypervisor/mod.rs index 98ae2cc79..7818b897c 100644 --- a/src/libs/kata-types/src/config/hypervisor/mod.rs +++ b/src/libs/kata-types/src/config/hypervisor/mod.rs @@ -979,6 +979,13 @@ pub struct Hypervisor { #[serde(default, flatten)] pub shared_fs: SharedFsInfo, + /// A sandbox annotation used to specify the host path of the prefetch_files.list for the + /// container image being used; the runtime will pass it to the hypervisor to search for + /// the corresponding prefetch list file: + /// prefetch_list_path = /path/to//xyz.com/fedora:36/prefetch_file.list + #[serde(default)] + pub prefetch_list_path: String, + /// Vendor customized runtime configuration. #[serde(default, flatten)] pub vendor: HypervisorVendor, @@ -1022,6 +1029,10 @@ impl ConfigOps for Hypervisor { hv.network_info.adjust_config()?; hv.security_info.adjust_config()?; hv.shared_fs.adjust_config()?; + resolve_path!( + hv.prefetch_list_path, + "prefetch_list_path `{}` is invalid: {}" + )?; } else { return Err(eother!("Can not find plugin for hypervisor {}", hypervisor)); } @@ -1056,6 +1067,10 @@ impl ConfigOps for Hypervisor { "Hypervisor control executable `{}` is invalid: {}" )?; validate_path!(hv.jailer_path, "Hypervisor jailer path `{}` is invalid: {}")?; + validate_path!( + hv.prefetch_list_path, + "prefetch_files.list path `{}` is invalid: {}" + )?; } else { return Err(eother!("Can not find plugin for hypervisor {}", hypervisor)); } diff --git a/src/libs/kata-types/src/config/runtime.rs b/src/libs/kata-types/src/config/runtime.rs index 1d7364368..067ff6776 100644 --- a/src/libs/kata-types/src/config/runtime.rs +++ b/src/libs/kata-types/src/config/runtime.rs @@ -130,6 +130,12 @@ pub struct Runtime { /// Vendor customized runtime configuration. #[serde(default, flatten)] pub vendor: RuntimeVendor, + + /// If keep_abnormal is enabled, it means that 1) if the runtime exits abnormally, the cleanup process + /// will be skipped, and 2) the runtime will not exit even if the health check fails. + /// This option is typically used to retain abnormal information for debugging.
+ #[serde(default)] + pub keep_abnormal: bool, } impl ConfigOps for Runtime { diff --git a/src/libs/oci/src/lib.rs b/src/libs/oci/src/lib.rs index 3531c53fc..bb214ae35 100644 --- a/src/libs/oci/src/lib.rs +++ b/src/libs/oci/src/lib.rs @@ -192,11 +192,23 @@ pub struct Hook { pub struct Hooks { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub prestart: Vec, - #[serde(default, skip_serializing_if = "Vec::is_empty")] + #[serde( + rename = "createRuntime", + default, + skip_serializing_if = "Vec::is_empty" + )] pub create_runtime: Vec, - #[serde(default, skip_serializing_if = "Vec::is_empty")] + #[serde( + rename = "createContainer", + default, + skip_serializing_if = "Vec::is_empty" + )] pub create_container: Vec, - #[serde(default, skip_serializing_if = "Vec::is_empty")] + #[serde( + rename = "startContainer", + default, + skip_serializing_if = "Vec::is_empty" + )] pub start_container: Vec, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub poststart: Vec, @@ -837,6 +849,8 @@ pub struct State { #[cfg(test)] mod tests { + use std::vec; + use super::*; #[test] @@ -1027,6 +1041,11 @@ mod tests { "path": "/usr/bin/setup-network" } ], + "createRuntime": [ + { + "path": "/usr/local/bin/nerdctl" + } + ], "poststart": [ { "path": "/usr/bin/notify-start", @@ -1395,6 +1414,12 @@ mod tests { timeout: None, }, ], + create_runtime: vec![crate::Hook { + path: "/usr/local/bin/nerdctl".to_string(), + args: vec![], + env: vec![], + timeout: None, + }], poststart: vec![crate::Hook { path: "/usr/bin/notify-start".to_string(), args: vec![], diff --git a/src/libs/protocols/.gitignore b/src/libs/protocols/.gitignore index a9bf48131..bc7e10bf3 100644 --- a/src/libs/protocols/.gitignore +++ b/src/libs/protocols/.gitignore @@ -1,14 +1,6 @@ Cargo.lock -src/agent.rs -src/agent_ttrpc.rs -src/agent_ttrpc_async.rs -src/csi.rs -src/empty.rs -src/health.rs -src/health_ttrpc.rs -src/health_ttrpc_async.rs -src/image.rs -src/image_ttrpc.rs -src/image_ttrpc_async.rs -src/oci.rs -src/types.rs + +src/*.rs +!src/lib.rs +!src/trans.rs +!src/serde_config.rs diff --git a/src/libs/protocols/Cargo.toml b/src/libs/protocols/Cargo.toml index 03b9c8b3d..9c0033d17 100644 --- a/src/libs/protocols/Cargo.toml +++ b/src/libs/protocols/Cargo.toml @@ -11,12 +11,13 @@ with-serde = [ "serde", "serde_json" ] async = ["ttrpc/async", "async-trait"] [dependencies] -ttrpc = { version = "0.6.0" } +ttrpc = { version = "0.7.1" } async-trait = { version = "0.1.42", optional = true } -protobuf = { version = "2.27.0", features = ["with-serde"] } +protobuf = { version = "3.2.0" } serde = { version = "1.0.130", features = ["derive"], optional = true } serde_json = { version = "1.0.68", optional = true } oci = { path = "../oci" } [build-dependencies] -ttrpc-codegen = "0.2.0" +ttrpc-codegen = "0.4.2" +protobuf = { version = "3.2.0" } diff --git a/src/libs/protocols/build.rs b/src/libs/protocols/build.rs index 4fee17081..af0dc691e 100644 --- a/src/libs/protocols/build.rs +++ b/src/libs/protocols/build.rs @@ -7,7 +7,46 @@ use std::fs::{self, File}; use std::io::{BufRead, BufReader, Read, Write}; use std::path::Path; use std::process::exit; -use ttrpc_codegen::{Codegen, Customize, ProtobufCustomize}; + +use protobuf::{ + descriptor::field_descriptor_proto::Type, + reflect::{EnumDescriptor, FieldDescriptor, MessageDescriptor, OneofDescriptor}, +}; +use ttrpc_codegen::{Codegen, Customize, ProtobufCustomize, ProtobufCustomizeCallback}; + +struct GenSerde; + +impl ProtobufCustomizeCallback for GenSerde { + fn message(&self, _message: 
&MessageDescriptor) -> ProtobufCustomize { + ProtobufCustomize::default().before("#[cfg_attr(feature = \"with-serde\", derive(::serde::Serialize, ::serde::Deserialize))]") + } + + fn enumeration(&self, _enum_type: &EnumDescriptor) -> ProtobufCustomize { + ProtobufCustomize::default().before("#[cfg_attr(feature = \"with-serde\", derive(::serde::Serialize, ::serde::Deserialize))]") + } + + fn oneof(&self, _oneof: &OneofDescriptor) -> ProtobufCustomize { + ProtobufCustomize::default().before("#[cfg_attr(feature = \"with-serde\", derive(::serde::Serialize, ::serde::Deserialize))]") + } + + fn field(&self, field: &FieldDescriptor) -> ProtobufCustomize { + if field.proto().type_() == Type::TYPE_ENUM { + ProtobufCustomize::default().before( + "#[cfg_attr(feature = \"with-serde\", serde(serialize_with = \"crate::serialize_enum_or_unknown\", deserialize_with = \"crate::deserialize_enum_or_unknown\"))]", + ) + } else if field.proto().type_() == Type::TYPE_MESSAGE && field.is_singular() { + ProtobufCustomize::default().before( + "#[cfg_attr(feature = \"with-serde\", serde(serialize_with = \"crate::serialize_message_field\", deserialize_with = \"crate::deserialize_message_field\"))]", + ) + } else { + ProtobufCustomize::default() + } + } + + fn special_field(&self, _message: &MessageDescriptor, _field: &str) -> ProtobufCustomize { + ProtobufCustomize::default().before("#[cfg_attr(feature = \"with-serde\", serde(skip))]") + } +} fn replace_text_in_file(file_name: &str, from: &str, to: &str) -> Result<(), std::io::Error> { let mut src = File::open(file_name)?; @@ -103,10 +142,10 @@ fn codegen(path: &str, protos: &[&str], async_all: bool) -> Result<(), std::io:: ..Default::default() }; - let protobuf_options = ProtobufCustomize { - serde_derive: Some(true), - ..Default::default() - }; + let protobuf_options = ProtobufCustomize::default() + .gen_mod_rs(false) + .generate_getter(true) + .generate_accessors(true); let out_dir = Path::new("src"); @@ -117,6 +156,7 @@ fn codegen(path: &str, protos: &[&str], async_all: bool) -> Result<(), std::io:: .customize(ttrpc_options) .rust_protobuf() .rust_protobuf_customize(protobuf_options) + .rust_protobuf_customize_callback(GenSerde) .run()?; let autogen_comment = format!("\n//! 
Generated by {:?} ({:?})", file!(), module_path!()); @@ -147,6 +187,7 @@ fn real_main() -> Result<(), std::io::Error> { "src", &[ "protos/google/protobuf/empty.proto", + "protos/gogo/protobuf/gogoproto/gogo.proto", "protos/oci.proto", "protos/types.proto", "protos/csi.proto", diff --git a/src/libs/protocols/src/lib.rs b/src/libs/protocols/src/lib.rs index 846878400..0fe254704 100644 --- a/src/libs/protocols/src/lib.rs +++ b/src/libs/protocols/src/lib.rs @@ -11,6 +11,7 @@ pub mod agent_ttrpc; pub mod agent_ttrpc_async; pub mod csi; pub mod empty; +mod gogo; pub mod health; pub mod health_ttrpc; #[cfg(feature = "async")] @@ -20,5 +21,13 @@ pub mod image_ttrpc; #[cfg(feature = "async")] pub mod image_ttrpc_async; pub mod oci; +#[cfg(feature = "with-serde")] +mod serde_config; pub mod trans; pub mod types; + +#[cfg(feature = "with-serde")] +pub use serde_config::{ + deserialize_enum_or_unknown, deserialize_message_field, serialize_enum_or_unknown, + serialize_message_field, +}; diff --git a/src/libs/protocols/src/serde_config.rs b/src/libs/protocols/src/serde_config.rs new file mode 100644 index 000000000..064379cd1 --- /dev/null +++ b/src/libs/protocols/src/serde_config.rs @@ -0,0 +1,68 @@ +// Copyright (c) 2023 Ant Group +// +// SPDX-License-Identifier: Apache-2.0 +// + +use protobuf::{EnumOrUnknown, MessageField}; +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "with-serde")] +pub fn serialize_enum_or_unknown( + e: &protobuf::EnumOrUnknown, + s: S, +) -> Result { + e.value().serialize(s) +} + +pub fn serialize_message_field( + e: &protobuf::MessageField, + s: S, +) -> Result { + if e.is_some() { + e.as_ref().unwrap().serialize(s) + } else { + s.serialize_unit() + } +} + +pub fn deserialize_enum_or_unknown<'de, E: Deserialize<'de>, D: serde::Deserializer<'de>>( + d: D, +) -> Result, D::Error> { + i32::deserialize(d).map(EnumOrUnknown::from_i32) +} + +pub fn deserialize_message_field<'de, E: Deserialize<'de>, D: serde::Deserializer<'de>>( + d: D, +) -> Result, D::Error> { + Option::deserialize(d).map(MessageField::from_option) +} + +#[cfg(test)] +mod tests { + use crate::agent::{ExecProcessRequest, StringUser}; + use crate::health::{health_check_response::ServingStatus, HealthCheckResponse}; + + #[test] + fn test_serde_for_enum_or_unknown() { + let mut hc = HealthCheckResponse::new(); + hc.set_status(ServingStatus::SERVING); + + let json = serde_json::to_string(&hc).unwrap(); + let from_json: HealthCheckResponse = serde_json::from_str(&json).unwrap(); + + assert_eq!(from_json, hc); + } + + #[test] + fn test_serde_for_message_field() { + let mut epr = ExecProcessRequest::new(); + let mut str_user = StringUser::new(); + str_user.uid = "Someone's id".to_string(); + epr.set_string_user(str_user); + + let json = serde_json::to_string(&epr).unwrap(); + let from_json: ExecProcessRequest = serde_json::from_str(&json).unwrap(); + + assert_eq!(from_json, epr); + } +} diff --git a/src/libs/protocols/src/trans.rs b/src/libs/protocols/src/trans.rs index d4f8cb9b9..0af80cfa1 100644 --- a/src/libs/protocols/src/trans.rs +++ b/src/libs/protocols/src/trans.rs @@ -15,19 +15,19 @@ use oci::{ }; // translate from interface to ttprc tools -fn from_option>(from: Option) -> ::protobuf::SingularPtrField { +fn from_option>(from: Option) -> protobuf::MessageField { match from { - Some(f) => ::protobuf::SingularPtrField::from_option(Some(T::from(f))), - None => ::protobuf::SingularPtrField::none(), + Some(f) => protobuf::MessageField::from_option(Some(f.into())), + None => protobuf::MessageField::none(), } } 
-fn from_vec>(from: Vec) -> ::protobuf::RepeatedField { +fn from_vec>(from: Vec) -> Vec { let mut to: Vec = vec![]; for data in from { - to.push(T::from(data)); + to.push(data.into()); } - ::protobuf::RepeatedField::from_vec(to) + to } impl From for crate::oci::Box { @@ -35,8 +35,7 @@ impl From for crate::oci::Box { crate::oci::Box { Height: from.height, Width: from.width, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -48,8 +47,7 @@ impl From for crate::oci::User { GID: from.gid, AdditionalGids: from.additional_gids, Username: from.username, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -57,13 +55,12 @@ impl From for crate::oci::User { impl From for crate::oci::LinuxCapabilities { fn from(from: LinuxCapabilities) -> Self { crate::oci::LinuxCapabilities { - Bounding: from_vec(from.bounding), - Effective: from_vec(from.effective), - Inheritable: from_vec(from.inheritable), - Permitted: from_vec(from.permitted), - Ambient: from_vec(from.ambient), - unknown_fields: Default::default(), - cached_size: Default::default(), + Bounding: from.bounding, + Effective: from.effective, + Inheritable: from.inheritable, + Permitted: from.permitted, + Ambient: from.ambient, + ..Default::default() } } } @@ -74,8 +71,7 @@ impl From for crate::oci::POSIXRlimit { Type: from.r#type, Hard: from.hard, Soft: from.soft, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -86,8 +82,8 @@ impl From for crate::oci::Process { Terminal: from.terminal, ConsoleSize: from_option(from.console_size), User: from_option(Some(from.user)), - Args: from_vec(from.args), - Env: from_vec(from.env), + Args: from.args, + Env: from.env, Cwd: from.cwd, Capabilities: from_option(from.capabilities), Rlimits: from_vec(from.rlimits), @@ -95,8 +91,7 @@ impl From for crate::oci::Process { ApparmorProfile: from.apparmor_profile, OOMScoreAdj: from.oom_score_adj.map_or(0, |t| t as i64), SelinuxLabel: from.selinux_label, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -109,8 +104,7 @@ impl From for crate::oci::LinuxDeviceCgroup { Major: from.major.map_or(0, |t| t), Minor: from.minor.map_or(0, |t| t), Access: from.access, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -125,8 +119,7 @@ impl From for crate::oci::LinuxMemory { KernelTCP: from.kernel_tcp.map_or(0, |t| t), Swappiness: from.swappiness.map_or(0, |t| t), DisableOOMKiller: from.disable_oom_killer.map_or(false, |t| t), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -141,8 +134,7 @@ impl From for crate::oci::LinuxCPU { RealtimePeriod: from.realtime_period.map_or(0, |t| t), Cpus: from.cpus, Mems: from.mems, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -151,8 +143,7 @@ impl From for crate::oci::LinuxPids { fn from(from: LinuxPids) -> Self { crate::oci::LinuxPids { Limit: from.limit, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -165,8 +156,7 @@ impl From for crate::oci::LinuxWeightDevice { Minor: 0, Weight: from.weight.map_or(0, |t| t as u32), LeafWeight: from.leaf_weight.map_or(0, |t| t as u32), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -178,8 +168,7 @@ impl From for 
crate::oci::LinuxThrottleDevice { Major: 0, Minor: 0, Rate: from.rate, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -194,8 +183,7 @@ impl From for crate::oci::LinuxBlockIO { ThrottleWriteBpsDevice: from_vec(from.throttle_write_bps_device), ThrottleReadIOPSDevice: from_vec(from.throttle_read_iops_device), ThrottleWriteIOPSDevice: from_vec(from.throttle_write_iops_device), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -205,8 +193,7 @@ impl From for crate::oci::LinuxHugepageLimit { crate::oci::LinuxHugepageLimit { Pagesize: from.page_size, Limit: from.limit, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -216,8 +203,7 @@ impl From for crate::oci::LinuxInterfacePriority { crate::oci::LinuxInterfacePriority { Name: from.name, Priority: from.priority, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -227,8 +213,7 @@ impl From for crate::oci::LinuxNetwork { crate::oci::LinuxNetwork { ClassID: from.class_id.map_or(0, |t| t), Priorities: from_vec(from.priorities), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -243,8 +228,7 @@ impl From for crate::oci::LinuxResources { BlockIO: from_option(from.block_io), HugepageLimits: from_vec(from.hugepage_limits), Network: from_option(from.network), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -254,8 +238,7 @@ impl From for crate::oci::Root { crate::oci::Root { Path: from.path, Readonly: from.readonly, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -265,10 +248,9 @@ impl From for crate::oci::Mount { crate::oci::Mount { destination: from.destination, source: from.source, - field_type: from.r#type, - options: from_vec(from.options), - unknown_fields: Default::default(), - cached_size: Default::default(), + type_: from.r#type, + options: from.options, + ..Default::default() } } } @@ -281,11 +263,10 @@ impl From for crate::oci::Hook { } crate::oci::Hook { Path: from.path, - Args: from_vec(from.args), - Env: from_vec(from.env), + Args: from.args, + Env: from.env, Timeout: timeout, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -299,8 +280,7 @@ impl From for crate::oci::Hooks { StartContainer: from_vec(from.start_container), Poststart: from_vec(from.poststart), Poststop: from_vec(from.poststop), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -311,8 +291,7 @@ impl From for crate::oci::LinuxIDMapping { HostID: from.host_id, ContainerID: from.container_id, Size: from.size, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -322,8 +301,7 @@ impl From for crate::oci::LinuxNamespace { crate::oci::LinuxNamespace { Type: from.r#type, Path: from.path, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -338,8 +316,7 @@ impl From for crate::oci::LinuxDevice { FileMode: from.file_mode.map_or(0, |v| v), UID: from.uid.map_or(0, |v| v), GID: from.gid.map_or(0, |v| v), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -351,8 +328,7 @@ impl From for crate::oci::LinuxSeccompArg { Value: from.value, ValueTwo: 
from.value_two, Op: from.op, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -360,14 +336,13 @@ impl From for crate::oci::LinuxSeccompArg { impl From for crate::oci::LinuxSyscall { fn from(from: LinuxSyscall) -> Self { crate::oci::LinuxSyscall { - Names: from_vec(from.names), + Names: from.names, Action: from.action, Args: from_vec(from.args), - ErrnoRet: Some(crate::oci::LinuxSyscall_oneof_ErrnoRet::errnoret( + ErrnoRet: Some(crate::oci::linux_syscall::ErrnoRet::Errnoret( from.errno_ret, )), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -376,11 +351,10 @@ impl From for crate::oci::LinuxSeccomp { fn from(from: LinuxSeccomp) -> Self { crate::oci::LinuxSeccomp { DefaultAction: from.default_action, - Architectures: from_vec(from.architectures), + Architectures: from.architectures, Syscalls: from_vec(from.syscalls), - Flags: from_vec(from.flags), - unknown_fields: Default::default(), - cached_size: Default::default(), + Flags: from.flags, + ..Default::default() } } } @@ -389,8 +363,7 @@ impl From for crate::oci::LinuxIntelRdt { fn from(from: LinuxIntelRdt) -> Self { crate::oci::LinuxIntelRdt { L3CacheSchema: from.l3_cache_schema, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -407,12 +380,11 @@ impl From for crate::oci::Linux { Devices: from_vec(from.devices), Seccomp: from_option(from.seccomp), RootfsPropagation: from.rootfs_propagation, - MaskedPaths: from_vec(from.masked_paths), - ReadonlyPaths: from_vec(from.readonly_paths), + MaskedPaths: from.masked_paths, + ReadonlyPaths: from.readonly_paths, MountLabel: from.mount_label, IntelRdt: from_option(from.intel_rdt), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -430,8 +402,7 @@ impl From for crate::oci::Spec { Linux: from_option(from.linux), Solaris: Default::default(), Windows: Default::default(), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -449,7 +420,7 @@ impl From for oci::Mount { fn from(mut from: crate::oci::Mount) -> Self { let options = from.take_options().to_vec(); Self { - r#type: from.take_field_type(), + r#type: from.take_type_(), destination: from.take_destination(), source: from.take_source(), options, @@ -460,9 +431,9 @@ impl From for oci::Mount { impl From for oci::LinuxIdMapping { fn from(from: crate::oci::LinuxIDMapping) -> Self { LinuxIdMapping { - container_id: from.get_ContainerID(), - host_id: from.get_HostID(), - size: from.get_Size(), + container_id: from.ContainerID(), + host_id: from.HostID(), + size: from.Size(), } } } @@ -470,18 +441,18 @@ impl From for oci::LinuxIdMapping { impl From for oci::LinuxDeviceCgroup { fn from(mut from: crate::oci::LinuxDeviceCgroup) -> Self { let mut major = None; - if from.get_Major() > 0 { - major = Some(from.get_Major()); + if from.Major() > 0 { + major = Some(from.Major()); } let mut minor = None; - if from.get_Minor() > 0 { - minor = Some(from.get_Minor()) + if from.Minor() > 0 { + minor = Some(from.Minor()) } oci::LinuxDeviceCgroup { - allow: from.get_Allow(), - r#type: Some(from.take_Type()), + allow: from.Allow(), + r#type: from.take_Type(), major, minor, access: from.take_Access(), @@ -492,36 +463,36 @@ impl From for oci::LinuxDeviceCgroup { impl From for oci::LinuxMemory { fn from(from: crate::oci::LinuxMemory) -> Self { let mut limit = None; - if from.get_Limit() > 0 { - limit = 
Some(from.get_Limit()); + if from.Limit() > 0 { + limit = Some(from.Limit()); } let mut reservation = None; - if from.get_Reservation() > 0 { - reservation = Some(from.get_Reservation()); + if from.Reservation() > 0 { + reservation = Some(from.Reservation()); } let mut swap = None; - if from.get_Swap() > 0 { - swap = Some(from.get_Swap()); + if from.Swap() > 0 { + swap = Some(from.Swap()); } let mut kernel = None; - if from.get_Kernel() > 0 { - kernel = Some(from.get_Kernel()); + if from.Kernel() > 0 { + kernel = Some(from.Kernel()); } let mut kernel_tcp = None; - if from.get_KernelTCP() > 0 { - kernel_tcp = Some(from.get_KernelTCP()); + if from.KernelTCP() > 0 { + kernel_tcp = Some(from.KernelTCP()); } let mut swappiness = None; - if from.get_Swappiness() > 0 { - swappiness = Some(from.get_Swappiness()); + if from.Swappiness() > 0 { + swappiness = Some(from.Swappiness()); } - let disable_oom_killer = Some(from.get_DisableOOMKiller()); + let disable_oom_killer = Some(from.DisableOOMKiller()); oci::LinuxMemory { limit, @@ -538,28 +509,28 @@ impl From for oci::LinuxMemory { impl From for oci::LinuxCpu { fn from(mut from: crate::oci::LinuxCPU) -> Self { let mut shares = None; - if from.get_Shares() > 0 { - shares = Some(from.get_Shares()); + if from.Shares() > 0 { + shares = Some(from.Shares()); } let mut quota = None; - if from.get_Quota() > 0 { - quota = Some(from.get_Quota()); + if from.Quota() > 0 { + quota = Some(from.Quota()); } let mut period = None; - if from.get_Period() > 0 { - period = Some(from.get_Period()); + if from.Period() > 0 { + period = Some(from.Period()); } let mut realtime_runtime = None; - if from.get_RealtimeRuntime() > 0 { - realtime_runtime = Some(from.get_RealtimeRuntime()); + if from.RealtimeRuntime() > 0 { + realtime_runtime = Some(from.RealtimeRuntime()); } let mut realtime_period = None; - if from.get_RealtimePeriod() > 0 { - realtime_period = Some(from.get_RealtimePeriod()); + if from.RealtimePeriod() > 0 { + realtime_period = Some(from.RealtimePeriod()); } let cpus = from.take_Cpus(); @@ -580,7 +551,7 @@ impl From for oci::LinuxCpu { impl From for oci::LinuxPids { fn from(from: crate::oci::LinuxPids) -> Self { oci::LinuxPids { - limit: from.get_Limit(), + limit: from.Limit(), } } } @@ -588,35 +559,35 @@ impl From for oci::LinuxPids { impl From for oci::LinuxBlockIo { fn from(from: crate::oci::LinuxBlockIO) -> Self { let mut weight = None; - if from.get_Weight() > 0 { - weight = Some(from.get_Weight() as u16); + if from.Weight() > 0 { + weight = Some(from.Weight() as u16); } let mut leaf_weight = None; - if from.get_LeafWeight() > 0 { - leaf_weight = Some(from.get_LeafWeight() as u16); + if from.LeafWeight() > 0 { + leaf_weight = Some(from.LeafWeight() as u16); } let mut weight_device = Vec::new(); - for wd in from.get_WeightDevice() { + for wd in from.WeightDevice() { weight_device.push(wd.clone().into()); } let mut throttle_read_bps_device = Vec::new(); - for td in from.get_ThrottleReadBpsDevice() { + for td in from.ThrottleReadBpsDevice() { throttle_read_bps_device.push(td.clone().into()); } let mut throttle_write_bps_device = Vec::new(); - for td in from.get_ThrottleWriteBpsDevice() { + for td in from.ThrottleWriteBpsDevice() { throttle_write_bps_device.push(td.clone().into()); } let mut throttle_read_iops_device = Vec::new(); - for td in from.get_ThrottleReadIOPSDevice() { + for td in from.ThrottleReadIOPSDevice() { throttle_read_iops_device.push(td.clone().into()); } let mut throttle_write_iops_device = Vec::new(); - for td in 
from.get_ThrottleWriteIOPSDevice() { + for td in from.ThrottleWriteIOPSDevice() { throttle_write_iops_device.push(td.clone().into()); } @@ -661,7 +632,7 @@ impl From for oci::LinuxInterfacePriority { fn from(mut from: crate::oci::LinuxInterfacePriority) -> Self { oci::LinuxInterfacePriority { name: from.take_Name(), - priority: from.get_Priority(), + priority: from.Priority(), } } } @@ -669,11 +640,11 @@ impl From for oci::LinuxInterfacePriority { impl From for oci::LinuxNetwork { fn from(mut from: crate::oci::LinuxNetwork) -> Self { let mut class_id = None; - if from.get_ClassID() > 0 { - class_id = Some(from.get_ClassID()); + if from.ClassID() > 0 { + class_id = Some(from.ClassID()); } let mut priorities = Vec::new(); - for p in from.take_Priorities().to_vec() { + for p in from.take_Priorities() { priorities.push(p.into()) } @@ -688,7 +659,7 @@ impl From for oci::LinuxHugepageLimit { fn from(mut from: crate::oci::LinuxHugepageLimit) -> Self { oci::LinuxHugepageLimit { page_size: from.take_Pagesize(), - limit: from.get_Limit(), + limit: from.Limit(), } } } @@ -696,7 +667,7 @@ impl From for oci::LinuxHugepageLimit { impl From for oci::LinuxResources { fn from(mut from: crate::oci::LinuxResources) -> Self { let mut devices = Vec::new(); - for d in from.take_Devices().to_vec() { + for d in from.take_Devices() { devices.push(d.into()); } @@ -712,16 +683,16 @@ impl From for oci::LinuxResources { let mut pids = None; if from.has_Pids() { - pids = Some(from.get_Pids().clone().into()) + pids = Some(from.Pids().clone().into()) } let mut block_io = None; if from.has_BlockIO() { - block_io = Some(from.get_BlockIO().clone().into()); + block_io = Some(from.BlockIO().clone().into()); } let mut hugepage_limits = Vec::new(); - for hl in from.get_HugepageLimits() { + for hl in from.HugepageLimits() { hugepage_limits.push(hl.clone().into()); } @@ -750,11 +721,11 @@ impl From for oci::LinuxDevice { oci::LinuxDevice { path: from.take_Path(), r#type: from.take_Type(), - major: from.get_Major(), - minor: from.get_Minor(), - file_mode: Some(from.get_FileMode()), - uid: Some(from.get_UID()), - gid: Some(from.get_GID()), + major: from.Major(), + minor: from.Minor(), + file_mode: Some(from.FileMode()), + uid: Some(from.UID()), + gid: Some(from.GID()), } } } @@ -762,9 +733,9 @@ impl From for oci::LinuxDevice { impl From for oci::LinuxSeccompArg { fn from(mut from: crate::oci::LinuxSeccompArg) -> Self { oci::LinuxSeccompArg { - index: from.get_Index() as u32, - value: from.get_Value(), - value_two: from.get_ValueTwo(), + index: from.Index() as u32, + value: from.Value(), + value_two: from.ValueTwo(), op: from.take_Op(), } } @@ -773,14 +744,14 @@ impl From for oci::LinuxSeccompArg { impl From for oci::LinuxSyscall { fn from(mut from: crate::oci::LinuxSyscall) -> Self { let mut args = Vec::new(); - for ag in from.take_Args().to_vec() { + for ag in from.take_Args() { args.push(ag.into()); } oci::LinuxSyscall { names: from.take_Names().to_vec(), action: from.take_Action(), args, - errno_ret: from.get_errnoret(), + errno_ret: from.errnoret(), } } } @@ -788,7 +759,7 @@ impl From for oci::LinuxSyscall { impl From for oci::LinuxSeccomp { fn from(mut from: crate::oci::LinuxSeccomp) -> Self { let mut syscalls = Vec::new(); - for s in from.take_Syscalls().to_vec() { + for s in from.take_Syscalls() { syscalls.push(s.into()); } @@ -813,16 +784,16 @@ impl From for oci::LinuxNamespace { impl From for oci::Linux { fn from(mut from: crate::oci::Linux) -> Self { let mut uid_mappings = Vec::new(); - for id_map in 
from.take_UIDMappings().to_vec() { + for id_map in from.take_UIDMappings() { uid_mappings.push(id_map.into()) } let mut gid_mappings = Vec::new(); - for id_map in from.take_GIDMappings().to_vec() { + for id_map in from.take_GIDMappings() { gid_mappings.push(id_map.into()) } - let sysctl = from.get_Sysctl().clone(); + let sysctl = from.Sysctl().clone(); let mut resources = None; if from.has_Resources() { resources = Some(from.take_Resources().into()); @@ -830,12 +801,12 @@ impl From for oci::Linux { let cgroups_path = from.take_CgroupsPath(); let mut namespaces = Vec::new(); - for ns in from.take_Namespaces().to_vec() { + for ns in from.take_Namespaces() { namespaces.push(ns.into()) } let mut devices = Vec::new(); - for d in from.take_Devices().to_vec() { + for d in from.take_Devices() { devices.push(d.into()); } @@ -874,8 +845,8 @@ impl From for oci::PosixRlimit { fn from(mut from: crate::oci::POSIXRlimit) -> Self { oci::PosixRlimit { r#type: from.take_Type(), - hard: from.get_Hard(), - soft: from.get_Soft(), + hard: from.Hard(), + soft: from.Soft(), } } } @@ -895,8 +866,8 @@ impl From for oci::LinuxCapabilities { impl From for oci::User { fn from(mut from: crate::oci::User) -> Self { oci::User { - uid: from.get_UID(), - gid: from.get_GID(), + uid: from.UID(), + gid: from.GID(), additional_gids: from.take_AdditionalGids().to_vec(), username: from.take_Username(), } @@ -906,8 +877,8 @@ impl From for oci::User { impl From for oci::Box { fn from(from: crate::oci::Box) -> Self { oci::Box { - height: from.get_Height(), - width: from.get_Width(), + height: from.Height(), + width: from.Width(), } } } @@ -920,22 +891,22 @@ impl From for oci::Process { } let user = from.take_User().into(); - let args = from.take_Args().into_vec(); - let env = from.take_Env().into_vec(); + let args = from.take_Args(); + let env = from.take_Env(); let cwd = from.take_Cwd(); let mut capabilities = None; if from.has_Capabilities() { capabilities = Some(from.take_Capabilities().into()); } let mut rlimits = Vec::new(); - for rl in from.take_Rlimits().to_vec() { + for rl in from.take_Rlimits() { rlimits.push(rl.into()); } - let no_new_privileges = from.get_NoNewPrivileges(); + let no_new_privileges = from.NoNewPrivileges(); let apparmor_profile = from.take_ApparmorProfile(); let mut oom_score_adj = None; - if from.get_OOMScoreAdj() != 0 { - oom_score_adj = Some(from.get_OOMScoreAdj() as i32); + if from.OOMScoreAdj() != 0 { + oom_score_adj = Some(from.OOMScoreAdj() as i32); } let selinux_label = from.take_SelinuxLabel(); @@ -959,8 +930,8 @@ impl From for oci::Process { impl From for oci::Hook { fn from(mut from: crate::oci::Hook) -> Self { let mut timeout = None; - if from.get_Timeout() > 0 { - timeout = Some(from.get_Timeout() as i32); + if from.Timeout() > 0 { + timeout = Some(from.Timeout() as i32); } oci::Hook { path: from.take_Path(), @@ -1020,7 +991,7 @@ impl From for oci::Spec { } let mut mounts = Vec::new(); - for m in from.take_Mounts().into_vec() { + for m in from.take_Mounts() { mounts.push(m.into()) } @@ -1085,7 +1056,7 @@ mod tests { #[test] fn test_from_vec_len_0() { let from: Vec = vec![]; - let to: ::protobuf::RepeatedField = from_vec(from.clone()); + let to: Vec = from_vec(from.clone()); assert_eq!(from.len(), to.len()); } @@ -1094,7 +1065,7 @@ mod tests { let from: Vec = vec![TestA { from: "a".to_string(), }]; - let to: ::protobuf::RepeatedField = from_vec(from.clone()); + let to: Vec = from_vec(from.clone()); assert_eq!(from.len(), to.len()); assert_eq!(from[0].from, to[0].to); diff --git 
a/src/runtime-rs/Cargo.lock b/src/runtime-rs/Cargo.lock index caff85873..a68b485d1 100644 --- a/src/runtime-rs/Cargo.lock +++ b/src/runtime-rs/Cargo.lock @@ -50,7 +50,7 @@ dependencies = [ "logging", "nix 0.24.3", "oci", - "protobuf", + "protobuf 3.2.0", "protocols", "serde", "serde_json", @@ -423,6 +423,7 @@ dependencies = [ "nix 0.26.2", "serde", "serde_json", + "thiserror", "tokio", ] @@ -474,7 +475,7 @@ dependencies = [ "nix 0.24.3", "oci", "persist", - "protobuf", + "protobuf 3.2.0", "serde_json", "slog", "slog-scope", @@ -507,13 +508,14 @@ checksum = "f3ad85c1f65dc7b37604eb0e89748faf0b9653065f2a8ef69f96a687ec1e9279" [[package]] name = "containerd-shim-protos" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "077ec778a0835d9d85502e8535362130187759b69eddabe2bdb3a68ffb575bd0" +checksum = "ef45f1c71aa587d8f657c546d8da38ea04f113dd05da0ef993c4515fa25fbdd1" dependencies = [ "async-trait", - "protobuf", + "protobuf 3.2.0", "ttrpc", + "ttrpc-codegen", ] [[package]] @@ -1733,6 +1735,16 @@ dependencies = [ "tokio", ] +[[package]] +name = "netns-rs" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23541694f1d7d18cd1a0da3a1352a6ea48b01cbb4a8e7a6e547963823fd5276e" +dependencies = [ + "nix 0.23.2", + "thiserror", +] + [[package]] name = "nix" version = "0.23.2" @@ -2181,9 +2193,16 @@ name = "protobuf" version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + +[[package]] +name = "protobuf" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55bad9126f378a853655831eb7363b7b01b81d19f8cb1218861086ca4a1a61e" dependencies = [ - "serde", - "serde_derive", + "once_cell", + "protobuf-support", + "thiserror", ] [[package]] @@ -2192,36 +2211,47 @@ version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "033460afb75cf755fcfc16dfaed20b86468082a2ea24e05ac35ab4a099a017d6" dependencies = [ - "protobuf", + "protobuf 2.28.0", ] [[package]] -name = "protobuf-codegen-pure" -version = "2.28.0" +name = "protobuf-codegen" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a29399fc94bcd3eeaa951c715f7bea69409b2445356b00519740bcd6ddd865" +checksum = "0dd418ac3c91caa4032d37cb80ff0d44e2ebe637b2fb243b6234bf89cdac4901" dependencies = [ - "protobuf", - "protobuf-codegen", + "anyhow", + "once_cell", + "protobuf 3.2.0", + "protobuf-parse", + "regex", + "tempfile", + "thiserror", ] [[package]] -name = "protobuf-codegen-pure3" -version = "2.28.2" +name = "protobuf-parse" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b351add14db0721ad0842f4858aec11a5088684112ef163fc50f113c63e69b2e" +checksum = "9d39b14605eaa1f6a340aec7f320b34064feb26c93aec35d6a9a2272a8ddfa49" dependencies = [ - "protobuf", - "protobuf-codegen3", + "anyhow", + "indexmap", + "log", + "protobuf 3.2.0", + "protobuf-support", + "tempfile", + "thiserror", + "which", ] [[package]] -name = "protobuf-codegen3" -version = "2.28.2" +name = "protobuf-support" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73c5878d0fa872bd7d61782c6aa2d2d56761ba4ed4514eb6992f5f83162f1d2f" +checksum = "a5d4d7b8601c814cfb36bcebb79f0e61e45e1e93640cf778837833bbed05c372" dependencies = [ - "protobuf", + "thiserror", ] [[package]] @@ -2230,7 +2260,7 @@ 
version = "0.1.0" dependencies = [ "async-trait", "oci", - "protobuf", + "protobuf 3.2.0", "ttrpc", "ttrpc-codegen", ] @@ -2400,6 +2430,7 @@ dependencies = [ "byte-unit 4.0.18", "cgroups-rs", "futures 0.3.26", + "hex", "hypervisor", "kata-sys-util", "kata-types", @@ -2408,6 +2439,7 @@ dependencies = [ "logging", "netlink-packet-route", "netlink-sys", + "netns-rs", "nix 0.24.3", "oci", "persist", @@ -2463,9 +2495,11 @@ dependencies = [ "lazy_static", "linux_container", "logging", + "netns-rs", "nix 0.25.1", "oci", "persist", + "resource", "serde_json", "shim-interface", "slog", @@ -2661,7 +2695,7 @@ dependencies = [ "logging", "nix 0.24.3", "oci", - "protobuf", + "protobuf 3.2.0", "rand 0.8.5", "serial_test", "service", @@ -3121,9 +3155,9 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "ttrpc" -version = "0.6.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ecfff459a859c6ba6668ff72b34c2f1d94d9d58f7088414c2674ad0f31cc7d8" +checksum = "a35f22a2964bea14afee161665bb260b83cb48e665e0260ca06ec0e775c8b06c" dependencies = [ "async-trait", "byteorder", @@ -3131,8 +3165,8 @@ dependencies = [ "libc", "log", "nix 0.23.2", - "protobuf", - "protobuf-codegen-pure", + "protobuf 3.2.0", + "protobuf-codegen 3.2.0", "thiserror", "tokio", "tokio-vsock", @@ -3140,28 +3174,28 @@ dependencies = [ [[package]] name = "ttrpc-codegen" -version = "0.2.3" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2365c9179ad82b29bda1b0162c7542ab5861a7844abfedd8cfdf8bd7e12358f9" +checksum = "94d7f7631d7a9ebed715a47cd4cb6072cbc7ae1d4ec01598971bbec0024340c2" dependencies = [ - "protobuf", - "protobuf-codegen-pure3", - "protobuf-codegen3", + "protobuf 2.28.0", + "protobuf-codegen 3.2.0", + "protobuf-support", "ttrpc-compiler", ] [[package]] name = "ttrpc-compiler" -version = "0.4.3" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed57c2d6669099791507b8b491b2940f2e8975b52a73fe82efad24257d10e9bc" +checksum = "ec3cb5dbf1f0865a34fe3f722290fe776cacb16f50428610b779467b76ddf647" dependencies = [ "derive-new", "prost", "prost-build", "prost-types", - "protobuf", - "protobuf-codegen3", + "protobuf 2.28.0", + "protobuf-codegen 2.28.0", "tempfile", ] @@ -3282,7 +3316,7 @@ dependencies = [ "nix 0.24.3", "oci", "persist", - "protobuf", + "protobuf 3.2.0", "resource", "serde", "serde_derive", diff --git a/src/runtime-rs/config/configuration-dragonball.toml.in b/src/runtime-rs/config/configuration-dragonball.toml.in index 8b963e12d..4c7d3db05 100644 --- a/src/runtime-rs/config/configuration-dragonball.toml.in +++ b/src/runtime-rs/config/configuration-dragonball.toml.in @@ -206,15 +206,22 @@ container_pipe_size=@PIPESIZE@ #debug_console_enabled = true # Agent connection dialing timeout value in seconds -# (default: 30) -#dial_timeout = 30 +# (default: 45) +dial_timeout = 45 [runtime] # If enabled, the runtime will log additional debug messages to the # system log # (default: disabled) #enable_debug = true -# + +# If enabled, enabled, it means that 1) if the runtime exits abnormally, +# the cleanup process will be skipped, and 2) the runtime will not exit +# even if the health check fails. +# This option is typically used to retain abnormal information for debugging. 
+# (default: false) +#keep_abnormal = true + # Internetworking model # Determines how the VM should be connected to the # the container network interface diff --git a/src/runtime-rs/crates/agent/Cargo.toml b/src/runtime-rs/crates/agent/Cargo.toml index 0deca014d..4475c6d47 100644 --- a/src/runtime-rs/crates/agent/Cargo.toml +++ b/src/runtime-rs/crates/agent/Cargo.toml @@ -12,12 +12,12 @@ futures = "0.1.27" anyhow = "1.0.26" async-trait = "0.1.48" log = "0.4.14" -protobuf = "2.27.0" +protobuf = "3.2.0" serde = { version = "^1.0", features = ["derive"] } serde_json = ">=1.0.9" slog = "2.5.2" slog-scope = "4.4.0" -ttrpc = { version = "0.6.1" } +ttrpc = { version = "0.7.1" } tokio = { version = "1.8.0", features = ["fs", "rt"] } url = "2.2.2" nix = "0.24.2" diff --git a/src/runtime-rs/crates/agent/src/kata/agent.rs b/src/runtime-rs/crates/agent/src/kata/agent.rs index aa0df0857..d06da15ea 100644 --- a/src/runtime-rs/crates/agent/src/kata/agent.rs +++ b/src/runtime-rs/crates/agent/src/kata/agent.rs @@ -56,7 +56,7 @@ macro_rules! impl_health_service { impl HealthService for KataAgent { $(async fn $name(&self, req: $req) -> Result<$resp> { let r = req.into(); - let (mut client, timeout, _) = self.get_health_client().await.context("get health client")?; + let (client, timeout, _) = self.get_health_client().await.context("get health client")?; let resp = client.$name(new_ttrpc_ctx(timeout * MILLISECOND_TO_NANOSECOND), &r).await?; Ok(resp.into()) })* @@ -75,7 +75,7 @@ macro_rules! impl_agent { impl Agent for KataAgent { $(async fn $name(&self, req: $req) -> Result<$resp> { let r = req.into(); - let (mut client, mut timeout, _) = self.get_agent_client().await.context("get client")?; + let (client, mut timeout, _) = self.get_agent_client().await.context("get client")?; // update new timeout if let Some(v) = $new_timeout { diff --git a/src/runtime-rs/crates/agent/src/kata/trans.rs b/src/runtime-rs/crates/agent/src/kata/trans.rs index 7d33a0992..172095ceb 100644 --- a/src/runtime-rs/crates/agent/src/kata/trans.rs +++ b/src/runtime-rs/crates/agent/src/kata/trans.rs @@ -30,30 +30,18 @@ use crate::{ OomEventResponse, WaitProcessResponse, WriteStreamResponse, }; -fn from_vec, T: Sized>(from: Vec) -> ::protobuf::RepeatedField { - let mut to: Vec = vec![]; - for data in from { - to.push(data.into()); - } - ::protobuf::RepeatedField::from_vec(to) +fn trans_vec>(from: Vec) -> Vec { + from.into_iter().map(|f| f.into()).collect() } -fn into_vec>(from: ::protobuf::RepeatedField) -> Vec { - let mut to: Vec = vec![]; - for data in from.to_vec() { - to.push(data.into()); - } - to -} - -fn from_option>(from: Option) -> ::protobuf::SingularPtrField { +fn from_option>(from: Option) -> protobuf::MessageField { match from { - Some(f) => ::protobuf::SingularPtrField::from_option(Some(T::from(f))), - None => ::protobuf::SingularPtrField::none(), + Some(f) => protobuf::MessageField::from_option(Some(T::from(f))), + None => protobuf::MessageField::none(), } } -fn into_option, T: Sized>(from: ::protobuf::SingularPtrField) -> Option { +fn into_option, T: Sized>(from: protobuf::MessageField) -> Option { from.into_option().map(|f| f.into()) } @@ -84,9 +72,8 @@ impl From for agent::FSGroup { Self { group_id: from.group_id, - group_change_policy: policy, - unknown_fields: Default::default(), - cached_size: Default::default(), + group_change_policy: policy.into(), + ..Default::default() } } } @@ -96,9 +83,8 @@ impl From for agent::StringUser { Self { uid: from.uid, gid: from.gid, - additionalGids: 
::protobuf::RepeatedField::from_vec(from.additional_gids), - unknown_fields: Default::default(), - cached_size: Default::default(), + additionalGids: from.additional_gids, + ..Default::default() } } } @@ -107,12 +93,11 @@ impl From for agent::Device { fn from(from: Device) -> Self { Self { id: from.id, - field_type: from.field_type, + type_: from.field_type, vm_path: from.vm_path, container_path: from.container_path, - options: from_vec(from.options), - unknown_fields: Default::default(), - cached_size: Default::default(), + options: trans_vec(from.options), + ..Default::default() } } } @@ -121,14 +106,13 @@ impl From for agent::Storage { fn from(from: Storage) -> Self { Self { driver: from.driver, - driver_options: from_vec(from.driver_options), + driver_options: trans_vec(from.driver_options), source: from.source, fstype: from.fs_type, fs_group: from_option(from.fs_group), - options: from_vec(from.options), + options: trans_vec(from.options), mount_point: from.mount_point, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -137,9 +121,8 @@ impl From for agent::KernelModule { fn from(from: KernelModule) -> Self { Self { name: from.name, - parameters: from_vec(from.parameters), - unknown_fields: Default::default(), - cached_size: Default::default(), + parameters: trans_vec(from.parameters), + ..Default::default() } } } @@ -166,11 +149,10 @@ impl From for IPFamily { impl From for types::IPAddress { fn from(from: IPAddress) -> Self { Self { - family: from.family.into(), + family: protobuf::EnumOrUnknown::new(from.family.into()), address: from.address, mask: from.mask, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -178,7 +160,7 @@ impl From for types::IPAddress { impl From for IPAddress { fn from(src: types::IPAddress) -> Self { Self { - family: src.family.into(), + family: src.family.unwrap().into(), address: "".to_string(), mask: "".to_string(), } @@ -190,14 +172,13 @@ impl From for types::Interface { Self { device: from.device, name: from.name, - IPAddresses: from_vec(from.ip_addresses), + IPAddresses: trans_vec(from.ip_addresses), mtu: from.mtu, hwAddr: from.hw_addr, pciPath: from.pci_addr, - field_type: from.field_type, + type_: from.field_type, raw_flags: from.raw_flags, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -207,11 +188,11 @@ impl From for Interface { Self { device: src.device, name: src.name, - ip_addresses: into_vec(src.IPAddresses), + ip_addresses: trans_vec(src.IPAddresses), mtu: src.mtu, hw_addr: src.hwAddr, pci_addr: src.pciPath, - field_type: src.field_type, + field_type: src.type_, raw_flags: src.raw_flags, } } @@ -220,7 +201,7 @@ impl From for Interface { impl From for Interfaces { fn from(src: agent::Interfaces) -> Self { Self { - interfaces: into_vec(src.Interfaces), + interfaces: trans_vec(src.Interfaces), } } } @@ -233,9 +214,8 @@ impl From for types::Route { device: from.device, source: from.source, scope: from.scope, - family: from.family.into(), - unknown_fields: Default::default(), - cached_size: Default::default(), + family: protobuf::EnumOrUnknown::new(from.family.into()), + ..Default::default() } } } @@ -248,7 +228,7 @@ impl From for Route { device: src.device, source: src.source, scope: src.scope, - family: src.family.into(), + family: src.family.unwrap().into(), } } } @@ -256,9 +236,8 @@ impl From for Route { impl From for agent::Routes { fn from(from: Routes) -> Self { Self { - Routes: 
from_vec(from.routes), - unknown_fields: Default::default(), - cached_size: Default::default(), + Routes: trans_vec(from.routes), + ..Default::default() } } } @@ -266,7 +245,7 @@ impl From for agent::Routes { impl From for Routes { fn from(src: agent::Routes) -> Self { Self { - routes: into_vec(src.Routes), + routes: trans_vec(src.Routes), } } } @@ -277,12 +256,11 @@ impl From for agent::CreateContainerRequest { container_id: from.process_id.container_id(), exec_id: from.process_id.exec_id(), string_user: from_option(from.string_user), - devices: from_vec(from.devices), - storages: from_vec(from.storages), + devices: trans_vec(from.devices), + storages: trans_vec(from.storages), OCI: from_option(from.oci), sandbox_pidns: from.sandbox_pidns, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -292,8 +270,7 @@ impl From for agent::RemoveContainerRequest { Self { container_id: from.container_id, timeout: from.timeout, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -302,8 +279,7 @@ impl From for agent::StartContainerRequest { fn from(from: ContainerID) -> Self { Self { container_id: from.container_id, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -312,8 +288,7 @@ impl From for agent::StatsContainerRequest { fn from(from: ContainerID) -> Self { Self { container_id: from.container_id, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -322,8 +297,7 @@ impl From for agent::PauseContainerRequest { fn from(from: ContainerID) -> Self { Self { container_id: from.container_id, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -332,8 +306,7 @@ impl From for agent::ResumeContainerRequest { fn from(from: ContainerID) -> Self { Self { container_id: from.container_id, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -344,8 +317,7 @@ impl From for agent::SignalProcessRequest { container_id: from.process_id.container_id(), exec_id: from.process_id.exec_id(), signal: from.signal, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -355,8 +327,7 @@ impl From for agent::WaitProcessRequest { Self { container_id: from.process_id.container_id(), exec_id: from.process_id.exec_id(), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -366,8 +337,7 @@ impl From for agent::UpdateContainerRequest { Self { container_id: from.container_id, resources: from_option(Some(from.resources)), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -378,8 +348,7 @@ impl From for agent::WriteStreamRequest { container_id: from.process_id.container_id(), exec_id: from.process_id.exec_id(), data: from.data, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -402,7 +371,7 @@ impl From for agent::GetIPTablesRequest { impl From for GetIPTablesResponse { fn from(from: agent::GetIPTablesResponse) -> Self { Self { - data: from.get_data().to_vec(), + data: from.data().to_vec(), } } } @@ -420,7 +389,7 @@ impl From for agent::SetIPTablesRequest { impl From for SetIPTablesResponse { fn from(from: agent::SetIPTablesResponse) -> Self { Self { - data: from.get_data().to_vec(), + data: from.data().to_vec(), } } } 
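// Editorial sketch, not part of the patch: every hunk in this file applies the
// same two changes for the rust-protobuf v3 generated types. Explicit
// `unknown_fields`/`cached_size` initializers are replaced with struct-update
// syntax, and the old `from_vec`/`into_vec` helpers are unified into a single
// generic converter, assumed here to look roughly like `trans_vec` below.
// The surrounding type names are hypothetical and exist only to show the shape.
fn trans_vec<F, T: From<F>>(from: Vec<F>) -> Vec<T> {
    from.into_iter().map(T::from).collect()
}

#[derive(Default)]
struct DeviceProto {
    id: String,
    options: Vec<String>,
    // Generated messages carry extra bookkeeping fields; the struct-update
    // syntax below fills them with their defaults without naming each one.
    cached_size: u32,
}

struct Device {
    id: String,
    options: Vec<String>,
}

impl From<Device> for DeviceProto {
    fn from(from: Device) -> Self {
        Self {
            id: from.id,
            options: trans_vec(from.options),
            ..Default::default()
        }
    }
}

fn main() {
    let proto = DeviceProto::from(Device {
        id: "vda".into(),
        options: vec!["ro".into()],
    });
    assert_eq!(proto.options, vec!["ro".to_string()]);
}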
@@ -432,8 +401,7 @@ impl From for agent::ExecProcessRequest { exec_id: from.process_id.exec_id(), string_user: from_option(from.string_user), process: from_option(from.process), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -515,14 +483,14 @@ impl From for BlkioStatsEntry { impl From for BlkioStats { fn from(src: agent::BlkioStats) -> Self { Self { - io_service_bytes_recursive: into_vec(src.io_service_bytes_recursive), - io_serviced_recursive: into_vec(src.io_serviced_recursive), - io_queued_recursive: into_vec(src.io_queued_recursive), - io_service_time_recursive: into_vec(src.io_service_time_recursive), - io_wait_time_recursive: into_vec(src.io_wait_time_recursive), - io_merged_recursive: into_vec(src.io_merged_recursive), - io_time_recursive: into_vec(src.io_time_recursive), - sectors_recursive: into_vec(src.sectors_recursive), + io_service_bytes_recursive: trans_vec(src.io_service_bytes_recursive), + io_serviced_recursive: trans_vec(src.io_serviced_recursive), + io_queued_recursive: trans_vec(src.io_queued_recursive), + io_service_time_recursive: trans_vec(src.io_service_time_recursive), + io_wait_time_recursive: trans_vec(src.io_wait_time_recursive), + io_merged_recursive: trans_vec(src.io_merged_recursive), + io_time_recursive: trans_vec(src.io_time_recursive), + sectors_recursive: trans_vec(src.sectors_recursive), } } } @@ -570,7 +538,7 @@ impl From for StatsContainerResponse { fn from(src: agent::StatsContainerResponse) -> Self { Self { cgroup_stats: into_option(src.cgroup_stats), - network_stats: into_vec(src.network_stats), + network_stats: trans_vec(src.network_stats), } } } @@ -581,8 +549,7 @@ impl From for agent::ReadStreamRequest { container_id: from.process_id.container_id(), exec_id: from.process_id.exec_id(), len: from.len, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -598,8 +565,7 @@ impl From for agent::CloseStdinRequest { Self { container_id: from.process_id.container_id(), exec_id: from.process_id.exec_id(), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -611,8 +577,7 @@ impl From for agent::TtyWinResizeRequest { exec_id: from.process_id.exec_id(), row: from.row, column: from.column, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -621,8 +586,7 @@ impl From for agent::UpdateInterfaceRequest { fn from(from: UpdateInterfaceRequest) -> Self { Self { interface: from_option(from.interface), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -630,8 +594,7 @@ impl From for agent::UpdateInterfaceRequest { impl From for agent::ListInterfacesRequest { fn from(_: Empty) -> Self { Self { - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -640,8 +603,7 @@ impl From for agent::UpdateRoutesRequest { fn from(from: UpdateRoutesRequest) -> Self { Self { routes: from_option(from.route), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -649,8 +611,7 @@ impl From for agent::UpdateRoutesRequest { impl From for agent::ListRoutesRequest { fn from(_: Empty) -> Self { Self { - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -663,8 +624,7 @@ impl From for types::ARPNeighbor { lladdr: from.ll_addr, state: from.state, flags: from.flags, - 
unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -672,9 +632,8 @@ impl From for types::ARPNeighbor { impl From for agent::ARPNeighbors { fn from(from: ARPNeighbors) -> Self { Self { - ARPNeighbors: from_vec(from.neighbors), - unknown_fields: Default::default(), - cached_size: Default::default(), + ARPNeighbors: trans_vec(from.neighbors), + ..Default::default() } } } @@ -683,8 +642,7 @@ impl From for agent::AddARPNeighborsRequest { fn from(from: AddArpNeighborRequest) -> Self { Self { neighbors: from_option(from.neighbors), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -693,14 +651,13 @@ impl From for agent::CreateSandboxRequest { fn from(from: CreateSandboxRequest) -> Self { Self { hostname: from.hostname, - dns: from_vec(from.dns), - storages: from_vec(from.storages), + dns: trans_vec(from.dns), + storages: trans_vec(from.storages), sandbox_pidns: from.sandbox_pidns, sandbox_id: from.sandbox_id, guest_hook_path: from.guest_hook_path, - kernel_modules: from_vec(from.kernel_modules), - unknown_fields: Default::default(), - cached_size: Default::default(), + kernel_modules: trans_vec(from.kernel_modules), + ..Default::default() } } } @@ -708,8 +665,7 @@ impl From for agent::CreateSandboxRequest { impl From for agent::DestroySandboxRequest { fn from(_: Empty) -> Self { Self { - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -720,8 +676,7 @@ impl From for agent::OnlineCPUMemRequest { wait: from.wait, nb_cpus: from.nb_cpus, cpu_only: from.cpu_only, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -730,8 +685,7 @@ impl From for agent::ReseedRandomDevRequest { fn from(from: ReseedRandomDevRequest) -> Self { Self { data: from.data, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -740,8 +694,7 @@ impl From for agent::MemHotplugByProbeRequest { fn from(from: MemHotplugByProbeRequest) -> Self { Self { memHotplugProbeAddr: from.mem_hotplug_probe_addr, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -751,8 +704,7 @@ impl From for agent::SetGuestDateTimeRequest { Self { Sec: from.sec, Usec: from.usec, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -762,8 +714,8 @@ impl From for AgentDetails { Self { version: src.version, init_daemon: src.init_daemon, - device_handlers: into_vec(src.device_handlers), - storage_handlers: into_vec(src.storage_handlers), + device_handlers: trans_vec(src.device_handlers), + storage_handlers: trans_vec(src.storage_handlers), supports_seccomp: src.supports_seccomp, } } @@ -790,8 +742,7 @@ impl From for agent::CopyFileRequest { gid: from.gid, offset: from.offset, data: from.data, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -807,8 +758,7 @@ impl From for WaitProcessResponse { impl From for agent::GetOOMEventRequest { fn from(_: Empty) -> Self { Self { - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -817,8 +767,7 @@ impl From for health::CheckRequest { fn from(from: CheckRequest) -> Self { Self { service: from.service, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -826,7 +775,7 @@ impl From for health::CheckRequest { 
impl From for HealthCheckResponse { fn from(from: health::HealthCheckResponse) -> Self { Self { - status: from.status as u32, + status: from.status.value() as u32, } } } @@ -852,8 +801,7 @@ impl From for agent::VolumeStatsRequest { fn from(from: VolumeStatsRequest) -> Self { Self { volume_guest_path: from.volume_guest_path, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -862,8 +810,8 @@ impl From for VolumeStatsResponse { fn from(from: csi::VolumeStatsResponse) -> Self { let result: String = format!( "Usage: {:?} Volume Condition: {:?}", - from.get_usage(), - from.get_volume_condition() + from.usage(), + from.volume_condition() ); Self { data: result } } @@ -874,8 +822,7 @@ impl From for agent::ResizeVolumeRequest { Self { volume_guest_path: from.volume_guest_path, size: from.size, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } diff --git a/src/runtime-rs/crates/hypervisor/ch-config/Cargo.toml b/src/runtime-rs/crates/hypervisor/ch-config/Cargo.toml index a51370999..10ed105e3 100644 --- a/src/runtime-rs/crates/hypervisor/ch-config/Cargo.toml +++ b/src/runtime-rs/crates/hypervisor/ch-config/Cargo.toml @@ -23,3 +23,4 @@ api_client = { git = "https://github.com/cloud-hypervisor/cloud-hypervisor", cra kata-types = { path = "../../../../libs/kata-types"} nix = "0.26.2" +thiserror = "1.0.38" diff --git a/src/runtime-rs/crates/hypervisor/ch-config/src/ch_api.rs b/src/runtime-rs/crates/hypervisor/ch-config/src/ch_api.rs index d332a154f..191cb5457 100644 --- a/src/runtime-rs/crates/hypervisor/ch-config/src/ch_api.rs +++ b/src/runtime-rs/crates/hypervisor/ch-config/src/ch_api.rs @@ -91,7 +91,7 @@ pub async fn cloud_hypervisor_vm_fs_add( mut socket: UnixStream, fs_config: FsConfig, ) -> Result> { - let result = task::spawn_blocking(move || -> Result> { + task::spawn_blocking(move || -> Result> { let response = simple_api_full_command_and_response( &mut socket, "PUT", @@ -102,7 +102,5 @@ pub async fn cloud_hypervisor_vm_fs_add( Ok(response) }) - .await?; - - result + .await? 
} diff --git a/src/runtime-rs/crates/hypervisor/ch-config/src/convert.rs b/src/runtime-rs/crates/hypervisor/ch-config/src/convert.rs index f0f5e88e8..809c26052 100644 --- a/src/runtime-rs/crates/hypervisor/ch-config/src/convert.rs +++ b/src/runtime-rs/crates/hypervisor/ch-config/src/convert.rs @@ -6,8 +6,8 @@ use crate::net_util::MAC_ADDR_LEN; use crate::NamedHypervisorConfig; use crate::VmConfig; use crate::{ - ConsoleConfig, ConsoleOutputMode, CpuFeatures, CpuTopology, CpusConfig, MacAddr, MemoryConfig, - PayloadConfig, PmemConfig, RngConfig, VsockConfig, + ConsoleConfig, ConsoleOutputMode, CpuFeatures, CpuTopology, CpusConfig, DiskConfig, MacAddr, + MemoryConfig, PayloadConfig, PlatformConfig, PmemConfig, RngConfig, VsockConfig, }; use anyhow::{anyhow, Context, Result}; use kata_types::config::default::DEFAULT_CH_ENTROPY_SOURCE; @@ -17,6 +17,8 @@ use std::convert::TryFrom; use std::fmt::Display; use std::path::PathBuf; +use crate::errors::*; + // 1 MiB const MIB: u64 = 1024 * 1024; @@ -24,75 +26,115 @@ const PMEM_ALIGN_BYTES: u64 = 2 * MIB; const DEFAULT_CH_MAX_PHYS_BITS: u8 = 46; +const DEFAULT_VSOCK_CID: u64 = 3; + impl TryFrom for VmConfig { - type Error = anyhow::Error; + type Error = VmConfigError; fn try_from(n: NamedHypervisorConfig) -> Result { - let kernel_params = n.kernel_params; + let kernel_params = if n.kernel_params.is_empty() { + None + } else { + Some(n.kernel_params) + }; + let cfg = n.cfg; - let vsock_socket_path = n.vsock_socket_path; - let sandbox_path = n.sandbox_path; + + let debug = cfg.debug_info.enable_debug; + let confidential_guest = cfg.security_info.confidential_guest; + + let tdx_enabled = n.tdx_enabled; + + let vsock_socket_path = if n.vsock_socket_path.is_empty() { + return Err(VmConfigError::EmptyVsockSocketPath); + } else { + n.vsock_socket_path + }; + + let sandbox_path = if n.sandbox_path.is_empty() { + return Err(VmConfigError::EmptySandboxPath); + } else { + n.sandbox_path + }; + let fs = n.shared_fs_devices; - let cpus = CpusConfig::try_from(cfg.cpu_info)?; + let cpus = CpusConfig::try_from(cfg.cpu_info).map_err(VmConfigError::CPUError)?; - let rng = RngConfig::try_from(cfg.machine_info)?; + let rng = RngConfig::from(cfg.machine_info); // Note how CH handles the different image types: // - // - An image is specified in PmemConfig. + // - A standard image is specified in PmemConfig. // - An initrd/initramfs is specified in PayloadConfig. + // - A confidential guest image is specified by a DiskConfig. + // - If TDX is enabled, the firmware (`td-shim` [1]) must be + // specified in PayloadConfig. + // - A confidential guest initrd is specified by a PayloadConfig with + // firmware. 
+ // + // [1] - https://github.com/confidential-containers/td-shim let boot_info = cfg.boot_info; let use_initrd = !boot_info.initrd.is_empty(); let use_image = !boot_info.image.is_empty(); if use_initrd && use_image { - return Err(anyhow!("cannot specify image and initrd")); + return Err(VmConfigError::MultipleBootFiles); } if !use_initrd && !use_image { - return Err(anyhow!("missing boot file (no image or initrd)")); + return Err(VmConfigError::NoBootFile); } - let initrd = if use_initrd { - Some(PathBuf::from(boot_info.initrd.clone())) - } else { + let pmem = if use_initrd || confidential_guest { None - }; + } else { + let pmem = PmemConfig::try_from(&boot_info).map_err(VmConfigError::PmemError)?; - let pmem = if use_initrd { - None - } else { - let pmem = PmemConfig::try_from(&boot_info)?; Some(vec![pmem]) }; - let payload = PayloadConfig::try_from((boot_info, kernel_params, initrd))?; + let payload = Some( + PayloadConfig::try_from((boot_info.clone(), kernel_params, tdx_enabled)) + .map_err(VmConfigError::PayloadError)?, + ); - let serial = get_serial_cfg()?; - let console = get_console_cfg()?; + let disks = if confidential_guest && use_image { + let disk = DiskConfig::try_from(boot_info).map_err(VmConfigError::DiskError)?; - let memory = MemoryConfig::try_from(cfg.memory_info)?; - - std::fs::create_dir_all(sandbox_path).context("failed to create sandbox path")?; - - let vsock = VsockConfig { - cid: 3, - socket: PathBuf::from(vsock_socket_path), - ..Default::default() + Some(vec![disk]) + } else { + None }; + let serial = get_serial_cfg(debug, confidential_guest); + let console = get_console_cfg(debug, confidential_guest); + + let memory = MemoryConfig::try_from((cfg.memory_info, confidential_guest)) + .map_err(VmConfigError::MemoryError)?; + + std::fs::create_dir_all(sandbox_path.clone()) + .map_err(|e| VmConfigError::SandboxError(sandbox_path, e.to_string()))?; + + let vsock = VsockConfig::try_from((vsock_socket_path, DEFAULT_VSOCK_CID)) + .map_err(VmConfigError::VsockError)?; + + let platform = get_platform_cfg(tdx_enabled); + let cfg = VmConfig { cpus, memory, serial, console, - payload: Some(payload), + payload, fs, pmem, + disks, vsock: Some(vsock), rng, + platform, + ..Default::default() }; @@ -100,30 +142,71 @@ impl TryFrom for VmConfig { } } -impl TryFrom for MemoryConfig { - type Error = anyhow::Error; +impl TryFrom<(String, u64)> for VsockConfig { + type Error = VsockConfigError; - fn try_from(mem: MemoryInfo) -> Result { - let sysinfo = nix::sys::sysinfo::sysinfo()?; + fn try_from(args: (String, u64)) -> Result { + let vsock_socket_path = args.0; + let cid = args.1; + + let path = if vsock_socket_path.is_empty() { + return Err(VsockConfigError::NoVsockSocketPath); + } else { + vsock_socket_path + }; + + let cfg = VsockConfig { + cid, + socket: PathBuf::from(path), + + ..Default::default() + }; + + Ok(cfg) + } +} + +impl TryFrom<(MemoryInfo, bool)> for MemoryConfig { + type Error = MemoryConfigError; + + fn try_from(args: (MemoryInfo, bool)) -> Result { + let mem = args.0; + let confidential_guest = args.1; + + if mem.default_memory == 0 { + return Err(MemoryConfigError::NoDefaultMemory); + } + + let sysinfo = nix::sys::sysinfo::sysinfo().map_err(MemoryConfigError::SysInfoFail)?; let max_mem_bytes = sysinfo.ram_total(); let mem_bytes: u64 = MIB .checked_mul(mem.default_memory as u64) - .ok_or("cannot convert default memory to bytes") - .map_err(|e| anyhow!(e))?; + .ok_or(()) + .map_err(|_| MemoryConfigError::BadDefaultMemSize(mem.default_memory))?; - // The amount of memory 
that can be hot-plugged is the total less the - // amount allocated at VM start. - let hotplug_size_bytes = max_mem_bytes - .checked_sub(mem_bytes) - .ok_or("failed to calculate max hotplug size for CH") - .map_err(|e| anyhow!(e))?; + if mem_bytes > max_mem_bytes { + return Err(MemoryConfigError::DefaultMemSizeTooBig); + } - let aligned_hotplug_size_bytes = - checked_next_multiple_of(hotplug_size_bytes, PMEM_ALIGN_BYTES) - .ok_or("cannot handle pmem alignment for CH") - .map_err(|e| anyhow!(e))?; + let hotplug_size = if confidential_guest { + None + } else { + // The amount of memory that can be hot-plugged is the total less the + // amount allocated at VM start. + let hotplug_size_bytes = max_mem_bytes + .checked_sub(mem_bytes) + .ok_or(()) + .map_err(|_| MemoryConfigError::BadMemSizeForHotplug(max_mem_bytes))?; + + let aligned_hotplug_size_bytes = + checked_next_multiple_of(hotplug_size_bytes, PMEM_ALIGN_BYTES) + .ok_or(()) + .map_err(|_| MemoryConfigError::BadPmemAlign(hotplug_size_bytes))?; + + Some(aligned_hotplug_size_bytes) + }; let cfg = MemoryConfig { size: mem_bytes, @@ -131,7 +214,7 @@ impl TryFrom for MemoryConfig { // Required shared: true, - hotplug_size: Some(aligned_hotplug_size_bytes), + hotplug_size, ..Default::default() }; @@ -155,26 +238,32 @@ fn checked_next_multiple_of(value: u64, multiple: u64) -> Option { } impl TryFrom for CpusConfig { - type Error = anyhow::Error; + type Error = CpusConfigError; fn try_from(cpu: CpuInfo) -> Result { - let boot_vcpus = u8::try_from(cpu.default_vcpus)?; - let max_vcpus = u8::try_from(cpu.default_maxvcpus)?; + let boot_vcpus = + u8::try_from(cpu.default_vcpus).map_err(CpusConfigError::BootVCPUsTooBig)?; + + let max_vcpus = + u8::try_from(cpu.default_maxvcpus).map_err(CpusConfigError::MaxVCPUsTooBig)?; let topology = CpuTopology { - threads_per_core: 1, cores_per_die: max_vcpus, + threads_per_core: 1, dies_per_package: 1, packages: 1, }; let max_phys_bits = DEFAULT_CH_MAX_PHYS_BITS; + let features = CpuFeatures::from(cpu.cpu_features); + let cfg = CpusConfig { boot_vcpus, max_vcpus, max_phys_bits, topology: Some(topology), + features, ..Default::default() }; @@ -183,76 +272,117 @@ impl TryFrom for CpusConfig { } } -impl TryFrom for CpuFeatures { - type Error = anyhow::Error; - +impl From for CpuFeatures { #[cfg(target_arch = "x86_64")] - fn try_from(s: String) -> Result { + fn from(s: String) -> Self { let amx = s.split(',').any(|x| x == "amx"); - let cpu_features = CpuFeatures { amx }; - - Ok(cpu_features) + CpuFeatures { amx } } #[cfg(not(target_arch = "x86_64"))] - fn try_from(_s: String) -> Result { - Ok(CpuFeatures::default()) + fn from(_s: String) -> Self { + CpuFeatures::default() } } -// The 2nd tuple element is the space separated kernel parameters list. -// The 3rd tuple element is an optional initramfs image to use. -// This cannot be created only from BootInfo since that contains the -// user-specified kernel parameters only. -impl TryFrom<(BootInfo, String, Option)> for PayloadConfig { - type Error = anyhow::Error; +// - The 2nd tuple element is the space separated final kernel parameters list. +// It is made up of both the CH specific kernel parameters and the user +// specified parameters from BootInfo. +// +// The kernel params cannot be created only from BootInfo since that contains +// the user-specified kernel parameters only. +// +// - The 3rd tuple element determines if TDX is enabled. 
+// +impl TryFrom<(BootInfo, Option, bool)> for PayloadConfig { + type Error = PayloadConfigError; - fn try_from(args: (BootInfo, String, Option)) -> Result { - let b = args.0; + fn try_from(args: (BootInfo, Option, bool)) -> Result { + let boot_info = args.0; let cmdline = args.1; - let initramfs = args.2; + let tdx_enabled = args.2; - let kernel = PathBuf::from(b.kernel); + // The kernel is always specified here, + // not in the top level VmConfig.kernel. + let kernel = if boot_info.kernel.is_empty() { + return Err(PayloadConfigError::NoKernel); + } else { + PathBuf::from(boot_info.kernel) + }; + + let initramfs = if boot_info.initrd.is_empty() { + None + } else { + Some(PathBuf::from(boot_info.initrd)) + }; + + let firmware = if tdx_enabled { + if boot_info.firmware.is_empty() { + return Err(PayloadConfigError::TDXFirmwareMissing); + } else { + Some(PathBuf::from(boot_info.firmware)) + } + } else if boot_info.firmware.is_empty() { + None + } else { + Some(PathBuf::from(boot_info.firmware)) + }; let payload = PayloadConfig { kernel: Some(kernel), - cmdline: Some(cmdline), initramfs, - - ..Default::default() + cmdline, + firmware, }; Ok(payload) } } -impl TryFrom for RngConfig { - type Error = anyhow::Error; +impl TryFrom for DiskConfig { + type Error = DiskConfigError; - fn try_from(m: MachineInfo) -> Result { + fn try_from(boot_info: BootInfo) -> Result { + let path = if boot_info.image.is_empty() { + return Err(DiskConfigError::MissingPath); + } else { + PathBuf::from(boot_info.image) + }; + + let disk = DiskConfig { + path: Some(path), + readonly: true, + + ..Default::default() + }; + + Ok(disk) + } +} + +impl From for RngConfig { + fn from(m: MachineInfo) -> Self { let entropy_source = if !m.entropy_source.is_empty() { m.entropy_source } else { DEFAULT_CH_ENTROPY_SOURCE.to_string() }; - let rng = RngConfig { + RngConfig { src: PathBuf::from(entropy_source), ..Default::default() - }; - - Ok(rng) + } } } impl TryFrom<&BootInfo> for PmemConfig { - type Error = anyhow::Error; + type Error = PmemConfigError; fn try_from(b: &BootInfo) -> Result { let file = if b.image.is_empty() { - return Err(anyhow!("CH PmemConfig only used for images")); + return Err(PmemConfigError::MissingImage); } else { b.image.clone() }; @@ -268,24 +398,52 @@ impl TryFrom<&BootInfo> for PmemConfig { } } -fn get_serial_cfg() -> Result { - let cfg = ConsoleConfig { - file: None, - mode: ConsoleOutputMode::Tty, - iommu: false, +fn get_serial_cfg(debug: bool, confidential_guest: bool) -> ConsoleConfig { + let mode = if confidential_guest { + ConsoleOutputMode::Off + } else if debug { + ConsoleOutputMode::Tty + } else { + ConsoleOutputMode::Off }; - Ok(cfg) + ConsoleConfig { + file: None, + mode, + iommu: false, + } } -fn get_console_cfg() -> Result { - let cfg = ConsoleConfig { - file: None, - mode: ConsoleOutputMode::Off, - iommu: false, +fn get_console_cfg(debug: bool, confidential_guest: bool) -> ConsoleConfig { + let mode = if confidential_guest { + if debug { + ConsoleOutputMode::Tty + } else { + ConsoleOutputMode::Off + } + } else { + ConsoleOutputMode::Off }; - Ok(cfg) + ConsoleConfig { + file: None, + mode, + iommu: false, + } +} + +fn get_platform_cfg(tdx_enabled: bool) -> Option { + if tdx_enabled { + let platform = PlatformConfig { + tdx: true, + + ..Default::default() + }; + + Some(platform) + } else { + None + } } #[allow(dead_code)] @@ -322,3 +480,1422 @@ where Ok(MacAddr { bytes }) } + +#[cfg(test)] +mod tests { + use super::*; + use kata_types::config::hypervisor::{Hypervisor as HypervisorConfig, 
SecurityInfo}; + + // Generate a valid generic memory info object and a valid CH specific + // memory config object. + fn make_memory_objects( + default_memory_mib: u32, + usable_max_mem_bytes: u64, + confidential_guest: bool, + ) -> (MemoryInfo, MemoryConfig) { + let mem_info = MemoryInfo { + default_memory: default_memory_mib, + + ..Default::default() + }; + + let hotplug_size = if confidential_guest { + None + } else { + checked_next_multiple_of( + usable_max_mem_bytes - (default_memory_mib as u64 * MIB), + PMEM_ALIGN_BYTES, + ) + }; + + let mem_cfg = MemoryConfig { + size: default_memory_mib as u64 * MIB, + shared: true, + hotplug_size, + + ..Default::default() + }; + + (mem_info, mem_cfg) + } + + // The "default" sent to CH but without "cores_per_die" + // to allow the tests to set that value explicitly. + fn make_bare_topology() -> CpuTopology { + CpuTopology { + threads_per_core: 1, + dies_per_package: 1, + packages: 1, + + ..Default::default() + } + } + + fn make_cpu_objects(cpu_default: u8, cpu_max: u8) -> (CpuInfo, CpusConfig) { + let cpu_info = CpuInfo { + default_vcpus: cpu_default as i32, + default_maxvcpus: cpu_max as u32, + + ..Default::default() + }; + + let cpus_config = CpusConfig { + boot_vcpus: cpu_default, + max_vcpus: cpu_max, + topology: Some(CpuTopology { + cores_per_die: cpu_max, + + ..make_bare_topology() + }), + max_phys_bits: DEFAULT_CH_MAX_PHYS_BITS, + + ..Default::default() + }; + + (cpu_info, cpus_config) + } + + fn make_bootinfo_pmemconfig_objects(image: &str) -> (BootInfo, PmemConfig) { + let boot_info = BootInfo { + image: image.to_string(), + + ..Default::default() + }; + + let pmem_config = PmemConfig { + file: PathBuf::from(image), + discard_writes: true, + + ..Default::default() + }; + + (boot_info, pmem_config) + } + + fn make_bootinfo_diskconfig_objects(path: &str) -> (BootInfo, DiskConfig) { + let boot_info = BootInfo { + image: path.to_string(), + + ..Default::default() + }; + + let disk_config = DiskConfig { + path: Some(PathBuf::from(path)), + readonly: true, + + ..Default::default() + }; + + (boot_info, disk_config) + } + + // Create BootInfo and PayloadConfig objects for non-TDX scenarios. 
+ fn make_bootinfo_payloadconfig_objects( + kernel: &str, + initramfs: &str, + firmware: Option<&str>, + cmdline: Option, + ) -> (BootInfo, PayloadConfig) { + let boot_info = if let Some(firmware) = firmware { + BootInfo { + kernel: kernel.into(), + initrd: initramfs.into(), + firmware: firmware.into(), + + ..Default::default() + } + } else { + BootInfo { + kernel: kernel.into(), + initrd: initramfs.into(), + + ..Default::default() + } + }; + + let payload_firmware = firmware.map(PathBuf::from); + + let payload_config = PayloadConfig { + kernel: Some(PathBuf::from(kernel)), + initramfs: Some(PathBuf::from(initramfs)), + firmware: payload_firmware, + cmdline, + }; + + (boot_info, payload_config) + } + + fn make_machineinfo_rngconfig_objects(entropy_source: &str) -> (MachineInfo, RngConfig) { + let machine_info = MachineInfo { + entropy_source: entropy_source.to_string(), + + ..Default::default() + }; + + let rng_config = RngConfig { + src: PathBuf::from(entropy_source.to_string()), + + ..Default::default() + }; + + (machine_info, rng_config) + } + + #[test] + fn test_get_serial_cfg() { + #[derive(Debug)] + struct TestData { + debug: bool, + confidential_guest: bool, + result: ConsoleConfig, + } + + let tests = &[ + TestData { + debug: false, + confidential_guest: false, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Off, + iommu: false, + }, + }, + TestData { + debug: true, + confidential_guest: false, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Tty, + iommu: false, + }, + }, + TestData { + debug: false, + confidential_guest: true, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Off, + iommu: false, + }, + }, + TestData { + debug: true, + confidential_guest: true, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Off, + iommu: false, + }, + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = get_serial_cfg(d.debug, d.confidential_guest); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + assert_eq!(result.file, d.result.file, "{}", msg); + assert_eq!(result.iommu, d.result.iommu, "{}", msg); + assert_eq!(result.mode, d.result.mode, "{}", msg); + } + } + + #[test] + fn test_get_console_cfg() { + #[derive(Debug)] + struct TestData { + debug: bool, + confidential_guest: bool, + result: ConsoleConfig, + } + + let tests = &[ + TestData { + debug: false, + confidential_guest: false, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Off, + iommu: false, + }, + }, + TestData { + debug: true, + confidential_guest: false, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Off, + iommu: false, + }, + }, + TestData { + debug: false, + confidential_guest: true, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Off, + iommu: false, + }, + }, + TestData { + debug: true, + confidential_guest: true, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Tty, + iommu: false, + }, + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = get_console_cfg(d.debug, d.confidential_guest); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + assert_eq!(result, d.result, "{}", msg); + } + } + + #[test] + fn test_get_platform_cfg() { + #[derive(Debug)] + struct TestData { + 
tdx_enabled: bool, + result: Option, + } + + let tests = &[ + TestData { + tdx_enabled: false, + result: None, + }, + TestData { + tdx_enabled: true, + result: Some(PlatformConfig { + tdx: true, + + ..Default::default() + }), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = get_platform_cfg(d.tdx_enabled); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + assert_eq!(result, d.result, "{}", msg); + } + } + + #[test] + fn test_bootinfo_to_pmemconfig() { + #[derive(Debug)] + struct TestData { + boot_info: BootInfo, + result: Result, + } + + let image = "/an/image"; + + let (boot_info_with_image, pmem_config) = make_bootinfo_pmemconfig_objects(image); + + let tests = &[ + TestData { + boot_info: BootInfo::default(), + result: Err(PmemConfigError::MissingImage), + }, + TestData { + boot_info: boot_info_with_image, + result: Ok(pmem_config), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = PmemConfig::try_from(&d.boot_info); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + if d.result.is_err() { + assert!(result.is_err(), "{}", msg); + + assert_eq!( + &result.unwrap_err(), + d.result.as_ref().unwrap_err(), + "{}", + msg + ); + + continue; + } + + assert!(result.is_ok(), "{}", msg); + assert_eq!(&result.unwrap(), d.result.as_ref().unwrap(), "{}", msg); + } + } + + #[test] + fn test_machineinfo_to_rngconfig() { + #[derive(Debug)] + struct TestData { + machine_info: MachineInfo, + result: RngConfig, + } + + let entropy_source = "/dev/foo"; + + let (machine_info, rng_config) = make_machineinfo_rngconfig_objects(entropy_source); + + let tests = &[ + TestData { + machine_info: MachineInfo::default(), + result: RngConfig { + src: PathBuf::from(DEFAULT_CH_ENTROPY_SOURCE.to_string()), + + ..Default::default() + }, + }, + TestData { + machine_info: MachineInfo { + entropy_source: DEFAULT_CH_ENTROPY_SOURCE.to_string(), + + ..Default::default() + }, + result: RngConfig { + src: PathBuf::from(DEFAULT_CH_ENTROPY_SOURCE.to_string()), + + ..Default::default() + }, + }, + TestData { + machine_info: MachineInfo { + entropy_source: entropy_source.to_string(), + + ..Default::default() + }, + result: RngConfig { + src: PathBuf::from(entropy_source.to_string()), + + ..Default::default() + }, + }, + TestData { + machine_info, + result: rng_config, + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = RngConfig::from(d.machine_info.clone()); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + assert_eq!(result, d.result, "{}", msg); + } + } + + #[test] + fn test_string_to_cpufeatures() { + #[derive(Debug)] + struct TestData<'a> { + s: &'a str, + result: CpuFeatures, + } + + let tests = &[ + TestData { + s: "", + result: CpuFeatures::default(), + }, + #[cfg(target_arch = "x86_64")] + TestData { + s: "amx", + result: CpuFeatures { amx: true }, + }, + #[cfg(target_arch = "x86_64")] + TestData { + s: "amxyz", + result: CpuFeatures { amx: false }, + }, + #[cfg(target_arch = "x86_64")] + TestData { + s: "aamx", + result: CpuFeatures { amx: false }, + }, + #[cfg(not(target_arch = "x86_64"))] + TestData { + s: "amx", + result: CpuFeatures::default(), + }, + ]; + 
+ for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = CpuFeatures::from(d.s.to_string()); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + assert_eq!(result, d.result, "{}", msg); + } + } + + #[test] + fn test_bootinfo_to_diskconfig() { + #[derive(Debug)] + struct TestData { + boot_info: BootInfo, + result: Result, + } + + let path = "/some/where"; + + let (boot_info, disk_config) = make_bootinfo_diskconfig_objects(path); + + let tests = &[ + TestData { + boot_info: BootInfo::default(), + result: Err(DiskConfigError::MissingPath), + }, + TestData { + boot_info, + result: Ok(disk_config), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = DiskConfig::try_from(d.boot_info.clone()); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + assert_eq!(result, d.result, "{}", msg); + } + } + + #[test] + fn test_cpuinfo_to_cpusconfig() { + #[derive(Debug)] + struct TestData { + cpu_info: CpuInfo, + result: Result, + } + + let topology = make_bare_topology(); + + let u8_max = std::u8::MAX; + + let (cpu_info, cpus_config) = make_cpu_objects(7, u8_max); + + let tests = &[ + TestData { + cpu_info: CpuInfo::default(), + result: Ok(CpusConfig { + boot_vcpus: 0, + max_vcpus: 0, + topology: Some(CpuTopology { + cores_per_die: 0, + + ..topology + }), + max_phys_bits: DEFAULT_CH_MAX_PHYS_BITS, + + ..Default::default() + }), + }, + TestData { + cpu_info: CpuInfo { + default_vcpus: u8_max as i32, + + ..Default::default() + }, + result: Ok(CpusConfig { + boot_vcpus: u8_max, + max_vcpus: 0, + topology: Some(topology.clone()), + max_phys_bits: DEFAULT_CH_MAX_PHYS_BITS, + + ..Default::default() + }), + }, + TestData { + cpu_info: CpuInfo { + default_vcpus: u8_max as i32 + 1, + + ..Default::default() + }, + result: Err(CpusConfigError::BootVCPUsTooBig( + u8::try_from(u8_max as i32 + 1).unwrap_err(), + )), + }, + TestData { + cpu_info: CpuInfo { + default_maxvcpus: u8_max as u32 + 1, + + ..Default::default() + }, + result: Err(CpusConfigError::MaxVCPUsTooBig( + u8::try_from(u8_max as u32 + 1).unwrap_err(), + )), + }, + TestData { + cpu_info: CpuInfo { + default_vcpus: u8_max as i32, + default_maxvcpus: u8_max as u32, + + ..Default::default() + }, + result: Ok(CpusConfig { + boot_vcpus: u8_max, + max_vcpus: u8_max, + topology: Some(CpuTopology { + cores_per_die: u8_max, + + ..topology + }), + max_phys_bits: DEFAULT_CH_MAX_PHYS_BITS, + + ..Default::default() + }), + }, + TestData { + cpu_info: CpuInfo { + default_vcpus: (u8_max - 1) as i32, + default_maxvcpus: u8_max as u32, + + ..Default::default() + }, + result: Ok(CpusConfig { + boot_vcpus: (u8_max - 1), + max_vcpus: u8_max, + topology: Some(CpuTopology { + cores_per_die: u8_max, + + ..topology + }), + max_phys_bits: DEFAULT_CH_MAX_PHYS_BITS, + + ..Default::default() + }), + }, + TestData { + cpu_info, + result: Ok(cpus_config), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = CpusConfig::try_from(d.cpu_info.clone()); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + if d.result.is_err() { + assert!(result.is_err(), "{}", msg); + + assert_eq!( + &result.unwrap_err(), + d.result.as_ref().unwrap_err(), + "{}", + msg + 
); + continue; + } + + assert!(result.is_ok(), "{}", msg); + assert_eq!(&result.unwrap(), d.result.as_ref().unwrap(), "{}", msg); + } + } + + #[test] + fn test_bootinfo_to_payloadconfig() { + #[derive(Debug)] + struct TestData { + boot_info: BootInfo, + cmdline: Option, + tdx: bool, + result: Result, + } + + let cmdline = "debug foo a=b c=d"; + let kernel = "kernel"; + let firmware = "firmware"; + let initramfs = "initramfs"; + + let (boot_info_with_initrd, payload_config_with_initrd) = + make_bootinfo_payloadconfig_objects( + kernel, + initramfs, + Some(firmware), + Some(cmdline.to_string()), + ); + + let boot_info_without_initrd = BootInfo { + kernel: kernel.into(), + firmware: firmware.into(), + + ..Default::default() + }; + + let payload_config_without_initrd = PayloadConfig { + kernel: Some(PathBuf::from(kernel)), + firmware: Some(PathBuf::from(firmware)), + cmdline: Some(cmdline.into()), + + ..Default::default() + }; + + let tests = &[ + TestData { + boot_info: BootInfo::default(), + cmdline: None, + tdx: false, + result: Err(PayloadConfigError::NoKernel), + }, + TestData { + boot_info: BootInfo { + kernel: kernel.into(), + kernel_params: String::new(), + initrd: initramfs.into(), + + ..Default::default() + }, + cmdline: None, + tdx: false, + result: Ok(PayloadConfig { + kernel: Some(PathBuf::from(kernel)), + cmdline: None, + initramfs: Some(PathBuf::from(initramfs)), + + ..Default::default() + }), + }, + TestData { + boot_info: BootInfo { + kernel: kernel.into(), + kernel_params: cmdline.to_string(), + initrd: initramfs.into(), + + ..Default::default() + }, + cmdline: Some(cmdline.to_string()), + tdx: false, + result: Ok(PayloadConfig { + kernel: Some(PathBuf::from(kernel)), + initramfs: Some(PathBuf::from(initramfs)), + cmdline: Some(cmdline.to_string()), + + ..Default::default() + }), + }, + TestData { + boot_info: BootInfo { + kernel: kernel.into(), + initrd: initramfs.into(), + + ..Default::default() + }, + cmdline: None, + tdx: true, + result: Err(PayloadConfigError::TDXFirmwareMissing), + }, + TestData { + boot_info: boot_info_with_initrd, + cmdline: Some(cmdline.to_string()), + tdx: true, + result: Ok(payload_config_with_initrd), + }, + TestData { + boot_info: boot_info_without_initrd, + cmdline: Some(cmdline.to_string()), + tdx: true, + result: Ok(payload_config_without_initrd), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = PayloadConfig::try_from((d.boot_info.clone(), d.cmdline.clone(), d.tdx)); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + if d.result.is_err() { + assert!(result.is_err(), "{}", msg); + + assert_eq!( + &result.unwrap_err(), + d.result.as_ref().unwrap_err(), + "{}", + msg + ); + continue; + } + + assert!(result.is_ok(), "{}", msg); + assert_eq!(&result.unwrap(), d.result.as_ref().unwrap(), "{}", msg); + } + } + + #[test] + fn test_memoryinfo_to_memoryconfig() { + #[derive(Debug)] + struct TestData { + mem_info: MemoryInfo, + confidential_guest: bool, + result: Result, + } + + let sysinfo = nix::sys::sysinfo::sysinfo().unwrap(); + + let actual_max_mem_bytes = sysinfo.ram_total(); + + // Calculate the available MiB value + let max_mem_mib = actual_max_mem_bytes.checked_div(MIB).unwrap(); + + // Undo the operation to get back to the usable amount of max memory + // bytes. 
+ let usable_max_mem_bytes = MIB.checked_mul(max_mem_mib).unwrap(); + + let (mem_info_std, mem_cfg_std) = make_memory_objects(79, usable_max_mem_bytes, false); + let (mem_info_confidential_guest, mem_cfg_confidential_guest) = + make_memory_objects(79, usable_max_mem_bytes, true); + + let tests = &[ + TestData { + mem_info: MemoryInfo::default(), + confidential_guest: false, + result: Err(MemoryConfigError::NoDefaultMemory), + }, + TestData { + mem_info: MemoryInfo { + default_memory: 17, + + ..Default::default() + }, + confidential_guest: true, + result: Ok(MemoryConfig { + size: (17 * MIB), + shared: true, + hotplug_size: None, + + ..Default::default() + }), + }, + TestData { + mem_info: MemoryInfo { + default_memory: max_mem_mib as u32, + + ..Default::default() + }, + confidential_guest: true, + result: Ok(MemoryConfig { + size: usable_max_mem_bytes, + shared: true, + hotplug_size: None, + + ..Default::default() + }), + }, + TestData { + mem_info: MemoryInfo { + default_memory: (max_mem_mib + 1) as u32, + + ..Default::default() + }, + confidential_guest: true, + result: Err(MemoryConfigError::DefaultMemSizeTooBig), + }, + TestData { + mem_info: MemoryInfo { + default_memory: 1024, + + ..Default::default() + }, + confidential_guest: false, + result: Ok(MemoryConfig { + size: 1024_u64 * MIB, + shared: true, + hotplug_size: checked_next_multiple_of( + usable_max_mem_bytes - (1024 * MIB), + PMEM_ALIGN_BYTES, + ), + + ..Default::default() + }), + }, + TestData { + mem_info: mem_info_std, + confidential_guest: false, + result: Ok(mem_cfg_std), + }, + TestData { + mem_info: mem_info_confidential_guest, + confidential_guest: true, + result: Ok(mem_cfg_confidential_guest), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = MemoryConfig::try_from((d.mem_info.clone(), d.confidential_guest)); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + if d.result.is_err() { + assert!(result.is_err(), "{}", msg); + + assert_eq!( + &result.unwrap_err(), + d.result.as_ref().unwrap_err(), + "{}", + msg + ); + continue; + } + + assert!(result.is_ok(), "{}", msg); + assert_eq!(&result.unwrap(), d.result.as_ref().unwrap(), "{}", msg); + } + } + + #[test] + fn test_vsock_config() { + #[derive(Debug)] + struct TestData<'a> { + vsock_socket_path: &'a str, + cid: u64, + result: Result, + } + + let tests = &[ + TestData { + vsock_socket_path: "", + cid: 0, + result: Err(VsockConfigError::NoVsockSocketPath), + }, + TestData { + vsock_socket_path: "vsock_socket_path", + cid: DEFAULT_VSOCK_CID, + result: Ok(VsockConfig { + socket: PathBuf::from("vsock_socket_path"), + cid: DEFAULT_VSOCK_CID, + + ..Default::default() + }), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = VsockConfig::try_from((d.vsock_socket_path.to_string(), d.cid)); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + if d.result.is_err() { + assert!(result.is_err(), "{}", msg); + + assert_eq!( + &result.unwrap_err(), + d.result.as_ref().unwrap_err(), + "{}", + msg + ); + continue; + } + + assert!(result.is_ok(), "{}", msg); + assert_eq!(&result.unwrap(), d.result.as_ref().unwrap(), "{}", msg); + } + } + + #[test] + fn test_named_hypervisor_config_to_vmconfig() { + #[derive(Debug)] + struct TestData { + cfg: NamedHypervisorConfig, + result: 
Result, + } + + let u8_max = std::u8::MAX; + let sysinfo = nix::sys::sysinfo::sysinfo().unwrap(); + + let actual_max_mem_bytes = sysinfo.ram_total(); + + // Calculate the available MiB value + let max_mem_mib = actual_max_mem_bytes.checked_div(MIB).unwrap(); + + // Undo the operation to get back to the usable amount of max memory + // bytes. + let usable_max_mem_bytes = MIB.checked_mul(max_mem_mib).unwrap(); + + let image = "image"; + let initramfs = "initramfs"; + let kernel = "kernel"; + let firmware = "firmware"; + + let entropy_source = "entropy_source"; + let sandbox_path = "sandbox_path"; + let vsock_socket_path = "vsock_socket_path"; + + let valid_vsock = + VsockConfig::try_from((vsock_socket_path.to_string(), DEFAULT_VSOCK_CID)).unwrap(); + + let (cpu_info, cpus_config) = make_cpu_objects(7, u8_max); + + let (memory_info_std, mem_config_std) = + make_memory_objects(79, usable_max_mem_bytes, false); + + let (memory_info_confidential_guest, mem_config_confidential_guest) = + make_memory_objects(79, usable_max_mem_bytes, true); + + let (_, pmem_config_with_image) = make_bootinfo_pmemconfig_objects(image); + let (machine_info, rng_config) = make_machineinfo_rngconfig_objects(entropy_source); + + let payload_firmware = None; + + let (boot_info_with_initrd, payload_config_with_initrd) = + make_bootinfo_payloadconfig_objects(kernel, initramfs, payload_firmware, None); + + let (boot_info_confidential_guest_image, disk_config_confidential_guest_image) = + make_bootinfo_diskconfig_objects(image); + + let boot_info_confidential_guest_initrd = BootInfo { + kernel: kernel.to_string(), + initrd: initramfs.to_string(), + + ..Default::default() + }; + + let boot_info_tdx_image = BootInfo { + kernel: kernel.to_string(), + image: image.to_string(), + firmware: firmware.to_string(), + + ..Default::default() + }; + + let boot_info_tdx_initrd = BootInfo { + kernel: kernel.to_string(), + initrd: initramfs.to_string(), + firmware: firmware.to_string(), + + ..Default::default() + }; + + let payload_config_confidential_guest_initrd = PayloadConfig { + kernel: Some(PathBuf::from(kernel)), + initramfs: Some(PathBuf::from(initramfs)), + + ..Default::default() + }; + + // XXX: Note that the image is defined in a DiskConfig! 
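+ // For a TDX image boot, the PayloadConfig below carries only the kernel and firmware; the rootfs image itself is attached separately as a readonly DiskConfig (disk_config_confidential_guest_image).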
+ let payload_config_tdx_for_image = PayloadConfig { + firmware: Some(PathBuf::from(firmware)), + kernel: Some(PathBuf::from(kernel)), + + ..Default::default() + }; + + let payload_config_tdx_initrd = PayloadConfig { + firmware: Some(PathBuf::from(firmware)), + initramfs: Some(PathBuf::from(initramfs)), + kernel: Some(PathBuf::from(kernel)), + + ..Default::default() + }; + + //------------------------------ + + let hypervisor_cfg_with_image_and_kernel = HypervisorConfig { + cpu_info: cpu_info.clone(), + memory_info: memory_info_std.clone(), + boot_info: BootInfo { + image: image.to_string(), + kernel: kernel.to_string(), + + ..Default::default() + }, + machine_info: machine_info.clone(), + + ..Default::default() + }; + + let hypervisor_cfg_with_initrd = HypervisorConfig { + cpu_info: cpu_info.clone(), + memory_info: memory_info_std, + boot_info: boot_info_with_initrd, + machine_info: machine_info.clone(), + + ..Default::default() + }; + + let security_info_confidential_guest = SecurityInfo { + confidential_guest: true, + + ..Default::default() + }; + + let hypervisor_cfg_confidential_guest_image = HypervisorConfig { + cpu_info: cpu_info.clone(), + memory_info: memory_info_confidential_guest.clone(), + boot_info: BootInfo { + kernel: kernel.to_string(), + + ..boot_info_confidential_guest_image + }, + machine_info: machine_info.clone(), + security_info: security_info_confidential_guest.clone(), + + ..Default::default() + }; + + let hypervisor_cfg_confidential_guest_initrd = HypervisorConfig { + cpu_info: cpu_info.clone(), + memory_info: memory_info_confidential_guest.clone(), + boot_info: boot_info_confidential_guest_initrd, + machine_info: machine_info.clone(), + security_info: security_info_confidential_guest.clone(), + + ..Default::default() + }; + + let hypervisor_cfg_tdx_image = HypervisorConfig { + cpu_info: cpu_info.clone(), + memory_info: memory_info_confidential_guest.clone(), + boot_info: boot_info_tdx_image, + machine_info: machine_info.clone(), + security_info: security_info_confidential_guest.clone(), + + ..Default::default() + }; + + let hypervisor_cfg_tdx_initrd = HypervisorConfig { + cpu_info, + memory_info: memory_info_confidential_guest, + boot_info: boot_info_tdx_initrd, + machine_info, + security_info: security_info_confidential_guest, + + ..Default::default() + }; + + //------------------------------ + + let vmconfig_with_image_and_kernel = VmConfig { + cpus: cpus_config.clone(), + memory: mem_config_std.clone(), + rng: rng_config.clone(), + vsock: Some(valid_vsock.clone()), + + // rootfs image specific + pmem: Some(vec![pmem_config_with_image]), + + payload: Some(PayloadConfig { + kernel: Some(PathBuf::from(kernel)), + + ..Default::default() + }), + + ..Default::default() + }; + + let vmconfig_with_initrd = VmConfig { + cpus: cpus_config.clone(), + memory: mem_config_std, + rng: rng_config.clone(), + vsock: Some(valid_vsock.clone()), + + // initrd/initramfs specific + payload: Some(payload_config_with_initrd), + + ..Default::default() + }; + + let vmconfig_confidential_guest_image = VmConfig { + cpus: cpus_config.clone(), + memory: mem_config_confidential_guest.clone(), + rng: rng_config.clone(), + vsock: Some(valid_vsock.clone()), + + // Confidential guest image specific + disks: Some(vec![disk_config_confidential_guest_image.clone()]), + + payload: Some(PayloadConfig { + kernel: Some(PathBuf::from(kernel)), + + ..Default::default() + }), + + ..Default::default() + }; + + let vmconfig_confidential_guest_initrd = VmConfig { + cpus: cpus_config.clone(), + memory: 
mem_config_confidential_guest.clone(), + rng: rng_config.clone(), + vsock: Some(valid_vsock.clone()), + + // Confidential guest initrd specific + payload: Some(payload_config_confidential_guest_initrd), + + ..Default::default() + }; + + let platform_config_tdx = get_platform_cfg(true); + + let vmconfig_tdx_image = VmConfig { + cpus: cpus_config.clone(), + memory: mem_config_confidential_guest.clone(), + rng: rng_config.clone(), + vsock: Some(valid_vsock.clone()), + platform: platform_config_tdx.clone(), + + // TDX specific + payload: Some(payload_config_tdx_for_image), + + // Confidential guest + TDX specific + disks: Some(vec![disk_config_confidential_guest_image]), + + ..Default::default() + }; + + let vmconfig_tdx_initrd = VmConfig { + cpus: cpus_config, + memory: mem_config_confidential_guest, + rng: rng_config, + vsock: Some(valid_vsock), + platform: platform_config_tdx, + + // Confidential guest + TDX specific + payload: Some(payload_config_tdx_initrd), + + ..Default::default() + }; + + //------------------------------ + + let named_hypervisor_cfg_with_image_and_kernel = NamedHypervisorConfig { + sandbox_path: sandbox_path.into(), + vsock_socket_path: vsock_socket_path.into(), + + cfg: hypervisor_cfg_with_image_and_kernel, + + ..Default::default() + }; + + let named_hypervisor_cfg_with_initrd = NamedHypervisorConfig { + sandbox_path: sandbox_path.into(), + vsock_socket_path: vsock_socket_path.into(), + + cfg: hypervisor_cfg_with_initrd, + + ..Default::default() + }; + + let named_hypervisor_cfg_confidential_guest_image = NamedHypervisorConfig { + sandbox_path: sandbox_path.into(), + vsock_socket_path: vsock_socket_path.into(), + + cfg: hypervisor_cfg_confidential_guest_image, + + ..Default::default() + }; + + let named_hypervisor_cfg_confidential_guest_initrd = NamedHypervisorConfig { + sandbox_path: sandbox_path.into(), + vsock_socket_path: vsock_socket_path.into(), + + cfg: hypervisor_cfg_confidential_guest_initrd, + + ..Default::default() + }; + + let named_hypervisor_cfg_tdx_image = NamedHypervisorConfig { + sandbox_path: sandbox_path.into(), + vsock_socket_path: vsock_socket_path.into(), + + cfg: hypervisor_cfg_tdx_image, + + tdx_enabled: true, + + ..Default::default() + }; + + let named_hypervisor_cfg_tdx_initrd = NamedHypervisorConfig { + sandbox_path: sandbox_path.into(), + vsock_socket_path: vsock_socket_path.into(), + + cfg: hypervisor_cfg_tdx_initrd, + + tdx_enabled: true, + + ..Default::default() + }; + + //------------------------------ + + let tests = &[ + TestData { + cfg: NamedHypervisorConfig::default(), + result: Err(VmConfigError::EmptyVsockSocketPath), + }, + TestData { + cfg: NamedHypervisorConfig { + vsock_socket_path: "vsock_socket_path".into(), + + ..Default::default() + }, + result: Err(VmConfigError::EmptySandboxPath), + }, + TestData { + cfg: NamedHypervisorConfig { + sandbox_path: "sandbox_path".into(), + + ..Default::default() + }, + result: Err(VmConfigError::EmptyVsockSocketPath), + }, + TestData { + cfg: NamedHypervisorConfig { + sandbox_path: "sandbox_path".into(), + vsock_socket_path: "vsock_socket_path".into(), + cfg: HypervisorConfig::default(), + + ..Default::default() + }, + result: Err(VmConfigError::NoBootFile), + }, + TestData { + cfg: NamedHypervisorConfig { + sandbox_path: "sandbox_path".into(), + vsock_socket_path: "vsock_socket_path".into(), + cfg: HypervisorConfig { + boot_info: BootInfo { + initrd: "initrd".into(), + image: "image".into(), + + ..Default::default() + }, + + ..Default::default() + }, + + ..Default::default() + }, + result: 
Err(VmConfigError::MultipleBootFiles), + }, + TestData { + cfg: named_hypervisor_cfg_with_image_and_kernel, + result: Ok(vmconfig_with_image_and_kernel), + }, + TestData { + cfg: named_hypervisor_cfg_with_initrd, + result: Ok(vmconfig_with_initrd), + }, + TestData { + cfg: named_hypervisor_cfg_confidential_guest_image, + result: Ok(vmconfig_confidential_guest_image), + }, + TestData { + cfg: named_hypervisor_cfg_confidential_guest_initrd, + result: Ok(vmconfig_confidential_guest_initrd), + }, + TestData { + cfg: named_hypervisor_cfg_tdx_image, + result: Ok(vmconfig_tdx_image), + }, + TestData { + cfg: named_hypervisor_cfg_tdx_initrd, + result: Ok(vmconfig_tdx_initrd), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = VmConfig::try_from(d.cfg.clone()); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + if d.result.is_err() { + assert!(result.is_err(), "{}", msg); + + assert_eq!( + &result.unwrap_err(), + d.result.as_ref().unwrap_err(), + "{}", + msg + ); + continue; + } + + assert!(result.is_ok(), "{}", msg); + assert_eq!(&result.unwrap(), d.result.as_ref().unwrap(), "{}", msg); + } + } +} diff --git a/src/runtime-rs/crates/hypervisor/ch-config/src/errors.rs b/src/runtime-rs/crates/hypervisor/ch-config/src/errors.rs new file mode 100644 index 000000000..7e062f5e6 --- /dev/null +++ b/src/runtime-rs/crates/hypervisor/ch-config/src/errors.rs @@ -0,0 +1,107 @@ +// Copyright (c) 2023 Intel Corporation +// +// SPDX-License-Identifier: Apache-2.0 + +use std::convert::TryFrom; +use thiserror::Error; + +#[derive(Error, Debug, PartialEq)] +pub enum VmConfigError { + #[error("empty sandbox path")] + EmptySandboxPath, + + #[error("empty VSOCK socket path")] + EmptyVsockSocketPath, + + #[error("cannot specify image and initrd")] + MultipleBootFiles, + + #[error("missing boot image (no rootfs image or initrd)")] + NoBootFile, + + #[error("CPU config error: {0}")] + CPUError(CpusConfigError), + + #[error("Pmem config error: {0}")] + PmemError(PmemConfigError), + + #[error("Payload config error: {0}")] + PayloadError(PayloadConfigError), + + #[error("Disk config error: {0}")] + DiskError(DiskConfigError), + + #[error("Memory config error: {0}")] + MemoryError(MemoryConfigError), + + // The 2nd arg is actually a std::io::Error but that doesn't implement + // PartialEq, so we convert it to a String. 
+ #[error("Failed to create sandbox path ({0}: {1}")] + SandboxError(String, String), + + #[error("VSOCK config error: {0}")] + VsockError(VsockConfigError), +} + +#[derive(Error, Debug, PartialEq)] +pub enum PmemConfigError { + #[error("Need rootfs image for PmemConfig")] + MissingImage, +} + +#[derive(Error, Debug, PartialEq)] +pub enum DiskConfigError { + #[error("Need path for DiskConfig")] + MissingPath, + + #[error("Found unexpected path for DiskConfig with TDX: {0}")] + UnexpectedPathForTDX(String), +} + +#[derive(Error, Debug, PartialEq)] +pub enum CpusConfigError { + #[error("Too many boot vCPUs specified: {0}")] + BootVCPUsTooBig(>::Error), + + #[error("Too many max vCPUs specified: {0}")] + MaxVCPUsTooBig(>::Error), +} + +#[derive(Error, Debug, PartialEq)] +pub enum PayloadConfigError { + #[error("No kernel specified")] + NoKernel, + + #[error("No initrd/initramfs specified")] + NoInitrd, + + #[error("Need firmware for TDX")] + TDXFirmwareMissing, +} + +#[derive(Error, Debug, PartialEq)] +pub enum MemoryConfigError { + #[error("No default memory specified")] + NoDefaultMemory, + + #[error("Default memory size > available RAM")] + DefaultMemSizeTooBig, + + #[error("Cannot convert default memory to bytes: {0}")] + BadDefaultMemSize(u32), + + #[error("Cannot calculate hotplug memory size from default memory: {0}")] + BadMemSizeForHotplug(u64), + + #[error("Cannot align hotplug memory size from pmem: {0}")] + BadPmemAlign(u64), + + #[error("Failed to query system memory information: {0}")] + SysInfoFail(#[source] nix::errno::Errno), +} + +#[derive(Error, Debug, PartialEq)] +pub enum VsockConfigError { + #[error("Missing VSOCK socket path")] + NoVsockSocketPath, +} diff --git a/src/runtime-rs/crates/hypervisor/ch-config/src/lib.rs b/src/runtime-rs/crates/hypervisor/ch-config/src/lib.rs index 2969e6847..9d6214a77 100644 --- a/src/runtime-rs/crates/hypervisor/ch-config/src/lib.rs +++ b/src/runtime-rs/crates/hypervisor/ch-config/src/lib.rs @@ -17,6 +17,8 @@ pub use net_util::MacAddr; pub const MAX_NUM_PCI_SEGMENTS: u16 = 16; +mod errors; + #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, Default)] pub struct BalloonConfig { pub size: u64, @@ -330,7 +332,6 @@ pub struct PlatformConfig { pub uuid: Option, #[serde(default)] pub oem_strings: Option>, - #[cfg(feature = "tdx")] #[serde(default)] pub tdx: bool, } @@ -425,9 +426,7 @@ pub struct VmConfig { pub fs: Option>, #[serde(skip_serializing_if = "Option::is_none")] pub pmem: Option>, - //#[serde(default = "ConsoleConfig::default_serial")] pub serial: ConsoleConfig, - //#[serde(default = "ConsoleConfig::default_console")] pub console: ConsoleConfig, #[serde(skip_serializing_if = "Option::is_none")] pub devices: Option>, @@ -484,12 +483,13 @@ fn u16_is_zero(v: &u16) -> bool { // Type used to simplify conversion from a generic Hypervisor config // to a CH specific VmConfig. 
-#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub struct NamedHypervisorConfig { pub kernel_params: String, pub sandbox_path: String, pub vsock_socket_path: String, pub cfg: HypervisorConfig, + pub tdx_enabled: bool, pub shared_fs_devices: Option>, } diff --git a/src/runtime-rs/crates/hypervisor/src/ch/inner_hypervisor.rs b/src/runtime-rs/crates/hypervisor/src/ch/inner_hypervisor.rs index 3a2631c35..89747d936 100644 --- a/src/runtime-rs/crates/hypervisor/src/ch/inner_hypervisor.rs +++ b/src/runtime-rs/crates/hypervisor/src/ch/inner_hypervisor.rs @@ -68,6 +68,8 @@ impl CloudHypervisorInner { let enable_debug = cfg.debug_info.enable_debug; + let confidential_guest = cfg.security_info.confidential_guest; + // Note that the configuration option hypervisor.block_device_driver is not used. let rootfs_driver = VM_ROOTFS_DRIVER_PMEM; @@ -81,6 +83,18 @@ impl CloudHypervisorInner { let mut rootfs_param = KernelParams::new_rootfs_kernel_params(rootfs_driver, rootfs_type)?; + let mut extra_params = if enable_debug { + if confidential_guest { + KernelParams::from_string("console=hvc0") + } else { + KernelParams::from_string("console=ttyS0,115200n8") + } + } else { + KernelParams::from_string("quiet") + }; + + params.append(&mut extra_params); + // Add the rootfs device params.append(&mut rootfs_param); @@ -121,11 +135,18 @@ impl CloudHypervisorInner { let kernel_params = self.get_kernel_params().await?; + // FIXME: See: + // + // - https://github.com/kata-containers/kata-containers/issues/6383 + // - https://github.com/kata-containers/kata-containers/pull/6257 + let tdx_enabled = false; + let named_cfg = NamedHypervisorConfig { kernel_params, sandbox_path, vsock_socket_path, cfg: hypervisor_config.clone(), + tdx_enabled, shared_fs_devices, }; @@ -229,7 +250,13 @@ impl CloudHypervisorInner { async fn cloud_hypervisor_launch(&mut self, _timeout_secs: i32) -> Result<()> { self.cloud_hypervisor_ensure_not_launched().await?; - let debug = false; + let cfg = self + .config + .as_ref() + .ok_or("no hypervisor config for CH") + .map_err(|e| anyhow!(e))?; + + let debug = cfg.debug_info.enable_debug; let disable_seccomp = true; diff --git a/src/runtime-rs/crates/hypervisor/src/dragonball/inner_device.rs b/src/runtime-rs/crates/hypervisor/src/dragonball/inner_device.rs index d6f7baecb..48d9a3508 100644 --- a/src/runtime-rs/crates/hypervisor/src/dragonball/inner_device.rs +++ b/src/runtime-rs/crates/hypervisor/src/dragonball/inner_device.rs @@ -188,6 +188,9 @@ impl DragonballInner { let args: Vec<&str> = opt_list.split(',').collect(); for arg in args { match arg { + "cache=none" => fs_cfg.cache_policy = String::from("none"), + "cache=auto" => fs_cfg.cache_policy = String::from("auto"), + "cache=always" => fs_cfg.cache_policy = String::from("always"), "no_open" => fs_cfg.no_open = true, "open" => fs_cfg.no_open = false, "writeback_cache" => fs_cfg.writeback_cache = true, diff --git a/src/runtime-rs/crates/resource/Cargo.toml b/src/runtime-rs/crates/resource/Cargo.toml index 73b577c5a..baafd28b7 100644 --- a/src/runtime-rs/crates/resource/Cargo.toml +++ b/src/runtime-rs/crates/resource/Cargo.toml @@ -16,8 +16,10 @@ bitflags = "1.2.1" byte-unit = "4.0.14" cgroups-rs = "0.3.2" futures = "0.3.11" +hex = "0.4.3" lazy_static = "1.4.0" libc = ">=0.2.39" +netns-rs = "0.1.0" netlink-sys = "0.8.3" netlink-packet-route = "0.13.0" nix = "0.24.2" diff --git a/src/runtime-rs/crates/resource/src/manager.rs b/src/runtime-rs/crates/resource/src/manager.rs index 0a0082512..9514b6013 100644 --- 
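Editor's note: the new `tdx_enabled` flag on `NamedHypervisorConfig` is what the TDX test cases above rely on, together with a `get_platform_cfg(true)` helper whose body is not shown in this patch. The sketch below is a rough guess at what such a helper returns; the name `platform_cfg_for_tdx` is hypothetical, and it assumes `PlatformConfig` derives `Default` like the other config structs in `lib.rs`.

```rust
// Hypothetical helper: only build a PlatformConfig when TDX is requested,
// with the (no longer feature-gated) tdx flag switched on.
fn platform_cfg_for_tdx(tdx_enabled: bool) -> Option<PlatformConfig> {
    if !tdx_enabled {
        return None;
    }

    Some(PlatformConfig {
        tdx: true,
        ..Default::default()
    })
}
```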
a/src/runtime-rs/crates/resource/src/manager.rs +++ b/src/runtime-rs/crates/resource/src/manager.rs @@ -4,6 +4,7 @@ // SPDX-License-Identifier: Apache-2.0 // +use crate::network::NetworkConfig; use crate::resource_persist::ResourceState; use crate::{manager_inner::ResourceManagerInner, rootfs::Rootfs, volume::Volume, ResourceConfig}; use agent::{Agent, Storage}; @@ -55,6 +56,11 @@ impl ResourceManager { inner.prepare_before_start_vm(device_configs).await } + pub async fn handle_network(&self, network_config: NetworkConfig) -> Result<()> { + let mut inner = self.inner.write().await; + inner.handle_network(network_config).await + } + pub async fn setup_after_start_vm(&self) -> Result<()> { let mut inner = self.inner.write().await; inner.setup_after_start_vm().await diff --git a/src/runtime-rs/crates/resource/src/manager_inner.rs b/src/runtime-rs/crates/resource/src/manager_inner.rs index e8623e37c..6c6e4067a 100644 --- a/src/runtime-rs/crates/resource/src/manager_inner.rs +++ b/src/runtime-rs/crates/resource/src/manager_inner.rs @@ -6,7 +6,7 @@ use std::{sync::Arc, thread}; -use crate::resource_persist::ResourceState; +use crate::{network::NetworkConfig, resource_persist::ResourceState}; use agent::{Agent, Storage}; use anyhow::{anyhow, Context, Ok, Result}; use async_trait::async_trait; @@ -89,32 +89,9 @@ impl ResourceManagerInner { }; } ResourceConfig::Network(c) => { - // 1. When using Rust asynchronous programming, we use .await to - // allow other task to run instead of waiting for the completion of the current task. - // 2. Also, when handling the pod network, we need to set the shim threads - // into the network namespace to perform those operations. - // However, as the increase of the I/O intensive tasks, two issues could be caused by the two points above: - // a. When the future is blocked, the current thread (which is in the pod netns) - // might be take over by other tasks. After the future is finished, the thread take over - // the current task might not be in the pod netns. But the current task still need to run in pod netns - // b. When finish setting up the network, the current thread will be set back to the host namespace. - // In Rust Async, if the current thread is taken over by other task, the netns is dropped on another thread, - // but it is not in netns. So, the previous thread would still remain in the pod netns. - // The solution is to block the future on the current thread, it is enabled by spawn an os thread, create a - // tokio runtime, and block the task on it. - let hypervisor = self.hypervisor.clone(); - let network = thread::spawn(move || -> Result> { - let rt = runtime::Builder::new_current_thread().enable_io().build()?; - let d = rt.block_on(network::new(&c)).context("new network")?; - rt.block_on(d.setup(hypervisor.as_ref())) - .context("setup network")?; - Ok(d) - }) - .join() - .map_err(|e| anyhow!("{:?}", e)) - .context("Couldn't join on the associated thread")? - .context("failed to set up network")?; - self.network = Some(network); + self.handle_network(c) + .await + .context("failed to handle network")?; } }; } @@ -122,6 +99,38 @@ impl ResourceManagerInner { Ok(()) } + pub async fn handle_network(&mut self, network_config: NetworkConfig) -> Result<()> { + // 1. When using Rust asynchronous programming, we use .await to + // allow other task to run instead of waiting for the completion of the current task. + // 2. Also, when handling the pod network, we need to set the shim threads + // into the network namespace to perform those operations. 
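Editor's note: the removed block above (re-added verbatim inside `handle_network()` in the continuation below) relies on one pattern worth calling out: running the async network setup to completion on a dedicated OS thread with a current-thread tokio runtime, so that the thread that enters the pod netns is also the one that leaves it and drops the guard. A minimal, self-contained sketch of that pattern follows; the function name and the empty async body are placeholders, not part of the patch.

```rust
use std::thread;

use anyhow::{anyhow, Context, Result};
use tokio::runtime;

// Run an async setup step to completion on a dedicated OS thread with a
// current-thread runtime, so that entering and leaving thread-affine state
// (such as a network namespace) happens on one and the same thread.
fn setup_on_dedicated_thread() -> Result<()> {
    let inner = thread::spawn(move || -> Result<()> {
        let rt = runtime::Builder::new_current_thread().enable_io().build()?;
        rt.block_on(async {
            // Entering the netns and creating endpoints would go here.
            Ok(())
        })
    })
    .join()
    .map_err(|e| anyhow!("{:?}", e))
    .context("couldn't join on the helper thread")?;

    inner.context("async setup failed")
}
```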
+ // However, as the increase of the I/O intensive tasks, two issues could be caused by the two points above: + // a. When the future is blocked, the current thread (which is in the pod netns) + // might be take over by other tasks. After the future is finished, the thread take over + // the current task might not be in the pod netns. But the current task still need to run in pod netns + // b. When finish setting up the network, the current thread will be set back to the host namespace. + // In Rust Async, if the current thread is taken over by other task, the netns is dropped on another thread, + // but it is not in netns. So, the previous thread would still remain in the pod netns. + // The solution is to block the future on the current thread, it is enabled by spawn an os thread, create a + // tokio runtime, and block the task on it. + let hypervisor = self.hypervisor.clone(); + let network = thread::spawn(move || -> Result> { + let rt = runtime::Builder::new_current_thread().enable_io().build()?; + let d = rt + .block_on(network::new(&network_config)) + .context("new network")?; + rt.block_on(d.setup(hypervisor.as_ref())) + .context("setup network")?; + Ok(d) + }) + .join() + .map_err(|e| anyhow!("{:?}", e)) + .context("Couldn't join on the associated thread")? + .context("failed to set up network")?; + self.network = Some(network); + Ok(()) + } + async fn handle_interfaces(&self, network: &dyn Network) -> Result<()> { for i in network.interfaces().await.context("get interfaces")? { // update interface diff --git a/src/runtime-rs/crates/resource/src/network/mod.rs b/src/runtime-rs/crates/resource/src/network/mod.rs index a85c2213d..0fe3aa294 100644 --- a/src/runtime-rs/crates/resource/src/network/mod.rs +++ b/src/runtime-rs/crates/resource/src/network/mod.rs @@ -18,7 +18,7 @@ use network_with_netns::NetworkWithNetns; mod network_pair; use network_pair::NetworkPair; mod utils; -pub use utils::netns::NetnsGuard; +pub use utils::netns::{generate_netns_name, NetnsGuard}; use std::sync::Arc; @@ -38,6 +38,7 @@ pub trait Network: Send + Sync { async fn routes(&self) -> Result>; async fn neighs(&self) -> Result>; async fn save(&self) -> Option>; + async fn remove(&self, h: &dyn Hypervisor) -> Result<()>; } pub async fn new(config: &NetworkConfig) -> Result> { diff --git a/src/runtime-rs/crates/resource/src/network/network_with_netns.rs b/src/runtime-rs/crates/resource/src/network/network_with_netns.rs index 809897eba..bb5273ffc 100644 --- a/src/runtime-rs/crates/resource/src/network/network_with_netns.rs +++ b/src/runtime-rs/crates/resource/src/network/network_with_netns.rs @@ -4,9 +4,12 @@ // SPDX-License-Identifier: Apache-2.0 // -use std::sync::{ - atomic::{AtomicU32, Ordering}, - Arc, +use std::{ + fs, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, + }, }; use super::endpoint::endpoint_persist::EndpointState; @@ -14,6 +17,7 @@ use anyhow::{anyhow, Context, Result}; use async_trait::async_trait; use futures::stream::TryStreamExt; use hypervisor::Hypervisor; +use netns_rs::get_from_path; use scopeguard::defer; use tokio::sync::RwLock; @@ -33,11 +37,13 @@ pub struct NetworkWithNetNsConfig { pub network_model: String, pub netns_path: String, pub queues: usize, + pub network_created: bool, } struct NetworkWithNetnsInner { netns_path: String, entity_list: Vec, + network_created: bool, } impl NetworkWithNetnsInner { @@ -54,6 +60,7 @@ impl NetworkWithNetnsInner { Ok(Self { netns_path: config.netns_path.to_string(), entity_list, + network_created: config.network_created, }) } } @@ -120,6 +127,26 
@@ impl Network for NetworkWithNetns { } Some(endpoint) } + + async fn remove(&self, h: &dyn Hypervisor) -> Result<()> { + let inner = self.inner.read().await; + // The network namespace would have been deleted at this point + // if it has not been created by virtcontainers. + if !inner.network_created { + return Ok(()); + } + { + let _netns_guard = + netns::NetnsGuard::new(&inner.netns_path).context("net netns guard")?; + for e in &inner.entity_list { + e.endpoint.detach(h).await.context("detach")?; + } + } + let netns = get_from_path(inner.netns_path.clone())?; + netns.remove()?; + fs::remove_dir_all(inner.netns_path.clone()).context("failed to remove netns path")?; + Ok(()) + } } async fn get_entity_from_netns(config: &NetworkWithNetNsConfig) -> Result> { diff --git a/src/runtime-rs/crates/resource/src/network/utils/netns.rs b/src/runtime-rs/crates/resource/src/network/utils/netns.rs index bb0343dff..f2dc2ae6f 100644 --- a/src/runtime-rs/crates/resource/src/network/utils/netns.rs +++ b/src/runtime-rs/crates/resource/src/network/utils/netns.rs @@ -9,6 +9,7 @@ use std::{fs::File, os::unix::io::AsRawFd}; use anyhow::{Context, Result}; use nix::sched::{setns, CloneFlags}; use nix::unistd::{getpid, gettid}; +use rand::Rng; pub struct NetnsGuard { old_netns: Option, @@ -50,6 +51,20 @@ impl Drop for NetnsGuard { } } +// generate the network namespace name +pub fn generate_netns_name() -> String { + let mut rng = rand::thread_rng(); + let random_bytes: [u8; 16] = rng.gen(); + format!( + "cnitest-{}-{}-{}-{}-{}", + hex::encode(&random_bytes[..4]), + hex::encode(&random_bytes[4..6]), + hex::encode(&random_bytes[6..8]), + hex::encode(&random_bytes[8..10]), + hex::encode(&random_bytes[10..]) + ) +} + #[cfg(test)] mod tests { use super::*; @@ -67,4 +82,14 @@ mod tests { let empty_path = ""; assert!(NetnsGuard::new(empty_path).unwrap().old_netns.is_none()); } + + #[test] + fn test_generate_netns_name() { + let name1 = generate_netns_name(); + let name2 = generate_netns_name(); + let name3 = generate_netns_name(); + assert_ne!(name1, name2); + assert_ne!(name2, name3); + assert_ne!(name1, name3); + } } diff --git a/src/runtime-rs/crates/resource/src/rootfs/nydus_rootfs.rs b/src/runtime-rs/crates/resource/src/rootfs/nydus_rootfs.rs index 16f9c48dd..008443b87 100644 --- a/src/runtime-rs/crates/resource/src/rootfs/nydus_rootfs.rs +++ b/src/runtime-rs/crates/resource/src/rootfs/nydus_rootfs.rs @@ -3,7 +3,7 @@ // // SPDX-License-Identifier: Apache-2.0 // -use std::{fs, sync::Arc}; +use std::{fs, path::Path, sync::Arc}; use super::{Rootfs, TYPE_OVERLAY_FS}; use crate::{ @@ -28,6 +28,8 @@ const NYDUS_ROOTFS_V6: &str = "v6"; const SNAPSHOT_DIR: &str = "snapshotdir"; const KATA_OVERLAY_DEV_TYPE: &str = "overlayfs"; +// nydus prefetch file list name +const NYDUS_PREFETCH_FILE_LIST: &str = "prefetch_file.list"; pub(crate) struct NydusRootfs { guest_path: String, @@ -42,6 +44,9 @@ impl NydusRootfs { cid: &str, rootfs: &Mount, ) -> Result { + let prefetch_list_path = + get_nydus_prefetch_files(h.hypervisor_config().await.prefetch_list_path).await; + let share_fs_mount = share_fs.get_share_fs_mount(); let extra_options = NydusExtraOptions::new(rootfs).context("failed to parse nydus extra options")?; @@ -59,7 +64,7 @@ impl NydusRootfs { rafs_meta.to_string(), rafs_mnt, extra_options.config.clone(), - None, + prefetch_list_path, ) .await .context("failed to do rafs mount")?; @@ -151,3 +156,67 @@ impl Rootfs for NydusRootfs { Ok(()) } } + +// Check prefetch files list path, and if invalid, discard it directly. 
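Editor's note: `generate_netns_name()` above produces a random, UUID-formatted name (e.g. `cnitest-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`); later in this patch the runtime manager uses such a name to create the namespace via `netns-rs` and records its path. A compressed, illustrative version of that flow is sketched here; the function name `create_vm_netns` is invented for the example.

```rust
use std::path::PathBuf;

use anyhow::Result;
use netns_rs::NetNs;

// Illustrative only: create a fresh netns from a generated name and return
// its filesystem path, mirroring the manager.rs change further down.
fn create_vm_netns() -> Result<Option<String>> {
    let name = generate_netns_name();
    let netns = NetNs::new(name)?;
    let path = PathBuf::from(netns.path()).to_str().map(|s| s.to_string());
    Ok(path)
}
```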
+// As the result of caller `rafs_mount`, it returns `Option`. +async fn get_nydus_prefetch_files(nydus_prefetch_path: String) -> Option { + // nydus_prefetch_path is an annotation and pod with it will indicate + // that prefetch_files will be included. + if nydus_prefetch_path.is_empty() { + info!(sl!(), "nydus prefetch files path not set, just skip it."); + + return None; + } + + // Ensure the string ends with "/prefetch_files.list" + if !nydus_prefetch_path.ends_with(format!("/{}", NYDUS_PREFETCH_FILE_LIST).as_str()) { + info!( + sl!(), + "nydus prefetch file path no {:?} file exist.", NYDUS_PREFETCH_FILE_LIST + ); + + return None; + } + + // ensure the prefetch_list_path is a regular file. + let prefetch_list_path = Path::new(nydus_prefetch_path.as_str()); + if !prefetch_list_path.is_file() { + info!( + sl!(), + "nydus prefetch list file {:?} not a regular file", &prefetch_list_path + ); + + return None; + } + + return Some(prefetch_list_path.display().to_string()); +} + +#[cfg(test)] +mod tests { + use super::*; + use std::{fs::File, path::PathBuf}; + use tempfile::tempdir; + + #[tokio::test] + async fn test_get_nydus_prefetch_files() { + let temp_dir = tempdir().unwrap(); + let prefetch_list_path01 = temp_dir.path().join("nydus_prefetch_files"); + // /tmp_dir/nydus_prefetch_files/ + std::fs::create_dir_all(prefetch_list_path01.clone()).unwrap(); + // /tmp_dir/nydus_prefetch_files/prefetch_file.list + let prefetch_list_path02 = prefetch_list_path01 + .as_path() + .join(NYDUS_PREFETCH_FILE_LIST); + let file = File::create(prefetch_list_path02.clone()); + assert!(file.is_ok()); + + let prefetch_file = + get_nydus_prefetch_files(prefetch_list_path02.as_path().display().to_string()).await; + assert!(prefetch_file.is_some()); + assert_eq!(PathBuf::from(prefetch_file.unwrap()), prefetch_list_path02); + + drop(file); + temp_dir.close().unwrap_or_default(); + } +} diff --git a/src/runtime-rs/crates/runtimes/Cargo.toml b/src/runtime-rs/crates/runtimes/Cargo.toml index 3a6ab0a1b..768122684 100644 --- a/src/runtime-rs/crates/runtimes/Cargo.toml +++ b/src/runtime-rs/crates/runtimes/Cargo.toml @@ -8,6 +8,7 @@ license = "Apache-2.0" [dependencies] anyhow = "^1.0" lazy_static = "1.4.0" +netns-rs = "0.1.0" slog = "2.5.2" slog-scope = "4.4.0" tokio = { version = "1.8.0", features = ["rt-multi-thread"] } @@ -26,6 +27,8 @@ oci = { path = "../../../libs/oci" } shim-interface = { path = "../../../libs/shim-interface" } persist = { path = "../persist" } hypervisor = { path = "../hypervisor" } +resource = { path = "../resource" } + # runtime handler linux_container = { path = "./linux_container", optional = true } virt_container = { path = "./virt_container", optional = true } diff --git a/src/runtime-rs/crates/runtimes/common/Cargo.toml b/src/runtime-rs/crates/runtimes/common/Cargo.toml index 78a640e95..440db1486 100644 --- a/src/runtime-rs/crates/runtimes/common/Cargo.toml +++ b/src/runtime-rs/crates/runtimes/common/Cargo.toml @@ -10,17 +10,17 @@ license = "Apache-2.0" [dependencies] anyhow = "^1.0" async-trait = "0.1.48" -containerd-shim-protos = { version = "0.2.0", features = ["async"]} +containerd-shim-protos = { version = "0.3.0", features = ["async"]} lazy_static = "1.4.0" nix = "0.24.2" -protobuf = "2.27.0" +protobuf = "3.2.0" serde_json = "1.0.39" slog = "2.5.2" slog-scope = "4.4.0" strum = { version = "0.24.0", features = ["derive"] } thiserror = "^1.0" tokio = { version = "1.8.0", features = ["rt-multi-thread", "process", "fs"] } -ttrpc = { version = "0.6.1" } +ttrpc = { version = "0.7.1" } 
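Editor's note: the dependency bumps in this and the following `Cargo.toml` hunks (protobuf 2.27 → 3.2, ttrpc 0.6 → 0.7, containerd-shim-protos 0.2 → 0.3) drive the mechanical API changes in the Rust hunks below. A condensed reminder of the mapping, with a small example in the style of the patched code (the helper `timestamp_field` is illustrative, not part of the patch):

```rust
// protobuf 2.x -> 3.x moves applied throughout the hunks that follow:
//   ::protobuf::RepeatedField<T>    -> plain Vec<T>
//   ::protobuf::SingularPtrField<T> -> ::protobuf::MessageField<T>
//   well_known_types::Timestamp     -> well_known_types::timestamp::Timestamp
//   well_known_types::Any           -> well_known_types::any::Any
//   setter methods (set_seconds)    -> direct field access (ts.seconds = ...)
//   plain enum fields               -> protobuf::EnumOrUnknown::new(...)
use protobuf::well_known_types::timestamp::Timestamp;
use protobuf::MessageField;

fn timestamp_field(secs: i64) -> MessageField<Timestamp> {
    let mut ts = Timestamp::new();
    ts.seconds = secs;
    MessageField::some(ts)
}
```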
persist = {path = "../../persist"} agent = { path = "../../agent" } kata-sys-util = { path = "../../../../libs/kata-sys-util" } diff --git a/src/runtime-rs/crates/runtimes/common/src/lib.rs b/src/runtime-rs/crates/runtimes/common/src/lib.rs index 36977964a..adb5ca002 100644 --- a/src/runtime-rs/crates/runtimes/common/src/lib.rs +++ b/src/runtime-rs/crates/runtimes/common/src/lib.rs @@ -11,5 +11,5 @@ pub mod message; mod runtime_handler; pub use runtime_handler::{RuntimeHandler, RuntimeInstance}; mod sandbox; -pub use sandbox::Sandbox; +pub use sandbox::{Sandbox, SandboxNetworkEnv}; pub mod types; diff --git a/src/runtime-rs/crates/runtimes/common/src/sandbox.rs b/src/runtime-rs/crates/runtimes/common/src/sandbox.rs index 0aee04922..efe06fa43 100644 --- a/src/runtime-rs/crates/runtimes/common/src/sandbox.rs +++ b/src/runtime-rs/crates/runtimes/common/src/sandbox.rs @@ -7,14 +7,20 @@ use anyhow::Result; use async_trait::async_trait; +#[derive(Clone)] +pub struct SandboxNetworkEnv { + pub netns: Option, + pub network_created: bool, +} + #[async_trait] pub trait Sandbox: Send + Sync { async fn start( &self, - netns: Option, dns: Vec, spec: &oci::Spec, state: &oci::State, + network_env: SandboxNetworkEnv, ) -> Result<()>; async fn stop(&self) -> Result<()>; async fn cleanup(&self) -> Result<()>; diff --git a/src/runtime-rs/crates/runtimes/common/src/types/trans_from_agent.rs b/src/runtime-rs/crates/runtimes/common/src/types/trans_from_agent.rs index 887777122..f28f50582 100644 --- a/src/runtime-rs/crates/runtimes/common/src/types/trans_from_agent.rs +++ b/src/runtime-rs/crates/runtimes/common/src/types/trans_from_agent.rs @@ -151,7 +151,7 @@ impl From> for StatsInfo { } if !cg_stats.hugetlb_stats.is_empty() { - let mut p_huge = ::protobuf::RepeatedField::new(); + let mut p_huge = Vec::new(); for (k, v) in cg_stats.hugetlb_stats { let mut h = metrics::HugetlbStat::new(); h.set_pagesize(k); @@ -166,7 +166,7 @@ impl From> for StatsInfo { let net_stats = stats.network_stats; if !net_stats.is_empty() { - let mut p_net = ::protobuf::RepeatedField::new(); + let mut p_net = Vec::new(); for v in net_stats.iter() { let mut h = metrics::NetworkStat::new(); h.set_name(v.name.clone()); @@ -195,10 +195,8 @@ impl From> for StatsInfo { } } -fn copy_blkio_entry( - entry: &[agent::BlkioStatsEntry], -) -> ::protobuf::RepeatedField { - let mut p_entry = ::protobuf::RepeatedField::new(); +fn copy_blkio_entry(entry: &[agent::BlkioStatsEntry]) -> Vec { + let mut p_entry = Vec::new(); for e in entry.iter() { let mut blk = metrics::BlkIOEntry::new(); diff --git a/src/runtime-rs/crates/runtimes/common/src/types/trans_from_shim.rs b/src/runtime-rs/crates/runtimes/common/src/types/trans_from_shim.rs index 4d5d7ddf1..29a4a676c 100644 --- a/src/runtime-rs/crates/runtimes/common/src/types/trans_from_shim.rs +++ b/src/runtime-rs/crates/runtimes/common/src/types/trans_from_shim.rs @@ -16,7 +16,7 @@ use std::{ path::PathBuf, }; -fn trans_from_shim_mount(from: api::Mount) -> Mount { +fn trans_from_shim_mount(from: &api::Mount) -> Mount { let options = from.options.to_vec(); let mut read_only = false; for o in &options { @@ -29,7 +29,7 @@ fn trans_from_shim_mount(from: api::Mount) -> Mount { Mount { source: from.source.clone(), destination: PathBuf::from(&from.target), - fs_type: from.field_type, + fs_type: from.type_.clone(), options, device_id: None, host_shared_fs_path: None, @@ -41,19 +41,14 @@ impl TryFrom for Request { type Error = anyhow::Error; fn try_from(from: api::CreateTaskRequest) -> Result { let options = if 
from.has_options() { - Some(from.get_options().get_value().to_vec()) + Some(from.options().value.to_vec()) } else { None }; Ok(Request::CreateContainer(ContainerConfig { container_id: from.id.clone(), bundle: from.bundle.clone(), - rootfs_mounts: from - .rootfs - .to_vec() - .into_iter() - .map(trans_from_shim_mount) - .collect(), + rootfs_mounts: from.rootfs.iter().map(trans_from_shim_mount).collect(), terminal: from.terminal, options, stdin: (!from.stdin.is_empty()).then(|| from.stdin.clone()), @@ -84,15 +79,15 @@ impl TryFrom for Request { impl TryFrom for Request { type Error = anyhow::Error; fn try_from(from: api::ExecProcessRequest) -> Result { - let spec = from.get_spec(); + let spec = from.spec(); Ok(Request::ExecProcess(ExecProcessRequest { process: ContainerProcess::new(&from.id, &from.exec_id).context("new process id")?, terminal: from.terminal, stdin: (!from.stdin.is_empty()).then(|| from.stdin.clone()), stdout: (!from.stdout.is_empty()).then(|| from.stdout.clone()), stderr: (!from.stderr.is_empty()).then(|| from.stderr.clone()), - spec_type_url: spec.get_type_url().to_string(), - spec_value: spec.get_value().to_vec(), + spec_type_url: spec.type_url.to_string(), + spec_value: spec.value.to_vec(), })) } } @@ -182,7 +177,7 @@ impl TryFrom for Request { fn try_from(from: api::UpdateTaskRequest) -> Result { Ok(Request::UpdateContainer(UpdateRequest { container_id: from.id.to_string(), - value: from.get_resources().get_value().to_vec(), + value: from.resources().value.to_vec(), })) } } diff --git a/src/runtime-rs/crates/runtimes/common/src/types/trans_into_shim.rs b/src/runtime-rs/crates/runtimes/common/src/types/trans_into_shim.rs index 345e02d93..841805bb0 100644 --- a/src/runtime-rs/crates/runtimes/common/src/types/trans_into_shim.rs +++ b/src/runtime-rs/crates/runtimes/common/src/types/trans_into_shim.rs @@ -16,24 +16,24 @@ use containerd_shim_protos::api; use super::{ProcessExitStatus, ProcessStateInfo, ProcessStatus, Response}; use crate::error::Error; -fn system_time_into(time: time::SystemTime) -> ::protobuf::well_known_types::Timestamp { - let mut proto_time = ::protobuf::well_known_types::Timestamp::new(); - proto_time.set_seconds( - time.duration_since(time::UNIX_EPOCH) - .unwrap_or_default() - .as_secs() - .try_into() - .unwrap_or_default(), - ); +fn system_time_into(time: time::SystemTime) -> ::protobuf::well_known_types::timestamp::Timestamp { + let mut proto_time = ::protobuf::well_known_types::timestamp::Timestamp::new(); + proto_time.seconds = time + .duration_since(time::UNIX_EPOCH) + .unwrap_or_default() + .as_secs() + .try_into() + .unwrap_or_default(); + proto_time } fn option_system_time_into( time: Option, -) -> ::protobuf::SingularPtrField<::protobuf::well_known_types::Timestamp> { +) -> protobuf::MessageField { match time { - Some(v) => ::protobuf::SingularPtrField::some(system_time_into(v)), - None => ::protobuf::SingularPtrField::none(), + Some(v) => ::protobuf::MessageField::some(system_time_into(v)), + None => ::protobuf::MessageField::none(), } } @@ -66,7 +66,7 @@ impl From for api::StateResponse { id: from.container_id.clone(), bundle: from.bundle.clone(), pid: from.pid.pid, - status: from.status.into(), + status: protobuf::EnumOrUnknown::new(from.status.into()), stdin: from.stdin.unwrap_or_default(), stdout: from.stdout.unwrap_or_default(), stderr: from.stderr.unwrap_or_default(), @@ -164,13 +164,13 @@ impl TryFrom for api::StateResponse { impl TryFrom for api::StatsResponse { type Error = anyhow::Error; fn try_from(from: Response) -> Result { - let 
mut any = ::protobuf::well_known_types::Any::new(); + let mut any = ::protobuf::well_known_types::any::Any::new(); let mut response = api::StatsResponse::new(); match from { Response::StatsContainer(resp) => { if let Some(value) = resp.value { - any.set_type_url(value.type_url); - any.set_value(value.value); + any.type_url = value.type_url; + any.value = value.value; response.set_stats(any); } Ok(response) @@ -193,8 +193,7 @@ impl TryFrom for api::PidsResponse { let mut res = api::PidsResponse::new(); p_info.set_pid(resp.pid); processes.push(p_info); - let v = protobuf::RepeatedField::::from_vec(processes); - res.set_processes(v); + res.set_processes(processes); Ok(res) } _ => Err(anyhow!(Error::UnexpectedResponse( diff --git a/src/runtime-rs/crates/runtimes/src/manager.rs b/src/runtime-rs/crates/runtimes/src/manager.rs index d8aad3a0a..b32c36773 100644 --- a/src/runtime-rs/crates/runtimes/src/manager.rs +++ b/src/runtime-rs/crates/runtimes/src/manager.rs @@ -4,20 +4,22 @@ // SPDX-License-Identifier: Apache-2.0 // -use std::{str::from_utf8, sync::Arc}; - -use anyhow::{anyhow, Context, Result}; +use std::{path::PathBuf, str::from_utf8, sync::Arc}; use crate::{shim_mgmt::server::MgmtServer, static_resource::StaticResourceManager}; +use anyhow::{anyhow, Context, Result}; use common::{ message::Message, types::{Request, Response}, - RuntimeHandler, RuntimeInstance, Sandbox, + RuntimeHandler, RuntimeInstance, Sandbox, SandboxNetworkEnv, }; use hypervisor::Param; +use kata_sys_util::spec::load_oci_spec; use kata_types::{ annotations::Annotation, config::default::DEFAULT_GUEST_DNS_FILE, config::TomlConfig, }; +use netns_rs::NetNs; +use resource::network::generate_netns_name; #[cfg(feature = "linux")] use linux_container::LinuxContainer; @@ -53,7 +55,7 @@ impl RuntimeHandlerManagerInner { &mut self, spec: &oci::Spec, state: &oci::State, - netns: Option, + network_env: SandboxNetworkEnv, dns: Vec, config: Arc, ) -> Result<()> { @@ -77,7 +79,7 @@ impl RuntimeHandlerManagerInner { // start sandbox runtime_instance .sandbox - .start(netns, dns, spec, state) + .start(dns, spec, state, network_env) .await .context("start sandbox")?; self.runtime_instance = Some(Arc::new(runtime_instance)); @@ -104,23 +106,6 @@ impl RuntimeHandlerManagerInner { #[cfg(feature = "virt")] VirtContainer::init().context("init virt container")?; - let netns = if let Some(linux) = &spec.linux { - let mut netns = None; - for ns in &linux.namespaces { - if ns.r#type.as_str() != oci::NETWORKNAMESPACE { - continue; - } - - if !ns.path.is_empty() { - netns = Some(ns.path.clone()); - break; - } - } - netns - } else { - None - }; - for m in &spec.mounts { if m.destination == DEFAULT_GUEST_DNS_FILE { let contents = fs::read_to_string(&m.source).await?; @@ -129,7 +114,42 @@ impl RuntimeHandlerManagerInner { } let config = load_config(spec, options).context("load config")?; - self.init_runtime_handler(spec, state, netns, dns, Arc::new(config)) + + let mut network_created = false; + // set netns to None if we want no network for the VM + let netns = if config.runtime.disable_new_netns { + None + } else { + let mut netns_path = None; + if let Some(linux) = &spec.linux { + for ns in &linux.namespaces { + if ns.r#type.as_str() != oci::NETWORKNAMESPACE { + continue; + } + // get netns path from oci spec + if !ns.path.is_empty() { + netns_path = Some(ns.path.clone()); + } + // if we get empty netns from oci spec, we need to create netns for the VM + else { + let ns_name = generate_netns_name(); + let netns = NetNs::new(ns_name)?; + let path = 
PathBuf::from(netns.path()).to_str().map(|s| s.to_string()); + info!(sl!(), "the netns path is {:?}", path); + netns_path = path; + network_created = true; + } + break; + } + } + netns_path + }; + + let network_env = SandboxNetworkEnv { + netns, + network_created, + }; + self.init_runtime_handler(spec, state, network_env, dns, Arc::new(config)) .await .context("init runtime handler")?; @@ -171,9 +191,16 @@ impl RuntimeHandlerManager { let sender = inner.msg_sender.clone(); let sandbox_state = persist::from_disk::(&inner.id) .context("failed to load the sandbox state")?; + + let config = if let Ok(spec) = load_oci_spec() { + load_config(&spec, &None).context("load config")? + } else { + TomlConfig::default() + }; + let sandbox_args = SandboxRestoreArgs { sid: inner.id.clone(), - toml_config: TomlConfig::default(), + toml_config: config, sender, }; match sandbox_state.sandbox_type.clone() { @@ -189,6 +216,10 @@ impl RuntimeHandlerManager { } #[cfg(feature = "virt")] name if name == VirtContainer::name() => { + if sandbox_args.toml_config.runtime.keep_abnormal { + info!(sl!(), "skip cleanup for keep_abnormal"); + return Ok(()); + } let sandbox = VirtSandbox::restore(sandbox_args, sandbox_state) .await .context("failed to restore the sandbox")?; @@ -236,7 +267,7 @@ impl RuntimeHandlerManager { id: container_config.container_id.to_string(), status: oci::ContainerState::Creating, pid: 0, - bundle: bundler_path, + bundle: container_config.bundle.clone(), annotations: spec.annotations.clone(), }; diff --git a/src/runtime-rs/crates/runtimes/virt_container/Cargo.toml b/src/runtime-rs/crates/runtimes/virt_container/Cargo.toml index 6dea5e762..f3d8d9375 100644 --- a/src/runtime-rs/crates/runtimes/virt_container/Cargo.toml +++ b/src/runtime-rs/crates/runtimes/virt_container/Cargo.toml @@ -9,12 +9,12 @@ license = "Apache-2.0" anyhow = "^1.0" async-trait = "0.1.48" awaitgroup = "0.6.0" -containerd-shim-protos = { version = "0.2.0", features = ["async"]} +containerd-shim-protos = { version = "0.3.0", features = ["async"]} futures = "0.3.19" lazy_static = "1.4.0" libc = ">=0.2.39" nix = "0.24.2" -protobuf = "2.27.0" +protobuf = "3.2.0" serde = { version = "1.0.100", features = ["derive"] } serde_derive = "1.0.27" serde_json = "1.0.82" diff --git a/src/runtime-rs/crates/runtimes/virt_container/src/health_check.rs b/src/runtime-rs/crates/runtimes/virt_container/src/health_check.rs index f6d60c4c4..81fb3d58b 100644 --- a/src/runtime-rs/crates/runtimes/virt_container/src/health_check.rs +++ b/src/runtime-rs/crates/runtimes/virt_container/src/health_check.rs @@ -21,17 +21,17 @@ const HEALTH_CHECK_STOP_CHANNEL_BUFFER_SIZE: usize = 1; pub struct HealthCheck { pub keep_alive: bool, - keep_vm: bool, + keep_abnormal: bool, stop_tx: mpsc::Sender<()>, stop_rx: Arc>>, } impl HealthCheck { - pub fn new(keep_alive: bool, keep_vm: bool) -> HealthCheck { + pub fn new(keep_alive: bool, keep_abnormal: bool) -> HealthCheck { let (tx, rx) = mpsc::channel(HEALTH_CHECK_STOP_CHANNEL_BUFFER_SIZE); HealthCheck { keep_alive, - keep_vm, + keep_abnormal, stop_tx: tx, stop_rx: Arc::new(Mutex::new(rx)), } @@ -46,7 +46,7 @@ impl HealthCheck { info!(sl!(), "start runtime keep alive"); let stop_rx = self.stop_rx.clone(); - let keep_vm = self.keep_vm; + let keep_abnormal = self.keep_abnormal; let _ = tokio::spawn(async move { let mut version_check_threshold_count = 0; @@ -87,7 +87,7 @@ impl HealthCheck { error!(sl!(), "failed to do {} agent health check: {}", id, e); if let Err(mpsc::error::TryRecvError::Empty) = stop_rx.try_recv() { 
error!(sl!(), "failed to receive stop monitor signal"); - if !keep_vm { + if !keep_abnormal { ::std::process::exit(1); } } else { diff --git a/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs b/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs index f996c5747..c5ec38e46 100644 --- a/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs +++ b/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs @@ -14,7 +14,7 @@ use anyhow::{anyhow, Context, Result}; use async_trait::async_trait; use common::{ message::{Action, Message}, - Sandbox, + Sandbox, SandboxNetworkEnv, }; use containerd_shim_protos::events::task::TaskOOM; use hypervisor::{dragonball::Dragonball, Hypervisor, HYPERVISOR_DRAGONBALL}; @@ -75,6 +75,8 @@ impl VirtSandbox { hypervisor: Arc, resource_manager: Arc, ) -> Result { + let config = resource_manager.config().await; + let keep_abnormal = config.runtime.keep_abnormal; Ok(Self { sid: sid.to_string(), msg_sender: Arc::new(Mutex::new(msg_sender)), @@ -82,34 +84,25 @@ impl VirtSandbox { agent, hypervisor, resource_manager, - monitor: Arc::new(HealthCheck::new(true, false)), + monitor: Arc::new(HealthCheck::new(true, keep_abnormal)), }) } - async fn prepare_for_start_sandbox( + async fn prepare_config_for_sandbox( &self, _id: &str, - netns: Option, + network_env: SandboxNetworkEnv, ) -> Result> { let mut resource_configs = vec![]; - - let config = self.resource_manager.config().await; - if let Some(netns_path) = netns { - let network_config = ResourceConfig::Network(NetworkConfig::NetworkResourceWithNetNs( - NetworkWithNetNsConfig { - network_model: config.runtime.internetworking_model.clone(), - netns_path, - queues: self - .hypervisor - .hypervisor_config() - .await - .network_info - .network_queues as usize, - }, - )); - resource_configs.push(network_config); + if !network_env.network_created { + if let Some(netns_path) = network_env.netns { + let network_config = ResourceConfig::Network( + self.prepare_network_config(netns_path, network_env.network_created) + .await, + ); + resource_configs.push(network_config); + } } - let hypervisor_config = self.hypervisor.hypervisor_config().await; let virtio_fs_config = ResourceConfig::ShareFs(hypervisor_config.shared_fs); resource_configs.push(virtio_fs_config); @@ -149,16 +142,43 @@ impl VirtSandbox { Ok(()) } + + async fn prepare_network_config( + &self, + netns_path: String, + network_created: bool, + ) -> NetworkConfig { + let config = self.resource_manager.config().await; + NetworkConfig::NetworkResourceWithNetNs(NetworkWithNetNsConfig { + network_model: config.runtime.internetworking_model.clone(), + netns_path, + queues: self + .hypervisor + .hypervisor_config() + .await + .network_info + .network_queues as usize, + network_created, + }) + } + + fn has_prestart_hooks( + &self, + prestart_hooks: Vec, + create_runtime_hooks: Vec, + ) -> bool { + !prestart_hooks.is_empty() || !create_runtime_hooks.is_empty() + } } #[async_trait] impl Sandbox for VirtSandbox { async fn start( &self, - netns: Option, dns: Vec, spec: &oci::Spec, state: &oci::State, + network_env: SandboxNetworkEnv, ) -> Result<()> { let id = &self.sid; @@ -171,13 +191,15 @@ impl Sandbox for VirtSandbox { } self.hypervisor - .prepare_vm(id, netns.clone()) + .prepare_vm(id, network_env.netns.clone()) .await .context("prepare vm")?; // generate device and setup before start vm // should after hypervisor.prepare_vm - let resources = self.prepare_for_start_sandbox(id, netns).await?; + let resources = self + .prepare_config_for_sandbox(id, 
network_env.clone()) + .await?; self.resource_manager .prepare_before_start_vm(resources) .await @@ -195,8 +217,28 @@ impl Sandbox for VirtSandbox { self.execute_oci_hook_functions(&prestart_hooks, &create_runtime_hooks, state) .await?; - // TODO: if prestart_hooks is not empty, rescan the network endpoints(rely on hotplug endpoints). - // see: https://github.com/kata-containers/kata-containers/issues/6378 + // 1. if there are pre-start hook functions, network config might have been changed. + // We need to rescan the netns to handle the change. + // 2. Do not scan the netns if we want no network for the VM. + // TODO In case of vm factory, scan the netns to hotplug interfaces after the VM is started. + if self.has_prestart_hooks(prestart_hooks, create_runtime_hooks) + && !self + .resource_manager + .config() + .await + .runtime + .disable_new_netns + { + if let Some(netns_path) = network_env.netns { + let network_resource = self + .prepare_network_config(netns_path, network_env.network_created) + .await; + self.resource_manager + .handle_network(network_resource) + .await + .context("set up device after start vm")?; + } + } // connect agent // set agent socket @@ -400,6 +442,7 @@ impl Persist for VirtSandbox { }?; let agent = Arc::new(KataAgent::new(kata_types::config::Agent::default())); let sid = sandbox_args.sid; + let keep_abnormal = config.runtime.keep_abnormal; let args = ManagerArgs { sid: sid.clone(), agent: agent.clone(), @@ -414,7 +457,7 @@ impl Persist for VirtSandbox { agent, hypervisor, resource_manager, - monitor: Arc::new(HealthCheck::new(true, false)), + monitor: Arc::new(HealthCheck::new(true, keep_abnormal)), }) } } diff --git a/src/runtime-rs/crates/service/Cargo.toml b/src/runtime-rs/crates/service/Cargo.toml index 82e2c4fbc..cb414abe3 100644 --- a/src/runtime-rs/crates/service/Cargo.toml +++ b/src/runtime-rs/crates/service/Cargo.toml @@ -11,10 +11,10 @@ async-trait = "0.1.48" slog = "2.5.2" slog-scope = "4.4.0" tokio = { version = "1.8.0", features = ["rt-multi-thread"] } -ttrpc = { version = "0.6.1" } +ttrpc = { version = "0.7.1" } common = { path = "../runtimes/common" } -containerd-shim-protos = { version = "0.2.0", features = ["async"]} +containerd-shim-protos = { version = "0.3.0", features = ["async"]} logging = { path = "../../../libs/logging"} shim-interface = { path = "../../../libs/shim-interface" } runtimes = { path = "../runtimes" } diff --git a/src/runtime-rs/crates/service/src/manager.rs b/src/runtime-rs/crates/service/src/manager.rs index fe31c179b..ff0fd997c 100644 --- a/src/runtime-rs/crates/service/src/manager.rs +++ b/src/runtime-rs/crates/service/src/manager.rs @@ -14,7 +14,7 @@ use std::{ use anyhow::{Context, Result}; use common::message::{Action, Event, Message}; use containerd_shim_protos::{ - protobuf::{well_known_types::Any, Message as ProtobufMessage}, + protobuf::{well_known_types::any::Any, Message as ProtobufMessage}, shim_async, }; use runtimes::RuntimeHandlerManager; diff --git a/src/runtime-rs/crates/shim/Cargo.toml b/src/runtime-rs/crates/shim/Cargo.toml index 76abe1e9f..84521eb00 100644 --- a/src/runtime-rs/crates/shim/Cargo.toml +++ b/src/runtime-rs/crates/shim/Cargo.toml @@ -15,12 +15,12 @@ path = "src/bin/main.rs" [dependencies] anyhow = "^1.0" backtrace = {version = ">=0.3.35", features = ["libunwind", "libbacktrace", "std"], default-features = false} -containerd-shim-protos = { version = "0.2.0", features = ["async"]} +containerd-shim-protos = { version = "0.3.0", features = ["async"]} go-flag = "0.1.0" libc = "0.2.108" log = 
"0.4.14" nix = "0.24.2" -protobuf = "2.27.0" +protobuf = "3.2.0" sha2 = "=0.9.3" slog = {version = "2.5.2", features = ["std", "release_max_level_trace", "max_level_trace"]} slog-async = "2.5.2" diff --git a/src/runtime-rs/crates/shim/src/shim_delete.rs b/src/runtime-rs/crates/shim/src/shim_delete.rs index e1053927f..412fc8be6 100644 --- a/src/runtime-rs/crates/shim/src/shim_delete.rs +++ b/src/runtime-rs/crates/shim/src/shim_delete.rs @@ -26,12 +26,12 @@ impl ShimExecutor { async fn do_cleanup(&self) -> Result { let mut rsp = api::DeleteResponse::new(); rsp.set_exit_status(128 + libc::SIGKILL as u32); - let mut exited_time = protobuf::well_known_types::Timestamp::new(); + let mut exited_time = protobuf::well_known_types::timestamp::Timestamp::new(); let seconds = std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .map_err(Error::SystemTime)? .as_secs() as i64; - exited_time.set_seconds(seconds); + exited_time.seconds = seconds; rsp.set_exited_at(exited_time); let address = self diff --git a/src/runtime/Makefile b/src/runtime/Makefile index 069adac7c..10b1d4808 100644 --- a/src/runtime/Makefile +++ b/src/runtime/Makefile @@ -97,6 +97,8 @@ GENERATED_VARS = \ VERSION \ CONFIG_ACRN_IN \ CONFIG_QEMU_IN \ + CONFIG_QEMU_TDX_IN \ + CONFIG_QEMU_GPU_IN \ CONFIG_CLH_IN \ CONFIG_FC_IN \ CONFIG_CLH_TDX_IN \ @@ -130,8 +132,6 @@ DEFROOTFSTYPE := $(ROOTFSTYPE_EXT4) FIRMWAREPATH := FIRMWAREVOLUMEPATH := -TDVFFIRMWAREPATH := $(PREFIXDEPS)/share/tdvf/OVMF_CODE.fd -TDVFFIRMWAREVOLUMEPATH := $(PREFIXDEPS)/share/tdvf/OVMF_VARS.fd TDSHIMFIRMWAREPATH := ${PREFIXDEPS}/share/td-shim/td-shim.bin SEVFIRMWAREPATH := $(PREFIXDEPS)/share/ovmf/AMDSEV.fd SNPFIRMWAREPATH := $(PREFIXDEPS)/share/ovmf/OVMF.fd @@ -146,12 +146,15 @@ AGENT_AA_KBC_PARAMS_TDX ?= "" AGENT_AA_KBC_PARAMS_SEV ?= "" AGENT_AA_KBC_PARAMS_SNP ?= "" TDXKERNELPARAMS := tdx_disable_filter agent.enable_signature_verification=false $(AGENT_AA_KBC_PARAMS_TDX) -TDXKERNELPARAMS_QEMU += $(TDXKERNELPARAMS) $(ROOTMEASURECONFIGTDX) +KERNELTDXPARAMS += $(TDXKERNELPARAMS) $(ROOTMEASURECONFIGTDX) TDXKERNELPARAMS_CLH += $(TDXKERNELPARAMS) $(ROOTMEASURECONFIG) SEVKERNELPARAMS := $(AGENTCONFIGFILEKERNELPARAM) agent.enable_signature_verification=false $(AGENT_AA_KBC_PARAMS_SEV) SNPKERNELPARAMS := $(AGENTCONFIGFILEKERNELPARAM) agent.enable_signature_verification=false $(AGENT_AA_KBC_PARAMS_SNP) KERNELPARAMS += $(ROOTMEASURECONFIG) agent.enable_signature_verification=false $(AGENT_AA_KBC_PARAMS) +FIRMWARETDVFPATH := $(PREFIXDEPS)/share/tdvf/OVMF.fd +FIRMWARETDVFVOLUMEPATH := $(PREFIXDEPS)/share/tdvf/OVMF_VARS.fd + # Name of default configuration file the runtime will use. 
CONFIG_FILE = configuration.toml @@ -233,6 +236,7 @@ DEFVALIDENTROPYSOURCES := [\"/dev/urandom\",\"/dev/random\",\"\"] DEFDISABLEBLOCK := false DEFSHAREDFS_CLH_VIRTIOFS := virtio-fs DEFSHAREDFS_QEMU_VIRTIOFS := virtio-fs +DEFSHAREDFS_QEMU_TDX_VIRTIOFS := virtio-9p DEFVIRTIOFSDAEMON := $(LIBEXECDIR)/virtiofsd ifeq ($(ARCH),ppc64le) DEFVIRTIOFSDAEMON := $(LIBEXECDIR)/qemu/virtiofsd @@ -328,7 +332,7 @@ ifneq (,$(QEMUCMD)) CONFIG_PATHS += $(CONFIG_PATH_QEMU_TDX) SYSCONFIG_QEMU_TDX = $(abspath $(SYSCONFDIR)/$(CONFIG_FILE_QEMU_TDX)) - SYSCONFIG_PATHS += $(SYSCONFIG_QEMU_TDX) + SYSCONFIG_PATHS_TDX += $(SYSCONFIG_QEMU_TDX) CONFIGS += $(CONFIG_QEMU_TDX) @@ -380,10 +384,18 @@ ifneq (,$(QEMUCMD)) CONFIGS += $(CONFIG_REMOTE) + + CONFIG_FILE_QEMU_GPU = configuration-qemu-gpu.toml + CONFIG_QEMU_GPU = config/$(CONFIG_FILE_QEMU_GPU) + CONFIG_QEMU_GPU_IN = $(CONFIG_QEMU_GPU).in + + CONFIGS += $(CONFIG_QEMU_GPU) + # qemu-specific options (all should be suffixed by "_QEMU") DEFBLOCKSTORAGEDRIVER_QEMU := virtio-scsi DEFBLOCKDEVICEAIO_QEMU := io_uring DEFNETWORKMODEL_QEMU := tcfilter + KERNELTYPE = uncompressed KERNELNAME = $(call MAKE_KERNEL_NAME,$(KERNELTYPE)) KERNELPATH = $(KERNELDIR)/$(KERNELNAME) @@ -582,10 +594,10 @@ USER_VARS += KERNELTDXPATH_CLH USER_VARS += KERNELPATH_FC USER_VARS += KERNELVIRTIOFSPATH USER_VARS += FIRMWAREPATH +USER_VARS += FIRMWARETDVFPATH USER_VARS += FIRMWAREVOLUMEPATH USER_VARS += TDSHIMFIRMWAREPATH -USER_VARS += TDVFFIRMWAREPATH -USER_VARS += TDVFFIRMWAREVOLUMEPATH +USER_VARS += FIRMWARETDVFVOLUMEPATH USER_VARS += SEVFIRMWAREPATH USER_VARS += SNPFIRMWAREPATH USER_VARS += MACHINEACCELERATORS @@ -598,6 +610,7 @@ USER_VARS += TDXKERNELPARAMS_QEMU USER_VARS += TDXKERNELPARAMS_CLH USER_VARS += SEVKERNELPARAMS USER_VARS += SNPKERNELPARAMS +USER_VARS += KERNELTDXPARAMS USER_VARS += LIBEXECDIR USER_VARS += LOCALSTATEDIR USER_VARS += PKGDATADIR @@ -613,8 +626,11 @@ USER_VARS += PROJECT_TYPE USER_VARS += PROJECT_URL USER_VARS += QEMUBINDIR USER_VARS += QEMUCMD +USER_VARS += QEMUTDXCMD USER_VARS += QEMUPATH +USER_VARS += QEMUTDXPATH USER_VARS += QEMUVALIDHYPERVISORPATHS +USER_VARS += QEMUTDXVALIDHYPERVISORPATHS USER_VARS += QEMUVIRTIOFSCMD USER_VARS += QEMUVIRTIOFSPATH USER_VARS += QEMUSNPPATH @@ -648,6 +664,7 @@ USER_VARS += DEFBLOCKSTORAGEDRIVER_QEMU USER_VARS += DEFBLOCKDEVICEAIO_QEMU USER_VARS += DEFSHAREDFS_CLH_VIRTIOFS USER_VARS += DEFSHAREDFS_QEMU_VIRTIOFS +USER_VARS += DEFSHAREDFS_QEMU_TDX_VIRTIOFS USER_VARS += DEFVIRTIOFSDAEMON USER_VARS += DEFVALIDVIRTIOFSDAEMONPATHS USER_VARS += DEFVIRTIOFSCACHESIZE @@ -770,6 +787,10 @@ define MAKE_KERNEL_VIRTIOFS_NAME $(if $(findstring uncompressed,$1),vmlinux-virtiofs.container,vmlinuz-virtiofs.container) endef +define MAKE_KERNEL_TDX_NAME +$(if $(findstring uncompressed,$1),vmlinux-tdx.container,vmlinuz-tdx.container) +endef + GENERATED_FILES += pkg/katautils/config-settings.go $(RUNTIME_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST) | show-summary diff --git a/src/runtime/arch/amd64-options.mk b/src/runtime/arch/amd64-options.mk index ca6ea0bf8..cd9407d62 100644 --- a/src/runtime/arch/amd64-options.mk +++ b/src/runtime/arch/amd64-options.mk @@ -9,10 +9,10 @@ MACHINETYPE := q35 KERNELPARAMS := MACHINEACCELERATORS := CPUFEATURES := pmu=off -TDXCPUFEATURES := -vmx-rdseed-exit,pmu=off QEMUCMD := qemu-system-x86_64 -QEMUTDXCMD := qemu-system-x86_64-tdx +QEMUTDXCMD := qemu-system-x86_64-tdx-experimental +TDXCPUFEATURES := -vmx-rdseed-exit,pmu=off QEMUSNPCMD := qemu-system-x86_64-snp # Firecracker binary name diff --git 
a/src/runtime/cmd/kata-monitor/README.md b/src/runtime/cmd/kata-monitor/README.md index 5ebbc8cda..f6fcec1d3 100644 --- a/src/runtime/cmd/kata-monitor/README.md +++ b/src/runtime/cmd/kata-monitor/README.md @@ -52,6 +52,8 @@ The **log-level** allows the chose how verbose the logs should be. The default i **NOTE: The debug endpoints are available only if the [Kata Containers configuration file](https://github.com/kata-containers/kata-containers/blob/9d5b03a1b70bbd175237ec4b9f821d6ccee0a1f6/src/runtime/config/configuration-qemu.toml.in#L590-L592) includes** `enable_pprof = true` **in the** `[runtime]` **section**. +The `/metrics` has a query parameter `filter_family`, which filter Kata sandboxes metrics with specific names. If `filter_family` is set to `A` (and `B`, split with `,`), metrics with prefix `A` (and `B`) will only be returned. + The `/sandboxes` endpoint lists the _sandbox ID_ of all the detected Kata runtimes. If accessed via a web browser, it provides html links to the endpoints available for each sandbox. In order to retrieve data for a specific Kata workload, the _sandbox ID_ should be passed in the query string using the _sandbox_ key. The `/agent-url`, and all the `/debug/`* endpoints require `sandbox_id` to be specified in the query string. diff --git a/src/runtime/config/configuration-acrn.toml.in b/src/runtime/config/configuration-acrn.toml.in index 2d2b7065e..ef0207589 100644 --- a/src/runtime/config/configuration-acrn.toml.in +++ b/src/runtime/config/configuration-acrn.toml.in @@ -154,8 +154,8 @@ disable_selinux=@DEFDISABLESELINUX@ #debug_console_enabled = true # Agent connection dialing timeout value in seconds -# (default: 30) -#dial_timeout = 30 +# (default: 45) +dial_timeout = 45 [runtime] # If enabled, the runtime will log additional debug messages to the diff --git a/src/runtime/config/configuration-clh.toml.in b/src/runtime/config/configuration-clh.toml.in index 9f3381da6..bcaccd717 100644 --- a/src/runtime/config/configuration-clh.toml.in +++ b/src/runtime/config/configuration-clh.toml.in @@ -41,6 +41,11 @@ rootfs_type=@DEFROOTFSTYPE@ # Default false # confidential_guest = true +# Enable running clh VMM as a non-root user. +# By default clh VMM run as root. When this is set to true, clh VMM process runs as +# a non-root random user. See documentation for the limitations of this mode. 
+# rootless = true + # disable applying SELinux on the VMM process (default false) disable_selinux=@DEFDISABLESELINUX@ @@ -300,8 +305,8 @@ block_device_driver = "virtio-blk" #debug_console_enabled = true # Agent connection dialing timeout value in seconds -# (default: 30) -#dial_timeout = 30 +# (default: 45) +dial_timeout = 45 [runtime] # If enabled, the runtime will log additional debug messages to the diff --git a/src/runtime/config/configuration-fc.toml.in b/src/runtime/config/configuration-fc.toml.in index 10dc17700..e28316cfa 100644 --- a/src/runtime/config/configuration-fc.toml.in +++ b/src/runtime/config/configuration-fc.toml.in @@ -284,8 +284,8 @@ kernel_modules=[] #debug_console_enabled = true # Agent connection dialing timeout value in seconds -# (default: 30) -#dial_timeout = 30 +# (default: 45) +dial_timeout = 45 [runtime] # If enabled, the runtime will log additional debug messages to the diff --git a/src/runtime/config/configuration-qemu-gpu.toml.in b/src/runtime/config/configuration-qemu-gpu.toml.in new file mode 100644 index 000000000..33574b17d --- /dev/null +++ b/src/runtime/config/configuration-qemu-gpu.toml.in @@ -0,0 +1,692 @@ +# Copyright (c) 2017-2019 Intel Corporation +# Copyright (c) 2021 Adobe Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# XXX: WARNING: this file is auto-generated. +# XXX: +# XXX: Source file: "@CONFIG_QEMU_IN@" +# XXX: Project: +# XXX: Name: @PROJECT_NAME@ +# XXX: Type: @PROJECT_TYPE@ + +[hypervisor.qemu] +path = "@QEMUPATH@" +kernel = "@KERNELPATH@" +image = "@IMAGEPATH@" +# initrd = "@INITRDPATH@" +machine_type = "@MACHINETYPE@" + +# rootfs filesystem type: +# - ext4 (default) +# - xfs +# - erofs +rootfs_type=@DEFROOTFSTYPE@ + +# Enable confidential guest support. +# Toggling that setting may trigger different hardware features, ranging +# from memory encryption to both memory and CPU-state encryption and integrity. +# The Kata Containers runtime dynamically detects the available feature set and +# aims at enabling the largest possible one, returning an error if none is +# available, or none is supported by the hypervisor. +# +# Known limitations: +# * Does not work by design: +# - CPU Hotplug +# - Memory Hotplug +# - NVDIMM devices +# +# Default false +# confidential_guest = true + +# Choose AMD SEV-SNP confidential guests +# In case of using confidential guests on AMD hardware that supports both SEV +# and SEV-SNP, the following enables SEV-SNP guests. SEV guests are default. +# Default false +# sev_snp_guest = true + +# Enable running QEMU VMM as a non-root user. +# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as +# a non-root random user. See documentation for the limitations of this mode. +# rootless = true + +# List of valid annotation names for the hypervisor +# Each member of the list is a regular expression, which is the base name +# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path" +enable_annotations = @DEFENABLEANNOTATIONS@ + +# List of valid annotations values for the hypervisor +# Each member of the list is a path pattern as described by glob(3). +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: @QEMUVALIDHYPERVISORPATHS@ +valid_hypervisor_paths = @QEMUVALIDHYPERVISORPATHS@ + +# Optional space-separated list of options to pass to the guest kernel. +# For example, use `kernel_params = "vsyscall=emulate"` if you are having +# trouble running pre-2.15 glibc. 
+# +# WARNING: - any parameter specified here will take priority over the default +# parameter value of the same name used to start the virtual machine. +# Do not set values here unless you understand the impact of doing so as you +# may stop the virtual machine from booting. +# To see the list of default parameters, enable hypervisor debug, create a +# container and look for 'default-kernel-parameters' log entries. +kernel_params = "@KERNELPARAMS@" + +# Path to the firmware. +# If you want that qemu uses the default firmware leave this option empty +firmware = "@FIRMWAREPATH@" + +# Path to the firmware volume. +# firmware TDVF or OVMF can be split into FIRMWARE_VARS.fd (UEFI variables +# as configuration) and FIRMWARE_CODE.fd (UEFI program image). UEFI variables +# can be customized per each user while UEFI code is kept same. +firmware_volume = "@FIRMWAREVOLUMEPATH@" + +# Machine accelerators +# comma-separated list of machine accelerators to pass to the hypervisor. +# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"` +machine_accelerators="@MACHINEACCELERATORS@" + +# Qemu seccomp sandbox feature +# comma-separated list of seccomp sandbox features to control the syscall access. +# For example, `seccompsandbox= "on,obsolete=deny,spawn=deny,resourcecontrol=deny"` +# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox +# Another note: enabling this feature may reduce performance, you may enable +# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html +#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@" + +# CPU features +# comma-separated list of cpu features to pass to the cpu +# For example, `cpu_features = "pmu=off,vmx=off" +cpu_features="@CPUFEATURES@" + +# Default number of vCPUs per SB/VM: +# unspecified or 0 --> will be set to @DEFVCPUS@ +# < 0 --> will be set to the actual number of physical cores +# > 0 <= number of physical cores --> will be set to the specified number +# > number of physical cores --> will be set to the actual number of physical cores +default_vcpus = 1 + +# Default maximum number of vCPUs per SB/VM: +# unspecified or == 0 --> will be set to the actual number of physical cores or to the maximum number +# of vCPUs supported by KVM if that number is exceeded +# > 0 <= number of physical cores --> will be set to the specified number +# > number of physical cores --> will be set to the actual number of physical cores or to the maximum number +# of vCPUs supported by KVM if that number is exceeded +# WARNING: Depending of the architecture, the maximum number of vCPUs supported by KVM is used when +# the actual number of physical cores is greater than it. +# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU +# the hotplug functionality. For example, `default_maxvcpus = 240` specifies that until 240 vCPUs +# can be added to a SB/VM, but the memory footprint will be big. Another example, with +# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of +# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable, +# unless you know what are you doing. +# NOTICE: on arm platform with gicv2 interrupt controller, set it to 8. +default_maxvcpus = @DEFMAXVCPUS@ + +# Bridges can be used to hot plug devices. +# Limitations: +# * Currently only pci bridges are supported +# * Until 30 devices per bridge can be hot plugged. 
+# * Until 5 PCI bridges can be cold plugged per VM. +# This limitation could be a bug in qemu or in the kernel +# Default number of bridges per SB/VM: +# unspecified or 0 --> will be set to @DEFBRIDGES@ +# > 1 <= 5 --> will be set to the specified number +# > 5 --> will be set to 5 +default_bridges = @DEFBRIDGES@ + +# Default memory size in MiB for SB/VM. +# If unspecified then it will be set @DEFMEMSZ@ MiB. +default_memory = @DEFMEMSZ@ +# +# Default memory slots per SB/VM. +# If unspecified then it will be set @DEFMEMSLOTS@. +# This is will determine the times that memory will be hotadded to sandbox/VM. +#memory_slots = @DEFMEMSLOTS@ + +# Default maximum memory in MiB per SB / VM +# unspecified or == 0 --> will be set to the actual amount of physical RAM +# > 0 <= amount of physical RAM --> will be set to the specified number +# > amount of physical RAM --> will be set to the actual amount of physical RAM +default_maxmemory = @DEFMAXMEMSZ@ + +# The size in MiB will be plused to max memory of hypervisor. +# It is the memory address space for the NVDIMM devie. +# If set block storage driver (block_device_driver) to "nvdimm", +# should set memory_offset to the size of block device. +# Default 0 +#memory_offset = 0 + +# Specifies virtio-mem will be enabled or not. +# Please note that this option should be used with the command +# "echo 1 > /proc/sys/vm/overcommit_memory". +# Default false +#enable_virtio_mem = true + +# Disable block device from being used for a container's rootfs. +# In case of a storage driver like devicemapper where a container's +# root file system is backed by a block device, the block device is passed +# directly to the hypervisor for performance reasons. +# This flag prevents the block device from being passed to the hypervisor, +# virtio-fs is used instead to pass the rootfs. +disable_block_device_use = @DEFDISABLEBLOCK@ + +# Shared file system type: +# - virtio-fs (default) +# - virtio-9p +# - virtio-fs-nydus +shared_fs = "@DEFSHAREDFS_QEMU_VIRTIOFS@" + +# Path to vhost-user-fs daemon. +virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@" + +# List of valid annotations values for the virtiofs daemon +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: @DEFVALIDVIRTIOFSDAEMONPATHS@ +valid_virtio_fs_daemon_paths = @DEFVALIDVIRTIOFSDAEMONPATHS@ + +# Default size of DAX cache in MiB +virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@ + +# Default size of virtqueues +virtio_fs_queue_size = @DEFVIRTIOFSQUEUESIZE@ + +# Extra args for virtiofsd daemon +# +# Format example: +# ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"] +# Examples: +# Set virtiofsd log level to debug : ["-o", "log_level=debug"] or ["-d"] +# +# see `virtiofsd -h` for possible options. +virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@ + +# Cache mode: +# +# - never +# Metadata, data, and pathname lookup are not cached in guest. They are +# always fetched from host and any changes are immediately pushed to host. +# +# - auto +# Metadata and pathname lookup cache expires after a configured amount of +# time (default is 1 second). Data is cached while the file is open (close +# to open consistency). +# +# - always +# Metadata, data, and pathname lookup are cached in guest and never expire. +virtio_fs_cache = "@DEFVIRTIOFSCACHE@" + +# Block storage driver to be used for the hypervisor in case the container +# rootfs is backed by a block device. This is virtio-scsi, virtio-blk +# or nvdimm. 
+block_device_driver = "@DEFBLOCKSTORAGEDRIVER_QEMU@" + +# aio is the I/O mechanism used by qemu +# Options: +# +# - threads +# Pthread based disk I/O. +# +# - native +# Native Linux I/O. +# +# - io_uring +# Linux io_uring API. This provides the fastest I/O operations on Linux, requires kernel>5.1 and +# qemu >=5.0. +block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@" + +# Specifies cache-related options will be set to block devices or not. +# Default false +#block_device_cache_set = true + +# Specifies cache-related options for block devices. +# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled. +# Default false +#block_device_cache_direct = true + +# Specifies cache-related options for block devices. +# Denotes whether flush requests for the device are ignored. +# Default false +#block_device_cache_noflush = true + +# Enable iothreads (data-plane) to be used. This causes IO to be +# handled in a separate IO thread. This is currently only implemented +# for SCSI. +# +enable_iothreads = @DEFENABLEIOTHREADS@ + +# Enable pre allocation of VM RAM, default false +# Enabling this will result in lower container density +# as all of the memory will be allocated and locked +# This is useful when you want to reserve all the memory +# upfront or in the cases where you want memory latencies +# to be very predictable +# Default false +#enable_mem_prealloc = true + +# Enable huge pages for VM RAM, default false +# Enabling this will result in the VM memory +# being allocated using huge pages. +# This is useful when you want to use vhost-user network +# stacks within the container. This will automatically +# result in memory pre allocation +#enable_hugepages = true + +# Enable vhost-user storage device, default false +# Enabling this will result in some Linux reserved block type +# major range 240-254 being chosen to represent vhost-user devices. +enable_vhost_user_store = @DEFENABLEVHOSTUSERSTORE@ + +# The base directory specifically used for vhost-user devices. +# Its sub-path "block" is used for block devices; "block/sockets" is +# where we expect vhost-user sockets to live; "block/devices" is where +# simulated block device nodes for vhost-user devices to live. +vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@" + +# Enable vIOMMU, default false +# Enabling this will result in the VM having a vIOMMU device +# This will also add the following options to the kernel's +# command line: intel_iommu=on,iommu=pt +#enable_iommu = true + +# Enable IOMMU_PLATFORM, default false +# Enabling this will result in the VM device having iommu_platform=on set +#enable_iommu_platform = true + +# List of valid annotations values for the vhost user store path +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: @DEFVALIDVHOSTUSERSTOREPATHS@ +valid_vhost_user_store_paths = @DEFVALIDVHOSTUSERSTOREPATHS@ + +# The timeout for reconnecting on non-server spdk sockets when the remote end goes away. +# qemu will delay this many seconds and then attempt to reconnect. +# Zero disables reconnecting, and the default is zero. +vhost_user_reconnect_timeout_sec = 0 + +# Enable file based guest memory support. The default is an empty string which +# will disable this feature. In the case of virtio-fs, this is enabled +# automatically and '/dev/shm' is used as the backing folder. +# This option will be ignored if VM templating is enabled. 
+#file_mem_backend = "@DEFFILEMEMBACKEND@" + +# List of valid annotations values for the file_mem_backend annotation +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: @DEFVALIDFILEMEMBACKENDS@ +valid_file_mem_backends = @DEFVALIDFILEMEMBACKENDS@ + +# -pflash can add image file to VM. The arguments of it should be in format +# of ["/path/to/flash0.img", "/path/to/flash1.img"] +pflashes = [] + +# This option changes the default hypervisor and kernel parameters +# to enable debug output where available. And Debug also enable the hmp socket. +# +# Default false +#enable_debug = true + +# Disable the customizations done in the runtime when it detects +# that it is running on top a VMM. This will result in the runtime +# behaving as it would when running on bare metal. +# +#disable_nesting_checks = true + +# This is the msize used for 9p shares. It is the number of bytes +# used for 9p packet payload. +#msize_9p = @DEFMSIZE9P@ + +# If false and nvdimm is supported, use nvdimm device to plug guest image. +# Otherwise virtio-block device is used. +# +# nvdimm is not supported when `confidential_guest = true`. +# +# Default is false +#disable_image_nvdimm = true + +# VFIO devices are hotplugged on a bridge by default. +# Enable hotplugging on root bus. This may be required for devices with +# a large PCI bar, as this is a current limitation with hotplugging on +# a bridge. +# Default false +hotplug_vfio_on_root_bus = true + +# Before hot plugging a PCIe device, you need to add a pcie_root_port device. +# Use this parameter when using some large PCI bar devices, such as Nvidia GPU +# The value means the number of pcie_root_port +# This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35" +# Default 0 +pcie_root_port = 1 + +# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off +# security (vhost-net runs ring0) for network I/O performance. +#disable_vhost_net = true + +# +# Default entropy source. +# The path to a host source of entropy (including a real hardware RNG) +# /dev/urandom and /dev/random are two main options. +# Be aware that /dev/random is a blocking source of entropy. If the host +# runs out of entropy, the VMs boot time will increase leading to get startup +# timeouts. +# The source of entropy /dev/urandom is non-blocking and provides a +# generally acceptable source of entropy. It should work well for pretty much +# all practical purposes. +#entropy_source= "@DEFENTROPYSOURCE@" + +# List of valid annotations values for entropy_source +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: @DEFVALIDENTROPYSOURCES@ +valid_entropy_sources = @DEFVALIDENTROPYSOURCES@ + +# Path to OCI hook binaries in the *guest rootfs*. +# This does not affect host-side hooks which must instead be added to +# the OCI spec passed to the runtime. +# +# You can create a rootfs with hooks by customizing the osbuilder scripts: +# https://github.com/kata-containers/kata-containers/tree/main/tools/osbuilder +# +# Hooks must be stored in a subdirectory of guest_hook_path according to their +# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}". +# The agent will scan these directories for executable files and add them, in +# lexicographical order, to the lifecycle of the guest container. +# Hooks are executed in the runtime namespace of the guest. 
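The guest hook comments above describe a specific scanning behaviour: per hook type, executables under guest_hook_path/{prestart,poststart,poststop} are collected in lexicographical order, and scan errors are only warned about rather than aborting the container. A minimal sketch of that behaviour, assuming the default /etc/oci/hooks.d path set above (the real scan happens inside the guest agent, not in runtime Go code):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sort"
)

// listHooks returns the executable files under <guestHookPath>/<hookType>
// in lexicographical order. Error handling is deliberately lenient to match
// the "warn but do not abort" semantics described in the comment.
func listHooks(guestHookPath, hookType string) []string {
	dir := filepath.Join(guestHookPath, hookType)
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Fprintf(os.Stderr, "warning: cannot scan %s: %v\n", dir, err)
		return nil
	}
	var hooks []string
	for _, e := range entries {
		info, err := e.Info()
		if err != nil || e.IsDir() {
			continue
		}
		if info.Mode()&0o111 != 0 { // any execute bit set
			hooks = append(hooks, filepath.Join(dir, e.Name()))
		}
	}
	sort.Strings(hooks)
	return hooks
}

func main() {
	for _, h := range listHooks("/etc/oci/hooks.d", "prestart") {
		fmt.Println(h)
	}
}
```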
See the official documentation: +# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks +# Warnings will be logged if any error is encountered while scanning for hooks, +# but it will not abort container execution. +guest_hook_path = "/etc/oci/hooks.d" +# +# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM). +# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic. +# Default 0-sized value means unlimited rate. +#rx_rate_limiter_max_rate = 0 +# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM). +# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block) +# to discipline traffic. +# Default 0-sized value means unlimited rate. +#tx_rate_limiter_max_rate = 0 + +# Set where to save the guest memory dump file. +# If set, when GUEST_PANICKED event occurred, +# guest memeory will be dumped to host filesystem under guest_memory_dump_path, +# This directory will be created automatically if it does not exist. +# +# The dumped file(also called vmcore) can be processed with crash or gdb. +# +# WARNING: +# Dump guest’s memory can take very long depending on the amount of guest memory +# and use much disk space. +#guest_memory_dump_path="/var/crash/kata" + +# If enable paging. +# Basically, if you want to use "gdb" rather than "crash", +# or need the guest-virtual addresses in the ELF vmcore, +# then you should enable paging. +# +# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details +#guest_memory_dump_paging=false + +# Enable swap in the guest. Default false. +# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device +# if the swappiness of a container (set by annotation "io.katacontainers.container.resource.swappiness") +# is bigger than 0. +# The size of the swap device should be +# swap_in_bytes (set by annotation "io.katacontainers.container.resource.swap_in_bytes") - memory_limit_in_bytes. +# If swap_in_bytes is not set, the size should be memory_limit_in_bytes. +# If swap_in_bytes and memory_limit_in_bytes is not set, the size should +# be default_memory. +#enable_guest_swap = true + +# use legacy serial for guest console if available and implemented for architecture. Default false +#use_legacy_serial = true + +# disable applying SELinux on the VMM process (default false) +disable_selinux=@DEFDISABLESELINUX@ + +# disable applying SELinux on the container process +# If set to false, the type `container_t` is applied to the container process by default. +# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built +# with `SELINUX=yes`. +# (default: true) +disable_guest_selinux=@DEFDISABLEGUESTSELINUX@ + + +[factory] +# VM templating support. Once enabled, new VMs are created from template +# using vm cloning. They will share the same initial kernel, initramfs and +# agent memory by mapping it readonly. It helps speeding up new container +# creation and saves a lot of memory if there are many kata containers running +# on the same host. +# +# When disabled, new VMs are created from scratch. +# +# Note: Requires "initrd=" to be set ("image=" is not supported). +# +# Default false +#enable_template = true + +# Specifies the path of template. 
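The guest swap comment above defines the swap device size arithmetically. A small worked example of that rule, treating 0 as "annotation not set" (an encoding chosen for this sketch only):

```go
package main

import "fmt"

// swapSizeBytes applies the documented sizing rule: swap_in_bytes minus
// memory_limit_in_bytes when both annotations are set, memory_limit_in_bytes
// when only it is set, and default_memory otherwise.
func swapSizeBytes(swapInBytes, memLimitInBytes, defaultMemoryBytes int64) int64 {
	switch {
	case swapInBytes > 0 && memLimitInBytes > 0:
		return swapInBytes - memLimitInBytes
	case memLimitInBytes > 0:
		return memLimitInBytes
	default:
		return defaultMemoryBytes
	}
}

func main() {
	const mib = int64(1) << 20
	// swappiness > 0, swap_in_bytes = 4 GiB, memory_limit_in_bytes = 1 GiB
	// -> a 3 GiB swap device is added to the guest.
	fmt.Println(swapSizeBytes(4096*mib, 1024*mib, 2048*mib) / mib) // 3072
}
```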
+# +# Default "/run/vc/vm/template" +#template_path = "/run/vc/vm/template" + +# The number of caches of VMCache: +# unspecified or == 0 --> VMCache is disabled +# > 0 --> will be set to the specified number +# +# VMCache is a function that creates VMs as caches before using it. +# It helps speed up new container creation. +# The function consists of a server and some clients communicating +# through Unix socket. The protocol is gRPC in protocols/cache/cache.proto. +# The VMCache server will create some VMs and cache them by factory cache. +# It will convert the VM to gRPC format and transport it when gets +# requestion from clients. +# Factory grpccache is the VMCache client. It will request gRPC format +# VM and convert it back to a VM. If VMCache function is enabled, +# kata-runtime will request VM from factory grpccache when it creates +# a new sandbox. +# +# Default 0 +#vm_cache_number = 0 + +# Specify the address of the Unix socket that is used by VMCache. +# +# Default /var/run/kata-containers/cache.sock +#vm_cache_endpoint = "/var/run/kata-containers/cache.sock" + +[agent.@PROJECT_TYPE@] +# If enabled, make the agent display debug-level messages. +# (default: disabled) +#enable_debug = true + +# Enable agent tracing. +# +# If enabled, the agent will generate OpenTelemetry trace spans. +# +# Notes: +# +# - If the runtime also has tracing enabled, the agent spans will be +# associated with the appropriate runtime parent span. +# - If enabled, the runtime will wait for the container to shutdown, +# increasing the container shutdown time slightly. +# +# (default: disabled) +#enable_tracing = true + +# Comma separated list of kernel modules and their parameters. +# These modules will be loaded in the guest kernel using modprobe(8). +# The following example can be used to load two kernel modules with parameters +# - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"] +# The first word is considered as the module name and the rest as its parameters. +# Container will not be started when: +# * A kernel module is specified and the modprobe command is not installed in the guest +# or it fails loading the module. +# * The module is not available in the guest or it doesn't met the guest kernel +# requirements, like architecture and version. +# +kernel_modules=[] + +# Enable debug console. + +# If enabled, user can connect guest OS running inside hypervisor +# through "kata-runtime exec " command + +#debug_console_enabled = true + +# Agent connection dialing timeout value in seconds +# (default: 30) +#dial_timeout = 30 + +[runtime] +# If enabled, the runtime will log additional debug messages to the +# system log +# (default: disabled) +#enable_debug = true +# +# Internetworking model +# Determines how the VM should be connected to the +# the container network interface +# Options: +# +# - macvtap +# Used when the Container network interface can be bridged using +# macvtap. +# +# - none +# Used when customize network. Only creates a tap device. No veth pair. +# +# - tcfilter +# Uses tc filter rules to redirect traffic from the network interface +# provided by plugin to a tap interface connected to the VM. +# +internetworking_model="@DEFNETWORKMODEL_QEMU@" + +# disable guest seccomp +# Determines whether container seccomp profiles are passed to the virtual +# machine and applied by the kata agent. 
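Back in the agent section above, each kernel_modules entry is documented as "the first word is the module name, the rest are its parameters". A minimal sketch of that split, using the i915 example from the comment:

```go
package main

import (
	"fmt"
	"strings"
)

// splitModule turns one kernel_modules entry into a modprobe-style pair:
// the first field is the module name, the remaining fields its parameters.
func splitModule(entry string) (name string, params []string) {
	fields := strings.Fields(entry)
	if len(fields) == 0 {
		return "", nil
	}
	return fields[0], fields[1:]
}

func main() {
	name, params := splitModule("i915 enable_ppgtt=0")
	fmt.Println(name, params) // i915 [enable_ppgtt=0]
}
```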
If set to true, seccomp is not applied +# within the guest +# (default: true) +disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@ + +# vCPUs pinning settings +# if enabled, each vCPU thread will be scheduled to a fixed CPU +# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet) +# enable_vcpus_pinning = false + +# Apply a custom SELinux security policy to the container process inside the VM. +# This is used when you want to apply a type other than the default `container_t`, +# so general users should not uncomment and apply it. +# (format: "user:role:type") +# Note: You cannot specify MCS policy with the label because the sensitivity levels and +# categories are determined automatically by high-level container runtimes such as containerd. +#guest_selinux_label="@DEFGUESTSELINUXLABEL@" + +# If enabled, the runtime will create opentracing.io traces and spans. +# (See https://www.jaegertracing.io/docs/getting-started). +# (default: disabled) +#enable_tracing = true + +# Set the full url to the Jaeger HTTP Thrift collector. +# The default if not set will be "http://localhost:14268/api/traces" +#jaeger_endpoint = "" + +# Sets the username to be used if basic auth is required for Jaeger. +#jaeger_user = "" + +# Sets the password to be used if basic auth is required for Jaeger. +#jaeger_password = "" + +# If enabled, the runtime will not create a network namespace for shim and hypervisor processes. +# This option may have some potential impacts to your host. It should only be used when you know what you're doing. +# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only +# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge +# (like OVS) directly. +# (default: false) +#disable_new_netns = true + +# if enabled, the runtime will add all the kata processes inside one dedicated cgroup. +# The container cgroups in the host are not created, just one single cgroup per sandbox. +# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox. +# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation. +# The sandbox cgroup is constrained if there is no container type annotation. +# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType +sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@ + +# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In +# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful +# when a hardware architecture or hypervisor solutions is utilized which does not support CPU and/or memory hotplug. +# Compatibility for determining appropriate sandbox (VM) size: +# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O +# does not yet support sandbox sizing annotations. +# - When running single containers using a tool like ctr, container sizing information will be available. +static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT@ + +# If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path. +# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory. 
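On the vCPU pinning settings above: pinning is only attempted when the number of vCPU threads matches the number of CPUs in the sandbox's CPUSet. The sketch below only builds the 1:1 assignment implied by that condition; actually applying it would require sched_setaffinity on the hypervisor threads, and the thread and CPU IDs here are invented:

```go
package main

import "fmt"

// planVcpuPinning returns a thread-to-CPU map when the "qualified condition"
// holds (equal counts), and false otherwise, leaving scheduling to the kernel.
func planVcpuPinning(vcpuThreadIDs []int, cpuset []int) (map[int]int, bool) {
	if len(vcpuThreadIDs) != len(cpuset) {
		return nil, false
	}
	plan := make(map[int]int, len(vcpuThreadIDs))
	for i, tid := range vcpuThreadIDs {
		plan[tid] = cpuset[i]
	}
	return plan, true
}

func main() {
	plan, ok := planVcpuPinning([]int{1201, 1202}, []int{4, 5})
	fmt.Println(ok, plan) // true map[1201:4 1202:5]
}
```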
+# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts` +# These will not be exposed to the container workloads, and are only provided for potential guest services. +sandbox_bind_mounts=@DEFBINDMOUNTS@ + +# VFIO Mode +# Determines how VFIO devices should be be presented to the container. +# Options: +# +# - vfio +# Matches behaviour of OCI runtimes (e.g. runc) as much as +# possible. VFIO devices will appear in the container as VFIO +# character devices under /dev/vfio. The exact names may differ +# from the host (they need to match the VM's IOMMU group numbers +# rather than the host's) +# +# - guest-kernel +# This is a Kata-specific behaviour that's useful in certain cases. +# The VFIO device is managed by whatever driver in the VM kernel +# claims it. This means it will appear as one or more device nodes +# or network interfaces depending on the nature of the device. +# Using this mode requires specially built workloads that know how +# to locate the relevant device interfaces within the VM. +# +vfio_mode="@DEFVFIOMODE@" + +# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will +# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest. +disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@ + +# Enabled experimental feature list, format: ["a", "b"]. +# Experimental features are features not stable enough for production, +# they may break compatibility, and are prepared for a big version bump. +# Supported experimental features: +# (default: []) +experimental=@DEFAULTEXPFEATURES@ + +# If enabled, user can run pprof tools with shim v2 process through kata-monitor. +# (default: false) +# enable_pprof = true + +# WARNING: All the options in the following section have not been implemented yet. +# This section was added as a placeholder. DO NOT USE IT! +[image] +# Container image service. +# +# Offload the CRI image management service to the Kata agent. +# (default: false) +#service_offload = true + +# Container image decryption keys provisioning. +# Applies only if service_offload is true. +# Keys can be provisioned locally (e.g. through a special command or +# a local file) or remotely (usually after the guest is remotely attested). +# The provision setting is a complete URL that lets the Kata agent decide +# which method to use in order to fetch the keys. +# +# Keys can be stored in a local file, in a measured and attested initrd: +#provision=data:///local/key/file +# +# Keys could be fetched through a special command or binary from the +# initrd (guest) image, e.g. a firmware call: +#provision=file:///path/to/bin/fetcher/in/guest +# +# Keys can be remotely provisioned. The Kata agent fetches them from e.g. +# a HTTPS URL: +#provision=https://my-key-broker.foo/tenant/ diff --git a/src/runtime/config/configuration-qemu-tdx.toml.in b/src/runtime/config/configuration-qemu-tdx.toml.in index 33616a997..3adfb480a 100644 --- a/src/runtime/config/configuration-qemu-tdx.toml.in +++ b/src/runtime/config/configuration-qemu-tdx.toml.in @@ -14,9 +14,16 @@ [hypervisor.qemu] path = "@QEMUTDXPATH@" kernel = "@KERNELTDXPATH@" -image = "@IMAGETDXPATH@" +image = "@IMAGEPATH@" +# initrd = "@INITRDPATH@" machine_type = "@MACHINETYPE@" +# rootfs filesystem type: +# - ext4 (default) +# - xfs +# - erofs +rootfs_type=@DEFROOTFSTYPE@ + # Enable confidential guest support. 
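For the [image] provisioning comments above, the provision URL's scheme is what selects the key-fetching method. A hedged sketch of such a dispatch: the scheme names come from the config comments, while the returned descriptions are placeholders for whatever the agent would actually do.

```go
package main

import (
	"fmt"
	"net/url"
)

// provisionMethod maps the provision URL scheme to a key-fetching strategy.
func provisionMethod(provision string) (string, error) {
	u, err := url.Parse(provision)
	if err != nil {
		return "", err
	}
	switch u.Scheme {
	case "data":
		return "read keys from a local file baked into the measured initrd", nil
	case "file":
		return "run a fetcher binary shipped in the guest image", nil
	case "https":
		return "fetch keys from a remote key broker service", nil
	default:
		return "", fmt.Errorf("unknown provision scheme %q", u.Scheme)
	}
}

func main() {
	m, _ := provisionMethod("https://my-key-broker.foo/tenant/")
	fmt.Println(m)
}
```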
# Toggling that setting may trigger different hardware features, ranging # from memory encryption to both memory and CPU-state encryption and integrity. @@ -59,17 +66,17 @@ valid_hypervisor_paths = @QEMUTDXVALIDHYPERVISORPATHS@ # may stop the virtual machine from booting. # To see the list of default parameters, enable hypervisor debug, create a # container and look for 'default-kernel-parameters' log entries. -kernel_params = "@TDXKERNELPARAMS_QEMU@" +kernel_params = "@KERNELTDXPARAMS@" # Path to the firmware. # If you want that qemu uses the default firmware leave this option empty -firmware = "@TDVFFIRMWAREPATH@" +firmware = "@FIRMWARETDVFPATH@" # Path to the firmware volume. # firmware TDVF or OVMF can be split into FIRMWARE_VARS.fd (UEFI variables # as configuration) and FIRMWARE_CODE.fd (UEFI program image). UEFI variables # can be customized per each user while UEFI code is kept same. -firmware_volume = "@TDVFFIRMWAREVOLUMEPATH@" +firmware_volume = "@FIRMWARETDVFVOLUMEPATH@" # Machine accelerators # comma-separated list of machine accelerators to pass to the hypervisor. @@ -165,7 +172,7 @@ disable_block_device_use = @DEFDISABLEBLOCK@ # - virtio-fs (default) # - virtio-9p # - virtio-fs-nydus -shared_fs = "@DEFSHAREDFS_QEMU_VIRTIOFS@" +shared_fs = "@DEFSHAREDFS_QEMU_TDX_VIRTIOFS@" # Path to vhost-user-fs daemon. virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@" @@ -178,6 +185,9 @@ valid_virtio_fs_daemon_paths = @DEFVALIDVIRTIOFSDAEMONPATHS@ # Default size of DAX cache in MiB virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@ +# Default size of virtqueues +virtio_fs_queue_size = @DEFVIRTIOFSQUEUESIZE@ + # Extra args for virtiofsd daemon # # Format example: @@ -190,7 +200,7 @@ virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@ # Cache mode: # -# - none +# - never # Metadata, data, and pathname lookup are not cached in guest. They are # always fetched from host and any changes are immediately pushed to host. # @@ -208,6 +218,20 @@ virtio_fs_cache = "@DEFVIRTIOFSCACHE@" # or nvdimm. block_device_driver = "@DEFBLOCKSTORAGEDRIVER_QEMU@" +# aio is the I/O mechanism used by qemu +# Options: +# +# - threads +# Pthread based disk I/O. +# +# - native +# Native Linux I/O. +# +# - io_uring +# Linux io_uring API. This provides the fastest I/O operations on Linux, requires kernel>5.1 and +# qemu >=5.0. +block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@" + # Specifies cache-related options will be set to block devices or not. # Default false #block_device_cache_set = true @@ -271,6 +295,11 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@" # Your distribution recommends: @DEFVALIDVHOSTUSERSTOREPATHS@ valid_vhost_user_store_paths = @DEFVALIDVHOSTUSERSTOREPATHS@ +# The timeout for reconnecting on non-server spdk sockets when the remote end goes away. +# qemu will delay this many seconds and then attempt to reconnect. +# Zero disables reconnecting, and the default is zero. +vhost_user_reconnect_timeout_sec = 0 + # Enable file based guest memory support. The default is an empty string which # will disable this feature. In the case of virtio-fs, this is enabled # automatically and '/dev/shm' is used as the backing folder. @@ -287,7 +316,7 @@ valid_file_mem_backends = @DEFVALIDFILEMEMBACKENDS@ pflashes = [] # This option changes the default hypervisor and kernel parameters -# to enable debug output where available. +# to enable debug output where available. And Debug also enable the hmp socket. 
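block_device_aio, newly documented above for the TDX configuration as well, offers threads, native and io_uring, with io_uring gated on kernel > 5.1 and QEMU >= 5.0. An illustrative selector for that rule; version probing is left out and the fallback is simply the configured default:

```go
package main

import "fmt"

// pickAIO returns io_uring when both the kernel (> 5.1) and QEMU (>= 5.0)
// are new enough, otherwise the configured default ("threads" or "native").
func pickAIO(kernelMajor, kernelMinor, qemuMajor int, defaultAIO string) string {
	ioUringKernel := kernelMajor > 5 || (kernelMajor == 5 && kernelMinor > 1)
	if ioUringKernel && qemuMajor >= 5 {
		return "io_uring"
	}
	return defaultAIO
}

func main() {
	fmt.Println(pickAIO(5, 15, 7, "threads")) // io_uring
	fmt.Println(pickAIO(4, 19, 4, "threads")) // threads
}
```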
# # Default false #enable_debug = true @@ -499,8 +528,8 @@ kernel_modules=[] #debug_console_enabled = true # Agent connection dialing timeout value in seconds -# (default: 30) -#dial_timeout = 30 +# (default: 60) +dial_timeout = 60 [runtime] # If enabled, the runtime will log additional debug messages to the @@ -533,6 +562,11 @@ internetworking_model="@DEFNETWORKMODEL_QEMU@" # (default: true) disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@ +# vCPUs pinning settings +# if enabled, each vCPU thread will be scheduled to a fixed CPU +# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet) +# enable_vcpus_pinning = false + # Apply a custom SELinux security policy to the container process inside the VM. # This is used when you want to apply a type other than the default `container_t`, # so general users should not uncomment and apply it. @@ -630,7 +664,7 @@ experimental=@DEFAULTEXPFEATURES@ # # Offload the CRI image management service to the Kata agent. # (default: false) -service_offload = @DEFSERVICEOFFLOAD@ +#service_offload = true # Container image decryption keys provisioning. # Applies only if service_offload is true. diff --git a/src/runtime/config/configuration-qemu.toml.in b/src/runtime/config/configuration-qemu.toml.in index 030b200fd..add7ed607 100644 --- a/src/runtime/config/configuration-qemu.toml.in +++ b/src/runtime/config/configuration-qemu.toml.in @@ -563,8 +563,8 @@ kernel_modules=[] #debug_console_enabled = true # Agent connection dialing timeout value in seconds -# (default: 30) -#dial_timeout = 30 +# (default: 45) +dial_timeout = 45 [runtime] # If enabled, the runtime will log additional debug messages to the diff --git a/src/runtime/pkg/containerd-shim-v2/shim_management.go b/src/runtime/pkg/containerd-shim-v2/shim_management.go index f9c31b8b2..0c6d5c6e2 100644 --- a/src/runtime/pkg/containerd-shim-v2/shim_management.go +++ b/src/runtime/pkg/containerd-shim-v2/shim_management.go @@ -243,7 +243,7 @@ func (s *service) genericIPTablesHandler(w http.ResponseWriter, r *http.Request, func (s *service) startManagementServer(ctx context.Context, ociSpec *specs.Spec) { // metrics socket will under sandbox's bundle path - metricsAddress := SocketAddress(s.id) + metricsAddress := ServerSocketAddress(s.id) listener, err := cdshim.NewSocket(metricsAddress) if err != nil { @@ -312,14 +312,38 @@ func GetSandboxesStoragePathRust() string { return "/run/kata" } -// SocketAddress returns the address of the unix domain socket for communicating with the +// SocketPath returns the path of the socket using the given storagePath +func SocketPath(id string, storagePath string) string { + return filepath.Join(string(filepath.Separator), storagePath, id, "shim-monitor.sock") +} + +// SocketPathGo returns the path of the socket to be used with the go runtime +func SocketPathGo(id string) string { + return SocketPath(id, GetSandboxesStoragePath()) +} + +// SocketPathRust returns the path of the socket to be used with the rust runtime +func SocketPathRust(id string) string { + return SocketPath(id, GetSandboxesStoragePathRust()) +} + +// ServerSocketAddress returns the address of the unix domain socket the shim management endpoint +// should listen. +// NOTE: this code is only called by the go shim management implementation. 
+func ServerSocketAddress(id string) string { + return fmt.Sprintf("unix://%s", SocketPathGo(id)) +} + +// ClientSocketAddress returns the address of the unix domain socket for communicating with the // shim management endpoint -func SocketAddress(id string) string { +// NOTE: this code allows various go clients, e.g. kata-runtime or kata-monitor commands, to +// connect to the rust shim management implementation. +func ClientSocketAddress(id string) string { // get the go runtime uds path - socketPath := filepath.Join(string(filepath.Separator), GetSandboxesStoragePath(), id, "shim-monitor.sock") + socketPath := SocketPathGo(id) // if the path not exist, use the rust runtime uds path instead if _, err := os.Stat(socketPath); err != nil { - return fmt.Sprintf("unix://%s", filepath.Join(string(filepath.Separator), GetSandboxesStoragePathRust(), id, "shim-monitor.sock")) + socketPath = SocketPathRust(id) } return fmt.Sprintf("unix://%s", socketPath) } diff --git a/src/runtime/pkg/device/drivers/vfio.go b/src/runtime/pkg/device/drivers/vfio.go index 94139aaa2..1099f8f0b 100644 --- a/src/runtime/pkg/device/drivers/vfio.go +++ b/src/runtime/pkg/device/drivers/vfio.go @@ -54,6 +54,25 @@ func NewVFIODevice(devInfo *config.DeviceInfo) *VFIODevice { } } +// Ignore specific PCI devices, supply the pciClass and the bitmask to check +// against the device class, deviceBDF for meaningfull info message +func (device *VFIODevice) checkIgnorePCIClass(pciClass string, deviceBDF string, bitmask uint64) (bool, error) { + if pciClass == "" { + return false, nil + } + pciClassID, err := strconv.ParseUint(pciClass, 0, 32) + if err != nil { + return false, err + } + // ClassID is 16 bits, remove the two trailing zeros + pciClassID = pciClassID >> 8 + if pciClassID&bitmask == bitmask { + deviceLogger().Infof("Ignoring PCI (Host) Bridge deviceBDF %v Class %x", deviceBDF, pciClassID) + return true, nil + } + return false, nil +} + // Attach is standard interface of api.Device, it's used to add device to some // DeviceReceiver func (device *VFIODevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (retErr error) { @@ -88,6 +107,18 @@ func (device *VFIODevice) Attach(ctx context.Context, devReceiver api.DeviceRece } id := utils.MakeNameID("vfio", device.DeviceInfo.ID+strconv.Itoa(i), maxDevIDSize) + pciClass := getPCIDeviceProperty(deviceBDF, PCISysFsDevicesClass) + // We need to ignore Host or PCI Bridges that are in the same IOMMU group as the + // passed-through devices. One CANNOT pass-through a PCI bridge or Host bridge. 
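checkIgnorePCIClass above decides whether an IOMMU-group member is a Host bridge or PCI bridge (class 0x0600 / 0x0604) that must not be passed through. Since the shift-and-mask is easy to misread, here is the same arithmetic as a standalone sketch with sample class strings; in the real code the value is read from sysfs:

```go
package main

import (
	"fmt"
	"strconv"
)

// isBridgeClass reproduces the arithmetic used by checkIgnorePCIClass:
// sysfs exposes the class as a hex string such as "0x060400"; dropping the
// low byte (the programming interface) leaves the 16-bit class/subclass,
// which is then matched against the 0x0600 host/PCI bridge mask.
func isBridgeClass(sysfsClass string) (bool, error) {
	const bridgeMask = uint64(0x0600)
	classID, err := strconv.ParseUint(sysfsClass, 0, 32)
	if err != nil {
		return false, err
	}
	classID >>= 8
	return classID&bridgeMask == bridgeMask, nil
}

func main() {
	for _, c := range []string{"0x060400", "0x060000", "0x030200"} {
		bridge, _ := isBridgeClass(c)
		fmt.Printf("%s -> bridge=%v\n", c, bridge) // the first two are bridges, the GPU class is not
	}
}
```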
+ // Class 0x0604 is PCI bridge, 0x0600 is Host bridge + ignorePCIDevice, err := device.checkIgnorePCIClass(pciClass, deviceBDF, 0x0600) + if err != nil { + return err + } + if ignorePCIDevice { + continue + } + var vfio config.VFIODev switch vfioDeviceType { @@ -100,7 +131,7 @@ func (device *VFIODevice) Attach(ctx context.Context, devReceiver api.DeviceRece BDF: deviceBDF, SysfsDev: deviceSysfsDev, IsPCIe: isPCIe, - Class: getPCIDeviceProperty(deviceBDF, PCISysFsDevicesClass), + Class: pciClass, } if isPCIe { vfioPCI.Bus = fmt.Sprintf("%s%d", pcieRootPortPrefix, len(AllPCIeDevs)) @@ -121,6 +152,7 @@ func (device *VFIODevice) Attach(ctx context.Context, devReceiver api.DeviceRece default: return fmt.Errorf("Failed to append device: VFIO device type unrecognized") } + device.VfioDevs = append(device.VfioDevs, &vfio) } diff --git a/src/runtime/pkg/govmm/qemu/qemu.go b/src/runtime/pkg/govmm/qemu/qemu.go index 5ff258aed..92345397b 100644 --- a/src/runtime/pkg/govmm/qemu/qemu.go +++ b/src/runtime/pkg/govmm/qemu/qemu.go @@ -363,16 +363,12 @@ func (object Object) QemuParams(config *Config) []string { case TDXGuest: objectParams = append(objectParams, string(object.Type)) + objectParams = append(objectParams, "sept-ve-disable=on") objectParams = append(objectParams, fmt.Sprintf("id=%s", object.ID)) if object.Debug { objectParams = append(objectParams, "debug=on") } - deviceParams = append(deviceParams, string(object.Driver)) - deviceParams = append(deviceParams, fmt.Sprintf("id=%s", object.DeviceID)) - deviceParams = append(deviceParams, fmt.Sprintf("file=%s", object.File)) - if object.FirmwareVolume != "" { - deviceParams = append(deviceParams, fmt.Sprintf("config-firmware-volume=%s", object.FirmwareVolume)) - } + config.Bios = object.File case SEVGuest: objectParams = append(objectParams, string(object.Type)) objectParams = append(objectParams, fmt.Sprintf("id=%s", object.ID)) diff --git a/src/runtime/pkg/kata-monitor/metrics.go b/src/runtime/pkg/kata-monitor/metrics.go index 98ecb68f0..e45a8f19d 100644 --- a/src/runtime/pkg/kata-monitor/metrics.go +++ b/src/runtime/pkg/kata-monitor/metrics.go @@ -114,25 +114,32 @@ func (km *KataMonitor) ProcessMetricsRequest(w http.ResponseWriter, r *http.Requ writer = gz } - // create encoder to encode metrics. - encoder := expfmt.NewEncoder(writer, contentType) - - // gather metrics collected for management agent. - mfs, err := prometheus.DefaultGatherer.Gather() + filterFamilies, err := getFilterFamilyFromReq(r) if err != nil { - monitorLog.WithError(err).Error("failed to Gather metrics from prometheus.DefaultGatherer") - w.WriteHeader(http.StatusInternalServerError) - w.Write([]byte(err.Error())) return } - // encode metric gathered in current process - if err := encodeMetricFamily(mfs, encoder); err != nil { - monitorLog.WithError(err).Warnf("failed to encode metrics") + // create encoder to encode metrics. + encoder := expfmt.NewEncoder(writer, contentType) + + if len(filterFamilies) == 0 { + // gather metrics collected for management agent. 
+ mfs, err := prometheus.DefaultGatherer.Gather() + if err != nil { + monitorLog.WithError(err).Error("failed to Gather metrics from prometheus.DefaultGatherer") + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(err.Error())) + return + } + + // encode metric gathered in current process + if err := encodeMetricFamily(mfs, encoder); err != nil { + monitorLog.WithError(err).Warnf("failed to encode metrics") + } } // aggregate sandboxes metrics and write to response by encoder - if err := km.aggregateSandboxMetrics(encoder); err != nil { + if err := km.aggregateSandboxMetrics(encoder, filterFamilies); err != nil { monitorLog.WithError(err).Errorf("failed aggregateSandboxMetrics") scrapeFailedCount.Inc() } @@ -155,7 +162,7 @@ func encodeMetricFamily(mfs []*dto.MetricFamily, encoder expfmt.Encoder) error { } // aggregateSandboxMetrics will get metrics from one sandbox and do some process -func (km *KataMonitor) aggregateSandboxMetrics(encoder expfmt.Encoder) error { +func (km *KataMonitor) aggregateSandboxMetrics(encoder expfmt.Encoder, filterFamilies []string) error { // get all kata sandboxes from cache sandboxes := km.sandboxCache.getSandboxList() // save running kata pods as a metrics. @@ -230,9 +237,21 @@ func (km *KataMonitor) aggregateSandboxMetrics(encoder expfmt.Encoder) error { } // write metrics to response. - for _, mf := range metricsMap { - if err := encoder.Encode(mf); err != nil { - return err + if len(filterFamilies) > 0 { + for _, filterName := range filterFamilies { + for fullName, mf := range metricsMap { + if strings.HasPrefix(fullName, filterName) { + if err := encoder.Encode(mf); err != nil { + return err + } + } + } + } + } else { + for _, mf := range metricsMap { + if err := encoder.Encode(mf); err != nil { + return err + } } } return nil diff --git a/src/runtime/pkg/kata-monitor/pprof.go b/src/runtime/pkg/kata-monitor/pprof.go index 0d768e428..afaae8556 100644 --- a/src/runtime/pkg/kata-monitor/pprof.go +++ b/src/runtime/pkg/kata-monitor/pprof.go @@ -32,7 +32,7 @@ func (km *KataMonitor) composeSocketAddress(r *http.Request) (string, error) { return "", err } - return shim.SocketAddress(sandbox), nil + return shim.ClientSocketAddress(sandbox), nil } func (km *KataMonitor) proxyRequest(w http.ResponseWriter, r *http.Request, diff --git a/src/runtime/pkg/kata-monitor/shim_client.go b/src/runtime/pkg/kata-monitor/shim_client.go index 388ac6fff..3730c8af0 100644 --- a/src/runtime/pkg/kata-monitor/shim_client.go +++ b/src/runtime/pkg/kata-monitor/shim_client.go @@ -8,6 +8,7 @@ package katamonitor import ( "fmt" "net/http" + "strings" "time" shim "github.com/kata-containers/kata-containers/src/runtime/pkg/containerd-shim-v2" @@ -36,3 +37,11 @@ func getSandboxIDFromReq(r *http.Request) (string, error) { func getSandboxFS() string { return shim.GetSandboxesStoragePath() } + +func getFilterFamilyFromReq(r *http.Request) ([]string, error) { + filterFamilies := r.URL.Query().Get("filter_family") + if filterFamilies != "" { + return strings.Split(filterFamilies, ","), nil + } + return nil, nil +} diff --git a/src/runtime/pkg/katautils/config.go b/src/runtime/pkg/katautils/config.go index 247093e78..fbe304b5e 100644 --- a/src/runtime/pkg/katautils/config.go +++ b/src/runtime/pkg/katautils/config.go @@ -786,6 +786,16 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) { kataUtilsLogger.Info("Setting 'disable_image_nvdimm = true' as microvm does not support NVDIMM") } + // Nvdimm can only be support when UEFI/ACPI is enabled on arm64, otherwise 
disable it. + if goruntime.GOARCH == "arm64" && firmware == "" { + if p, err := h.PFlash(); err == nil { + if len(p) == 0 { + h.DisableImageNvdimm = true + kataUtilsLogger.Info("Setting 'disable_image_nvdimm = true' if there is no firmware specified") + } + } + } + blockDriver, err := h.blockDeviceDriver() if err != nil { return vc.HypervisorConfig{}, err @@ -1071,6 +1081,7 @@ func newClhHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) { EnableAnnotations: h.EnableAnnotations, DisableSeccomp: h.DisableSeccomp, ConfidentialGuest: h.ConfidentialGuest, + Rootless: h.Rootless, DisableSeLinux: h.DisableSeLinux, DisableGuestSeLinux: h.DisableGuestSeLinux, NetRateLimiterBwMaxRate: h.getNetRateLimiterBwMaxRate(), diff --git a/src/runtime/pkg/katautils/config_test.go b/src/runtime/pkg/katautils/config_test.go index 683b76ddc..d9557afbe 100644 --- a/src/runtime/pkg/katautils/config_test.go +++ b/src/runtime/pkg/katautils/config_test.go @@ -13,6 +13,7 @@ import ( "path" "path/filepath" "reflect" + goruntime "runtime" "strings" "syscall" "testing" @@ -182,6 +183,10 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf SNPGuestPolicy: defaultSNPGuestPolicy, } + if goruntime.GOARCH == "arm64" && len(hypervisorConfig.PFlash) == 0 && hypervisorConfig.FirmwarePath == "" { + hypervisorConfig.DisableImageNvdimm = true + } + agentConfig := vc.KataAgentConfig{ LongLiveConn: true, } diff --git a/src/runtime/pkg/utils/shimclient/shim_management_client.go b/src/runtime/pkg/utils/shimclient/shim_management_client.go index 1b9635c17..28ef3708d 100644 --- a/src/runtime/pkg/utils/shimclient/shim_management_client.go +++ b/src/runtime/pkg/utils/shimclient/shim_management_client.go @@ -19,7 +19,7 @@ import ( // BuildShimClient builds and returns an http client for communicating with the provided sandbox func BuildShimClient(sandboxID string, timeout time.Duration) (*http.Client, error) { - return buildUnixSocketClient(shim.SocketAddress(sandboxID), timeout) + return buildUnixSocketClient(shim.ClientSocketAddress(sandboxID), timeout) } // buildUnixSocketClient build http client for Unix socket diff --git a/src/runtime/virtcontainers/clh.go b/src/runtime/virtcontainers/clh.go index f23393b97..cfd924099 100644 --- a/src/runtime/virtcontainers/clh.go +++ b/src/runtime/virtcontainers/clh.go @@ -19,6 +19,7 @@ import ( "net/http/httputil" "os" "os/exec" + "os/user" "path/filepath" "regexp" "strconv" @@ -37,6 +38,8 @@ import ( "github.com/kata-containers/kata-containers/src/runtime/pkg/device/config" hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors" "github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace" + pkgUtils "github.com/kata-containers/kata-containers/src/runtime/pkg/utils" + "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless" "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types" "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/utils" ) @@ -653,7 +656,7 @@ func (clh *cloudHypervisor) StartVM(ctx context.Context, timeout int) error { clh.Logger().WithField("function", "StartVM").Info("starting Sandbox") vmPath := filepath.Join(clh.config.VMStorePath, clh.id) - err := os.MkdirAll(vmPath, DirMode) + err := utils.MkdirAllWithInheritedOwner(vmPath, DirMode) if err != nil { return err } @@ -1364,9 +1367,16 @@ func (clh *cloudHypervisor) launchClh() (int, error) { cmdHypervisor.Stdout = clh.console } } - cmdHypervisor.Stderr = cmdHypervisor.Stdout + attr := 
syscall.SysProcAttr{} + attr.Credential = &syscall.Credential{ + Uid: clh.config.Uid, + Gid: clh.config.Gid, + Groups: clh.config.Groups, + } + cmdHypervisor.SysProcAttr = &attr + err = utils.StartCmd(cmdHypervisor) if err != nil { return -1, err @@ -1691,6 +1701,30 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error { clh.Logger().WithError(err).WithField("path", dir).Warnf("failed to remove vm path") } } + if rootless.IsRootless() { + if _, err := user.Lookup(clh.config.User); err != nil { + clh.Logger().WithError(err).WithFields( + log.Fields{ + "user": clh.config.User, + "uid": clh.config.Uid, + }).Warn("failed to find the user, it might have been removed") + return nil + } + + if err := pkgUtils.RemoveVmmUser(clh.config.User); err != nil { + clh.Logger().WithError(err).WithFields( + log.Fields{ + "user": clh.config.User, + "uid": clh.config.Uid, + }).Warn("failed to delete the user") + return nil + } + clh.Logger().WithFields( + log.Fields{ + "user": clh.config.User, + "uid": clh.config.Uid, + }).Debug("successfully removed the non root user") + } clh.reset() diff --git a/src/runtime/virtcontainers/hypervisor_linux_amd64.go b/src/runtime/virtcontainers/hypervisor_linux_amd64.go index 8cfc9aca9..043b36c9f 100644 --- a/src/runtime/virtcontainers/hypervisor_linux_amd64.go +++ b/src/runtime/virtcontainers/hypervisor_linux_amd64.go @@ -8,15 +8,28 @@ package virtcontainers import "os" const ( - tdxSysFirmwareDir = "/sys/firmware/tdx_seam/" + tdxSeamSysFirmwareDir = "/sys/firmware/tdx_seam/" - tdxCPUFlag = "tdx" + tdxSysFirmwareDir = "/sys/firmware/tdx/" sevKvmParameterPath = "/sys/module/kvm_amd/parameters/sev" snpKvmParameterPath = "/sys/module/kvm_amd/parameters/sev_snp" ) +// TDX is supported and properly loaded when the firmware directory (either tdx or tdx_seam) exists or `tdx` is part of the CPU flag +func checkTdxGuestProtection(flags map[string]bool) bool { + if d, err := os.Stat(tdxSysFirmwareDir); err == nil && d.IsDir() { + return true + } + + if d, err := os.Stat(tdxSeamSysFirmwareDir); err == nil && d.IsDir() { + return true + } + + return false +} + // Implementation of this function is architecture specific func availableGuestProtection() (guestProtection, error) { flags, err := CPUFlags(procCPUInfo) @@ -24,10 +37,10 @@ func availableGuestProtection() (guestProtection, error) { return noneProtection, err } - // TDX is supported and properly loaded when the firmware directory exists or `tdx` is part of the CPU flags - if d, err := os.Stat(tdxSysFirmwareDir); (err == nil && d.IsDir()) || flags[tdxCPUFlag] { + if checkTdxGuestProtection(flags) { return tdxProtection, nil } + // SEV-SNP is supported and enabled when the kvm module `sev_snp` parameter is set to `Y` // SEV-SNP support infers SEV (-ES) support if _, err := os.Stat(snpKvmParameterPath); err == nil { diff --git a/src/runtime/virtcontainers/nydusd.go b/src/runtime/virtcontainers/nydusd.go index 9a2e1a638..3c42e4cde 100644 --- a/src/runtime/virtcontainers/nydusd.go +++ b/src/runtime/virtcontainers/nydusd.go @@ -157,7 +157,7 @@ func (nd *nydusd) args() ([]string, error) { logLevel = "debug" } args := []string{ - "virtiofs", "--hybrid-mode", + "virtiofs", "--log-level", logLevel, "--apisock", nd.apiSockPath, "--sock", nd.sockPath, diff --git a/src/runtime/virtcontainers/nydusd_test.go b/src/runtime/virtcontainers/nydusd_test.go index 481866ffc..a8ec6dc9b 100644 --- a/src/runtime/virtcontainers/nydusd_test.go +++ b/src/runtime/virtcontainers/nydusd_test.go @@ -99,13 +99,13 @@ func TestNydusdArgs(t *testing.T) { 
apiSockPath: "/var/lib/api.sock", debug: true, } - expected := "virtiofs --hybrid-mode --log-level debug --apisock /var/lib/api.sock --sock /var/lib/vhost-user.sock" + expected := "virtiofs --log-level debug --apisock /var/lib/api.sock --sock /var/lib/vhost-user.sock" args, err := nd.args() assert.NoError(err) assert.Equal(expected, strings.Join(args, " ")) nd.debug = false - expected = "virtiofs --hybrid-mode --log-level info --apisock /var/lib/api.sock --sock /var/lib/vhost-user.sock" + expected = "virtiofs --log-level info --apisock /var/lib/api.sock --sock /var/lib/vhost-user.sock" args, err = nd.args() assert.NoError(err) assert.Equal(expected, strings.Join(args, " ")) diff --git a/src/runtime/virtcontainers/qemu.go b/src/runtime/virtcontainers/qemu.go index aa87247d9..744fa9716 100644 --- a/src/runtime/virtcontainers/qemu.go +++ b/src/runtime/virtcontainers/qemu.go @@ -1267,6 +1267,7 @@ func (q *qemu) cleanupVM() error { "user": q.config.User, "uid": q.config.Uid, }).Warn("failed to delete the user") + return nil } q.Logger().WithFields( logrus.Fields{ diff --git a/src/runtime/virtcontainers/qemu_amd64.go b/src/runtime/virtcontainers/qemu_amd64.go index d24953e61..e9cda5b9e 100644 --- a/src/runtime/virtcontainers/qemu_amd64.go +++ b/src/runtime/virtcontainers/qemu_amd64.go @@ -260,7 +260,7 @@ func (q *qemuAmd64) enableProtection() error { if q.qemuMachine.Options != "" { q.qemuMachine.Options += "," } - q.qemuMachine.Options += "kvm-type=tdx,confidential-guest-support=tdx" + q.qemuMachine.Options += "confidential-guest-support=tdx" logger.Info("Enabling TDX guest protection") return nil case sevProtection: diff --git a/src/tools/agent-ctl/Cargo.lock b/src/tools/agent-ctl/Cargo.lock index 234830c3a..bfa0177fb 100644 --- a/src/tools/agent-ctl/Cargo.lock +++ b/src/tools/agent-ctl/Cargo.lock @@ -31,9 +31,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.51" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b26702f315f53b6071259e15dd9d64528213b44d61de1ec926eca7715d62203" +checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" [[package]] name = "arc-swap" @@ -114,7 +114,7 @@ checksum = "d7d78656ba01f1b93024b7c3a0467f1608e4be67d725749fdcd7d2c7678fd7a2" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -131,7 +131,7 @@ checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -176,7 +176,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd9e32d7420c85055e8107e5b2463c4eeefeaac18b52359fe9f9c08a18f342b2" dependencies = [ "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -340,7 +340,7 @@ checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -351,7 +351,7 @@ checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -398,7 +398,7 @@ checksum = "f58dc3c5e468259f19f2d46304a6b28f1c3d034442e14b322d2b850e36f6d5ae" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -527,7 +527,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -593,9 +593,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "hashbrown" 
-version = "0.11.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "heck" @@ -651,9 +651,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.7.0" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown", @@ -728,7 +728,7 @@ dependencies = [ "logging", "nix 0.23.1", "oci", - "protobuf", + "protobuf 3.2.0", "protocols", "rand 0.8.4", "rustjail", @@ -1084,11 +1084,11 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.34" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f84e92c0f7c9d58328b85a78557813e4bd845130db68d7184635344399423b1" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] @@ -1129,7 +1129,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -1144,31 +1144,68 @@ dependencies = [ [[package]] name = "protobuf" -version = "2.27.1" +version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + +[[package]] +name = "protobuf" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55bad9126f378a853655831eb7363b7b01b81d19f8cb1218861086ca4a1a61e" dependencies = [ - "serde", - "serde_derive", + "once_cell", + "protobuf-support", + "thiserror", ] [[package]] name = "protobuf-codegen" -version = "2.27.1" +version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aec1632b7c8f2e620343439a7dfd1f3c47b18906c4be58982079911482b5d707" +checksum = "033460afb75cf755fcfc16dfaed20b86468082a2ea24e05ac35ab4a099a017d6" dependencies = [ - "protobuf", + "protobuf 2.28.0", ] [[package]] -name = "protobuf-codegen-pure" -version = "2.27.1" +name = "protobuf-codegen" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f8122fdb18e55190c796b088a16bdb70cd7acdcd48f7a8b796b58c62e532cc6" +checksum = "0dd418ac3c91caa4032d37cb80ff0d44e2ebe637b2fb243b6234bf89cdac4901" dependencies = [ - "protobuf", - "protobuf-codegen", + "anyhow", + "once_cell", + "protobuf 3.2.0", + "protobuf-parse", + "regex", + "tempfile", + "thiserror", +] + +[[package]] +name = "protobuf-parse" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d39b14605eaa1f6a340aec7f320b34064feb26c93aec35d6a9a2272a8ddfa49" +dependencies = [ + "anyhow", + "indexmap", + "log", + "protobuf 3.2.0", + "protobuf-support", + "tempfile", + "thiserror", + "which", +] + +[[package]] +name = "protobuf-support" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d4d7b8601c814cfb36bcebb79f0e61e45e1e93640cf778837833bbed05c372" +dependencies = [ + "thiserror", ] [[package]] @@ -1176,7 +1213,7 @@ name = "protocols" version = "0.1.0" dependencies = [ "oci", - "protobuf", 
+ "protobuf 3.2.0", "serde", "serde_json", "ttrpc", @@ -1185,9 +1222,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.10" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -1347,7 +1384,7 @@ dependencies = [ "nix 0.24.2", "oci", "path-absolutize", - "protobuf", + "protobuf 3.2.0", "protocols", "regex", "rlimit", @@ -1386,22 +1423,22 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.132" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9875c23cf305cd1fd7eb77234cbb705f21ea6a72c637a5c6db5fe4b8e7f008" +checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.132" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc0db5cb2556c0e558887d9bbdcf6ac4471e83ff66cf696e5419024d1606276" +checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.14", ] [[package]] @@ -1423,7 +1460,7 @@ checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -1552,6 +1589,17 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "syn" +version = "2.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcf316d5356ed6847742d036f8a39c3b8435cac10bd528a4bd461928a6ab34d5" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "take_mut" version = "0.2.2" @@ -1598,7 +1646,7 @@ checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -1645,7 +1693,7 @@ checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -1677,7 +1725,7 @@ checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -1691,43 +1739,43 @@ dependencies = [ [[package]] name = "ttrpc" -version = "0.6.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7d6c992964a013c17814c08d31708d577b0aae44ebadb58755659dd824c2d1" +checksum = "a35f22a2964bea14afee161665bb260b83cb48e665e0260ca06ec0e775c8b06c" dependencies = [ "byteorder", "libc", "log", "nix 0.23.1", - "protobuf", - "protobuf-codegen-pure", + "protobuf 3.2.0", + "protobuf-codegen 3.2.0", "thiserror", ] [[package]] name = "ttrpc-codegen" -version = "0.2.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809eda4e459820237104e4b61d6b41bbe6c9e1ce6adf4057955e6e6722a90408" +checksum = "94d7f7631d7a9ebed715a47cd4cb6072cbc7ae1d4ec01598971bbec0024340c2" dependencies = [ - "protobuf", - "protobuf-codegen", - "protobuf-codegen-pure", + "protobuf 2.28.0", + "protobuf-codegen 3.2.0", + "protobuf-support", "ttrpc-compiler", ] [[package]] name = "ttrpc-compiler" -version = "0.4.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "2978ed3fa047d8fd55cbeb4d4a61d461fb3021a90c9618519c73ce7e5bb66c15" +checksum = "ec3cb5dbf1f0865a34fe3f722290fe776cacb16f50428610b779467b76ddf647" dependencies = [ "derive-new", "prost", "prost-build", "prost-types", - "protobuf", - "protobuf-codegen", + "protobuf 2.28.0", + "protobuf-codegen 2.28.0", "tempfile", ] @@ -1741,6 +1789,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "unicode-ident" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" + [[package]] name = "unicode-segmentation" version = "1.8.0" @@ -1804,7 +1858,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn", + "syn 1.0.82", "wasm-bindgen-shared", ] @@ -1826,7 +1880,7 @@ checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -1981,7 +2035,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn", + "syn 1.0.82", ] [[package]] @@ -2018,5 +2072,5 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] diff --git a/src/tools/agent-ctl/Cargo.toml b/src/tools/agent-ctl/Cargo.toml index 25852d83b..033380396 100644 --- a/src/tools/agent-ctl/Cargo.toml +++ b/src/tools/agent-ctl/Cargo.toml @@ -27,12 +27,12 @@ logging = { path = "../../libs/logging" } slog = "2.7.0" slog-scope = "4.4.0" rand = "0.8.4" -protobuf = "2.27.0" +protobuf = "3.2.0" nix = "0.23.0" libc = "0.2.112" # XXX: Must be the same as the version used by the agent -ttrpc = { version = "0.6.0" } +ttrpc = { version = "0.7.1" } # For parsing timeouts humantime = "2.1.0" diff --git a/src/tools/agent-ctl/src/utils.rs b/src/tools/agent-ctl/src/utils.rs index e2c56fbd2..021300a60 100644 --- a/src/tools/agent-ctl/src/utils.rs +++ b/src/tools/agent-ctl/src/utils.rs @@ -268,8 +268,7 @@ fn root_oci_to_ttrpc(bundle_dir: &str, root: &ociRoot) -> Result { let ttrpc_root = ttrpcRoot { Path: path, Readonly: root.readonly, - unknown_fields: protobuf::UnknownFields::new(), - cached_size: protobuf::CachedSize::default(), + ..Default::default() }; Ok(ttrpc_root) @@ -281,9 +280,9 @@ fn process_oci_to_ttrpc(p: &ociProcess) -> ttrpcProcess { let mut b = ttrpcBox::new(); b.set_Width(s.width); b.set_Height(s.height); - protobuf::SingularPtrField::some(b) + protobuf::MessageField::some(b) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), }; let oom_score_adj: i64 = match p.oom_score_adj { @@ -298,23 +297,23 @@ fn process_oci_to_ttrpc(p: &ociProcess) -> ttrpcProcess { // FIXME: Implement RLimits OCI spec handling (copy from p.rlimits) //let rlimits = vec![ttrpcPOSIXRlimit::new()]; - let rlimits = protobuf::RepeatedField::new(); + let rlimits = Vec::new(); let capabilities = match &p.capabilities { Some(c) => { let mut gc = ttrpcLinuxCapabilities::new(); - gc.set_Bounding(protobuf::RepeatedField::from_slice(&c.bounding)); - gc.set_Effective(protobuf::RepeatedField::from_slice(&c.effective)); - gc.set_Inheritable(protobuf::RepeatedField::from_slice(&c.inheritable)); - gc.set_Permitted(protobuf::RepeatedField::from_slice(&c.permitted)); - gc.set_Ambient(protobuf::RepeatedField::from_slice(&c.ambient)); + gc.set_Bounding(c.bounding.clone()); + gc.set_Effective(c.effective.clone()); + gc.set_Inheritable(c.inheritable.clone()); + gc.set_Permitted(c.permitted.clone()); + gc.set_Ambient(c.ambient.clone()); - protobuf::SingularPtrField::some(gc) + 
protobuf::MessageField::some(gc) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), }; - let mut env = protobuf::RepeatedField::new(); + let mut env = Vec::new(); for pair in &p.env { env.push(pair.to_string()); } @@ -322,8 +321,8 @@ fn process_oci_to_ttrpc(p: &ociProcess) -> ttrpcProcess { ttrpcProcess { Terminal: p.terminal, ConsoleSize: console_size, - User: protobuf::SingularPtrField::some(user), - Args: protobuf::RepeatedField::from_vec(p.args.clone()), + User: protobuf::MessageField::some(user), + Args: p.args.clone(), Env: env, Cwd: p.cwd.clone(), Capabilities: capabilities, @@ -332,13 +331,12 @@ fn process_oci_to_ttrpc(p: &ociProcess) -> ttrpcProcess { ApparmorProfile: p.apparmor_profile.clone(), OOMScoreAdj: oom_score_adj, SelinuxLabel: p.selinux_label.clone(), - unknown_fields: protobuf::UnknownFields::new(), - cached_size: protobuf::CachedSize::default(), + ..Default::default() } } fn mount_oci_to_ttrpc(m: &ociMount) -> ttrpcMount { - let mut ttrpc_options = protobuf::RepeatedField::new(); + let mut ttrpc_options = Vec::new(); for op in &m.options { ttrpc_options.push(op.to_string()); } @@ -346,17 +344,14 @@ fn mount_oci_to_ttrpc(m: &ociMount) -> ttrpcMount { ttrpcMount { destination: m.destination.clone(), source: m.source.clone(), - field_type: m.r#type.clone(), + type_: m.r#type.clone(), options: ttrpc_options, - unknown_fields: protobuf::UnknownFields::new(), - cached_size: protobuf::CachedSize::default(), + ..Default::default() } } -fn idmaps_oci_to_ttrpc( - res: &[oci::LinuxIdMapping], -) -> protobuf::RepeatedField { - let mut ttrpc_idmaps = protobuf::RepeatedField::new(); +fn idmaps_oci_to_ttrpc(res: &[oci::LinuxIdMapping]) -> Vec { + let mut ttrpc_idmaps = Vec::new(); for m in res.iter() { let mut idmapping = ttrpcLinuxIDMapping::default(); idmapping.set_HostID(m.host_id); @@ -367,10 +362,8 @@ fn idmaps_oci_to_ttrpc( ttrpc_idmaps } -fn devices_oci_to_ttrpc( - res: &[oci::LinuxDeviceCgroup], -) -> protobuf::RepeatedField { - let mut ttrpc_devices = protobuf::RepeatedField::new(); +fn devices_oci_to_ttrpc(res: &[oci::LinuxDeviceCgroup]) -> Vec { + let mut ttrpc_devices = Vec::new(); for d in res.iter() { let mut device = ttrpcLinuxDeviceCgroup::default(); device.set_Major(d.major.unwrap_or(0)); @@ -383,12 +376,10 @@ fn devices_oci_to_ttrpc( ttrpc_devices } -fn memory_oci_to_ttrpc( - res: &Option, -) -> protobuf::SingularPtrField { +fn memory_oci_to_ttrpc(res: &Option) -> protobuf::MessageField { let memory = if res.is_some() { let mem = res.as_ref().unwrap(); - protobuf::SingularPtrField::some(ttrpcLinuxMemory { + protobuf::MessageField::some(ttrpcLinuxMemory { Limit: mem.limit.unwrap_or(0), Reservation: mem.reservation.unwrap_or(0), Swap: mem.swap.unwrap_or(0), @@ -396,16 +387,15 @@ fn memory_oci_to_ttrpc( KernelTCP: mem.kernel_tcp.unwrap_or(0), Swappiness: mem.swappiness.unwrap_or(0), DisableOOMKiller: mem.disable_oom_killer.unwrap_or(false), - unknown_fields: protobuf::UnknownFields::new(), - cached_size: protobuf::CachedSize::default(), + ..Default::default() }) } else { - protobuf::SingularPtrField::none() + protobuf::MessageField::none() }; memory } -fn cpu_oci_to_ttrpc(res: &Option) -> protobuf::SingularPtrField { +fn cpu_oci_to_ttrpc(res: &Option) -> protobuf::MessageField { match &res { Some(s) => { let mut cpu = ttrpcLinuxCPU::default(); @@ -414,27 +404,25 @@ fn cpu_oci_to_ttrpc(res: &Option) -> protobuf::SingularPtrField protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), } } -fn 
pids_oci_to_ttrpc(res: &Option) -> protobuf::SingularPtrField { +fn pids_oci_to_ttrpc(res: &Option) -> protobuf::MessageField { match &res { Some(s) => { let mut b = ttrpcLinuxPids::new(); b.set_Limit(s.limit); - protobuf::SingularPtrField::some(b) + protobuf::MessageField::some(b) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), } } -fn hugepage_limits_oci_to_ttrpc( - res: &[oci::LinuxHugepageLimit], -) -> protobuf::RepeatedField { - let mut ttrpc_hugepage_limits = protobuf::RepeatedField::new(); +fn hugepage_limits_oci_to_ttrpc(res: &[oci::LinuxHugepageLimit]) -> Vec { + let mut ttrpc_hugepage_limits = Vec::new(); for h in res.iter() { let mut hugepage_limit = ttrpcLinuxHugepageLimit::default(); hugepage_limit.set_Limit(h.limit); @@ -446,28 +434,26 @@ fn hugepage_limits_oci_to_ttrpc( fn network_oci_to_ttrpc( res: &Option, -) -> protobuf::SingularPtrField { +) -> protobuf::MessageField { match &res { Some(s) => { let mut b = ttrpcLinuxNetwork::new(); b.set_ClassID(s.class_id.unwrap_or(0)); - let mut priorities = protobuf::RepeatedField::new(); + let mut priorities = Vec::new(); for pr in s.priorities.iter() { let mut lip = ttrpcLinuxInterfacePriority::new(); lip.set_Name(pr.name.clone()); lip.set_Priority(pr.priority); priorities.push(lip); } - protobuf::SingularPtrField::some(b) + protobuf::MessageField::some(b) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), } } -fn weight_devices_oci_to_ttrpc( - res: &[oci::LinuxWeightDevice], -) -> protobuf::RepeatedField { - let mut ttrpc_weight_devices = protobuf::RepeatedField::new(); +fn weight_devices_oci_to_ttrpc(res: &[oci::LinuxWeightDevice]) -> Vec { + let mut ttrpc_weight_devices = Vec::new(); for dev in res.iter() { let mut device = ttrpcLinuxWeightDevice::default(); device.set_Major(dev.blk.major); @@ -489,8 +475,8 @@ fn weight_devices_oci_to_ttrpc( fn throttle_devices_oci_to_ttrpc( res: &[oci::LinuxThrottleDevice], -) -> protobuf::RepeatedField { - let mut ttrpc_throttle_devices = protobuf::RepeatedField::new(); +) -> Vec { + let mut ttrpc_throttle_devices = Vec::new(); for dev in res.iter() { let mut device = ttrpcLinuxThrottleDevice::default(); device.set_Major(dev.blk.major); @@ -503,7 +489,7 @@ fn throttle_devices_oci_to_ttrpc( fn block_io_oci_to_ttrpc( res: &Option, -) -> protobuf::SingularPtrField { +) -> protobuf::MessageField { match &res { Some(s) => { let mut b = ttrpcLinuxBlockIO::new(); @@ -529,9 +515,9 @@ fn block_io_oci_to_ttrpc( b.set_ThrottleWriteIOPSDevice(throttle_devices_oci_to_ttrpc( &s.throttle_write_iops_device, )); - protobuf::SingularPtrField::some(b) + protobuf::MessageField::some(b) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), } } @@ -552,15 +538,12 @@ fn resources_oci_to_ttrpc(res: &oci::LinuxResources) -> ttrpcLinuxResources { BlockIO: block_io, HugepageLimits: hugepage_limits, Network: network, - unknown_fields: protobuf::UnknownFields::new(), - cached_size: protobuf::CachedSize::default(), + ..Default::default() } } -fn namespace_oci_to_ttrpc( - res: &[oci::LinuxNamespace], -) -> protobuf::RepeatedField { - let mut ttrpc_namespace = protobuf::RepeatedField::new(); +fn namespace_oci_to_ttrpc(res: &[oci::LinuxNamespace]) -> Vec { + let mut ttrpc_namespace = Vec::new(); for n in res.iter() { let mut ns = ttrpcLinuxNamespace::default(); ns.set_Path(n.path.clone()); @@ -570,10 +553,8 @@ fn namespace_oci_to_ttrpc( ttrpc_namespace } -fn linux_devices_oci_to_ttrpc( - res: 
&[oci::LinuxDevice], -) -> protobuf::RepeatedField { - let mut ttrpc_linux_devices = protobuf::RepeatedField::new(); +fn linux_devices_oci_to_ttrpc(res: &[oci::LinuxDevice]) -> Vec { + let mut ttrpc_linux_devices = Vec::new(); for n in res.iter() { let mut ld = ttrpcLinuxDevice::default(); ld.set_FileMode(n.file_mode.unwrap_or(0)); @@ -590,22 +571,22 @@ fn linux_devices_oci_to_ttrpc( fn seccomp_oci_to_ttrpc(sec: &oci::LinuxSeccomp) -> ttrpcLinuxSeccomp { let mut ttrpc_seccomp = ttrpcLinuxSeccomp::default(); - let mut ttrpc_arch = protobuf::RepeatedField::new(); + let mut ttrpc_arch = Vec::new(); for a in &sec.architectures { ttrpc_arch.push(std::string::String::from(a)); } ttrpc_seccomp.set_Architectures(ttrpc_arch); ttrpc_seccomp.set_DefaultAction(sec.default_action.clone()); - let mut ttrpc_flags = protobuf::RepeatedField::new(); + let mut ttrpc_flags = Vec::new(); for f in &sec.flags { ttrpc_flags.push(std::string::String::from(f)); } ttrpc_seccomp.set_Flags(ttrpc_flags); - let mut ttrpc_syscalls = protobuf::RepeatedField::new(); + let mut ttrpc_syscalls = Vec::new(); for sys in &sec.syscalls { let mut ttrpc_sys = ttrpcLinuxSyscall::default(); ttrpc_sys.set_Action(sys.action.clone()); - let mut ttrpc_args = protobuf::RepeatedField::new(); + let mut ttrpc_args = Vec::new(); for arg in &sys.args { let mut a = ttrpcLinuxSeccompArg::default(); a.set_Index(arg.index as u64); @@ -632,9 +613,9 @@ fn linux_oci_to_ttrpc(l: &ociLinux) -> ttrpcLinux { let ttrpc_linux_resources = match &l.resources { Some(s) => { let b = resources_oci_to_ttrpc(s); - protobuf::SingularPtrField::some(b) + protobuf::MessageField::some(b) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), }; let ttrpc_namespaces = namespace_oci_to_ttrpc(&l.namespaces); @@ -642,17 +623,17 @@ fn linux_oci_to_ttrpc(l: &ociLinux) -> ttrpcLinux { let ttrpc_seccomp = match &l.seccomp { Some(s) => { let b = seccomp_oci_to_ttrpc(s); - protobuf::SingularPtrField::some(b) + protobuf::MessageField::some(b) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), }; let ttrpc_intel_rdt = match &l.intel_rdt { Some(s) => { let b = intel_rdt_oci_to_ttrpc(s); - protobuf::SingularPtrField::some(b) + protobuf::MessageField::some(b) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), }; ttrpcLinux { @@ -665,38 +646,37 @@ fn linux_oci_to_ttrpc(l: &ociLinux) -> ttrpcLinux { Devices: ttrpc_linux_devices, Seccomp: ttrpc_seccomp, RootfsPropagation: l.rootfs_propagation.clone(), - MaskedPaths: protobuf::RepeatedField::from_slice(&l.masked_paths), - ReadonlyPaths: protobuf::RepeatedField::from_slice(&l.readonly_paths), + MaskedPaths: l.masked_paths.clone(), + ReadonlyPaths: l.readonly_paths.clone(), MountLabel: l.mount_label.clone(), IntelRdt: ttrpc_intel_rdt, - unknown_fields: protobuf::UnknownFields::new(), - cached_size: protobuf::CachedSize::default(), + ..Default::default() } } fn oci_to_ttrpc(bundle_dir: &str, cid: &str, oci: &ociSpec) -> Result { let process = match &oci.process { - Some(p) => protobuf::SingularPtrField::some(process_oci_to_ttrpc(p)), - None => protobuf::SingularPtrField::none(), + Some(p) => protobuf::MessageField::some(process_oci_to_ttrpc(p)), + None => protobuf::MessageField::none(), }; let root = match &oci.root { Some(r) => { let ttrpc_root = root_oci_to_ttrpc(bundle_dir, r)?; - protobuf::SingularPtrField::some(ttrpc_root) + protobuf::MessageField::some(ttrpc_root) } - None => protobuf::SingularPtrField::none(), + None => 
protobuf::MessageField::none(), }; - let mut mounts = protobuf::RepeatedField::new(); + let mut mounts = Vec::new(); for m in &oci.mounts { mounts.push(mount_oci_to_ttrpc(m)); } let linux = match &oci.linux { - Some(l) => protobuf::SingularPtrField::some(linux_oci_to_ttrpc(l)), - None => protobuf::SingularPtrField::none(), + Some(l) => protobuf::MessageField::some(linux_oci_to_ttrpc(l)), + None => protobuf::MessageField::none(), }; if cid.len() < MIN_HOSTNAME_LEN as usize { @@ -713,13 +693,12 @@ fn oci_to_ttrpc(bundle_dir: &str, cid: &str, oci: &ociSpec) -> Result Root: root, Hostname: hostname, Mounts: mounts, - Hooks: protobuf::SingularPtrField::none(), + Hooks: protobuf::MessageField::none(), Annotations: HashMap::new(), Linux: linux, - Solaris: protobuf::SingularPtrField::none(), - Windows: protobuf::SingularPtrField::none(), - unknown_fields: protobuf::UnknownFields::new(), - cached_size: protobuf::CachedSize::default(), + Solaris: protobuf::MessageField::none(), + Windows: protobuf::MessageField::none(), + ..Default::default() }; Ok(ttrpc_spec) diff --git a/src/tools/kata-ctl/Cargo.lock b/src/tools/kata-ctl/Cargo.lock index 3edafa91d..6acf131f0 100644 --- a/src/tools/kata-ctl/Cargo.lock +++ b/src/tools/kata-ctl/Cargo.lock @@ -13,7 +13,7 @@ dependencies = [ "logging", "nix 0.24.3", "oci", - "protobuf", + "protobuf 3.2.0", "protocols", "serde", "serde_json", @@ -657,7 +657,6 @@ dependencies = [ "test-utils", "thiserror", "tokio", - "ttrpc", "url", "vmm-sys-util", ] @@ -1108,9 +1107,16 @@ name = "protobuf" version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + +[[package]] +name = "protobuf" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55bad9126f378a853655831eb7363b7b01b81d19f8cb1218861086ca4a1a61e" dependencies = [ - "serde", - "serde_derive", + "once_cell", + "protobuf-support", + "thiserror", ] [[package]] @@ -1119,27 +1125,47 @@ version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "033460afb75cf755fcfc16dfaed20b86468082a2ea24e05ac35ab4a099a017d6" dependencies = [ - "protobuf", + "protobuf 2.28.0", ] [[package]] -name = "protobuf-codegen-pure" -version = "2.28.0" +name = "protobuf-codegen" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a29399fc94bcd3eeaa951c715f7bea69409b2445356b00519740bcd6ddd865" +checksum = "0dd418ac3c91caa4032d37cb80ff0d44e2ebe637b2fb243b6234bf89cdac4901" dependencies = [ - "protobuf", - "protobuf-codegen", + "anyhow", + "once_cell", + "protobuf 3.2.0", + "protobuf-parse", + "regex", + "tempfile", + "thiserror", ] [[package]] -name = "protobuf-codegen-pure3" -version = "2.28.1" +name = "protobuf-parse" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0a3cf0a7de7570cb67bfb9a9a585b5841b49790a1be0ef104340a2110b91135" +checksum = "9d39b14605eaa1f6a340aec7f320b34064feb26c93aec35d6a9a2272a8ddfa49" dependencies = [ - "protobuf", - "protobuf-codegen", + "anyhow", + "indexmap", + "log", + "protobuf 3.2.0", + "protobuf-support", + "tempfile", + "thiserror", + "which", +] + +[[package]] +name = "protobuf-support" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d4d7b8601c814cfb36bcebb79f0e61e45e1e93640cf778837833bbed05c372" +dependencies = [ + "thiserror", ] [[package]] @@ -1148,7 +1174,7 @@ version = "0.1.0" dependencies = 
[ "async-trait", "oci", - "protobuf", + "protobuf 3.2.0", "ttrpc", "ttrpc-codegen", ] @@ -1771,9 +1797,9 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "ttrpc" -version = "0.6.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ecfff459a859c6ba6668ff72b34c2f1d94d9d58f7088414c2674ad0f31cc7d8" +checksum = "a35f22a2964bea14afee161665bb260b83cb48e665e0260ca06ec0e775c8b06c" dependencies = [ "async-trait", "byteorder", @@ -1781,8 +1807,8 @@ dependencies = [ "libc", "log", "nix 0.23.2", - "protobuf", - "protobuf-codegen-pure", + "protobuf 3.2.0", + "protobuf-codegen 3.2.0", "thiserror", "tokio", "tokio-vsock", @@ -1790,28 +1816,28 @@ dependencies = [ [[package]] name = "ttrpc-codegen" -version = "0.2.2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df80affc2cf8c589172b05ba2b8e8a88722ebf4e28b86604615497a8b6fb78c0" +checksum = "94d7f7631d7a9ebed715a47cd4cb6072cbc7ae1d4ec01598971bbec0024340c2" dependencies = [ - "protobuf", - "protobuf-codegen", - "protobuf-codegen-pure3", + "protobuf 2.28.0", + "protobuf-codegen 3.2.0", + "protobuf-support", "ttrpc-compiler", ] [[package]] name = "ttrpc-compiler" -version = "0.4.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8db19ce6af25713061dd805d6733b6f0c45904bd63526ce5d2568c858b7edc71" +checksum = "ec3cb5dbf1f0865a34fe3f722290fe776cacb16f50428610b779467b76ddf647" dependencies = [ "derive-new", "prost", "prost-build", "prost-types", - "protobuf", - "protobuf-codegen", + "protobuf 2.28.0", + "protobuf-codegen 2.28.0", "tempfile", ] diff --git a/src/tools/kata-ctl/Cargo.toml b/src/tools/kata-ctl/Cargo.toml index 664a19ebc..3de3a532a 100644 --- a/src/tools/kata-ctl/Cargo.toml +++ b/src/tools/kata-ctl/Cargo.toml @@ -37,7 +37,6 @@ libc = "0.2.138" slog = "2.7.0" slog-scope = "4.4.0" hyper = "0.14.20" -ttrpc = "0.6.0" tokio = "1.8.0" [target.'cfg(target_arch = "s390x")'.dependencies] @@ -49,5 +48,6 @@ reqwest = { version = "0.11", default-features = false, features = ["json", "blo [dev-dependencies] semver = "1.0.12" tempfile = "3.1.0" +nix = "0.25.0" test-utils = { path = "../../libs/test-utils" } micro_http = { git = "https://github.com/firecracker-microvm/micro-http", branch = "main" } diff --git a/src/tools/kata-ctl/src/arch/aarch64/mod.rs b/src/tools/kata-ctl/src/arch/aarch64/mod.rs index 7966123bd..a6137856f 100644 --- a/src/tools/kata-ctl/src/arch/aarch64/mod.rs +++ b/src/tools/kata-ctl/src/arch/aarch64/mod.rs @@ -7,6 +7,7 @@ pub use arch_specific::*; mod arch_specific { + use crate::check; use crate::types::*; use anyhow::Result; use std::path::Path; @@ -19,7 +20,7 @@ mod arch_specific { // List of check functions static CHECK_LIST: &[CheckItem] = &[CheckItem { - name: CheckType::CheckCpu, + name: CheckType::Cpu, descr: "This parameter performs the host check", fp: check, perm: PermissionType::NonPrivileged, @@ -39,4 +40,10 @@ mod arch_specific { pub fn get_checks() -> Option<&'static [CheckItem<'static>]> { Some(CHECK_LIST) } + + #[allow(dead_code)] + // Guest protection is not supported on ARM64. 
+ pub fn available_guest_protection() -> Result<check::GuestProtection, check::ProtectionError> { + Ok(check::GuestProtection::NoProtection) + } } diff --git a/src/tools/kata-ctl/src/arch/powerpc64le/mod.rs b/src/tools/kata-ctl/src/arch/powerpc64le/mod.rs index 1cc49d70c..8290dbb13 100644 --- a/src/tools/kata-ctl/src/arch/powerpc64le/mod.rs +++ b/src/tools/kata-ctl/src/arch/powerpc64le/mod.rs @@ -8,6 +8,7 @@ use crate::types::*; pub use arch_specific::*; mod arch_specific { + use crate::check; use anyhow::Result; pub const ARCH_CPU_VENDOR_FIELD: &str = ""; @@ -20,4 +21,19 @@ pub fn get_checks() -> Option<&'static [CheckItem<'static>]> { None } + + const PEF_SYS_FIRMWARE_DIR: &str = "/sys/firmware/ultravisor/"; + + pub fn available_guest_protection() -> Result<check::GuestProtection, check::ProtectionError> { + if !nix::unistd::Uid::effective().is_root() { + return Err(check::ProtectionError::NoPerms); + } + + let metadata = std::fs::metadata(PEF_SYS_FIRMWARE_DIR); + if metadata.is_ok() && metadata.unwrap().is_dir() { + return Ok(check::GuestProtection::Pef); + } + + Ok(check::GuestProtection::NoProtection) + } } diff --git a/src/tools/kata-ctl/src/arch/s390x/mod.rs b/src/tools/kata-ctl/src/arch/s390x/mod.rs index b3196547b..7a9940dcf 100644 --- a/src/tools/kata-ctl/src/arch/s390x/mod.rs +++ b/src/tools/kata-ctl/src/arch/s390x/mod.rs @@ -11,6 +11,10 @@ mod arch_specific { use crate::check; use crate::types::*; use anyhow::{anyhow, Result}; + use nix::unistd::Uid; + use std::collections::HashMap; + use std::io::BufRead; + use std::io::BufReader; const CPUINFO_DELIMITER: &str = "processor "; const CPUINFO_FEATURES_TAG: &str = "features"; @@ -56,7 +60,7 @@ mod arch_specific { // List of check functions static CHECK_LIST: &[CheckItem] = &[CheckItem { - name: CheckType::CheckCpu, + name: CheckType::Cpu, descr: "This parameter performs the cpu check", fp: check, perm: PermissionType::NonPrivileged, @@ -65,4 +69,115 @@ mod arch_specific { pub fn get_checks() -> Option<&'static [CheckItem<'static>]> { Some(CHECK_LIST) } + + #[allow(dead_code)] + fn retrieve_cpu_facilities() -> Result<HashMap<i32, bool>> { + let f = std::fs::File::open(check::PROC_CPUINFO)?; + let mut reader = BufReader::new(f); + let mut contents = String::new(); + let facilities_field = "facilities"; + let mut facilities = HashMap::new(); + + while reader.read_line(&mut contents)?
> 0 { + let fields: Vec<&str> = contents.split_whitespace().collect(); + if fields.len() < 2 { + contents.clear(); + continue; + } + + if !fields[0].starts_with(facilities_field) { + contents.clear(); + continue; + } + + let mut start = 1; + if fields[1] == ":" { + start = 2; + } + + for field in fields.iter().skip(start) { + let bit = field.parse::<i32>()?; + facilities.insert(bit, true); + } + return Ok(facilities); + } + + Ok(facilities) + } + + #[allow(dead_code)] + pub fn check_cmd_line( + kernel_cmdline_path: &str, + search_param: &str, + search_values: &[&str], + ) -> Result<bool> { + let f = std::fs::File::open(kernel_cmdline_path)?; + let reader = BufReader::new(f); + + let check_fn = if search_values.is_empty() { + |param: &str, search_param: &str, _search_values: &[&str]| { + return param.eq_ignore_ascii_case(search_param); + } + } else { + |param: &str, search_param: &str, search_values: &[&str]| { + let split: Vec<&str> = param.splitn(2, "=").collect(); + if split.len() < 2 || split[0] != search_param { + return false; + } + + for value in search_values { + if value.eq_ignore_ascii_case(split[1]) { + return true; + } + } + false + } + }; + + for line in reader.lines() { + for field in line?.split_whitespace() { + if check_fn(field, search_param, search_values) { + return Ok(true); + } + } + } + Ok(false) + } + + #[allow(dead_code)] + // Check for Secure Execution (protected virtualization) support on s390x. + pub fn available_guest_protection() -> Result<check::GuestProtection, check::ProtectionError> { + if !Uid::effective().is_root() { + return Err(check::ProtectionError::NoPerms)?; + } + + let facilities = retrieve_cpu_facilities().map_err(|err| { + check::ProtectionError::CheckFailed(format!( + "Error retrieving cpu facilities file : {}", + err.to_string() + )) + })?; + + // Secure Execution + // https://www.kernel.org/doc/html/latest/virt/kvm/s390-pv.html + let se_cpu_facility_bit: i32 = 158; + if !facilities.contains_key(&se_cpu_facility_bit) { + return Ok(check::GuestProtection::NoProtection); + } + + let cmd_line_values = vec!["1", "on", "y", "yes"]; + let se_cmdline_param = "prot_virt"; + + let se_cmdline_present = + check_cmd_line("/proc/cmdline", se_cmdline_param, &cmd_line_values) + .map_err(|err| check::ProtectionError::CheckFailed(err.to_string()))?; + + if !se_cmdline_present { + return Err(check::ProtectionError::InvalidValue(String::from( + "Protected Virtualization is not enabled on kernel command line!", + ))); + } + + Ok(check::GuestProtection::Se) + } } diff --git a/src/tools/kata-ctl/src/arch/x86_64/mod.rs b/src/tools/kata-ctl/src/arch/x86_64/mod.rs index 924536a13..026312624 100644 --- a/src/tools/kata-ctl/src/arch/x86_64/mod.rs +++ b/src/tools/kata-ctl/src/arch/x86_64/mod.rs @@ -3,28 +3,61 @@ // SPDX-License-Identifier: Apache-2.0 // +#![allow(dead_code)] + #[cfg(target_arch = "x86_64")] pub use arch_specific::*; mod arch_specific { use crate::check; + use crate::check::{GuestProtection, ProtectionError}; use crate::types::*; use anyhow::{anyhow, Result}; + use nix::unistd::Uid; + use std::fs; + use std::path::Path; const CPUINFO_DELIMITER: &str = "\nprocessor"; const CPUINFO_FLAGS_TAG: &str = "flags"; const CPU_FLAGS_INTEL: &[&str] = &["lm", "sse4_1", "vmx"]; const CPU_ATTRIBS_INTEL: &[&str] = &["GenuineIntel"]; + const VMM_FLAGS: &[&str] = &["hypervisor"]; + pub const ARCH_CPU_VENDOR_FIELD: &str = check::GENERIC_CPU_VENDOR_FIELD; pub const ARCH_CPU_MODEL_FIELD: &str = check::GENERIC_CPU_MODEL_FIELD; // List of check functions - static CHECK_LIST: &[CheckItem] = &[CheckItem { - name: CheckType::CheckCpu, - descr: "This parameter performs
the cpu check", - fp: check_cpu, - perm: PermissionType::NonPrivileged, - }]; + static CHECK_LIST: &[CheckItem] = &[ + CheckItem { + name: CheckType::Cpu, + descr: "This parameter performs the cpu check", + fp: check_cpu, + perm: PermissionType::NonPrivileged, + }, + CheckItem { + name: CheckType::KernelModules, + descr: "This parameter performs the kvm check", + fp: check_kernel_modules, + perm: PermissionType::NonPrivileged, + }, + ]; + + static MODULE_LIST: &[KernelModule] = &[ + KernelModule { + name: "kvm", + parameter: KernelParam { + name: "kvmclock_periodic_sync", + value: KernelParamType::Simple("Y"), + }, + }, + KernelModule { + name: "kvm_intel", + parameter: KernelParam { + name: "unrestricted_guest", + value: KernelParamType::Predicate(unrestricted_guest_param_check), + }, + }, + ]; pub fn get_checks() -> Option<&'static [CheckItem<'static>]> { Some(CHECK_LIST) @@ -61,4 +94,263 @@ mod arch_specific { Ok(()) } + + fn retrieve_cpu_flags() -> Result { + let cpu_info = check::get_single_cpu_info(check::PROC_CPUINFO, CPUINFO_DELIMITER)?; + + let cpu_flags = check::get_cpu_flags(&cpu_info, CPUINFO_FLAGS_TAG).map_err(|e| { + anyhow!( + "Error parsing CPU flags, file {:?}, {:?}", + check::PROC_CPUINFO, + e + ) + })?; + + Ok(cpu_flags) + } + + pub const TDX_SYS_FIRMWARE_DIR: &str = "/sys/firmware/tdx_seam/"; + pub const TDX_CPU_FLAG: &str = "tdx"; + pub const SEV_KVM_PARAMETER_PATH: &str = "/sys/module/kvm_amd/parameters/sev"; + pub const SNP_KVM_PARAMETER_PATH: &str = "/sys/module/kvm_amd/parameters/sev_snp"; + + pub fn available_guest_protection() -> Result { + if !Uid::effective().is_root() { + return Err(ProtectionError::NoPerms); + } + + arch_guest_protection( + TDX_SYS_FIRMWARE_DIR, + TDX_CPU_FLAG, + SEV_KVM_PARAMETER_PATH, + SNP_KVM_PARAMETER_PATH, + ) + } + + pub fn arch_guest_protection( + tdx_path: &str, + tdx_flag: &str, + sev_path: &str, + snp_path: &str, + ) -> Result { + let flags = + retrieve_cpu_flags().map_err(|err| ProtectionError::CheckFailed(err.to_string()))?; + + let metadata = fs::metadata(tdx_path); + + if metadata.is_ok() && metadata.unwrap().is_dir() && flags.contains(tdx_flag) { + return Ok(GuestProtection::Tdx); + } + + let check_contents = |file_name: &str| -> Result { + let file_path = Path::new(file_name); + if !file_path.exists() { + return Ok(false); + } + + let contents = fs::read_to_string(file_name).map_err(|err| { + ProtectionError::CheckFailed(format!("Error reading file {} : {}", file_name, err)) + })?; + + if contents == "Y" { + return Ok(true); + } + Ok(false) + }; + + if check_contents(snp_path)? { + return Ok(GuestProtection::Snp); + } + + if check_contents(sev_path)? 
{ + return Ok(GuestProtection::Sev); + } + + Ok(GuestProtection::NoProtection) + } + + fn running_on_vmm() -> Result { + match check::get_single_cpu_info(check::PROC_CPUINFO, CPUINFO_DELIMITER) { + Ok(cpu_info) => { + // check if the 'hypervisor' flag exist in the cpu features + let missing_hypervisor_flag = check::check_cpu_attribs(&cpu_info, VMM_FLAGS)?; + + if missing_hypervisor_flag.is_empty() { + return Ok(true); + } + } + Err(e) => { + return Err(anyhow!( + "Unable to determine if the OS is running on a VM: {}: {}", + e, + check::PROC_CPUINFO + )); + } + } + + Ok(false) + } + + // check the host kernel parameter value is valid + // and check if we are running inside a VMM + fn unrestricted_guest_param_check( + module: &str, + param_name: &str, + param_value_host: &str, + ) -> Result<()> { + let expected_param_value: char = 'Y'; + + let running_on_vmm_alt = running_on_vmm()?; + + if running_on_vmm_alt { + let msg = format!("You are running in a VM, where the kernel module '{}' parameter '{:}' has a value '{:}'. This causes conflict when running kata.", + module, + param_name, + param_value_host + ); + return Err(anyhow!(msg)); + } + + if param_value_host == expected_param_value.to_string() { + Ok(()) + } else { + let error_msg = format!( + "Kernel Module: '{:}' parameter '{:}' should have value '{:}', but found '{:}.'.", + module, param_name, expected_param_value, param_value_host + ); + + let action_msg = format!("Remove the '{:}' module using `rmmod` and then reload using `modprobe`, setting '{:}={:}'", + module, + param_name, + expected_param_value + ); + + return Err(anyhow!("{} {}", error_msg, action_msg)); + } + } + + fn check_kernel_param( + module: &str, + param_name: &str, + param_value_host: &str, + param_type: KernelParamType, + ) -> Result<()> { + match param_type { + KernelParamType::Simple(param_value_req) => { + if param_value_host != param_value_req { + return Err(anyhow!( + "Kernel module '{}': parameter '{}' should have value '{}', but found '{}'", + module, + param_name, + param_value_req, + param_value_host + )); + } + Ok(()) + } + KernelParamType::Predicate(pred_func) => { + pred_func(module, param_name, param_value_host) + } + } + } + + fn check_kernel_modules(_args: &str) -> Result<()> { + println!("INFO: check kernel modules for: x86_64"); + + for module in MODULE_LIST { + let module_loaded = + check::check_kernel_module_loaded(module.name, module.parameter.name); + + match module_loaded { + Ok(param_value_host) => { + let parameter_check = check_kernel_param( + module.name, + module.parameter.name, + ¶m_value_host, + module.parameter.value.clone(), + ); + + match parameter_check { + Ok(_v) => println!("{} Ok", module.name), + Err(e) => return Err(e), + } + } + Err(err) => { + eprintln!("WARNING {:}", err.replace('\n', "")) + } + } + } + Ok(()) + } +} + +#[cfg(target_arch = "x86_64")] +#[cfg(test)] +mod tests { + use super::*; + use crate::check; + use nix::unistd::Uid; + use std::fs; + use std::io::Write; + use tempfile::tempdir; + + #[test] + fn test_available_guest_protection_no_privileges() { + if !Uid::effective().is_root() { + let res = available_guest_protection(); + assert!(res.is_err()); + assert_eq!( + "No permission to check guest protection", + res.unwrap_err().to_string() + ); + } + } + + fn test_arch_guest_protection_snp() { + // Test snp + let dir = tempdir().unwrap(); + let snp_file_path = dir.path().join("sev_snp"); + let path = snp_file_path.clone(); + let mut snp_file = fs::File::create(snp_file_path).unwrap(); + writeln!(snp_file, 
"Y").unwrap(); + + let actual = + arch_guest_protection("/xyz/tmp", TDX_CPU_FLAG, "/xyz/tmp", path.to_str().unwrap()); + assert!(actual.is_ok()); + assert_eq!(actual.unwrap(), check::GuestProtection::Snp); + + writeln!(snp_file, "N").unwrap(); + let actual = + arch_guest_protection("/xyz/tmp", TDX_CPU_FLAG, "/xyz/tmp", path.to_str().unwrap()); + assert!(actual.is_ok()); + assert_eq!(actual.unwrap(), check::GuestProtection::NoProtection); + } + + fn test_arch_guest_protection_sev() { + // Test sev + let dir = tempdir().unwrap(); + let sev_file_path = dir.path().join("sev"); + let sev_path = sev_file_path.clone(); + let mut sev_file = fs::File::create(sev_file_path).unwrap(); + writeln!(sev_file, "Y").unwrap(); + + let actual = arch_guest_protection( + "/xyz/tmp", + TDX_CPU_FLAG, + sev_path.to_str().unwrap(), + "/xyz/tmp", + ); + assert!(actual.is_ok()); + assert_eq!(actual.unwrap(), check::GuestProtection::Sev); + + writeln!(sev_file, "N").unwrap(); + let actual = arch_guest_protection( + "/xyz/tmp", + TDX_CPU_FLAG, + sev_path.to_str().unwrap(), + "/xyz/tmp", + ); + assert!(actual.is_ok()); + assert_eq!(actual.unwrap(), check::GuestProtection::NoProtection); + } } diff --git a/src/tools/kata-ctl/src/check.rs b/src/tools/kata-ctl/src/check.rs index bda635515..dfb9a3b7b 100644 --- a/src/tools/kata-ctl/src/check.rs +++ b/src/tools/kata-ctl/src/check.rs @@ -8,6 +8,11 @@ use anyhow::{anyhow, Result}; use reqwest::header::{CONTENT_TYPE, USER_AGENT}; use serde::{Deserialize, Serialize}; +use thiserror::Error; + +#[cfg(any(target_arch = "x86_64"))] +use std::process::{Command, Stdio}; + #[derive(Debug, Deserialize, Serialize, PartialEq)] struct Release { tag_name: String, @@ -16,6 +21,12 @@ struct Release { tarball_url: String, } +#[allow(dead_code)] +const MODPROBE_PATH: &str = "/sbin/modprobe"; + +#[allow(dead_code)] +const MODINFO_PATH: &str = "/sbin/modinfo"; + const KATA_GITHUB_RELEASE_URL: &str = "https://api.github.com/repos/kata-containers/kata-containers/releases"; @@ -28,6 +39,7 @@ const ERR_NO_CPUINFO: &str = "cpu_info string is empty"; #[allow(dead_code)] pub const GENERIC_CPU_VENDOR_FIELD: &str = "vendor_id"; + #[allow(dead_code)] pub const GENERIC_CPU_MODEL_FIELD: &str = "model name"; @@ -35,8 +47,8 @@ pub const GENERIC_CPU_MODEL_FIELD: &str = "model name"; pub const PROC_CPUINFO: &str = "/proc/cpuinfo"; #[cfg(any(target_arch = "s390x", target_arch = "x86_64"))] -fn get_cpu_info(cpu_info_file: &str) -> Result { - let contents = std::fs::read_to_string(cpu_info_file)?; +fn read_file_contents(file_path: &str) -> Result { + let contents = std::fs::read_to_string(file_path)?; Ok(contents) } @@ -44,7 +56,7 @@ fn get_cpu_info(cpu_info_file: &str) -> Result { // the specified cpuinfo file by parsing based on a specified delimiter #[cfg(any(target_arch = "s390x", target_arch = "x86_64"))] pub fn get_single_cpu_info(cpu_info_file: &str, substring: &str) -> Result { - let contents = get_cpu_info(cpu_info_file)?; + let contents = read_file_contents(cpu_info_file)?; if contents.is_empty() { return Err(anyhow!(ERR_NO_CPUINFO)); @@ -56,7 +68,6 @@ pub fn get_single_cpu_info(cpu_info_file: &str, substring: &str) -> Result Result { } if cpu_flags_tag.is_empty() { - return Err(anyhow!("cpu flags delimiter string is empty")); + return Err(anyhow!("cpu flags delimiter string is empty"))?; } let subcontents: Vec<&str> = cpu_info.split('\n').collect(); @@ -125,6 +136,30 @@ pub fn check_cpu_attribs( Ok(missing_attribs) } +#[allow(dead_code)] +#[derive(Debug, PartialEq)] +pub enum GuestProtection { + 
NoProtection, + Tdx, + Sev, + Snp, + Pef, + Se, +} + +#[allow(dead_code)] +#[derive(Error, Debug)] +pub enum ProtectionError { + #[error("No permission to check guest protection")] + NoPerms, + + #[error("Failed to check guest protection: {0}")] + CheckFailed(String), + + #[error("Invalid guest protection value: {0}")] + InvalidValue(String), +} + pub fn run_network_checks() -> Result<()> { Ok(()) } @@ -197,6 +232,86 @@ pub fn check_official_releases() -> Result<()> { Ok(()) } +#[cfg(any(target_arch = "x86_64"))] +pub fn check_kernel_module_loaded(module: &str, parameter: &str) -> Result { + const MODPROBE_PARAMETERS_DRY_RUN: &str = "--dry-run"; + const MODPROBE_PARAMETERS_FIRST_TIME: &str = "--first-time"; + const MODULES_PATH: &str = "/sys/module"; + + let status_modinfo_success; + + // Partial check w/ modinfo + // verifies that the module exists + match Command::new(MODINFO_PATH) + .arg(module) + .stdout(Stdio::piped()) + .output() + { + Ok(v) => { + status_modinfo_success = v.status.success(); + + // The module is already not loaded. + if !status_modinfo_success { + let msg = String::from_utf8_lossy(&v.stderr).replace('\n', ""); + return Err(msg); + } + } + Err(_e) => { + let msg = format!( + "Command {:} not found, verify that `kmod` package is already installed.", + MODINFO_PATH, + ); + return Err(msg); + } + } + + // Partial check w/ modprobe + // check that the module is already loaded + match Command::new(MODPROBE_PATH) + .arg(MODPROBE_PARAMETERS_DRY_RUN) + .arg(MODPROBE_PARAMETERS_FIRST_TIME) + .arg(module) + .stdout(Stdio::piped()) + .output() + { + Ok(v) => { + // a successful simulated modprobe insert, means the module is not already loaded + let status_modprobe_success = v.status.success(); + + if status_modprobe_success && status_modinfo_success { + // This condition is true in the case that the module exist, but is not already loaded + let msg = format!("The kernel module `{:}` exist but is not already loaded. 
Try reloading it using 'modprobe {:}=Y'", + module, module + ); + return Err(msg); + } + } + + Err(_e) => { + let msg = format!( + "Command {:} not found, verify that `kmod` package is already installed.", + MODPROBE_PATH, + ); + return Err(msg); + } + } + + let module_path = format!("{}/{}/parameters/{}", MODULES_PATH, module, parameter); + + // Here the currently loaded kernel parameter value + // is retrieved and returned on success + match read_file_contents(&module_path) { + Ok(result) => Ok(result.replace('\n', "")), + Err(_e) => { + let msg = format!( + "'{:}' kernel module parameter `{:}` not found.", + module, parameter + ); + Err(msg) + } + } +} + #[cfg(any(target_arch = "s390x", target_arch = "x86_64"))] #[cfg(test)] mod tests { @@ -388,4 +503,64 @@ mod tests { assert!(!v.minor.to_string().is_empty()); assert!(!v.patch.to_string().is_empty()); } + + #[cfg(any(target_arch = "x86_64"))] + #[test] + fn check_module_loaded() { + #[allow(dead_code)] + #[derive(Debug)] + struct TestData<'a> { + module_name: &'a str, + param_name: &'a str, + param_value: &'a str, + result: Result, + } + + let tests = &[ + // Failure scenarios + TestData { + module_name: "", + param_name: "", + param_value: "", + result: Err(anyhow!("modinfo: ERROR: Module {} not found.", "")), + }, + TestData { + module_name: "kvm", + param_name: "", + param_value: "", + result: Err(anyhow!( + "'{:}' kernel module parameter `{:}` not found.", + "kvm", + "" + )), + }, + // Success scenarios + TestData { + module_name: "kvm", + param_name: "kvmclock_periodic_sync", + param_value: "Y", + result: Ok("Y".to_string()), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + let result = check_kernel_module_loaded(d.module_name, d.param_name); + let msg = format!("{}, result: {:?}", msg, result); + + if d.result.is_ok() { + assert_eq!( + result.as_ref().unwrap(), + d.result.as_ref().unwrap(), + "{}", + msg + ); + continue; + } + + let expected_error = format!("{}", &d.result.as_ref().unwrap_err()); + let actual_error = result.unwrap_err().to_string(); + assert!(actual_error == expected_error, "{}", msg); + } + } } diff --git a/src/tools/kata-ctl/src/ops/check_ops.rs b/src/tools/kata-ctl/src/ops/check_ops.rs index fa3aa688b..f2dbea702 100644 --- a/src/tools/kata-ctl/src/ops/check_ops.rs +++ b/src/tools/kata-ctl/src/ops/check_ops.rs @@ -73,15 +73,18 @@ pub fn handle_check(checkcmd: CheckArgument) -> Result<()> { match command { CheckSubCommand::All => { // run architecture-specific tests - handle_builtin_check(CheckType::CheckCpu, "")?; + handle_builtin_check(CheckType::Cpu, "")?; // run code that uses network checks check::run_network_checks()?; + + // run kernel module checks + handle_builtin_check(CheckType::KernelModules, "")?; } CheckSubCommand::NoNetworkChecks => { // run architecture-specific tests - handle_builtin_check(CheckType::CheckCpu, "")?; + handle_builtin_check(CheckType::Cpu, "")?; } CheckSubCommand::CheckVersionOnly => { diff --git a/src/tools/kata-ctl/src/types.rs b/src/tools/kata-ctl/src/types.rs index 483e5bce7..26f5954d8 100644 --- a/src/tools/kata-ctl/src/types.rs +++ b/src/tools/kata-ctl/src/types.rs @@ -12,8 +12,9 @@ pub type BuiltinCmdFp = fn(args: &str) -> Result<()>; // CheckType encodes the name of each check provided by kata-ctl. 
#[derive(Debug, strum_macros::Display, EnumString, PartialEq)] pub enum CheckType { - CheckCpu, - CheckNetwork, + Cpu, + Network, + KernelModules, } // PermissionType is used to show whether a check needs to run with elevated (super-user) @@ -33,3 +34,39 @@ pub struct CheckItem<'a> { pub fp: BuiltinCmdFp, pub perm: PermissionType, } + +// Builtin module parameter check handler type. +// +// BuiltinModuleParamFp represents a predicate function to determine if a +// kernel parameter _value_ is as expected. If not, the returned Error will +// explain what is wrong. +// +// Parameters: +// +// - module: name of kernel module. +// - param: name of parameter for the kernel module. +// - value: value of the kernel parameter. +pub type BuiltinModuleParamFp = fn(module: &str, param: &str, value: &str) -> Result<()>; + +// KernelParamType encodes the value and a handler +// function for kernel module parameters +#[allow(dead_code)] +#[derive(Clone)] +pub enum KernelParamType<'a> { + Simple(&'a str), + Predicate(BuiltinModuleParamFp), +} + +// Parameters is used to encode the module parameters +#[derive(Clone)] +pub struct KernelParam<'a> { + pub name: &'a str, + pub value: KernelParamType<'a>, +} + +// KernelModule is used to describe a kernel module along with its required parameters. +#[allow(dead_code)] +pub struct KernelModule<'a> { + pub name: &'a str, + pub parameter: KernelParam<'a>, +} diff --git a/src/tools/kata-ctl/src/utils.rs b/src/tools/kata-ctl/src/utils.rs index 8e6815d02..03c005e9f 100644 --- a/src/tools/kata-ctl/src/utils.rs +++ b/src/tools/kata-ctl/src/utils.rs @@ -144,6 +144,12 @@ pub fn get_generic_cpu_details(cpu_info_file: &str) -> Result<(String, String)> Ok((vendor, model)) } +const VHOST_VSOCK_DEVICE: &str = "/dev/vhost-vsock"; +pub fn supports_vsocks(vsock_path: &str) -> Result { + let metadata = fs::metadata(vsock_path)?; + Ok(metadata.is_file()) +} + #[cfg(test)] mod tests { use super::*; @@ -287,4 +293,30 @@ mod tests { ); assert_eq!(actual, expected); } + + #[test] + fn check_supports_vsocks_valid() { + let dir = tempdir().unwrap(); + let file_path = dir.path().join("vhost-vsock"); + let path = file_path.clone(); + let _file = fs::File::create(file_path).unwrap(); + let res = supports_vsocks(path.to_str().unwrap()).unwrap(); + assert!(res); + } + + #[test] + fn check_supports_vsocks_dir() { + let dir = tempdir().unwrap(); + let file_path = dir.path().join("vhost-vsock"); + let path = file_path.clone(); + fs::create_dir(file_path).unwrap(); + let res = supports_vsocks(path.to_str().unwrap()).unwrap(); + assert!(!res); + } + + #[test] + fn check_supports_vsocks_missing_file() { + let res = supports_vsocks("/xyz/vhost-vsock"); + assert!(res.is_err()); + } } diff --git a/src/tools/trace-forwarder/Cargo.lock b/src/tools/trace-forwarder/Cargo.lock index ce1f994d2..7a6a7c0ac 100644 --- a/src/tools/trace-forwarder/Cargo.lock +++ b/src/tools/trace-forwarder/Cargo.lock @@ -289,7 +289,6 @@ dependencies = [ "opentelemetry 0.14.0", "opentelemetry-jaeger", "privdrop", - "protobuf", "serde", "serde_json", "slog", @@ -527,12 +526,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "protobuf" -version = "2.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" - [[package]] name = "quote" version = "1.0.10" diff --git a/src/tools/trace-forwarder/Cargo.toml b/src/tools/trace-forwarder/Cargo.toml index fc0b69631..0b6d3d550 100644 --- a/src/tools/trace-forwarder/Cargo.toml +++ 
b/src/tools/trace-forwarder/Cargo.toml @@ -23,7 +23,6 @@ serde_json = "1.0.44" anyhow = "1.0.31" opentelemetry = { version = "0.14.0", features=["serialize"] } opentelemetry-jaeger = "0.13.0" -protobuf = "2.27.0" tracing-opentelemetry = "0.16.0" tracing = "0.1.29" tracing-subscriber = "0.3.3" diff --git a/tests/common.bash b/tests/common.bash new file mode 100644 index 000000000..a29b29b87 --- /dev/null +++ b/tests/common.bash @@ -0,0 +1,47 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +# This file contains common functions that +# are being used by our metrics and integration tests + +die() { + local msg="$*" + echo -e "[$(basename $0):${BASH_LINENO[0]}] ERROR: $msg" >&2 + exit 1 +} + +warn() { + local msg="$*" + echo -e "[$(basename $0):${BASH_LINENO[0]}] WARNING: $msg" +} + +info() { + local msg="$*" + echo -e "[$(basename $0):${BASH_LINENO[0]}] INFO: $msg" +} + +handle_error() { + local exit_code="${?}" + local line_number="${1:-}" + echo -e "[$(basename $0):$line_number] ERROR: $(eval echo "$BASH_COMMAND")" + exit "${exit_code}" +} +trap 'handle_error $LINENO' ERR + +waitForProcess() { + wait_time="$1" + sleep_time="$2" + cmd="$3" + while [ "$wait_time" -gt 0 ]; do + if eval "$cmd"; then + return 0 + else + sleep "$sleep_time" + wait_time=$((wait_time-sleep_time)) + fi + done + return 1 +} diff --git a/tests/integration/kubernetes/filter_k8s_test.sh b/tests/integration/kubernetes/filter_k8s_test.sh new file mode 100755 index 000000000..2b90076d9 --- /dev/null +++ b/tests/integration/kubernetes/filter_k8s_test.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# +# Copyright (c) 2019 ARM Limited +# +# SPDX-License-Identifier: Apache-2.0 + +set -o errexit +set -o nounset +set -o pipefail + +GOPATH_LOCAL="${GOPATH%%:*}" +KATA_DIR="${GOPATH_LOCAL}/src/github.com/kata-containers" +TEST_DIR="${KATA_DIR}/tests" +CI_DIR="${TEST_DIR}/.ci" + +K8S_FILTER_FLAG="kubernetes" + +source "${CI_DIR}/lib.sh" + +main() +{ + local K8S_CONFIG_FILE="$1" + local K8S_TEST_UNION="$2" + local result=() + + mapfile -d " " -t _K8S_TEST_UNION <<< "${K8S_TEST_UNION}" + + # install yq if not exist + ${CI_DIR}/install_yq.sh > /dev/null + + local K8S_SKIP_UNION=$("${GOPATH_LOCAL}/bin/yq" read "${K8S_CONFIG_FILE}" "${K8S_FILTER_FLAG}") + [ "${K8S_SKIP_UNION}" == "null" ] && return + mapfile -t _K8S_SKIP_UNION <<< "${K8S_SKIP_UNION}" + + for TEST_ENTRY in "${_K8S_TEST_UNION[@]}" + do + local flag="false" + for SKIP_ENTRY in "${_K8S_SKIP_UNION[@]}" + do + SKIP_ENTRY="${SKIP_ENTRY#- }.bats" + [ "$SKIP_ENTRY" == "$TEST_ENTRY" ] && flag="true" + done + [ "$flag" == "false" ] && result+=("$TEST_ENTRY") + done + echo ${result[@]} +} + +main "$@" diff --git a/tests/integration/kubernetes/filter_out_per_arch/aarch64.yaml b/tests/integration/kubernetes/filter_out_per_arch/aarch64.yaml new file mode 100644 index 000000000..8474a67fc --- /dev/null +++ b/tests/integration/kubernetes/filter_out_per_arch/aarch64.yaml @@ -0,0 +1,23 @@ +# +# Copyright (c) 2018 ARM Limited +# +# SPDX-License-Identifier: Apache-2.0 + +# for now, not all integration test suites are fully passed in aarch64. +# some need to be tested, and some need to be refined. +# sequence of 'test' holds supported integration tests components. 
+test: + - functional + - kubernetes + - cri-containerd + +kubernetes: + - k8s-cpu-ns + - k8s-limit-range + - k8s-number-cpus + - k8s-expose-ip + - k8s-oom + - k8s-block-volume + - k8s-inotify + - k8s-qos-pods + - k8s-footloose diff --git a/tests/integration/kubernetes/filter_out_per_arch/ppc64le.yaml b/tests/integration/kubernetes/filter_out_per_arch/ppc64le.yaml new file mode 100644 index 000000000..d8644e019 --- /dev/null +++ b/tests/integration/kubernetes/filter_out_per_arch/ppc64le.yaml @@ -0,0 +1,11 @@ +# +# Copyright (c) 2019 IBM +# +# SPDX-License-Identifier: Apache-2.0 + +kubernetes: + - k8s-block-volume + - k8s-limit-range + - k8s-number-cpus + - k8s-oom + - k8s-inotify diff --git a/tests/integration/kubernetes/filter_out_per_arch/s390x.yaml b/tests/integration/kubernetes/filter_out_per_arch/s390x.yaml new file mode 100644 index 000000000..224539d8b --- /dev/null +++ b/tests/integration/kubernetes/filter_out_per_arch/s390x.yaml @@ -0,0 +1,8 @@ +# +# Copyright (c) 2021 IBM +# +# SPDX-License-Identifier: Apache-2.0 + +kubernetes: + - k8s-caps + - k8s-inotify diff --git a/tests/integration/kubernetes/k8s-attach-handlers.bats b/tests/integration/kubernetes/k8s-attach-handlers.bats new file mode 100644 index 000000000..10a7a0f19 --- /dev/null +++ b/tests/integration/kubernetes/k8s-attach-handlers.bats @@ -0,0 +1,42 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + nginx_version="${docker_images_nginx_version}" + nginx_image="nginx:$nginx_version" + + pod_name="handlers" + + get_pod_config_dir +} + +@test "Running with postStart and preStop handlers" { + # Create yaml + sed -e "s/\${nginx_version}/${nginx_image}/" \ + "${pod_config_dir}/lifecycle-events.yaml" > "${pod_config_dir}/test-lifecycle-events.yaml" + + # Create the pod with postStart and preStop handlers + kubectl create -f "${pod_config_dir}/test-lifecycle-events.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name + + # Check postStart message + display_message="cat /usr/share/message" + check_postStart=$(kubectl exec $pod_name -- sh -c "$display_message" | grep "Hello from the postStart handler") +} + +teardown(){ + # Debugging information + kubectl describe "pod/$pod_name" + + rm -f "${pod_config_dir}/test-lifecycle-events.yaml" + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-caps.bats b/tests/integration/kubernetes/k8s-caps.bats new file mode 100644 index 000000000..3126af640 --- /dev/null +++ b/tests/integration/kubernetes/k8s-caps.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2021 Apple Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="pod-caps" + get_pod_config_dir +# We expect the capabilities mask to vary per distribution, runtime +# configuration. Even for this, we should expect a few common items to +# not be set in the mask unless we are failing to apply capabilities.
If +# we fail to configure, we'll see all bits set for permitted: 0x03fffffffff +# We do expect certain parts of the mask to be common when we set appropriately: +# b20..b23 should be cleared for all (no CAP_SYS_{PACCT, ADMIN, NICE, BOOT}) +# b0..b11 are consistent across the distros: +# 0x5fb: 0101 1111 1011 +# | | \- should be cleared (CAP_DAC_READ_SEARCH) +# | \- should be cleared (CAP_LINUX_IMMUTABLE) +# \- should be cleared (CAP_NET_BROADCAST) +# Example match: +# CapPrm: 00000000a80425fb + expected="CapPrm.*..0..5fb$" +} + +@test "Check capabilities of pod" { + # Create pod + kubectl create -f "${pod_config_dir}/pod-caps.yaml" + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Verify expected capabilities for the running container. Add retry to ensure + # that the container had time to execute: + wait_time=5 + sleep_time=1 + cmd="kubectl logs $pod_name | grep -q $expected" + waitForProcess "$wait_time" "$sleep_time" "$cmd" + + # Verify expected capabilities from exec context: + kubectl exec "$pod_name" -- sh -c "cat /proc/self/status" | grep -q "$expected" +} + +teardown() { + # Debugging information + echo "expected capability mask:" + echo "$expected" + echo "observed: " + kubectl logs "pod/$pod_name" + kubectl exec "$pod_name" -- sh -c "cat /proc/self/status | grep Cap" + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-configmap.bats b/tests/integration/kubernetes/k8s-configmap.bats new file mode 100644 index 000000000..6809ba130 --- /dev/null +++ b/tests/integration/kubernetes/k8s-configmap.bats @@ -0,0 +1,43 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir +} + +@test "ConfigMap for a pod" { + config_name="test-configmap" + pod_name="config-env-test-pod" + + # Create ConfigMap + kubectl create -f "${pod_config_dir}/configmap.yaml" + + # View the values of the keys + kubectl get configmaps $config_name -o yaml | grep -q "data-" + + # Create a pod that consumes the ConfigMap + kubectl create -f "${pod_config_dir}/pod-configmap.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check env + cmd="env" + kubectl exec $pod_name -- sh -c $cmd | grep "KUBE_CONFIG_1=value-1" + kubectl exec $pod_name -- sh -c $cmd | grep "KUBE_CONFIG_2=value-2" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" + kubectl delete configmap "$config_name" +} diff --git a/tests/integration/kubernetes/k8s-copy-file.bats b/tests/integration/kubernetes/k8s-copy-file.bats new file mode 100644 index 000000000..0106e12c3 --- /dev/null +++ b/tests/integration/kubernetes/k8s-copy-file.bats @@ -0,0 +1,83 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir + file_name="file.txt" + content="Hello" +} + +@test "Copy file in a pod" { + # Create pod + pod_name="pod-copy-file-from-host" + ctr_name="ctr-copy-file-from-host" + + pod_config=$(mktemp --tmpdir pod_config.XXXXXX.yaml) + cp "$pod_config_dir/busybox-template.yaml" "$pod_config" + sed -i "s/POD_NAME/$pod_name/" "$pod_config" + sed -i "s/CTR_NAME/$ctr_name/" "$pod_config" + + kubectl create -f 
"${pod_config}" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name + + # Create a file + echo "$content" > "$file_name" + + # Copy file into a pod + kubectl cp "$file_name" $pod_name:/tmp + + # Print environment variables + kubectl exec $pod_name -- sh -c "cat /tmp/$file_name | grep $content" +} + +@test "Copy from pod to host" { + # Create pod + pod_name="pod-copy-file-to-host" + ctr_name="ctr-copy-file-to-host" + + pod_config=$(mktemp --tmpdir pod_config.XXXXXX.yaml) + cp "$pod_config_dir/busybox-template.yaml" "$pod_config" + sed -i "s/POD_NAME/$pod_name/" "$pod_config" + sed -i "s/CTR_NAME/$ctr_name/" "$pod_config" + + kubectl create -f "${pod_config}" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name + + kubectl logs "$pod_name" || true + kubectl describe pod "$pod_name" || true + kubectl get pods --all-namespaces + + # Create a file in the pod + kubectl exec "$pod_name" -- sh -c "cd /tmp && echo $content > $file_name" + + kubectl logs "$pod_name" || true + kubectl describe pod "$pod_name" || true + kubectl get pods --all-namespaces + + # Copy file from pod to host + kubectl cp "$pod_name":/tmp/"$file_name" "$file_name" + + # Verify content + cat "$file_name" | grep "$content" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + rm -f "$file_name" + kubectl delete pod "$pod_name" + + rm -f "$pod_config" +} diff --git a/tests/integration/kubernetes/k8s-cpu-ns.bats b/tests/integration/kubernetes/k8s-cpu-ns.bats new file mode 100644 index 000000000..4d5f2e883 --- /dev/null +++ b/tests/integration/kubernetes/k8s-cpu-ns.bats @@ -0,0 +1,82 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + [ "${KATA_HYPERVISOR}" == "dragonball" ] && skip "test not working see: ${dragonball_limitations}" + [ "${KATA_HYPERVISOR}" == "qemu-tdx" ] && skip "TEEs do not support memory / CPU hotplug" + + pod_name="constraints-cpu-test" + container_name="first-cpu-container" + sharessyspath="/sys/fs/cgroup/cpu/cpu.shares" + quotasyspath="/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + periodsyspath="/sys/fs/cgroup/cpu/cpu.cfs_period_us" + total_cpus=2 + total_requests=512 + total_cpu_container=1 + + get_pod_config_dir +} + +@test "Check CPU constraints" { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + [ "${KATA_HYPERVISOR}" == "dragonball" ] && skip "test not working see: ${dragonball_limitations}" + [ "${KATA_HYPERVISOR}" == "qemu-tdx" ] && skip "TEEs do not support memory / CPU hotplug" + + # Create the pod + kubectl create -f "${pod_config_dir}/pod-cpu.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + retries="10" + + num_cpus_cmd='grep -e "^processor" /proc/cpuinfo |wc -l' + # Check the total of cpus + for _ in $(seq 1 "$retries"); do + # Get number of cpus + total_cpus_container=$(kubectl exec pod/"$pod_name" -c "$container_name" \ + -- sh -c "$num_cpus_cmd") + # Verify number of cpus + [ "$total_cpus_container" -le "$total_cpus" ] + [ "$total_cpus_container" -eq "$total_cpus" ] && break + sleep 1 + done + [ "$total_cpus_container" -eq "$total_cpus" ] + + # Check the total of requests + total_requests_container=$(kubectl exec $pod_name -c 
$container_name \ + -- sh -c "cat $sharessyspath") + + [ "$total_requests_container" -eq "$total_requests" ] + + # Check the cpus inside the container + + total_cpu_quota=$(kubectl exec $pod_name -c $container_name \ + -- sh -c "cat $quotasyspath") + + total_cpu_period=$(kubectl exec $pod_name -c $container_name \ + -- sh -c "cat $periodsyspath") + + division_quota_period=$(echo $((total_cpu_quota/total_cpu_period))) + + [ "$division_quota_period" -eq "$total_cpu_container" ] +} + +teardown() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + [ "${KATA_HYPERVISOR}" == "dragonball" ] && skip "test not working see: ${dragonball_limitations}" + [ "${KATA_HYPERVISOR}" == "qemu-tdx" ] && skip "TEEs do not support memory / CPU hotplug" + + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-credentials-secrets.bats b/tests/integration/kubernetes/k8s-credentials-secrets.bats new file mode 100644 index 000000000..51d2ba995 --- /dev/null +++ b/tests/integration/kubernetes/k8s-credentials-secrets.bats @@ -0,0 +1,62 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + get_pod_config_dir +} + +@test "Credentials using secrets" { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + secret_name="test-secret" + pod_name="secret-test-pod" + second_pod_name="secret-envars-test-pod" + + # Create the secret + kubectl create -f "${pod_config_dir}/inject_secret.yaml" + + # View information about the secret + kubectl get secret "${secret_name}" -o yaml | grep "type: Opaque" + + # Create a pod that has access to the secret through a volume + kubectl create -f "${pod_config_dir}/pod-secret.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # List the files + cmd="ls /tmp/secret-volume" + kubectl exec $pod_name -- sh -c "$cmd" | grep -w "password" + kubectl exec $pod_name -- sh -c "$cmd" | grep -w "username" + + # Create a pod that has access to the secret data through environment variables + kubectl create -f "${pod_config_dir}/pod-secret-env.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$second_pod_name" + + # Display environment variables + second_cmd="printenv" + kubectl exec $second_pod_name -- sh -c "$second_cmd" | grep -w "SECRET_USERNAME" + kubectl exec $second_pod_name -- sh -c "$second_cmd" | grep -w "SECRET_PASSWORD" +} + +teardown() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + # Debugging information + kubectl describe "pod/$pod_name" + kubectl describe "pod/$second_pod_name" + + kubectl delete pod "$pod_name" "$second_pod_name" + kubectl delete secret "$secret_name" +} diff --git a/tests/integration/kubernetes/k8s-custom-dns.bats b/tests/integration/kubernetes/k8s-custom-dns.bats new file mode 100644 index 000000000..aa2532364 --- /dev/null +++ b/tests/integration/kubernetes/k8s-custom-dns.bats @@ -0,0 +1,34 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load 
"${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="custom-dns-test" + file_name="/etc/resolv.conf" + get_pod_config_dir +} + +@test "Check custom dns" { + # Create the pod + kubectl create -f "${pod_config_dir}/pod-custom-dns.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name + + # Check dns config at /etc/resolv.conf + kubectl exec "$pod_name" -- cat "$file_name" | grep -q "nameserver 1.2.3.4" + kubectl exec "$pod_name" -- cat "$file_name" | grep -q "search dns.test.search" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-empty-dirs.bats b/tests/integration/kubernetes/k8s-empty-dirs.bats new file mode 100644 index 000000000..0bf901caa --- /dev/null +++ b/tests/integration/kubernetes/k8s-empty-dirs.bats @@ -0,0 +1,74 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +assert_equal() { + local expected=$1 + local actual=$2 + if [[ "$expected" != "$actual" ]]; then + echo "expected: $expected, got: $actual" + return 1 + fi +} + +setup() { + pod_name="sharevol-kata" + get_pod_config_dir + pod_logs_file="" +} + +@test "Empty dir volumes" { + # Create the pod + kubectl create -f "${pod_config_dir}/pod-empty-dir.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check volume mounts + cmd="mount | grep cache" + kubectl exec $pod_name -- sh -c "$cmd" | grep "/tmp/cache type tmpfs" + + # Check it can write up to the volume limit (50M) + cmd="dd if=/dev/zero of=/tmp/cache/file1 bs=1M count=50; echo $?" + kubectl exec $pod_name -- sh -c "$cmd" | tail -1 | grep 0 +} + +@test "Empty dir volume when FSGroup is specified with non-root container" { + # This is a reproducer of k8s e2e "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is non-root" test + pod_file="${pod_config_dir}/pod-empty-dir-fsgroup.yaml" + agnhost_name="${container_images_agnhost_name}" + agnhost_version="${container_images_agnhost_version}" + image="${agnhost_name}:${agnhost_version}" + + # Try to avoid timeout by prefetching the image. + sed -e "s#\${agnhost_image}#${image}#" "$pod_file" |\ + kubectl create -f - + cmd="kubectl get pods ${pod_name} | grep Completed" + waitForProcess "${wait_time}" "${sleep_time}" "${cmd}" + + pod_logs_file="$(mktemp)" + for container in mounttest-container mounttest-container-2; do + kubectl logs "$pod_name" "$container" > "$pod_logs_file" + # Check owner UID of file + uid=$(cat $pod_logs_file | grep 'owner UID of' | sed 's/.*:\s//') + assert_equal "1001" "$uid" + # Check owner GID of file + gid=$(cat $pod_logs_file | grep 'owner GID of' | sed 's/.*:\s//') + assert_equal "123" "$gid" + done +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" + + [ ! 
-f "$pod_logs_file" ] || rm -f "$pod_logs_file" +} diff --git a/tests/integration/kubernetes/k8s-env.bats b/tests/integration/kubernetes/k8s-env.bats new file mode 100644 index 000000000..ee09d10f2 --- /dev/null +++ b/tests/integration/kubernetes/k8s-env.bats @@ -0,0 +1,40 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="test-env" + get_pod_config_dir +} + +@test "Environment variables" { + # Create pod + kubectl create -f "${pod_config_dir}/pod-env.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Print environment variables + cmd="printenv" + kubectl exec $pod_name -- sh -c $cmd | grep "MY_POD_NAME=$pod_name" + kubectl exec $pod_name -- sh -c $cmd | \ + grep "HOST_IP=\([0-9]\+\(\.\|$\)\)\{4\}" + # Requested 32Mi of memory + kubectl exec $pod_name -- sh -c $cmd | \ + grep "MEMORY_REQUESTS=$((1024 * 1024 * 32))" + # Memory limits allocated by the node + kubectl exec $pod_name -- sh -c $cmd | grep "MEMORY_LIMITS=[1-9]\+" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-exec.bats b/tests/integration/kubernetes/k8s-exec.bats new file mode 100644 index 000000000..aa14d7160 --- /dev/null +++ b/tests/integration/kubernetes/k8s-exec.bats @@ -0,0 +1,65 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2020 Ant Financial +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir + pod_name="busybox" + first_container_name="first-test-container" + second_container_name="second-test-container" +} + +@test "Kubectl exec" { + # Create the pod + kubectl create -f "${pod_config_dir}/busybox-pod.yaml" + + # Get pod specification + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Run commands in Pod + ## Cases for -it options + # TODO: enable -i option after updated to new CRI-O + # see: https://github.com/kata-containers/tests/issues/2770 + # kubectl exec -i "$pod_name" -- ls -tl / + # kubectl exec -it "$pod_name" -- ls -tl / + kubectl exec "$pod_name" -- date + + ## Case for stdin + kubectl exec -i "$pod_name" -- sh <<-EOF +echo abc > /tmp/abc.txt +grep abc /tmp/abc.txt +exit +EOF + + ## Case for return value + ### Command return non-zero code + run bash -c "kubectl exec -i $pod_name -- sh <<-EOF +exit 123 +EOF" + echo "run status: $status" 1>&2 + echo "run output: $output" 1>&2 + [ "$status" -eq 123 ] + + ## Cases for target container + ### First container + container_name=$(kubectl exec $pod_name -c $first_container_name -- env | grep CONTAINER_NAME) + [ "$container_name" == "CONTAINER_NAME=$first_container_name" ] + + ### Second container + container_name=$(kubectl exec $pod_name -c $second_container_name -- env | grep CONTAINER_NAME) + [ "$container_name" == "CONTAINER_NAME=$second_container_name" ] + +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-footloose.bats b/tests/integration/kubernetes/k8s-footloose.bats new file mode 100644 index 000000000..b8b10db7a --- /dev/null +++ b/tests/integration/kubernetes/k8s-footloose.bats @@ -0,0 +1,58 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="footubuntu" + config_name="ssh-config-map" + get_pod_config_dir + + # Creates ssh-key + key_path=$(mktemp --tmpdir) + public_key_path="${key_path}.pub" + echo -e 'y\n' | sudo ssh-keygen -t rsa -N "" -f "$key_path" + + # Create ConfigMap.yaml + configmap_yaml="${pod_config_dir}/footloose-rsa-configmap.yaml" + sed -e "/\${ssh_key}/r ${public_key_path}" -e "/\${ssh_key}/d" \ + "${pod_config_dir}/footloose-configmap.yaml" > "$configmap_yaml" + sed -i 's/ssh-rsa/ ssh-rsa/' "$configmap_yaml" +} + +@test "Footloose pod" { + cmd="uname -r" + sleep_connect="10" + + # Create ConfigMap + kubectl create -f "$configmap_yaml" + + # Create pod + kubectl create -f "${pod_config_dir}/pod-footloose.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Get pod ip + pod_ip=$(kubectl get pod $pod_name --template={{.status.podIP}}) + + # Exec to the pod + kubectl exec $pod_name -- sh -c "$cmd" + + # Connect to the VM + sleep "$sleep_connect" + ssh -i "$key_path" -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no 2>/dev/null root@"$pod_ip" "$cmd" +} + +teardown() { + kubectl delete pod "$pod_name" + kubectl delete configmap "$config_name" + sudo rm -rf "$public_key_path" + sudo rm -rf "$key_path" + sudo rm -rf "$configmap_yaml" +} diff --git a/tests/integration/kubernetes/k8s-inotify.bats b/tests/integration/kubernetes/k8s-inotify.bats new file mode 100644 index 000000000..f3dbc073f --- /dev/null +++ b/tests/integration/kubernetes/k8s-inotify.bats @@ -0,0 +1,46 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2021 Apple Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + get_pod_config_dir +} + +@test "configmap update works, and preserves symlinks" { + pod_name="inotify-configmap-testing" + + # Create configmap for my deployment + kubectl apply -f "${pod_config_dir}"/inotify-configmap.yaml + + # Create deployment that expects identity-certs + kubectl apply -f "${pod_config_dir}"/inotify-configmap-pod.yaml + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Update configmap + kubectl apply -f "${pod_config_dir}"/inotify-updated-configmap.yaml + + # Ideally we'd wait for the pod to complete... 
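+    # The container runs "inotifywait --timeout 120" (see inotify-configmap-pod.yaml), so wait out that full window before checking the result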
+ sleep 120 + + # Verify we saw the update + result=$(kubectl get pod "$pod_name" --output="jsonpath={.status.containerStatuses[]}") + echo $result | grep -vq Error + + kubectl delete configmap cm +} + + + +teardown() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + # Debugging information + kubectl describe "pod/$pod_name" + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-job.bats b/tests/integration/kubernetes/k8s-job.bats new file mode 100644 index 000000000..e1fd3cc38 --- /dev/null +++ b/tests/integration/kubernetes/k8s-job.bats @@ -0,0 +1,49 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir +} + +@test "Run a job to completion" { + job_name="job-pi-test" + + # Create job + kubectl apply -f "${pod_config_dir}/job.yaml" + + # Verify job + kubectl describe jobs/"$job_name" | grep "SuccessfulCreate" + + # List pods that belong to the job + pod_name=$(kubectl get pods --selector=job-name=$job_name --output=jsonpath='{.items[*].metadata.name}') + + # Verify that the job is completed + cmd="kubectl get pods -o jsonpath='{.items[*].status.phase}' | grep Succeeded" + waitForProcess "$wait_time" "$sleep_time" "$cmd" + + # Verify the output of the pod + pi_number="3.14" + kubectl logs "$pod_name" | grep "$pi_number" +} + +teardown() { + kubectl delete pod "$pod_name" + # Verify that pod is not running + run kubectl get pods + echo "$output" + [[ "$output" =~ "No resources found" ]] + + + kubectl delete jobs/"$job_name" + # Verify that the job is not running + run kubectl get jobs + echo "$output" + [[ "$output" =~ "No resources found" ]] +} diff --git a/tests/integration/kubernetes/k8s-kill-all-process-in-container.bats b/tests/integration/kubernetes/k8s-kill-all-process-in-container.bats new file mode 100644 index 000000000..5081b8d7d --- /dev/null +++ b/tests/integration/kubernetes/k8s-kill-all-process-in-container.bats @@ -0,0 +1,37 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2022 AntGroup Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="busybox" + first_container_name="first-test-container" + + get_pod_config_dir +} + +@test "Check PID namespaces" { + # Create the pod + kubectl create -f "${pod_config_dir}/initcontainer-shareprocesspid.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name + + # Check PID from first container + first_pid_container=$(kubectl exec $pod_name -c $first_container_name \ + -- ps | grep "tail" || true) + # Verify that the tail process didn't exist + [ -z $first_pid_container ] || die "found processes pid: $first_pid_container" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-limit-range.bats b/tests/integration/kubernetes/k8s-limit-range.bats new file mode 100644 index 000000000..7e5686c36 --- /dev/null +++ b/tests/integration/kubernetes/k8s-limit-range.bats @@ -0,0 +1,41 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir + 
namespace_name="default-cpu-example" + pod_name="default-cpu-test" +} + +@test "Limit range for storage" { + # Create namespace + kubectl create namespace "$namespace_name" + + # Create the LimitRange in the namespace + kubectl create -f "${pod_config_dir}/limit-range.yaml" --namespace=${namespace_name} + + # Create the pod + kubectl create -f "${pod_config_dir}/pod-cpu-defaults.yaml" --namespace=${namespace_name} + + # Get pod specification + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" --namespace="$namespace_name" + + # Check limits + # Find the 500 millicpus specified at the yaml + kubectl describe pod "$pod_name" --namespace="$namespace_name" | grep "500m" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" + kubectl delete namespaces "$namespace_name" +} diff --git a/tests/integration/kubernetes/k8s-liveness-probes.bats b/tests/integration/kubernetes/k8s-liveness-probes.bats new file mode 100644 index 000000000..5c8a736e7 --- /dev/null +++ b/tests/integration/kubernetes/k8s-liveness-probes.bats @@ -0,0 +1,80 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + sleep_liveness=20 + agnhost_name="${container_images_agnhost_name}" + agnhost_version="${container_images_agnhost_version}" + + get_pod_config_dir +} + +@test "Liveness probe" { + pod_name="liveness-exec" + + # Create pod + kubectl create -f "${pod_config_dir}/pod-liveness.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check liveness probe returns a success code + kubectl describe pod "$pod_name" | grep -E "Liveness|#success=1" + + # Sleep necessary to check liveness probe returns a failure code + sleep "$sleep_liveness" + kubectl describe pod "$pod_name" | grep "Liveness probe failed" +} + +@test "Liveness http probe" { + pod_name="liveness-http" + + # Create pod + sed -e "s#\${agnhost_image}#${agnhost_name}:${agnhost_version}#" \ + "${pod_config_dir}/pod-http-liveness.yaml" |\ + kubectl create -f - + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check liveness probe returns a success code + kubectl describe pod "$pod_name" | grep -E "Liveness|#success=1" + + # Sleep necessary to check liveness probe returns a failure code + sleep "$sleep_liveness" + kubectl describe pod "$pod_name" | grep "Started container" +} + + +@test "Liveness tcp probe" { + pod_name="tcptest" + + # Create pod + sed -e "s#\${agnhost_image}#${agnhost_name}:${agnhost_version}#" \ + "${pod_config_dir}/pod-tcp-liveness.yaml" |\ + kubectl create -f - + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check liveness probe returns a success code + kubectl describe pod "$pod_name" | grep -E "Liveness|#success=1" + + # Sleep necessary to check liveness probe returns a failure code + sleep "$sleep_liveness" + kubectl describe pod "$pod_name" | grep "Started container" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-memory.bats b/tests/integration/kubernetes/k8s-memory.bats new file mode 100644 index 000000000..5bcffaab4 --- /dev/null +++ b/tests/integration/kubernetes/k8s-memory.bats @@ -0,0 +1,56 @@ +#!/usr/bin/env bats +# +# 
Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="memory-test" + get_pod_config_dir +} + +setup_yaml() { + sed \ + -e "s/\${memory_size}/${memory_limit_size}/" \ + -e "s/\${memory_allocated}/${allocated_size}/" \ + "${pod_config_dir}/pod-memory-limit.yaml" +} + + +@test "Exceeding memory constraints" { + memory_limit_size="50Mi" + allocated_size="250M" + # Create test .yaml + setup_yaml > "${pod_config_dir}/test_exceed_memory.yaml" + + # Create the pod exceeding memory constraints + run kubectl create -f "${pod_config_dir}/test_exceed_memory.yaml" + [ "$status" -ne 0 ] + + rm -f "${pod_config_dir}/test_exceed_memory.yaml" +} + +@test "Running within memory constraints" { + memory_limit_size="600Mi" + allocated_size="150M" + # Create test .yaml + setup_yaml > "${pod_config_dir}/test_within_memory.yaml" + + # Create the pod within memory constraints + kubectl create -f "${pod_config_dir}/test_within_memory.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + rm -f "${pod_config_dir}/test_within_memory.yaml" + kubectl delete pod "$pod_name" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" || true +} diff --git a/tests/integration/kubernetes/k8s-nested-configmap-secret.bats b/tests/integration/kubernetes/k8s-nested-configmap-secret.bats new file mode 100644 index 000000000..b84fb89cc --- /dev/null +++ b/tests/integration/kubernetes/k8s-nested-configmap-secret.bats @@ -0,0 +1,39 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2021 IBM Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + get_pod_config_dir + + pod_name="nested-configmap-secret-pod" +} + +@test "Nested mount of a secret volume in a configmap volume for a pod" { + # Creates a configmap, secret and pod that mounts the secret inside the configmap + kubectl create -f "${pod_config_dir}/pod-nested-configmap-secret.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check config/secret value are correct + [ "myconfig" == $(kubectl exec $pod_name -- cat /config/config_key) ] + [ "mysecret" == $(kubectl exec $pod_name -- cat /config/secret/secret_key) ] +} + +teardown() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + # Debugging information + kubectl describe "pod/$pod_name" + + # Delete the configmap, secret, and pod used for testing + kubectl delete -f "${pod_config_dir}/pod-nested-configmap-secret.yaml" +} diff --git a/tests/integration/kubernetes/k8s-nginx-connectivity.bats b/tests/integration/kubernetes/k8s-nginx-connectivity.bats new file mode 100644 index 000000000..bc7271dc8 --- /dev/null +++ b/tests/integration/kubernetes/k8s-nginx-connectivity.bats @@ -0,0 +1,53 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + nginx_version="${docker_images_nginx_version}" + nginx_image="nginx:$nginx_version" + busybox_image="busybox" + deployment="nginx-deployment" + + get_pod_config_dir +} + +@test "Verify nginx connectivity between 
pods" { + + # Create test .yaml + sed -e "s/\${nginx_version}/${nginx_image}/" \ + "${pod_config_dir}/${deployment}.yaml" > "${pod_config_dir}/test-${deployment}.yaml" + + kubectl create -f "${pod_config_dir}/test-${deployment}.yaml" + kubectl wait --for=condition=Available --timeout=$timeout deployment/${deployment} + kubectl expose deployment/${deployment} + + busybox_pod="test-nginx" + kubectl run $busybox_pod --restart=Never -it --image="$busybox_image" \ + -- sh -c 'i=1; while [ $i -le '"$wait_time"' ]; do wget --timeout=5 '"$deployment"' && break; sleep 1; i=$(expr $i + 1); done' + + # check pod's status, it should be Succeeded. + # or {.status.containerStatuses[0].state.terminated.reason} = "Completed" + [ $(kubectl get pods/$busybox_pod -o jsonpath="{.status.phase}") = "Succeeded" ] + kubectl logs "$busybox_pod" | grep "index.html" +} + +teardown() { + # Debugging information + kubectl describe "pod/$busybox_pod" + kubectl get "pod/$busybox_pod" -o yaml + kubectl logs "$busybox_pod" + kubectl get deployment/${deployment} -o yaml + kubectl get service/${deployment} -o yaml + kubectl get endpoints/${deployment} -o yaml + + rm -f "${pod_config_dir}/test-${deployment}.yaml" + kubectl delete deployment "$deployment" + kubectl delete service "$deployment" + kubectl delete pod "$busybox_pod" +} diff --git a/tests/integration/kubernetes/k8s-number-cpus.bats b/tests/integration/kubernetes/k8s-number-cpus.bats new file mode 100644 index 000000000..338963f6d --- /dev/null +++ b/tests/integration/kubernetes/k8s-number-cpus.bats @@ -0,0 +1,47 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="cpu-test" + container_name="c1" + get_pod_config_dir +} + +# Skip on aarch64 due to missing cpu hotplug related functionality. 
+@test "Check number of cpus" { + # Create pod + kubectl create -f "${pod_config_dir}/pod-number-cpu.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + retries="10" + max_number_cpus="3" + + num_cpus_cmd='cat /proc/cpuinfo |grep processor|wc -l' + for _ in $(seq 1 "$retries"); do + # Get number of cpus + number_cpus=$(kubectl exec pod/"$pod_name" -c "$container_name" \ + -- sh -c "$num_cpus_cmd") + if [[ "$number_cpus" =~ ^[0-9]+$ ]]; then + # Verify number of cpus + [ "$number_cpus" -le "$max_number_cpus" ] + [ "$number_cpus" -eq "$max_number_cpus" ] && break + fi + sleep 1 + done +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-oom.bats b/tests/integration/kubernetes/k8s-oom.bats new file mode 100644 index 000000000..f89b761f8 --- /dev/null +++ b/tests/integration/kubernetes/k8s-oom.bats @@ -0,0 +1,37 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2020 Ant Group +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="pod-oom" + get_pod_config_dir +} + +@test "Test OOM events for pods" { + # Create pod + kubectl create -f "${pod_config_dir}/$pod_name.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check if OOMKilled + cmd="kubectl get pods "$pod_name" -o jsonpath='{.status.containerStatuses[0].state.terminated.reason}' | grep OOMKilled" + + waitForProcess "$wait_time" "$sleep_time" "$cmd" + + rm -f "${pod_config_dir}/test_pod_oom.yaml" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + kubectl get "pod/$pod_name" -o yaml + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-optional-empty-configmap.bats b/tests/integration/kubernetes/k8s-optional-empty-configmap.bats new file mode 100644 index 000000000..05c779b77 --- /dev/null +++ b/tests/integration/kubernetes/k8s-optional-empty-configmap.bats @@ -0,0 +1,39 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2021 IBM Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir +} + +@test "Optional and Empty ConfigMap Volume for a pod" { + config_name="empty-config" + pod_name="optional-empty-config-test-pod" + + # Create Empty ConfigMap + kubectl create configmap "$config_name" + + # Create a pod that consumes the "empty-config" and "optional-missing-config" ConfigMaps as volumes + kubectl create -f "${pod_config_dir}/pod-optional-empty-configmap.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check configmap folders exist + kubectl exec $pod_name -- sh -c ls /empty-config + kubectl exec $pod_name -- sh -c ls /optional-missing-config +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" + kubectl delete configmap "$config_name" +} diff --git a/tests/integration/kubernetes/k8s-optional-empty-secret.bats b/tests/integration/kubernetes/k8s-optional-empty-secret.bats new file mode 100644 index 000000000..958603416 --- /dev/null +++ b/tests/integration/kubernetes/k8s-optional-empty-secret.bats @@ -0,0 +1,39 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2021 IBM Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + 
+load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir +} + +@test "Optional and Empty Secret Volume for a pod" { + secret_name="empty-secret" + pod_name="optional-empty-secret-test-pod" + + # Create Empty Secret + kubectl create secret generic "$secret_name" + + # Create a pod that consumes the "empty-secret" and "optional-missing-secret" Secrets as volumes + kubectl create -f "${pod_config_dir}/pod-optional-empty-secret.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check secret folders exist + kubectl exec $pod_name -- sh -c ls /empty-secret + kubectl exec $pod_name -- sh -c ls /optional-missing-secret +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" + kubectl delete secret "$secret_name" +} diff --git a/tests/integration/kubernetes/k8s-parallel.bats b/tests/integration/kubernetes/k8s-parallel.bats new file mode 100644 index 000000000..4408ea5e5 --- /dev/null +++ b/tests/integration/kubernetes/k8s-parallel.bats @@ -0,0 +1,48 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir + job_name="jobtest" + names=( "test1" "test2" "test3" ) +} + +@test "Parallel jobs" { + # Create yaml files + for i in "${names[@]}"; do + sed "s/\$ITEM/$i/" ${pod_config_dir}/job-template.yaml > ${pod_config_dir}/job-$i.yaml + done + + # Create the jobs + for i in "${names[@]}"; do + kubectl create -f "${pod_config_dir}/job-$i.yaml" + done + + # Check the jobs + kubectl get jobs -l jobgroup=${job_name} + + # Check the pods + kubectl wait --for=condition=Ready --timeout=$timeout pod -l jobgroup=${job_name} + + # Check output of the jobs + for i in $(kubectl get pods -l jobgroup=${job_name} -o name); do + kubectl logs ${i} + done +} + +teardown() { + # Delete jobs + kubectl delete jobs -l jobgroup=${job_name} + + # Remove generated yaml files + for i in "${names[@]}"; do + rm -f ${pod_config_dir}/job-$i.yaml + done +} diff --git a/tests/integration/kubernetes/k8s-pid-ns.bats b/tests/integration/kubernetes/k8s-pid-ns.bats new file mode 100644 index 000000000..8726af48b --- /dev/null +++ b/tests/integration/kubernetes/k8s-pid-ns.bats @@ -0,0 +1,48 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="busybox" + first_container_name="first-test-container" + second_container_name="second-test-container" + + get_pod_config_dir +} + +@test "Check PID namespaces" { + # Create the pod + kubectl create -f "${pod_config_dir}/busybox-pod.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name + + # Check PID from first container + first_pid_container=$(kubectl exec $pod_name -c $first_container_name \ + -- ps | grep "/pause") + # Verify that is not empty + check_first_pid=$(echo $first_pid_container | wc -l) + [ "$check_first_pid" == "1" ] + + # Check PID from second container + second_pid_container=$(kubectl exec $pod_name -c $second_container_name \ + -- ps | grep "/pause") + # Verify that is not empty + check_second_pid=$(echo $second_pid_container | wc -l) + [ "$check_second_pid" == "1" ] + + [ "$first_pid_container" == 
"$second_pid_container" ] +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-pod-quota.bats b/tests/integration/kubernetes/k8s-pod-quota.bats new file mode 100644 index 000000000..addc37bb3 --- /dev/null +++ b/tests/integration/kubernetes/k8s-pod-quota.bats @@ -0,0 +1,37 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir +} + +@test "Pod quota" { + resource_name="pod-quota" + deployment_name="deploymenttest" + namespace="test-quota-ns" + + # Create the resourcequota + kubectl create -f "${pod_config_dir}/resource-quota.yaml" + + # View information about resourcequota + kubectl get -n "$namespace" resourcequota "$resource_name" \ + --output=yaml | grep 'pods: "2"' + + # Create deployment + kubectl create -f "${pod_config_dir}/pod-quota-deployment.yaml" + + # View deployment + kubectl wait --for=condition=Available --timeout=$timeout \ + -n "$namespace" deployment/${deployment_name} +} + +teardown() { + kubectl delete -n "$namespace" deployment "$deployment_name" + kubectl delete -f "${pod_config_dir}/resource-quota.yaml" +} diff --git a/tests/integration/kubernetes/k8s-port-forward.bats b/tests/integration/kubernetes/k8s-port-forward.bats new file mode 100644 index 000000000..d46c15f42 --- /dev/null +++ b/tests/integration/kubernetes/k8s-port-forward.bats @@ -0,0 +1,71 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" +source "/etc/os-release" || source "/usr/lib/os-release" + +issue="https://github.com/kata-containers/runtime/issues/1834" + +setup() { + skip "test not working see: ${issue}" + get_pod_config_dir +} + +@test "Port forwarding" { + skip "test not working see: ${issue}" + deployment_name="redis-master" + + # Create deployment + kubectl apply -f "${pod_config_dir}/redis-master-deployment.yaml" + + # Check deployment + kubectl wait --for=condition=Available --timeout=$timeout deployment/"$deployment_name" + kubectl expose deployment/"$deployment_name" + + # Get pod name + pod_name=$(kubectl get pods --output=jsonpath={.items..metadata.name}) + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # View replicaset + kubectl get rs + + # Create service + kubectl apply -f "${pod_config_dir}/redis-master-service.yaml" + + # Check service + kubectl get svc | grep redis + + # Check redis service + port_redis=$(kubectl get pods $pod_name --template='{{(index (index .spec.containers 0).ports 0).containerPort}}{{"\n"}}') + + # Verify that redis is running in the pod and listening on port + port=6379 + [ "$port_redis" -eq "$port" ] + + # Forward a local port to a port on the pod + (2&>1 kubectl port-forward "$pod_name" 7000:"$port"> /dev/null) & + + # Run redis-cli + retries="10" + ok="0" + + for _ in $(seq 1 "$retries"); do + if sudo -E redis-cli -p 7000 ping | grep -q "PONG" ; then + ok="1" + break; + fi + sleep 1 + done + + [ "$ok" -eq "1" ] +} + +teardown() { + skip "test not working see: ${issue}" + kubectl delete -f "${pod_config_dir}/redis-master-deployment.yaml" + kubectl delete -f "${pod_config_dir}/redis-master-service.yaml" +} diff --git a/tests/integration/kubernetes/k8s-projected-volume.bats 
b/tests/integration/kubernetes/k8s-projected-volume.bats new file mode 100644 index 000000000..33788e475 --- /dev/null +++ b/tests/integration/kubernetes/k8s-projected-volume.bats @@ -0,0 +1,63 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + get_pod_config_dir +} + +@test "Projected volume" { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + password="1f2d1e2e67df" + username="admin" + pod_name="test-projected-volume" + + TMP_FILE=$(mktemp username.XXXX) + SECOND_TMP_FILE=$(mktemp password.XXXX) + + # Create files containing the username and password + echo "$username" > $TMP_FILE + echo "$password" > $SECOND_TMP_FILE + + # Package these files into secrets + kubectl create secret generic user --from-file=$TMP_FILE + kubectl create secret generic pass --from-file=$SECOND_TMP_FILE + + # Create pod + kubectl create -f "${pod_config_dir}/pod-projected-volume.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check that the projected sources exists + cmd="ls /projected-volume | grep username" + kubectl exec $pod_name -- sh -c "$cmd" + sec_cmd="ls /projected-volume | grep password" + kubectl exec $pod_name -- sh -c "$sec_cmd" + + # Check content of the projected sources + check_cmd="cat /projected-volume/username*" + kubectl exec $pod_name -- sh -c "$check_cmd" | grep "$username" + sec_check_cmd="cat /projected-volume/password*" + kubectl exec $pod_name -- sh -c "$sec_check_cmd" | grep "$password" +} + +teardown() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + # Debugging information + kubectl describe "pod/$pod_name" + + rm -f $TMP_FILE $SECOND_TMP_FILE + kubectl delete pod "$pod_name" + kubectl delete secret pass user +} diff --git a/tests/integration/kubernetes/k8s-qos-pods.bats b/tests/integration/kubernetes/k8s-qos-pods.bats new file mode 100644 index 000000000..6f1df43ef --- /dev/null +++ b/tests/integration/kubernetes/k8s-qos-pods.bats @@ -0,0 +1,58 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" +TEST_INITRD="${TEST_INITRD:-no}" + +# Not working on ARM CI see https://github.com/kata-containers/tests/issues/4727 +setup() { + get_pod_config_dir +} + +@test "Guaranteed QoS" { + pod_name="qos-test" + + # Create pod + kubectl create -f "${pod_config_dir}/pod-guaranteed.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check pod class + kubectl get pod "$pod_name" --output=yaml | grep "qosClass: Guaranteed" +} + +@test "Burstable QoS" { + pod_name="burstable-test" + + # Create pod + kubectl create -f "${pod_config_dir}/pod-burstable.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check pod class + kubectl get pod "$pod_name" --output=yaml | grep "qosClass: Burstable" +} + +@test "BestEffort QoS" { + pod_name="besteffort-test" + + # Create pod + kubectl create -f "${pod_config_dir}/pod-besteffort.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + 
# Check pod class + kubectl get pod "$pod_name" --output=yaml | grep "qosClass: BestEffort" +} + +teardown() { + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-replication.bats b/tests/integration/kubernetes/k8s-replication.bats new file mode 100644 index 000000000..e8f14e4dd --- /dev/null +++ b/tests/integration/kubernetes/k8s-replication.bats @@ -0,0 +1,62 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + nginx_version="${docker_images_nginx_version}" + nginx_image="nginx:$nginx_version" + + get_pod_config_dir +} + +@test "Replication controller" { + replication_name="replicationtest" + + # Create yaml + sed -e "s/\${nginx_version}/${nginx_image}/" \ + "${pod_config_dir}/replication-controller.yaml" > "${pod_config_dir}/test-replication-controller.yaml" + + # Create replication controller + kubectl create -f "${pod_config_dir}/test-replication-controller.yaml" + + # Check replication controller + local cmd="kubectl describe replicationcontrollers/$replication_name | grep replication-controller" + waitForProcess "$wait_time" "$sleep_time" "$cmd" + + number_of_replicas=$(kubectl get replicationcontrollers/"$replication_name" \ + --output=jsonpath='{.spec.replicas}') + [ "${number_of_replicas}" -gt 0 ] + + # The replicas pods can be in running, waiting, succeeded or failed + # status. We need them all on running state before proceed. + cmd="kubectl describe rc/\"${replication_name}\"" + cmd+="| grep \"Pods Status\" | grep \"${number_of_replicas} Running\"" + waitForProcess "$wait_time" "$sleep_time" "$cmd" + + # Check number of pods created for the + # replication controller is equal to the + # number of replicas that we defined + launched_pods=($(kubectl get pods --selector=app=nginx-rc-test \ + --output=jsonpath={.items..metadata.name})) + [ "${#launched_pods[@]}" -eq "$number_of_replicas" ] + + # Check pod creation + for pod_name in ${launched_pods[@]}; do + cmd="kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name" + waitForProcess "$wait_time" "$sleep_time" "$cmd" + done +} + +teardown() { + # Debugging information + kubectl describe replicationcontrollers/"$replication_name" + + rm -f "${pod_config_dir}/test-replication-controller.yaml" + kubectl delete rc "$replication_name" +} diff --git a/tests/integration/kubernetes/k8s-scale-nginx.bats b/tests/integration/kubernetes/k8s-scale-nginx.bats new file mode 100644 index 000000000..3f11236f3 --- /dev/null +++ b/tests/integration/kubernetes/k8s-scale-nginx.bats @@ -0,0 +1,36 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + nginx_version="${docker_images_nginx_version}" + nginx_image="nginx:$nginx_version" + replicas="3" + deployment="nginx-deployment" + get_pod_config_dir +} + +@test "Scale nginx deployment" { + + sed -e "s/\${nginx_version}/${nginx_image}/" \ + "${pod_config_dir}/${deployment}.yaml" > "${pod_config_dir}/test-${deployment}.yaml" + + kubectl create -f "${pod_config_dir}/test-${deployment}.yaml" + kubectl wait --for=condition=Available --timeout=$timeout deployment/${deployment} + kubectl expose deployment/${deployment} + kubectl scale deployment/${deployment} --replicas=${replicas} + cmd="kubectl get deployment/${deployment} -o yaml | grep 
'availableReplicas: ${replicas}'" + waitForProcess "$wait_time" "$sleep_time" "$cmd" +} + +teardown() { + rm -f "${pod_config_dir}/test-${deployment}.yaml" + kubectl delete deployment "$deployment" + kubectl delete service "$deployment" +} diff --git a/tests/integration/kubernetes/k8s-seccomp.bats b/tests/integration/kubernetes/k8s-seccomp.bats new file mode 100644 index 000000000..c6a840cb3 --- /dev/null +++ b/tests/integration/kubernetes/k8s-seccomp.bats @@ -0,0 +1,35 @@ +# +# Copyright (c) 2021 Red Hat +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="seccomp-container" + get_pod_config_dir +} + +@test "Support seccomp runtime/default profile" { + expected_seccomp_mode="2" + # Create pod + kubectl create -f "${pod_config_dir}/pod-seccomp.yaml" + + # Wait for it to complete + cmd="kubectl get pods ${pod_name} | grep Completed" + waitForProcess "${wait_time}" "${sleep_time}" "${cmd}" + + # Expect Seccomp on mode 2 (filter) + seccomp_mode="$(kubectl logs ${pod_name} | sed 's/Seccomp:\s*\([0-9]\)/\1/')" + [ "$seccomp_mode" -eq "$expected_seccomp_mode" ] +} + +teardown() { + # For debugging purposes + echo "seccomp mode is ${seccomp_mode}, expected $expected_seccomp_mode" + kubectl describe "pod/${pod_name}" + + kubectl delete -f "${pod_config_dir}/pod-seccomp.yaml" || true +} diff --git a/tests/integration/kubernetes/k8s-security-context.bats b/tests/integration/kubernetes/k8s-security-context.bats new file mode 100644 index 000000000..a8f9d7ba9 --- /dev/null +++ b/tests/integration/kubernetes/k8s-security-context.bats @@ -0,0 +1,35 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir +} + +@test "Security context" { + pod_name="security-context-test" + + # Create pod + kubectl create -f "${pod_config_dir}/pod-security-context.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check user + cmd="ps --user 1000 -f" + process="tail -f /dev/null" + kubectl exec $pod_name -- sh -c "$cmd" | grep "$process" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-shared-volume.bats b/tests/integration/kubernetes/k8s-shared-volume.bats new file mode 100644 index 000000000..5e3b6a270 --- /dev/null +++ b/tests/integration/kubernetes/k8s-shared-volume.bats @@ -0,0 +1,51 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir +} + +@test "Containers with shared volume" { + pod_name="test-shared-volume" + first_container_name="busybox-first-container" + second_container_name="busybox-second-container" + + # Create pod + kubectl create -f "${pod_config_dir}/pod-shared-volume.yaml" + + # Check pods + kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name + + # Verify the containers communicate through the shared volume + cmd="cat /tmp/pod-data" + msg="Hello from the $second_container_name" + kubectl exec "$pod_name" -c "$first_container_name" -- sh -c "$cmd" | grep "$msg" +} + +@test "initContainer with shared volume" { + pod_name="initcontainer-shared-volume" + last_container="last" + + # Create 
pod + kubectl create -f "${pod_config_dir}/initContainer-shared-volume.yaml" + + # Check pods + kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name + + cmd='test $(cat /volume/initContainer) -lt $(cat /volume/container)' + kubectl exec "$pod_name" -c "$last_container" -- sh -c "$cmd" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-sysctls.bats b/tests/integration/kubernetes/k8s-sysctls.bats new file mode 100644 index 000000000..aca6c50d1 --- /dev/null +++ b/tests/integration/kubernetes/k8s-sysctls.bats @@ -0,0 +1,34 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="sysctl-test" + get_pod_config_dir +} + +@test "Setting sysctl" { + # Create pod + kubectl apply -f "${pod_config_dir}/pod-sysctl.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name + + # Check sysctl configuration + cmd="cat /proc/sys/kernel/shm_rmid_forced" + result=$(kubectl exec $pod_name -- sh -c "$cmd") + [ "${result}" = 0 ] +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/run_kubernetes_tests.sh b/tests/integration/kubernetes/run_kubernetes_tests.sh new file mode 100755 index 000000000..db1e16633 --- /dev/null +++ b/tests/integration/kubernetes/run_kubernetes_tests.sh @@ -0,0 +1,68 @@ +#!/bin/bash +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +set -e + +kubernetes_dir=$(dirname "$(readlink -f "$0")") + +TARGET_ARCH="${TARGET_ARCH:-x86_64}" +KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}" +K8S_TEST_DEBUG="${K8S_TEST_DEBUG:-false}" + +if [ -n "${K8S_TEST_UNION:-}" ]; then + K8S_TEST_UNION=($K8S_TEST_UNION) +else + K8S_TEST_UNION=( \ + "k8s-attach-handlers.bats" \ + "k8s-caps.bats" \ + "k8s-configmap.bats" \ + "k8s-copy-file.bats" \ + "k8s-cpu-ns.bats" \ + "k8s-credentials-secrets.bats" \ + "k8s-custom-dns.bats" \ + "k8s-empty-dirs.bats" \ + "k8s-env.bats" \ + "k8s-exec.bats" \ + "k8s-inotify.bats" \ + "k8s-job.bats" \ + "k8s-kill-all-process-in-container.bats" \ + "k8s-limit-range.bats" \ + "k8s-liveness-probes.bats" \ + "k8s-memory.bats" \ + "k8s-nested-configmap-secret.bats" \ + "k8s-number-cpus.bats" \ + "k8s-oom.bats" \ + "k8s-optional-empty-configmap.bats" \ + "k8s-optional-empty-secret.bats" \ + "k8s-parallel.bats" \ + "k8s-pid-ns.bats" \ + "k8s-pod-quota.bats" \ + "k8s-port-forward.bats" \ + "k8s-projected-volume.bats" \ + "k8s-qos-pods.bats" \ + "k8s-replication.bats" \ + "k8s-scale-nginx.bats" \ + "k8s-seccomp.bats" \ + "k8s-sysctls.bats" \ + "k8s-security-context.bats" \ + "k8s-shared-volume.bats" \ + "k8s-nginx-connectivity.bats" \ + ) +fi + +# we may need to skip a few test cases when running on non-x86_64 arch +arch_config_file="${kubernetes_dir}/filter_out_per_arch/${TARGET_ARCH}.yaml" +if [ -f "${arch_config_file}" ]; then + arch_k8s_test_union=$(${kubernetes_dir}/filter_k8s_test.sh ${arch_config_file} "${K8S_TEST_UNION[*]}") + mapfile -d " " -t K8S_TEST_UNION <<< "${arch_k8s_test_union}" +fi + +info "Run tests" +for K8S_TEST_ENTRY in ${K8S_TEST_UNION[@]} +do + bats "${K8S_TEST_ENTRY}" +done diff --git a/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod.yaml 
b/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod.yaml new file mode 100644 index 000000000..9b5bb530c --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod.yaml @@ -0,0 +1,32 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: busybox +spec: + terminationGracePeriodSeconds: 0 + shareProcessNamespace: true + runtimeClassName: kata + containers: + - name: first-test-container + image: quay.io/prometheus/busybox:latest + env: + - name: CONTAINER_NAME + value: "first-test-container" + command: + - sleep + - "30" + - name: second-test-container + image: quay.io/prometheus/busybox:latest + env: + - name: CONTAINER_NAME + value: "second-test-container" + command: + - sleep + - "30" + stdin: true + tty: true diff --git a/tests/integration/kubernetes/runtimeclass_workloads/busybox-template.yaml b/tests/integration/kubernetes/runtimeclass_workloads/busybox-template.yaml new file mode 100644 index 000000000..a849e5c29 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/busybox-template.yaml @@ -0,0 +1,19 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: POD_NAME +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + shareProcessNamespace: true + containers: + - name: CTR_NAME + image: quay.io/prometheus/busybox:latest + command: + - sleep + - "120" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/configmap.yaml b/tests/integration/kubernetes/runtimeclass_workloads/configmap.yaml new file mode 100644 index 000000000..9d62e8ace --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/configmap.yaml @@ -0,0 +1,12 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-configmap +data: + data-1: value-1 + data-2: value-2 diff --git a/tests/integration/kubernetes/runtimeclass_workloads/footloose-configmap.yaml b/tests/integration/kubernetes/runtimeclass_workloads/footloose-configmap.yaml new file mode 100644 index 000000000..40a8bb881 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/footloose-configmap.yaml @@ -0,0 +1,12 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +data: + authorized_keys: | + ${ssh_key} +kind: ConfigMap +metadata: + name: ssh-config-map diff --git a/tests/integration/kubernetes/runtimeclass_workloads/initContainer-shared-volume.yaml b/tests/integration/kubernetes/runtimeclass_workloads/initContainer-shared-volume.yaml new file mode 100644 index 000000000..508261b33 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/initContainer-shared-volume.yaml @@ -0,0 +1,29 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: initcontainer-shared-volume +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + initContainers: + - name: first + image: quay.io/prometheus/busybox:latest + command: [ "sh", "-c", "echo ${EPOCHREALTIME//.} > /volume/initContainer" ] + volumeMounts: + - mountPath: /volume + name: volume + containers: + - name: last + image: quay.io/prometheus/busybox:latest + command: [ "sh", "-c", "echo ${EPOCHREALTIME//.} > /volume/container; tail -f /dev/null" ] + volumeMounts: + - mountPath: /volume + name: volume + 
volumes: + - name: volume + emptyDir: {} diff --git a/tests/integration/kubernetes/runtimeclass_workloads/initcontainer-shareprocesspid.yaml b/tests/integration/kubernetes/runtimeclass_workloads/initcontainer-shareprocesspid.yaml new file mode 100644 index 000000000..a3f20fae0 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/initcontainer-shareprocesspid.yaml @@ -0,0 +1,26 @@ +# +# Copyright (c) 2022 AntGroup Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: busybox +spec: + terminationGracePeriodSeconds: 0 + shareProcessNamespace: true + runtimeClassName: kata + initContainers: + - name: first + image: quay.io/prometheus/busybox:latest + command: [ "sh", "-c", "echo 'nohup tail -f /dev/null >/dev/null 2>&1 &' > /init.sh && chmod +x /init.sh && /init.sh" ] + containers: + - name: first-test-container + image: quay.io/prometheus/busybox:latest + env: + - name: CONTAINER_NAME + value: "first-test-container" + command: + - sleep + - "300" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/inject_secret.yaml b/tests/integration/kubernetes/runtimeclass_workloads/inject_secret.yaml new file mode 100644 index 000000000..ec42d7c6f --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/inject_secret.yaml @@ -0,0 +1,12 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Secret +metadata: + name: test-secret +data: + username: bXktYXBw + password: Mzk1MjgkdmRnN0pi diff --git a/tests/integration/kubernetes/runtimeclass_workloads/inotify-configmap-pod.yaml b/tests/integration/kubernetes/runtimeclass_workloads/inotify-configmap-pod.yaml new file mode 100644 index 000000000..c85240c94 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/inotify-configmap-pod.yaml @@ -0,0 +1,32 @@ +# +# Copyright (c) 2021 Apple Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: v1 +kind: Pod +metadata: + name: inotify-configmap-testing +spec: + containers: + - name: c1 + image: quay.io/kata-containers/fsnotify:latest + command: ["bash"] + args: ["-c", "inotifywait --timeout 120 -r /config/ && [[ -L /config/config.toml ]] && echo success" ] + resources: + requests: + cpu: 1 + memory: 50Mi + limits: + cpu: 1 + memory: 1024Mi + volumeMounts: + - name: config + mountPath: /config + runtimeClassName: kata + restartPolicy: Never + volumes: + - name: config + configMap: + name: cm diff --git a/tests/integration/kubernetes/runtimeclass_workloads/inotify-configmap.yaml b/tests/integration/kubernetes/runtimeclass_workloads/inotify-configmap.yaml new file mode 100644 index 000000000..02c01d749 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/inotify-configmap.yaml @@ -0,0 +1,13 @@ +# +# Copyright (c) 2021 Apple Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: v1 +data: + config.toml: |- + foo original... +kind: ConfigMap +metadata: + name: cm diff --git a/tests/integration/kubernetes/runtimeclass_workloads/inotify-updated-configmap.yaml b/tests/integration/kubernetes/runtimeclass_workloads/inotify-updated-configmap.yaml new file mode 100644 index 000000000..5442bdd17 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/inotify-updated-configmap.yaml @@ -0,0 +1,14 @@ +# +# Copyright (c) 2021 Apple Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: v1 +data: + config.toml: |- + foo original... + ... 
updated +kind: ConfigMap +metadata: + name: cm diff --git a/tests/integration/kubernetes/runtimeclass_workloads/job-template.yaml b/tests/integration/kubernetes/runtimeclass_workloads/job-template.yaml new file mode 100644 index 000000000..1e7760d95 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/job-template.yaml @@ -0,0 +1,25 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: batch/v1 +kind: Job +metadata: + name: process-item-$ITEM + labels: + jobgroup: jobtest +spec: + template: + metadata: + name: jobtest + labels: + jobgroup: jobtest + spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + restartPolicy: Never diff --git a/tests/integration/kubernetes/runtimeclass_workloads/job.yaml b/tests/integration/kubernetes/runtimeclass_workloads/job.yaml new file mode 100644 index 000000000..688667fc2 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/job.yaml @@ -0,0 +1,20 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: batch/v1 +kind: Job +metadata: + name: job-pi-test +spec: + template: + spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: pi + image: quay.io/prometheus/busybox:latest + command: ["/bin/sh", "-c", "echo 'scale=5; 4*a(1)' | bc -l"] + restartPolicy: Never + backoffLimit: 4 diff --git a/tests/integration/kubernetes/runtimeclass_workloads/kata-runtimeclass.yaml b/tests/integration/kubernetes/runtimeclass_workloads/kata-runtimeclass.yaml new file mode 100644 index 000000000..83bdfd2de --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/kata-runtimeclass.yaml @@ -0,0 +1,9 @@ +kind: RuntimeClass +apiVersion: node.k8s.io/v1 +metadata: + name: kata +handler: kata +overhead: + podFixed: + memory: "160Mi" + cpu: "250m" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/lifecycle-events.yaml b/tests/integration/kubernetes/runtimeclass_workloads/lifecycle-events.yaml new file mode 100644 index 000000000..7a8c731dc --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/lifecycle-events.yaml @@ -0,0 +1,23 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +apiVersion: v1 +kind: Pod +metadata: + name: handlers +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: handlers-container + image: quay.io/sjenning/${nginx_version} + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] + preStop: + exec: + command: ["/usr/sbin/nginx","-s","quit"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/limit-range.yaml b/tests/integration/kubernetes/runtimeclass_workloads/limit-range.yaml new file mode 100644 index 000000000..8f774a277 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/limit-range.yaml @@ -0,0 +1,16 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: LimitRange +metadata: + name: cpu-limit-range +spec: + limits: + - default: + cpu: 1 + defaultRequest: + cpu: 0.5 + type: Container diff --git a/tests/integration/kubernetes/runtimeclass_workloads/nginx-deployment.yaml b/tests/integration/kubernetes/runtimeclass_workloads/nginx-deployment.yaml new file mode 100644 index 000000000..5a63b09c5 
--- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/nginx-deployment.yaml @@ -0,0 +1,26 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 2 + template: + metadata: + labels: + app: nginx + spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: nginx + image: quay.io/sjenning/${nginx_version} + ports: + - containerPort: 80 diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-besteffort.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-besteffort.yaml new file mode 100644 index 000000000..49280f85c --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-besteffort.yaml @@ -0,0 +1,16 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: besteffort-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: qos-besteffort + image: quay.io/prometheus/busybox:latest + command: ["/bin/sh", "-c", "tail -f /dev/null"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-burstable.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-burstable.yaml new file mode 100644 index 000000000..aed6df794 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-burstable.yaml @@ -0,0 +1,21 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: burstable-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: qos-burstable + image: quay.io/prometheus/busybox:latest + command: ["/bin/sh", "-c", "tail -f /dev/null"] + resources: + limits: + memory: "200Mi" + requests: + memory: "100Mi" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-caps.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-caps.yaml new file mode 100644 index 000000000..1493315d6 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-caps.yaml @@ -0,0 +1,18 @@ +# +# Copyright (c) 2021 Apple Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: pod-caps +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-container + image: quay.io/prometheus/busybox:latest + command: ["sh"] + args: ["-c", "cat /proc/self/status | grep Cap && sleep infinity"] + restartPolicy: Never diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-configmap.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-configmap.yaml new file mode 100644 index 000000000..f0deed156 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-configmap.yaml @@ -0,0 +1,28 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: config-env-test-pod +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-container + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + env: + - name: KUBE_CONFIG_1 + valueFrom: + configMapKeyRef: + name: test-configmap + key: data-1 + - name: KUBE_CONFIG_2 + valueFrom: + configMapKeyRef: + name: test-configmap + key: data-2 + restartPolicy: Never diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-cpu-defaults.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-cpu-defaults.yaml new file mode 100644 index 000000000..5121c7459 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-cpu-defaults.yaml @@ -0,0 +1,16 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: default-cpu-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: default-cpu-demo-ctr + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-cpu.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-cpu.yaml new file mode 100644 index 000000000..cf04c4b50 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-cpu.yaml @@ -0,0 +1,23 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: constraints-cpu-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: first-cpu-container + image: quay.io/prometheus/busybox:latest + command: + - sleep + - "30" + resources: + limits: + cpu: "1" + requests: + cpu: "500m" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-custom-dns.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-custom-dns.yaml new file mode 100644 index 000000000..680577a5f --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-custom-dns.yaml @@ -0,0 +1,23 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + namespace: default + name: custom-dns-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + dnsPolicy: "None" + dnsConfig: + nameservers: + - 1.2.3.4 + searches: + - dns.test.search diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-empty-dir-fsgroup.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-empty-dir-fsgroup.yaml new file mode 100644 index 
000000000..e887cc92c --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-empty-dir-fsgroup.yaml @@ -0,0 +1,44 @@ +# +# Copyright (c) 2021 Red Hat, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: sharevol-kata +spec: + runtimeClassName: kata + restartPolicy: Never + securityContext: + runAsUser: 1001 + fsGroup: 123 + containers: + - name: mounttest-container + image: ${agnhost_image} + args: + - mounttest + - --fs_type=/test-volume + - --new_file_0660=/test-volume/test-file + - --file_perm=/test-volume/test-file + - --file_owner=/test-volume/test-file + volumeMounts: + - name: emptydir-volume + mountPath: /test-volume + - name: mounttest-container-2 + image: ${agnhost_image} + args: + - mounttest + - --fs_type=/test-volume-2 + - --new_file_0660=/test-volume-2/test-file + - --file_perm=/test-volume-2/test-file + - --file_owner=/test-volume-2/test-file + volumeMounts: + - name: mem-emptydir-volume + mountPath: /test-volume-2 + volumes: + - name: emptydir-volume + emptyDir: {} + - name: mem-emptydir-volume + emptyDir: + medium: Memory diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-empty-dir.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-empty-dir.yaml new file mode 100644 index 000000000..20dc02242 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-empty-dir.yaml @@ -0,0 +1,28 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: sharevol-kata +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + volumeMounts: + - name: host-empty-vol + mountPath: "/host/cache" + - name: memory-empty-vol + mountPath: "/tmp/cache" + volumes: + - name: host-empty-vol + emptyDir: {} + - name: memory-empty-vol + emptyDir: + medium: Memory + sizeLimit: "50M" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-env.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-env.yaml new file mode 100644 index 000000000..96c4ca60d --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-env.yaml @@ -0,0 +1,46 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: test-env +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-container + image: quay.io/prometheus/busybox:latest + command: [ "sh", "-c"] + args: + - while true; do + echo -en '\n'; + printenv MY_POD_NAME; + printenv HOST_IP; + printenv MEMORY_REQUESTS; + printenv MEMORY_LIMITS; + sleep 1; + done; + resources: + requests: + memory: "32Mi" + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: MEMORY_REQUESTS + valueFrom: + resourceFieldRef: + resource: requests.memory + - name: MEMORY_LIMITS + valueFrom: + resourceFieldRef: + resource: limits.memory + restartPolicy: Never diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-file-volume.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-file-volume.yaml new file mode 100644 index 000000000..4784b1477 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-file-volume.yaml @@ -0,0 +1,26 @@ +# +# Copyright (c) 2022 Ant Group +# +# SPDX-License-Identifier: 
Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: test-file-volume +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + restartPolicy: Never + volumes: + - name: shared-file + hostPath: + path: HOST_FILE + type: File + containers: + - name: busybox-file-volume-container + image: busybox + volumeMounts: + - name: shared-file + mountPath: MOUNT_PATH + command: ["/bin/sh"] + args: ["-c", "tail -f /dev/null"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-footloose.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-footloose.yaml new file mode 100644 index 000000000..9f427b27d --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-footloose.yaml @@ -0,0 +1,59 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: footubuntu +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + volumes: + - name: runv + emptyDir: + medium: "Memory" + - name: runlockv + emptyDir: + medium: "Memory" + - name: tmpv + emptyDir: + medium: "Memory" + - name: fakecgroup + hostPath: + path: /sys/fs/cgroup + - name: ssh-dir + emptyDir: + medium: "Memory" + - name: ssh-config-map + configMap: + name: ssh-config-map + defaultMode: 384 + containers: + - name: vmcontainer + image: quay.io/footloose/ubuntu18.04:latest + command: ["/sbin/init"] + volumeMounts: + - name: runv + mountPath: /run + - name: runlockv + mountPath: /run/lock + - name: tmpv + mountPath: /tmp + - name: fakecgroup + readOnly: true + mountPath: /sys/fs/cgroup + - name: ssh-dir + mountPath: /root/.ssh + - name: ssh-config-map + mountPath: /root/.ssh/authorized_keys + subPath: authorized_keys + # These containers are run during pod initialization + initContainers: + - name: install + image: quay.io/prometheus/busybox:latest + command: ["sh", "-c", "chmod 700 /root/.ssh"] + volumeMounts: + - name: ssh-dir + mountPath: /root/.ssh diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-guaranteed.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-guaranteed.yaml new file mode 100644 index 000000000..ee8893a1e --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-guaranteed.yaml @@ -0,0 +1,23 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: qos-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: qos-guaranteed + image: quay.io/prometheus/busybox:latest + command: ["/bin/sh", "-c", "tail -f /dev/null"] + resources: + limits: + memory: "200Mi" + cpu: "700m" + requests: + memory: "200Mi" + cpu: "700m" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-http-liveness.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-http-liveness.yaml new file mode 100644 index 000000000..3d336761f --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-http-liveness.yaml @@ -0,0 +1,25 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + labels: + test: liveness-test + name: liveness-http +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: liveness + image: ${agnhost_image} + args: + - liveness + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 3 + periodSeconds: 3 diff --git 
a/tests/integration/kubernetes/runtimeclass_workloads/pod-hugepage.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-hugepage.yaml new file mode 100644 index 000000000..8156f7bcb --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-hugepage.yaml @@ -0,0 +1,30 @@ +# +# Copyright (c) 2022 Ant Group +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: hugepage-pod +spec: + runtimeClassName: kata + containers: + - name: hugepage-container + image: quay.io/prometheus/busybox:latest + command: ["/bin/sh"] + args: ["-c", "tail -f /dev/null"] + volumeMounts: + - mountPath: /hugepages + name: hugepage + resources: + limits: + hugepages-${hugepages_size}: 512Mi + memory: 512Mi + requests: + hugepages-${hugepages_size}: 512Mi + memory: 512Mi + volumes: + - name: hugepage + emptyDir: + medium: HugePages diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-liveness.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-liveness.yaml new file mode 100644 index 000000000..fe2371c4a --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-liveness.yaml @@ -0,0 +1,28 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + labels: + test: liveness + name: liveness-exec +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: liveness + image: quay.io/prometheus/busybox:latest + args: + - /bin/sh + - -c + - touch /tmp/healthy; echo "Check status"; sleep 6; rm -rf /tmp/healthy; echo "Check dead"; sleep 12 + livenessProbe: + exec: + command: + - cat + - /tmp/healthy + initialDelaySeconds: 3 + periodSeconds: 3 diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-memory-limit.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-memory-limit.yaml new file mode 100644 index 000000000..fb8fbca48 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-memory-limit.yaml @@ -0,0 +1,23 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: memory-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: memory-test-ctr + image: quay.io/kata-containers/sysbench-kata:latest + imagePullPolicy: IfNotPresent + resources: + limits: + memory: "${memory_size}" + requests: + memory: "500Mi" + command: ["stress"] + args: ["--vm", "1", "--vm-bytes", "${memory_allocated}", "--vm-hang", "1"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-nested-configmap-secret.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-nested-configmap-secret.yaml new file mode 100644 index 000000000..4d76ab65b --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-nested-configmap-secret.yaml @@ -0,0 +1,44 @@ +# +# Copyright (c) 2021 IBM Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: ConfigMap +metadata: + name: config +data: + config_key: myconfig +--- +apiVersion: v1 +data: + secret_key: bXlzZWNyZXQ= #mysecret +kind: Secret +metadata: + name: secret +type: Opaque +--- +apiVersion: v1 +kind: Pod +metadata: + name: nested-configmap-secret-pod +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-container + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + volumeMounts: + - mountPath: /config + name: config + - 
mountPath: /config/secret + name: secret + volumes: + - name: secret + secret: + secretName: secret + - name: config + configMap: + name: config + restartPolicy: Never diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-number-cpu.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-number-cpu.yaml new file mode 100644 index 000000000..55f9597b2 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-number-cpu.yaml @@ -0,0 +1,27 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: cpu-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: c1 + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + resources: + limits: + cpu: "500m" + - name: c2 + image: quay.io/prometheus/busybox:latest + command: + - sleep + - "10" + resources: + limits: + cpu: "500m" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-oom.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-oom.yaml new file mode 100644 index 000000000..672c54e68 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-oom.yaml @@ -0,0 +1,25 @@ +# +# Copyright (c) 2020 Ant Group +# +# SPDX-License-Identifier: Apache-2.0 +# + +apiVersion: v1 +kind: Pod +metadata: + name: pod-oom + namespace: default +spec: + runtimeClassName: kata + restartPolicy: Never + containers: + - image: quay.io/kata-containers/sysbench-kata:latest + imagePullPolicy: IfNotPresent + name: oom-test + command: ["/bin/sh"] + args: ["-c", "sleep 2; stress --vm 2 --vm-bytes 400M --timeout 30s"] + resources: + limits: + memory: 500Mi + requests: + memory: 500Mi diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-optional-empty-configmap.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-optional-empty-configmap.yaml new file mode 100644 index 000000000..73008cf6b --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-optional-empty-configmap.yaml @@ -0,0 +1,30 @@ +# +# Copyright (c) 2021 IBM Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: optional-empty-config-test-pod +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-container + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + volumeMounts: + - mountPath: /empty-config + name: empty-config + - mountPath: /optional-missing-config + name: optional-missing-config + volumes: + - name: empty-config + configMap: + name: empty-config + - name: optional-missing-config + configMap: + name: optional-missing-config + optional: true + restartPolicy: Never \ No newline at end of file diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-optional-empty-secret.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-optional-empty-secret.yaml new file mode 100644 index 000000000..931db13ef --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-optional-empty-secret.yaml @@ -0,0 +1,30 @@ +# +# Copyright (c) 2021 IBM Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: optional-empty-secret-test-pod +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-container + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + volumeMounts: + - mountPath: 
/empty-secret + name: empty-secret + - mountPath: /optional-missing-secret + name: optional-missing-secret + volumes: + - name: empty-secret + secret: + secretName: empty-secret + - name: optional-missing-secret + secret: + secretName: optional-missing-secret + optional: true + restartPolicy: Never \ No newline at end of file diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-projected-volume.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-projected-volume.yaml new file mode 100644 index 000000000..66d954d2d --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-projected-volume.yaml @@ -0,0 +1,28 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: test-projected-volume +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-projected-volume + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + volumeMounts: + - name: all-in-one + mountPath: "/projected-volume" + readOnly: true + volumes: + - name: all-in-one + projected: + sources: + - secret: + name: user + - secret: + name: pass diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-quota-deployment.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-quota-deployment.yaml new file mode 100644 index 000000000..ecdaf5e64 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-quota-deployment.yaml @@ -0,0 +1,26 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: apps/v1 +kind: Deployment +metadata: + name: deploymenttest + namespace: test-quota-ns +spec: + selector: + matchLabels: + purpose: quota-demo + replicas: 2 + template: + metadata: + labels: + purpose: quota-demo + spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: pod-quota-demo + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-readonly-volume.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-readonly-volume.yaml new file mode 100644 index 000000000..8835bae99 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-readonly-volume.yaml @@ -0,0 +1,27 @@ +# +# Copyright (c) 2021 Ant Group +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: test-readonly-volume +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + restartPolicy: Never + volumes: + - name: shared-data + hostPath: + path: /tmp + type: Directory + containers: + - name: busybox-ro-volume-container + image: busybox + volumeMounts: + - name: shared-data + mountPath: /tmp + readOnly: true + command: ["/bin/sh"] + args: ["-c", "tail -f /dev/null"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-seccomp.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-seccomp.yaml new file mode 100644 index 000000000..5a00b7fca --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-seccomp.yaml @@ -0,0 +1,22 @@ +# +# Copyright (c) 2021 Red Hat +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: seccomp-container + annotations: + io.katacontainers.config.runtime.disable_guest_seccomp: "false" +spec: + runtimeClassName: kata + terminationGracePeriodSeconds: 0 + restartPolicy: Never + containers: + - name: busybox + image: 
quay.io/prometheus/busybox:latest + command: ["grep", "Seccomp:", "/proc/self/status"] + securityContext: + seccompProfile: + type: RuntimeDefault diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-secret-env.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-secret-env.yaml new file mode 100644 index 000000000..59ef3d264 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-secret-env.yaml @@ -0,0 +1,27 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: secret-envars-test-pod +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: envars-test-container + image: quay.io/prometheus/busybox:latest + command: ["/bin/sh", "-c", "tail -f /dev/null"] + env: + - name: SECRET_USERNAME + valueFrom: + secretKeyRef: + name: test-secret + key: username + - name: SECRET_PASSWORD + valueFrom: + secretKeyRef: + name: test-secret + key: password diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-secret.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-secret.yaml new file mode 100644 index 000000000..c5350ae3d --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-secret.yaml @@ -0,0 +1,25 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: secret-test-pod +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-container + image: quay.io/prometheus/busybox:latest + command: ["/bin/sh", "-c", "tail -f /dev/null"] + volumeMounts: + # name must match the volume name below + - name: secret-volume + mountPath: /tmp/secret-volume + # The secret data is exposed to Containers in the Pod through a Volume. 
+ volumes: + - name: secret-volume + secret: + secretName: test-secret diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-security-context.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-security-context.yaml new file mode 100644 index 000000000..60b92b79b --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-security-context.yaml @@ -0,0 +1,18 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: security-context-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + securityContext: + runAsUser: 1000 + containers: + - name: sec-text + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-shared-volume.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-shared-volume.yaml new file mode 100644 index 000000000..1f795e46f --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-shared-volume.yaml @@ -0,0 +1,31 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: test-shared-volume +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + restartPolicy: Never + volumes: + - name: shared-data + emptyDir: {} + containers: + - name: busybox-first-container + image: quay.io/prometheus/busybox:latest + volumeMounts: + - name: shared-data + mountPath: /tmp + command: ["/bin/sh"] + args: ["-c", "tail -f /dev/null"] + - name: busybox-second-container + image: quay.io/prometheus/busybox:latest + volumeMounts: + - name: shared-data + mountPath: /tmp + command: ["/bin/sh"] + args: ["-c", "echo Hello from the busybox-second-container > /tmp/pod-data && tail -f /dev/null"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-sysctl.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-sysctl.yaml new file mode 100644 index 000000000..36a1e99bd --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-sysctl.yaml @@ -0,0 +1,28 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: sysctl-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + securityContext: + sysctls: + - name: kernel.shm_rmid_forced + value: "0" + containers: + - name: test + securityContext: + privileged: true + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + initContainers: + - name: init-sys + securityContext: + privileged: true + image: quay.io/prometheus/busybox:latest + command: ['sh', '-c', 'echo "64000" > /proc/sys/vm/max_map_count'] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-tcp-liveness.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-tcp-liveness.yaml new file mode 100644 index 000000000..6d5343cfe --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-tcp-liveness.yaml @@ -0,0 +1,31 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: tcptest + labels: + app: tcp-liveness +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: tcp-liveness + image: ${agnhost_image} + args: + - liveness + ports: + - containerPort: 8080 + readinessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 10 
+ livenessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 15 + periodSeconds: 20 diff --git a/tests/integration/kubernetes/runtimeclass_workloads/redis-master-deployment.yaml b/tests/integration/kubernetes/runtimeclass_workloads/redis-master-deployment.yaml new file mode 100644 index 000000000..7dcc8cda9 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/redis-master-deployment.yaml @@ -0,0 +1,36 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-master + labels: + app: redis +spec: + selector: + matchLabels: + app: redis + role: master + tier: backend + replicas: 1 + template: + metadata: + labels: + app: redis + role: master + tier: backend + spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: master + image: quay.io/libpod/redis + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 diff --git a/tests/integration/kubernetes/runtimeclass_workloads/redis-master-service.yaml b/tests/integration/kubernetes/runtimeclass_workloads/redis-master-service.yaml new file mode 100644 index 000000000..cb32ac1a2 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/redis-master-service.yaml @@ -0,0 +1,21 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Service +metadata: + name: redis-master + labels: + app: redis + role: master + tier: backend +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: redis + role: master + tier: backend diff --git a/tests/integration/kubernetes/runtimeclass_workloads/replication-controller.yaml b/tests/integration/kubernetes/runtimeclass_workloads/replication-controller.yaml new file mode 100644 index 000000000..a971d5a98 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/replication-controller.yaml @@ -0,0 +1,26 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: ReplicationController +metadata: + name: replicationtest +spec: + replicas: 1 + selector: + app: nginx-rc-test + template: + metadata: + name: nginx + labels: + app: nginx-rc-test + spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: nginxtest + image: quay.io/sjenning/${nginx_version} + ports: + - containerPort: 80 diff --git a/tests/integration/kubernetes/runtimeclass_workloads/resource-quota.yaml b/tests/integration/kubernetes/runtimeclass_workloads/resource-quota.yaml new file mode 100644 index 000000000..a8d84d9ad --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/resource-quota.yaml @@ -0,0 +1,20 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: List +items: +- apiVersion: v1 + kind: Namespace + metadata: + name: test-quota-ns +- apiVersion: v1 + kind: ResourceQuota + metadata: + name: pod-quota + namespace: test-quota-ns + spec: + hard: + pods: "2" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/stress/Dockerfile b/tests/integration/kubernetes/runtimeclass_workloads/stress/Dockerfile new file mode 100644 index 000000000..3609eb4a4 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/stress/Dockerfile @@ -0,0 +1,13 @@ +# +# Copyright (c) 2021 IBM Corp. 
+# +# SPDX-License-Identifier: Apache-2.0 + +# The image has only the 'latest' tag so it needs to ignore DL3007 +#hadolint ignore=DL3007 +FROM quay.io/libpod/ubuntu:latest +RUN apt-get -y update && \ + apt-get -y upgrade && \ + apt-get -y --no-install-recommends install stress && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* diff --git a/tests/integration/kubernetes/runtimeclass_workloads/vfio.yaml b/tests/integration/kubernetes/runtimeclass_workloads/vfio.yaml new file mode 100644 index 000000000..33ea60b7a --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/vfio.yaml @@ -0,0 +1,24 @@ +# +# Copyright (c) 2020 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: vfio +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: c1 + image: quay.io/prometheus/busybox:latest + command: + - sh + tty: true + stdin: true + resources: + limits: + intel.com/virtio_net: "1" + requests: + intel.com/virtio_net: "1" diff --git a/tests/integration/kubernetes/tests_common.sh b/tests/integration/kubernetes/tests_common.sh new file mode 100644 index 000000000..481cf4a57 --- /dev/null +++ b/tests/integration/kubernetes/tests_common.sh @@ -0,0 +1,39 @@ +# +# Copyright (c) 2021 Red Hat, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +# This script is invoked within an OpenShift Build to produce the binary image, +# which will contain the Kata Containers installation into a given destination +# directory. +# +# This contains variables and functions common to all e2e tests. + +# Variables used by the kubernetes tests +export docker_images_nginx_version="1.15-alpine" +export container_images_agnhost_name="k8s.gcr.io/e2e-test-images/agnhost" +export container_images_agnhost_version="2.21" + +# Timeout options, mainly for use with waitForProcess(). Use them unless the +# operation needs to wait longer. +wait_time=90 +sleep_time=3 + +# Timeout for use with `kubectl wait`, unless it needs to wait longer. +# Note: try to keep timeout and wait_time equal. +timeout=90s + +# Issues that can't be tested yet. +fc_limitations="https://github.com/kata-containers/documentation/issues/351" +dragonball_limitations="https://github.com/kata-containers/kata-containers/issues/6621" + +# Path to the kubeconfig file which is used by kubectl and other tools. +# Note: the init script sets that variable but if you want to run the tests in +# your own provisioned cluster and you know what you are doing then you should +# overwrite it. +export KUBECONFIG="${KUBECONFIG:-$HOME/.kube/config}" + +get_pod_config_dir() { + pod_config_dir="${BATS_TEST_DIRNAME}/runtimeclass_workloads" + info "k8s configured to use runtimeclass" +} diff --git a/tools/osbuilder/README.md b/tools/osbuilder/README.md index 343d2bf60..9415de74e 100644 --- a/tools/osbuilder/README.md +++ b/tools/osbuilder/README.md @@ -80,7 +80,7 @@ filesystem components to generate an initrd. 3. When generating an image, the initrd is extracted to obtain the base rootfs for the image. -Ubuntu is the default distro for building the rootfs, to use a different one, you can set `DISTRO=alpine|clearlinux|debian|ubuntu`. +Ubuntu is the default distro for building the rootfs, to use a different one, you can set `DISTRO=alpine|clearlinux|debian|ubuntu|cbl-mariner`. For example `make USE_DOCKER=true DISTRO=alpine rootfs` will make an Alpine rootfs using Docker. ### Rootfs creation @@ -209,9 +209,9 @@ of the osbuilder distributions.
> Note: this table is not relevant for the dracut build method, since it supports any Linux distribution and architecture where dracut is available. -| |Alpine |CentOS Stream |Clear Linux |Debian/Ubuntu | -|-- |-- |-- |-- |-- | -|**ARM64** |:heavy_check_mark:|:heavy_check_mark:| | | -|**PPC64le**| |:heavy_check_mark:| |:heavy_check_mark:| -|**s390x** | |:heavy_check_mark:| |:heavy_check_mark:| -|**x86_64** |:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:| +| |Alpine |CentOS Stream |Clear Linux |Debian/Ubuntu |CBL-Mariner | +|-- |-- |-- |-- |-- |-- | +|**ARM64** |:heavy_check_mark:|:heavy_check_mark:| | | | +|**PPC64le**| |:heavy_check_mark:| |:heavy_check_mark:| | +|**s390x** | |:heavy_check_mark:| |:heavy_check_mark:| | +|**x86_64** |:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:| diff --git a/tools/osbuilder/dracut/dracut.conf.d/05-base.conf b/tools/osbuilder/dracut/dracut.conf.d/05-base.conf index 1dd41c4a1..88591dda9 100644 --- a/tools/osbuilder/dracut/dracut.conf.d/05-base.conf +++ b/tools/osbuilder/dracut/dracut.conf.d/05-base.conf @@ -14,4 +14,4 @@ hostonly_cmdline="no" # create reproducible images reproducible="yes" # dracut modules to include (NOTE: these are NOT kernel modules) -dracutmodules="kernel-modules udev-rules syslog systemd" +dracutmodules="kernel-modules udev-rules syslog systemd dbus" diff --git a/tools/osbuilder/rootfs-builder/cbl-mariner/Dockerfile.in b/tools/osbuilder/rootfs-builder/cbl-mariner/Dockerfile.in new file mode 100644 index 000000000..6fa29807d --- /dev/null +++ b/tools/osbuilder/rootfs-builder/cbl-mariner/Dockerfile.in @@ -0,0 +1,15 @@ +# Copyright (c) 2023 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +ARG IMAGE_REGISTRY=mcr.microsoft.com +FROM ${IMAGE_REGISTRY}/cbl-mariner/base/core:@OS_VERSION@ + +RUN tdnf -y install \ + ca-certificates \ + build-essential \ + dnf \ + git \ + tar + +@INSTALL_RUST@ diff --git a/tools/osbuilder/rootfs-builder/cbl-mariner/config.sh b/tools/osbuilder/rootfs-builder/cbl-mariner/config.sh new file mode 100644 index 000000000..694124acd --- /dev/null +++ b/tools/osbuilder/rootfs-builder/cbl-mariner/config.sh @@ -0,0 +1,10 @@ +# Copyright (c) 2023 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +OS_NAME=cbl-mariner +OS_VERSION=${OS_VERSION:-2.0} +LIBC="gnu" +PACKAGES="core-packages-base-image ca-certificates" +[ "$AGENT_INIT" = no ] && PACKAGES+=" systemd" +[ "$SECCOMP" = yes ] && PACKAGES+=" libseccomp" diff --git a/tools/osbuilder/rootfs-builder/cbl-mariner/rootfs_lib.sh b/tools/osbuilder/rootfs-builder/cbl-mariner/rootfs_lib.sh new file mode 100644 index 000000000..0288d4d77 --- /dev/null +++ b/tools/osbuilder/rootfs-builder/cbl-mariner/rootfs_lib.sh @@ -0,0 +1,26 @@ +# Copyright (c) 2023 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +build_rootfs() +{ + # Mandatory + local ROOTFS_DIR="$1" + + [ -z "$ROOTFS_DIR" ] && die "need rootfs" + + # In case of support EXTRA packages, use it to allow + # users add more packages to the base rootfs + local EXTRA_PKGS=${EXTRA_PKGS:-""} + + check_root + mkdir -p "${ROOTFS_DIR}" + PKG_MANAGER="tdnf" + + DNF="${PKG_MANAGER} -y --installroot=${ROOTFS_DIR} --noplugins --releasever=${OS_VERSION}" + + info "install packages for rootfs" + $DNF install ${EXTRA_PKGS} ${PACKAGES} + + rm -rf ${ROOTFS_DIR}/usr/share/{bash-completion,cracklib,doc,info,locale,man,misc,pixmaps,terminfo,zoneinfo,zsh} +} diff --git a/tools/osbuilder/rootfs-builder/rootfs.sh 
b/tools/osbuilder/rootfs-builder/rootfs.sh index eceb4f031..b4062ce8c 100755 --- a/tools/osbuilder/rootfs-builder/rootfs.sh +++ b/tools/osbuilder/rootfs-builder/rootfs.sh @@ -486,9 +486,6 @@ prepare_overlay() ln -sf /init ./sbin/init fi - # Kata systemd unit file - mkdir -p ./etc/systemd/system/basic.target.wants/ - ln -sf /usr/lib/systemd/system/kata-containers.target ./etc/systemd/system/basic.target.wants/kata-containers.target popd > /dev/null } @@ -637,9 +634,12 @@ EOF if [ "${AGENT_INIT}" == "yes" ]; then setup_agent_init "${AGENT_DEST}" "${init}" else - # Setup systemd service for kata-agent + # Setup systemd-based environment for kata-agent mkdir -p "${ROOTFS_DIR}/etc/systemd/system/basic.target.wants" ln -sf "/usr/lib/systemd/system/kata-containers.target" "${ROOTFS_DIR}/etc/systemd/system/basic.target.wants/kata-containers.target" + mkdir -p "${ROOTFS_DIR}/etc/systemd/system/kata-containers.target.wants" + ln -sf "/usr/lib/systemd/system/dbus.socket" "${ROOTFS_DIR}/etc/systemd/system/kata-containers.target.wants/dbus.socket" + chmod g+rx,o+x "${ROOTFS_DIR}" fi info "Check init is installed" diff --git a/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml b/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml index bd177834f..e83b87ead 100644 --- a/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml +++ b/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml @@ -21,6 +21,9 @@ spec: image: quay.io/kata-containers/kata-deploy-cc:v0 imagePullPolicy: Always command: [ "bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh reset" ] + readinessProbe: + exec: + command: [ "bash", "-c", "! [ -f /opt/kata/kata-deployed ]" ] env: - name: NODE_NAME valueFrom: diff --git a/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml b/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml index b850d604f..c371a4a8a 100644 --- a/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml +++ b/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml @@ -18,6 +18,9 @@ spec: - name: kube-kata image: quay.io/kata-containers/kata-deploy-cc:v0 imagePullPolicy: Always + readinessProbe: + exec: + command: [ "bash", "-c", "[ -f /opt/kata/kata-deployed ]", "&&", "bash", "-c", "[ $?
== 0 ]" ] lifecycle: preStop: exec: diff --git a/tools/packaging/kata-deploy/local-build/Makefile b/tools/packaging/kata-deploy/local-build/Makefile index 1a1d51575..74192d536 100644 --- a/tools/packaging/kata-deploy/local-build/Makefile +++ b/tools/packaging/kata-deploy/local-build/Makefile @@ -39,9 +39,15 @@ all: serial-targets \ firecracker-tarball \ kernel-tarball \ kernel-dragonball-experimental-tarball \ + kernel-tdx-experimental-tarball \ + kernel-gpu \ + kernel-gpu-snp-tarball \ + kernel-gpu-tdx-experimental-tarball \ nydus-tarball \ qemu-tarball \ + qemu-tdx-experimental-tarball \ shim-v2-tarball \ + tdvf-tarball \ virtiofsd-tarball serial-targets: @@ -65,15 +71,30 @@ kernel-tarball: kernel-dragonball-experimental-tarball: ${MAKE} $@-build +kernel-gpu-tarball: + ${MAKE} $@-build + +kernel-gpu-snp-tarball: + ${MAKE} $@-build + +kernel-gpu-tdx-experimental-tarball: + ${MAKE} $@-build + kernel-experimental-tarball: ${MAKE} $@-build +kernel-tdx-experimental-tarball: + ${MAKE} $@-build + nydus-tarball: ${MAKE} $@-build qemu-tarball: ${MAKE} $@-build +qemu-tdx-experimental-tarball: + ${MAKE} $@-build + rootfs-image-tarball: ${MAKE} $@-build @@ -83,6 +104,9 @@ rootfs-initrd-tarball: shim-v2-tarball: ${MAKE} $@-build +tdvf-tarball: + ${MAKE} $@-build + virtiofsd-tarball: ${MAKE} $@-build diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index c0a27e720..13cddc3b1 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -28,6 +28,7 @@ readonly initramfs_builder="${static_build_dir}/initramfs/build.sh" readonly kernel_builder="${static_build_dir}/kernel/build.sh" readonly ovmf_builder="${static_build_dir}/ovmf/build.sh" readonly qemu_builder="${static_build_dir}/qemu/build-static-qemu.sh" +readonly qemu_experimental_builder="${static_build_dir}/qemu/build-static-qemu-experimental.sh" readonly shimv2_builder="${static_build_dir}/shim-v2/build.sh" readonly td_shim_builder="${static_build_dir}/td-shim/build.sh" readonly virtiofsd_builder="${static_build_dir}/virtiofsd/build.sh" @@ -88,11 +89,17 @@ options: kernel kernel-dragonball-experimental kernel-experimental + kernel-tdx-experimental + kernel-gpu + kernel-gpu-snp + kernel-gpu-tdx-experimental nydus qemu + qemu-tdx-experimental rootfs-image rootfs-initrd shim-v2 + tdvf virtiofsd cc cc-cloud-hypervisor @@ -138,17 +145,9 @@ install_cached_component() { info "Using cached tarball of ${component}" echo "Downloading tarball from: ${jenkins_build_url}/${component_tarball_name}" - wget "${jenkins_build_url}/${component_tarball_name}" || return cleanup_and_fail - wget "${jenkins_build_url}/sha256sum-${component_tarball_name}" || return cleanup_and_fail - sha256sum -c "sha256sum-${component_tarball_name}" || return cleanup_and_fail - if [ -n "${root_hash_vanilla}" ]; then - wget "${jenkins_build_url}/${root_hash_vanilla}" || return cleanup_and_fail - mv "${root_hash_vanilla}" "${repo_root_dir}/tools/osbuilder/" - fi - if [ -n "${root_hash_tdx}" ]; then - wget "${jenkins_build_url}/${root_hash_tdx}" || return cleanup_and_fail - mv "${root_hash_tdx}" "${repo_root_dir}/tools/osbuilder/" - fi + wget "${jenkins_build_url}/${component_tarball_name}" || return $(cleanup_and_fail) + wget "${jenkins_build_url}/sha256sum-${component_tarball_name}" || return $(cleanup_and_fail) + sha256sum -c "sha256sum-${component_tarball_name}" || return $(cleanup_and_fail) mv 
"${component_tarball_name}" "${component_tarball_path}" } @@ -611,77 +610,133 @@ install_cc_initrd() { } #Install kernel asset -install_kernel() { - export kernel_version="$(yq r $versions_yaml assets.kernel.version)" +install_kernel_helper() { + local kernel_version_yaml_path="${1}" + local kernel_name="${2}" + local extra_cmd=${3} + + export kernel_version="$(get_from_kata_deps ${kernel_version_yaml_path})" local kernel_kata_config_version="$(cat ${repo_root_dir}/tools/packaging/kernel/kata_config_version)" - install_cached_component \ - "kernel" \ - "${jenkins_url}/job/kata-containers-main-kernel-$(uname -m)/${cached_artifacts_path}" \ + install_cached_tarball_component \ + "${kernel_name}" \ + "${jenkins_url}/job/kata-containers-main-${kernel_name}-$(uname -m)/${cached_artifacts_path}" \ "${kernel_version}-${kernel_kata_config_version}" \ "$(get_kernel_image_name)" \ "${final_tarball_name}" \ "${final_tarball_path}" \ && return 0 - DESTDIR="${destdir}" PREFIX="${prefix}" "${kernel_builder}" -f -v "${kernel_version}" + info "build ${kernel_name}" + info "Kernel version ${kernel_version}" + DESTDIR="${destdir}" PREFIX="${prefix}" "${kernel_builder}" -v "${kernel_version}" ${extra_cmd} } -#Install dragonball experimental kernel asset -install_dragonball_experimental_kernel() { - info "build dragonball experimental kernel" - export kernel_version="$(yq r $versions_yaml assets.kernel-dragonball-experimental.version)" - local kernel_kata_config_version="$(cat ${repo_root_dir}/tools/packaging/kernel/kata_config_version)" +#Install kernel asset +install_kernel() { + install_kernel_helper \ + "assets.kernel.version" \ + "kernel" \ + "-f" +} - install_cached_component \ +install_kernel_dragonball_experimental() { + install_kernel_helper \ + "assets.kernel-dragonball-experimental.version" \ "kernel-dragonball-experimental" \ - "${jenkins_url}/job/kata-containers-main-kernel-dragonball-experimental-$(uname -m)/${cached_artifacts_path}" \ - "${kernel_version}-${kernel_kata_config_version}" \ - "$(get_kernel_image_name)" \ - "${final_tarball_name}" \ - "${final_tarball_path}" \ - && return 0 + "-e -t dragonball" +} - info "kernel version ${kernel_version}" - DESTDIR="${destdir}" PREFIX="${prefix}" "${kernel_builder}" -e -t dragonball -v ${kernel_version} +#Install GPU enabled kernel asset +install_kernel_gpu() { + local kernel_url="$(get_from_kata_deps assets.kernel.url)" + + install_kernel_helper \ + "assets.kernel.version" \ + "kernel-gpu" \ + "-g nvidia -u ${kernel_url} -H deb" +} + +#Install GPU and SNP enabled kernel asset +install_kernel_gpu_snp() { + local kernel_url="$(get_from_kata_deps assets.kernel.snp.url)" + + install_kernel_helper \ + "assets.kernel.snp.version" \ + "kernel-gpu-snp" \ + "-x snp -g nvidia -u ${kernel_url} -H deb" +} + +#Install GPU and TDX experimental enabled kernel asset +install_kernel_gpu_tdx_experimental() { + local kernel_url="$(get_from_kata_deps assets.kernel-tdx-experimental.url)" + + install_kernel_helper \ + "assets.kernel-tdx-experimental.version" \ + "kernel-gpu-tdx" \ + "-x tdx -g nvidia -u ${kernel_url} -H deb" } #Install experimental kernel asset -install_experimental_kernel() { - info "build experimental kernel" - export kernel_version="$(yq r $versions_yaml assets.kernel-experimental.tag)" - local kernel_kata_config_version="$(cat ${repo_root_dir}/tools/packaging/kernel/kata_config_version)" - - install_cached_component \ +install_kernel_experimental() { + install_kernel_helper \ + "assets.kernel-experimental.version" \ "kernel-experimental" \ - 
"${jenkins_url}/job/kata-containers-main-kernel-experimental-$(uname -m)/${cached_artifacts_path}" \ - "${kernel_version}-${kernel_kata_config_version}" \ - "$(get_kernel_image_name)" \ - "${final_tarball_name}" \ - "${final_tarball_path}" \ - && return 0 - - info "Kernel version ${kernel_version}" - DESTDIR="${destdir}" PREFIX="${prefix}" "${kernel_builder}" -f -b experimental -v ${kernel_version} + "-f -b experimental" } -# Install static qemu asset -install_qemu() { - export qemu_repo="$(yq r $versions_yaml assets.hypervisor.qemu.url)" - export qemu_version="$(yq r $versions_yaml assets.hypervisor.qemu.version)" +#Install experimental TDX kernel asset +install_kernel_tdx_experimental() { + local kernel_url="$(get_from_kata_deps assets.kernel-tdx-experimental.url)" - install_cached_component \ - "QEMU" \ - "${jenkins_url}/job/kata-containers-main-qemu-$(uname -m)/${cached_artifacts_path}" \ + install_kernel_helper \ + "assets.kernel-tdx-experimental.version" \ + "kernel-tdx-experimental" \ + "-x tdx -u ${kernel_url}" +} + +install_qemu_helper() { + local qemu_repo_yaml_path="${1}" + local qemu_version_yaml_path="${2}" + local qemu_name="${3}" + local builder="${4}" + local qemu_tarball_name="${qemu_tarball_name:-kata-static-qemu.tar.gz}" + + export qemu_repo="$(get_from_kata_deps ${qemu_repo_yaml_path})" + export qemu_version="$(get_from_kata_deps ${qemu_version_yaml_path})" + + install_cached_tarball_component \ + "${qemu_name}" \ + "${jenkins_url}/job/kata-containers-main-${qemu_name}-$(uname -m)/${cached_artifacts_path}" \ "${qemu_version}-$(calc_qemu_files_sha256sum)" \ "$(get_qemu_image_name)" \ "${final_tarball_name}" \ "${final_tarball_path}" \ && return 0 - info "build static qemu" - "${qemu_builder}" - tar xvf "${builddir}/kata-static-qemu.tar.gz" -C "${destdir}" + info "build static ${qemu_name}" + "${builder}" + tar xvf "${qemu_tarball_name}" -C "${destdir}" +} + +# Install static qemu asset +install_qemu() { + install_qemu_helper \ + "assets.hypervisor.qemu.url" \ + "assets.hypervisor.qemu.version" \ + "qemu" \ + "${qemu_builder}" +} + +install_qemu_tdx_experimental() { + export qemu_suffix="tdx-experimental" + export qemu_tarball_name="kata-static-qemu-${qemu_suffix}.tar.gz" + + install_qemu_helper \ + "assets.hypervisor.qemu-${qemu_suffix}.url" \ + "assets.hypervisor.qemu-${qemu_suffix}.tag" \ + "qemu-${qemu_suffix}" \ + "${qemu_experimental_builder}" } # Install static firecracker asset @@ -786,6 +841,31 @@ install_shimv2() { DESTDIR="${destdir}" PREFIX="${prefix}" "${shimv2_builder}" } +install_ovmf() { + ovmf_type="${1:-x86_64}" + tarball_name="${2:-edk2.tar.xz}" + + local component_name="ovmf" + local component_version="$(get_from_kata_deps "externals.ovmf.${ovmf_type}.version")" + [ "${ovmf_type}" == "tdx" ] && component_name="tdvf" + install_cached_tarball_component \ + "${component_name}" \ + "${jenkins_url}/job/kata-containers-main-ovmf-${ovmf_type}-$(uname -m)/${cached_artifacts_path}" \ + "${component_version}" \ + "$(get_ovmf_image_name)" \ + "${final_tarball_name}" \ + "${final_tarball_path}" \ + && return 0 + + DESTDIR="${destdir}" PREFIX="${prefix}" ovmf_build="${ovmf_type}" "${ovmf_builder}" + tar xvf "${builddir}/${tarball_name}" -C "${destdir}" +} + +# Install TDVF +install_tdvf() { + install_ovmf "tdx" "edk2-tdx.tar.gz" +} + get_kata_version() { local v v=$(cat "${version_file}") @@ -808,9 +888,13 @@ handle_build() { install_image install_initrd install_kernel + install_kernel_dragonball_experimental + install_kernel_tdx_experimental install_nydus 
install_qemu + install_qemu_tdx_experimental install_shimv2 + install_tdvf install_virtiofsd ;; @@ -868,18 +952,30 @@ handle_build() { nydus) install_nydus ;; - kernel-dragonball-experimental) install_dragonball_experimental_kernel;; + kernel-dragonball-experimental) install_kernel_dragonball_experimental ;; - kernel-experimental) install_experimental_kernel;; + kernel-experimental) install_kernel_experimental ;; + + kernel-tdx-experimental) install_kernel_tdx_experimental ;; + + kernel-gpu) install_kernel_gpu ;; + + kernel-gpu-snp) install_kernel_gpu_snp;; + + kernel-gpu-tdx-experimental) install_kernel_gpu_tdx_experimental;; qemu) install_qemu ;; + qemu-tdx-experimental) install_qemu_tdx_experimental ;; + rootfs-image) install_image ;; rootfs-initrd) install_initrd ;; shim-v2) install_shimv2 ;; + tdvf) install_tdvf ;; + virtiofsd) install_virtiofsd ;; *) diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh index 7a2869a80..332723c51 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh @@ -20,7 +20,7 @@ IMAGE_TAG="${REGISTRY}:kata-containers-$(git rev-parse HEAD)-$(uname -m)" echo "Building the image" docker build --tag ${IMAGE_TAG} . -echo "Pushing the image to quay.io" +echo "Pushing the image to the registry" docker push ${IMAGE_TAG} if [ -n "${TAG}" ]; then @@ -29,7 +29,7 @@ if [ -n "${TAG}" ]; then echo "Building the ${ADDITIONAL_TAG} image" docker build --tag ${ADDITIONAL_TAG} . - echo "Pushing the image ${ADDITIONAL_TAG} to quay.io" + echo "Pushing the image ${ADDITIONAL_TAG} to the registry" docker push ${ADDITIONAL_TAG} fi diff --git a/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml b/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml index d3260d4a8..f65ec6b0a 100644 --- a/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml +++ b/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml @@ -14,6 +14,19 @@ scheduling: --- kind: RuntimeClass apiVersion: node.k8s.io/v1 +metadata: + name: kata-qemu-tdx +handler: kata-qemu-tdx +overhead: + podFixed: + memory: "2048Mi" + cpu: "1.0" +scheduling: + nodeSelector: + katacontainers.io/kata-runtime: "true" +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1 metadata: name: kata-clh handler: kata-clh @@ -50,3 +63,16 @@ overhead: scheduling: nodeSelector: katacontainers.io/kata-runtime: "true" +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1 +metadata: + name: kata-qemu-gpu +handler: kata-qemu-gpu +overhead: + podFixed: + memory: "160Mi" + cpu: "250m" +scheduling: + nodeSelector: + katacontainers.io/kata-runtime: "true" diff --git a/tools/packaging/kata-deploy/scripts/kata-deploy.sh b/tools/packaging/kata-deploy/scripts/kata-deploy.sh index a4a4f9d5c..5ae15802a 100644 --- a/tools/packaging/kata-deploy/scripts/kata-deploy.sh +++ b/tools/packaging/kata-deploy/scripts/kata-deploy.sh @@ -16,6 +16,8 @@ containerd_conf_file_backup="${containerd_conf_file}.bak" shims=( "fc" "qemu" + "qemu-tdx" + "qemu-gpu" "clh" "dragonball" ) @@ -58,7 +60,17 @@ function install_artifacts() { echo "copying kata artifacts onto host" cp -au /opt/kata-artifacts/opt/kata/* /opt/kata/ chmod +x /opt/kata/bin/* - chmod +x /opt/kata/runtime-rs/bin/* + [ -d /opt/kata/runtime-rs/bin ] && \ + chmod +x /opt/kata/runtime-rs/bin/* +} + +function 
diff --git a/tools/packaging/kata-deploy/scripts/kata-deploy.sh b/tools/packaging/kata-deploy/scripts/kata-deploy.sh
index a4a4f9d5c..5ae15802a 100644
--- a/tools/packaging/kata-deploy/scripts/kata-deploy.sh
+++ b/tools/packaging/kata-deploy/scripts/kata-deploy.sh
@@ -16,6 +16,8 @@ containerd_conf_file_backup="${containerd_conf_file}.bak"
 shims=(
 	"fc"
 	"qemu"
+	"qemu-tdx"
+	"qemu-gpu"
 	"clh"
 	"dragonball"
 )
@@ -58,7 +60,17 @@ function install_artifacts() {
 	echo "copying kata artifacts onto host"
 	cp -au /opt/kata-artifacts/opt/kata/* /opt/kata/
 	chmod +x /opt/kata/bin/*
-	chmod +x /opt/kata/runtime-rs/bin/*
+	[ -d /opt/kata/runtime-rs/bin ] && \
+		chmod +x /opt/kata/runtime-rs/bin/*
+}
+
+function wait_till_node_is_ready() {
+	local ready="False"
+
+	while ! [[ "${ready}" == "True" ]]; do
+		sleep 2s
+		ready=$(kubectl get node $NODE_NAME -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}')
+	done
 }
 function configure_cri_runtime() {
@@ -74,6 +86,8 @@ function configure_cri_runtime() {
 	esac
 	systemctl daemon-reload
 	systemctl restart "$1"
+
+	wait_till_node_is_ready
 }
 function configure_different_shims_base() {
@@ -264,6 +278,8 @@ function reset_runtime() {
 	if [ "$1" == "crio" ] || [ "$1" == "containerd" ]; then
 		systemctl restart kubelet
 	fi
+
+	wait_till_node_is_ready
 }
 function main() {
@@ -308,11 +324,13 @@ function main() {
 		install_artifacts
 		configure_cri_runtime "$runtime"
 		kubectl label node "$NODE_NAME" --overwrite katacontainers.io/kata-runtime=true
+		touch /opt/kata/kata-deployed
 		;;
 	cleanup)
 		cleanup_cri_runtime "$runtime"
 		kubectl label node "$NODE_NAME" --overwrite katacontainers.io/kata-runtime=cleanup
 		remove_artifacts
+		rm /opt/kata/kata-deployed
 		;;
 	reset)
 		reset_runtime $runtime
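The wait_till_node_is_ready helper introduced here blocks until the node's Ready condition reports "True" again after the container runtime is restarted. The same condition can be checked by hand, assuming kubectl access and NODE_NAME set as in the kata-deploy DaemonSet environment:

    kubectl get node "$NODE_NAME" \
        -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
    # prints "True" once the kubelet has reconnected to the restarted runtime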
diff --git a/tools/packaging/kernel/README.md b/tools/packaging/kernel/README.md
index ce4ea30c4..d9b78480a 100644
--- a/tools/packaging/kernel/README.md
+++ b/tools/packaging/kernel/README.md
@@ -47,6 +47,7 @@ Options:
 	-f : Enable force generate config when setup.
 	-g : GPU vendor, intel or nvidia.
 	-h : Display this help.
+	-H : Linux headers for guest fs module building.
 	-k : Path to kernel to build.
 	-p : Path to a directory with patches to apply to kernel, only patches in top-level directory are applied.
 	-t : Hypervisor_target.
diff --git a/tools/packaging/kernel/build-kernel.sh b/tools/packaging/kernel/build-kernel.sh
index 203114312..9b58de355 100755
--- a/tools/packaging/kernel/build-kernel.sh
+++ b/tools/packaging/kernel/build-kernel.sh
@@ -60,6 +60,8 @@ DESTDIR="${DESTDIR:-/}"
 PREFIX="${PREFIX:-/usr}"
 #Kernel URL
 kernel_url=""
+#Linux headers for GPU guest fs module building
+linux_headers=""
 KATA_BUILD_CC=${KATA_BUILD_CC:-no}
 packaging_scripts_dir="${script_dir}/../scripts"
@@ -95,6 +97,7 @@ Options:
 	-f : Enable force generate config when setup.
 	-g : GPU vendor, intel or nvidia.
 	-h : Display this help.
+	-H : Linux headers for guest fs module building.
 	-k : Path to kernel to build.
 	-p : Path to a directory with patches to apply to kernel.
 	-s : Skip .config checks
@@ -241,6 +244,23 @@ get_kernel_frag_path() {
 		info "Add kernel config for GPU due to '-g ${gpu_vendor}'"
 		local gpu_configs="$(ls ${gpu_path}/${gpu_vendor}.conf)"
 		all_configs="${all_configs} ${gpu_configs}"
+		# If conf_guest is set we need to update the CONFIG_LOCALVERSION
+		# to match the suffix created in install_kata
+		# -nvidia-gpu-{snp|tdx}, the linux headers will be named the very
+		# same if build with make deb-pkg for TDX or SNP.
+		if [[ "${conf_guest}" != "" ]];then
+			local gpu_cc_configs=$(mktemp).conf
+			local gpu_subst_configs="$(ls ${gpu_path}/${gpu_vendor}.conf.in)"
+
+			export CONF_GUEST_SUFFIX="-${conf_guest}"
+			envsubst <${gpu_subst_configs} >${gpu_cc_configs}
+			unset CONF_GUEST_SUFFIX
+
+			all_configs="${all_configs} ${gpu_cc_configs}"
+		else
+			local gpu_configs="$(ls ${gpu_path}/${gpu_vendor}.conf)"
+			all_configs="${all_configs} ${gpu_configs}"
+		fi
 	fi
 	if [ "${KATA_BUILD_CC}" == "yes" ]; then
@@ -416,10 +436,30 @@ build_kernel() {
 	popd >>/dev/null
 }
+build_kernel_headers() {
+	local kernel_path=${1:-}
+	[ -n "${kernel_path}" ] || die "kernel_path not provided"
+	[ -d "${kernel_path}" ] || die "path to kernel does not exist, use ${script_name} setup"
+	[ -n "${arch_target}" ] || arch_target="$(uname -m)"
+	arch_target=$(arch_to_kernel "${arch_target}")
+	pushd "${kernel_path}" >>/dev/null
+
+	if [ "$linux_headers" == "deb" ]; then
+		make -j $(nproc ${CI:+--ignore 1}) deb-pkg ARCH="${arch_target}"
+	fi
+	if [ "$linux_headers" == "rpm" ]; then
+		make -j $(nproc ${CI:+--ignore 1}) rpm-pkg ARCH="${arch_target}"
+	fi
+
+	popd >>/dev/null
+}
+
 install_kata() {
 	local kernel_path=${1:-}
 	[ -n "${kernel_path}" ] || die "kernel_path not provided"
 	[ -d "${kernel_path}" ] || die "path to kernel does not exist, use ${script_name} setup"
+	[ -n "${arch_target}" ] || arch_target="$(uname -m)"
+	arch_target=$(arch_to_kernel "${arch_target}")
 	pushd "${kernel_path}" >>/dev/null
 	config_version=$(get_config_version)
 	[ -n "${config_version}" ] || die "failed to get config version"
@@ -429,14 +469,15 @@ install_kata() {
 	if [[ ${build_type} != "" ]]; then
 		suffix="-${build_type}"
 	fi
-	if [[ ${gpu_vendor} != "" ]];then
-		suffix="-${gpu_vendor}-gpu${suffix}"
-	fi
 	if [[ ${conf_guest} != "" ]];then
 		suffix="-${conf_guest}${suffix}"
 	fi
+	if [[ ${gpu_vendor} != "" ]];then
+		suffix="-${gpu_vendor}-gpu${suffix}"
+	fi
+
 	vmlinuz="vmlinuz-${kernel_version}-${config_version}${suffix}"
 	vmlinux="vmlinux-${kernel_version}-${config_version}${suffix}"
@@ -474,7 +515,7 @@ install_kata() {
 }
 main() {
-	while getopts "a:b:c:deEfg:hk:p:t:u:v:x:" opt; do
+	while getopts "a:b:c:deEfg:hH:k:p:t:u:v:x:" opt; do
 		case "$opt" in
 		a)
 			arch_target="${OPTARG}"
@@ -505,6 +546,9 @@ main() {
 		h)
 			usage 0
 			;;
+		H)
+			linux_headers="${OPTARG}"
+			;;
 		k)
 			kernel_path="$(realpath ${OPTARG})"
 			;;
@@ -593,8 +637,10 @@ main() {
 	build)
 		build_kernel "${kernel_path}"
 		;;
+	build-headers)
+		build_kernel_headers "${kernel_path}"
+		;;
 	install)
-		build_kernel "${kernel_path}"
 		install_kata "${kernel_path}"
 		;;
 	setup)
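With the new -H option and the build-headers target, packaged Linux headers can be produced alongside the guest kernel. One possible sequence, assuming an x86_64 host and the NVIDIA GPU and TDX fragments introduced in this change, is:

    ./build-kernel.sh -a x86_64 -x tdx -g nvidia -H deb -f setup
    ./build-kernel.sh -a x86_64 -x tdx -g nvidia -H deb build
    # emits .deb header packages via "make deb-pkg"; use "-H rpm" for rpm-pkg instead
    ./build-kernel.sh -a x86_64 -x tdx -g nvidia -H deb build-headers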
diff --git a/tools/packaging/kernel/configs/fragments/gpu/nvidia.conf.in b/tools/packaging/kernel/configs/fragments/gpu/nvidia.conf.in
new file mode 100644
index 000000000..73cce6173
--- /dev/null
+++ b/tools/packaging/kernel/configs/fragments/gpu/nvidia.conf.in
@@ -0,0 +1,14 @@
+# Support mmconfig PCI config space access.
+# It's used to enable the MMIO access method for PCIe devices.
+CONFIG_PCI_MMCONFIG=y
+
+# Support for loading modules.
+# It is used to support loading GPU drivers.
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+
+# CRYPTO_FIPS requires this config when loading modules is enabled.
+CONFIG_MODULE_SIG=y
+
+# Linux kernel version suffix
+CONFIG_LOCALVERSION="-nvidia-gpu${CONF_GUEST_SUFFIX}"
diff --git a/tools/packaging/kernel/configs/fragments/x86_64/tdx/tdx.conf b/tools/packaging/kernel/configs/fragments/x86_64/tdx/tdx.conf
index 1b1f8751e..2f877a5c9 100644
--- a/tools/packaging/kernel/configs/fragments/x86_64/tdx/tdx.conf
+++ b/tools/packaging/kernel/configs/fragments/x86_64/tdx/tdx.conf
@@ -5,13 +5,9 @@ CONFIG_DMA_RESTRICTED_POOL=y
 CONFIG_EFI=y
 CONFIG_EFI_STUB=y
 CONFIG_INTEL_IOMMU_SVM=y
-CONFIG_INTEL_TDX_ATTESTATION=y
-CONFIG_INTEL_TDX_FIXES=y
 CONFIG_INTEL_TDX_GUEST=y
 CONFIG_OF=y
 CONFIG_OF_RESERVED_MEM=y
 CONFIG_X86_5LEVEL=y
 CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y
-CONFIG_X86_MEM_ENCRYPT_COMMON=y
 CONFIG_X86_PLATFORM_DEVICES=y
-CONFIG_X86_PLATFORM_DRIVERS_INTEL=y
diff --git a/tools/packaging/kernel/kata_config_version b/tools/packaging/kernel/kata_config_version
index c6e1b79de..f35a418cd 100644
--- a/tools/packaging/kernel/kata_config_version
+++ b/tools/packaging/kernel/kata_config_version
@@ -1 +1 @@
-102cc
+105cc
\ No newline at end of file
diff --git a/tools/packaging/kernel/patches/5.19-TDX-v2.x/no_patches.txt b/tools/packaging/kernel/patches/5.19-TDX-v2.x/no_patches.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/tools/packaging/qemu/patches/tag_patches/ad4c7f529a279685da84297773b4ec8080153c2d-plus-TDX-v1.3/no_patches.txt b/tools/packaging/qemu/patches/tag_patches/ad4c7f529a279685da84297773b4ec8080153c2d-plus-TDX-v1.3/no_patches.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/tools/packaging/scripts/configure-hypervisor.sh b/tools/packaging/scripts/configure-hypervisor.sh
index 827d08d80..751b2866f 100755
--- a/tools/packaging/scripts/configure-hypervisor.sh
+++ b/tools/packaging/scripts/configure-hypervisor.sh
@@ -242,9 +242,12 @@ generate_qemu_options() {
 	# Disable graphical network access
 	qemu_options+=(size:--disable-vnc)
 	qemu_options+=(size:--disable-vnc-jpeg)
-	if ! gt_eq "${qemu_version}" "7.1.0" ; then
+	if ! gt_eq "${qemu_version}" "7.0.50" ; then
 		qemu_options+=(size:--disable-vnc-png)
+	else
+		qemu_options+=(size:--disable-png)
 	fi
+
 	qemu_options+=(size:--disable-vnc-sasl)
 	# Disable PAM authentication: it's a feature used together with VNC access
@@ -358,7 +361,7 @@ generate_qemu_options() {
 	qemu_options+=(size:--disable-vde)
 	# Don't build other options which can't be depent on build server.
-	if ! gt_eq "${qemu_version}" "7.1.0" ; then
+	if ! gt_eq "${qemu_version}" "7.0.50" ; then
gt_eq "${qemu_version}" "7.0.50" ; then qemu_options+=(size:--disable-xfsctl) qemu_options+=(size:--disable-libxml2) fi diff --git a/tools/packaging/scripts/lib.sh b/tools/packaging/scripts/lib.sh index 4a1198842..913397649 100755 --- a/tools/packaging/scripts/lib.sh +++ b/tools/packaging/scripts/lib.sh @@ -222,6 +222,11 @@ get_td_shim_image_name() { echo "${CC_BUILDER_REGISTRY}:td-shim-$(get_from_kata_deps "externals.td-shim.toolchain")-$(get_last_modification ${td_shim_script_dir})-$(uname -m)" } +get_ovmf_image_name() { + ovmf_script_dir="${repo_root_dir}/tools/packaging/static-build/ovmf" + echo "${BUILDER_REGISTRY}:ovmf-$(get_last_modification ${ovmf_script_dir})-$(uname -m)" +} + get_virtiofsd_image_name() { ARCH=$(uname -m) case ${ARCH} in diff --git a/tools/packaging/static-build/cache_components_main.sh b/tools/packaging/static-build/cache_components_main.sh index e447ab4bf..0e8a0120f 100755 --- a/tools/packaging/static-build/cache_components_main.sh +++ b/tools/packaging/static-build/cache_components_main.sh @@ -12,7 +12,9 @@ script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" source "${script_dir}/../scripts/lib.sh" -KERNEL_FLAVOUR="${KERNEL_FLAVOUR:-kernel}" # kernel | kernel-experimental | kernel-arm-experimetnal | kernel-dragonball-experimental +KERNEL_FLAVOUR="${KERNEL_FLAVOUR:-kernel}" # kernel | kernel-experimental | kernel-arm-experimental | kernel-dragonball-experimental | kernel-tdx-experimental +OVMF_FLAVOUR="${OVMF_FLAVOUR:-x86_64}" # x86_64 | tdx +QEMU_FLAVOUR="${QEMU_FLAVOUR:-qemu}" # qemu | qemu-tdx-experimental ROOTFS_IMAGE_TYPE="${ROOTFS_IMAGE_TYPE:-image}" # image | initrd cache_clh_artifacts() { @@ -41,9 +43,18 @@ cache_nydus_artifacts() { create_cache_asset "${nydus_tarball_name}" "${current_nydus_version}" "" } +cache_ovmf_artifacts() { + local current_ovmf_version="$(get_from_kata_deps "externals.ovmf.${OVMF_FLAVOUR}.version")" + [ "${OVMF_FLAVOUR}" == "tdx" ] && OVMF_FLAVOUR="tdvf" + local ovmf_tarball_name="kata-static-${OVMF_FLAVOUR}.tar.xz" + local current_ovmf_image="$(get_ovmf_image_name)" + create_cache_asset "${ovmf_tarball_name}" "${current_ovmf_version}" "${current_ovmf_image}" +} + cache_qemu_artifacts() { - local qemu_tarball_name="kata-static-qemu.tar.xz" - local current_qemu_version=$(get_from_kata_deps "assets.hypervisor.qemu.version") + local qemu_tarball_name="kata-static-${QEMU_FLAVOUR}.tar.xz" + local current_qemu_version=$(get_from_kata_deps "assets.hypervisor.${QEMU_FLAVOUR}.version") + [ -z "${current_qemu_version}" ] && current_qemu_version=$(get_from_kata_deps "assets.hypervisor.${QEMU_FLAVOUR}.tag") local qemu_sha=$(calc_qemu_files_sha256sum) local current_qemu_image="$(get_qemu_image_name)" create_cache_asset "${qemu_tarball_name}" "${current_qemu_version}-${qemu_sha}" "${current_qemu_image}" @@ -105,10 +116,12 @@ Usage: $0 "[options]" -c Cloud hypervisor cache -F Firecracker cache -k Kernel cache - * Export KERNEL_FLAVOUR="kernel|kernek-experimental|kernel-arm-experimental|kernel-dragonball-experimental" for a specific build + * Export KERNEL_FLAVOUR="kernel | kernel-experimental | kernel-arm-experimental | kernel-dragonball-experimental | kernel-tdx-experimental" for a specific build The default KERNEL_FLAVOUR value is "kernel" -n Nydus cache -q QEMU cache + * Export QEMU_FLAVOUR="qemu | qemu-tdx-experimental" for a specific build + The default QEMU_FLAVOUR value is "qemu" -r RootFS cache * Export ROOTFS_IMAGE_TYPE="image|initrd" for one of those two types The default ROOTFS_IMAGE_TYPE value is "image" @@ -124,12 +137,13 @@ 
 	local firecracker_component="${firecracker_component:-}"
 	local kernel_component="${kernel_component:-}"
 	local nydus_component="${nydus_component:-}"
+	local ovmf_component="${ovmf_component:-}"
 	local qemu_component="${qemu_component:-}"
 	local rootfs_component="${rootfs_component:-}"
 	local shim_v2_component="${shim_v2_component:-}"
 	local virtiofsd_component="${virtiofsd_component:-}"
 	local OPTIND
-	while getopts ":cFknqrsvh:" opt
+	while getopts ":cFknoqrsvh:" opt
 	do
 		case "$opt" in
 		c)
@@ -144,6 +158,9 @@ main() {
 		n)
 			nydus_component="1"
 			;;
+		o)
+			ovmf_component="1"
+			;;
 		q)
 			qemu_component="1"
 			;;
@@ -173,6 +190,7 @@ main() {
 	[[ -z "${firecracker_component}" ]] && \
 	[[ -z "${kernel_component}" ]] && \
 	[[ -z "${nydus_component}" ]] && \
+	[[ -z "${ovmf_component}" ]] && \
 	[[ -z "${qemu_component}" ]] && \
 	[[ -z "${rootfs_component}" ]] && \
 	[[ -z "${shim_v2_component}" ]] && \
@@ -187,6 +205,7 @@ main() {
 	[ "${firecracker_component}" == "1" ] && cache_firecracker_artifacts
 	[ "${kernel_component}" == "1" ] && cache_kernel_artifacts
 	[ "${nydus_component}" == "1" ] && cache_nydus_artifacts
+	[ "${ovmf_component}" == "1" ] && cache_ovmf_artifacts
 	[ "${qemu_component}" == "1" ] && cache_qemu_artifacts
 	[ "${rootfs_component}" == "1" ] && cache_rootfs_artifacts
 	[ "${shim_v2_component}" == "1" ] && cache_shim_v2_artifacts
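Together with the new -o flag, the FLAVOUR variables select which variant of a component gets cached. For example, and only as a sketch based on the flavour lists documented above, caching the TDVF firmware and the TDX QEMU build could look like:

    OVMF_FLAVOUR=tdx ./cache_components_main.sh -o
    QEMU_FLAVOUR=qemu-tdx-experimental ./cache_components_main.sh -q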
"${build_path_fv}/OVMF_CODE.fd" stat "${build_path_fv}/OVMF_VARS.fd" - stat "${build_path_arch}/DumpTdxEventLog.efi" fi #need to leave tmp dir @@ -94,7 +93,6 @@ fi if [ "${ovmf_build}" == "tdx" ]; then install $build_root/$ovmf_dir/"${build_path_fv}"/OVMF_CODE.fd ${install_dir} install $build_root/$ovmf_dir/"${build_path_fv}"/OVMF_VARS.fd ${install_dir} - install $build_root/$ovmf_dir/"${build_path_arch}"/DumpTdxEventLog.efi ${install_dir} fi local_dir=${PWD} diff --git a/tools/packaging/static-build/ovmf/build.sh b/tools/packaging/static-build/ovmf/build.sh index 3929710db..ff48a2aba 100755 --- a/tools/packaging/static-build/ovmf/build.sh +++ b/tools/packaging/static-build/ovmf/build.sh @@ -25,9 +25,6 @@ package_output_dir="${package_output_dir:-}" if [ -z "$ovmf_repo" ]; then case "${ovmf_build}" in - "tdx") - ovmf_repo=$(get_from_kata_deps "externals.ovmf.tdx.url" "${kata_version}") - ;; "sev") ovmf_repo=$(get_from_kata_deps "externals.ovmf.sev.url" "${kata_version}") ;; diff --git a/tools/packaging/static-build/qemu/build-static-qemu-experimental.sh b/tools/packaging/static-build/qemu/build-static-qemu-experimental.sh index be50fb977..1e0541c54 100755 --- a/tools/packaging/static-build/qemu/build-static-qemu-experimental.sh +++ b/tools/packaging/static-build/qemu/build-static-qemu-experimental.sh @@ -14,6 +14,8 @@ source "${script_dir}/../../scripts/lib.sh" qemu_repo="${qemu_repo:-}" qemu_version="${qemu_version:-}" +qemu_suffix="${qemu_suffix:-experimental}" +qemu_tarball_name="${qemu_tarball_name:-kata-static-qemu-experimental.tar.gz}" if [ -z "$qemu_repo" ]; then info "Get qemu information from runtime versions.yaml" @@ -26,4 +28,4 @@ fi [ -n "$qemu_version" ] || qemu_version=$(get_from_kata_deps "assets.hypervisor.qemu-experimental.version") [ -n "$qemu_version" ] || die "failed to get qemu version" -"${script_dir}/build-base-qemu.sh" "${qemu_repo}" "${qemu_version}" "experimental" "kata-static-qemu-experimental.tar.gz" +"${script_dir}/build-base-qemu.sh" "${qemu_repo}" "${qemu_version}" "${qemu_suffix}" "${qemu_tarball_name}" diff --git a/utils/kata-manager.sh b/utils/kata-manager.sh index fbc911ab4..c51fb516c 100755 --- a/utils/kata-manager.sh +++ b/utils/kata-manager.sh @@ -160,6 +160,8 @@ github_get_release_file_url() -r '.[] | select(.tag_name == $version) | .assets[].browser_download_url' |\ grep "/${regex}$") + download_url=$(echo $download_url | awk '{print $1}') + [ -z "$download_url" ] && die "Cannot determine download URL for version $version ($url)" echo "$download_url" @@ -254,9 +256,14 @@ pre_checks() { info "Running pre-checks" + local skip_containerd="${1:-}" + [ -z "$skip_containerd" ] && die "no skip_containerd value" + command -v "${kata_shim_v2}" &>/dev/null \ && die "Please remove existing $kata_project installation" + [skip_containerd = "false" ] && return 0 + local ret { containerd_installed; ret=$?; } || true @@ -315,6 +322,9 @@ setup() local force="${2:-}" [ -z "$force" ] && die "no force value" + local skip_containerd="${3:-}" + [ -z "$skip_containerd" ] && die "no skip_containerd value" + [ "$cleanup" = "true" ] && trap cleanup EXIT source /etc/os-release || source /usr/lib/os-release @@ -324,7 +334,7 @@ setup() [ "$force" = "true" ] && return 0 - pre_checks + pre_checks "$skip_containerd" } # Download the requested version of the specified project. 
@@ -673,8 +683,8 @@ handle_installation()
 	local force="${2:-}"
 	[ -z "$force" ] && die "no force value"
-	local only_kata="${3:-}"
-	[ -z "$only_kata" ] && die "no only Kata value"
+	local skip_containerd="${3:-}"
+	[ -z "$skip_containerd" ] && die "no skip_containerd value"
 	local enable_debug="${4:-}"
 	[ -z "$enable_debug" ] && die "no enable debug value"
@@ -691,11 +701,11 @@ handle_installation()
 	[ "$only_run_test" = "true" ] && test_installation && return 0
-	setup "$cleanup" "$force"
+	setup "$cleanup" "$force" "$skip_containerd"
 	handle_kata "$kata_version" "$enable_debug"
-	[ "$only_kata" = "false" ] && \
+	[ "$skip_containerd" = "false" ] && \
 		handle_containerd \
 		"$containerd_version" \
 		"$force" \
@@ -703,7 +713,7 @@ handle_installation()
 	[ "$disable_test" = "false" ] && test_installation
-	if [ "$only_kata" = "true" ]
+	if [ "$skip_containerd" = "true" ]
 	then
 		info "$kata_project is now installed"
 	else
@@ -717,7 +727,7 @@ handle_args()
 {
 	local cleanup="true"
 	local force="false"
-	local only_kata="false"
+	local skip_containerd="false"
 	local disable_test="false"
 	local only_run_test="false"
 	local enable_debug="false"
@@ -735,7 +745,7 @@ handle_args()
 		f) force="true" ;;
 		h) usage; exit 0 ;;
 		k) kata_version="$OPTARG" ;;
-		o) only_kata="true" ;;
+		o) skip_containerd="true" ;;
 		r) cleanup="false" ;;
 		t) disable_test="true" ;;
 		T) only_run_test="true" ;;
@@ -750,7 +760,7 @@ handle_args()
 	handle_installation \
 		"$cleanup" \
 		"$force" \
-		"$only_kata" \
+		"$skip_containerd" \
 		"$enable_debug" \
 		"$disable_test" \
 		"$only_run_test" \
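After the rename, -o now reads as "skip containerd" rather than "only Kata", while the user-visible behaviour of the flag stays the same. A typical invocation that installs only the Kata Containers pieces and leaves an existing containerd untouched (the version passed to -k is purely illustrative) is:

    ./utils/kata-manager.sh -o           # install Kata, skip the containerd setup
    ./utils/kata-manager.sh -o -k 3.0.0  # same, pinned to an illustrative release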
diff --git a/versions.yaml b/versions.yaml
index 8e6073fe5..b894d3f82 100644
--- a/versions.yaml
+++ b/versions.yaml
@@ -98,10 +98,6 @@ assets:
       uscan-url: >-
         https://github.com/qemu/qemu/tags
         .*/v?(\d\S+)\.tar\.gz
-    tdx:
-      description: "VMM that uses KVM and supports TDX"
-      url: "https://github.com/kata-containers/qemu"
-      tag: "TDX-v3.1"
     snp:
      description: "VMM that uses KVM and supports AMD SEV-SNP"
      url: "https://github.com/AMDESE/qemu"
@@ -112,6 +108,12 @@ assets:
      url: "https://github.com/qemu/qemu"
      version: "7a800cf9496fddddf71b21a00991e0ec757a170a"
+    qemu-tdx-experimental:
+      # yamllint disable-line rule:line-length
+      description: "QEMU with TDX support - based on https://github.com/intel/tdx-tools/releases/tag/2023ww01"
+      url: "https://github.com/kata-containers/qemu"
+      tag: "ad4c7f529a279685da84297773b4ec8080153c2d-plus-TDX-v1.3"
+
   image:
     description: |
       Root filesystem disk image used to boot the guest virtual
@@ -156,10 +158,6 @@ assets:
     description: "Linux kernel optimised for virtual machines"
    url: "https://cdn.kernel.org/pub/linux/kernel/v5.x/"
    version: "v5.19.2"
-    tdx:
-      description: "Linux kernel that supports TDX"
-      url: "https://github.com/kata-containers/linux/archive/refs/tags"
-      tag: "5.15-plus-TDX"
    sev:
      description: "Linux kernel that supports SEV"
      url: "https://cdn.kernel.org/pub/linux/kernel/v5.x/"
@@ -184,6 +182,12 @@ assets:
    url: "https://cdn.kernel.org/pub/linux/kernel/v5.x/"
    version: "v5.10.25"
+  kernel-tdx-experimental:
+    # yamllint disable-line rule:line-length
+    description: "Linux kernel with TDX support -- based on https://github.com/intel/tdx-tools/releases/tag/2023ww01"
+    url: "https://github.com/kata-containers/linux/archive/refs/tags"
+    version: "5.19-TDX-v2.2"
+
 externals:
   description: "Third-party projects used by the system"
@@ -277,7 +281,7 @@ externals:
   nydus:
     description: "Nydus image acceleration service"
     url: "https://github.com/dragonflyoss/image-service"
-    version: "v2.1.1"
+    version: "v2.2.0"
   nydus-snapshotter:
     description: "Snapshotter for Nydus image acceleration service"
@@ -301,11 +305,11 @@ externals:
     package: "OvmfPkg/AmdSev/AmdSevX64.dsc"
     package_output_dir: "AmdSev"
   tdx:
-    url: "https://github.com/tianocore/edk2-staging"
-    description: "TDVF build needed for TDX measured direct boot."
-    version: "2022-tdvf-ww28.5"
-    package: "OvmfPkg/OvmfPkgX64.dsc"
-    package_output_dir: "OvmfX64"
+    # yamllint disable-line rule:line-length
+    description: "TDVF build with TDX support - based on https://github.com/intel/tdx-tools/releases/tag/2023ww01"
+    version: "edk2-stable202211"
+    package: "OvmfPkg/IntelTdx/IntelTdxX64.dsc"
+    package_output_dir: "IntelTdx"
   td-shim:
     description: "Confidential Containers Shim Firmware"