Merge pull request #8022 from fidencio/topic/stable-3.2-backport-everything-tests-related

stable-3.2: Backport everything related to the tests
Authored by Fabiano Fidêncio on 2023-10-11 16:11:41 +02:00, committed by GitHub.
229 changed files with 9192 additions and 2806 deletions

View File

@@ -39,7 +39,7 @@ jobs:
popd &>/dev/null
- name: Checkout code to allow hub to communicate with the project
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Add issue to issue backlog
env:

View File

@@ -21,7 +21,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v1
uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
- name: Install PR sizing label script
run: |
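
Nearly every job touched by this backport gains the same prologue shown above: check out the pull request head with fetch-depth: 0, then call tests/git-helper.sh to rebase the PR atop the tip of its target branch before anything else runs. The helper script itself is not part of this diff, so the inline commands below are only an assumption of roughly what that step does, written out as a self-contained GitHub Actions step for illustration; the committer identity values are hypothetical.

- name: Rebase atop of the latest target branch
  run: |
    # Assumed behaviour of "git-helper.sh rebase-atop-of-the-latest-target-branch":
    # fetch the target branch and replay the PR commits on top of it.
    git config user.email "ci@example.com"   # hypothetical identity; rebase needs a committer
    git config user.name "Kata CI"           # hypothetical identity
    git fetch origin "${TARGET_BRANCH}"
    git rebase "origin/${TARGET_BRANCH}"
  env:
    TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}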

.github/workflows/basic-ci-amd64.yaml (new file, 200 lines)
View File

@@ -0,0 +1,200 @@
name: CI | Basic amd64 tests
on:
workflow_call:
inputs:
tarball-suffix:
required: false
type: string
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
run-cri-containerd:
strategy:
# We can set this to true whenever we're 100% sure that
# all the tests are not flaky, otherwise we'll fail
# all the tests due to a single flaky instance.
fail-fast: false
matrix:
containerd_version: ['lts', 'active']
vmm: ['clh', 'qemu']
runs-on: garm-ubuntu-2204-smaller
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
GOPATH: ${{ github.workspace }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Install dependencies
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
- name: get-kata-tarball
uses: actions/download-artifact@v3
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
path: kata-artifacts
- name: Install kata
run: bash tests/integration/cri-containerd/gha-run.sh install-kata kata-artifacts
- name: Run cri-containerd tests
run: bash tests/integration/cri-containerd/gha-run.sh run
run-containerd-stability:
strategy:
fail-fast: false
matrix:
containerd_version: ['lts', 'active']
vmm: ['clh', 'qemu']
runs-on: garm-ubuntu-2204-smaller
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
GOPATH: ${{ github.workspace }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Install dependencies
run: bash tests/stability/gha-run.sh install-dependencies
- name: get-kata-tarball
uses: actions/download-artifact@v3
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
path: kata-artifacts
- name: Install kata
run: bash tests/stability/gha-run.sh install-kata kata-artifacts
- name: Run containerd-stability tests
run: bash tests/stability/gha-run.sh run
run-nydus:
strategy:
# We can set this to true whenever we're 100% sure that
# all the tests are not flaky, otherwise we'll fail
# all the tests due to a single flaky instance.
fail-fast: false
matrix:
containerd_version: ['lts', 'active']
vmm: ['clh', 'qemu', 'dragonball']
runs-on: garm-ubuntu-2204-smaller
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
GOPATH: ${{ github.workspace }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Install dependencies
run: bash tests/integration/nydus/gha-run.sh install-dependencies
- name: get-kata-tarball
uses: actions/download-artifact@v3
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
path: kata-artifacts
- name: Install kata
run: bash tests/integration/nydus/gha-run.sh install-kata kata-artifacts
- name: Run nydus tests
run: bash tests/integration/nydus/gha-run.sh run
run-runk:
runs-on: garm-ubuntu-2204-smaller
env:
CONTAINERD_VERSION: lts
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Install dependencies
run: bash tests/integration/runk/gha-run.sh install-dependencies
- name: get-kata-tarball
uses: actions/download-artifact@v3
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
path: kata-artifacts
- name: Install kata
run: bash tests/integration/runk/gha-run.sh install-kata kata-artifacts
- name: Run tracing tests
run: bash tests/integration/runk/gha-run.sh run
run-vfio:
strategy:
fail-fast: false
matrix:
vmm: ['clh', 'qemu']
runs-on: garm-ubuntu-2304
env:
GOPATH: ${{ github.workspace }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Install dependencies
run: bash tests/functional/vfio/gha-run.sh install-dependencies
- name: get-kata-tarball
uses: actions/download-artifact@v3
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
path: kata-artifacts
- name: Run vfio tests
timeout-minutes: 15
run: bash tests/functional/vfio/gha-run.sh run
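
The new basic-ci-amd64.yaml above is a reusable workflow (it only triggers on workflow_call), so it runs solely when another workflow invokes it; later in this PR, ci.yaml replaces the separate cri-containerd, nydus and vfio callers with one such invocation. A minimal caller sketch, matching the inputs declared above:

jobs:
  run-basic-amd64-tests:
    needs: build-kata-static-tarball-amd64
    uses: ./.github/workflows/basic-ci-amd64.yaml
    with:
      tarball-suffix: -${{ inputs.tag }}
      commit-hash: ${{ inputs.commit-hash }}
      target-branch: ${{ inputs.target-branch }}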

View File

@@ -16,6 +16,10 @@ on:
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
build-asset:
@@ -23,9 +27,12 @@ jobs:
strategy:
matrix:
asset:
- agent
- agent-ctl
- cloud-hypervisor
- cloud-hypervisor-glibc
- firecracker
- kata-ctl
- kernel
- kernel-sev
- kernel-dragonball-experimental
@@ -33,6 +40,7 @@ jobs:
- kernel-nvidia-gpu
- kernel-nvidia-gpu-snp
- kernel-nvidia-gpu-tdx-experimental
- log-parser-rs
- nydus
- ovmf
- ovmf-sev
@@ -44,12 +52,16 @@ jobs:
- rootfs-initrd
- rootfs-initrd-mariner
- rootfs-initrd-sev
- runk
- shim-v2
- tdvf
- trace-forwarder
- virtiofsd
stage:
- ${{ inputs.stage }}
exclude:
- asset: agent
stage: release
- asset: cloud-hypervisor-glibc
stage: release
steps:
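
The exclude entries above remove specific asset/stage pairings from the expanded build matrix while every other combination still runs. A reduced sketch of the same mechanism, with the asset and stage lists shortened for illustration:

strategy:
  matrix:
    asset: [agent, cloud-hypervisor-glibc, shim-v2]
    stage: [test, release]
    exclude:
      # only these two pairings are dropped; the remaining four combinations still build
      - asset: agent
        stage: release
      - asset: cloud-hypervisor-glibc
        stage: release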
@@ -66,6 +78,12 @@ jobs:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0 # This is needed in order to keep the commit ids history
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Build ${{ matrix.asset }}
run: |
make "${KATA_ASSET}-tarball"
@@ -76,6 +94,10 @@ jobs:
KATA_ASSET: ${{ matrix.asset }}
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
PUSH_TO_REGISTRY: ${{ inputs.push-to-registry }}
ARTEFACT_REGISTRY: ghcr.io
ARTEFACT_REGISTRY_USERNAME: ${{ github.actor }}
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: store-artifact ${{ matrix.asset }}
uses: actions/upload-artifact@v3
@@ -92,6 +114,12 @@ jobs:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: get-artifacts
uses: actions/download-artifact@v3
with:

View File

@@ -16,10 +16,14 @@ on:
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
build-asset:
runs-on: arm64
runs-on: arm64-builder
strategy:
matrix:
asset:
@@ -52,6 +56,13 @@ jobs:
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0 # This is needed in order to keep the commit ids history
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Build ${{ matrix.asset }}
run: |
make "${KATA_ASSET}-tarball"
@@ -62,6 +73,10 @@ jobs:
KATA_ASSET: ${{ matrix.asset }}
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
PUSH_TO_REGISTRY: ${{ inputs.push-to-registry }}
ARTEFACT_REGISTRY: ghcr.io
ARTEFACT_REGISTRY_USERNAME: ${{ github.actor }}
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: store-artifact ${{ matrix.asset }}
uses: actions/upload-artifact@v3
@@ -72,7 +87,7 @@ jobs:
if-no-files-found: error
create-kata-tarball:
runs-on: arm64
runs-on: arm64-builder
needs: build-asset
steps:
- name: Adjust a permission for repo
@@ -82,6 +97,12 @@ jobs:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: get-artifacts
uses: actions/download-artifact@v3
with:

View File

@@ -16,6 +16,10 @@ on:
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
build-asset:
@@ -48,6 +52,13 @@ jobs:
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0 # This is needed in order to keep the commit ids history
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Build ${{ matrix.asset }}
run: |
make "${KATA_ASSET}-tarball"
@@ -59,6 +70,10 @@ jobs:
KATA_ASSET: ${{ matrix.asset }}
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
PUSH_TO_REGISTRY: ${{ inputs.push-to-registry }}
ARTEFACT_REGISTRY: ghcr.io
ARTEFACT_REGISTRY_USERNAME: ${{ github.actor }}
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: store-artifact ${{ matrix.asset }}
uses: actions/upload-artifact@v3
@@ -79,6 +94,12 @@ jobs:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: get-artifacts
uses: actions/download-artifact@v3
with:

View File

@@ -15,4 +15,5 @@ jobs:
commit-hash: ${{ github.sha }}
pr-number: "nightly"
tag: ${{ github.sha }}-nightly
target-branch: ${{ github.ref_name }}
secrets: inherit

View File

@@ -28,4 +28,5 @@ jobs:
commit-hash: ${{ github.event.pull_request.head.sha }}
pr-number: ${{ github.event.pull_request.number }}
tag: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}
target-branch: ${{ github.event.pull_request.base.ref }}
secrets: inherit

View File

@@ -11,6 +11,10 @@ on:
tag:
required: true
type: string
target-branch:
required: false
type: string
default: ""
jobs:
build-kata-static-tarball-amd64:
@@ -18,6 +22,7 @@ jobs:
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
publish-kata-deploy-payload-amd64:
needs: build-kata-static-tarball-amd64
@@ -28,8 +33,94 @@ jobs:
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets: inherit
build-and-publish-tee-confidential-unencrypted-image:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Kata Containers ghcr.io
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker build and push
uses: docker/build-push-action@v4
with:
tags: ghcr.io/kata-containers/test-images:unencrypted-${{ inputs.pr-number }}
push: true
context: tests/integration/kubernetes/runtimeclass_workloads/confidential/unencrypted/
platforms: linux/amd64, linux/s390x
file: tests/integration/kubernetes/runtimeclass_workloads/confidential/unencrypted/Dockerfile
run-docker-tests-on-garm:
needs: build-kata-static-tarball-amd64
uses: ./.github/workflows/run-docker-tests-on-garm.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
run-nerdctl-tests-on-garm:
needs: build-kata-static-tarball-amd64
uses: ./.github/workflows/run-nerdctl-tests-on-garm.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
run-kata-deploy-tests-on-aks:
needs: publish-kata-deploy-payload-amd64
uses: ./.github/workflows/run-kata-deploy-tests-on-aks.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets: inherit
run-kata-deploy-tests-on-garm:
needs: publish-kata-deploy-payload-amd64
uses: ./.github/workflows/run-kata-deploy-tests-on-garm.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets: inherit
run-kata-monitor-tests:
needs: build-kata-static-tarball-amd64
uses: ./.github/workflows/run-kata-monitor-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
run-k8s-tests-on-aks:
needs: publish-kata-deploy-payload-amd64
uses: ./.github/workflows/run-k8s-tests-on-aks.yaml
@@ -39,34 +130,43 @@ jobs:
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets: inherit
run-k8s-tests-on-sev:
run-k8s-tests-on-garm:
needs: publish-kata-deploy-payload-amd64
uses: ./.github/workflows/run-k8s-tests-on-sev.yaml
uses: ./.github/workflows/run-k8s-tests-on-garm.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets: inherit
run-k8s-tests-on-snp:
run-k8s-tests-with-crio-on-garm:
needs: publish-kata-deploy-payload-amd64
uses: ./.github/workflows/run-k8s-tests-on-snp.yaml
uses: ./.github/workflows/run-k8s-tests-with-crio-on-garm.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets: inherit
run-k8s-tests-on-tdx:
needs: publish-kata-deploy-payload-amd64
uses: ./.github/workflows/run-k8s-tests-on-tdx.yaml
run-kata-coco-tests:
needs: [publish-kata-deploy-payload-amd64, build-and-publish-tee-confidential-unencrypted-image]
uses: ./.github/workflows/run-kata-coco-tests.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
run-metrics-tests:
needs: build-kata-static-tarball-amd64
@@ -74,24 +174,12 @@ jobs:
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
run-cri-containerd-tests:
run-basic-amd64-tests:
needs: build-kata-static-tarball-amd64
uses: ./.github/workflows/run-cri-containerd-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
run-nydus-tests:
needs: build-kata-static-tarball-amd64
uses: ./.github/workflows/run-nydus-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
run-vfio-tests:
needs: build-kata-static-tarball-amd64
uses: ./.github/workflows/run-vfio-tests.yaml
uses: ./.github/workflows/basic-ci-amd64.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
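
The target-branch value that feeds every new rebase step is threaded down through workflow_call inputs: PR-triggered callers pass the pull request's base branch, push and nightly callers pass github.ref_name, the reusable workflows declare the input with an empty-string default, and each job finally exports it as TARGET_BRANCH. A condensed caller sketch assembling the with-block shown earlier (the job name here is hypothetical):

jobs:
  kata-containers-ci-on-pr:
    uses: ./.github/workflows/ci.yaml
    with:
      commit-hash: ${{ github.event.pull_request.head.sha }}
      pr-number: ${{ github.event.pull_request.number }}
      tag: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}
      target-branch: ${{ github.event.pull_request.base.ref }}
    secrets: inherit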

View File

@@ -21,6 +21,6 @@ jobs:
with:
go-version: 1.19.3
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Build utils
run: ./ci/darwin-test.sh

View File

@@ -22,7 +22,7 @@ jobs:
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
path: ./src/github.com/${{ github.repository }}

View File

@@ -38,7 +38,17 @@ jobs:
- name: Checkout code to allow hub to communicate with the project
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
- name: Move issue to "In progress"
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}

View File

@@ -4,6 +4,7 @@ on:
branches:
- main
- stable-*
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
@@ -15,6 +16,7 @@ jobs:
with:
commit-hash: ${{ github.sha }}
push-to-registry: yes
target-branch: ${{ github.ref_name }}
secrets: inherit
build-assets-arm64:
@@ -22,6 +24,7 @@ jobs:
with:
commit-hash: ${{ github.sha }}
push-to-registry: yes
target-branch: ${{ github.ref_name }}
secrets: inherit
build-assets-s390x:
@@ -29,6 +32,7 @@ jobs:
with:
commit-hash: ${{ github.sha }}
push-to-registry: yes
target-branch: ${{ github.ref_name }}
secrets: inherit
publish-kata-deploy-payload-amd64:
@@ -39,6 +43,7 @@ jobs:
registry: quay.io
repo: kata-containers/kata-deploy-ci
tag: kata-containers-amd64
target-branch: ${{ github.ref_name }}
secrets: inherit
publish-kata-deploy-payload-arm64:
@@ -49,6 +54,7 @@ jobs:
registry: quay.io
repo: kata-containers/kata-deploy-ci
tag: kata-containers-arm64
target-branch: ${{ github.ref_name }}
secrets: inherit
publish-kata-deploy-payload-s390x:
@@ -59,6 +65,7 @@ jobs:
registry: quay.io
repo: kata-containers/kata-deploy-ci
tag: kata-containers-s390x
target-branch: ${{ github.ref_name }}
secrets: inherit
publish-manifest:

View File

@@ -17,6 +17,10 @@ on:
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
kata-payload:
@@ -25,6 +29,13 @@ jobs:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: get-kata-tarball
uses: actions/download-artifact@v3

View File

@@ -17,10 +17,14 @@ on:
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
kata-payload:
runs-on: arm64
runs-on: arm64-builder
steps:
- name: Adjust a permission for repo
run: |
@@ -29,6 +33,13 @@ jobs:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: get-kata-tarball
uses: actions/download-artifact@v3

View File

@@ -17,6 +17,10 @@ on:
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
kata-payload:
@@ -29,6 +33,13 @@ jobs:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: get-kata-tarball
uses: actions/download-artifact@v3

View File

@@ -14,7 +14,7 @@ jobs:
kata-deploy:
needs: build-kata-static-tarball-arm64
runs-on: arm64
runs-on: arm64-builder
steps:
- name: Login to Kata Containers docker.io
uses: docker/login-action@v2

View File

@@ -36,7 +36,17 @@ jobs:
- name: Checkout code to allow hub to communicate with the project
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
- name: Install porting checker script
run: |

View File

@@ -1,42 +0,0 @@
name: CI | Run cri-containerd tests
on:
workflow_call:
inputs:
tarball-suffix:
required: false
type: string
commit-hash:
required: false
type: string
jobs:
run-cri-containerd:
strategy:
fail-fast: true
matrix:
containerd_version: ['lts', 'active']
vmm: ['clh', 'qemu']
runs-on: garm-ubuntu-2204
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
GOPATH: ${{ github.workspace }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
- name: Install dependencies
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
- name: get-kata-tarball
uses: actions/download-artifact@v3
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
path: kata-artifacts
- name: Install kata
run: bash tests/integration/cri-containerd/gha-run.sh install-kata kata-artifacts
- name: Run cri-containerd tests
run: bash tests/integration/cri-containerd/gha-run.sh run

View File

@@ -0,0 +1,56 @@
name: CI | Run docker integration tests
on:
workflow_call:
inputs:
tarball-suffix:
required: false
type: string
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
run-docker-tests:
strategy:
# We can set this to true whenever we're 100% sure that
# all the tests are not flaky, otherwise we'll fail them
# all due to a single flaky instance.
fail-fast: false
matrix:
vmm:
- clh
- qemu
runs-on: garm-ubuntu-2304-smaller
env:
KATA_HYPERVISOR: ${{ matrix.vmm }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Install dependencies
run: bash tests/integration/docker/gha-run.sh install-dependencies
- name: get-kata-tarball
uses: actions/download-artifact@v3
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
path: kata-artifacts
- name: Install kata
run: bash tests/integration/docker/gha-run.sh install-kata kata-artifacts
- name: Run docker smoke test
timeout-minutes: 5
run: bash tests/integration/docker/gha-run.sh run

View File

@@ -17,6 +17,10 @@ on:
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
run-k8s-tests:
@@ -29,6 +33,9 @@ jobs:
- clh
- dragonball
- qemu
instance-type:
- small
- normal
include:
- host_os: cbl-mariner
vmm: clh
@@ -40,11 +47,20 @@ jobs:
GH_PR_NUMBER: ${{ inputs.pr-number }}
KATA_HOST_OS: ${{ matrix.host_os }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBERNETES: "vanilla"
USING_NFD: "false"
K8S_TEST_HOST_TYPE: ${{ matrix.instance-type }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Download Azure CLI
run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli

View File

@@ -0,0 +1,88 @@
name: CI | Run kubernetes tests on GARM
on:
workflow_call:
inputs:
registry:
required: true
type: string
repo:
required: true
type: string
tag:
required: true
type: string
pr-number:
required: true
type: string
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
run-k8s-tests:
strategy:
fail-fast: false
matrix:
vmm:
- clh #cloud-hypervisor
- fc #firecracker
- qemu
snapshotter:
- devmapper
k8s:
- k3s
instance:
- garm-ubuntu-2004
- garm-ubuntu-2004-smaller
include:
- instance: garm-ubuntu-2004
instance-type: normal
- instance: garm-ubuntu-2004-smaller
instance-type: small
runs-on: ${{ matrix.instance }}
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
PR_NUMBER: ${{ inputs.pr-number }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBERNETES: ${{ matrix.k8s }}
SNAPSHOTTER: ${{ matrix.snapshotter }}
USING_NFD: "false"
K8S_TEST_HOST_TYPE: ${{ matrix.instance-type }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Deploy ${{ matrix.k8s }}
run: bash tests/integration/kubernetes/gha-run.sh deploy-k8s
- name: Configure the ${{ matrix.snapshotter }} snapshotter
run: bash tests/integration/kubernetes/gha-run.sh configure-snapshotter
- name: Deploy Kata
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-garm
- name: Install `bats`
run: bash tests/integration/kubernetes/gha-run.sh install-bats
- name: Run tests
timeout-minutes: 30
run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Delete kata-deploy
if: always()
run: bash tests/integration/kubernetes/gha-run.sh cleanup-garm
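
The instance/instance-type pairing above uses matrix include as a lookup table: the instance axis selects the runner, and each include entry attaches a matching instance-type value that the job then exports as K8S_TEST_HOST_TYPE. A minimal sketch of that pattern with the values from this file:

strategy:
  matrix:
    instance: [garm-ubuntu-2004, garm-ubuntu-2004-smaller]
    include:
      # extend an existing combination with an extra key rather than creating a new one
      - instance: garm-ubuntu-2004
        instance-type: normal
      - instance: garm-ubuntu-2004-smaller
        instance-type: small
runs-on: ${{ matrix.instance }}
env:
  K8S_TEST_HOST_TYPE: ${{ matrix.instance-type }}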

View File

@@ -1,48 +0,0 @@
name: CI | Run kubernetes tests on SEV
on:
workflow_call:
inputs:
registry:
required: true
type: string
repo:
required: true
type: string
tag:
required: true
type: string
commit-hash:
required: false
type: string
jobs:
run-k8s-tests:
strategy:
fail-fast: false
matrix:
vmm:
- qemu-sev
runs-on: sev
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBECONFIG: /home/kata/.kube/config
USING_NFD: "false"
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
- name: Deploy Kata
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-sev
- name: Run tests
timeout-minutes: 30
run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Delete kata-deploy
if: always()
run: bash tests/integration/kubernetes/gha-run.sh cleanup-sev

View File

@@ -1,48 +0,0 @@
name: CI | Run kubernetes tests on SEV-SNP
on:
workflow_call:
inputs:
registry:
required: true
type: string
repo:
required: true
type: string
tag:
required: true
type: string
commit-hash:
required: false
type: string
jobs:
run-k8s-tests:
strategy:
fail-fast: false
matrix:
vmm:
- qemu-snp
runs-on: sev-snp
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBECONFIG: /home/kata/.kube/config
USING_NFD: "false"
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
- name: Deploy Kata
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-snp
- name: Run tests
timeout-minutes: 30
run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Delete kata-deploy
if: always()
run: bash tests/integration/kubernetes/gha-run.sh cleanup-snp

View File

@@ -1,47 +0,0 @@
name: CI | Run kubernetes tests on TDX
on:
workflow_call:
inputs:
registry:
required: true
type: string
repo:
required: true
type: string
tag:
required: true
type: string
commit-hash:
required: false
type: string
jobs:
run-k8s-tests:
strategy:
fail-fast: false
matrix:
vmm:
- qemu-tdx
runs-on: tdx
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
USING_NFD: "true"
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
- name: Deploy Kata
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-tdx
- name: Run tests
timeout-minutes: 30
run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Delete kata-deploy
if: always()
run: bash tests/integration/kubernetes/gha-run.sh cleanup-tdx

View File

@@ -0,0 +1,86 @@
name: CI | Run kubernetes tests, using CRI-O, on GARM
on:
workflow_call:
inputs:
registry:
required: true
type: string
repo:
required: true
type: string
tag:
required: true
type: string
pr-number:
required: true
type: string
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
run-k8s-tests:
strategy:
fail-fast: false
matrix:
vmm:
- qemu
k8s:
- k0s
instance:
- garm-ubuntu-2004
- garm-ubuntu-2004-smaller
include:
- instance: garm-ubuntu-2004
instance-type: normal
- instance: garm-ubuntu-2004-smaller
instance-type: small
- k8s: k0s
k8s-extra-params: '--cri-socket remote:unix:///var/run/crio/crio.sock --kubelet-extra-args --cgroup-driver="systemd"'
runs-on: ${{ matrix.instance }}
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
PR_NUMBER: ${{ inputs.pr-number }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBERNETES: ${{ matrix.k8s }}
KUBERNETES_EXTRA_PARAMS: ${{ matrix.k8s-extra-params }}
USING_NFD: "false"
K8S_TEST_HOST_TYPE: ${{ matrix.instance-type }}
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Configure CRI-O
run: bash tests/integration/kubernetes/gha-run.sh setup-crio
- name: Deploy ${{ matrix.k8s }}
run: bash tests/integration/kubernetes/gha-run.sh deploy-k8s
- name: Deploy Kata
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-garm
- name: Install `bats`
run: bash tests/integration/kubernetes/gha-run.sh install-bats
- name: Run tests
timeout-minutes: 30
run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Delete kata-deploy
if: always()
run: bash tests/integration/kubernetes/gha-run.sh cleanup-garm

View File

@@ -0,0 +1,176 @@
name: CI | Run kata coco tests
on:
workflow_call:
inputs:
registry:
required: true
type: string
repo:
required: true
type: string
tag:
required: true
type: string
pr-number:
required: true
type: string
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
run-kata-deploy-tests-on-tdx:
strategy:
fail-fast: false
matrix:
vmm:
- qemu-tdx
runs-on: tdx
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
PR_NUMBER: ${{ inputs.pr-number }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBERNETES: "k3s"
USING_NFD: "true"
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Run tests
run: bash tests/functional/kata-deploy/gha-run.sh run-tests
run-k8s-tests-on-tdx:
strategy:
fail-fast: false
matrix:
vmm:
- qemu-tdx
runs-on: tdx
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
PR_NUMBER: ${{ inputs.pr-number }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBERNETES: "k3s"
USING_NFD: "true"
K8S_TEST_HOST_TYPE: "baremetal"
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Deploy Kata
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-tdx
- name: Run tests
timeout-minutes: 30
run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Delete kata-deploy
if: always()
run: bash tests/integration/kubernetes/gha-run.sh cleanup-tdx
run-k8s-tests-on-sev:
strategy:
fail-fast: false
matrix:
vmm:
- qemu-sev
runs-on: sev
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
PR_NUMBER: ${{ inputs.pr-number }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBECONFIG: /home/kata/.kube/config
KUBERNETES: "vanilla"
USING_NFD: "false"
K8S_TEST_HOST_TYPE: "baremetal"
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Deploy Kata
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-sev
- name: Run tests
timeout-minutes: 30
run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Delete kata-deploy
if: always()
run: bash tests/integration/kubernetes/gha-run.sh cleanup-sev
run-k8s-tests-sev-snp:
strategy:
fail-fast: false
matrix:
vmm:
- qemu-snp
runs-on: sev-snp
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
PR_NUMBER: ${{ inputs.pr-number }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBECONFIG: /home/kata/.kube/config
KUBERNETES: "vanilla"
USING_NFD: "false"
K8S_TEST_HOST_TYPE: "baremetal"
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Deploy Kata
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-snp
- name: Run tests
timeout-minutes: 30
run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Delete kata-deploy
if: always()
run: bash tests/integration/kubernetes/gha-run.sh cleanup-snp

View File

@@ -0,0 +1,89 @@
name: CI | Run kata-deploy tests on AKS
on:
workflow_call:
inputs:
registry:
required: true
type: string
repo:
required: true
type: string
tag:
required: true
type: string
pr-number:
required: true
type: string
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
run-kata-deploy-tests:
strategy:
fail-fast: false
matrix:
host_os:
- ubuntu
vmm:
- clh
- dragonball
- qemu
include:
- host_os: cbl-mariner
vmm: clh
runs-on: ubuntu-latest
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
GH_PR_NUMBER: ${{ inputs.pr-number }}
KATA_HOST_OS: ${{ matrix.host_os }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBERNETES: "vanilla"
USING_NFD: "false"
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Download Azure CLI
run: bash tests/functional/kata-deploy/gha-run.sh install-azure-cli
- name: Log into the Azure account
run: bash tests/functional/kata-deploy/gha-run.sh login-azure
env:
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
- name: Create AKS cluster
timeout-minutes: 10
run: bash tests/functional/kata-deploy/gha-run.sh create-cluster
- name: Install `bats`
run: bash tests/functional/kata-deploy/gha-run.sh install-bats
- name: Install `kubectl`
run: bash tests/functional/kata-deploy/gha-run.sh install-kubectl
- name: Download credentials for the Kubernetes CLI to use them
run: bash tests/functional/kata-deploy/gha-run.sh get-cluster-credentials
- name: Run tests
run: bash tests/functional/kata-deploy/gha-run.sh run-tests
- name: Delete AKS cluster
if: always()
run: bash tests/functional/kata-deploy/gha-run.sh delete-cluster

View File

@@ -0,0 +1,65 @@
name: CI | Run kata-deploy tests on GARM
on:
workflow_call:
inputs:
registry:
required: true
type: string
repo:
required: true
type: string
tag:
required: true
type: string
pr-number:
required: true
type: string
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
run-kata-deploy-tests:
strategy:
fail-fast: false
matrix:
vmm:
- clh
- qemu
k8s:
- k0s
- k3s
- rke2
runs-on: garm-ubuntu-2004-smaller
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
PR_NUMBER: ${{ inputs.pr-number }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBERNETES: ${{ matrix.k8s }}
USING_NFD: "false"
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Deploy ${{ matrix.k8s }}
run: bash tests/functional/kata-deploy/gha-run.sh deploy-k8s
- name: Install `bats`
run: bash tests/functional/kata-deploy/gha-run.sh install-bats
- name: Run tests
run: bash tests/functional/kata-deploy/gha-run.sh run-tests

View File

@@ -0,0 +1,59 @@
name: CI | Run kata-monitor tests
on:
workflow_call:
inputs:
tarball-suffix:
required: false
type: string
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
run-monitor:
strategy:
fail-fast: false
matrix:
vmm:
- qemu
container_engine:
- crio
- containerd
include:
- container_engine: containerd
containerd_version: lts
runs-on: garm-ubuntu-2204-smaller
env:
CONTAINER_ENGINE: ${{ matrix.container_engine }}
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Install dependencies
run: bash tests/functional/kata-monitor/gha-run.sh install-dependencies
- name: get-kata-tarball
uses: actions/download-artifact@v3
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
path: kata-artifacts
- name: Install kata
run: bash tests/functional/kata-monitor/gha-run.sh install-kata kata-artifacts
- name: Run kata-monitor tests
run: bash tests/functional/kata-monitor/gha-run.sh run

View File

@@ -8,22 +8,28 @@ on:
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
run-metrics:
strategy:
fail-fast: true
matrix:
vmm: ['clh', 'qemu']
max-parallel: 1
setup-kata:
name: Kata Setup
runs-on: metrics
env:
GOPATH: ${{ github.workspace }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: get-kata-tarball
uses: actions/download-artifact@v3
@@ -34,6 +40,24 @@ jobs:
- name: Install kata
run: bash tests/metrics/gha-run.sh install-kata kata-artifacts
run-metrics:
needs: setup-kata
strategy:
# We can set this to true whenever we're 100% sure that
# all the tests are not flaky, otherwise we'll fail
# all the tests due to a single flaky instance.
fail-fast: false
matrix:
vmm: ['clh', 'qemu']
max-parallel: 1
runs-on: metrics
env:
GOPATH: ${{ github.workspace }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
steps:
- name: enabling the hypervisor
run: bash tests/metrics/gha-run.sh enabling-hypervisor
- name: run launch times test
run: bash tests/metrics/gha-run.sh run-test-launchtimes
@@ -52,6 +76,12 @@ jobs:
- name: run fio test
run: bash tests/metrics/gha-run.sh run-test-fio
- name: run iperf test
run: bash tests/metrics/gha-run.sh run-test-iperf
- name: run latency test
run: bash tests/metrics/gha-run.sh run-test-latency
- name: make metrics tarball ${{ matrix.vmm }}
run: bash tests/metrics/gha-run.sh make-tarball-results
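
The metrics workflow is now split into two jobs: setup-kata installs the Kata artifacts once on the bare-metal metrics runner, and run-metrics, which needs it, then iterates over the hypervisors on that same machine. Because both matrix legs target the one shared runner, max-parallel: 1 keeps the clh and qemu runs from overlapping. A reduced sketch of that job relationship; the run lines are placeholders for the real steps shown above:

jobs:
  setup-kata:
    runs-on: metrics
    steps:
      - run: echo "download and install the kata artifacts"   # placeholder
  run-metrics:
    needs: setup-kata
    runs-on: metrics
    strategy:
      fail-fast: false
      max-parallel: 1   # serialize the per-hypervisor runs on the single metrics machine
      matrix:
        vmm: ['clh', 'qemu']
    steps:
      - run: echo "enable ${{ matrix.vmm }} and run the metrics tests"   # placeholder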

View File

@@ -0,0 +1,57 @@
name: CI | Run nerdctl integration tests
on:
workflow_call:
inputs:
tarball-suffix:
required: false
type: string
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
run-nerdctl-tests:
strategy:
# We can set this to true whenever we're 100% sure that
# all the tests are not flaky, otherwise we'll fail them
# all due to a single flaky instance.
fail-fast: false
matrix:
vmm:
- clh
- dragonball
- qemu
runs-on: garm-ubuntu-2304-smaller
env:
KATA_HYPERVISOR: ${{ matrix.vmm }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Install dependencies
run: bash tests/integration/nerdctl/gha-run.sh install-dependencies
- name: get-kata-tarball
uses: actions/download-artifact@v3
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
path: kata-artifacts
- name: Install kata
run: bash tests/integration/nerdctl/gha-run.sh install-kata kata-artifacts
- name: Run nerdctl smoke test
timeout-minutes: 5
run: bash tests/integration/nerdctl/gha-run.sh run

View File

@@ -1,42 +0,0 @@
name: CI | Run nydus tests
on:
workflow_call:
inputs:
tarball-suffix:
required: false
type: string
commit-hash:
required: false
type: string
jobs:
run-nydus:
strategy:
fail-fast: true
matrix:
containerd_version: ['lts', 'active']
vmm: ['clh', 'qemu', 'dragonball']
runs-on: garm-ubuntu-2204
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
GOPATH: ${{ github.workspace }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
- name: Install dependencies
run: bash tests/integration/nydus/gha-run.sh install-dependencies
- name: get-kata-tarball
uses: actions/download-artifact@v3
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
path: kata-artifacts
- name: Install kata
run: bash tests/integration/nydus/gha-run.sh install-kata kata-artifacts
- name: Run nydus tests
run: bash tests/integration/nydus/gha-run.sh run

.github/workflows/run-runk-tests.yaml (new file, 46 lines)
View File

@@ -0,0 +1,46 @@
name: CI | Run runk tests
on:
workflow_call:
inputs:
tarball-suffix:
required: false
type: string
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
jobs:
run-runk:
runs-on: garm-ubuntu-2204-smaller
env:
CONTAINERD_VERSION: lts
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Install dependencies
run: bash tests/integration/runk/gha-run.sh install-dependencies
- name: get-kata-tarball
uses: actions/download-artifact@v3
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
path: kata-artifacts
- name: Install kata
run: bash tests/integration/runk/gha-run.sh install-kata kata-artifacts
- name: Run tracing tests
run: bash tests/integration/runk/gha-run.sh run

View File

@@ -1,37 +0,0 @@
name: CI | Run vfio tests
on:
workflow_call:
inputs:
tarball-suffix:
required: false
type: string
commit-hash:
required: false
type: string
jobs:
run-vfio:
strategy:
fail-fast: false
matrix:
vmm: ['clh', 'qemu']
runs-on: garm-ubuntu-2204
env:
GOPATH: ${{ github.workspace }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
- name: Install dependencies
run: bash tests/functional/vfio/gha-run.sh install-dependencies
- name: get-kata-tarball
uses: actions/download-artifact@v3
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
path: kata-artifacts
- name: Run vfio tests
run: bash tests/functional/vfio/gha-run.sh run

View File

@@ -1,37 +0,0 @@
on:
pull_request:
types:
- opened
- edited
- reopened
- synchronize
paths-ignore: [ '**.md', '**.png', '**.jpg', '**.jpeg', '**.svg', '/docs/**' ]
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
name: Static checks dragonball
jobs:
test-dragonball:
runs-on: dragonball
env:
RUST_BACKTRACE: "1"
steps:
- uses: actions/checkout@v3
- name: Set env
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
- name: Install Rust
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
./ci/install_rust.sh
echo PATH="$HOME/.cargo/bin:$PATH" >> $GITHUB_ENV
- name: Run Unit Test
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
cd src/dragonball
cargo version
rustc --version
sudo -E env PATH=$PATH LIBC=gnu SUPPORT_VIRTUALIZATION=true make test

View File

@@ -12,74 +12,183 @@ concurrency:
name: Static checks
jobs:
static-checks:
check-kernel-config-version:
runs-on: ubuntu-latest
steps:
- name: Checkout the code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Ensure the kernel config version has been updated
run: |
kernel_dir="tools/packaging/kernel/"
kernel_version_file="${kernel_dir}kata_config_version"
modified_files=$(git diff --name-only origin/$GITHUB_BASE_REF..HEAD)
if git diff --name-only origin/$GITHUB_BASE_REF..HEAD "${kernel_dir}" | grep "${kernel_dir}"; then
echo "Kernel directory has changed, checking if $kernel_version_file has been updated"
if echo "$modified_files" | grep -v "README.md" | grep "${kernel_dir}" >>"/dev/null"; then
echo "$modified_files" | grep "$kernel_version_file" >>/dev/null || ( echo "Please bump version in $kernel_version_file" && exit 1)
else
echo "Readme file changed, no need for kernel config version update."
fi
echo "Check passed"
fi
build-checks:
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
cmd:
component:
- agent
- dragonball
- runtime
- runtime-rs
- agent-ctl
- kata-ctl
- log-parser-rs
- runk
- trace-forwarder
command:
- "make vendor"
- "make static-checks"
- "make check"
- "make test"
- "sudo -E PATH=\"$PATH\" make test"
include:
- component: agent
component-path: src/agent
- component: dragonball
component-path: src/dragonball
- component: runtime
component-path: src/runtime
- component: runtime-rs
component-path: src/runtime-rs
- component: agent-ctl
component-path: src/tools/agent-ctl
- component: kata-ctl
component-path: src/tools/kata-ctl
- component: log-parser-rs
component-path: src/tools/log-parser-rs
- component: runk
component-path: src/tools/runk
- component: trace-forwarder
component-path: src/tools/trace-forwarder
- install-libseccomp: no
- component: agent
install-libseccomp: yes
- component: runk
install-libseccomp: yes
steps:
- name: Checkout the code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install yq
run: |
./ci/install_yq.sh
env:
INSTALL_IN_GOPATH: false
- name: Install golang
if: ${{ matrix.component == 'runtime' }}
run: |
./tests/install_go.sh -f -p
echo "/usr/local/go/bin" >> $GITHUB_PATH
- name: Install rust
if: ${{ matrix.component != 'runtime' }}
run: |
./tests/install_rust.sh
echo "${HOME}/.cargo/bin" >> $GITHUB_PATH
- name: Install musl-tools
if: ${{ matrix.component != 'runtime' }}
run: sudo apt-get -y install musl-tools
- name: Install libseccomp
if: ${{ matrix.command != 'make vendor' && matrix.command != 'make check' && matrix.install-libseccomp == 'yes' }}
run: |
libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
./ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}"
echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
echo "LIBSECCOMP_LINK_TYPE=static" >> $GITHUB_ENV
echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> $GITHUB_ENV
- name: Setup XDG_RUNTIME_DIR for the `runtime` tests
if: ${{ matrix.command != 'make vendor' && matrix.command != 'make check' && matrix.component == 'runtime' }}
run: |
XDG_RUNTIME_DIR=$(mktemp -d /tmp/kata-tests-$USER.XXX | tee >(xargs chmod 0700))
echo "XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR}" >> $GITHUB_ENV
- name: Running `${{ matrix.command }}` for ${{ matrix.component }}
run: |
cd ${{ matrix.component-path }}
${{ matrix.command }}
env:
RUST_BACKTRACE: "1"
build-checks-depending-on-kvm:
runs-on: garm-ubuntu-2004-smaller
strategy:
fail-fast: false
matrix:
component:
- runtime-rs
include:
- component: runtime-rs
command: "sudo -E env PATH=$PATH LIBC=gnu SUPPORT_VIRTUALIZATION=true make test"
- component: runtime-rs
component-path: src/dragonball
steps:
- name: Checkout the code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install system deps
run: |
sudo apt-get install -y build-essential musl-tools
- name: Install yq
run: |
sudo -E ./ci/install_yq.sh
env:
INSTALL_IN_GOPATH: false
- name: Install rust
run: |
export PATH="$PATH:/usr/local/bin"
./tests/install_rust.sh
- name: Running `${{ matrix.command }}` for ${{ matrix.component }}
run: |
export PATH="$PATH:${HOME}/.cargo/bin"
cd ${{ matrix.component-path }}
${{ matrix.command }}
env:
RUST_BACKTRACE: "1"
static-checks:
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
cmd:
- "make static-checks"
env:
RUST_BACKTRACE: "1"
target_branch: ${{ github.base_ref }}
GOPATH: ${{ github.workspace }}
steps:
- name: Free disk space
run: |
sudo rm -rf /usr/share/dotnet
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
- name: Checkout code
uses: actions/checkout@v3
with:
fetch-depth: 0
path: ./src/github.com/${{ github.repository }}
- name: Install Go
uses: actions/setup-go@v3
with:
go-version: 1.19.3
- name: Check kernel config version
run: |
cd "${{ github.workspace }}/src/github.com/${{ github.repository }}"
kernel_dir="tools/packaging/kernel/"
kernel_version_file="${kernel_dir}kata_config_version"
modified_files=$(git diff --name-only origin/main..HEAD)
if git diff --name-only origin/main..HEAD "${kernel_dir}" | grep "${kernel_dir}"; then
echo "Kernel directory has changed, checking if $kernel_version_file has been updated"
if echo "$modified_files" | grep -v "README.md" | grep "${kernel_dir}" >>"/dev/null"; then
echo "$modified_files" | grep "$kernel_version_file" >>/dev/null || ( echo "Please bump version in $kernel_version_file" && exit 1)
else
echo "Readme file changed, no need for kernel config version update."
fi
echo "Check passed"
fi
- name: Set PATH
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
- name: Setup
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
- name: Installing rust
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_rust.sh
PATH=$PATH:"$HOME/.cargo/bin"
rustup target add x86_64-unknown-linux-musl
rustup component add rustfmt clippy
- name: Setup seccomp
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}"
echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
echo "LIBSECCOMP_LINK_TYPE=static" >> $GITHUB_ENV
echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> $GITHUB_ENV
- name: Run check
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
cd ${GOPATH}/src/github.com/${{ github.repository }} && ${{ matrix.cmd }}
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
path: ./src/github.com/${{ github.repository }}
- name: Install yq
run: |
cd ${GOPATH}/src/github.com/${{ github.repository }}
./ci/install_yq.sh
env:
INSTALL_IN_GOPATH: false
- name: Install golang
run: |
cd ${GOPATH}/src/github.com/${{ github.repository }}
./tests/install_go.sh -f -p
echo "/usr/local/go/bin" >> $GITHUB_PATH
- name: Install system dependencies
run: |
sudo apt-get -y install moreutils hunspell pandoc
- name: Run check
run: |
export PATH=${PATH}:${GOPATH}/bin
cd ${GOPATH}/src/github.com/${{ github.repository }} && ${{ matrix.cmd }}
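
The build-checks matrix above leans on a subtle property of include: an entry that introduces only new keys (install-libseccomp: no) is merged into every component/command combination as a default, and later entries that match an original matrix key (component: agent, component: runk) overwrite that added value for just those components. A reduced sketch of the default-plus-override pattern, assuming a two-component matrix:

strategy:
  matrix:
    component: [agent, runtime]
    include:
      - install-libseccomp: no    # default merged into every combination
      - component: agent
        install-libseccomp: yes   # later entry overrides the added default for agent only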

View File

@@ -7,12 +7,10 @@
set -o errexit
cidir=$(dirname "$0")
source "${cidir}/lib.sh"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
script_name="$(basename "${BASH_SOURCE[0]}")"
clone_tests_repo
source "${tests_repo_dir}/.ci/lib.sh"
source "${script_dir}/../tests/common.bash"
# The following variables if set on the environment will change the behavior
# of gperf and libseccomp configure scripts, that may lead this script to
@@ -25,11 +23,11 @@ workdir="$(mktemp -d --tmpdir build-libseccomp.XXXXX)"
# Variables for libseccomp
libseccomp_version="${LIBSECCOMP_VERSION:-""}"
if [ -z "${libseccomp_version}" ]; then
libseccomp_version=$(get_version "externals.libseccomp.version")
libseccomp_version=$(get_from_kata_deps "externals.libseccomp.version")
fi
libseccomp_url="${LIBSECCOMP_URL:-""}"
if [ -z "${libseccomp_url}" ]; then
libseccomp_url=$(get_version "externals.libseccomp.url")
libseccomp_url=$(get_from_kata_deps "externals.libseccomp.url")
fi
libseccomp_tarball="libseccomp-${libseccomp_version}.tar.gz"
libseccomp_tarball_url="${libseccomp_url}/releases/download/v${libseccomp_version}/${libseccomp_tarball}"
@@ -38,11 +36,11 @@ cflags="-O2"
# Variables for gperf
gperf_version="${GPERF_VERSION:-""}"
if [ -z "${gperf_version}" ]; then
gperf_version=$(get_version "externals.gperf.version")
gperf_version=$(get_from_kata_deps "externals.gperf.version")
fi
gperf_url="${GPERF_URL:-""}"
if [ -z "${gperf_url}" ]; then
gperf_url=$(get_version "externals.gperf.url")
gperf_url=$(get_from_kata_deps "externals.gperf.url")
fi
gperf_tarball="gperf-${gperf_version}.tar.gz"
gperf_tarball_url="${gperf_url}/${gperf_tarball}"

View File

@@ -43,7 +43,7 @@ and perform DMA transactions _anywhere_.
The second feature is ACS (Access Control Services), which controls which
devices are allowed to communicate with one another and thus avoids improper
routing of packets irrespectively of whether IOMMU is enabled or not.
routing of packets `irrespectively` of whether IOMMU is enabled or not.
When IOMMU is enabled, ACS is normally configured to force all PCI Express DMA
to go through the root complex so IOMMU can translate it, impacting performance
@@ -126,7 +126,7 @@ efficient P2P communication.
## PCI Express Virtual P2P Approval Capability
Most of the time, the PCI Express topology is flattened and obfuscated to ensure
easy migration of the VM image between different physical hardware topologies.
easy migration of the VM image between different physical hardware `topologies`.
In Kata, we can configure the hypervisor to use PCI Express root ports to
hotplug the VFIO  devices one is passing through. A user can select how many PCI
Express root ports to allocate depending on how many devices are passed through.
@@ -220,7 +220,7 @@ containers that he wants to run with Kata. The goal is to make such things as
transparent as possible, so we also introduced
[CDI](https://github.com/container-orchestrated-devices/container-device-interface)
(Container Device Interface) to Kata. CDI is a[
specification](https://github.com/container-orchestrated-devices/container-device-interface/blob/master/SPEC.md)
specification](https://github.com/container-orchestrated-devices/container-device-interface/blob/main/SPEC.md)
for container runtimes to support third-party devices.
As written before, we can provide a clique ID for the devices that belong
@@ -300,7 +300,7 @@ pcie_switch_port = 8
```
Each device that is passed through is attached to a PCI Express downstream port
as illustrated below. We can even replicate the hosts two DPUs topologies with
as illustrated below. We can even replicate the hosts two DPUs `topologies` with
added metadata through the CDI. Most of the time, a container only needs one
pair of GPU and NIC for GPUDirect RDMA. This is more of a showcase of what we
can do with the power of Kata and CDI. One could even think of adding groups of
@@ -328,7 +328,7 @@ $ lspci -tv
```
The configuration of using either the root port or switch port can be applied on
a per Container or Pod basis, meaning we can switch PCI Express topologies on
a per Container or Pod basis, meaning we can switch PCI Express `topologies` on
each run of an application.
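A hedged sketch of what that per-Pod selection could look like, reusing the
`io.katacontainers.config.hypervisor.*` annotation pattern that the VFIO test
fixtures in this series use for `enable_iommu`. Whether `hot_plug_vfio` is
actually exposed as an annotation, and which values it accepts, must be
checked against the runtime's `enable_annotations` setting:

```bash
# Hypothetical example: request a switch-port PCIe topology for one sandbox
# only. Assumes hot_plug_vfio is allowed via enable_annotations in the
# runtime configuration.
cat <<'EOF' > sandbox.json
{
  "metadata": { "name": "p2p-pod", "namespace": "default", "uid": "p2p-pod-uid", "attempt": 1 },
  "annotations": {
    "io.katacontainers.config.hypervisor.hot_plug_vfio": "switch-port"
  },
  "logDirectory": "/tmp",
  "linux": {}
}
EOF
sudo crictl runp -r kata sandbox.json
```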
## Hypervisor Resource Limits

View File

@@ -54,7 +54,7 @@ endif
TARGET_PATH = target/$(TRIPLE)/$(BUILD_TYPE)/$(TARGET)
##VAR DESTDIR=<path> is a directory prepended to each installed target file
DESTDIR :=
DESTDIR ?=
##VAR BINDIR=<path> is a directory for installing executable programs
BINDIR := /usr/bin
@@ -140,7 +140,7 @@ vendor:
#TARGET test: run cargo tests
test:
test: $(GENERATED_FILES)
@cargo test --all --target $(TRIPLE) $(EXTRA_RUSTFEATURES) -- --nocapture
##TARGET check: run test

View File

@@ -49,7 +49,7 @@ else
##TARGET default: build code
default: runtime show-header
##TARGET test: run cargo tests
test:
test: static-checks-build
@cargo test --all --target $(TRIPLE) $(EXTRA_RUSTFEATURES) -- --nocapture
install: install-runtime install-configs
endif

View File

@@ -188,12 +188,22 @@ block_device_driver = "virtio-blk"
# Disable the 'seccomp' feature from Cloud Hypervisor, default false
# disable_seccomp = true
# Enable vIOMMU, default false
# Enabling this will result in the VM having a vIOMMU device
# This will also add the following options to the kernel's
# command line: iommu=pt
#enable_iommu = true
# This option changes the default hypervisor and kernel parameters
# to enable debug output where available.
#
# Default false
#enable_debug = true
# Enable hot-plugging of VFIO devices to a root-port.
# The default setting is "no-port"
#hot_plug_vfio = "root-port"
# Path to OCI hook binaries in the *guest rootfs*.
# This does not affect host-side hooks which must instead be added to
# the OCI spec passed to the runtime.
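# For illustration only (not part of the shipped defaults): the VFIO test
# script added in this series (tests/functional/vfio/run.sh) enables root-port
# hot-plug on a copy of this file with sed, roughly:
#   sed -i -e 's|^#*.*hot_plug_vfio.*|hot_plug_vfio = "root-port"|' configuration-clh.toml
# Enabling the vIOMMU can be done the same way (or via the enable_iommu
# annotation, provided it is listed in enable_annotations):
#   sed -i -e 's|^#enable_iommu = true|enable_iommu = true|' configuration-clh.toml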

View File

@@ -1680,8 +1680,8 @@ func checkConfig(config oci.RuntimeConfig) error {
// Only allow one of the following settings for cold-plug:
// no-port, root-port, switch-port
func checkPCIeConfig(coldPlug config.PCIePort, hotPlug config.PCIePort, machineType string, hypervisorType virtcontainers.HypervisorType) error {
if hypervisorType != virtcontainers.QemuHypervisor {
kataUtilsLogger.Warn("Advanced PCIe Topology only available for QEMU hypervisor, ignoring hot(cold)_vfio_port setting")
if hypervisorType != virtcontainers.QemuHypervisor && hypervisorType != virtcontainers.ClhHypervisor {
kataUtilsLogger.Warn("Advanced PCIe Topology only available for QEMU/CLH hypervisor, ignoring hot(cold)_vfio_port setting")
return nil
}
@@ -1696,6 +1696,14 @@ func checkPCIeConfig(coldPlug config.PCIePort, hotPlug config.PCIePort, machineT
if machineType != "q35" && machineType != "virt" {
return nil
}
if hypervisorType == virtcontainers.ClhHypervisor {
if coldPlug != config.NoPort {
return fmt.Errorf("cold-plug not supported on CLH")
}
if hotPlug != config.RootPort {
return fmt.Errorf("only hot-plug=%s supported on CLH", config.RootPort)
}
}
var port config.PCIePort
if coldPlug != config.NoPort {
@@ -1704,10 +1712,6 @@ func checkPCIeConfig(coldPlug config.PCIePort, hotPlug config.PCIePort, machineT
if hotPlug != config.NoPort {
port = hotPlug
}
if port == config.NoPort {
return fmt.Errorf("invalid vfio_port=%s setting, use on of %s, %s, %s",
port, config.BridgePort, config.RootPort, config.SwitchPort)
}
if port == config.BridgePort || port == config.RootPort || port == config.SwitchPort {
return nil
}

View File

@@ -18,8 +18,10 @@ import (
"syscall"
"testing"
config "github.com/kata-containers/kata-containers/src/runtime/pkg/device/config"
ktu "github.com/kata-containers/kata-containers/src/runtime/pkg/katatestutils"
"github.com/kata-containers/kata-containers/src/runtime/pkg/oci"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/compatoci"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/vcmock"
@@ -419,3 +421,32 @@ func TestCreateContainer(t *testing.T) {
assert.NoError(err)
}
}
func TestVfioChecksClh(t *testing.T) {
assert := assert.New(t)
// Check valid CLH vfio configs
f := func(coldPlug, hotPlug config.PCIePort) error {
return checkPCIeConfig(coldPlug, hotPlug, defaultMachineType, virtcontainers.ClhHypervisor)
}
assert.NoError(f(config.NoPort, config.NoPort))
assert.NoError(f(config.NoPort, config.RootPort))
assert.Error(f(config.RootPort, config.RootPort))
assert.Error(f(config.RootPort, config.NoPort))
assert.Error(f(config.NoPort, config.SwitchPort))
}
func TestVfioCheckQemu(t *testing.T) {
assert := assert.New(t)
// Check valid Qemu vfio configs
f := func(coldPlug, hotPlug config.PCIePort) error {
return checkPCIeConfig(coldPlug, hotPlug, defaultMachineType, virtcontainers.QemuHypervisor)
}
assert.NoError(f(config.NoPort, config.NoPort))
assert.NoError(f(config.RootPort, config.NoPort))
assert.NoError(f(config.NoPort, config.RootPort))
assert.Error(f(config.RootPort, config.RootPort))
assert.Error(f(config.SwitchPort, config.RootPort))
}

View File

@@ -490,6 +490,13 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
}
clh.vmconfig.Payload.SetKernel(kernelPath)
clh.vmconfig.Platform = chclient.NewPlatformConfig()
platform := clh.vmconfig.Platform
platform.SetNumPciSegments(2)
if clh.config.IOMMU {
platform.SetIommuSegments([]int32{0})
}
if clh.config.ConfidentialGuest {
if err := clh.enableProtection(); err != nil {
return err
@@ -528,6 +535,9 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
// start the guest kernel with 'quiet' in non-debug mode
params = append(params, Param{"quiet", ""})
}
if clh.config.IOMMU {
params = append(params, Param{"iommu", "pt"})
}
// Followed by extra kernel parameters defined in the configuration file
params = append(params, clh.config.KernelParams...)
@@ -536,6 +546,7 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
// set random device generator to hypervisor
clh.vmconfig.Rng = chclient.NewRngConfig(clh.config.EntropySource)
clh.vmconfig.Rng.SetIommu(clh.config.IOMMU)
// set the initial root/boot disk of hypervisor
imagePath, err := clh.config.ImageAssetPath()
@@ -561,6 +572,7 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
} else {
pmem := chclient.NewPmemConfig(imagePath)
*pmem.DiscardWrites = true
pmem.SetIommu(clh.config.IOMMU)
if clh.vmconfig.Pmem != nil {
*clh.vmconfig.Pmem = append(*clh.vmconfig.Pmem, *pmem)
@@ -598,6 +610,7 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
clh.vmconfig.Console = chclient.NewConsoleConfig(cctOFF)
}
clh.vmconfig.Console.SetIommu(clh.config.IOMMU)
cpu_topology := chclient.NewCpuTopology()
cpu_topology.ThreadsPerCore = func(i int32) *int32 { return &i }(1)
@@ -840,6 +853,7 @@ func (clh *cloudHypervisor) hotplugAddBlockDevice(drive *config.BlockDrive) erro
queueSize := int32(1024)
clhDisk.NumQueues = &queues
clhDisk.QueueSize = &queueSize
clhDisk.SetIommu(clh.config.IOMMU)
diskRateLimiterConfig := clh.getDiskRateLimiterConfig()
if diskRateLimiterConfig != nil {
@@ -865,6 +879,7 @@ func (clh *cloudHypervisor) hotPlugVFIODevice(device *config.VFIODev) error {
// Create the clh device config via the constructor to ensure default values are properly assigned
clhDevice := *chclient.NewDeviceConfig(device.SysfsDev)
clhDevice.SetIommu(clh.config.IOMMU)
pciInfo, _, err := cl.VmAddDevicePut(ctx, clhDevice)
if err != nil {
return fmt.Errorf("Failed to hotplug device %+v %s", device, openAPIClientError(err))
@@ -1538,6 +1553,7 @@ func (clh *cloudHypervisor) addVSock(cid int64, path string) {
}).Info("Adding HybridVSock")
clh.vmconfig.Vsock = chclient.NewVsockConfig(cid, path)
clh.vmconfig.Vsock.SetIommu(clh.config.IOMMU)
}
func (clh *cloudHypervisor) getRateLimiterConfig(bwSize, bwOneTimeBurst, opsSize, opsOneTimeBurst int64) *chclient.RateLimiterConfig {
@@ -1607,6 +1623,7 @@ func (clh *cloudHypervisor) addNet(e Endpoint) error {
if netRateLimiterConfig != nil {
net.SetRateLimiterConfig(*netRateLimiterConfig)
}
net.SetIommu(clh.config.IOMMU)
if clh.netDevices != nil {
*clh.netDevices = append(*clh.netDevices, *net)
@@ -1639,6 +1656,7 @@ func (clh *cloudHypervisor) addVolume(volume types.Volume) error {
}
fs := chclient.NewFsConfig(volume.MountTag, vfsdSockPath, numQueues, queueSize)
fs.SetPciSegment(1)
clh.vmconfig.Fs = &[]chclient.FsConfig{*fs}
clh.Logger().Debug("Adding share volume to hypervisor: ", volume.MountTag)

View File

@@ -68,6 +68,7 @@ func newClhConfig() (HypervisorConfig, error) {
NetRateLimiterBwOneTimeBurst: int64(0),
NetRateLimiterOpsMaxRate: int64(0),
NetRateLimiterOpsOneTimeBurst: int64(0),
HotPlugVFIO: config.NoPort,
}, nil
}

View File

@@ -1946,6 +1946,7 @@ dependencies = [
"anyhow",
"hyper",
"hyperlocal",
"kata-types",
"tokio",
]

View File

@@ -52,13 +52,13 @@ clean:
vendor:
cargo vendor
test:
test: $(GENERATED_CODE)
@RUSTFLAGS="$(EXTRA_RUSTFLAGS) --deny warnings" cargo test --target $(TRIPLE) $(if $(findstring release,$(BUILD_TYPE)),--release) $(EXTRA_RUSTFEATURES) -- --nocapture
install:
@RUSTFLAGS="$(EXTRA_RUSTFLAGS) --deny warnings" cargo install --locked --target $(TRIPLE) --path . --root $(INSTALL_PATH)
check: standard_rust_check
check: $(GENERATED_CODE) standard_rust_check
.PHONY: \
build \

View File

@@ -539,10 +539,10 @@ mod tests {
},
// Success scenarios
TestData {
module_name: "kvm",
module_name: "loop",
param_name: "",
kernel_module: &KernelModule {
name: "kvm",
name: "loop",
params: &[KernelParam {
name: "nonexistantparam",
value: KernelParamType::Simple("Y"),
@@ -552,16 +552,16 @@ mod tests {
result: Ok(()),
},
TestData {
module_name: "kvm",
param_name: "kvmclock_periodic_sync",
module_name: "loop",
param_name: "hw_queue_depth",
kernel_module: &KernelModule {
name: "kvm",
name: "loop",
params: &[KernelParam {
name: "kvmclock_periodic_sync",
value: KernelParamType::Simple("Y"),
name: "hw_queue_depth",
value: KernelParamType::Simple("128"),
}],
},
param_value: "Y",
param_value: "128",
result: Ok(()),
},
];

View File

@@ -158,7 +158,7 @@ function clean_env_ctr()
info "Wait until the containers gets removed"
for task_id in "${running_tasks[@]}"; do
sudo ctr t kill -a -s SIGTERM ${task_id} >/dev/null 2>&1
sudo timeout -s SIGKILL 30s ctr t kill -a -s SIGTERM ${task_id} >/dev/null 2>&1 || true
sleep 0.5
done
@@ -186,31 +186,19 @@ function clean_env_ctr()
if (( count_tasks > 0 )); then
die "Can't remove running containers."
fi
kill_kata_components
}
# Kills running shim and hypervisor components
function kill_kata_components() {
local kata_bin_dir="/opt/kata/bin"
local shim_path="${kata_bin_dir}/containerd-shim-kata-v2"
local hypervisor_path="${kata_bin_dir}/qemu-system-x86_64"
local pid_shim_count="$(pgrep -fc ${shim_path} || exit 0)"
local TIMEOUT="30s"
local PID_NAMES=( "containerd-shim-kata-v2" "qemu-system-x86_64" "cloud-hypervisor" )
[ ${pid_shim_count} -gt "0" ] && sudo kill -SIGKILL "$(pgrep -f ${shim_path})" > /dev/null 2>&1
if [ "${KATA_HYPERVISOR}" = 'clh' ]; then
hypervisor_path="${kata_bin_dir}/cloud-hypervisor"
elif [ "${KATA_HYPERVISOR}" != 'qemu' ]; then
echo "Failed to stop the hypervisor: '${KATA_HYPERVISOR}' as it is not recognized"
return
fi
local pid_hypervisor_count="$(pgrep -fc ${hypervisor_path} || exit 0)"
if [ ${pid_hypervisor_count} -gt "0" ]; then
sudo kill -SIGKILL "$(pgrep -f ${hypervisor_path})" > /dev/null 2>&1
fi
sudo systemctl stop containerd
# iterate over the list of kata components and stop them
for PID_NAME in "${PID_NAMES[@]}"; do
[[ ! -z "$(pidof ${PID_NAME})" ]] && sudo killall "${PID_NAME}" > /dev/null 2>&1 || true
done
sudo timeout -s SIGKILL "${TIMEOUT}" systemctl start containerd
}
# Restarts a systemd service while ensuring the start-limit-burst is set to 0.
@@ -269,25 +257,71 @@ function restart_containerd_service() {
return 0
}
function restart_crio_service() {
sudo systemctl restart crio
}
# Configures containerd
function overwrite_containerd_config() {
containerd_config="/etc/containerd/config.toml"
sudo rm -f "${containerd_config}"
sudo tee "${containerd_config}" << EOF
version = 2
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "kata"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = false
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata]
runtime_type = "io.containerd.kata.v2"
EOF
}
# Configures CRI-O
function overwrite_crio_config() {
crio_conf_d="/etc/crio/crio.conf.d"
sudo mkdir -p ${crio_conf_d}
kata_config="${crio_conf_d}/99-kata-containers"
sudo tee "${kata_config}" << EOF
[crio.runtime.runtimes.kata]
runtime_path = "/usr/local/bin/containerd-shim-kata-v2"
runtime_type = "vm"
runtime_root = "/run/vc"
runtime_config_path = "/opt/kata/share/defaults/kata-containers/configuration.toml"
privileged_without_host_devices = true
EOF
debug_config="${crio_conf_d}/100-debug"
sudo tee "${debug_config}" << EOF
[crio]
log_level = "debug"
EOF
}
function install_kata() {
local kata_tarball="kata-static.tar.xz"
declare -r katadir="/opt/kata"
@@ -306,18 +340,35 @@ function install_kata() {
sudo ln -sf "${b}" "${local_bin_dir}/$(basename $b)"
done
if [[ ${KATA_HYPERVISOR} == "dragonball" ]]; then
sudo ln -sf "${katadir}/runtime-rs/bin/containerd-shim-kata-v2" "${local_bin_dir}/containerd-shim-kata-${KATA_HYPERVISOR}-v2"
if [ "${CONTAINER_ENGINE:=containerd}" = "containerd" ]; then
check_containerd_config_for_kata
restart_containerd_service
else
sudo ln -sf "${katadir}/bin/containerd-shim-kata-v2" "${local_bin_dir}/containerd-shim-kata-${KATA_HYPERVISOR}-v2"
overwrite_crio_config
restart_crio_service
fi
sudo ln -sf ${katadir}/share/defaults/kata-containers/configuration-${KATA_HYPERVISOR}.toml ${katadir}/share/defaults/kata-containers/configuration.toml
check_containerd_config_for_kata
restart_containerd_service
}
# creates a new kata configuration.toml symbolic link that
# points to the hypervisor passed by KATA_HYPERVISOR env var.
function enabling_hypervisor() {
declare -r KATA_DIR="/opt/kata"
declare -r CONFIG_DIR="${KATA_DIR}/share/defaults/kata-containers"
declare -r SRC_HYPERVISOR_CONFIG="${CONFIG_DIR}/configuration-${KATA_HYPERVISOR}.toml"
declare -r DEST_KATA_CONFIG="${CONFIG_DIR}/configuration.toml"
declare -r CONTAINERD_SHIM_KATA="/usr/local/bin/containerd-shim-kata-${KATA_HYPERVISOR}-v2"
if [[ ${KATA_HYPERVISOR} == "dragonball" ]]; then
sudo ln -sf "${KATA_DIR}/runtime-rs/bin/containerd-shim-kata-v2" "${CONTAINERD_SHIM_KATA}"
else
sudo ln -sf "${KATA_DIR}/bin/containerd-shim-kata-v2" "${CONTAINERD_SHIM_KATA}"
fi
sudo ln -sf "${SRC_HYPERVISOR_CONFIG}" "${DEST_KATA_CONFIG}"
}
function check_containerd_config_for_kata() {
# check containerd config
declare -r line1="default_runtime_name = \"kata\""
@@ -339,6 +390,7 @@ function ensure_yq() {
export GOPATH
export PATH="${GOPATH}/bin:${PATH}"
INSTALL_IN_GOPATH=true "${repo_root_dir}/ci/install_yq.sh"
hash -d yq 2> /dev/null || true # yq is preinstalled on GHA Ubuntu 22.04 runners so we clear Bash's PATH cache.
}
# dependency: the component whose version we want to get from the versions.yaml file
@@ -383,6 +435,19 @@ function download_github_project_tarball() {
wget https://github.com/${project}/releases/download/${version}/${tarball_name}
}
# version: The version to be installed
function install_cni_plugins() {
version="${1}"
project="containernetworking/plugins"
tarball_name="cni-plugins-linux-$(${repo_root_dir}/tests/kata-arch.sh -g)-${version}.tgz"
download_github_project_tarball "${project}" "${version}" "${tarball_name}"
sudo mkdir -p /opt/cni/bin
sudo tar -xvf "${tarball_name}" -C /opt/cni/bin
rm -f "${tarball_name}"
}
# base_version: The version to be installed in the ${major}.${minor} format
function install_cri_containerd() {
base_version="${1}"
@@ -413,3 +478,110 @@ function install_cri_tools() {
sudo tar -xvf "${tarball_name}" -C /usr/local/bin
rm -f "${tarball_name}"
}
function install_nydus() {
version="${1}"
project="dragonflyoss/image-service"
tarball_name="nydus-static-${version}-linux-$(${repo_root_dir}/tests/kata-arch.sh -g).tgz"
download_github_project_tarball "${project}" "${version}" "${tarball_name}"
sudo tar xfz "${tarball_name}" -C /usr/local/bin --strip-components=1
rm -f "${tarball_name}"
}
function install_nydus_snapshotter() {
version="${1}"
project="containerd/nydus-snapshotter"
tarball_name="nydus-snapshotter-${version}-$(${repo_root_dir}/tests/kata-arch.sh).tgz"
download_github_project_tarball "${project}" "${version}" "${tarball_name}"
sudo tar xfz "${tarball_name}" -C /usr/local/bin --strip-components=1
rm -f "${tarball_name}"
}
function _get_os_for_crio() {
source /etc/os-release
if [ "${NAME}" != "Ubuntu" ]; then
echo "Only Ubuntu is supported for now"
exit 2
fi
echo "x${NAME}_${VERSION_ID}"
}
# version: the CRI-O version to be installed
function install_crio() {
local version=${1}
os=$(_get_os_for_crio)
echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/${os}/ /"|sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
echo "deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/${version}/${os}/ /"|sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:${version}.list
curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:${version}/${os}/Release.key | sudo apt-key add -
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/${os}/Release.key | sudo apt-key add -
sudo apt update
sudo apt install -y cri-o cri-o-runc
# We need to set the default capabilities to ensure our tests will pass
# See: https://github.com/kata-containers/kata-containers/issues/8034
sudo mkdir -p /etc/crio/crio.conf.d/
cat <<EOF | sudo tee /etc/crio/crio.conf.d/00-default-capabilities
[crio.runtime]
default_capabilities = [
"CHOWN",
"DAC_OVERRIDE",
"FSETID",
"FOWNER",
"SETGID",
"SETUID",
"SETPCAP",
"NET_BIND_SERVICE",
"KILL",
"SYS_CHROOT",
]
EOF
sudo systemctl enable --now crio
}
# Convert architecture to the name used by golang
function arch_to_golang() {
local arch="$(uname -m)"
case "${arch}" in
aarch64) echo "arm64";;
ppc64le) echo "${arch}";;
x86_64) echo "amd64";;
s390x) echo "s390x";;
*) die "unsupported architecture: ${arch}";;
esac
}
# Convert architecture to the name used by rust
function arch_to_rust() {
local -r arch="$(uname -m)"
case "${arch}" in
aarch64) echo "${arch}";;
ppc64le) echo "powerpc64le";;
x86_64) echo "${arch}";;
s390x) echo "${arch}";;
*) die "unsupported architecture: ${arch}";;
esac
}
# Convert architecture to the name used by the Linux kernel build system
function arch_to_kernel() {
local -r arch="$(uname -m)"
case "${arch}" in
aarch64) echo "arm64";;
ppc64le) echo "powerpc";;
x86_64) echo "${arch}";;
s390x) echo "s390x";;
*) die "unsupported architecture: ${arch}";;
esac
}

View File

@@ -0,0 +1,69 @@
#!/usr/bin/env bash
# Copyright (c) 2023 Microsoft Corporation
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
set -o errexit
set -o nounset
set -o pipefail
kata_deploy_dir="$(dirname "$(readlink -f "$0")")"
source "${kata_deploy_dir}/../../gha-run-k8s-common.sh"
function run_tests() {
cleanup_runtimeclasses || true
pushd "${kata_deploy_dir}"
bash run-kata-deploy-tests.sh
popd
}
function cleanup_runtimeclasses() {
# Cleanup any runtime class that was left behind in the cluster, in
# case of a test failure, apart from the default one that comes from
# AKS
for rc in `kubectl get runtimeclass -o name | grep -v "kata-mshv-vm-isolation" | sed 's|runtimeclass.node.k8s.io/||'`
do
kubectl delete runtimeclass $rc;
done
}
function cleanup() {
platform="${1}"
test_type="${2:-k8s}"
cleanup_runtimeclasses || true
if [ "${platform}" = "aks" ]; then
delete_cluster ${test_type}
fi
}
function main() {
export KATA_HOST_OS="${KATA_HOST_OS:-}"
platform="aks"
if [ "${KATA_HYPERVISOR}" = "qemu-tdx" ]; then
platform="tdx"
fi
export platform
action="${1:-}"
case "${action}" in
install-azure-cli) install_azure_cli ;;
login-azure) login_azure ;;
create-cluster) create_cluster "kata-deploy" ;;
deploy-k8s) deploy_k8s ;;
install-bats) install_bats ;;
install-kubectl) install_kubectl ;;
get-cluster-credentials) get_cluster_credentials "kata-deploy" ;;
run-tests) run_tests ;;
delete-cluster) cleanup "aks" "kata-deploy" ;;
*) >&2 echo "Invalid argument"; exit 2 ;;
esac
}
main "$@"

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env bats
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
setup() {
repo_root_dir="${BATS_TEST_DIRNAME}/../../../"
ensure_yq
# We expect 2 runtime classes because:
# * `kata` is the default runtimeclass created, basically an alias for `kata-${KATA_HYPERVISOR}`.
# * `kata-${KATA_HYPERVISOR}` is the other one
# * As part of the tests we're only deploying the specific runtimeclass that will be used, instead of all of them.
expected_runtime_classes=2
# We expect both runtime classes to have the same handler: kata-${KATA_HYPERVISOR}
expected_handlers_re=( \
"kata\s+kata-${KATA_HYPERVISOR}" \
"kata-${KATA_HYPERVISOR}\s+kata-${KATA_HYPERVISOR}" \
)
# Set the latest image, the one generated as part of the PR, to be used as part of the tests
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Enable debug for Kata Containers
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[1].value' --tag '!!str' "true"
# Create the runtime class only for the shim that's being tested
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[2].value' "${KATA_HYPERVISOR}"
# Set the tested hypervisor as the default `kata` shim
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[3].value' "${KATA_HYPERVISOR}"
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[4].value' --tag '!!str' "true"
# Let the `kata-deploy` create the default `kata` runtime class
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[5].value' --tag '!!str' "true"
if [ "${KATA_HOST_OS}" = "cbl-mariner" ]; then
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[+].name' "HOST_OS"
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[-1].value' "${KATA_HOST_OS}"
fi
echo "::group::Final kata-deploy.yaml that is used in the test"
cat "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" || die "Failed to setup the tests image"
echo "::endgroup::"
kubectl apply -f "${repo_root_dir}/tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
if [ "${KUBERNETES}" = "k0s" ]; then
kubectl apply -k "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/overlays/k0s"
elif [ "${KUBERNETES}" = "k3s" ]; then
kubectl apply -k "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/overlays/k3s"
elif [ "${KUBERNETES}" = "rke2" ]; then
kubectl apply -k "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/overlays/rke2"
else
kubectl apply -f "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
fi
kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
# Give some time for the pod to finish what it's doing and have the
# runtimeclasses properly created
sleep 30s
}
@test "Test runtimeclasses are being properly created" {
# We filter `kata-mshv-vm-isolation` out as that's present on AKS clusters, but that's not coming from kata-deploy
current_runtime_classes=$(kubectl get runtimeclasses | grep -v "kata-mshv-vm-isolation" | grep "kata" | wc -l)
[[ ${current_runtime_classes} -eq ${expected_runtime_classes} ]]
for handler_re in ${expected_handlers_re[@]}
do
kubectl get runtimeclass | grep -E "${handler_re}"
done
}
teardown() {
kubectl get runtimeclasses -o name | grep -v "kata-mshv-vm-isolation"
if [ "${KUBERNETES}" = "k0s" ]; then
deploy_spec="-k \"${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/overlays/k0s\""
cleanup_spec="-k \"${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/overlays/k0s\""
elif [ "${KUBERNETES}" = "k3s" ]; then
deploy_spec="-k \"${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/overlays/k3s\""
cleanup_spec="-k \"${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/overlays/k3s\""
elif [ "${KUBERNETES}" = "rke2" ]; then
deploy_spec="-k \"${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/overlays/rke2\""
cleanup_spec="-k \"${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/overlays/rke2\""
else
deploy_spec="-f \"${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml\""
cleanup_spec="-f \"${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml\""
fi
kubectl delete ${deploy_spec}
kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" 'spec.template.spec.containers[0].env[4].value' --tag '!!str' "true"
# Create the runtime class only for the shim that's being tested
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" 'spec.template.spec.containers[0].env[2].value' "${KATA_HYPERVISOR}"
# Set the tested hypervisor as the default `kata` shim
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" 'spec.template.spec.containers[0].env[3].value' "${KATA_HYPERVISOR}"
# Let the `kata-deploy` create the default `kata` runtime class
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[5].value' --tag '!!str' "true"
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
cat "${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" "${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" || die "Failed to setup the tests image"
kubectl apply ${cleanup_spec}
sleep 30s
kubectl delete ${cleanup_spec}
kubectl delete -f "${repo_root_dir}/tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
}

View File

@@ -0,0 +1,25 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -e
kata_deploy_dir=$(dirname "$(readlink -f "$0")")
source "${kata_deploy_dir}/../../common.bash"
if [ -n "${KATA_DEPLOY_TEST_UNION:-}" ]; then
KATA_DEPLOY_TEST_UNION=($KATA_DEPLOY_TEST_UNION)
else
KATA_DEPLOY_TEST_UNION=( \
"kata-deploy.bats" \
)
fi
info "Run tests"
for KATA_DEPLOY_TEST_ENTRY in ${KATA_DEPLOY_TEST_UNION[@]}
do
bats "${KATA_DEPLOY_TEST_ENTRY}"
done

View File

@@ -0,0 +1,78 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o nounset
set -o pipefail
kata_tarball_dir="${2:-kata-artifacts}"
kata_monitor_dir="$(dirname "$(readlink -f "$0")")"
source "${kata_monitor_dir}/../../common.bash"
function install_dependencies() {
info "Installing the dependencies needed for running the cri-containerd tests"
# Dependency list of projects that we can rely on the system packages
# - build-essential
# - Theoretically we only need `make`, but doesn't hurt to install
# the whole build-essential group
# - jq
declare -a system_deps=(
build-essential
jq
)
sudo apt-get update
sudo apt-get -y install "${system_deps[@]}"
ensure_yq
# Dependency list of projects that we can install
# directly from their releases on GitHub:
# - cri-tools
# - containerd
# - cri-containerd-cni release tarball already includes CNI plugins
cri_tools_version=$(get_from_kata_deps "externals.critools.latest")
declare -a github_deps
github_deps[0]="cri_tools:${cri_tools_version}"
case "${CONTAINER_ENGINE}" in
containerd)
github_deps[1]="cri_containerd:$(get_from_kata_deps "externals.containerd.${CONTAINERD_VERSION}")"
;;
crio)
github_deps[1]="cni_plugins:$(get_from_kata_deps "externals.cni-plugins.version")"
;;
esac
for github_dep in "${github_deps[@]}"; do
IFS=":" read -r -a dep <<< "${github_dep}"
install_${dep[0]} "${dep[1]}"
done
if [ "${CONTAINER_ENGINE}" = "crio" ]; then
install_crio ${cri_tools_version#v}
fi
}
function run() {
info "Running cri-containerd tests using ${KATA_HYPERVISOR} hypervisor"
enabling_hypervisor
bash -c ${kata_monitor_dir}/kata-monitor-tests.sh
}
function main() {
action="${1:-}"
case "${action}" in
install-dependencies) install_dependencies ;;
install-kata) install_kata ;;
run) run ;;
*) >&2 die "Invalid argument" ;;
esac
}
main "$@"

View File

@@ -0,0 +1,294 @@
#!/bin/bash
#
# Copyright (c) 2022 Red Hat
#
# SPDX-License-Identifier: Apache-2.0
#
# This test file will test kata-monitor for basic functionality (retrieve kata sandboxes)
# It will assume an environment where:
# - a CRI container manager (container engine) will be up and running
# - crictl is installed and configured
# - the kata-monitor binary is available on the host
#
set -o errexit
set -o nounset
set -o pipefail
source "/etc/os-release" || source "/usr/lib/os-release"
[ -n "${BASH_VERSION:-}" ] && set -o errtrace
[ -n "${DEBUG:-}" ] && set -o xtrace
readonly MONITOR_HTTP_ENDPOINT="127.0.0.1:8090"
# we should collect a few hundred metrics, so let's set a reasonable minimum
readonly MONITOR_MIN_METRICS_NUM=200
CONTAINER_ENGINE=${CONTAINER_ENGINE:-"containerd"}
CRICTL_RUNTIME=${CRICTL_RUNTIME:-"kata"}
KATA_MONITOR_BIN="${KATA_MONITOR_BIN:-$(command -v kata-monitor || true)}"
KATA_MONITOR_PID=""
TMPATH=$(mktemp -d -t kata-monitor-test-XXXXXXXXX)
METRICS_FILE="${TMPATH}/metrics.txt"
MONITOR_LOG_FILE="${TMPATH}/kata-monitor.log"
CACHE_UPD_TIMEOUT_SEC=${CACHE_UPD_TIMEOUT_SEC:-20}
POD_ID=""
CID=""
RUNC_POD_ID=""
RUNC_CID=""
CURRENT_TASK=""
FALSE=1
TRUE=0
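# For reference only (not executed by this test): the endpoints exercised
# below can be probed manually once kata-monitor is running, e.g.:
#   curl -s "${MONITOR_HTTP_ENDPOINT}/sandboxes"        # cached kata sandbox IDs
#   curl -s "${MONITOR_HTTP_ENDPOINT}/metrics" | head   # Prometheus-style metrics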
trap error_with_msg ERR
title() {
local step="$1"
echo -e "\n* STEP: $step"
}
echo_ok() {
local msg="$1"
echo "OK: $msg"
}
# quiet crictl
qcrictl() {
sudo crictl "$@" > /dev/null
}
# this is just a hash of the current date (+ nanoseconds)
gen_unique_id() {
date +%T:%N | md5sum | cut -d ' ' -f 1
}
error_with_msg() {
local msg=${1:-"cannot $CURRENT_TASK"}
trap - ERR
echo -e "\nERROR: $msg"
if [ -f "$MONITOR_LOG_FILE" ]; then
echo -e "\nkata-monitor logs:\n----------------"
cat "$MONITOR_LOG_FILE"
fi
echo -e "\nkata-monitor testing: FAILED!"
cleanup
exit 1
}
cleanup() {
stop_workload
stop_workload "$RUNC_CID" "$RUNC_POD_ID"
[ -n "$KATA_MONITOR_PID" ] \
&& [ -d "/proc/$KATA_MONITOR_PID" ] \
&& kill -9 "$KATA_MONITOR_PID"
rm -rf "$TMPATH"
}
create_sandbox_json() {
local uid_name_suffix="$(gen_unique_id)"
local sbfile="$TMPATH/sandbox-$uid_name_suffix.json"
cat <<EOF >$sbfile
{
"metadata": {
"name": "nginx-$uid_name_suffix",
"namespace": "default",
"uid": "nginx-container-uid",
"attempt": 1
},
"logDirectory": "/tmp",
"linux": {
}
}
EOF
echo "$sbfile"
}
create_container_json() {
local uid_name_suffix="$(gen_unique_id)"
local cntfile="$TMPATH/container-$uid_name_suffix.json"
cat <<EOF >$cntfile
{
"metadata": {
"name": "busybox",
"namespace": "default",
"uid": "busybox-container-uid"
},
"image":{
"image": "busybox"
},
"command": [
"top"
],
"log_path":"busybox.log",
"linux": {
}
}
EOF
echo "$cntfile"
}
start_workload() {
local runtime=${1:-}
local args=""
local sbfile=""
local cntfile=""
[ -n "$runtime" ] && args="-r $runtime"
sbfile="$(create_sandbox_json)"
cntfile="$(create_container_json)"
POD_ID=$(sudo crictl runp $args $sbfile)
CID=$(sudo crictl create $POD_ID $cntfile $sbfile)
qcrictl start $CID
}
stop_workload() {
local cid="${1:-$CID}"
local pod_id="${2:-$POD_ID}"
local check
[ -z "$pod_id" ] && return
check=$(sudo crictl pods -q -id $pod_id)
[ -z "$check" ] && return
qcrictl stop $cid
qcrictl rm $cid
qcrictl stopp $pod_id
qcrictl rmp $pod_id
}
is_sandbox_there() {
local podid=${1}
local sbs s
sbs=$(sudo curl -s ${MONITOR_HTTP_ENDPOINT}/sandboxes)
if [ -n "$sbs" ]; then
for s in $sbs; do
if [ "$s" = "$podid" ]; then
return $TRUE
break
fi
done
fi
return $FALSE
}
is_sandbox_there_iterate() {
local podid=${1}
for i in $(seq 1 $CACHE_UPD_TIMEOUT_SEC); do
is_sandbox_there "$podid" && return $TRUE
echo -n "."
sleep 1
continue
done
return $FALSE
}
is_sandbox_missing_iterate() {
local podid=${1}
for i in $(seq 1 $CACHE_UPD_TIMEOUT_SEC); do
is_sandbox_there "$podid" || return $TRUE
echo -n "."
sleep 1
continue
done
return $FALSE
}
main() {
local args=""
###########################
title "pre-checks"
CURRENT_TASK="connect to the container engine"
qcrictl pods
echo_ok "$CURRENT_TASK"
###########################
title "pull the image to be used"
sudo crictl pull busybox
###########################
title "create workloads"
CURRENT_TASK="start workload (runc)"
start_workload
RUNC_POD_ID="$POD_ID"
RUNC_CID="$CID"
echo_ok "$CURRENT_TASK - POD ID:$POD_ID, CID:$CID"
CURRENT_TASK="start workload ($CRICTL_RUNTIME)"
start_workload "$CRICTL_RUNTIME"
echo_ok "$CURRENT_TASK - POD ID:$POD_ID, CID:$CID"
###########################
title "start kata-monitor"
[ ! -x "$KATA_MONITOR_BIN" ] && error_with_msg "kata-monitor binary not found"
[ "$CONTAINER_ENGINE" = "crio" ] && args="--runtime-endpoint /run/crio/crio.sock"
CURRENT_TASK="start kata-monitor"
sudo $KATA_MONITOR_BIN $args --log-level trace > "$MONITOR_LOG_FILE" 2>&1 &
KATA_MONITOR_PID="$!"
echo_ok "$CURRENT_TASK ($KATA_MONITOR_PID)"
###########################
title "kata-monitor cache update checks"
CURRENT_TASK="retrieve $POD_ID in kata-monitor cache"
is_sandbox_there_iterate "$POD_ID" || error_with_msg
echo_ok "$CURRENT_TASK"
CURRENT_TASK="look for runc pod $RUNC_POD_ID in kata-monitor cache"
is_sandbox_there_iterate "$RUNC_POD_ID" && error_with_msg "cache: got runc pod $RUNC_POD_ID"
echo_ok "runc pod $RUNC_POD_ID skipped from kata-monitor cache"
###########################
title "kata-monitor metrics retrieval"
CURRENT_TASK="retrieve metrics from kata-monitor"
curl -s ${MONITOR_HTTP_ENDPOINT}/metrics > "$METRICS_FILE"
echo_ok "$CURRENT_TASK"
CURRENT_TASK="retrieve metrics for pod $POD_ID"
METRICS_COUNT=$(grep -c "$POD_ID" "$METRICS_FILE")
[ ${METRICS_COUNT} -lt ${MONITOR_MIN_METRICS_NUM} ] \
&& error_with_msg "got too few metrics (#${METRICS_COUNT})"
echo_ok "$CURRENT_TASK - found #${METRICS_COUNT} metrics"
###########################
title "remove kata workload"
CURRENT_TASK="stop workload ($CRICTL_RUNTIME)"
stop_workload
echo_ok "$CURRENT_TASK"
###########################
title "kata-monitor cache update checks (removal)"
CURRENT_TASK="verify removal of $POD_ID from kata-monitor cache"
is_sandbox_missing_iterate "$POD_ID" || error_with_msg "pod $POD_ID was not removed"
echo_ok "$CURRENT_TASK"
###########################
CURRENT_TASK="cleanup"
cleanup
echo -e "\nkata-monitor testing: PASSED!\n"
}
main "@"

View File

@@ -15,10 +15,33 @@ source "${vfio_dir}/../../common.bash"
function install_dependencies() {
info "Installing the dependencies needed for running the vfio tests"
(
source /etc/os-release || source /usr/lib/os-release
case "${ID}" in
ubuntu)
# cloud image dependencies
deps=(xorriso curl qemu-utils openssh-client)
sudo apt-get update
sudo apt-get install -y ${deps[@]} qemu-system-x86
;;
fedora)
# cloud image dependencies
deps=(xorriso curl qemu-img openssh)
sudo dnf install -y ${deps[@]} qemu-system-x86-core
;;
"*")
die "Unsupported distro: ${ID}"
;;
esac
)
}
function run() {
info "Running cri-containerd tests using ${KATA_HYPERVISOR} hypervisor"
"${vfio_dir}"/vfio_fedora_vm_wrapper.sh
}
function main() {

View File

@@ -0,0 +1,176 @@
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
{
"ociVersion": "1.0.0-rc2-dev",
"platform": {
"os": "linux",
"arch": "amd64"
},
"annotations": {
"io.katacontainers.config.hypervisor.enable_iommu": "false",
"io.katacontainers.config.runtime.vfio_mode": "guest-kernel"
},
"process": {
"terminal": false,
"consoleSize": {
"height": 0,
"width": 0
},
"user": {
"uid": 0,
"gid": 0
},
"args": [ "/bin/tail", "-f", "/dev/null" ],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm"
],
"cwd": "/",
"rlimits": [{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}],
"noNewPrivileges": true
},
"root": {
"path": "@ROOTFS@",
"readonly": false
},
"hostname": "vfio-test",
"mounts": [{
"destination": "/proc",
"type": "proc",
"source": "proc"
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/dev/pts",
"type": "devpts",
"source": "devpts",
"options": [
"nosuid",
"noexec",
"newinstance",
"ptmxmode=0666",
"mode=0620",
"gid=5"
]
},
{
"destination": "/dev/shm",
"type": "tmpfs",
"source": "shm",
"options": [
"nosuid",
"noexec",
"nodev",
"mode=1777",
"size=65536k"
]
},
{
"destination": "/dev/mqueue",
"type": "mqueue",
"source": "mqueue",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev",
"ro"
]
},
{
"destination": "/sys/fs/cgroup",
"type": "cgroup",
"source": "cgroup",
"options": [
"nosuid",
"noexec",
"nodev",
"relatime",
"ro"
]
}
],
"hooks": {},
"linux": {
"devices": [{
"path": "@VFIO_PATH@",
"type": "c",
"major": @VFIO_MAJOR@,
"minor": @VFIO_MINOR@,
"fileMode": 384,
"uid": 0,
"gid": 0
}],
"cgroupsPath": "kata/vfiotest",
"resources": {
"devices": [
{"allow":false,"access":"rwm"},
{"allow":true,"type":"c","major":1,"minor":3,"access":"rwm"},
{"allow":true,"type":"c","major":1,"minor":5,"access":"rwm"},
{"allow":true,"type":"c","major":1,"minor":8,"access":"rwm"},
{"allow":true,"type":"c","major":1,"minor":9,"access":"rwm"},
{"allow":true,"type":"c","major":5,"minor":0,"access":"rwm"},
{"allow":true,"type":"c","major":5,"minor":1,"access":"rwm"},
{"allow": true,"access": "rwm","major": @VFIO_MAJOR@,"minor": @VFIO_MINOR@,"type": "c"}
]
},
"namespaces": [{
"type": "pid"
},
{
"type": "network"
},
{
"type": "ipc"
},
{
"type": "uts"
},
{
"type": "mount"
}
],
"maskedPaths": [
"/proc/kcore",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/sys/firmware"
],
"readonlyPaths": [
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
}
}

350
tests/functional/vfio/run.sh Executable file
View File

@@ -0,0 +1,350 @@
#!/bin/bash
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -x
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
script_path=$(dirname "$0")
source "${script_path}/../../common.bash"
addr=
tmp_data_dir="$(mktemp -d)"
rootfs_tar="${tmp_data_dir}/rootfs.tar"
trap cleanup EXIT
# kata-runtime options
SANDBOX_CGROUP_ONLY=""
HYPERVISOR=
MACHINE_TYPE=
IMAGE_TYPE=
cleanup() {
clean_env_ctr
sudo rm -rf "${tmp_data_dir}"
[ -n "${host_pci}" ] && sudo driverctl unset-override "${host_pci}"
}
host_pci_addr() {
lspci -D | grep "Ethernet controller" | grep "Virtio.*network device" | tail -1 | cut -d' ' -f1
}
get_vfio_path() {
local addr="$1"
echo "/dev/vfio/$(basename $(realpath /sys/bus/pci/drivers/vfio-pci/${host_pci}/iommu_group))"
}
pull_rootfs() {
# pull and export busybox image in tar file
local image="quay.io/prometheus/busybox:latest"
sudo -E ctr i pull ${image}
sudo -E ctr i export "${rootfs_tar}" "${image}"
sudo chown ${USER}:${USER} "${rootfs_tar}"
sync
}
create_bundle() {
local bundle_dir="$1"
mkdir -p "${bundle_dir}"
# extract busybox rootfs
local rootfs_dir="${bundle_dir}/rootfs"
mkdir -p "${rootfs_dir}"
local layers_dir="$(mktemp -d)"
tar -C "${layers_dir}" -pxf "${rootfs_tar}"
for ((i=0;i<$(cat ${layers_dir}/manifest.json | jq -r ".[].Layers | length");i++)); do
tar -C ${rootfs_dir} -xf ${layers_dir}/$(cat ${layers_dir}/manifest.json | jq -r ".[].Layers[${i}]")
done
sync
# Copy config.json
cp -a "${script_path}/config.json" "${bundle_dir}/config.json"
}
run_container() {
local container_id="$1"
local bundle_dir="$2"
sudo -E ctr run -d --runtime io.containerd.kata.v2 --config "${bundle_dir}/config.json" "${container_id}"
}
get_ctr_cmd_output() {
local container_id="$1"
shift
timeout 30s sudo -E ctr t exec --exec-id 2 "${container_id}" "${@}"
}
check_guest_kernel() {
local container_id="$1"
# For vfio_mode=guest-kernel, the device should be bound to
# the guest kernel's native driver. To check this has worked,
# we look for an ethernet device named 'eth*'
get_ctr_cmd_output "${container_id}" ip a | grep "eth" || die "Missing VFIO network interface"
}
check_vfio() {
local cid="$1"
# For vfio_mode=vfio, the device should be bound to the guest
# vfio-pci driver.
# Check the control device is visible
get_ctr_cmd_output "${cid}" ls /dev/vfio/vfio || die "Couldn't find VFIO control device in container"
# The device should *not* cause an ethernet interface to appear
! get_ctr_cmd_output "${cid}" ip a | grep "eth" || die "Unexpected network interface"
# There should be exactly one VFIO group device (there might
# be multiple IOMMU groups in the VM, but only one device
# should be bound to the VFIO driver, so there should still
# only be one VFIO device)
group="$(get_ctr_cmd_output "${cid}" ls /dev/vfio | grep -v vfio)"
if [ $(echo "${group}" | wc -w) != "1" ] ; then
die "Expected exactly one VFIO group got: ${group}"
fi
# There should be two devices in the IOMMU group: the ethernet
# device we care about, plus the PCIe to PCI bridge device
devs="$(get_ctr_cmd_output "${cid}" ls /sys/kernel/iommu_groups/"${group}"/devices)"
num_devices=$(echo "${devs}" | wc -w)
if [ "${HYPERVISOR}" = "qemu" ] && [ "${num_devices}" != "2" ] ; then
die "Expected exactly two devices got: ${devs}"
fi
if [ "${HYPERVISOR}" = "clh" ] && [ "${num_devices}" != "1" ] ; then
die "Expected exactly one device got: ${devs}"
fi
# The bridge device will always sort first, because it is on
# bus zero, whereas the NIC will be on a non-zero bus
guest_pci=$(echo "${devs}" | tail -1)
# This is a roundabout way of getting the environment
# variable, but to use the more obvious "echo $PCIDEVICE_..."
# we would have to escape the '$' enough to not be expanded
# before it's injected into the container, but not so much
# that it *is* expanded by the shell within the container.
# Doing that with another shell function in between is very
# fragile, so do it this way instead.
guest_env="$(get_ctr_cmd_output "${cid}" env | grep ^PCIDEVICE_VIRTIO_NET | sed s/^[^=]*=//)"
if [ "${guest_env}" != "${guest_pci}" ]; then
die "PCIDEVICE variable was \"${guest_env}\" instead of \"${guest_pci}\""
fi
}
get_dmesg() {
local container_id="$1"
get_ctr_cmd_output "${container_id}" dmesg
}
# Show help about this script
help(){
cat << EOF
Usage: $0 [-h] [options]
Description:
This script runs a kata container and passes a VFIO device through to it
Options:
-h, Help
-i <string>, Specify initrd or image
-m <string>, Specify kata-runtime machine type for qemu hypervisor
-p <string>, Specify kata-runtime hypervisor
-s <value>, Set sandbox_cgroup_only in the configuration file
EOF
}
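# Example invocation (mirrors how the VM wrapper in this series calls it):
#   sudo ./run.sh -s false -p qemu -m q35 -i image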
setup_configuration_file() {
local qemu_config_file="configuration-qemu.toml"
local clh_config_file="configuration-clh.toml"
local image_file="/opt/kata/share/kata-containers/kata-containers.img"
local initrd_file="/opt/kata/share/kata-containers/kata-containers-initrd.img"
local kata_config_file=""
for file in $(kata-runtime --kata-show-default-config-paths); do
if [ ! -f "${file}" ]; then
continue
fi
kata_config_file="${file}"
config_dir=$(dirname ${file})
config_filename=""
if [ "$HYPERVISOR" = "qemu" ]; then
config_filename="${qemu_config_file}"
elif [ "$HYPERVISOR" = "clh" ]; then
config_filename="${clh_config_file}"
fi
config_file="${config_dir}/${config_filename}"
if [ -f "${config_file}" ]; then
rm -f "${kata_config_file}"
cp -a $(realpath "${config_file}") "${kata_config_file}"
break
fi
done
# machine type applies to configuration.toml and configuration-qemu.toml
if [ -n "$MACHINE_TYPE" ]; then
if [ "$HYPERVISOR" = "qemu" ]; then
sed -i 's|^machine_type.*|machine_type = "'${MACHINE_TYPE}'"|g' "${kata_config_file}"
else
warn "Variable machine_type only applies to qemu. It will be ignored"
fi
fi
# Make sure we have set hot_plug_vfio to a reasonable value
if [ "$HYPERVISOR" = "qemu" ]; then
sed -i -e 's|^#*.*hot_plug_vfio.*|hot_plug_vfio = "bridge-port"|' "${kata_config_file}"
elif [ "$HYPERVISOR" = "clh" ]; then
sed -i -e 's|^#*.*hot_plug_vfio.*|hot_plug_vfio = "root-port"|' "${kata_config_file}"
fi
if [ -n "${SANDBOX_CGROUP_ONLY}" ]; then
sed -i 's|^sandbox_cgroup_only.*|sandbox_cgroup_only='${SANDBOX_CGROUP_ONLY}'|g' "${kata_config_file}"
fi
# Change to initrd or image depending on user input.
# Non-default configs must be changed to specify either initrd or image, image is default.
if [ "$IMAGE_TYPE" = "initrd" ]; then
if $(grep -q "^image.*" ${kata_config_file}); then
if $(grep -q "^initrd.*" ${kata_config_file}); then
sed -i '/^image.*/d' "${kata_config_file}"
else
sed -i 's|^image.*|initrd = "'${initrd_file}'"|g' "${kata_config_file}"
fi
fi
else
if $(grep -q "^initrd.*" ${kata_config_file}); then
if $(grep -q "^image.*" ${kata_config_file}); then
sed -i '/^initrd.*/d' "${kata_config_file}"
else
sed -i 's|^initrd.*|image = "'${image_file}'"|g' "${kata_config_file}"
fi
fi
fi
# enable debug
sed -i -e 's/^#\(enable_debug\).*=.*$/\1 = true/g' \
-e 's/^#\(debug_console_enabled\).*=.*$/\1 = true/g' \
-e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 mitigations=off agent.log=debug"/g' \
"${kata_config_file}"
# enable VFIO relevant hypervisor annotations
sed -i -e 's/^\(enable_annotations\).*=.*$/\1 = ["enable_iommu"]/' \
"${kata_config_file}"
}
run_test_container() {
local container_id="$1"
local bundle_dir="$2"
local config_json_in="$3"
local host_pci="$4"
# generate final config.json
sed -e '/^#.*/d' \
-e 's|@VFIO_PATH@|'"${vfio_device}"'|g' \
-e 's|@VFIO_MAJOR@|'"${vfio_major}"'|g' \
-e 's|@VFIO_MINOR@|'"${vfio_minor}"'|g' \
-e 's|@VFIO_CTL_MAJOR@|'"${vfio_ctl_major}"'|g' \
-e 's|@VFIO_CTL_MINOR@|'"${vfio_ctl_minor}"'|g' \
-e 's|@ROOTFS@|'"${bundle_dir}/rootfs"'|g' \
-e 's|@HOST_PCI@|'"${host_pci}"'|g' \
"${config_json_in}" > "${script_path}/config.json"
create_bundle "${bundle_dir}"
# run container
run_container "${container_id}" "${bundle_dir}"
# output VM dmesg
get_dmesg "${container_id}"
}
main() {
local OPTIND
while getopts "hi:m:p:s:" opt;do
case ${opt} in
h)
help
exit 0;
;;
i)
IMAGE_TYPE="${OPTARG}"
;;
m)
MACHINE_TYPE="${OPTARG}"
;;
p)
HYPERVISOR="${OPTARG}"
;;
s)
SANDBOX_CGROUP_ONLY="${OPTARG}"
;;
?)
# parse failure
help
die "Failed to parse arguments"
;;
esac
done
shift $((OPTIND-1))
#
# Get the device ready on the host
#
setup_configuration_file
restart_containerd_service
sudo modprobe vfio
sudo modprobe vfio-pci
host_pci=$(host_pci_addr)
[ -n "${host_pci}" ] || die "virtio ethernet controller PCI address not found"
cat /proc/cmdline | grep -q "intel_iommu=on" || \
die "intel_iommu=on not found in kernel cmdline"
sudo driverctl set-override "${host_pci}" vfio-pci
vfio_device="$(get_vfio_path "${host_pci}")"
[ -n "${vfio_device}" ] || die "vfio device not found"
vfio_major="$(printf '%d' $(stat -c '0x%t' ${vfio_device}))"
vfio_minor="$(printf '%d' $(stat -c '0x%T' ${vfio_device}))"
[ -n "/dev/vfio/vfio" ] || die "vfio control device not found"
vfio_ctl_major="$(printf '%d' $(stat -c '0x%t' /dev/vfio/vfio))"
vfio_ctl_minor="$(printf '%d' $(stat -c '0x%T' /dev/vfio/vfio))"
# Get the rootfs we'll use for all tests
pull_rootfs
#
# Run the tests
#
# test for guest-kernel mode
guest_kernel_cid="vfio-guest-kernel-${RANDOM}"
run_test_container "${guest_kernel_cid}" \
"${tmp_data_dir}/vfio-guest-kernel" \
"${script_path}/guest-kernel.json.in" \
"${host_pci}"
check_guest_kernel "${guest_kernel_cid}"
# Remove the container so we can re-use the device for the next test
clean_env_ctr
# test for vfio mode
vfio_cid="vfio-vfio-${RANDOM}"
run_test_container "${vfio_cid}" \
"${tmp_data_dir}/vfio-vfio" \
"${script_path}/vfio.json.in" \
"${host_pci}"
check_vfio "${vfio_cid}"
}
main "$@"

View File

@@ -0,0 +1,187 @@
#
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
{
"ociVersion": "1.0.0-rc2-dev",
"platform": {
"os": "linux",
"arch": "amd64"
},
"annotations": {
"io.katacontainers.config.hypervisor.enable_iommu": "true",
"io.katacontainers.config.runtime.vfio_mode": "vfio"
},
"process": {
"terminal": false,
"consoleSize": {
"height": 0,
"width": 0
},
"user": {
"uid": 0,
"gid": 0
},
"args": [ "/bin/tail", "-f", "/dev/null" ],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm",
"PCIDEVICE_VIRTIO_NET=@HOST_PCI@"
],
"cwd": "/",
"rlimits": [{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}],
"noNewPrivileges": true
},
"root": {
"path": "@ROOTFS@",
"readonly": false
},
"hostname": "vfio-test",
"mounts": [{
"destination": "/proc",
"type": "proc",
"source": "proc"
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/dev/pts",
"type": "devpts",
"source": "devpts",
"options": [
"nosuid",
"noexec",
"newinstance",
"ptmxmode=0666",
"mode=0620",
"gid=5"
]
},
{
"destination": "/dev/shm",
"type": "tmpfs",
"source": "shm",
"options": [
"nosuid",
"noexec",
"nodev",
"mode=1777",
"size=65536k"
]
},
{
"destination": "/dev/mqueue",
"type": "mqueue",
"source": "mqueue",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev",
"ro"
]
},
{
"destination": "/sys/fs/cgroup",
"type": "cgroup",
"source": "cgroup",
"options": [
"nosuid",
"noexec",
"nodev",
"relatime",
"ro"
]
}
],
"hooks": {},
"linux": {
"devices": [{
"path": "/dev/vfio/vfio",
"type": "c",
"major": @VFIO_CTL_MAJOR@,
"minor": @VFIO_CTL_MINOR@,
"fileMode": 438,
"uid": 0,
"gid": 0
},
{
"path": "@VFIO_PATH@",
"type": "c",
"major": @VFIO_MAJOR@,
"minor": @VFIO_MINOR@,
"fileMode": 384,
"uid": 0,
"gid": 0
}],
"cgroupsPath": "kata/vfiotest",
"resources": {
"devices": [
{"allow":false,"access":"rwm"},
{"allow":true,"type":"c","major":1,"minor":3,"access":"rwm"},
{"allow":true,"type":"c","major":1,"minor":5,"access":"rwm"},
{"allow":true,"type":"c","major":1,"minor":8,"access":"rwm"},
{"allow":true,"type":"c","major":1,"minor":9,"access":"rwm"},
{"allow":true,"type":"c","major":5,"minor":0,"access":"rwm"},
{"allow":true,"type":"c","major":5,"minor":1,"access":"rwm"},
{"allow": true,"access": "rwm","major": @VFIO_CTL_MAJOR@,"minor": @VFIO_CTL_MINOR@,"type": "c"},
{"allow": true,"access": "rwm","major": @VFIO_MAJOR@,"minor": @VFIO_MINOR@,"type": "c"}
]
},
"namespaces": [{
"type": "pid"
},
{
"type": "network"
},
{
"type": "ipc"
},
{
"type": "uts"
},
{
"type": "mount"
}
],
"maskedPaths": [
"/proc/kcore",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/sys/firmware"
],
"readonlyPaths": [
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
}
}

View File

@@ -0,0 +1,329 @@
#!/bin/bash
#
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Boot a VM that supports VFIO, then run the VFIO functional tests
# (tests/functional/vfio/run.sh) inside it
set -o xtrace
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
cidir=$(readlink -f $(dirname "$0"))
source /etc/os-release || source /usr/lib/os-release
# <CHANGES HERE>
source "${cidir}/../../common.bash"
export WORKSPACE="${WORKSPACE:-${HOME}}"
export GIT_URL="https://github.com/kata-containers/kata-containers.git"
export KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
# </CHANGES>
http_proxy=${http_proxy:-}
https_proxy=${https_proxy:-}
vm_ip="127.0.15.1"
vm_port="10022"
# Don't save data in /tmp, we need it after rebooting the system
data_dir="${HOME}/functional-vfio-test"
ssh_key_file="${data_dir}/key"
arch=$(uname -m)
artifacts_dir="${WORKSPACE}/artifacts"
kill_vms() {
sudo killall -9 qemu-system-${arch}
}
cleanup() {
mkdir -p ${artifacts_dir}
sudo chown -R ${USER} ${artifacts_dir}
scp_vm ${artifacts_dir}/* ${artifacts_dir} || true
kill_vms
echo "::group::L2 journal"
cat "${artifacts_dir}/journal.log"
echo "::endgroup::"
echo "::group::L1 dmesg"
sudo dmesg
echo "::endgroup::"
}
create_ssh_key() {
rm -f "${ssh_key_file}"
ssh-keygen -f "${ssh_key_file}" -t rsa -N ""
}
create_meta_data() {
file="$1"
cat <<EOF > "${file}"
{
"uuid": "d1b4aafa-5d75-4f9c-87eb-2ceabe110c39",
"hostname": "test"
}
EOF
}
create_user_data() {
file="$1"
ssh_pub_key_file="$2"
ssh_pub_key="$(cat "${ssh_pub_key_file}")"
dnf_proxy=""
service_proxy=""
docker_user_proxy="{}"
environment=$(env | egrep "ghprb|WORKSPACE|KATA|GIT|JENKINS|_PROXY|_proxy" | \
sed -e "s/'/'\"'\"'/g" \
-e "s/\(^[[:alnum:]_]\+\)=/\1='/" \
-e "s/$/'/" \
-e 's/^/ export /')
if [ -n "${http_proxy}" ] && [ -n "${https_proxy}" ]; then
dnf_proxy="proxy=${http_proxy}"
service_proxy='[Service]
Environment="HTTP_PROXY='${http_proxy}'" "HTTPS_PROXY='${https_proxy}'" "NO_PROXY='${no_proxy}'"'
docker_user_proxy='{"proxies": { "default": {
"httpProxy": "'${http_proxy}'",
"httpsProxy": "'${https_proxy}'",
"noProxy": "'${no_proxy}'"
} } }'
fi
cat <<EOF > "${file}"
#cloud-config
package_upgrade: false
runcmd:
- chown -R ${USER}:${USER} /home/${USER}
- touch /.done
users:
- gecos: User
gid: "1000"
lock-passwd: true
name: ${USER}
shell: /bin/bash
ssh-authorized-keys:
- ${ssh_pub_key}
sudo: ALL=(ALL) NOPASSWD:ALL
uid: "1000"
write_files:
- content: |
[main]
fastestmirror=True
gpgcheck=1
max_parallel_downloads=10
installonly_limit=2
clean_requirements_on_remove=True
keepcache=True
ip_resolve=4
path: /etc/dnf/dnf.conf
- content: |
${environment}
path: /etc/environment
- content: |
${service_proxy}
path: /etc/systemd/system/docker.service.d/http-proxy.conf
- content: |
${service_proxy}
path: /etc/systemd/system/containerd.service.d/http-proxy.conf
- content: |
${docker_user_proxy}
path: ${HOME}/.docker/config.json
- content: |
${docker_user_proxy}
path: /root/.docker/config.json
- content: |
set -x
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
. /etc/environment
. /etc/os-release
[ "\$ID" = "fedora" ] || (echo >&2 "$0 only supports Fedora"; exit 1)
echo "${dnf_proxy}" | sudo tee -a /etc/dnf/dnf.conf
for i in \$(seq 1 50); do
[ -f /.done ] && break
echo "waiting for cloud-init to finish"
sleep 5;
done
export DEBUG=true
export GOPATH=\${WORKSPACE}/go
export PATH=\${GOPATH}/bin:/usr/local/go/bin:/usr/sbin:\${PATH}
export GOROOT="/usr/local/go"
# Make sure the packages were installed
# Sometimes cloud-init is unable to install them
sudo dnf install -y git wget pciutils driverctl
git config --global user.email "foo@bar"
git config --global user.name "Foo Bar"
sudo mkdir -p /workspace
sudo mount -t 9p -o access=any,trans=virtio,version=9p2000.L workspace /workspace
mkdir -p ${artifacts_dir}
trap "cd /workspace; sudo journalctl -b0 > ${artifacts_dir}/journal.log || true; sudo chown -R \${USER} ${artifacts_dir}" EXIT
pushd /workspace
source tests/common.bash
ensure_yq
cri_containerd=\$(get_from_kata_deps "externals.containerd.lts")
cri_tools=\$(get_from_kata_deps "externals.critools.latest")
install_cri_containerd \${cri_containerd}
install_cri_tools \${cri_tools}
kata_tarball_dir="kata-artifacts"
install_kata
sudo /workspace/tests/functional/vfio/run.sh -s false -p \${KATA_HYPERVISOR} -m q35 -i image
sudo /workspace/tests/functional/vfio/run.sh -s true -p \${KATA_HYPERVISOR} -m q35 -i image
path: /home/${USER}/run.sh
permissions: '0755'
EOF
}
create_config_iso() {
iso_file="$1"
ssh_pub_key_file="${ssh_key_file}.pub"
iso_data_dir="${data_dir}/d"
meta_data_file="${iso_data_dir}/openstack/latest/meta_data.json"
user_data_file="${iso_data_dir}/openstack/latest/user_data"
mkdir -p $(dirname "${user_data_file}")
create_meta_data "${meta_data_file}"
create_user_data "${user_data_file}" "${ssh_pub_key_file}"
[ -f "${iso_file}" ] && rm -f "${iso_file}"
xorriso -as mkisofs -R -V config-2 -o "${iso_file}" "${iso_data_dir}"
}
pull_fedora_cloud_image() {
fedora_img="$1"
fedora_version=38
# Add a version to the image cache, otherwise the tests will always
# reuse the same image without rebuilding it, regardless of the version
# set in fedora_version
fedora_img_cache="${fedora_img}.cache.${fedora_version}"
fedora_img_url="https://download.fedoraproject.org/pub/fedora/linux/releases/${fedora_version}/Cloud/${arch}/images/Fedora-Cloud-Base-${fedora_version}-1.6.${arch}.raw.xz"
if [ ! -f "${fedora_img_cache}" ]; then
curl -sL ${fedora_img_url} -o "${fedora_img_cache}.xz"
xz -f -d "${fedora_img_cache}.xz"
fi
cp -a "${fedora_img_cache}" "${fedora_img}"
# setup cloud image
sudo losetup -D
loop=$(sudo losetup --show -Pf "${fedora_img}")
sudo mount "${loop}p2" /mnt
# add intel_iommu=on to the guest kernel command line
kernelopts="intel_iommu=on iommu=pt selinux=0 mitigations=off idle=poll kvm.tdp_mmu=0"
entries=$(sudo ls /mnt/loader/entries/)
for entry in ${entries}; do
sudo sed -i '/^options / s/$/ '"${kernelopts}"' /g' /mnt/loader/entries/"${entry}"
done
sudo sed -i 's|kernelopts="|kernelopts="'"${kernelopts}"'|g' /mnt/grub2/grub.cfg
sudo sed -i 's|kernelopts=|kernelopts='"${kernelopts}"'|g' /mnt/grub2/grubenv
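# Illustrative (hypothetical) effect on a BLS entry "options" line:
#   before: options root=UUID=... ro no_timer_check
#   after:  options root=UUID=... ro no_timer_check intel_iommu=on iommu=pt selinux=0 mitigations=off idle=poll kvm.tdp_mmu=0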
# cleanup
sudo umount -R /mnt/
sudo losetup -d "${loop}"
qemu-img resize -f raw "${fedora_img}" +20G
}
reload_kvm() {
# TDP_MMU is buggy on Hyper-V until v6.3/v6.4
sudo rmmod kvm-intel kvm-amd kvm || true
sudo modprobe kvm tdp_mmu=0
sudo modprobe kvm-intel || true
sudo modprobe kvm-amd || true
}
run_vm() {
image="$1"
config_iso="$2"
disable_modern="off"
hostname="$(hostname)"
memory="8192M"
cpus=2
machine_type="q35"
reload_kvm
sudo /usr/bin/qemu-system-${arch} -m "${memory}" -smp cpus="${cpus}" \
-cpu host,host-phys-bits \
-machine ${machine_type},accel=kvm,kernel_irqchip=split \
-device intel-iommu,intremap=on,caching-mode=on,device-iotlb=on \
-drive file=${image},if=virtio,aio=threads,format=raw \
-drive file=${config_iso},if=virtio,media=cdrom \
-daemonize -enable-kvm -device virtio-rng-pci -display none -vga none \
-netdev user,hostfwd=tcp:${vm_ip}:${vm_port}-:22,hostname="${hostname}",id=net0 \
-device virtio-net-pci,netdev=net0,disable-legacy=on,disable-modern="${disable_modern}",iommu_platform=on,ats=on \
-netdev user,id=net1 \
-device virtio-net-pci,netdev=net1,disable-legacy=on,disable-modern="${disable_modern}",iommu_platform=on,ats=on \
-fsdev local,path=${repo_root_dir},security_model=passthrough,id=fs0 \
-device virtio-9p-pci,fsdev=fs0,mount_tag=workspace
}
ssh_vm() {
cmd=$@
ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentitiesOnly=yes -i "${ssh_key_file}" -p "${vm_port}" "${USER}@${vm_ip}" "${cmd}"
}
scp_vm() {
guest_src=$1
host_dest=$2
scp -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentitiesOnly=yes -i "${ssh_key_file}" -P "${vm_port}" ${USER}@${vm_ip}:${guest_src} ${host_dest}
}
wait_for_vm() {
for i in $(seq 1 30); do
if ssh_vm true; then
return 0
fi
info "waiting for VM to start"
sleep 5
done
return 1
}
main() {
trap cleanup EXIT
config_iso_file="${data_dir}/config.iso"
fedora_img="${data_dir}/image.img"
mkdir -p "${data_dir}"
create_ssh_key
create_config_iso "${config_iso_file}"
for i in $(seq 1 5); do
pull_fedora_cloud_image "${fedora_img}"
run_vm "${fedora_img}" "${config_iso_file}"
if wait_for_vm; then
break
fi
info "Couldn't connect to the VM. Stopping VM and starting a new one."
kill_vms
done
ssh_vm "/home/${USER}/run.sh"
}
main $@

266
tests/gha-run-k8s-common.sh Normal file
View File

@@ -0,0 +1,266 @@
#!/usr/bin/env bash
# Copyright (c) 2023 Microsoft Corporation
#
# SPDX-License-Identifier: Apache-2.0
set -o errexit
set -o nounset
set -o pipefail
tests_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${tests_dir}/common.bash"
K8S_TEST_HOST_TYPE="${K8S_TEST_HOST_TYPE:-small}"
function _print_instance_type() {
case ${K8S_TEST_HOST_TYPE} in
small)
echo "Standard_D2s_v5"
;;
normal)
echo "Standard_D4s_v5"
;;
*)
echo "Unknown instance type '${K8S_TEST_HOST_TYPE}'" >&2
exit 1
esac
}
function _print_cluster_name() {
test_type="${1:-k8s}"
short_sha="$(git rev-parse --short=12 HEAD)"
echo "${test_type}-${GH_PR_NUMBER}-${short_sha}-${KATA_HYPERVISOR}-${KATA_HOST_OS}-amd64-${K8S_TEST_HOST_TYPE:0:1}"
}
function _print_rg_name() {
test_type="${1:-k8s}"
echo "${AZ_RG:-"kataCI-$(_print_cluster_name ${test_type})"}"
}
function install_azure_cli() {
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
# The aks-preview extension is required while the Mariner Kata host is in preview.
az extension add --name aks-preview
}
function login_azure() {
az login \
--service-principal \
-u "${AZ_APPID}" \
-p "${AZ_PASSWORD}" \
--tenant "${AZ_TENANT_ID}"
}
function create_cluster() {
test_type="${1:-k8s}"
# First ensure it didn't fail to get cleaned up from a previous run.
delete_cluster "${test_type}" || true
local rg="$(_print_rg_name ${test_type})"
az group create \
-l eastus2 \
-n "${rg}"
az aks create \
-g "${rg}" \
--node-resource-group "node-${rg}" \
-n "$(_print_cluster_name ${test_type})" \
-s "$(_print_instance_type)" \
--node-count 1 \
--generate-ssh-keys \
$([ "${KATA_HOST_OS}" = "cbl-mariner" ] && echo "--os-sku AzureLinux --workload-runtime KataMshvVmIsolation")
}
function install_bats() {
# Installing bats from the lunar repo.
# This installs a newer version of bats which supports the setup_file and teardown_file functions.
# These functions are helpful when adding new tests that require one-time setup.
sudo apt install -y software-properties-common
sudo add-apt-repository 'deb http://archive.ubuntu.com/ubuntu/ lunar universe'
sudo apt install -y bats
sudo add-apt-repository --remove 'deb http://archive.ubuntu.com/ubuntu/ lunar universe'
}
function install_kubectl() {
sudo az aks install-cli
}
function get_cluster_credentials() {
test_type="${1:-k8s}"
az aks get-credentials \
-g "$(_print_rg_name ${test_type})" \
-n "$(_print_cluster_name ${test_type})"
}
function delete_cluster() {
test_type="${1:-k8s}"
az group delete \
-g "$(_print_rg_name ${test_type})" \
--yes
}
function delete_cluster_kcli() {
CLUSTER_NAME="${CLUSTER_NAME:-kata-k8s}"
kcli delete -y kube "$CLUSTER_NAME"
}
function get_nodes_and_pods_info() {
kubectl debug $(kubectl get nodes -o name) -it --image=quay.io/kata-containers/kata-debug:latest || true
kubectl get pods -o name | grep node-debugger | xargs kubectl delete || true
}
function deploy_k0s() {
curl -sSLf https://get.k0s.sh | sudo sh
sudo k0s install controller --single ${KUBERNETES_EXTRA_PARAMS:-}
sudo k0s start
# This is an arbitrary value that came up from local tests
sleep 120s
# Download the kubectl binary into /usr/bin so we can avoid depending
# on `k0s kubectl` command
ARCH=$(uname -m)
if [ "${ARCH}" = "x86_64" ]; then
ARCH=amd64
fi
kubectl_version=$(sudo k0s kubectl version 2>/dev/null | grep "Client Version" | sed -e 's/Client Version: //')
sudo curl -fL --progress-bar -o /usr/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/${kubectl_version}/bin/linux/${ARCH}/kubectl
sudo chmod +x /usr/bin/kubectl
mkdir -p ~/.kube
sudo cp /var/lib/k0s/pki/admin.conf ~/.kube/config
sudo chown ${USER}:${USER} ~/.kube/config
}
function deploy_k3s() {
curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644
# This is an arbitrary value that came up from local tests
sleep 120s
# Download the kubectl binary into /usr/bin and remove /usr/local/bin/kubectl
#
# We need to do this to avoid hitting issues like:
# ```sh
# error: open /etc/rancher/k3s/k3s.yaml.lock: permission denied
# ```
# Which happens basically because k3s links `/usr/local/bin/kubectl`
# to `/usr/local/bin/k3s`, and that does extra stuff that vanilla
# `kubectl` doesn't do.
ARCH=$(uname -m)
if [ "${ARCH}" = "x86_64" ]; then
ARCH=amd64
fi
kubectl_version=$(/usr/local/bin/k3s kubectl version --short 2>/dev/null | grep "Client Version" | sed -e 's/Client Version: //' -e 's/\+k3s1//')
sudo curl -fL --progress-bar -o /usr/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/${kubectl_version}/bin/linux/${ARCH}/kubectl
sudo chmod +x /usr/bin/kubectl
sudo rm -rf /usr/local/bin/kubectl
mkdir -p ~/.kube
cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
}
function create_cluster_kcli() {
CLUSTER_NAME="${CLUSTER_NAME:-kata-k8s}"
delete_cluster_kcli || true
kcli create kube "${KUBE_TYPE:-generic}" \
-P domain="kata.com" \
-P pool="${LIBVIRT_POOL:-default}" \
-P ctlplanes="${CLUSTER_CONTROL_NODES:-1}" \
-P workers="${CLUSTER_WORKERS:-1}" \
-P network="${LIBVIRT_NETWORK:-default}" \
-P image="${CLUSTER_IMAGE:-ubuntu2004}" \
-P sdn=flannel \
-P nfs=false \
-P disk_size="${CLUSTER_DISK_SIZE:-20}" \
"${CLUSTER_NAME}"
export KUBECONFIG="$HOME/.kcli/clusters/$CLUSTER_NAME/auth/kubeconfig"
local cmd="kubectl get nodes | grep '.*worker.*\<Ready\>'"
echo "Wait at least one worker be Ready"
if ! waitForProcess "330" "30" "$cmd"; then
echo "ERROR: worker nodes not ready."
kubectl get nodes
return 1
fi
# Ensure that system pods are running or completed.
cmd="[ \$(kubectl get pods -A --no-headers | grep -v 'Running\|Completed' | wc -l) -eq 0 ]"
echo "Wait system pods be running or completed"
if ! waitForProcess "90" "30" "$cmd"; then
echo "ERROR: not all pods are Running or Completed."
kubectl get pods -A
return 1
fi
}
function deploy_rke2() {
curl -sfL https://get.rke2.io | sudo sh -
sudo systemctl enable --now rke2-server.service
# This is an arbitrary value that came up from local tests
sleep 120s
# Link the kubectl binary into /usr/bin
sudo ln -sf /var/lib/rancher/rke2/bin/kubectl /usr/local/bin/kubectl
mkdir -p ~/.kube
sudo cp /etc/rancher/rke2/rke2.yaml ~/.kube/config
sudo chown ${USER}:${USER} ~/.kube/config
}
function _get_k0s_kubernetes_version_for_crio() {
# k0s version will look like:
# v1.27.5+k0s.0
#
# The CRI-O repo for such version of Kubernetes expects something like:
# 1.27
k0s_version=$(curl -sSLf "https://docs.k0sproject.io/stable.txt")
# Remove everything after the second '.'
crio_version=${k0s_version%\.*+*}
# Remove the 'v'
crio_version=${crio_version#v}
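# Worked example (version assumed for illustration):
#   k0s_version="v1.27.5+k0s.0"
#   ${k0s_version%\.*+*}  -> "v1.27"  (the shortest suffix matching ".*+*" is ".5+k0s.0")
#   ${crio_version#v}     -> "1.27"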
echo ${crio_version}
}
function setup_crio() {
# Get the CRI-O version to be installed depending on the version of the
# "k8s distro" that we are using
case ${KUBERNETES} in
k0s) crio_version=$(_get_k0s_kubernetes_version_for_crio) ;;
*) >&2 echo "${KUBERNETES} flavour is not supported with CRI-O"; exit 2 ;;
esac
install_crio ${crio_version}
}
function deploy_k8s() {
echo "::group::Deploying ${KUBERNETES}"
case ${KUBERNETES} in
k0s) deploy_k0s ;;
k3s) deploy_k3s ;;
rke2) deploy_rke2 ;;
*) >&2 echo "${KUBERNETES} flavour is not supported"; exit 2 ;;
esac
echo "::endgroup::"
}

37
tests/git-helper.sh Executable file
View File

@@ -0,0 +1,37 @@
#!/usr/bin/env bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o nounset
set -o pipefail
function add_kata_bot_info() {
echo "Adding user name and email to the local git repo"
git config user.email "katacontainersbot@gmail.com"
git config user.name "Kata Containers Bot"
}
function rebase_atop_of_the_latest_target_branch() {
if [ -n "${TARGET_BRANCH}" ]; then
echo "Rebasing atop of the latest ${TARGET_BRANCH}"
git rebase origin/${TARGET_BRANCH}
fi
}
function main() {
action="${1:-}"
add_kata_bot_info
case "${action}" in
rebase-atop-of-the-latest-target-branch) rebase_atop_of_the_latest_target_branch;;
*) >&2 echo "Invalid argument"; exit 2 ;;
esac
}
main "$@"

View File

@@ -87,7 +87,7 @@ if command -v go; then
fi
fi
goarch=$("${repo_root_dir}/tests/kata-arch.sh" --golang)
goarch=$(arch_to_golang)
info "Download go version ${go_version}"
kernel_name=$(uname -s)

45
tests/install_rust.sh Executable file
View File

@@ -0,0 +1,45 @@
#!/bin/bash
#
# Copyright (c) 2019 Ant Financial
#
# SPDX-License-Identifier: Apache-2.0
set -o errexit
set -o nounset
set -o pipefail
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
script_name="$(basename "${BASH_SOURCE[0]}")"
source "${script_dir}/common.bash"
rustarch=$(arch_to_rust)
version="${1:-""}"
if [ -z "${version}" ]; then
version=$(get_from_kata_deps "languages.rust.meta.newest-version")
fi
echo "Install rust ${version}"
if ! command -v rustup > /dev/null; then
curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain ${version}
fi
export PATH="${PATH}:${HOME}/.cargo/bin"
## Still try to install the target version of the toolchain,
## in case rustup is already installed but with a
## different toolchain version.
## Even if the target toolchain is already installed,
## this command will not take long to run.
rustup toolchain install ${version}
rustup default ${version}
if [ "${rustarch}" == "powerpc64le" ] || [ "${rustarch}" == "s390x" ] ; then
rustup target add ${rustarch}-unknown-linux-gnu
else
rustup target add ${rustarch}-unknown-linux-musl
$([ "$(whoami)" != "root" ] && echo sudo) ln -sf /usr/bin/g++ /bin/musl-g++
fi
rustup component add rustfmt
rustup component add clippy

View File

@@ -59,7 +59,8 @@ function install_dependencies() {
function run() {
info "Running cri-containerd tests using ${KATA_HYPERVISOR} hypervisor"
return 0
enabling_hypervisor
bash -c ${cri_containerd_dir}/integration-tests.sh
}
function main() {

View File

@@ -21,9 +21,9 @@ export PATH="$PATH:/usr/local/sbin"
export PATH="$PATH:/usr/local/go/bin"
# Runtime to be used for testing
RUNTIME=${RUNTIME:-containerd-shim-kata-v2}
FACTORY_TEST=${FACTORY_TEST:-""}
KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
RUNTIME=${RUNTIME:-containerd-shim-kata-${KATA_HYPERVISOR}-v2}
FACTORY_TEST=${FACTORY_TEST:-""}
USE_DEVMAPPER="${USE_DEVMAPPER:-false}"
ARCH=$(uname -m)
@@ -130,11 +130,17 @@ trap cleanup EXIT
function err_report() {
local log_file="${REPORT_DIR}/containerd.log"
if [ -f "$log_file" ]; then
echo "ERROR: containerd log :"
echo "::group::ERROR: containerd log :"
echo "-------------------------------------"
cat "${log_file}"
echo "-------------------------------------"
echo "::endgroup::"
fi
echo "::group::ERROR: Kata Containers logs : "
echo "-------------------------------------"
sudo journalctl -xe -t kata
echo "-------------------------------------"
echo "::endgroup::"
}
@@ -197,10 +203,12 @@ EOF
}
function testContainerStop() {
info "show pod $podid"
sudo crictl --timeout=20s pods --id $podid
info "stop pod $podid"
sudo crictl stopp $podid
sudo crictl --timeout=20s stopp $podid
info "remove pod $podid"
sudo crictl rmp $podid
sudo crictl --timeout=20s rmp $podid
sudo cp "$default_containerd_config_backup" "$default_containerd_config"
restart_containerd_service

View File

@@ -0,0 +1,58 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o nounset
set -o pipefail
kata_tarball_dir="${2:-kata-artifacts}"
docker_dir="$(dirname "$(readlink -f "$0")")"
source "${docker_dir}/../../common.bash"
function install_dependencies() {
info "Installing the dependencies needed for running the docker smoke test"
# Add Docker's official GPG key:
sudo apt-get update
sudo apt-get -y install ca-certificates curl gnupg
sudo install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg
# Add the repository to Apt sources:
echo \
"deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
"$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
}
function run() {
info "Running docker smoke test tests using ${KATA_HYPERVISOR} hypervisor"
enabling_hypervisor
info "Running docker with runc"
sudo docker run --rm --entrypoint nping instrumentisto/nmap --tcp-connect -c 2 -p 80 www.github.com
info "Running docker with Kata Containers (${KATA_HYPERVISOR})"
sudo docker run --rm --runtime io.containerd.kata-${KATA_HYPERVISOR}.v2 --entrypoint nping instrumentisto/nmap --tcp-connect -c 2 -p 80 www.github.com
}
function main() {
action="${1:-}"
case "${action}" in
install-dependencies) install_dependencies ;;
install-kata) install_kata ;;
run) run ;;
*) >&2 die "Invalid argument" ;;
esac
}
main "$@"

View File

@@ -0,0 +1,34 @@
#!/usr/bin/env bash
# Copyright 2022-2023 Advanced Micro Devices, Inc.
# Copyright 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
source "${BATS_TEST_DIRNAME}/tests_common.sh"
function setup_unencrypted_confidential_pod() {
get_pod_config_dir
export SSH_KEY_FILE="${pod_config_dir}/confidential/unencrypted/ssh/unencrypted"
if [ -n "${PR_NUMBER}" ]; then
# Use correct address in pod yaml
sed -i "s/-nightly/-${PR_NUMBER}/" "${pod_config_dir}/pod-confidential-unencrypted.yaml"
fi
# Set permissions on private key file
sudo chmod 600 "${SSH_KEY_FILE}"
}
# This function relies on `KATA_HYPERVISOR` being an environment variable
# and returns the remote command to be executed to that specific hypervisor
# in order to identify whether the workload is running on a TEE environment
function get_remote_command_per_hypervisor() {
declare -A REMOTE_COMMAND_PER_HYPERVISOR
REMOTE_COMMAND_PER_HYPERVISOR[qemu-sev]="dmesg | grep \"Memory Encryption Features active:.*\(SEV$\|SEV \)\""
REMOTE_COMMAND_PER_HYPERVISOR[qemu-snp]="dmesg | grep \"Memory Encryption Features active:.*SEV-SNP\""
REMOTE_COMMAND_PER_HYPERVISOR[qemu-tdx]="cpuid | grep TDX_GUEST"
echo "${REMOTE_COMMAND_PER_HYPERVISOR[${KATA_HYPERVISOR}]}"
}
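# Usage sketch (hypervisor value assumed for illustration):
#   KATA_HYPERVISOR=qemu-tdx get_remote_command_per_hypervisor  # prints: cpuid | grep TDX_GUEST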

View File

@@ -9,63 +9,100 @@ set -o nounset
set -o pipefail
kubernetes_dir="$(dirname "$(readlink -f "$0")")"
source "${kubernetes_dir}/../../common.bash"
source "${kubernetes_dir}/../../gha-run-k8s-common.sh"
# shellcheck disable=2154
tools_dir="${repo_root_dir}/tools"
AZ_RG="${AZ_RG:-kataCI}"
DOCKER_REGISTRY=${DOCKER_REGISTRY:-quay.io}
DOCKER_REPO=${DOCKER_REPO:-kata-containers/kata-deploy-ci}
DOCKER_TAG=${DOCKER_TAG:-kata-containers-latest}
KATA_DEPLOY_WAIT_TIMEOUT=${KATA_DEPLOY_WAIT_TIMEOUT:-10m}
KATA_HYPERVISOR=${KATA_HYPERVISOR:-qemu}
KUBERNETES="${KUBERNETES:-}"
function _print_cluster_name() {
short_sha="$(git rev-parse --short=12 HEAD)"
echo "${GH_PR_NUMBER}-${short_sha}-${KATA_HYPERVISOR}-${KATA_HOST_OS}-amd64"
function configure_devmapper() {
sudo mkdir -p /var/lib/containerd/devmapper
sudo truncate --size 10G /var/lib/containerd/devmapper/data-disk.img
sudo truncate --size 10G /var/lib/containerd/devmapper/meta-disk.img
cat<<EOF | sudo tee /etc/systemd/system/containerd-devmapper.service
[Unit]
Description=Setup containerd devmapper device
DefaultDependencies=no
After=systemd-udev-settle.service
Before=lvm2-activation-early.service
Wants=systemd-udev-settle.service
[Service]
Type=oneshot
RemainAfterExit=true
ExecStart=-/sbin/losetup /dev/loop20 /var/lib/containerd/devmapper/data-disk.img
ExecStart=-/sbin/losetup /dev/loop21 /var/lib/containerd/devmapper/meta-disk.img
[Install]
WantedBy=local-fs.target
EOF
sudo systemctl daemon-reload
sudo systemctl enable --now containerd-devmapper
# Time to set up the thin pool for consumption.
# The table arguments are as follows:
# start block in the virtual device
# length of the segment (block device size in bytes / sector size (512))
# metadata device
# block data device
# data_block_size, currently set to 512 sectors (256KB)
# low_water_mark, copied from the containerd snapshotter test setup
# no. of feature arguments
# Skip zeroing blocks for new volumes.
sudo dmsetup create contd-thin-pool \
--table "0 20971520 thin-pool /dev/loop21 /dev/loop20 512 32768 1 skip_block_zeroing"
case "${KUBERNETES}" in
k3s)
containerd_config_file="/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl"
sudo cp /var/lib/rancher/k3s/agent/etc/containerd/config.toml ${containerd_config_file}
;;
*) >&2 echo "${KUBERNETES} flavour is not supported"; exit 2 ;;
esac
# We're not using this with baremetal machines, so we're fine cutting
# corners here and just appending this to the configuration file.
cat<<EOF | sudo tee -a ${containerd_config_file}
[plugins."io.containerd.snapshotter.v1.devmapper"]
pool_name = "contd-thin-pool"
base_image_size = "4096MB"
EOF
case "${KUBERNETES}" in
k3s)
sudo sed -i -e 's/snapshotter = "overlayfs"/snapshotter = "devmapper"/g' ${containerd_config_file}
sudo systemctl restart k3s ;;
*) >&2 echo "${KUBERNETES} flavour is not supported"; exit 2 ;;
esac
sleep 60s
sudo cat ${containerd_config_file}
}
function install_azure_cli() {
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
# The aks-preview extension is required while the Mariner Kata host is in preview.
az extension add --name aks-preview
}
function configure_snapshotter() {
echo "::group::Configuring ${SNAPSHOTTER}"
function login_azure() {
az login \
--service-principal \
-u "${AZ_APPID}" \
-p "${AZ_PASSWORD}" \
--tenant "${AZ_TENANT_ID}"
}
case ${SNAPSHOTTER} in
devmapper) configure_devmapper ;;
*) >&2 echo "${SNAPSHOTTER} flavour is not supported"; exit 2 ;;
esac
function create_cluster() {
# First, ensure that the cluster didn't fail to get cleaned up from a previous run.
delete_cluster || true
az aks create \
-g "${AZ_RG}" \
-n "$(_print_cluster_name)" \
-s "Standard_D4s_v5" \
--node-count 1 \
--generate-ssh-keys \
$([ "${KATA_HOST_OS}" = "cbl-mariner" ] && echo "--os-sku AzureLinux --workload-runtime KataMshvVmIsolation")
}
function install_bats() {
sudo apt-get update
sudo apt-get -y install bats
}
function install_kubectl() {
sudo az aks install-cli
}
function get_cluster_credentials() {
az aks get-credentials \
-g "${AZ_RG}" \
-n "$(_print_cluster_name)"
echo "::endgroup::"
}
function deploy_kata() {
platform="${1}"
ensure_yq
# Ensure we're in the default namespace
[ "$platform" = "kcli" ] && \
export KUBECONFIG="$HOME/.kcli/clusters/${CLUSTER_NAME:-kata-k8s}/auth/kubeconfig"
# Ensure we're in the default namespace
kubectl config set-context --current --namespace=default
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
@@ -82,16 +119,16 @@ function deploy_kata() {
echo "::group::Final kata-deploy.yaml that is used in the test"
cat "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
cat "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" | grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" || die "Failed to setup the tests image"
grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" || die "Failed to setup the tests image"
echo "::endgroup::"
kubectl apply -f "${tools_dir}/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
if [ "${platform}" = "tdx" ]; then
if [ "${KUBERNETES}" = "k3s" ]; then
kubectl apply -k "${tools_dir}/packaging/kata-deploy/kata-deploy/overlays/k3s"
else
kubectl apply -f "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
fi
kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
kubectl -n kube-system wait --timeout="${KATA_DEPLOY_WAIT_TIMEOUT}" --for=condition=Ready -l name=kata-deploy pod
# This is needed as the kata-deploy pod will be set to "Ready" when it starts running,
# which may cause issues like not having the node properly labeled or the artefacts
@@ -103,7 +140,7 @@ function deploy_kata() {
fi
echo "::group::kata-deploy logs"
kubectl -n kube-system logs -l name=kata-deploy
kubectl -n kube-system logs --tail=100 -l name=kata-deploy
echo "::endgroup::"
echo "::group::Runtime classes"
@@ -112,11 +149,16 @@ function deploy_kata() {
}
function run_tests() {
platform="${1:-}"
[ "$platform" = "kcli" ] && \
export KUBECONFIG="$HOME/.kcli/clusters/${CLUSTER_NAME:-kata-k8s}/auth/kubeconfig"
# Delete any spurious tests namespace that was left behind
kubectl delete namespace kata-containers-k8s-tests &> /dev/null || true
# Create a new namespace for the tests and switch to it
kubectl apply -f ${kubernetes_dir}/runtimeclass_workloads/tests-namespace.yaml
kubectl apply -f "${kubernetes_dir}/runtimeclass_workloads/tests-namespace.yaml"
kubectl config set-context --current --namespace=kata-containers-k8s-tests
pushd "${kubernetes_dir}"
@@ -127,13 +169,17 @@ function run_tests() {
function cleanup() {
platform="${1}"
test_type="${2:-k8s}"
ensure_yq
[ "$platform" = "kcli" ] && \
export KUBECONFIG="$HOME/.kcli/clusters/${CLUSTER_NAME:-kata-k8s}/auth/kubeconfig"
echo "Gather information about the nodes and pods before cleaning up the node"
get_nodes_and_pods_info
if [ "${platform}" = "aks" ]; then
delete_cluster
delete_cluster ${test_type}
return
fi
@@ -141,7 +187,7 @@ function cleanup() {
kubectl config set-context --current --namespace=default
kubectl delete namespace kata-containers-k8s-tests
if [ "${platform}" = "tdx" ]; then
if [ "${KUBERNETES}" = "k3s" ]; then
deploy_spec="-k "${tools_dir}/packaging/kata-deploy/kata-deploy/overlays/k3s""
cleanup_spec="-k "${tools_dir}/packaging/kata-deploy/kata-cleanup/overlays/k3s""
else
@@ -149,6 +195,7 @@ function cleanup() {
cleanup_spec="-f "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml""
fi
# shellcheck disable=2086
kubectl delete ${deploy_spec}
kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
@@ -157,28 +204,19 @@ function cleanup() {
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
cat "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
cat "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" | grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" || die "Failed to setup the tests image"
grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" || die "Failed to setup the tests image"
# shellcheck disable=2086
kubectl apply ${cleanup_spec}
sleep 180s
# shellcheck disable=2086
kubectl delete ${cleanup_spec}
kubectl delete -f "${tools_dir}/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
}
function delete_cluster() {
az aks delete \
-g "${AZ_RG}" \
-n "$(_print_cluster_name)" \
--yes
}
function get_nodes_and_pods_info() {
kubectl debug $(kubectl get nodes -o name) -it --image=quay.io/kata-containers/kata-debug:latest || true
kubectl get pods -o name | grep node-debugger | xargs kubectl delete || true
}
function main() {
export KATA_HOST_OS="${KATA_HOST_OS:-}"
export K8S_TEST_HOST_TYPE="${K8S_TEST_HOST_TYPE:-}"
action="${1:-}"
@@ -186,18 +224,28 @@ function main() {
install-azure-cli) install_azure_cli ;;
login-azure) login_azure ;;
create-cluster) create_cluster ;;
create-cluster-kcli) create_cluster_kcli ;;
configure-snapshotter) configure_snapshotter ;;
setup-crio) setup_crio ;;
deploy-k8s) deploy_k8s ;;
install-bats) install_bats ;;
install-kubectl) install_kubectl ;;
get-cluster-credentials) get_cluster_credentials ;;
deploy-kata-aks) deploy_kata "aks" ;;
deploy-kata-kcli) deploy_kata "kcli" ;;
deploy-kata-sev) deploy_kata "sev" ;;
deploy-kata-snp) deploy_kata "snp" ;;
deploy-kata-tdx) deploy_kata "tdx" ;;
deploy-kata-garm) deploy_kata "garm" ;;
run-tests) run_tests ;;
run-tests-kcli) run_tests "kcli" ;;
cleanup-kcli) cleanup "kcli" ;;
cleanup-sev) cleanup "sev" ;;
cleanup-snp) cleanup "snp" ;;
cleanup-tdx) cleanup "tdx" ;;
cleanup-garm) cleanup "garm" ;;
delete-cluster) cleanup "aks" ;;
delete-cluster-kcli) delete_cluster_kcli ;;
*) >&2 echo "Invalid argument"; exit 2 ;;
esac
}

View File

@@ -0,0 +1,49 @@
#!/usr/bin/env bats
# Copyright 2022-2023 Advanced Micro Devices, Inc.
# Copyright 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/confidential_common.sh"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
SUPPORTED_HYPERVISORS=("qemu-sev" "qemu-snp" "qemu-tdx")
# This check must be done with "<SPACE>${KATA_HYPERVISOR}<SPACE>" to avoid
# having substrings, like qemu, being matched with qemu-$something.
[[ " ${SUPPORTED_HYPERVISORS[*]} " =~ " ${KATA_HYPERVISOR} " ]] || skip "Test not supported for ${KATA_HYPERVISOR}."
get_pod_config_dir
setup_unencrypted_confidential_pod
}
@test "Test unencrypted confidential container launch success and verify that we are running in a secure enclave." {
# Start the service/deployment/pod
kubectl apply -f "${pod_config_dir}/pod-confidential-unencrypted.yaml"
# Retrieve pod name, wait for it to come up, retrieve pod ip
pod_name=$(kubectl get pod -o wide | grep "confidential-unencrypted" | awk '{print $1;}')
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "${pod_name}"
pod_ip=$(kubectl get pod -o wide | grep "confidential-unencrypted" | awk '{print $6;}')
# Run the remote command
coco_enabled=$(ssh -i ${SSH_KEY_FILE} -o "StrictHostKeyChecking no" -o "PasswordAuthentication=no" root@${pod_ip} /bin/sh -c "$(get_remote_command_per_hypervisor)" || true)
if [ -z "$coco_enabled" ]; then
>&2 echo -e "Confidential compute is expected but not enabled."
return 1
fi
}
teardown() {
[[ " ${SUPPORTED_HYPERVISORS[*]} " =~ " ${KATA_HYPERVISOR} " ]] || skip "Test not supported for ${KATA_HYPERVISOR}."
kubectl describe "pod/${pod_name}" || true
kubectl delete -f "${pod_config_dir}/pod-confidential-unencrypted.yaml" || true
}

View File

@@ -10,6 +10,7 @@ load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "dragonball" ] && skip "test not working see: ${dragonball_limitations}"
( [ "${KATA_HYPERVISOR}" == "qemu-tdx" ] || [ "${KATA_HYPERVISOR}" == "qemu-snp" ] || [ "${KATA_HYPERVISOR}" == "qemu-sev" ] ) \
&& skip "TEEs do not support memory / CPU hotplug"
@@ -28,12 +29,6 @@ setup() {
}
@test "Check CPU constraints" {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "dragonball" ] && skip "test not working see: ${dragonball_limitations}"
( [ "${KATA_HYPERVISOR}" == "qemu-tdx" ] || [ "${KATA_HYPERVISOR}" == "qemu-snp" ] || [ "${KATA_HYPERVISOR}" == "qemu-sev" ] ) \
&& skip "TEEs do not support memory / CPU hotplug"
# Create the pod
kubectl create -f "${pod_config_dir}/pod-cpu.yaml"
@@ -76,6 +71,7 @@ setup() {
teardown() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "dragonball" ] && skip "test not working see: ${dragonball_limitations}"
( [ "${KATA_HYPERVISOR}" == "qemu-tdx" ] || [ "${KATA_HYPERVISOR}" == "qemu-snp" ] || [ "${KATA_HYPERVISOR}" == "qemu-sev" ] ) \
&& skip "TEEs do not support memory / CPU hotplug"

View File

@@ -10,13 +10,12 @@ load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "fcr" ] && skip "test not working see: ${fc_limitations}"
get_pod_config_dir
}
@test "Credentials using secrets" {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
secret_name="test-secret"
pod_name="secret-test-pod"
second_pod_name="secret-envars-test-pod"
@@ -52,6 +51,7 @@ setup() {
teardown() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
# Debugging information
kubectl describe "pod/$pod_name"

View File

@@ -11,22 +11,24 @@ TEST_INITRD="${TEST_INITRD:-no}"
setup() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
pod_name="test-file-volume"
container_name="busybox-file-volume-container"
tmp_file=$(exec_host mktemp /tmp/file-volume-test-foo.XXXXX)
node="$(get_one_kata_node)"
tmp_file=$(exec_host "$node" mktemp /tmp/file-volume-test-foo.XXXXX)
mount_path="/tmp/foo.txt"
file_body="test"
get_pod_config_dir
}
@test "Test readonly volume for pods" {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
# Write test body to temp file
exec_host "echo "$file_body" > $tmp_file"
exec_host "$node" "echo "$file_body" > $tmp_file"
# Create test yaml
sed -e "s|HOST_FILE|$tmp_file|" ${pod_config_dir}/pod-file-volume.yaml > ${pod_config_dir}/test-pod-file-volume.yaml
sed -i "s|MOUNT_PATH|$mount_path|" ${pod_config_dir}/test-pod-file-volume.yaml
sed -i "s|NODE|$node|" ${pod_config_dir}/test-pod-file-volume.yaml
# Create pod
kubectl create -f "${pod_config_dir}/test-pod-file-volume.yaml"
@@ -41,7 +43,8 @@ setup() {
teardown() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
kubectl delete pod "$pod_name"
exec_host rm -f $tmp_file
exec_host "$node" rm -f $tmp_file
rm -f ${pod_config_dir}/test-pod-file-volume.yaml
}

View File

@@ -10,6 +10,7 @@ load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
get_pod_config_dir
}
@@ -40,6 +41,7 @@ setup() {
teardown() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"

View File

@@ -15,7 +15,7 @@ setup() {
get_pod_config_dir
}
@test "Check PID namespaces" {
@test "Kill all processes in container" {
# Create the pod
kubectl create -f "${pod_config_dir}/initcontainer-shareprocesspid.yaml"

View File

@@ -10,6 +10,7 @@ load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
get_pod_config_dir
@@ -30,6 +31,7 @@ setup() {
teardown() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
# Debugging information
kubectl describe "pod/$pod_name"

View File

@@ -8,6 +8,8 @@ load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: https://github.com/kata-containers/kata-containers/issues/7873"
get_pod_config_dir
}
@@ -31,5 +33,12 @@ setup() {
}
teardown() {
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: https://github.com/kata-containers/kata-containers/issues/7873"
# Debugging information
kubectl describe deployment ${deployment_name}
# Clean-up
kubectl delete -f "${pod_config_dir}/pod-quota-deployment.yaml"
kubectl delete -f "${pod_config_dir}/resource-quota.yaml"
}

View File

@@ -10,13 +10,12 @@ load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
get_pod_config_dir
}
@test "Projected volume" {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
password="1f2d1e2e67df"
username="admin"
pod_name="test-projected-volume"
@@ -53,6 +52,7 @@ setup() {
teardown() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
# Debugging information
kubectl describe "pod/$pod_name"

View File

@@ -11,16 +11,21 @@ TEST_INITRD="${TEST_INITRD:-no}"
setup() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
get_pod_config_dir
tmp_file=$(exec_host mktemp -d /tmp/data.XXXX)
node=$(get_one_kata_node)
tmp_file=$(exec_host "$node" mktemp -d /tmp/data.XXXX)
pv_yaml=$(mktemp --tmpdir pv_config.XXXXXX.yaml)
pod_yaml=$(mktemp --tmpdir pod_config.XXXXXX.yaml)
msg="Hello from Kubernetes"
exec_host "echo $msg > $tmp_file/index.html"
exec_host "$node" "echo $msg > $tmp_file/index.html"
pod_name="pv-pod"
# Define temporary file at yaml
sed -e "s|tmp_data|${tmp_file}|g" ${pod_config_dir}/pv-volume.yaml > "$pod_yaml"
sed -e "s|tmp_data|${tmp_file}|g" ${pod_config_dir}/pv-volume.yaml > "$pv_yaml"
sed -e "s|NODE|${node}|g" "${pod_config_dir}/pv-pod.yaml" > "$pod_yaml"
}
@test "Create Persistent Volume" {
@@ -30,7 +35,7 @@ setup() {
volume_claim="pv-claim"
# Create the persistent volume
kubectl create -f "$pod_yaml"
kubectl create -f "$pv_yaml"
# Check the persistent volume is Available
cmd="kubectl get pv $volume_name | grep Available"
@@ -44,7 +49,7 @@ setup() {
waitForProcess "$wait_time" "$sleep_time" "$cmd"
# Create pod
kubectl create -f "${pod_config_dir}/pv-pod.yaml"
kubectl create -f "$pod_yaml"
# Check pod creation
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
@@ -55,13 +60,15 @@ setup() {
teardown() {
[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
[ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
# Debugging information
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
rm -f "$pod_yaml"
kubectl delete pvc "$volume_claim"
kubectl delete pv "$volume_name"
rm -f "$pod_yaml"
exec_host rm -rf "$tmp_file"
rm -f "$pv_yaml"
exec_host "$node" rm -rf "$tmp_file"
}

View File

@@ -13,11 +13,13 @@ source "${kubernetes_dir}/../../common.bash"
TARGET_ARCH="${TARGET_ARCH:-x86_64}"
KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
K8S_TEST_DEBUG="${K8S_TEST_DEBUG:-false}"
K8S_TEST_HOST_TYPE="${K8S_TEST_HOST_TYPE:-small}"
if [ -n "${K8S_TEST_UNION:-}" ]; then
K8S_TEST_UNION=($K8S_TEST_UNION)
else
K8S_TEST_UNION=( \
K8S_TEST_SMALL_HOST_UNION=( \
"k8s-confidential.bats" \
"k8s-attach-handlers.bats" \
"k8s-caps.bats" \
"k8s-configmap.bats" \
@@ -36,18 +38,15 @@ else
"k8s-liveness-probes.bats" \
"k8s-memory.bats" \
"k8s-nested-configmap-secret.bats" \
"k8s-number-cpus.bats" \
"k8s-oom.bats" \
"k8s-optional-empty-configmap.bats" \
"k8s-optional-empty-secret.bats" \
"k8s-parallel.bats" \
"k8s-pid-ns.bats" \
"k8s-pod-quota.bats" \
"k8s-port-forward.bats" \
"k8s-projected-volume.bats" \
"k8s-qos-pods.bats" \
"k8s-replication.bats" \
"k8s-scale-nginx.bats" \
"k8s-seccomp.bats" \
"k8s-sysctls.bats" \
"k8s-security-context.bats" \
@@ -55,6 +54,29 @@ else
"k8s-volume.bats" \
"k8s-nginx-connectivity.bats" \
)
K8S_TEST_NORMAL_HOST_UNION=( \
"k8s-number-cpus.bats" \
"k8s-parallel.bats" \
"k8s-scale-nginx.bats" \
)
case ${K8S_TEST_HOST_TYPE} in
small)
K8S_TEST_UNION=(${K8S_TEST_SMALL_HOST_UNION[@]})
;;
normal)
K8S_TEST_UNION=(${K8S_TEST_NORMAL_HOST_UNION[@]})
;;
baremetal)
K8S_TEST_UNION=(${K8S_TEST_SMALL_HOST_UNION[@]} ${K8S_TEST_NORMAL_HOST_UNION[@]})
;;
*)
echo "${K8S_TEST_HOST_TYPE} is an invalid K8S_TEST_HOST_TYPE option. Valid options are: small | normal | baremetal"
return 1
;;
esac
fi
# we may need to skip a few test cases when running on non-x86_64 arch

View File

@@ -0,0 +1,37 @@
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# We know that using latest is error prone, but we're taking the risk here.
# hadolint ignore=DL3007
FROM alpine:latest
# We don't need a specific version of those packages
# hadolint ignore=DL3018
RUN apk add --no-cache curl openssh-server
# Download and install `cpuid`, which will be used to detect
# whether the container is running on a TEE guest
# hadolint ignore=DL3059
RUN /bin/sh -c \
'ARCH=$(uname -m) && \
[[ "${ARCH}" == "x86_64" ]] && \
curl -LO https://github.com/klauspost/cpuid/releases/download/v2.2.5/cpuid-Linux_x86_64_2.2.5.tar.gz && \
tar -xvzf cpuid-Linux_x86_64_2.2.5.tar.gz -C /usr/bin && \
rm -rf cpuid-Linux_x86_64_2.2.5.tar.gz && \
rm -f /usr/bin/LICENSE' || true
# This is done just to avoid the following error starting sshd
# `sshd: no hostkeys available -- exiting.`
# hadolint ignore=DL3059
RUN ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -P ""
# A password needs to be set for login to work. An empty password is
# unproblematic as password-based login to root is not allowed.
# hadolint ignore=DL3059
RUN passwd -d root
# Generated with `ssh-keygen -t ed25519 -f unencrypted -P "" -C ""`
COPY ssh/unencrypted.pub /root/.ssh/authorized_keys
ENTRYPOINT ["/usr/sbin/sshd", "-D"]

View File

@@ -0,0 +1,7 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACD5RDTjWd8c793pKpOUGt+/D+Fa7PMVUQtSudt6R8JMYAAAAIh44GnReOBp
0QAAAAtzc2gtZWQyNTUxOQAAACD5RDTjWd8c793pKpOUGt+/D+Fa7PMVUQtSudt6R8JMYA
AAAEDwZtSRH/KNwmm/QCMHcif3iMQpGPOr2d12hcQqMY3KJPlENONZ3xzv3ekqk5Qa378P
4Vrs8xVRC1K523pHwkxgAAAAAAECAwQF
-----END OPENSSH PRIVATE KEY-----

View File

@@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPlENONZ3xzv3ekqk5Qa378P4Vrs8xVRC1K523pHwkxg

View File

@@ -16,10 +16,8 @@ spec:
args: ["-c", "inotifywait --timeout 120 -r /config/ && [[ -L /config/config.toml ]] && echo success" ]
resources:
requests:
cpu: 1
memory: 50Mi
limits:
cpu: 1
memory: 1024Mi
volumeMounts:
- name: config

View File

@@ -0,0 +1,33 @@
# Copyright (c) 2023 Advanced Micro Devices, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
kind: Service
apiVersion: v1
metadata:
name: "confidential-unencrypted"
spec:
selector:
app: "confidential-unencrypted"
ports:
- port: 22
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: "confidential-unencrypted"
spec:
selector:
matchLabels:
app: "confidential-unencrypted"
template:
metadata:
labels:
app: "confidential-unencrypted"
spec:
runtimeClassName: kata
containers:
- name: "confidential-unencrypted"
image: ghcr.io/kata-containers/test-images:unencrypted-nightly
imagePullPolicy: Always

View File

@@ -11,6 +11,7 @@ spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
restartPolicy: Never
nodeName: NODE
volumes:
- name: shared-file
hostPath:

View File

@@ -10,6 +10,7 @@ metadata:
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
nodeName: NODE
volumes:
- name: pv-storage
persistentVolumeClaim:

View File

@@ -22,18 +22,20 @@ set_runtime_class() {
set_kernel_path() {
if [[ "${KATA_HOST_OS}" = "cbl-mariner" ]]; then
mariner_kernel_path="/usr/share/cloud-hypervisor/vmlinux.bin"
find ${kubernetes_dir}/runtimeclass_workloads_work/*.yaml -exec yq write -i {} 'metadata.annotations[io.katacontainers.config.hypervisor.kernel]' "${mariner_kernel_path}" \;
# Not using find -exec as that still returns 0 on failure.
find ${kubernetes_dir}/runtimeclass_workloads_work/*.yaml -print0 | xargs -0 -I% yq write -i % 'metadata.annotations[io.katacontainers.config.hypervisor.kernel]' "${mariner_kernel_path}"
fi
}
set_initrd_path() {
if [[ "${KATA_HOST_OS}" = "cbl-mariner" ]]; then
initrd_path="/opt/kata/share/kata-containers/kata-containers-initrd-mariner.img"
find ${kubernetes_dir}/runtimeclass_workloads_work/*.yaml -exec yq write -i {} 'metadata.annotations[io.katacontainers.config.hypervisor.initrd]' "${initrd_path}" \;
find ${kubernetes_dir}/runtimeclass_workloads_work/*.yaml -print0 | xargs -0 -I% yq write -i % 'metadata.annotations[io.katacontainers.config.hypervisor.initrd]' "${initrd_path}"
fi
}
main() {
ensure_yq
reset_workloads_work_dir
set_runtime_class
set_kernel_path

View File

@@ -38,13 +38,34 @@ get_pod_config_dir() {
info "k8s configured to use runtimeclass"
}
# Return the first worker found that is kata-runtime labeled.
get_one_kata_node() {
local resource_name
resource_name="$(kubectl get node -l katacontainers.io/kata-runtime=true -o name | head -1)"
# Remove the leading "node/"
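# e.g. a resource name of "node/worker-0" (node name assumed for illustration) yields "worker-0"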
echo "${resource_name/"node/"}"
}
# Runs a command in the host filesystem.
#
# Parameters:
# $1 - the node name
#
exec_host() {
node="$(kubectl get node -o name)"
node="$1"
# `kubectl debug` always returns 0, so we hack it to return the right exit code.
command="$@"
command="${@:2}"
command+='; echo -en \\n$?'
output="$(kubectl debug -qit "${node}" --image=alpine:latest -- chroot /host bash -c "${command}")"
# We're trailing the `\r` here due to: https://github.com/kata-containers/kata-containers/issues/8051
# tl;dr: When testing with CRI-O we're facing the following error:
# ```
# (from function `exec_host' in file tests_common.sh, line 51,
# in test file k8s-file-volume.bats, line 25)
# `exec_host "echo "$file_body" > $tmp_file"' failed with status 127
# [bats-exec-test:38] INFO: k8s configured to use runtimeclass
# bash: line 1: $'\r': command not found
# ```
output="$(kubectl debug -qit "node/${node}" --image=alpine:latest -- chroot /host bash -c "${command}" | tr -d '\r')"
kubectl get pods -o name | grep node-debugger | xargs kubectl delete > /dev/null
exit_code="$(echo "${output}" | tail -1)"
echo "$(echo "${output}" | head -n -1)"

View File

@@ -0,0 +1,83 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o nounset
set -o pipefail
kata_tarball_dir="${2:-kata-artifacts}"
nerdctl_dir="$(dirname "$(readlink -f "$0")")"
source "${nerdctl_dir}/../../common.bash"
function install_dependencies() {
info "Installing the dependencies for running the nerdctl tests"
# Dependency list of projects for which we can rely on the system packages
# - wget
# - Used to download the nerdctl-full tarball
# - pip
# - Used to install lastversion, which will be used to get the latest
# release of nerdctl
declare -a system_deps=(
wget
pip
)
sudo apt update
sudo apt -y install "${system_deps[@]}"
# Install lastversion from pip
#
# --break-system-packages is, unfortunately, needed here as it'll also
# bring in some python3 dependencies on its own
pip install lastversion --break-system-packages
# As the command above will install lastversion on $HOME/.local/bin, we
# need to add it to the PATH
export PATH=$PATH:${HOME}/.local/bin
# Download the nerdctl-full tarball, as it comes with all the deps
# needed.
nerdctl_latest_version=$(lastversion containerd/nerdctl)
wget https://github.com/containerd/nerdctl/releases/download/v${nerdctl_latest_version}/nerdctl-full-${nerdctl_latest_version}-linux-amd64.tar.gz
# Unpack the latest nerdctl into /usr/local/
sudo tar -xvf nerdctl-full-${nerdctl_latest_version}-linux-amd64.tar.gz -C /usr/local/
# Start containerd service
sudo systemctl daemon-reload
sudo systemctl start containerd
# Create the default containerd configuration
sudo mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml
sudo systemctl restart containerd
}
function run() {
info "Running nerdctl smoke test tests using ${KATA_HYPERVISOR} hypervisor"
enabling_hypervisor
info "Running nerdctl with runc"
sudo nerdctl run --rm --entrypoint nping instrumentisto/nmap --tcp-connect -c 2 -p 80 www.github.com
info "Running nerdctl with Kata Containers (${KATA_HYPERVISOR})"
sudo nerdctl run --rm --runtime io.containerd.kata-${KATA_HYPERVISOR}.v2 --entrypoint nping instrumentisto/nmap --tcp-connect -c 2 -p 80 www.github.com
}
function main() {
action="${1:-}"
case "${action}" in
install-dependencies) install_dependencies ;;
install-kata) install_kata ;;
run) run ;;
*) >&2 die "Invalid argument" ;;
esac
}
main "$@"

View File

@@ -16,20 +16,48 @@ source "${nydus_dir}/../../common.bash"
function install_dependencies() {
info "Installing the dependencies needed for running the nydus tests"
return 0
# Dependency list of projects for which we can rely on the system packages
# - jq
declare -a system_deps=(
jq
)
sudo apt-get update
sudo apt-get -y install "${system_deps[@]}"
ensure_yq
# Dependency list of projects that we can install
# directly from their releases on GitHub:
# - containerd
# - cri-containerd-cni release tarball already includes CNI plugins
# - cri-tools
# - nydus
# - nydus-snapshotter
declare -a github_deps
github_deps[0]="cri_containerd:$(get_from_kata_deps "externals.containerd.${CONTAINERD_VERSION}")"
github_deps[1]="cri_tools:$(get_from_kata_deps "externals.critools.latest")"
github_deps[2]="nydus:$(get_from_kata_deps "externals.nydus.version")"
github_deps[3]="nydus_snapshotter:$(get_from_kata_deps "externals.nydus-snapshotter.version")"
for github_dep in "${github_deps[@]}"; do
IFS=":" read -r -a dep <<< "${github_dep}"
install_${dep[0]} "${dep[1]}"
done
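# e.g. a hypothetical entry "nydus:v2.2.4" is split into dep=(nydus v2.2.4)
# and dispatched as: install_nydus "v2.2.4"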
}
function run() {
info "Running nydus tests using ${KATA_HYPERVISOR} hypervisor"
return 0
enabling_hypervisor
bash -c "${nydus_dir}/nydus_tests.sh"
}
function main() {
action="${1:-}"
case "${action}" in
install-dependencies) install_dependencies ;;
install-kata) return 0 ;;
install-kata) install_kata ;;
run) run ;;
*) >&2 die "Invalid argument" ;;
esac

View File

@@ -1,5 +1,7 @@
metadata:
name: nydus-container
namespace: default
uid: nydus-containerd-uid
image:
image: ghcr.io/dragonflyoss/image-service/alpine:nydus-latest
command:

View File

@@ -2,4 +2,5 @@ metadata:
attempt: 1
name: nydus-sandbox
namespace: default
uid: nydus-sandbox-uid
log_directory: /tmp

View File

@@ -12,8 +12,7 @@ set -o pipefail
set -o errtrace
dir_path=$(dirname "$0")
source "${dir_path}/../../lib/common.bash"
source "${dir_path}/../../.ci/lib.sh"
source "${dir_path}/../../common.bash"
source "/etc/os-release" || source "/usr/lib/os-release"
KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
@@ -30,56 +29,20 @@ containerd_config_backup="/tmp/containerd.config.toml"
# test image for container
IMAGE="${IMAGE:-ghcr.io/dragonflyoss/image-service/alpine:nydus-latest}"
if [ "$KATA_HYPERVISOR" != "qemu" ] && [ "$KATA_HYPERVISOR" != "cloud-hypervisor" ] && [ "$KATA_HYPERVISOR" != "dragonball" ]; then
echo "Skip nydus test for $KATA_HYPERVISOR, it only works for QEMU/CLH/DB now."
if [ "$KATA_HYPERVISOR" != "qemu" ] && [ "$KATA_HYPERVISOR" != "clh" ]; then
echo "Skip nydus test for $KATA_HYPERVISOR, it only works for QEMU/CLH now."
exit 0
fi
arch="$(uname -m)"
if [ "$arch" != "x86_64" ]; then
echo "Skip nydus test for $arch, it only works for x86_64 now. See https://github.com/kata-containers/tests/issues/4445"
exit 0
fi
function install_from_tarball() {
local package_name="$1"
local binary_name="$2"
[ -n "$package_name" ] || die "need package_name"
[ -n "$binary_name" ] || die "need package release binary_name"
local url=$(get_version "externals.${package_name}.url")
local version=$(get_version "externals.${package_name}.version")
local tarball_url="${url}/releases/download/${version}/${binary_name}-${version}-$arch.tgz"
if [ "${package_name}" == "nydus" ]; then
local goarch="$(${dir_path}/../../.ci/kata-arch.sh --golang)"
tarball_url="${url}/releases/download/${version}/${binary_name}-${version}-linux-$goarch.tgz"
fi
echo "Download tarball from ${tarball_url}"
curl -Ls "$tarball_url" | sudo tar xfz - -C /usr/local/bin --strip-components=1
}
function setup_nydus() {
# install nydus
install_from_tarball "nydus" "nydus-static"
# install nydus-snapshotter
install_from_tarball "nydus-snapshotter" "nydus-snapshotter"
# Config nydus snapshotter
sudo -E cp "$dir_path/nydusd-config.json" /etc/
sudo -E cp "$dir_path/snapshotter-config.toml" /etc/
# start nydus-snapshotter
nohup /usr/local/bin/containerd-nydus-grpc \
--config-path /etc/nydusd-config.json \
--shared-daemon \
--log-level debug \
--root /var/lib/containerd/io.containerd.snapshotter.v1.nydus \
--cache-dir /var/lib/nydus/cache \
--nydusd-path /usr/local/bin/nydusd \
--nydusimg-path /usr/local/bin/nydus-image \
--disable-cache-manager true \
--enable-nydus-overlayfs true \
--log-to-stdout >/dev/null 2>&1 &
sudo nohup /usr/local/bin/containerd-nydus-grpc \
--config /etc/snapshotter-config.toml \
--nydusd-config /etc/nydusd-config.json &
}
function config_kata() {
@@ -136,40 +99,51 @@ function config_containerd() {
[plugins.cri.containerd.runtimes.runc.options]
BinaryName = "${runc_path}"
Root = ""
[plugins.cri.containerd.runtimes.kata]
runtime_type = "io.containerd.kata.v2"
[plugins.cri.containerd.runtimes.kata-${KATA_HYPERVISOR}]
runtime_type = "io.containerd.kata-${KATA_HYPERVISOR}.v2"
privileged_without_host_devices = true
EOF
}
function check_nydus_snapshotter_exist() {
echo "check_nydus_snapshotter_exist"
bin="containerd-nydus-grpc"
if pgrep -f "$bin" >/dev/null; then
echo "nydus-snapshotter is running"
else
die "nydus-snapshotter is not running"
fi
}
function setup() {
setup_nydus
config_kata
config_containerd
restart_containerd_service
check_processes
check_nydus_snapshotter_exist
extract_kata_env
}
function run_test() {
sudo -E crictl pull "${IMAGE}"
pod=$(sudo -E crictl runp -r kata $dir_path/nydus-sandbox.yaml)
sudo -E crictl --timeout=20s pull "${IMAGE}"
pod=$(sudo -E crictl --timeout=20s runp -r kata-${KATA_HYPERVISOR} $dir_path/nydus-sandbox.yaml)
echo "Pod $pod created"
cnt=$(sudo -E crictl create $pod $dir_path/nydus-container.yaml $dir_path/nydus-sandbox.yaml)
cnt=$(sudo -E crictl --timeout=20s create $pod $dir_path/nydus-container.yaml $dir_path/nydus-sandbox.yaml)
echo "Container $cnt created"
sudo -E crictl start $cnt
sudo -E crictl --timeout=20s start $cnt
echo "Container $cnt started"
# ensure container is running
state=$(sudo -E crictl inspect $cnt | jq .status.state | tr -d '"')
state=$(sudo -E crictl --timeout=20s inspect $cnt | jq .status.state | tr -d '"')
[ $state == "CONTAINER_RUNNING" ] || die "Container is not running($state)"
# run a command in container
crictl exec $cnt ls
sudo -E crictl --timeout=20s exec $cnt ls
# cleanup containers
sudo -E crictl stop $cnt
sudo -E crictl stopp $pod
sudo -E crictl rmp $pod
sudo -E crictl --timeout=20s stop $cnt
sudo -E crictl --timeout=20s stopp $pod
sudo -E crictl --timeout=20s rmp $pod
}
function teardown() {
@@ -177,11 +151,11 @@ function teardown() {
# kill nydus-snapshotter
bin=containerd-nydus-grpc
kill -9 $(pidof $bin) || true
sudo -E kill -9 $(pidof $bin) || true
[ "$(pidof $bin)" == "" ] || die "$bin is running"
bin=nydusd
kill -9 $(pidof $bin) || true
sudo -E kill -9 $(pidof $bin) || true
[ "$(pidof $bin)" == "" ] || die "$bin is running"
# restore kata configuration.toml if needed

View File

@@ -3,17 +3,13 @@
"backend": {
"type": "registry",
"config": {
"scheme": "https",
"timeout": 5,
"connect_timeout": 5,
"retry_limit": 2
}
},
"cache": {
"type": "blobcache",
"config": {
"work_dir": "/var/lib/nydus/cache"
}
"type": "blobcache"
}
},
"mode": "direct",
@@ -22,6 +18,8 @@
"enable_xattr": true,
"fs_prefetch": {
"enable": true,
"threads_count": 2
"threads_count": 8,
"merging_size": 1048576,
"prefetch_all": true
}
}
}

View File

@@ -0,0 +1,128 @@
version = 1
# Snapshotter's own home directory where it stores and creates necessary resources
root = "/var/lib/containerd-nydus"
# The snapshotter's GRPC server socket, containerd will connect to plugin on this socket
address = "/run/containerd-nydus/containerd-nydus-grpc.sock"
daemon_mode = "dedicated"
# Whether snapshotter should try to clean up resources when it is closed
cleanup_on_close = false
[system]
# Snapshotter's debug and trace HTTP server interface
enable = true
# Unix domain socket path where system controller is listening on
address = "/run/containerd-nydus/system.sock"
[system.debug]
# Snapshotter can profile the CPU utilization of each nydusd daemon when it is being started.
# This option specifies the profile duration when nydusd is downloading and uncompressing data.
daemon_cpu_profile_duration_secs = 5
# Enable by assigning an address, empty indicates pprof server is disabled
pprof_address = ""
[daemon]
# Specify a configuration file for nydusd
nydusd_config = "/etc/nydusd-config.json"
nydusd_path = "/usr/local/bin/nydusd"
nydusimage_path = "/usr/local/bin/nydus-image"
# fusedev or fscache
fs_driver = "fusedev"
# How to process when daemon dies: "none", "restart" or "failover"
recover_policy = "restart"
# Nydusd worker thread number to handle FUSE or fscache requests, [0-1024].
# Setting to 0 will use the default configuration of nydusd.
threads_number = 4
# Log rotation size for nydusd, in MB (megabytes)
log_rotation_size = 100
[cgroup]
# Whether to use separate cgroup for nydusd.
enable = true
# The memory limit for nydusd cgroup, which contains all nydusd processes.
# Percentage is supported as well, please ensure it is end with "%".
# The default unit is bytes. Acceptable values include "209715200", "200MiB", "200Mi" and "10%".
memory_limit = ""
[log]
# Print logs to stdout rather than logging files
log_to_stdout = false
# Snapshotter's log level
level = "info"
log_rotation_compress = true
log_rotation_local_time = true
# Max number of days to retain logs
log_rotation_max_age = 7
log_rotation_max_backups = 5
# In MB (megabytes)
log_rotation_max_size = 100
[metrics]
# Enable by assigning an address; an empty value means the metrics server is disabled
address = ":9110"
[remote]
convert_vpc_registry = false
[remote.mirrors_config]
# Snapshotter will overwrite daemon's mirrors configuration
# if the values loaded from this directory are not null before starting a daemon.
# Set to "" or an empty directory to disable it.
#dir = "/etc/nydus/certs.d"
[remote.auth]
# Fetch the private registry auth by listening to K8s API server
enable_kubeconfig_keychain = false
# synchronize `kubernetes.io/dockerconfigjson` secret from kubernetes API server with specified kubeconfig (default `$KUBECONFIG` or `~/.kube/config`)
kubeconfig_path = ""
# Fetch the private registry auth as CRI image service proxy
enable_cri_keychain = false
# the target image service when using image proxy
#image_service_address = "/run/containerd/containerd.sock"
[snapshot]
# Let containerd use nydus-overlayfs mount helper
enable_nydus_overlayfs = true
# Insert Kata Virtual Volume option to `Mount.Options`
enable_kata_volume = false
# Whether to remove resources when a snapshot is removed
sync_remove = false
[cache_manager]
disable = false
gc_period = "24h"
# Directory to host cached files
cache_dir = ""
[image]
public_key_file = ""
validate_signature = false
# The configurations for features that are not production ready
[experimental]
# Whether to enable stargz support
enable_stargz = false
# Whether to enable referrers support
# The option enables trying to fetch the Nydus image associated with the OCI image and run it.
# Also see https://github.com/opencontainers/distribution-spec/blob/main/spec.md#listing-referrers
enable_referrer_detect = false
[experimental.tarfs]
# Whether to enable nydus tarfs mode. Tarfs is supported by:
# - The EROFS filesystem driver since Linux 6.4
# - Nydus Image Service release v2.3
enable_tarfs = false
# Mount rafs on host by loopdev and EROFS
mount_tarfs_on_host = false
# Only enable nydus tarfs mode for images with `tarfs hint` label when true
tarfs_hint = false
# Maximum concurrency for converting OCIv1 images to tarfs; 0 means the default
max_concurrent_proc = 0
# Mode to export tarfs images:
# - "none" or "": do not export tarfs
# - "layer_verity_only": only generate disk verity information for a layer blob
# - "image_verity_only": only generate disk verity information for all blobs of an image
# - "layer_block": generate a raw block disk image with tarfs for a layer
# - "image_block": generate a raw block disk image with tarfs for an image
# - "layer_block_with_verity": generate a raw block disk image with tarfs for a layer with dm-verity info
# - "image_block_with_verity": generate a raw block disk image with tarfs for an image with dm-verity info
export_mode = ""
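
For this snapshotter to actually serve snapshots, containerd has to be pointed at the GRPC socket configured near the top of this file. The stanza below is a commonly used proxy_plugins registration and is not part of this PR; the containerd config path and the restart step are assumptions about a typical setup.

```bash
# Sketch: register the nydus snapshotter with containerd as a proxy plugin,
# using the same socket address as the "address" field above.
sudo tee -a /etc/containerd/config.toml > /dev/null << 'EOF'

[proxy_plugins]
  [proxy_plugins.nydus]
    type = "snapshot"
    address = "/run/containerd-nydus/containerd-nydus-grpc.sock"
EOF
sudo systemctl restart containerd
```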

View File

@@ -0,0 +1,59 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o nounset
set -o pipefail
kata_tarball_dir="${2:-kata-artifacts}"
runk_dir="$(dirname "$(readlink -f "$0")")"
source "${runk_dir}/../../common.bash"
function install_dependencies() {
info "Installing the dependencies needed for running the runk tests"
# Dependencies that we can rely on the system packages for:
# - jq
declare -a system_deps=(
jq
)
sudo apt-get update
sudo apt-get -y install "${system_deps[@]}"
ensure_yq
# Dependencies that we can install directly
# from their releases on GitHub:
# - containerd
# - cri-containerd-cni release tarball already includes CNI plugins
declare -a github_deps
github_deps[0]="cri_containerd:$(get_from_kata_deps "externals.containerd.${CONTAINERD_VERSION}")"
for github_dep in "${github_deps[@]}"; do
IFS=":" read -r -a dep <<< "${github_dep}"
install_${dep[0]} "${dep[1]}"
done
}
function run() {
info "Running runk tests using"
bash -c ${runk_dir}/runk-tests.sh
}
function main() {
action="${1:-}"
case "${action}" in
install-dependencies) install_dependencies ;;
install-kata) install_kata ;;
run) run ;;
*) >&2 die "Invalid argument" ;;
esac
}
main "$@"

View File

@@ -0,0 +1,99 @@
#!/bin/bash
#
# Copyright (c) 2023 Kata Contributors
#
# SPDX-License-Identifier: Apache-2.0
#
# This test will validate runk with containerd
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
tracing_dir="$(dirname "$(readlink -f "$0")")"
source "${tracing_dir}/../../common.bash"
source "${tracing_dir}/../../metrics/lib/common.bash"
RUNK_BIN_PATH="/usr/local/bin/runk"
TEST_IMAGE="docker.io/library/busybox:latest"
CONTAINER_ID="id1"
PID_FILE="${CONTAINER_ID}.pid"
WORK_DIR="$(mktemp -d --tmpdir runk.XXXXX)"
setup() {
echo "pull container image"
check_images ${TEST_IMAGE}
}
test_runk() {
echo "start container with runk"
# Bind mount ${WORK_DIR}:/tmp. Tests below will store files in this dir and check them while the container is frozen.
sudo ctr run --pid-file ${PID_FILE} -d --runc-binary ${RUNK_BIN_PATH} --mount type=bind,src=${WORK_DIR},dst=/tmp,options=rbind:rw ${TEST_IMAGE} ${CONTAINER_ID}
read CID PID STATUS <<< $(sudo ctr t ls | grep ${CONTAINER_ID})
[ ${PID} == $(cat ${PID_FILE}) ] || die "pid is not consistent"
[ ${STATUS} == "RUNNING" ] || die "container status is not RUNNING"
echo "exec process in a container"
sudo ctr t exec --exec-id id1 ${CONTAINER_ID} sh -c "echo hello > /tmp/foo"
[ "hello" == "$(sudo ctr t exec --exec-id id1 ${CONTAINER_ID} cat /tmp/foo)" ] || die "exec process failed"
echo "test ps command"
sudo ctr t exec --detach --exec-id id1 ${CONTAINER_ID} sh
# one line is the title line, and the other two lines are process info
[ "3" == "$(sudo ctr t ps ${CONTAINER_ID} | wc -l)" ] || die "ps command failed"
echo "test pause and resume"
# The process outputs lines into /tmp/${CONTAINER_ID}, which can be read on the host while the container is frozen.
sudo ctr t exec --detach --exec-id id2 ${CONTAINER_ID} sh -c "while true; do echo hello >> /tmp/${CONTAINER_ID}; sleep 0.1; done"
# sleep for 1s to make sure the process outputs some lines
sleep 1
sudo ctr t pause ${CONTAINER_ID}
[ "PAUSED" == "$(sudo ctr t ls | grep ${CONTAINER_ID} | grep -o PAUSED)" ] || die "status is not PAUSED"
echo "container is paused"
local TMP_FILE="${WORK_DIR}/${CONTAINER_ID}"
local lines1=$(cat ${TMP_FILE} | wc -l)
# sleep for a while and check the lines are not changed.
sleep 1
local lines2=$(cat ${TMP_FILE} | wc -l)
[ ${lines1} == ${lines2} ] || die "paused container is still running"
sudo ctr t resume ${CONTAINER_ID}
[ "RUNNING" == "$(sudo ctr t ls | grep ${CONTAINER_ID} | grep -o RUNNING)" ] || die "status is not RUNNING"
echo "container is resumed"
# sleep for a while and check the lines are changed.
sleep 1
local lines3=$(cat ${TMP_FILE} | wc -l)
[ ${lines2} -lt ${lines3} ] || die "resumed container is not running"
echo "kill the container and poll until it is stopped"
sudo ctr t kill --signal SIGKILL --all ${CONTAINER_ID}
# poll for a while until the task receives signal and exit
local cmd='[ "STOPPED" == "$(sudo ctr t ls | grep ${CONTAINER_ID} | awk "{print \$3}")" ]'
waitForProcess 10 1 "${cmd}" || die "failed to kill task"
echo "check the container is stopped"
# there is only title line of ps command
[ "1" == "$(sudo ctr t ps ${CONTAINER_ID} | wc -l)" ] || die "kill command failed"
# High-level container runtimes such as containerd call the kill command with
# --all option in order to terminate all processes inside the container
# even if the container already is stopped. Hence, a low-level runtime
# should allow kill --all regardless of the container state like runc.
echo "test kill --all is allowed regardless of the container state"
sudo ctr t kill --signal SIGKILL ${CONTAINER_ID} && die "kill should fail"
sudo ctr t kill --signal SIGKILL --all ${CONTAINER_ID} || die "kill --all should not fail"
echo "delete the container"
sudo ctr t rm ${CONTAINER_ID}
[ -z "$(sudo ctr t ls | grep ${CONTAINER_ID})" ] || die "failed to delete task"
sudo ctr c rm ${CONTAINER_ID} || die "failed to delete container"
}
clean_up() {
rm -f ${PID_FILE}
rm -rf ${WORK_DIR}
}
setup
test_runk
clean_up

View File

@@ -1,7 +1,5 @@
# Kata Containers metrics
> **_Warning:_** Migration of metrics tests is WIP and you may not find all tests available here, but you can take a look at the [tests repo](https://github.com/kata-containers/tests/tree/main/metrics).
This directory contains the metrics tests for Kata Containers.
The tests within this directory have a number of potential use cases:
@@ -35,7 +33,7 @@ regression checking, we try to define and stick to some "quality measures" for o
## Categories
Kata Container metrics tend to fall into a set of categories, and we organise the tests
Kata Container metrics tend to fall into a set of categories, and we organize the tests
within this folder as such.
Each sub-folder contains its own `README` detailing its own tests.
@@ -62,9 +60,8 @@ For further details see the [density tests documentation](density).
Tests relating to networking. General items could include:
- bandwidth
- latency
- jitter
- `jitter`
- parallel bandwidth
- write and read percentiles
For further details see the [network tests documentation](network).
@@ -78,10 +75,12 @@ For further details see the [storage tests documentation](storage).
Tests relating to measuring reading and writing against clusters.
For further details see the [disk tests documentation](disk).
### Machine Learning
Tests relating to TensorFlow and PyTorch implementations of several popular
convolutional models.
`convolutional` models.
For further details see the [machine learning tests documentation](machine_learning).
@@ -114,7 +113,7 @@ to do some JSON handling themselves before injecting their JSON into the API.
#### `metrics_json_init()`
Initialise the API. Must be called before all other JSON API calls.
Initialize the API. Must be called before all other JSON API calls.
Should be matched by a final call to `metrics_json_save`.
Relies upon the `TEST_NAME` variable to derive the file name the final JSON
@@ -148,7 +147,7 @@ Add a JSON formatted fragment at the top level.
#### `metrics_json_start_array()`
Initialise the JSON array API subsystem, ready to accept JSON fragments via
Initialize the JSON array API subsystem, ready to accept JSON fragments via
`metrics_json_add_array_element`.
This JSON array API subset allows accumulation of multiple entries into a
@@ -210,3 +209,7 @@ set if necessary.
## `checkmetrics`
`checkmetrics` is a CLI tool to check a metrics CI results file. For further reference see the [`checkmetrics`](cmd/checkmetrics).
## Report generator
See the [report generator](report) documentation.
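
As a concrete illustration of the JSON API described above, a metrics test typically initialises the API, accumulates array elements, and saves the result. The sketch below uses only the helpers named in this README plus `metrics_json_end_array`, whose name and argument are assumptions, as are the library path and the exact fragment layout (chosen to line up with the `checkvar` expressions in the checkmetrics file that follows).

```bash
#!/bin/bash
# Hypothetical metrics test skeleton; helper names are taken from the README
# above, while metrics_json_end_array and the lib path are assumptions.
set -o errexit -o nounset -o pipefail

SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
source "${SCRIPT_DIR}/lib/common.bash"

TEST_NAME="latency"

metrics_json_init

metrics_json_start_array
json="$(cat << EOF
{
  "latency": {
    "Result": 0.42,
    "Units": "s"
  }
}
EOF
)"
metrics_json_add_array_element "$json"
metrics_json_end_array "Results"

metrics_json_save
```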

View File

@@ -29,7 +29,7 @@ description = "measure memory usage"
# within (inclusive)
checkvar = ".\"memory-footprint\".Results | .[] | .average.Result"
checktype = "mean"
midval = 2518364.00
midval = 127220.25
minpercent = 20.0
maxpercent = 20.0
@@ -73,27 +73,92 @@ minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "tensorflow"
name = "tensorflow_nhwc"
type = "json"
description = "tensorflow resnet model"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"tensorflow\".Results | .[] | .resnet.Result"
checkvar = ".\"tensorflow_nhwc\".Results | .[] | .resnet.Result"
checktype = "mean"
midval = 3566.0
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "tensorflow"
name = "tensorflow_nhwc"
type = "json"
description = "tensorflow alexnet model"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"tensorflow\".Results | .[] | .alexnet.Result"
checkvar = ".\"tensorflow_nhwc\".Results | .[] | .alexnet.Result"
checktype = "mean"
midval = 98.0
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "latency"
type = "json"
description = "measure container latency"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"latency\".Results | .[] | .latency.Result"
checktype = "mean"
midval = 0.75
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "network-iperf3"
type = "json"
description = "measure container cpu utilization using iperf3"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"network-iperf3\".Results | .[] | .cpu.Result"
checktype = "mean"
midval = 85.60
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "network-iperf3"
type = "json"
description = "measure container bandwidth using iperf3"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"network-iperf3\".Results | .[] | .bandwidth.Result"
checktype = "mean"
midval = 61176889941.19
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "network-iperf3"
type = "json"
description = "measure container parallel bandwidth using iperf3"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"network-iperf3\".Results | .[] | .parallel.Result"
checktype = "mean"
midval = 47734838389.0
minpercent = 20.0
maxpercent = 20.0
[[metric]]
name = "network-iperf3"
type = "json"
description = "iperf"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"network-iperf3\".Results | .[] | .jitter.Result"
checktype = "mean"
midval = 0.044
minpercent = 50.0
maxpercent = 50.0
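
Each `checkvar` above is written as a jq filter over the JSON results file of the corresponding test; `checkmetrics` then compares the resulting mean against `midval` within the given percentage window. Conceptually the jitter check reduces to something like the snippet below; the results file name is an assumption and this is not the actual checkmetrics code, only its jq equivalent.

```bash
# Sketch: evaluate the iperf3 jitter checkvar by hand against a results file.
results="network-iperf3.json"   # assumed file name produced by the test
value=$(jq '."network-iperf3".Results | .[] | .jitter.Result' "$results")
echo "measured jitter: ${value} (expected around 0.044, +/- 50%)"
```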

Some files were not shown because too many files have changed in this diff.