From d3bb25418875d2cbb329812b6aab28819ba7a1f3 Mon Sep 17 00:00:00 2001
From: Archana Shinde
Date: Fri, 3 Feb 2023 15:41:59 -0800
Subject: [PATCH 001/137] utils: Add function to check vhost-vsock

Add a function to check if the host system has the vhost-vsock kernel
module.

Signed-off-by: Archana Shinde
---
 src/tools/kata-ctl/src/utils.rs | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/src/tools/kata-ctl/src/utils.rs b/src/tools/kata-ctl/src/utils.rs
index b1564f4c4..9c92f82bf 100644
--- a/src/tools/kata-ctl/src/utils.rs
+++ b/src/tools/kata-ctl/src/utils.rs
@@ -144,6 +144,12 @@ pub fn get_generic_cpu_details(cpu_info_file: &str) -> Result<(String, String)>
     Ok((vendor, model))
 }
 
+const VHOST_VSOCK_DEVICE: &str = "/dev/vhost-vsock";
+pub fn supports_vsocks(vsock_path: &str) -> Result<bool> {
+    let metadata = fs::metadata(vsock_path)?;
+    Ok(metadata.is_file())
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -283,4 +289,30 @@ mod tests {
         );
         assert_eq!(actual, expected);
     }
+
+    #[test]
+    fn check_supports_vsocks_valid() {
+        let dir = tempdir().unwrap();
+        let file_path = dir.path().join("vhost-vsock");
+        let path = file_path.clone();
+        let _file = fs::File::create(file_path).unwrap();
+        let res = supports_vsocks(path.to_str().unwrap()).unwrap();
+        assert!(res);
+    }
+
+    #[test]
+    fn check_supports_vsocks_dir() {
+        let dir = tempdir().unwrap();
+        let file_path = dir.path().join("vhost-vsock");
+        let path = file_path.clone();
+        let _dir = fs::create_dir(file_path).unwrap();
+        let res = supports_vsocks(path.to_str().unwrap()).unwrap();
+        assert!(!res);
+    }
+
+    #[test]
+    fn check_supports_vsocks_missing_file() {
+        let res = supports_vsocks("/xyz/vhost-vsock");
+        assert!(res.is_err());
+    }
 }
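[Editorial sketch, not part of the patch: the same capability can be probed
from a shell before running kata-ctl. Note that the real /dev/vhost-vsock is
normally a character device node created when the module is loaded.]

    $ [ -e /dev/vhost-vsock ] && echo "vhost-vsock: present" || echo "vhost-vsock: missing"
    $ lsmod | grep -q vhost_vsock || sudo modprobe vhost_vsock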
From cbe6ad90340264a043c594c0544215c339af4735 Mon Sep 17 00:00:00 2001
From: Feng Wang
Date: Tue, 31 Jan 2023 09:48:49 -0800
Subject: [PATCH 002/137] runtime: support non-root for clh

This change enables running the cloud-hypervisor VMM as a non-root user
when the rootless flag is set to true in the configuration.

Fixes: #2567
Signed-off-by: Feng Wang
---
 docs/how-to/how-to-run-rootless-vmm.md       |  4 +--
 src/runtime/config/configuration-clh.toml.in |  5 +++
 src/runtime/pkg/katautils/config.go          |  1 +
 src/runtime/virtcontainers/clh.go            | 38 ++++++++++++++++++--
 src/runtime/virtcontainers/qemu.go           |  1 +
 5 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/docs/how-to/how-to-run-rootless-vmm.md b/docs/how-to/how-to-run-rootless-vmm.md
index 3986de252..7711c1325 100644
--- a/docs/how-to/how-to-run-rootless-vmm.md
+++ b/docs/how-to/how-to-run-rootless-vmm.md
@@ -1,5 +1,5 @@
 ## Introduction
-To improve security, Kata Containers supports running the VMM process (currently only QEMU) as a non-`root` user.
+To improve security, Kata Containers supports running the VMM process (QEMU and cloud-hypervisor) as a non-`root` user.
 This document describes how to enable the rootless VMM mode and its limitations.
 
 ## Pre-requisites
@@ -27,7 +27,7 @@ Another necessary change is to move the hypervisor runtime files (e.g. `vhost-fs.sock`) to a directory owned by the non-root user.
 ## Limitations
 1. Only the VMM process is running as a non-root user. Other processes such as the Kata Containers shimv2 and `virtiofsd` still run as the root user.
-2. Currently, this feature is only supported in QEMU. Still need to bring it to Firecracker and Cloud Hypervisor (see https://github.com/kata-containers/kata-containers/issues/2567).
+2. Currently, this feature is only supported in QEMU and cloud-hypervisor. For firecracker, you can use jailer to run the VMM process as a non-root user.
 3. Certain features will not work when rootless VMM is enabled, including:
    1. Passing devices to the guest (`virtio-blk`, `virtio-scsi`) will not work if the non-privileged user does not have permission to access them (leading to a permission denied error). A more permissive mode (e.g. 666) may overcome this issue. However, you need to be aware of the potential security implications of reducing the security on such devices.
    2. `vfio` devices will also not work because of a permission denied error.
\ No newline at end of file
diff --git a/src/runtime/config/configuration-clh.toml.in b/src/runtime/config/configuration-clh.toml.in
index df7cc7ac5..d79770487 100644
--- a/src/runtime/config/configuration-clh.toml.in
+++ b/src/runtime/config/configuration-clh.toml.in
@@ -41,6 +41,11 @@ rootfs_type=@DEFROOTFSTYPE@
 # Default false
 # confidential_guest = true
 
+# Enable running the clh VMM as a non-root user.
+# By default the clh VMM runs as root. When this is set to true, the clh VMM process runs as
+# a random non-root user. See the documentation for the limitations of this mode.
+# rootless = true
+
 # disable applying SELinux on the VMM process (default false)
 disable_selinux=@DEFDISABLESELINUX@

diff --git a/src/runtime/pkg/katautils/config.go b/src/runtime/pkg/katautils/config.go
index 997d07368..37c247d2b 100644
--- a/src/runtime/pkg/katautils/config.go
+++ b/src/runtime/pkg/katautils/config.go
@@ -1046,6 +1046,7 @@ func newClhHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 		EnableAnnotations:       h.EnableAnnotations,
 		DisableSeccomp:          h.DisableSeccomp,
 		ConfidentialGuest:       h.ConfidentialGuest,
+		Rootless:                h.Rootless,
 		DisableSeLinux:          h.DisableSeLinux,
 		DisableGuestSeLinux:     h.DisableGuestSeLinux,
 		NetRateLimiterBwMaxRate: h.getNetRateLimiterBwMaxRate(),
diff --git a/src/runtime/virtcontainers/clh.go b/src/runtime/virtcontainers/clh.go
index 71bd931dc..e9906ddaf 100644
--- a/src/runtime/virtcontainers/clh.go
+++ b/src/runtime/virtcontainers/clh.go
@@ -19,6 +19,7 @@ import (
 	"net/http/httputil"
 	"os"
 	"os/exec"
+	"os/user"
 	"path/filepath"
 	"regexp"
 	"strconv"
@@ -37,6 +38,8 @@ import (
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/device/config"
 	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"
+	pkgUtils "github.com/kata-containers/kata-containers/src/runtime/pkg/utils"
+	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/utils"
 )
@@ -653,7 +656,7 @@ func (clh *cloudHypervisor) StartVM(ctx context.Context, timeout int) error {
 	clh.Logger().WithField("function", "StartVM").Info("starting Sandbox")
 
 	vmPath := filepath.Join(clh.config.VMStorePath, clh.id)
-	err := os.MkdirAll(vmPath, DirMode)
+	err := utils.MkdirAllWithInheritedOwner(vmPath, DirMode)
 	if err != nil {
 		return err
 	}
@@ -1352,9 +1355,16 @@ func (clh *cloudHypervisor) launchClh() (int, error) {
 			cmdHypervisor.Stdout = clh.console
 		}
 	}
 
 	cmdHypervisor.Stderr = cmdHypervisor.Stdout
 
+	attr := syscall.SysProcAttr{}
+	attr.Credential = &syscall.Credential{
+		Uid:    clh.config.Uid,
+		Gid:    clh.config.Gid,
+		Groups: clh.config.Groups,
+	}
+	cmdHypervisor.SysProcAttr = &attr
+
 	err = utils.StartCmd(cmdHypervisor)
 	if err != nil {
 		return -1, err
 	}
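[Editorial sketch, not part of the patch: to confirm the Credential settings
above take effect, the owner of the VMM process can be checked after starting
a rootless sandbox; the exact user name is randomly generated.]

    $ ps -o user=,pid=,comm= -C cloud-hypervisor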
@@ -1679,6 +1689,30 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
 			clh.Logger().WithError(err).WithField("path", dir).Warnf("failed to remove vm path")
 		}
 	}
 
+	if rootless.IsRootless() {
+		if _, err := user.Lookup(clh.config.User); err != nil {
+			clh.Logger().WithError(err).WithFields(
+				log.Fields{
+					"user": clh.config.User,
+					"uid":  clh.config.Uid,
+				}).Warn("failed to find the user, it might have been removed")
+			return nil
+		}
+
+		if err := pkgUtils.RemoveVmmUser(clh.config.User); err != nil {
+			clh.Logger().WithError(err).WithFields(
+				log.Fields{
+					"user": clh.config.User,
+					"uid":  clh.config.Uid,
+				}).Warn("failed to delete the user")
+			return nil
+		}
+
+		clh.Logger().WithFields(
+			log.Fields{
+				"user": clh.config.User,
+				"uid":  clh.config.Uid,
+			}).Debug("successfully removed the non-root user")
+	}
+
 	clh.reset()

diff --git a/src/runtime/virtcontainers/qemu.go b/src/runtime/virtcontainers/qemu.go
index 0d4fa3e85..471d0f767 100644
--- a/src/runtime/virtcontainers/qemu.go
+++ b/src/runtime/virtcontainers/qemu.go
@@ -1183,6 +1183,7 @@ func (q *qemu) cleanupVM() error {
 				"user": q.config.User,
 				"uid":  q.config.Uid,
 			}).Warn("failed to delete the user")
+			return nil
 		}
 		q.Logger().WithFields(
 			logrus.Fields{

From f31c79d2107579ec00085c260b293d62fd74aabe Mon Sep 17 00:00:00 2001
From: Jeremi Piotrowski
Date: Fri, 3 Mar 2023 09:37:25 +0100
Subject: [PATCH 003/137] workflows: static-checks: Remove TRAVIS_XXX variables

These variables are unused since we no longer use Travis CI. This also
allows removing two steps:

- 'Setup GOPATH' only printed variables
- 'Setup travis reference' modified some shell local variables that
  don't have any influence on the rest of the steps

The TRAVIS var is still used by tools/osbuilder/tests to determine if
virtualization is available.

Fixes: #3544
Signed-off-by: Jeremi Piotrowski
---
 .github/workflows/static-checks.yaml | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/.github/workflows/static-checks.yaml b/.github/workflows/static-checks.yaml
index 8d9858500..1a995dca5 100644
--- a/.github/workflows/static-checks.yaml
+++ b/.github/workflows/static-checks.yaml
@@ -20,9 +20,6 @@ jobs:
           - "sudo -E PATH=\"$PATH\" make test"
     env:
       TRAVIS: "true"
-      TRAVIS_BRANCH: ${{ github.base_ref }}
-      TRAVIS_PULL_REQUEST_BRANCH: ${{ github.head_ref }}
-      TRAVIS_PULL_REQUEST_SHA : ${{ github.event.pull_request.head.sha }}
       RUST_BACKTRACE: "1"
       target_branch: ${{ github.base_ref }}
     steps:
@@ -52,23 +49,11 @@ jobs:
             fi
             echo "Check passed"
           fi
-      - name: Setup GOPATH
-        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
-        run: |
-          echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
-          echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
-          echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
-          echo "TRAVIS: ${TRAVIS}"
       - name: Set env
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
         run: |
           echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
           echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
-      - name: Setup travis references
-        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
-        run: |
-          echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
-          target_branch=${TRAVIS_BRANCH}
       - name: Setup
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
         run: |
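[Editorial sketch, not part of the patch: one way to double-check that no
stale references to the removed variables remain in the workflows, run from
the repository root.]

    $ git grep -nE 'TRAVIS_(BRANCH|PULL_REQUEST)' .github/ || echo "no stale references"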
From 395645e1ce374c8b4f067cc95adb1cf72af6ca0f Mon Sep 17 00:00:00 2001
From: Jianyong Wu
Date: Thu, 2 Mar 2023 16:41:46 +0800
Subject: [PATCH 004/137] runtime: hybrid-mode causes an error in the latest nydusd

When updating nydusd to 2.2, the argument "--hybrid-mode" causes the
following error:

thread 'main' panicked at 'ArgAction::SetTrue / ArgAction::SetFalse is defaulted'

The argument should be removed in order to upgrade nydusd.

Fixes: #6407
Signed-off-by: Jianyong Wu
---
 src/runtime/virtcontainers/nydusd.go      | 2 +-
 src/runtime/virtcontainers/nydusd_test.go | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/runtime/virtcontainers/nydusd.go b/src/runtime/virtcontainers/nydusd.go
index 9a2e1a638..3c42e4cde 100644
--- a/src/runtime/virtcontainers/nydusd.go
+++ b/src/runtime/virtcontainers/nydusd.go
@@ -157,7 +157,7 @@ func (nd *nydusd) args() ([]string, error) {
 		logLevel = "debug"
 	}
 	args := []string{
-		"virtiofs", "--hybrid-mode",
+		"virtiofs",
 		"--log-level", logLevel,
 		"--apisock", nd.apiSockPath,
 		"--sock", nd.sockPath,
diff --git a/src/runtime/virtcontainers/nydusd_test.go b/src/runtime/virtcontainers/nydusd_test.go
index 481866ffc..a8ec6dc9b 100644
--- a/src/runtime/virtcontainers/nydusd_test.go
+++ b/src/runtime/virtcontainers/nydusd_test.go
@@ -99,13 +99,13 @@ func TestNydusdArgs(t *testing.T) {
 		apiSockPath: "/var/lib/api.sock",
 		debug:       true,
 	}
-	expected := "virtiofs --hybrid-mode --log-level debug --apisock /var/lib/api.sock --sock /var/lib/vhost-user.sock"
+	expected := "virtiofs --log-level debug --apisock /var/lib/api.sock --sock /var/lib/vhost-user.sock"
 	args, err := nd.args()
 	assert.NoError(err)
 	assert.Equal(expected, strings.Join(args, " "))
 
 	nd.debug = false
-	expected = "virtiofs --hybrid-mode --log-level info --apisock /var/lib/api.sock --sock /var/lib/vhost-user.sock"
+	expected = "virtiofs --log-level info --apisock /var/lib/api.sock --sock /var/lib/vhost-user.sock"
 	args, err = nd.args()
 	assert.NoError(err)
 	assert.Equal(expected, strings.Join(args, " "))

From 3443f558a61ae4c79472837c057f551315557fca Mon Sep 17 00:00:00 2001
From: Jianyong Wu
Date: Thu, 2 Mar 2023 16:49:51 +0800
Subject: [PATCH 005/137] nydus: upgrade nydus to v2.2.0

Using the latest nydus may allow nydus to work on arm64.

Fixes: #6407
Signed-off-by: Jianyong Wu
---
 versions.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/versions.yaml b/versions.yaml
index 174162b99..b738594f6 100644
--- a/versions.yaml
+++ b/versions.yaml
@@ -254,7 +254,7 @@ externals:
   nydus:
     description: "Nydus image acceleration service"
     url: "https://github.com/dragonflyoss/image-service"
-    version: "v2.1.1"
+    version: "v2.2.0"
 
   nydus-snapshotter:
     description: "Snapshotter for Nydus image acceleration service"

From 439ff9d4c49e6078a3394872fbc524dcdfe224e9 Mon Sep 17 00:00:00 2001
From: Jeremi Piotrowski
Date: Fri, 3 Mar 2023 11:19:38 +0100
Subject: [PATCH 006/137] tools/osbuilder/tests: Remove TRAVIS variable

The last remaining user of the TRAVIS variable in this repo is
tools/osbuilder/tests and it is only used to skip spinning up VMs.
Travis didn't support virtualization and the same is true for GitHub
Actions hosted runners. Replace the variable with KVM_MISSING and
determine the availability of /dev/kvm at runtime.

TRAVIS is also used by '.ci/setup.sh' in kata-containers/tests to
reduce the set of dependencies that gets installed, but this is also in
the process of being removed.
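The check itself is a one-liner, mirroring the diff below: KVM_MISSING stays
empty when /dev/kvm exists and becomes "true" otherwise, so the existing
"[ -n ... ] && return" guards keep working unchanged:

    KVM_MISSING="$([ -e /dev/kvm ] || echo true)"
    [ -n "${KVM_MISSING:-}" ] && return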
Fixes: #3544 Signed-off-by: Jeremi Piotrowski --- .github/workflows/static-checks.yaml | 1 - tools/osbuilder/tests/test_images.sh | 15 ++++++--------- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/.github/workflows/static-checks.yaml b/.github/workflows/static-checks.yaml index 1a995dca5..0117dd4e7 100644 --- a/.github/workflows/static-checks.yaml +++ b/.github/workflows/static-checks.yaml @@ -19,7 +19,6 @@ jobs: - "make test" - "sudo -E PATH=\"$PATH\" make test" env: - TRAVIS: "true" RUST_BACKTRACE: "1" target_branch: ${{ github.base_ref }} steps: diff --git a/tools/osbuilder/tests/test_images.sh b/tools/osbuilder/tests/test_images.sh index 338e5d3ad..945ae8783 100755 --- a/tools/osbuilder/tests/test_images.sh +++ b/tools/osbuilder/tests/test_images.sh @@ -32,6 +32,7 @@ readonly KATA_HYPERVISOR="${KATA_HYPERVISOR:-}" readonly KATA_DEV_MODE="${KATA_DEV_MODE:-}" readonly ci_results_dir="/var/osbuilder/tests" readonly dracut_dir=${project_dir}/dracut +readonly KVM_MISSING="$([ -e /dev/kvm ] || echo true)" build_images=1 build_initrds=1 @@ -166,7 +167,7 @@ exit_handler() rm -rf "${tmp_dir}" # Restore the default image in config file - [ -n "${TRAVIS:-}" ] || run_mgr configure-image + [ -n "${KVM_MISSING:-}" ] || run_mgr configure-image return fi @@ -258,8 +259,7 @@ set_runtime() [ -n "${KATA_DEV_MODE}" ] && return - # Travis doesn't support VT-x - [ -n "${TRAVIS:-}" ] && return + [ -n "${KVM_MISSING:-}" ] && return if [ "$KATA_HYPERVISOR" != "firecracker" ]; then if [ -f "$sysconfig_docker_config_file" ]; then @@ -285,8 +285,7 @@ setup() sudo -E mkdir -p ${ci_results_dir} fi - # Travis doesn't support VT-x - [ -n "${TRAVIS:-}" ] && return + [ -n "${KVM_MISSING:-}" ] && return [ ! -d "${tests_repo_dir}" ] && git clone "https://${tests_repo}" "${tests_repo_dir}" @@ -383,8 +382,7 @@ install_image_create_container() [ -z "$file" ] && die "need file" [ ! -e "$file" ] && die "file does not exist: $file" - # Travis doesn't support VT-x - [ -n "${TRAVIS:-}" ] && return + [ -n "${KVM_MISSING:-}" ] && return showKataRunFailure=1 run_mgr reset-config @@ -401,8 +399,7 @@ install_initrd_create_container() [ -z "$file" ] && die "need file" [ ! -e "$file" ] && die "file does not exist: $file" - # Travis doesn't support VT-x - [ -n "${TRAVIS:-}" ] && return + [ -n "${KVM_MISSING:-}" ] && return showKataRunFailure=1 run_mgr reset-config From e68186d9af0d7e41c6eb73e04000d2ebfc3ebe27 Mon Sep 17 00:00:00 2001 From: Jeremi Piotrowski Date: Fri, 3 Mar 2023 11:23:42 +0100 Subject: [PATCH 007/137] workflows: static-checks: Set GOPATH only once {{ runner.workspace }}/kata-containers and {{ github.workspace }} resolve to the same value, but they're being used multiple times in the workflow. Remove multiple definitions and define the GOPATH var at job level once. 
Signed-off-by: Jeremi Piotrowski --- .github/workflows/static-checks.yaml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/static-checks.yaml b/.github/workflows/static-checks.yaml index 0117dd4e7..9b2103f99 100644 --- a/.github/workflows/static-checks.yaml +++ b/.github/workflows/static-checks.yaml @@ -21,6 +21,7 @@ jobs: env: RUST_BACKTRACE: "1" target_branch: ${{ github.base_ref }} + GOPATH: ${{ github.workspace }} steps: - name: Checkout code uses: actions/checkout@v3 @@ -31,8 +32,6 @@ jobs: uses: actions/setup-go@v3 with: go-version: 1.19.3 - env: - GOPATH: ${{ runner.workspace }}/kata-containers - name: Check kernel config version run: | cd "${{ github.workspace }}/src/github.com/${{ github.repository }}" @@ -48,17 +47,14 @@ jobs: fi echo "Check passed" fi - - name: Set env + - name: Set PATH if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} run: | - echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV echo "${{ github.workspace }}/bin" >> $GITHUB_PATH - name: Setup if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} run: | cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh - env: - GOPATH: ${{ runner.workspace }}/kata-containers - name: Installing rust if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} run: | From 462d4a1af257642a8c669e3914fb6c1fe0bfa580 Mon Sep 17 00:00:00 2001 From: Jeremi Piotrowski Date: Fri, 3 Mar 2023 11:35:24 +0100 Subject: [PATCH 008/137] workflows: static-checks: Free disk space before running checks We've been seeing the 'sudo make test' job occasionally run out of space in /tmp, which is part of the root filesystem. Removing dotnet and `AGENT_TOOLSDIRECTORY` frees around 10GB of space and in my tests the job still has 13GB of space left after running. Fixes: #6401 Signed-off-by: Jeremi Piotrowski --- .github/workflows/static-checks.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/static-checks.yaml b/.github/workflows/static-checks.yaml index 9b2103f99..906081453 100644 --- a/.github/workflows/static-checks.yaml +++ b/.github/workflows/static-checks.yaml @@ -23,6 +23,10 @@ jobs: target_branch: ${{ github.base_ref }} GOPATH: ${{ github.workspace }} steps: + - name: Free disk space + run: | + sudo rm -rf /usr/share/dotnet + sudo rm -rf "$AGENT_TOOLSDIRECTORY" - name: Checkout code uses: actions/checkout@v3 with: From 09c4828ac3a912366c44d9a5c2246872044c77cd Mon Sep 17 00:00:00 2001 From: Wainer dos Santos Moschetta Date: Fri, 17 Mar 2023 15:28:47 -0300 Subject: [PATCH 009/137] workflows: add missing artifacts on payload-after-push The kata-deploy-ci payloads for amd64 and arm64 were missing the shim-v2 and kernel-dragonball-experimental artifacts. 
Fixes #6493 Signed-off-by: Wainer dos Santos Moschetta --- .github/workflows/payload-after-push-amd64.yaml | 2 ++ .github/workflows/payload-after-push-arm64.yaml | 2 ++ .github/workflows/payload-after-push-s390x.yaml | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/payload-after-push-amd64.yaml b/.github/workflows/payload-after-push-amd64.yaml index 2b4814b84..ef2f976eb 100644 --- a/.github/workflows/payload-after-push-amd64.yaml +++ b/.github/workflows/payload-after-push-amd64.yaml @@ -15,10 +15,12 @@ jobs: - cloud-hypervisor - firecracker - kernel + - kernel-dragonball-experimental - nydus - qemu - rootfs-image - rootfs-initrd + - shim-v2 - virtiofsd steps: - name: Login to Kata Containers quay.io diff --git a/.github/workflows/payload-after-push-arm64.yaml b/.github/workflows/payload-after-push-arm64.yaml index c7315bab0..e25cd60f0 100644 --- a/.github/workflows/payload-after-push-arm64.yaml +++ b/.github/workflows/payload-after-push-arm64.yaml @@ -15,10 +15,12 @@ jobs: - cloud-hypervisor - firecracker - kernel + - kernel-dragonball-experimental - nydus - qemu - rootfs-image - rootfs-initrd + - shim-v2 - virtiofsd steps: - name: Login to Kata Containers quay.io diff --git a/.github/workflows/payload-after-push-s390x.yaml b/.github/workflows/payload-after-push-s390x.yaml index 4fa147205..df9afa9f9 100644 --- a/.github/workflows/payload-after-push-s390x.yaml +++ b/.github/workflows/payload-after-push-s390x.yaml @@ -13,10 +13,10 @@ jobs: matrix: asset: - kernel - - shim-v2 - qemu - rootfs-image - rootfs-initrd + - shim-v2 - virtiofsd steps: - name: Login to Kata Containers quay.io From 4f0887ce42a5ef65d40d6d9ae4745007951a8481 Mon Sep 17 00:00:00 2001 From: Wainer dos Santos Moschetta Date: Fri, 17 Mar 2023 16:09:21 -0300 Subject: [PATCH 010/137] kata-deploy: fix install failing to chmod runtime-rs/bin/* The kata-deploy install method tried to `chmod +x /opt/kata/runtime-rs/bin/*` but it isn't always true that /opt/kata/runtime-rs/bin/ exists. For example, the s390x payload does not build the kernel-dragonball-experimental artifacts. So let's ensure the dir exist before issuing the command. Fixes #6494 Signed-off-by: Wainer dos Santos Moschetta --- tools/packaging/kata-deploy/scripts/kata-deploy.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/packaging/kata-deploy/scripts/kata-deploy.sh b/tools/packaging/kata-deploy/scripts/kata-deploy.sh index a4a4f9d5c..e4e48732b 100755 --- a/tools/packaging/kata-deploy/scripts/kata-deploy.sh +++ b/tools/packaging/kata-deploy/scripts/kata-deploy.sh @@ -58,7 +58,8 @@ function install_artifacts() { echo "copying kata artifacts onto host" cp -au /opt/kata-artifacts/opt/kata/* /opt/kata/ chmod +x /opt/kata/bin/* - chmod +x /opt/kata/runtime-rs/bin/* + [ -d /opt/kata/runtime-rs/bin ] && \ + chmod +x /opt/kata/runtime-rs/bin/* } function configure_cri_runtime() { From 59c81ed2bba16211d3fad11ed0944afecac1dbc8 Mon Sep 17 00:00:00 2001 From: Gabe Venberg Date: Mon, 6 Mar 2023 20:57:26 -0600 Subject: [PATCH 011/137] utils: informed pre-check about only_kata passed the only_kata variable through to pre_check, only_kata does not abort the install when containerd is already installed. 
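For example (hypothetical session), the -o flag requests a Kata-only
install, which is what makes the relaxed pre-check relevant:

    $ ./utils/kata-manager.sh -o    # install Kata only; containerd is left alone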
Fixes: #6385

Signed-off-by: Gabe Venberg
---
 utils/kata-manager.sh | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/utils/kata-manager.sh b/utils/kata-manager.sh
index fbc911ab4..fa3da6208 100755
--- a/utils/kata-manager.sh
+++ b/utils/kata-manager.sh
@@ -254,9 +254,14 @@ pre_checks()
 {
 	info "Running pre-checks"
 
+	local only_kata="${1:-}"
+	[ -z "$only_kata" ] && die "no only_kata value"
+
 	command -v "${kata_shim_v2}" &>/dev/null \
 		&& die "Please remove existing $kata_project installation"
 
+	[ "$only_kata" = "false" ] && return 0
+
 	local ret
 
 	{ containerd_installed; ret=$?; } || true
@@ -315,6 +320,9 @@ setup()
 	local force="${2:-}"
 	[ -z "$force" ] && die "no force value"
 
+	local only_kata="${3:-}"
+	[ -z "$only_kata" ] && die "no only_kata value"
+
 	[ "$cleanup" = "true" ] && trap cleanup EXIT
 
 	source /etc/os-release || source /usr/lib/os-release
@@ -324,7 +332,7 @@ setup()
 
 	[ "$force" = "true" ] && return 0
 
-	pre_checks
+	pre_checks "$only_kata"
 }
 
 # Download the requested version of the specified project.

From dd23f452ab7fd64907ff6eabe62a509062049936 Mon Sep 17 00:00:00 2001
From: Gabe Venberg
Date: Fri, 17 Mar 2023 16:09:45 -0500
Subject: [PATCH 012/137] utils: renamed only_kata to skip_containerd

Renamed for greater clarity as to what the flag does.

Signed-off-by: Gabe Venberg
---
 utils/kata-manager.sh | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/utils/kata-manager.sh b/utils/kata-manager.sh
index fa3da6208..8d83d055b 100755
--- a/utils/kata-manager.sh
+++ b/utils/kata-manager.sh
@@ -254,13 +254,13 @@ pre_checks()
 {
 	info "Running pre-checks"
 
-	local only_kata="${1:-}"
-	[ -z "$only_kata" ] && die "no only_kata value"
+	local skip_containerd="${1:-}"
+	[ -z "$skip_containerd" ] && die "no skip_containerd value"
 
 	command -v "${kata_shim_v2}" &>/dev/null \
 		&& die "Please remove existing $kata_project installation"
 
-	[ "$only_kata" = "false" ] && return 0
+	[ "$skip_containerd" = "false" ] && return 0
 
 	local ret
 
@@ -320,8 +320,8 @@ setup()
 	local force="${2:-}"
 	[ -z "$force" ] && die "no force value"
 
-	local only_kata="${3:-}"
-	[ -z "$only_kata" ] && die "no only_kata value"
+	local skip_containerd="${3:-}"
+	[ -z "$skip_containerd" ] && die "no skip_containerd value"
 
 	[ "$cleanup" = "true" ] && trap cleanup EXIT
 
@@ -332,7 +332,7 @@ setup()
 
 	[ "$force" = "true" ] && return 0
 
-	pre_checks "$only_kata"
+	pre_checks "$skip_containerd"
 }
 
 # Download the requested version of the specified project.
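[Editorial note, not part of the patch: the quoting in these tests matters.
With POSIX test(1), '[' must be a separate word and the variable must be
expanded, a minimal illustration:]

    [ "$skip_containerd" = "false" ] && return 0   # test(1) string comparison
    # Omitting the space and '$' (e.g. '[skip_containerd = "false" ]') would
    # make the shell look for a command literally named '[skip_containerd'.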
@@ -681,8 +681,8 @@ handle_installation()
 	local force="${2:-}"
 	[ -z "$force" ] && die "no force value"
 
-	local only_kata="${3:-}"
-	[ -z "$only_kata" ] && die "no only Kata value"
+	local skip_containerd="${3:-}"
+	[ -z "$skip_containerd" ] && die "no skip_containerd value"
 
 	local enable_debug="${4:-}"
 	[ -z "$enable_debug" ] && die "no enable debug value"
@@ -699,11 +699,11 @@ handle_installation()
 
 	[ "$only_run_test" = "true" ] && test_installation && return 0
 
-	setup "$cleanup" "$force" "$only_kata"
+	setup "$cleanup" "$force" "$skip_containerd"
 
 	handle_kata "$kata_version" "$enable_debug"
 
-	[ "$only_kata" = "false" ] && \
+	[ "$skip_containerd" = "false" ] && \
 		handle_containerd \
 			"$containerd_version" \
 			"$force"
 
 	[ "$disable_test" = "false" ] && test_installation
 
-	if [ "$only_kata" = "true" ]
+	if [ "$skip_containerd" = "true" ]
 	then
 		info "$kata_project is now installed"
 	else
@@ -725,7 +725,7 @@ handle_args()
 {
 	local cleanup="true"
 	local force="false"
-	local only_kata="false"
+	local skip_containerd="false"
 	local disable_test="false"
 	local only_run_test="false"
 	local enable_debug="false"
@@ -743,7 +743,7 @@ handle_args()
 			f) force="true" ;;
 			h) usage; exit 0 ;;
 			k) kata_version="$OPTARG" ;;
-			o) only_kata="true" ;;
+			o) skip_containerd="true" ;;
 			r) cleanup="false" ;;
 			t) disable_test="true" ;;
 			T) only_run_test="true" ;;
@@ -758,7 +758,7 @@ handle_args()
 	handle_installation \
 		"$cleanup" \
 		"$force" \
-		"$only_kata" \
+		"$skip_containerd" \
 		"$enable_debug" \
 		"$disable_test" \
 		"$only_run_test" \

From ece5edc641330c09146c5f72c43f73f2724e1574 Mon Sep 17 00:00:00 2001
From: Jianyong Wu
Date: Mon, 20 Mar 2023 14:15:57 +0800
Subject: [PATCH 013/137] qemu/arm64: disable image nvdimm if no firmware offered

For now, image nvdimm on qemu/arm64 depends on UEFI/ACPI, so if there
is no firmware offered, it should be disabled.

Fixes: #6468
Signed-off-by: Jianyong Wu
---
 src/runtime/pkg/katautils/config.go      | 10 ++++++++++
 src/runtime/pkg/katautils/config_test.go |  5 +++++
 2 files changed, 15 insertions(+)

diff --git a/src/runtime/pkg/katautils/config.go b/src/runtime/pkg/katautils/config.go
index 997d07368..993ca36b7 100644
--- a/src/runtime/pkg/katautils/config.go
+++ b/src/runtime/pkg/katautils/config.go
@@ -768,6 +768,16 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 		kataUtilsLogger.Info("Setting 'disable_image_nvdimm = true' as microvm does not support NVDIMM")
 	}
 
+	// Nvdimm can only be supported when UEFI/ACPI is enabled on arm64; otherwise disable it.
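+	// (Editorial comment, assumption based on the commit message: without a
+	// UEFI firmware image the arm64 guest boots via direct kernel load and
+	// gets no ACPI NFIT table, so an nvdimm-backed image would not be
+	// visible to the guest kernel.)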
+	if goruntime.GOARCH == "arm64" && firmware == "" {
+		if p, err := h.PFlash(); err == nil {
+			if len(p) == 0 {
+				h.DisableImageNvdimm = true
+				kataUtilsLogger.Info("Setting 'disable_image_nvdimm = true' as there is no firmware specified")
+			}
+		}
+	}
+
 	blockDriver, err := h.blockDeviceDriver()
 	if err != nil {
 		return vc.HypervisorConfig{}, err
diff --git a/src/runtime/pkg/katautils/config_test.go b/src/runtime/pkg/katautils/config_test.go
index d958fa1a3..80268f911 100644
--- a/src/runtime/pkg/katautils/config_test.go
+++ b/src/runtime/pkg/katautils/config_test.go
@@ -13,6 +13,7 @@ import (
 	"path"
 	"path/filepath"
 	"reflect"
+	goruntime "runtime"
 	"strings"
 	"syscall"
 	"testing"
@@ -181,6 +182,10 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf
 		SGXEPCSize: epcSize,
 	}
 
+	if goruntime.GOARCH == "arm64" && len(hypervisorConfig.PFlash) == 0 && hypervisorConfig.FirmwarePath == "" {
+		hypervisorConfig.DisableImageNvdimm = true
+	}
+
 	agentConfig := vc.KataAgentConfig{
 		LongLiveConn: true,
 	}

From e3c2d727ba9e5efc363e1e1301ceb8d74c188f82 Mon Sep 17 00:00:00 2001
From: "James O. D. Hunt"
Date: Wed, 22 Mar 2023 11:56:54 +0000
Subject: [PATCH 014/137] runtime-rs: ch: clippy fix

Simplify the code to keep Rust's `clippy` happy.

Signed-off-by: James O. D. Hunt
---
 src/runtime-rs/crates/hypervisor/ch-config/src/ch_api.rs | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/src/runtime-rs/crates/hypervisor/ch-config/src/ch_api.rs b/src/runtime-rs/crates/hypervisor/ch-config/src/ch_api.rs
index d332a154f..191cb5457 100644
--- a/src/runtime-rs/crates/hypervisor/ch-config/src/ch_api.rs
+++ b/src/runtime-rs/crates/hypervisor/ch-config/src/ch_api.rs
@@ -91,7 +91,7 @@ pub async fn cloud_hypervisor_vm_fs_add(
     mut socket: UnixStream,
     fs_config: FsConfig,
 ) -> Result<Option<String>> {
-    let result = task::spawn_blocking(move || -> Result<Option<String>> {
+    task::spawn_blocking(move || -> Result<Option<String>> {
         let response = simple_api_full_command_and_response(
             &mut socket,
             "PUT",
@@ -102,7 +102,5 @@ pub async fn cloud_hypervisor_vm_fs_add(
 
         Ok(response)
     })
-    .await?;
-
-    result
+    .await?
 }

From 96555186b3eb42b5cb81171c36fc076e8e91696a Mon Sep 17 00:00:00 2001
From: "James O. D. Hunt"
Date: Wed, 8 Mar 2023 15:47:54 +0000
Subject: [PATCH 015/137] runtime-rs: ch: Honour debug setting

Enable Cloud Hypervisor debug based on the specified configuration
rather than hard-coding debug to be disabled.

Signed-off-by: James O. D. Hunt
---
 .../crates/hypervisor/src/ch/inner_hypervisor.rs | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/runtime-rs/crates/hypervisor/src/ch/inner_hypervisor.rs b/src/runtime-rs/crates/hypervisor/src/ch/inner_hypervisor.rs
index 3a2631c35..04fd72494 100644
--- a/src/runtime-rs/crates/hypervisor/src/ch/inner_hypervisor.rs
+++ b/src/runtime-rs/crates/hypervisor/src/ch/inner_hypervisor.rs
@@ -229,7 +229,13 @@ impl CloudHypervisorInner {
     async fn cloud_hypervisor_launch(&mut self, _timeout_secs: i32) -> Result<()> {
         self.cloud_hypervisor_ensure_not_launched().await?;
 
-        let debug = false;
+        let cfg = self
+            .config
+            .as_ref()
+            .ok_or("no hypervisor config for CH")
+            .map_err(|e| anyhow!(e))?;
+
+        let debug = cfg.debug_info.enable_debug;
 
         let disable_seccomp = true;

From ac585886821ebde412eb75436e2bf99b57c0b204 Mon Sep 17 00:00:00 2001
From: "James O. D.
Hunt" Date: Wed, 8 Mar 2023 15:49:03 +0000 Subject: [PATCH 016/137] runtime-rs: ch: Generate Cloud Hypervisor config for confidential guests This change provides a preliminary implementation for the Cloud Hypervisor (CH) feature ([currently disabled](https://github.com/kata-containers/kata-containers/pull/6201)) to allow it to generate the CH configuration for handling confidential guests. This change also introduces concrete errors using the `thiserror` crate (see `src/runtime-rs/crates/hypervisor/ch-config/src/errors.rs`) and a lot of unit tests for the conversion code that generates the CH configuration from the generic Hypervisor configuration. Fixes: #6430. Signed-off-by: James O. D. Hunt --- src/runtime-rs/Cargo.lock | 1 + .../crates/hypervisor/ch-config/Cargo.toml | 1 + .../hypervisor/ch-config/src/convert.rs | 1767 ++++++++++++++++- .../crates/hypervisor/ch-config/src/errors.rs | 107 + .../crates/hypervisor/ch-config/src/lib.rs | 8 +- .../hypervisor/src/ch/inner_hypervisor.rs | 21 + 6 files changed, 1806 insertions(+), 99 deletions(-) create mode 100644 src/runtime-rs/crates/hypervisor/ch-config/src/errors.rs diff --git a/src/runtime-rs/Cargo.lock b/src/runtime-rs/Cargo.lock index caff85873..1a92b385d 100644 --- a/src/runtime-rs/Cargo.lock +++ b/src/runtime-rs/Cargo.lock @@ -423,6 +423,7 @@ dependencies = [ "nix 0.26.2", "serde", "serde_json", + "thiserror", "tokio", ] diff --git a/src/runtime-rs/crates/hypervisor/ch-config/Cargo.toml b/src/runtime-rs/crates/hypervisor/ch-config/Cargo.toml index a51370999..10ed105e3 100644 --- a/src/runtime-rs/crates/hypervisor/ch-config/Cargo.toml +++ b/src/runtime-rs/crates/hypervisor/ch-config/Cargo.toml @@ -23,3 +23,4 @@ api_client = { git = "https://github.com/cloud-hypervisor/cloud-hypervisor", cra kata-types = { path = "../../../../libs/kata-types"} nix = "0.26.2" +thiserror = "1.0.38" diff --git a/src/runtime-rs/crates/hypervisor/ch-config/src/convert.rs b/src/runtime-rs/crates/hypervisor/ch-config/src/convert.rs index f0f5e88e8..809c26052 100644 --- a/src/runtime-rs/crates/hypervisor/ch-config/src/convert.rs +++ b/src/runtime-rs/crates/hypervisor/ch-config/src/convert.rs @@ -6,8 +6,8 @@ use crate::net_util::MAC_ADDR_LEN; use crate::NamedHypervisorConfig; use crate::VmConfig; use crate::{ - ConsoleConfig, ConsoleOutputMode, CpuFeatures, CpuTopology, CpusConfig, MacAddr, MemoryConfig, - PayloadConfig, PmemConfig, RngConfig, VsockConfig, + ConsoleConfig, ConsoleOutputMode, CpuFeatures, CpuTopology, CpusConfig, DiskConfig, MacAddr, + MemoryConfig, PayloadConfig, PlatformConfig, PmemConfig, RngConfig, VsockConfig, }; use anyhow::{anyhow, Context, Result}; use kata_types::config::default::DEFAULT_CH_ENTROPY_SOURCE; @@ -17,6 +17,8 @@ use std::convert::TryFrom; use std::fmt::Display; use std::path::PathBuf; +use crate::errors::*; + // 1 MiB const MIB: u64 = 1024 * 1024; @@ -24,75 +26,115 @@ const PMEM_ALIGN_BYTES: u64 = 2 * MIB; const DEFAULT_CH_MAX_PHYS_BITS: u8 = 46; +const DEFAULT_VSOCK_CID: u64 = 3; + impl TryFrom for VmConfig { - type Error = anyhow::Error; + type Error = VmConfigError; fn try_from(n: NamedHypervisorConfig) -> Result { - let kernel_params = n.kernel_params; + let kernel_params = if n.kernel_params.is_empty() { + None + } else { + Some(n.kernel_params) + }; + let cfg = n.cfg; - let vsock_socket_path = n.vsock_socket_path; - let sandbox_path = n.sandbox_path; + + let debug = cfg.debug_info.enable_debug; + let confidential_guest = cfg.security_info.confidential_guest; + + let tdx_enabled = n.tdx_enabled; + + let 
vsock_socket_path = if n.vsock_socket_path.is_empty() { + return Err(VmConfigError::EmptyVsockSocketPath); + } else { + n.vsock_socket_path + }; + + let sandbox_path = if n.sandbox_path.is_empty() { + return Err(VmConfigError::EmptySandboxPath); + } else { + n.sandbox_path + }; + let fs = n.shared_fs_devices; - let cpus = CpusConfig::try_from(cfg.cpu_info)?; + let cpus = CpusConfig::try_from(cfg.cpu_info).map_err(VmConfigError::CPUError)?; - let rng = RngConfig::try_from(cfg.machine_info)?; + let rng = RngConfig::from(cfg.machine_info); // Note how CH handles the different image types: // - // - An image is specified in PmemConfig. + // - A standard image is specified in PmemConfig. // - An initrd/initramfs is specified in PayloadConfig. + // - A confidential guest image is specified by a DiskConfig. + // - If TDX is enabled, the firmware (`td-shim` [1]) must be + // specified in PayloadConfig. + // - A confidential guest initrd is specified by a PayloadConfig with + // firmware. + // + // [1] - https://github.com/confidential-containers/td-shim let boot_info = cfg.boot_info; let use_initrd = !boot_info.initrd.is_empty(); let use_image = !boot_info.image.is_empty(); if use_initrd && use_image { - return Err(anyhow!("cannot specify image and initrd")); + return Err(VmConfigError::MultipleBootFiles); } if !use_initrd && !use_image { - return Err(anyhow!("missing boot file (no image or initrd)")); + return Err(VmConfigError::NoBootFile); } - let initrd = if use_initrd { - Some(PathBuf::from(boot_info.initrd.clone())) - } else { + let pmem = if use_initrd || confidential_guest { None - }; + } else { + let pmem = PmemConfig::try_from(&boot_info).map_err(VmConfigError::PmemError)?; - let pmem = if use_initrd { - None - } else { - let pmem = PmemConfig::try_from(&boot_info)?; Some(vec![pmem]) }; - let payload = PayloadConfig::try_from((boot_info, kernel_params, initrd))?; + let payload = Some( + PayloadConfig::try_from((boot_info.clone(), kernel_params, tdx_enabled)) + .map_err(VmConfigError::PayloadError)?, + ); - let serial = get_serial_cfg()?; - let console = get_console_cfg()?; + let disks = if confidential_guest && use_image { + let disk = DiskConfig::try_from(boot_info).map_err(VmConfigError::DiskError)?; - let memory = MemoryConfig::try_from(cfg.memory_info)?; - - std::fs::create_dir_all(sandbox_path).context("failed to create sandbox path")?; - - let vsock = VsockConfig { - cid: 3, - socket: PathBuf::from(vsock_socket_path), - ..Default::default() + Some(vec![disk]) + } else { + None }; + let serial = get_serial_cfg(debug, confidential_guest); + let console = get_console_cfg(debug, confidential_guest); + + let memory = MemoryConfig::try_from((cfg.memory_info, confidential_guest)) + .map_err(VmConfigError::MemoryError)?; + + std::fs::create_dir_all(sandbox_path.clone()) + .map_err(|e| VmConfigError::SandboxError(sandbox_path, e.to_string()))?; + + let vsock = VsockConfig::try_from((vsock_socket_path, DEFAULT_VSOCK_CID)) + .map_err(VmConfigError::VsockError)?; + + let platform = get_platform_cfg(tdx_enabled); + let cfg = VmConfig { cpus, memory, serial, console, - payload: Some(payload), + payload, fs, pmem, + disks, vsock: Some(vsock), rng, + platform, + ..Default::default() }; @@ -100,30 +142,71 @@ impl TryFrom for VmConfig { } } -impl TryFrom for MemoryConfig { - type Error = anyhow::Error; +impl TryFrom<(String, u64)> for VsockConfig { + type Error = VsockConfigError; - fn try_from(mem: MemoryInfo) -> Result { - let sysinfo = nix::sys::sysinfo::sysinfo()?; + fn try_from(args: (String, 
u64)) -> Result { + let vsock_socket_path = args.0; + let cid = args.1; + + let path = if vsock_socket_path.is_empty() { + return Err(VsockConfigError::NoVsockSocketPath); + } else { + vsock_socket_path + }; + + let cfg = VsockConfig { + cid, + socket: PathBuf::from(path), + + ..Default::default() + }; + + Ok(cfg) + } +} + +impl TryFrom<(MemoryInfo, bool)> for MemoryConfig { + type Error = MemoryConfigError; + + fn try_from(args: (MemoryInfo, bool)) -> Result { + let mem = args.0; + let confidential_guest = args.1; + + if mem.default_memory == 0 { + return Err(MemoryConfigError::NoDefaultMemory); + } + + let sysinfo = nix::sys::sysinfo::sysinfo().map_err(MemoryConfigError::SysInfoFail)?; let max_mem_bytes = sysinfo.ram_total(); let mem_bytes: u64 = MIB .checked_mul(mem.default_memory as u64) - .ok_or("cannot convert default memory to bytes") - .map_err(|e| anyhow!(e))?; + .ok_or(()) + .map_err(|_| MemoryConfigError::BadDefaultMemSize(mem.default_memory))?; - // The amount of memory that can be hot-plugged is the total less the - // amount allocated at VM start. - let hotplug_size_bytes = max_mem_bytes - .checked_sub(mem_bytes) - .ok_or("failed to calculate max hotplug size for CH") - .map_err(|e| anyhow!(e))?; + if mem_bytes > max_mem_bytes { + return Err(MemoryConfigError::DefaultMemSizeTooBig); + } - let aligned_hotplug_size_bytes = - checked_next_multiple_of(hotplug_size_bytes, PMEM_ALIGN_BYTES) - .ok_or("cannot handle pmem alignment for CH") - .map_err(|e| anyhow!(e))?; + let hotplug_size = if confidential_guest { + None + } else { + // The amount of memory that can be hot-plugged is the total less the + // amount allocated at VM start. + let hotplug_size_bytes = max_mem_bytes + .checked_sub(mem_bytes) + .ok_or(()) + .map_err(|_| MemoryConfigError::BadMemSizeForHotplug(max_mem_bytes))?; + + let aligned_hotplug_size_bytes = + checked_next_multiple_of(hotplug_size_bytes, PMEM_ALIGN_BYTES) + .ok_or(()) + .map_err(|_| MemoryConfigError::BadPmemAlign(hotplug_size_bytes))?; + + Some(aligned_hotplug_size_bytes) + }; let cfg = MemoryConfig { size: mem_bytes, @@ -131,7 +214,7 @@ impl TryFrom for MemoryConfig { // Required shared: true, - hotplug_size: Some(aligned_hotplug_size_bytes), + hotplug_size, ..Default::default() }; @@ -155,26 +238,32 @@ fn checked_next_multiple_of(value: u64, multiple: u64) -> Option { } impl TryFrom for CpusConfig { - type Error = anyhow::Error; + type Error = CpusConfigError; fn try_from(cpu: CpuInfo) -> Result { - let boot_vcpus = u8::try_from(cpu.default_vcpus)?; - let max_vcpus = u8::try_from(cpu.default_maxvcpus)?; + let boot_vcpus = + u8::try_from(cpu.default_vcpus).map_err(CpusConfigError::BootVCPUsTooBig)?; + + let max_vcpus = + u8::try_from(cpu.default_maxvcpus).map_err(CpusConfigError::MaxVCPUsTooBig)?; let topology = CpuTopology { - threads_per_core: 1, cores_per_die: max_vcpus, + threads_per_core: 1, dies_per_package: 1, packages: 1, }; let max_phys_bits = DEFAULT_CH_MAX_PHYS_BITS; + let features = CpuFeatures::from(cpu.cpu_features); + let cfg = CpusConfig { boot_vcpus, max_vcpus, max_phys_bits, topology: Some(topology), + features, ..Default::default() }; @@ -183,76 +272,117 @@ impl TryFrom for CpusConfig { } } -impl TryFrom for CpuFeatures { - type Error = anyhow::Error; - +impl From for CpuFeatures { #[cfg(target_arch = "x86_64")] - fn try_from(s: String) -> Result { + fn from(s: String) -> Self { let amx = s.split(',').any(|x| x == "amx"); - let cpu_features = CpuFeatures { amx }; - - Ok(cpu_features) + CpuFeatures { amx } } #[cfg(not(target_arch = 
"x86_64"))] - fn try_from(_s: String) -> Result { - Ok(CpuFeatures::default()) + fn from(_s: String) -> Self { + CpuFeatures::default() } } -// The 2nd tuple element is the space separated kernel parameters list. -// The 3rd tuple element is an optional initramfs image to use. -// This cannot be created only from BootInfo since that contains the -// user-specified kernel parameters only. -impl TryFrom<(BootInfo, String, Option)> for PayloadConfig { - type Error = anyhow::Error; +// - The 2nd tuple element is the space separated final kernel parameters list. +// It is made up of both the CH specific kernel parameters and the user +// specified parameters from BootInfo. +// +// The kernel params cannot be created only from BootInfo since that contains +// the user-specified kernel parameters only. +// +// - The 3rd tuple element determines if TDX is enabled. +// +impl TryFrom<(BootInfo, Option, bool)> for PayloadConfig { + type Error = PayloadConfigError; - fn try_from(args: (BootInfo, String, Option)) -> Result { - let b = args.0; + fn try_from(args: (BootInfo, Option, bool)) -> Result { + let boot_info = args.0; let cmdline = args.1; - let initramfs = args.2; + let tdx_enabled = args.2; - let kernel = PathBuf::from(b.kernel); + // The kernel is always specified here, + // not in the top level VmConfig.kernel. + let kernel = if boot_info.kernel.is_empty() { + return Err(PayloadConfigError::NoKernel); + } else { + PathBuf::from(boot_info.kernel) + }; + + let initramfs = if boot_info.initrd.is_empty() { + None + } else { + Some(PathBuf::from(boot_info.initrd)) + }; + + let firmware = if tdx_enabled { + if boot_info.firmware.is_empty() { + return Err(PayloadConfigError::TDXFirmwareMissing); + } else { + Some(PathBuf::from(boot_info.firmware)) + } + } else if boot_info.firmware.is_empty() { + None + } else { + Some(PathBuf::from(boot_info.firmware)) + }; let payload = PayloadConfig { kernel: Some(kernel), - cmdline: Some(cmdline), initramfs, - - ..Default::default() + cmdline, + firmware, }; Ok(payload) } } -impl TryFrom for RngConfig { - type Error = anyhow::Error; +impl TryFrom for DiskConfig { + type Error = DiskConfigError; - fn try_from(m: MachineInfo) -> Result { + fn try_from(boot_info: BootInfo) -> Result { + let path = if boot_info.image.is_empty() { + return Err(DiskConfigError::MissingPath); + } else { + PathBuf::from(boot_info.image) + }; + + let disk = DiskConfig { + path: Some(path), + readonly: true, + + ..Default::default() + }; + + Ok(disk) + } +} + +impl From for RngConfig { + fn from(m: MachineInfo) -> Self { let entropy_source = if !m.entropy_source.is_empty() { m.entropy_source } else { DEFAULT_CH_ENTROPY_SOURCE.to_string() }; - let rng = RngConfig { + RngConfig { src: PathBuf::from(entropy_source), ..Default::default() - }; - - Ok(rng) + } } } impl TryFrom<&BootInfo> for PmemConfig { - type Error = anyhow::Error; + type Error = PmemConfigError; fn try_from(b: &BootInfo) -> Result { let file = if b.image.is_empty() { - return Err(anyhow!("CH PmemConfig only used for images")); + return Err(PmemConfigError::MissingImage); } else { b.image.clone() }; @@ -268,24 +398,52 @@ impl TryFrom<&BootInfo> for PmemConfig { } } -fn get_serial_cfg() -> Result { - let cfg = ConsoleConfig { - file: None, - mode: ConsoleOutputMode::Tty, - iommu: false, +fn get_serial_cfg(debug: bool, confidential_guest: bool) -> ConsoleConfig { + let mode = if confidential_guest { + ConsoleOutputMode::Off + } else if debug { + ConsoleOutputMode::Tty + } else { + ConsoleOutputMode::Off }; - Ok(cfg) + 
ConsoleConfig { + file: None, + mode, + iommu: false, + } } -fn get_console_cfg() -> Result { - let cfg = ConsoleConfig { - file: None, - mode: ConsoleOutputMode::Off, - iommu: false, +fn get_console_cfg(debug: bool, confidential_guest: bool) -> ConsoleConfig { + let mode = if confidential_guest { + if debug { + ConsoleOutputMode::Tty + } else { + ConsoleOutputMode::Off + } + } else { + ConsoleOutputMode::Off }; - Ok(cfg) + ConsoleConfig { + file: None, + mode, + iommu: false, + } +} + +fn get_platform_cfg(tdx_enabled: bool) -> Option { + if tdx_enabled { + let platform = PlatformConfig { + tdx: true, + + ..Default::default() + }; + + Some(platform) + } else { + None + } } #[allow(dead_code)] @@ -322,3 +480,1422 @@ where Ok(MacAddr { bytes }) } + +#[cfg(test)] +mod tests { + use super::*; + use kata_types::config::hypervisor::{Hypervisor as HypervisorConfig, SecurityInfo}; + + // Generate a valid generic memory info object and a valid CH specific + // memory config object. + fn make_memory_objects( + default_memory_mib: u32, + usable_max_mem_bytes: u64, + confidential_guest: bool, + ) -> (MemoryInfo, MemoryConfig) { + let mem_info = MemoryInfo { + default_memory: default_memory_mib, + + ..Default::default() + }; + + let hotplug_size = if confidential_guest { + None + } else { + checked_next_multiple_of( + usable_max_mem_bytes - (default_memory_mib as u64 * MIB), + PMEM_ALIGN_BYTES, + ) + }; + + let mem_cfg = MemoryConfig { + size: default_memory_mib as u64 * MIB, + shared: true, + hotplug_size, + + ..Default::default() + }; + + (mem_info, mem_cfg) + } + + // The "default" sent to CH but without "cores_per_die" + // to allow the tests to set that value explicitly. + fn make_bare_topology() -> CpuTopology { + CpuTopology { + threads_per_core: 1, + dies_per_package: 1, + packages: 1, + + ..Default::default() + } + } + + fn make_cpu_objects(cpu_default: u8, cpu_max: u8) -> (CpuInfo, CpusConfig) { + let cpu_info = CpuInfo { + default_vcpus: cpu_default as i32, + default_maxvcpus: cpu_max as u32, + + ..Default::default() + }; + + let cpus_config = CpusConfig { + boot_vcpus: cpu_default, + max_vcpus: cpu_max, + topology: Some(CpuTopology { + cores_per_die: cpu_max, + + ..make_bare_topology() + }), + max_phys_bits: DEFAULT_CH_MAX_PHYS_BITS, + + ..Default::default() + }; + + (cpu_info, cpus_config) + } + + fn make_bootinfo_pmemconfig_objects(image: &str) -> (BootInfo, PmemConfig) { + let boot_info = BootInfo { + image: image.to_string(), + + ..Default::default() + }; + + let pmem_config = PmemConfig { + file: PathBuf::from(image), + discard_writes: true, + + ..Default::default() + }; + + (boot_info, pmem_config) + } + + fn make_bootinfo_diskconfig_objects(path: &str) -> (BootInfo, DiskConfig) { + let boot_info = BootInfo { + image: path.to_string(), + + ..Default::default() + }; + + let disk_config = DiskConfig { + path: Some(PathBuf::from(path)), + readonly: true, + + ..Default::default() + }; + + (boot_info, disk_config) + } + + // Create BootInfo and PayloadConfig objects for non-TDX scenarios. 
+ fn make_bootinfo_payloadconfig_objects( + kernel: &str, + initramfs: &str, + firmware: Option<&str>, + cmdline: Option, + ) -> (BootInfo, PayloadConfig) { + let boot_info = if let Some(firmware) = firmware { + BootInfo { + kernel: kernel.into(), + initrd: initramfs.into(), + firmware: firmware.into(), + + ..Default::default() + } + } else { + BootInfo { + kernel: kernel.into(), + initrd: initramfs.into(), + + ..Default::default() + } + }; + + let payload_firmware = firmware.map(PathBuf::from); + + let payload_config = PayloadConfig { + kernel: Some(PathBuf::from(kernel)), + initramfs: Some(PathBuf::from(initramfs)), + firmware: payload_firmware, + cmdline, + }; + + (boot_info, payload_config) + } + + fn make_machineinfo_rngconfig_objects(entropy_source: &str) -> (MachineInfo, RngConfig) { + let machine_info = MachineInfo { + entropy_source: entropy_source.to_string(), + + ..Default::default() + }; + + let rng_config = RngConfig { + src: PathBuf::from(entropy_source.to_string()), + + ..Default::default() + }; + + (machine_info, rng_config) + } + + #[test] + fn test_get_serial_cfg() { + #[derive(Debug)] + struct TestData { + debug: bool, + confidential_guest: bool, + result: ConsoleConfig, + } + + let tests = &[ + TestData { + debug: false, + confidential_guest: false, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Off, + iommu: false, + }, + }, + TestData { + debug: true, + confidential_guest: false, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Tty, + iommu: false, + }, + }, + TestData { + debug: false, + confidential_guest: true, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Off, + iommu: false, + }, + }, + TestData { + debug: true, + confidential_guest: true, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Off, + iommu: false, + }, + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = get_serial_cfg(d.debug, d.confidential_guest); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + assert_eq!(result.file, d.result.file, "{}", msg); + assert_eq!(result.iommu, d.result.iommu, "{}", msg); + assert_eq!(result.mode, d.result.mode, "{}", msg); + } + } + + #[test] + fn test_get_console_cfg() { + #[derive(Debug)] + struct TestData { + debug: bool, + confidential_guest: bool, + result: ConsoleConfig, + } + + let tests = &[ + TestData { + debug: false, + confidential_guest: false, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Off, + iommu: false, + }, + }, + TestData { + debug: true, + confidential_guest: false, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Off, + iommu: false, + }, + }, + TestData { + debug: false, + confidential_guest: true, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Off, + iommu: false, + }, + }, + TestData { + debug: true, + confidential_guest: true, + result: ConsoleConfig { + file: None, + mode: ConsoleOutputMode::Tty, + iommu: false, + }, + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = get_console_cfg(d.debug, d.confidential_guest); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + assert_eq!(result, d.result, "{}", msg); + } + } + + #[test] + fn test_get_platform_cfg() { + #[derive(Debug)] + struct TestData { + 
tdx_enabled: bool, + result: Option, + } + + let tests = &[ + TestData { + tdx_enabled: false, + result: None, + }, + TestData { + tdx_enabled: true, + result: Some(PlatformConfig { + tdx: true, + + ..Default::default() + }), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = get_platform_cfg(d.tdx_enabled); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + assert_eq!(result, d.result, "{}", msg); + } + } + + #[test] + fn test_bootinfo_to_pmemconfig() { + #[derive(Debug)] + struct TestData { + boot_info: BootInfo, + result: Result, + } + + let image = "/an/image"; + + let (boot_info_with_image, pmem_config) = make_bootinfo_pmemconfig_objects(image); + + let tests = &[ + TestData { + boot_info: BootInfo::default(), + result: Err(PmemConfigError::MissingImage), + }, + TestData { + boot_info: boot_info_with_image, + result: Ok(pmem_config), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = PmemConfig::try_from(&d.boot_info); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + if d.result.is_err() { + assert!(result.is_err(), "{}", msg); + + assert_eq!( + &result.unwrap_err(), + d.result.as_ref().unwrap_err(), + "{}", + msg + ); + + continue; + } + + assert!(result.is_ok(), "{}", msg); + assert_eq!(&result.unwrap(), d.result.as_ref().unwrap(), "{}", msg); + } + } + + #[test] + fn test_machineinfo_to_rngconfig() { + #[derive(Debug)] + struct TestData { + machine_info: MachineInfo, + result: RngConfig, + } + + let entropy_source = "/dev/foo"; + + let (machine_info, rng_config) = make_machineinfo_rngconfig_objects(entropy_source); + + let tests = &[ + TestData { + machine_info: MachineInfo::default(), + result: RngConfig { + src: PathBuf::from(DEFAULT_CH_ENTROPY_SOURCE.to_string()), + + ..Default::default() + }, + }, + TestData { + machine_info: MachineInfo { + entropy_source: DEFAULT_CH_ENTROPY_SOURCE.to_string(), + + ..Default::default() + }, + result: RngConfig { + src: PathBuf::from(DEFAULT_CH_ENTROPY_SOURCE.to_string()), + + ..Default::default() + }, + }, + TestData { + machine_info: MachineInfo { + entropy_source: entropy_source.to_string(), + + ..Default::default() + }, + result: RngConfig { + src: PathBuf::from(entropy_source.to_string()), + + ..Default::default() + }, + }, + TestData { + machine_info, + result: rng_config, + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = RngConfig::from(d.machine_info.clone()); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + assert_eq!(result, d.result, "{}", msg); + } + } + + #[test] + fn test_string_to_cpufeatures() { + #[derive(Debug)] + struct TestData<'a> { + s: &'a str, + result: CpuFeatures, + } + + let tests = &[ + TestData { + s: "", + result: CpuFeatures::default(), + }, + #[cfg(target_arch = "x86_64")] + TestData { + s: "amx", + result: CpuFeatures { amx: true }, + }, + #[cfg(target_arch = "x86_64")] + TestData { + s: "amxyz", + result: CpuFeatures { amx: false }, + }, + #[cfg(target_arch = "x86_64")] + TestData { + s: "aamx", + result: CpuFeatures { amx: false }, + }, + #[cfg(not(target_arch = "x86_64"))] + TestData { + s: "amx", + result: CpuFeatures::default(), + }, + ]; + 
+ for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = CpuFeatures::from(d.s.to_string()); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + assert_eq!(result, d.result, "{}", msg); + } + } + + #[test] + fn test_bootinfo_to_diskconfig() { + #[derive(Debug)] + struct TestData { + boot_info: BootInfo, + result: Result, + } + + let path = "/some/where"; + + let (boot_info, disk_config) = make_bootinfo_diskconfig_objects(path); + + let tests = &[ + TestData { + boot_info: BootInfo::default(), + result: Err(DiskConfigError::MissingPath), + }, + TestData { + boot_info, + result: Ok(disk_config), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = DiskConfig::try_from(d.boot_info.clone()); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + assert_eq!(result, d.result, "{}", msg); + } + } + + #[test] + fn test_cpuinfo_to_cpusconfig() { + #[derive(Debug)] + struct TestData { + cpu_info: CpuInfo, + result: Result, + } + + let topology = make_bare_topology(); + + let u8_max = std::u8::MAX; + + let (cpu_info, cpus_config) = make_cpu_objects(7, u8_max); + + let tests = &[ + TestData { + cpu_info: CpuInfo::default(), + result: Ok(CpusConfig { + boot_vcpus: 0, + max_vcpus: 0, + topology: Some(CpuTopology { + cores_per_die: 0, + + ..topology + }), + max_phys_bits: DEFAULT_CH_MAX_PHYS_BITS, + + ..Default::default() + }), + }, + TestData { + cpu_info: CpuInfo { + default_vcpus: u8_max as i32, + + ..Default::default() + }, + result: Ok(CpusConfig { + boot_vcpus: u8_max, + max_vcpus: 0, + topology: Some(topology.clone()), + max_phys_bits: DEFAULT_CH_MAX_PHYS_BITS, + + ..Default::default() + }), + }, + TestData { + cpu_info: CpuInfo { + default_vcpus: u8_max as i32 + 1, + + ..Default::default() + }, + result: Err(CpusConfigError::BootVCPUsTooBig( + u8::try_from(u8_max as i32 + 1).unwrap_err(), + )), + }, + TestData { + cpu_info: CpuInfo { + default_maxvcpus: u8_max as u32 + 1, + + ..Default::default() + }, + result: Err(CpusConfigError::MaxVCPUsTooBig( + u8::try_from(u8_max as u32 + 1).unwrap_err(), + )), + }, + TestData { + cpu_info: CpuInfo { + default_vcpus: u8_max as i32, + default_maxvcpus: u8_max as u32, + + ..Default::default() + }, + result: Ok(CpusConfig { + boot_vcpus: u8_max, + max_vcpus: u8_max, + topology: Some(CpuTopology { + cores_per_die: u8_max, + + ..topology + }), + max_phys_bits: DEFAULT_CH_MAX_PHYS_BITS, + + ..Default::default() + }), + }, + TestData { + cpu_info: CpuInfo { + default_vcpus: (u8_max - 1) as i32, + default_maxvcpus: u8_max as u32, + + ..Default::default() + }, + result: Ok(CpusConfig { + boot_vcpus: (u8_max - 1), + max_vcpus: u8_max, + topology: Some(CpuTopology { + cores_per_die: u8_max, + + ..topology + }), + max_phys_bits: DEFAULT_CH_MAX_PHYS_BITS, + + ..Default::default() + }), + }, + TestData { + cpu_info, + result: Ok(cpus_config), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = CpusConfig::try_from(d.cpu_info.clone()); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + if d.result.is_err() { + assert!(result.is_err(), "{}", msg); + + assert_eq!( + &result.unwrap_err(), + d.result.as_ref().unwrap_err(), + "{}", + msg + 
); + continue; + } + + assert!(result.is_ok(), "{}", msg); + assert_eq!(&result.unwrap(), d.result.as_ref().unwrap(), "{}", msg); + } + } + + #[test] + fn test_bootinfo_to_payloadconfig() { + #[derive(Debug)] + struct TestData { + boot_info: BootInfo, + cmdline: Option, + tdx: bool, + result: Result, + } + + let cmdline = "debug foo a=b c=d"; + let kernel = "kernel"; + let firmware = "firmware"; + let initramfs = "initramfs"; + + let (boot_info_with_initrd, payload_config_with_initrd) = + make_bootinfo_payloadconfig_objects( + kernel, + initramfs, + Some(firmware), + Some(cmdline.to_string()), + ); + + let boot_info_without_initrd = BootInfo { + kernel: kernel.into(), + firmware: firmware.into(), + + ..Default::default() + }; + + let payload_config_without_initrd = PayloadConfig { + kernel: Some(PathBuf::from(kernel)), + firmware: Some(PathBuf::from(firmware)), + cmdline: Some(cmdline.into()), + + ..Default::default() + }; + + let tests = &[ + TestData { + boot_info: BootInfo::default(), + cmdline: None, + tdx: false, + result: Err(PayloadConfigError::NoKernel), + }, + TestData { + boot_info: BootInfo { + kernel: kernel.into(), + kernel_params: String::new(), + initrd: initramfs.into(), + + ..Default::default() + }, + cmdline: None, + tdx: false, + result: Ok(PayloadConfig { + kernel: Some(PathBuf::from(kernel)), + cmdline: None, + initramfs: Some(PathBuf::from(initramfs)), + + ..Default::default() + }), + }, + TestData { + boot_info: BootInfo { + kernel: kernel.into(), + kernel_params: cmdline.to_string(), + initrd: initramfs.into(), + + ..Default::default() + }, + cmdline: Some(cmdline.to_string()), + tdx: false, + result: Ok(PayloadConfig { + kernel: Some(PathBuf::from(kernel)), + initramfs: Some(PathBuf::from(initramfs)), + cmdline: Some(cmdline.to_string()), + + ..Default::default() + }), + }, + TestData { + boot_info: BootInfo { + kernel: kernel.into(), + initrd: initramfs.into(), + + ..Default::default() + }, + cmdline: None, + tdx: true, + result: Err(PayloadConfigError::TDXFirmwareMissing), + }, + TestData { + boot_info: boot_info_with_initrd, + cmdline: Some(cmdline.to_string()), + tdx: true, + result: Ok(payload_config_with_initrd), + }, + TestData { + boot_info: boot_info_without_initrd, + cmdline: Some(cmdline.to_string()), + tdx: true, + result: Ok(payload_config_without_initrd), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = PayloadConfig::try_from((d.boot_info.clone(), d.cmdline.clone(), d.tdx)); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + if d.result.is_err() { + assert!(result.is_err(), "{}", msg); + + assert_eq!( + &result.unwrap_err(), + d.result.as_ref().unwrap_err(), + "{}", + msg + ); + continue; + } + + assert!(result.is_ok(), "{}", msg); + assert_eq!(&result.unwrap(), d.result.as_ref().unwrap(), "{}", msg); + } + } + + #[test] + fn test_memoryinfo_to_memoryconfig() { + #[derive(Debug)] + struct TestData { + mem_info: MemoryInfo, + confidential_guest: bool, + result: Result, + } + + let sysinfo = nix::sys::sysinfo::sysinfo().unwrap(); + + let actual_max_mem_bytes = sysinfo.ram_total(); + + // Calculate the available MiB value + let max_mem_mib = actual_max_mem_bytes.checked_div(MIB).unwrap(); + + // Undo the operation to get back to the usable amount of max memory + // bytes. 
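+        // Worked example (figures assumed for illustration): with
+        // 17_179_869_484 bytes of RAM (16 GiB plus 300 bytes),
+        // max_mem_mib = 17_179_869_484 / 1_048_576 = 16_384 (remainder
+        // dropped), so usable_max_mem_bytes = 16_384 * 1_048_576
+        // = 17_179_869_184 bytes.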
+ let usable_max_mem_bytes = MIB.checked_mul(max_mem_mib).unwrap(); + + let (mem_info_std, mem_cfg_std) = make_memory_objects(79, usable_max_mem_bytes, false); + let (mem_info_confidential_guest, mem_cfg_confidential_guest) = + make_memory_objects(79, usable_max_mem_bytes, true); + + let tests = &[ + TestData { + mem_info: MemoryInfo::default(), + confidential_guest: false, + result: Err(MemoryConfigError::NoDefaultMemory), + }, + TestData { + mem_info: MemoryInfo { + default_memory: 17, + + ..Default::default() + }, + confidential_guest: true, + result: Ok(MemoryConfig { + size: (17 * MIB), + shared: true, + hotplug_size: None, + + ..Default::default() + }), + }, + TestData { + mem_info: MemoryInfo { + default_memory: max_mem_mib as u32, + + ..Default::default() + }, + confidential_guest: true, + result: Ok(MemoryConfig { + size: usable_max_mem_bytes, + shared: true, + hotplug_size: None, + + ..Default::default() + }), + }, + TestData { + mem_info: MemoryInfo { + default_memory: (max_mem_mib + 1) as u32, + + ..Default::default() + }, + confidential_guest: true, + result: Err(MemoryConfigError::DefaultMemSizeTooBig), + }, + TestData { + mem_info: MemoryInfo { + default_memory: 1024, + + ..Default::default() + }, + confidential_guest: false, + result: Ok(MemoryConfig { + size: 1024_u64 * MIB, + shared: true, + hotplug_size: checked_next_multiple_of( + usable_max_mem_bytes - (1024 * MIB), + PMEM_ALIGN_BYTES, + ), + + ..Default::default() + }), + }, + TestData { + mem_info: mem_info_std, + confidential_guest: false, + result: Ok(mem_cfg_std), + }, + TestData { + mem_info: mem_info_confidential_guest, + confidential_guest: true, + result: Ok(mem_cfg_confidential_guest), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = MemoryConfig::try_from((d.mem_info.clone(), d.confidential_guest)); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + if d.result.is_err() { + assert!(result.is_err(), "{}", msg); + + assert_eq!( + &result.unwrap_err(), + d.result.as_ref().unwrap_err(), + "{}", + msg + ); + continue; + } + + assert!(result.is_ok(), "{}", msg); + assert_eq!(&result.unwrap(), d.result.as_ref().unwrap(), "{}", msg); + } + } + + #[test] + fn test_vsock_config() { + #[derive(Debug)] + struct TestData<'a> { + vsock_socket_path: &'a str, + cid: u64, + result: Result, + } + + let tests = &[ + TestData { + vsock_socket_path: "", + cid: 0, + result: Err(VsockConfigError::NoVsockSocketPath), + }, + TestData { + vsock_socket_path: "vsock_socket_path", + cid: DEFAULT_VSOCK_CID, + result: Ok(VsockConfig { + socket: PathBuf::from("vsock_socket_path"), + cid: DEFAULT_VSOCK_CID, + + ..Default::default() + }), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = VsockConfig::try_from((d.vsock_socket_path.to_string(), d.cid)); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + if d.result.is_err() { + assert!(result.is_err(), "{}", msg); + + assert_eq!( + &result.unwrap_err(), + d.result.as_ref().unwrap_err(), + "{}", + msg + ); + continue; + } + + assert!(result.is_ok(), "{}", msg); + assert_eq!(&result.unwrap(), d.result.as_ref().unwrap(), "{}", msg); + } + } + + #[test] + fn test_named_hypervisor_config_to_vmconfig() { + #[derive(Debug)] + struct TestData { + cfg: NamedHypervisorConfig, + result: 
Result, + } + + let u8_max = std::u8::MAX; + let sysinfo = nix::sys::sysinfo::sysinfo().unwrap(); + + let actual_max_mem_bytes = sysinfo.ram_total(); + + // Calculate the available MiB value + let max_mem_mib = actual_max_mem_bytes.checked_div(MIB).unwrap(); + + // Undo the operation to get back to the usable amount of max memory + // bytes. + let usable_max_mem_bytes = MIB.checked_mul(max_mem_mib).unwrap(); + + let image = "image"; + let initramfs = "initramfs"; + let kernel = "kernel"; + let firmware = "firmware"; + + let entropy_source = "entropy_source"; + let sandbox_path = "sandbox_path"; + let vsock_socket_path = "vsock_socket_path"; + + let valid_vsock = + VsockConfig::try_from((vsock_socket_path.to_string(), DEFAULT_VSOCK_CID)).unwrap(); + + let (cpu_info, cpus_config) = make_cpu_objects(7, u8_max); + + let (memory_info_std, mem_config_std) = + make_memory_objects(79, usable_max_mem_bytes, false); + + let (memory_info_confidential_guest, mem_config_confidential_guest) = + make_memory_objects(79, usable_max_mem_bytes, true); + + let (_, pmem_config_with_image) = make_bootinfo_pmemconfig_objects(image); + let (machine_info, rng_config) = make_machineinfo_rngconfig_objects(entropy_source); + + let payload_firmware = None; + + let (boot_info_with_initrd, payload_config_with_initrd) = + make_bootinfo_payloadconfig_objects(kernel, initramfs, payload_firmware, None); + + let (boot_info_confidential_guest_image, disk_config_confidential_guest_image) = + make_bootinfo_diskconfig_objects(image); + + let boot_info_confidential_guest_initrd = BootInfo { + kernel: kernel.to_string(), + initrd: initramfs.to_string(), + + ..Default::default() + }; + + let boot_info_tdx_image = BootInfo { + kernel: kernel.to_string(), + image: image.to_string(), + firmware: firmware.to_string(), + + ..Default::default() + }; + + let boot_info_tdx_initrd = BootInfo { + kernel: kernel.to_string(), + initrd: initramfs.to_string(), + firmware: firmware.to_string(), + + ..Default::default() + }; + + let payload_config_confidential_guest_initrd = PayloadConfig { + kernel: Some(PathBuf::from(kernel)), + initramfs: Some(PathBuf::from(initramfs)), + + ..Default::default() + }; + + // XXX: Note that the image is defined in a DiskConfig! 
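+        // In other words: with TDX, the rootfs image travels as a
+        // virtio-block DiskConfig (created above via
+        // make_bootinfo_diskconfig_objects), so the PayloadConfig for an
+        // image boot carries only the firmware and the kernel.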
+ let payload_config_tdx_for_image = PayloadConfig { + firmware: Some(PathBuf::from(firmware)), + kernel: Some(PathBuf::from(kernel)), + + ..Default::default() + }; + + let payload_config_tdx_initrd = PayloadConfig { + firmware: Some(PathBuf::from(firmware)), + initramfs: Some(PathBuf::from(initramfs)), + kernel: Some(PathBuf::from(kernel)), + + ..Default::default() + }; + + //------------------------------ + + let hypervisor_cfg_with_image_and_kernel = HypervisorConfig { + cpu_info: cpu_info.clone(), + memory_info: memory_info_std.clone(), + boot_info: BootInfo { + image: image.to_string(), + kernel: kernel.to_string(), + + ..Default::default() + }, + machine_info: machine_info.clone(), + + ..Default::default() + }; + + let hypervisor_cfg_with_initrd = HypervisorConfig { + cpu_info: cpu_info.clone(), + memory_info: memory_info_std, + boot_info: boot_info_with_initrd, + machine_info: machine_info.clone(), + + ..Default::default() + }; + + let security_info_confidential_guest = SecurityInfo { + confidential_guest: true, + + ..Default::default() + }; + + let hypervisor_cfg_confidential_guest_image = HypervisorConfig { + cpu_info: cpu_info.clone(), + memory_info: memory_info_confidential_guest.clone(), + boot_info: BootInfo { + kernel: kernel.to_string(), + + ..boot_info_confidential_guest_image + }, + machine_info: machine_info.clone(), + security_info: security_info_confidential_guest.clone(), + + ..Default::default() + }; + + let hypervisor_cfg_confidential_guest_initrd = HypervisorConfig { + cpu_info: cpu_info.clone(), + memory_info: memory_info_confidential_guest.clone(), + boot_info: boot_info_confidential_guest_initrd, + machine_info: machine_info.clone(), + security_info: security_info_confidential_guest.clone(), + + ..Default::default() + }; + + let hypervisor_cfg_tdx_image = HypervisorConfig { + cpu_info: cpu_info.clone(), + memory_info: memory_info_confidential_guest.clone(), + boot_info: boot_info_tdx_image, + machine_info: machine_info.clone(), + security_info: security_info_confidential_guest.clone(), + + ..Default::default() + }; + + let hypervisor_cfg_tdx_initrd = HypervisorConfig { + cpu_info, + memory_info: memory_info_confidential_guest, + boot_info: boot_info_tdx_initrd, + machine_info, + security_info: security_info_confidential_guest, + + ..Default::default() + }; + + //------------------------------ + + let vmconfig_with_image_and_kernel = VmConfig { + cpus: cpus_config.clone(), + memory: mem_config_std.clone(), + rng: rng_config.clone(), + vsock: Some(valid_vsock.clone()), + + // rootfs image specific + pmem: Some(vec![pmem_config_with_image]), + + payload: Some(PayloadConfig { + kernel: Some(PathBuf::from(kernel)), + + ..Default::default() + }), + + ..Default::default() + }; + + let vmconfig_with_initrd = VmConfig { + cpus: cpus_config.clone(), + memory: mem_config_std, + rng: rng_config.clone(), + vsock: Some(valid_vsock.clone()), + + // initrd/initramfs specific + payload: Some(payload_config_with_initrd), + + ..Default::default() + }; + + let vmconfig_confidential_guest_image = VmConfig { + cpus: cpus_config.clone(), + memory: mem_config_confidential_guest.clone(), + rng: rng_config.clone(), + vsock: Some(valid_vsock.clone()), + + // Confidential guest image specific + disks: Some(vec![disk_config_confidential_guest_image.clone()]), + + payload: Some(PayloadConfig { + kernel: Some(PathBuf::from(kernel)), + + ..Default::default() + }), + + ..Default::default() + }; + + let vmconfig_confidential_guest_initrd = VmConfig { + cpus: cpus_config.clone(), + memory: 
mem_config_confidential_guest.clone(), + rng: rng_config.clone(), + vsock: Some(valid_vsock.clone()), + + // Confidential guest initrd specific + payload: Some(payload_config_confidential_guest_initrd), + + ..Default::default() + }; + + let platform_config_tdx = get_platform_cfg(true); + + let vmconfig_tdx_image = VmConfig { + cpus: cpus_config.clone(), + memory: mem_config_confidential_guest.clone(), + rng: rng_config.clone(), + vsock: Some(valid_vsock.clone()), + platform: platform_config_tdx.clone(), + + // TDX specific + payload: Some(payload_config_tdx_for_image), + + // Confidential guest + TDX specific + disks: Some(vec![disk_config_confidential_guest_image]), + + ..Default::default() + }; + + let vmconfig_tdx_initrd = VmConfig { + cpus: cpus_config, + memory: mem_config_confidential_guest, + rng: rng_config, + vsock: Some(valid_vsock), + platform: platform_config_tdx, + + // Confidential guest + TDX specific + payload: Some(payload_config_tdx_initrd), + + ..Default::default() + }; + + //------------------------------ + + let named_hypervisor_cfg_with_image_and_kernel = NamedHypervisorConfig { + sandbox_path: sandbox_path.into(), + vsock_socket_path: vsock_socket_path.into(), + + cfg: hypervisor_cfg_with_image_and_kernel, + + ..Default::default() + }; + + let named_hypervisor_cfg_with_initrd = NamedHypervisorConfig { + sandbox_path: sandbox_path.into(), + vsock_socket_path: vsock_socket_path.into(), + + cfg: hypervisor_cfg_with_initrd, + + ..Default::default() + }; + + let named_hypervisor_cfg_confidential_guest_image = NamedHypervisorConfig { + sandbox_path: sandbox_path.into(), + vsock_socket_path: vsock_socket_path.into(), + + cfg: hypervisor_cfg_confidential_guest_image, + + ..Default::default() + }; + + let named_hypervisor_cfg_confidential_guest_initrd = NamedHypervisorConfig { + sandbox_path: sandbox_path.into(), + vsock_socket_path: vsock_socket_path.into(), + + cfg: hypervisor_cfg_confidential_guest_initrd, + + ..Default::default() + }; + + let named_hypervisor_cfg_tdx_image = NamedHypervisorConfig { + sandbox_path: sandbox_path.into(), + vsock_socket_path: vsock_socket_path.into(), + + cfg: hypervisor_cfg_tdx_image, + + tdx_enabled: true, + + ..Default::default() + }; + + let named_hypervisor_cfg_tdx_initrd = NamedHypervisorConfig { + sandbox_path: sandbox_path.into(), + vsock_socket_path: vsock_socket_path.into(), + + cfg: hypervisor_cfg_tdx_initrd, + + tdx_enabled: true, + + ..Default::default() + }; + + //------------------------------ + + let tests = &[ + TestData { + cfg: NamedHypervisorConfig::default(), + result: Err(VmConfigError::EmptyVsockSocketPath), + }, + TestData { + cfg: NamedHypervisorConfig { + vsock_socket_path: "vsock_socket_path".into(), + + ..Default::default() + }, + result: Err(VmConfigError::EmptySandboxPath), + }, + TestData { + cfg: NamedHypervisorConfig { + sandbox_path: "sandbox_path".into(), + + ..Default::default() + }, + result: Err(VmConfigError::EmptyVsockSocketPath), + }, + TestData { + cfg: NamedHypervisorConfig { + sandbox_path: "sandbox_path".into(), + vsock_socket_path: "vsock_socket_path".into(), + cfg: HypervisorConfig::default(), + + ..Default::default() + }, + result: Err(VmConfigError::NoBootFile), + }, + TestData { + cfg: NamedHypervisorConfig { + sandbox_path: "sandbox_path".into(), + vsock_socket_path: "vsock_socket_path".into(), + cfg: HypervisorConfig { + boot_info: BootInfo { + initrd: "initrd".into(), + image: "image".into(), + + ..Default::default() + }, + + ..Default::default() + }, + + ..Default::default() + }, + result: 
Err(VmConfigError::MultipleBootFiles), + }, + TestData { + cfg: named_hypervisor_cfg_with_image_and_kernel, + result: Ok(vmconfig_with_image_and_kernel), + }, + TestData { + cfg: named_hypervisor_cfg_with_initrd, + result: Ok(vmconfig_with_initrd), + }, + TestData { + cfg: named_hypervisor_cfg_confidential_guest_image, + result: Ok(vmconfig_confidential_guest_image), + }, + TestData { + cfg: named_hypervisor_cfg_confidential_guest_initrd, + result: Ok(vmconfig_confidential_guest_initrd), + }, + TestData { + cfg: named_hypervisor_cfg_tdx_image, + result: Ok(vmconfig_tdx_image), + }, + TestData { + cfg: named_hypervisor_cfg_tdx_initrd, + result: Ok(vmconfig_tdx_initrd), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + + let result = VmConfig::try_from(d.cfg.clone()); + + let msg = format!("{}: actual result: {:?}", msg, result); + + if std::env::var("DEBUG").is_ok() { + eprintln!("DEBUG: {}", msg); + } + + if d.result.is_err() { + assert!(result.is_err(), "{}", msg); + + assert_eq!( + &result.unwrap_err(), + d.result.as_ref().unwrap_err(), + "{}", + msg + ); + continue; + } + + assert!(result.is_ok(), "{}", msg); + assert_eq!(&result.unwrap(), d.result.as_ref().unwrap(), "{}", msg); + } + } +} diff --git a/src/runtime-rs/crates/hypervisor/ch-config/src/errors.rs b/src/runtime-rs/crates/hypervisor/ch-config/src/errors.rs new file mode 100644 index 000000000..7e062f5e6 --- /dev/null +++ b/src/runtime-rs/crates/hypervisor/ch-config/src/errors.rs @@ -0,0 +1,107 @@ +// Copyright (c) 2023 Intel Corporation +// +// SPDX-License-Identifier: Apache-2.0 + +use std::convert::TryFrom; +use thiserror::Error; + +#[derive(Error, Debug, PartialEq)] +pub enum VmConfigError { + #[error("empty sandbox path")] + EmptySandboxPath, + + #[error("empty VSOCK socket path")] + EmptyVsockSocketPath, + + #[error("cannot specify image and initrd")] + MultipleBootFiles, + + #[error("missing boot image (no rootfs image or initrd)")] + NoBootFile, + + #[error("CPU config error: {0}")] + CPUError(CpusConfigError), + + #[error("Pmem config error: {0}")] + PmemError(PmemConfigError), + + #[error("Payload config error: {0}")] + PayloadError(PayloadConfigError), + + #[error("Disk config error: {0}")] + DiskError(DiskConfigError), + + #[error("Memory config error: {0}")] + MemoryError(MemoryConfigError), + + // The 2nd arg is actually a std::io::Error but that doesn't implement + // PartialEq, so we convert it to a String. 
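+    //
+    // A hypothetical construction site (identifiers illustrative):
+    //
+    //   VmConfigError::SandboxError(sandbox_path, io_error.to_string())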
+ #[error("Failed to create sandbox path ({0}: {1}")] + SandboxError(String, String), + + #[error("VSOCK config error: {0}")] + VsockError(VsockConfigError), +} + +#[derive(Error, Debug, PartialEq)] +pub enum PmemConfigError { + #[error("Need rootfs image for PmemConfig")] + MissingImage, +} + +#[derive(Error, Debug, PartialEq)] +pub enum DiskConfigError { + #[error("Need path for DiskConfig")] + MissingPath, + + #[error("Found unexpected path for DiskConfig with TDX: {0}")] + UnexpectedPathForTDX(String), +} + +#[derive(Error, Debug, PartialEq)] +pub enum CpusConfigError { + #[error("Too many boot vCPUs specified: {0}")] + BootVCPUsTooBig(>::Error), + + #[error("Too many max vCPUs specified: {0}")] + MaxVCPUsTooBig(>::Error), +} + +#[derive(Error, Debug, PartialEq)] +pub enum PayloadConfigError { + #[error("No kernel specified")] + NoKernel, + + #[error("No initrd/initramfs specified")] + NoInitrd, + + #[error("Need firmware for TDX")] + TDXFirmwareMissing, +} + +#[derive(Error, Debug, PartialEq)] +pub enum MemoryConfigError { + #[error("No default memory specified")] + NoDefaultMemory, + + #[error("Default memory size > available RAM")] + DefaultMemSizeTooBig, + + #[error("Cannot convert default memory to bytes: {0}")] + BadDefaultMemSize(u32), + + #[error("Cannot calculate hotplug memory size from default memory: {0}")] + BadMemSizeForHotplug(u64), + + #[error("Cannot align hotplug memory size from pmem: {0}")] + BadPmemAlign(u64), + + #[error("Failed to query system memory information: {0}")] + SysInfoFail(#[source] nix::errno::Errno), +} + +#[derive(Error, Debug, PartialEq)] +pub enum VsockConfigError { + #[error("Missing VSOCK socket path")] + NoVsockSocketPath, +} diff --git a/src/runtime-rs/crates/hypervisor/ch-config/src/lib.rs b/src/runtime-rs/crates/hypervisor/ch-config/src/lib.rs index 2969e6847..9d6214a77 100644 --- a/src/runtime-rs/crates/hypervisor/ch-config/src/lib.rs +++ b/src/runtime-rs/crates/hypervisor/ch-config/src/lib.rs @@ -17,6 +17,8 @@ pub use net_util::MacAddr; pub const MAX_NUM_PCI_SEGMENTS: u16 = 16; +mod errors; + #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, Default)] pub struct BalloonConfig { pub size: u64, @@ -330,7 +332,6 @@ pub struct PlatformConfig { pub uuid: Option, #[serde(default)] pub oem_strings: Option>, - #[cfg(feature = "tdx")] #[serde(default)] pub tdx: bool, } @@ -425,9 +426,7 @@ pub struct VmConfig { pub fs: Option>, #[serde(skip_serializing_if = "Option::is_none")] pub pmem: Option>, - //#[serde(default = "ConsoleConfig::default_serial")] pub serial: ConsoleConfig, - //#[serde(default = "ConsoleConfig::default_console")] pub console: ConsoleConfig, #[serde(skip_serializing_if = "Option::is_none")] pub devices: Option>, @@ -484,12 +483,13 @@ fn u16_is_zero(v: &u16) -> bool { // Type used to simplify conversion from a generic Hypervisor config // to a CH specific VmConfig. 
-#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub struct NamedHypervisorConfig { pub kernel_params: String, pub sandbox_path: String, pub vsock_socket_path: String, pub cfg: HypervisorConfig, + pub tdx_enabled: bool, pub shared_fs_devices: Option>, } diff --git a/src/runtime-rs/crates/hypervisor/src/ch/inner_hypervisor.rs b/src/runtime-rs/crates/hypervisor/src/ch/inner_hypervisor.rs index 04fd72494..89747d936 100644 --- a/src/runtime-rs/crates/hypervisor/src/ch/inner_hypervisor.rs +++ b/src/runtime-rs/crates/hypervisor/src/ch/inner_hypervisor.rs @@ -68,6 +68,8 @@ impl CloudHypervisorInner { let enable_debug = cfg.debug_info.enable_debug; + let confidential_guest = cfg.security_info.confidential_guest; + // Note that the configuration option hypervisor.block_device_driver is not used. let rootfs_driver = VM_ROOTFS_DRIVER_PMEM; @@ -81,6 +83,18 @@ impl CloudHypervisorInner { let mut rootfs_param = KernelParams::new_rootfs_kernel_params(rootfs_driver, rootfs_type)?; + let mut extra_params = if enable_debug { + if confidential_guest { + KernelParams::from_string("console=hvc0") + } else { + KernelParams::from_string("console=ttyS0,115200n8") + } + } else { + KernelParams::from_string("quiet") + }; + + params.append(&mut extra_params); + // Add the rootfs device params.append(&mut rootfs_param); @@ -121,11 +135,18 @@ impl CloudHypervisorInner { let kernel_params = self.get_kernel_params().await?; + // FIXME: See: + // + // - https://github.com/kata-containers/kata-containers/issues/6383 + // - https://github.com/kata-containers/kata-containers/pull/6257 + let tdx_enabled = false; + let named_cfg = NamedHypervisorConfig { kernel_params, sandbox_path, vsock_socket_path, cfg: hypervisor_config.clone(), + tdx_enabled, shared_fs_devices, }; From 74ec38cf021690295c7f7f6bc565c62e50b5c34b Mon Sep 17 00:00:00 2001 From: Dallas Delaney Date: Thu, 26 Jan 2023 14:58:55 -0800 Subject: [PATCH 017/137] osbuilder: Add support for CBL-Mariner Add osbuilder support to build a rootfs and image based on the CBL-Mariner Linux distro Fixes: #6462 Signed-off-by: Dallas Delaney --- tools/osbuilder/README.md | 14 +++++----- .../rootfs-builder/cbl-mariner/Dockerfile.in | 15 +++++++++++ .../rootfs-builder/cbl-mariner/config.sh | 10 +++++++ .../rootfs-builder/cbl-mariner/rootfs_lib.sh | 26 +++++++++++++++++++ 4 files changed, 58 insertions(+), 7 deletions(-) create mode 100644 tools/osbuilder/rootfs-builder/cbl-mariner/Dockerfile.in create mode 100644 tools/osbuilder/rootfs-builder/cbl-mariner/config.sh create mode 100644 tools/osbuilder/rootfs-builder/cbl-mariner/rootfs_lib.sh diff --git a/tools/osbuilder/README.md b/tools/osbuilder/README.md index 343d2bf60..9415de74e 100644 --- a/tools/osbuilder/README.md +++ b/tools/osbuilder/README.md @@ -80,7 +80,7 @@ filesystem components to generate an initrd. 3. When generating an image, the initrd is extracted to obtain the base rootfs for the image. -Ubuntu is the default distro for building the rootfs, to use a different one, you can set `DISTRO=alpine|clearlinux|debian|ubuntu`. +Ubuntu is the default distro for building the rootfs, to use a different one, you can set `DISTRO=alpine|clearlinux|debian|ubuntu|cbl-mariner`. For example `make USE_DOCKER=true DISTRO=alpine rootfs` will make an Alpine rootfs using Docker. ### Rootfs creation @@ -209,9 +209,9 @@ of the the osbuilder distributions. > Note: this table is not relevant for the dracut build method, since it supports any Linux distribution and architecture where dracut is available. 
-| |Alpine |CentOS Stream |Clear Linux |Debian/Ubuntu | -|-- |-- |-- |-- |-- | -|**ARM64** |:heavy_check_mark:|:heavy_check_mark:| | | -|**PPC64le**| |:heavy_check_mark:| |:heavy_check_mark:| -|**s390x** | |:heavy_check_mark:| |:heavy_check_mark:| -|**x86_64** |:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:| +| |Alpine |CentOS Stream |Clear Linux |Debian/Ubuntu |CBL-Mariner | +|-- |-- |-- |-- |-- |-- | +|**ARM64** |:heavy_check_mark:|:heavy_check_mark:| | | | +|**PPC64le**| |:heavy_check_mark:| |:heavy_check_mark:| | +|**s390x** | |:heavy_check_mark:| |:heavy_check_mark:| | +|**x86_64** |:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:| diff --git a/tools/osbuilder/rootfs-builder/cbl-mariner/Dockerfile.in b/tools/osbuilder/rootfs-builder/cbl-mariner/Dockerfile.in new file mode 100644 index 000000000..6fa29807d --- /dev/null +++ b/tools/osbuilder/rootfs-builder/cbl-mariner/Dockerfile.in @@ -0,0 +1,15 @@ +# Copyright (c) 2023 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +ARG IMAGE_REGISTRY=mcr.microsoft.com +FROM ${IMAGE_REGISTRY}/cbl-mariner/base/core:@OS_VERSION@ + +RUN tdnf -y install \ + ca-certificates \ + build-essential \ + dnf \ + git \ + tar + +@INSTALL_RUST@ diff --git a/tools/osbuilder/rootfs-builder/cbl-mariner/config.sh b/tools/osbuilder/rootfs-builder/cbl-mariner/config.sh new file mode 100644 index 000000000..694124acd --- /dev/null +++ b/tools/osbuilder/rootfs-builder/cbl-mariner/config.sh @@ -0,0 +1,10 @@ +# Copyright (c) 2023 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +OS_NAME=cbl-mariner +OS_VERSION=${OS_VERSION:-2.0} +LIBC="gnu" +PACKAGES="core-packages-base-image ca-certificates" +[ "$AGENT_INIT" = no ] && PACKAGES+=" systemd" +[ "$SECCOMP" = yes ] && PACKAGES+=" libseccomp" diff --git a/tools/osbuilder/rootfs-builder/cbl-mariner/rootfs_lib.sh b/tools/osbuilder/rootfs-builder/cbl-mariner/rootfs_lib.sh new file mode 100644 index 000000000..0288d4d77 --- /dev/null +++ b/tools/osbuilder/rootfs-builder/cbl-mariner/rootfs_lib.sh @@ -0,0 +1,26 @@ +# Copyright (c) 2023 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +build_rootfs() +{ + # Mandatory + local ROOTFS_DIR="$1" + + [ -z "$ROOTFS_DIR" ] && die "need rootfs" + + # In case of support EXTRA packages, use it to allow + # users add more packages to the base rootfs + local EXTRA_PKGS=${EXTRA_PKGS:-""} + + check_root + mkdir -p "${ROOTFS_DIR}" + PKG_MANAGER="tdnf" + + DNF="${PKG_MANAGER} -y --installroot=${ROOTFS_DIR} --noplugins --releasever=${OS_VERSION}" + + info "install packages for rootfs" + $DNF install ${EXTRA_PKGS} ${PACKAGES} + + rm -rf ${ROOTFS_DIR}/usr/share/{bash-completion,cracklib,doc,info,locale,man,misc,pixmaps,terminfo,zoneinfo,zsh} +} From 8b008fc7430740ee64d20410d67a74f91c6306d9 Mon Sep 17 00:00:00 2001 From: xyz-li Date: Thu, 23 Mar 2023 22:39:27 +0800 Subject: [PATCH 018/137] kata-deploy: fix bash semantics error The argument of return must be numeric. 
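To spell the semantics out: `wget ... || return cleanup_and_fail` passes the
literal string "cleanup_and_fail" to `return`, so bash reports "numeric
argument required" and the cleanup never runs. With
`return $(cleanup_and_fail)` the cleanup function runs inside a command
substitution and, assuming it prints nothing (or only a status number), the
bare `return` then propagates the cleanup function's exit status to the
caller.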
Fixes: #6521 Signed-off-by: xyz-li --- .../kata-deploy/local-build/kata-deploy-binaries.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index 0d5ef611c..1da2d4f8e 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -112,9 +112,9 @@ install_cached_tarball_component() { info "Using cached tarball of ${component}" echo "Downloading tarball from: ${jenkins_build_url}/${component_tarball_name}" - wget "${jenkins_build_url}/${component_tarball_name}" || return cleanup_and_fail - wget "${jenkins_build_url}/sha256sum-${component_tarball_name}" || return cleanup_and_fail - sha256sum -c "sha256sum-${component_tarball_name}" || return cleanup_and_fail + wget "${jenkins_build_url}/${component_tarball_name}" || return $(cleanup_and_fail) + wget "${jenkins_build_url}/sha256sum-${component_tarball_name}" || return $(cleanup_and_fail) + sha256sum -c "sha256sum-${component_tarball_name}" || return $(cleanup_and_fail) mv "${component_tarball_name}" "${component_tarball_path}" } From 43dd4440f48333e3f2ab4232d1f291b2470b0ebf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Wed, 22 Mar 2023 21:22:29 +0100 Subject: [PATCH 019/137] snap: Build the artefacts using kata-deploy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Our CI and release process are currently taking advantage of the kata-deploy local build scripts to build the artefacts. Having snap doing the same is the next logical step, and it will also help to reduce, by a lot, the CI time as we only build the components that a PR is touching (otherwise we just pull the cached component). Fixes: #6514 Signed-off-by: Fabiano Fidêncio --- snap/snapcraft.yaml | 270 ++++++-------------------------------------- 1 file changed, 33 insertions(+), 237 deletions(-) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 2b68a7e87..8baf8d7e1 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -34,54 +34,6 @@ parts: mkdir -p $(dirname ${kata_dir}) ln -sf $(realpath "${SNAPCRAFT_STAGE}/..") ${kata_dir} - godeps: - after: [metadata] - plugin: nil - prime: - - -* - build-packages: - - curl - override-build: | - source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh" - - # put everything in stage - cd "${SNAPCRAFT_STAGE}" - - version="$(${yq} r ${kata_dir}/versions.yaml languages.golang.meta.newest-version)" - tarfile="go${version}.${goos}-${goarch}.tar.gz" - curl -LO https://golang.org/dl/${tarfile} - tar -xf ${tarfile} --strip-components=1 - - rustdeps: - after: [metadata] - plugin: nil - prime: - - -* - build-packages: - - curl - override-build: | - source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh" - - # put everything in stage - cd "${SNAPCRAFT_STAGE}" - - version="$(${yq} r ${kata_dir}/versions.yaml languages.rust.meta.newest-version)" - if ! 
command -v rustup > /dev/null; then - curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain ${version} - fi - - export PATH=${PATH}:${HOME}/.cargo/bin - rustup toolchain install ${version} - rustup default ${version} - if [ "${arch}" == "ppc64le" ] || [ "${arch}" == "s390x" ] ; then - [ "${arch}" == "ppc64le" ] && arch="powerpc64le" - rustup target add ${arch}-unknown-linux-gnu - else - rustup target add ${arch}-unknown-linux-musl - $([ "$(whoami)" != "root" ] && echo sudo) ln -sf /usr/bin/g++ /bin/musl-g++ - fi - rustup component add rustfmt - docker: after: [metadata] plugin: nil @@ -114,237 +66,86 @@ parts: sudo -E systemctl start docker || true image: - after: [godeps, docker, qemu, kernel] + after: [docker] plugin: nil - build-packages: - - docker.io - - cpio - - git - - iptables - - software-properties-common - - uidmap - - gnupg2 override-build: | source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh" - [ "${arch}" = "ppc64le" ] || [ "${arch}" = "s390x" ] && sudo apt-get --no-install-recommends install -y protobuf-compiler + cd "${SNAPCRAFT_PROJECT_DIR}" + sudo -E NO_TTY=true make rootfs-image-tarball - if [ -n "$http_proxy" ]; then - echo "Setting proxy $http_proxy" - sudo -E systemctl set-environment http_proxy="$http_proxy" || true - sudo -E systemctl set-environment https_proxy="$https_proxy" || true - fi + tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-rootfs-image.tar.xz" - # Copy yq binary. It's used in the container - cp -a "${yq}" "${GOPATH}/bin/" + tar -xvJpf "${tarfile}" -C "${SNAPCRAFT_PART_INSTALL}" - cd "${kata_dir}/tools/osbuilder" - # build image - export AGENT_INIT=yes - export USE_DOCKER=1 - export DEBUG=1 - initrd_distro=$(${yq} r -X ${kata_dir}/versions.yaml assets.initrd.architecture.${arch}.name) - image_distro=$(${yq} r -X ${kata_dir}/versions.yaml assets.image.architecture.${arch}.name) - case "$arch" in - x86_64) - # In some build systems it's impossible to build a rootfs image, try with the initrd image - sudo -E PATH=$PATH make image DISTRO="${image_distro}" || sudo -E PATH="$PATH" make initrd DISTRO="${initrd_distro}" - ;; + sudo -E NO_TTY=true make rootfs-initrd-tarball - aarch64|ppc64le|s390x) - sudo -E PATH="$PATH" make initrd DISTRO="${initrd_distro}" - ;; + tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-rootfs-initrd.tar.xz" - *) die "unsupported architecture: ${arch}" ;; - esac + tar -xvJpf "${tarfile}" -C "${SNAPCRAFT_PART_INSTALL}" - # Install image - kata_image_dir="${SNAPCRAFT_PART_INSTALL}/usr/share/kata-containers" - mkdir -p "${kata_image_dir}" - cp kata-containers*.img "${kata_image_dir}" runtime: - after: [godeps, image, cloud-hypervisor] + after: [docker] plugin: nil - build-attributes: [no-patchelf] override-build: | source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh" - cd "${kata_dir}/src/runtime" + cd "${SNAPCRAFT_PROJECT_DIR}" + sudo -E NO_TTY=true make shim-v2-tarball - qemu_cmd="qemu-system-${qemu_arch}" + tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-shim-v2.tar.xz" - # build and install runtime - make \ - PREFIX="/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr" \ - SKIP_GO_VERSION_CHECK=1 \ - QEMUCMD="${qemu_cmd}" + tar -xvJpf "${tarfile}" -C "${SNAPCRAFT_PART_INSTALL}" - make install \ - PREFIX=/usr \ - DESTDIR="${SNAPCRAFT_PART_INSTALL}" \ - SKIP_GO_VERSION_CHECK=1 \ - QEMUCMD="${qemu_cmd}" - - if [ ! 
-f ${SNAPCRAFT_PART_INSTALL}/../../image/install/usr/share/kata-containers/kata-containers.img ]; then - sed -i -e "s|^image =.*|initrd = \"/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr/share/kata-containers/kata-containers-initrd.img\"|" \ - ${SNAPCRAFT_PART_INSTALL}/usr/share/defaults/${SNAPCRAFT_PROJECT_NAME}/configuration.toml - fi + mkdir -p "${SNAPCRAFT_PART_INSTALL}/usr/bin" + ln -sf "${SNAPCRAFT_PART_INSTALL}/opt/kata/bin/containerd-shim-kata-v2" "${SNAPCRAFT_PART_INSTALL}/usr/bin/containerd-shim-kata-v2" + ln -sf "${SNAPCRAFT_PART_INSTALL}/opt/kata/bin/kata-runtime" "${SNAPCRAFT_PART_INSTALL}/usr/bin/kata-runtime" + ln -sf "${SNAPCRAFT_PART_INSTALL}/opt/kata/bin/kata-collect-data.sh" "${SNAPCRAFT_PART_INSTALL}/usr/bin/kata-collect-data.sh" kernel: - after: [godeps] + after: [docker] plugin: nil - build-packages: - - libelf-dev - - curl - - build-essential - - bison - - flex override-build: | source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh" - kernel_version="$(${yq} r $versions_file assets.kernel.version)" - #Remove extra 'v' - kernel_version="${kernel_version#v}" + cd "${SNAPCRAFT_PROJECT_DIR}" + sudo -E NO_TTY=true make kernel-tarball - [ "${arch}" = "s390x" ] && sudo apt-get --no-install-recommends install -y libssl-dev + tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-kernel.tar.xz" - cd "${kata_dir}/tools/packaging/kernel" - kernel_dir_prefix="kata-linux-" - - # Setup and build kernel - ./build-kernel.sh -v "${kernel_version}" -d setup - cd ${kernel_dir_prefix}* - make -j $(nproc ${CI:+--ignore 1}) EXTRAVERSION=".container" - - kernel_suffix="${kernel_version}.container" - kata_kernel_dir="${SNAPCRAFT_PART_INSTALL}/usr/share/kata-containers" - mkdir -p "${kata_kernel_dir}" - - # Install bz kernel - make install INSTALL_PATH="${kata_kernel_dir}" EXTRAVERSION=".container" || true - vmlinuz_name="vmlinuz-${kernel_suffix}" - ln -sf "${vmlinuz_name}" "${kata_kernel_dir}/vmlinuz.container" - - # Install raw kernel - vmlinux_path="vmlinux" - [ "${arch}" = "s390x" ] && vmlinux_path="arch/s390/boot/vmlinux" - vmlinux_name="vmlinux-${kernel_suffix}" - cp "${vmlinux_path}" "${kata_kernel_dir}/${vmlinux_name}" - ln -sf "${vmlinux_name}" "${kata_kernel_dir}/vmlinux.container" + tar -xvJpf "${tarfile}" -C "${SNAPCRAFT_PART_INSTALL}" qemu: plugin: make - after: [godeps] - build-packages: - - gcc - - python3 - - zlib1g-dev - - libcap-ng-dev - - libglib2.0-dev - - libpixman-1-dev - - libnuma-dev - - libltdl-dev - - libcap-dev - - libattr1-dev - - libfdt-dev - - curl - - libcapstone-dev - - bc - - libblkid-dev - - libffi-dev - - libmount-dev - - libseccomp-dev - - libselinux1-dev - - ninja-build + after: [docker] override-build: | source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh" - branch="$(${yq} r ${versions_file} assets.hypervisor.qemu.version)" - url="$(${yq} r ${versions_file} assets.hypervisor.qemu.url)" - commit="" - patches_dir="${kata_dir}/tools/packaging/qemu/patches/$(echo ${branch} | sed -e 's/.[[:digit:]]*$//' -e 's/^v//').x" - patches_version_dir="${kata_dir}/tools/packaging/qemu/patches/tag_patches/${branch}" + cd "${SNAPCRAFT_PROJECT_DIR}" + sudo -E NO_TTY=true make qemu-tarball - # download source - qemu_dir="${SNAPCRAFT_STAGE}/qemu" - rm -rf "${qemu_dir}" - git clone --depth 1 --branch ${branch} --single-branch ${url} "${qemu_dir}" - cd "${qemu_dir}" - [ -z "${commit}" ] || git checkout "${commit}" + tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-qemu.tar.xz" - [ -n 
"$(ls -A ui/keycodemapdb)" ] || git clone --depth 1 https://github.com/qemu/keycodemapdb ui/keycodemapdb/ - [ -n "$(ls -A capstone)" ] || git clone --depth 1 https://github.com/qemu/capstone capstone - - # Apply branch patches - [ -d "${patches_version_dir}" ] || mkdir "${patches_version_dir}" - ${kata_dir}/tools/packaging/scripts/apply_patches.sh "${patches_dir}" - ${kata_dir}/tools/packaging/scripts/apply_patches.sh "${patches_version_dir}" - - # Only x86_64 supports libpmem - [ "${arch}" = "x86_64" ] && sudo apt-get --no-install-recommends install -y apt-utils ca-certificates libpmem-dev - - configure_hypervisor="${kata_dir}/tools/packaging/scripts/configure-hypervisor.sh" - chmod +x "${configure_hypervisor}" - # static build. The --prefix, --libdir, --libexecdir, --datadir arguments are - # based on PREFIX and set by configure-hypervisor.sh - echo "$(PREFIX=/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr ${configure_hypervisor} -s kata-qemu) \ - --disable-rbd " \ - | xargs ./configure - - # Copy QEMU configurations (Kconfigs) - case "${branch}" in - "v5.1.0") - cp -a "${kata_dir}"/tools/packaging/qemu/default-configs/* default-configs - ;; - - *) - cp -a "${kata_dir}"/tools/packaging/qemu/default-configs/* configs/devices/ - ;; - esac - - # build and install - make -j $(nproc ${CI:+--ignore 1}) - make install DESTDIR="${SNAPCRAFT_PART_INSTALL}" - prime: - - -snap/ - - -usr/bin/qemu-ga - - -usr/bin/qemu-pr-helper - - -usr/bin/virtfs-proxy-helper - - -usr/include/ - - -usr/share/applications/ - - -usr/share/icons/ - - -usr/var/ - - usr/* - - lib/* - organize: - # Hack: move qemu to / - "snap/kata-containers/current/": "./" + tar -xvJpf "${tarfile}" -C "${SNAPCRAFT_PART_INSTALL}" virtiofsd: plugin: nil - after: [godeps, rustdeps, docker] + after: [docker] override-build: | source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh" - echo "INFO: Building rust version of virtiofsd" - cd "${SNAPCRAFT_PROJECT_DIR}" - # Clean-up build dir in case it already exists sudo -E NO_TTY=true make virtiofsd-tarball - sudo install \ - --owner='root' \ - --group='root' \ - --mode=0755 \ - -D \ - --target-directory="${SNAPCRAFT_PART_INSTALL}/usr/libexec/" \ - build/virtiofsd/builddir/virtiofsd/virtiofsd + tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-virtiofsd.tar.xz" + + tar -xvJpf "${tarfile}" -C "${SNAPCRAFT_PART_INSTALL}" cloud-hypervisor: plugin: nil - after: [godeps, docker] + after: [docker] override-build: | source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh" @@ -353,13 +154,8 @@ parts: sudo -E NO_TTY=true make cloud-hypervisor-tarball tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-cloud-hypervisor.tar.xz" - tmpdir=$(mktemp -d) - tar -xvJpf "${tarfile}" -C "${tmpdir}" - - install -D "${tmpdir}/opt/kata/bin/cloud-hypervisor" "${SNAPCRAFT_PART_INSTALL}/usr/bin/cloud-hypervisor" - - rm -rf "${tmpdir}" + tar -xvJpf "${tarfile}" -C "${SNAPCRAFT_PART_INSTALL}" fi apps: From 4a246309ee4de182fef4a2b54884ebadd2ca9e9f Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Tue, 28 Mar 2023 08:40:06 +0200 Subject: [PATCH 020/137] release: Kata Containers 3.2.0-alpha0 - nydus: upgrad to v2.2.0 - osbuilder: Add support for CBL-Mariner - kata-deploy: Fix bash semantics error - make only_kata work without -f - runtime-rs: ch: Implement confidential guest handling - qemu/arm64: disable image nvdimm once no firmware offered - static checks workflow improvements - A couple of kata-deploy fixes - agent: Bring in VFIO-AP device 
handling again - bugfix: set hostname in CreateSandboxRequest - packaging / kata-deploy builds: Add the ability to cache and consume cached components - versions: Update firecracker version - dependency: update cgroups-rs - Built-in Sandbox: add more unit tests for dragonball. Part 6 - runtime: add support for Hyper-V - runtime-rs: update load_config comment - Add support for ephemeral mounts to occupy entire sandbox's memory - runtime-rs: fix default kernel location and add more default config paths - Implement direct-volume commands handler for shim-mgmt - bugfix: modify tty_win info in runtime when handling ResizePtyRequest - bugfix: add get_ns_path API for Hypervisor - runtime-rs: add the missing default trait - packaging: Simplify get_last_modification() - utils: Make kata-manager.sh runs checks - dragonball: support pmu on aarch64 - docs: fix typo in key filename in AWS installation guide - backport rustjail systemd cgroup fix #6331 to 3.1 - main | kata-deploy: Fix kata deploy arm64 image build error - workflows: Yet more fixes for publishing the kata-deploy payload after every PR merged - rustjail: fix cgroup handling in agent-init mode - runtime/Makefile: Fix install-containerd-shim-v2 dependency - fix wrong notes for func GetSandboxesStoragePathRust() - fix(runtime-rs): add exited state to ensure cleanup - runtime-rs: add oci hook support - utils: Remove kata-manager.sh cgroups v2 check - workflows: Fixes for the `payload-after-push` action - Dragonball: update dependencies - workflows: Do not install docker - workflows: Publish kata-deploy payload after a merge - src: Fixed typo mod.rs - actions: Use `git-diff` to get changes in kernel dir - agent: don't set permission of existing directory in copy_file - runtime: use filepath.Clean() to clean the mount path - Upgrade to Cloud Hypervisor v30.0 - feat(runtime): make static resource management consistent with 2.0 - osbuilder: Include minimal set of device nodes in ubuntu initrd - kata-ctl/exec: add new command exec to enter guest VM. - kernel: Add CONFIG_SEV_GUEST to SEV kernel config - runtime-rs: Improve Cloud Hypervisor config handling - virtiofsd: update to a valid path on ppc64le - runtime-rs: cleanup kata host share path - osbuilder: fix default build target in makefile - devguide: Add link to the contribution guidelines - kata-deploy: Ensure go binaries can run on Ubuntu 20.04 - dragonball: config_manager: preserve device when update - Revert "workflows: Push the builder image to quay.io" - Remove all remaining unsafe impl - kata-deploy: Fix building the kata static firecracker arm64 package occurred an error - shim-v2: Bump Ubuntu container image to 22.04 - packaging: Cache the container used to build the kata-deploy artefacts - utils: always check some dependencies. - versions: Use ubuntu as the default distro for the rootfs-image - github-action: Replace deprecated command with environment file - docs: Change the order of release step - runtime-rs: remove unnecessary Send/Sync trait implement - runtime-rs: Don't build on Power, don't break on Power. 
- runtime-rs: handle sys_dir bind volume - sandbox: set the dns for the sandbox - packaging/shim-v2: Only change the config if the file exists - runtime-rs: Add basic CH implementation - release: Revert kata-deploy changes after 3.1.0-rc0 release 8b008fc74307 kata-deploy: fix bash semantics error 74ec38cf0216 osbuilder: Add support for CBL-Mariner ac585886821e runtime-rs: ch: Generate Cloud Hypervisor config for confidential guests 96555186b3eb runtime-rs: ch: Honour debug setting e3c2d727ba9e runtime-rs: ch: clippy fix ece5edc64133 qemu/arm64: disable image nvdimm if no firmware offered dd23f452ab7f utils: renamed only_kata to skip_containerd 59c81ed2bba1 utils: informed pre-check about only_kata 4f0887ce42a5 kata-deploy: fix install failing to chmod runtime-rs/bin/* 09c4828ac3a9 workflows: add missing artifacts on payload-after-push fbf891fdfff5 packaging: Adapt `get_last_modification()` 82a04dbce179 local-build: Use cached VirtioFS when possible 3b9900489774 local-build: Use cached shim v2 when possible 1b8c5474dab1 local-build: Use cached RootFS when possible 09ce4ab893b2 local-build: Use cached QEMU when possible 1e1c843b8b65 local-build: Use cached Nydus when possible 64832ab65b35 local-build: Use cached Kernel when possible 04fb52f6c9ab local-build: Use cached Firecracker when possible 8a40f6f23498 local-build: Use cached Cloud Hypervisor when possible 194d5dc8a6e9 tools: Add support for caching VirtioFS artefacts a34272cf2042 tools: Add support for caching shim v2 artefacts 7898db5f7902 tools: Add support for caching RootFS artefacts e90891059b03 tools: Add support for caching QEMU artefacts 7aed8f8c80c3 tools: Add support for caching Nydus artefacts cb4cbe29580f tools: Add support for caching Kernel artefacts 762f9f4c3edf tools: Add support for caching Firecracker artefacts 6b1b424fc733 tools: Add support for caching Cloud Hypervisor artefacts 08fe49f708e5 versions: Adjust kernel names to match kata-deploy build targets 99505c0f4f3a versions: Update firecracker version f4938c0d90a1 bugfix: set hostname 96baa8389525 agent: Bring in VFIO-AP device handling again f666f8e2df6b agent: Add VFIO-AP device handling b546eca26f0e runtime: Generalize VFIO devices 4c527d00c7b7 agent: Rename VFIO handling to VFIO PCI handling db89c88f4fcb agent: Use cfg-if for s390x CCW 68a586e52c88 agent: Use a constant for CCW root bus path a8b55bf8746d dependency: update cgroups-rs 97cdba97ea98 runtime-rs: update load_config comment 974a5c22f006 runtime: add support for Hyper-V 40f4eef5355f build: Use the correct kernel name a6c67a161e91 runtime: add support for ephemeral mounts to occupy entire sandbox memory 844bf053b2aa runtime-rs: add the missing default trait e7bca62c32fb bugfix: modify tty_win info in runtime when handling ResizePtyRequest 30e235f0a1ec runtime-rs: impl volume-resize trait for sandbox e029988bc2b7 bugfix: add get_ns_path API for Hypervisor 42b8867148d2 runtime-rs: impl volume-stats trait for sandbox 462d4a1af257 workflows: static-checks: Free disk space before running checks e68186d9af0d workflows: static-checks: Set GOPATH only once 439ff9d4c49e tools/osbuilder/tests: Remove TRAVIS variable 43ce3f7588c6 packaging: Simplify get_last_modification() 33c5c49719ce packaging: Move repo_root_dir to lib.sh 16e2c3cc55b1 agent: implement update_ephemeral_mounts api 3896c7a22bf3 protocol: add updateEphemeralMounts proto 23488312f545 agent: always use cgroupfs when running as init 854638734887 agent: determine value of use_systemd_cgroup before LinuxContainer::new() 736aae47a4d2 rustjail: print 
type of cgroup manager dbae281924b3 workflows: Properly set the kata-tarball architecture 76b4591e2b09 tools: Adjust the build-and-upload-payload.sh script cd2aaeda2a07 kata-deploy: Switch to using an ubuntu image 2d43e131022c docs: fix typo in AWS installation guide 760f78137db0 dragonball: support pmu on aarch64 9bc7bef3d622 kata-deploy: Fix path to the Dockerfile 78ba363f8e81 kata-deploy: Use different images for s390x and aarch64 6267909501a1 kata-deploy: Allow passing BASE_IMAGE_{NAME,TAG} 3443f558a61a nydus: upgrad nydus to v2.2.0 395645e1ce37 runtime: hybrid-mode cause error in the latest nydusd f8e44172f6d1 utils: Make kata-manager.sh runs checks f31c79d21075 workflows: static-checks: Remove TRAVIS_XXX variables 8030e469b220 fix(runtime-rs): add exited state to ensure cleanup 7d292d7fc3e8 workflows: Fix the path of imported workflows e07162e79d15 workflows: Fix action name dd2713521e3a Dragonball: update dependencies bd1ed26c8d0e workflows: Publish kata-deploy payload after a merge fea7e8816fa5 runtime-rs: Fixed typo mod.rs a9e2fc86786e runtime/Makefile: Fix install-containerd-shim-v2 dependency b6880c60d38e logging: Correct the code notes 12cfad485853 runtime-rs: modify the transfer to oci::Hooks 828d467222d4 workflows: Do not install docker 4b8a5a1a3df6 utils: Remove kata-manager.sh cgroups v2 check 2c4428ee0247 runtime-rs: move pre-start hooks to sandbox_start e80c9f7b742d runtime-rs: add StartContainer hook 977f281c5c08 runtime-rs: add CreateContainer hook support 875f2db5284b runtime-rs: add oci hook support ecac3a9e104a docs: add design doc for Hooks 3ac6f29e9544 runtime: clh: Re-generate the client code 262daaa2eff4 versions: Upgrade to Cloud Hypervisor v30.0 192df845885f agent: always use cgroupfs when running as init b0691806f143 agent: determine value of use_systemd_cgroup before LinuxContainer::new() dc86d6dac35f runtime: use filepath.Clean() to clean the mount path c4ef5fd32551 agent: don't set permission of existing directory 3483272bbda5 runtime-rs: ch: Enable initrd usage fbee6c820e73 runtime-rs: Improve Cloud Hypervisor config handling 1bff1ca30adb kernel: Add CONFIG_SEV_GUEST to SEV kernel config Adding kernel config to sev case since it is needed for SNP and SNP will use the SEV kernel. Incrementing kernel config version to reflect changes ad8968c8d99a rustjail: print type of cgroup manager b4a1527aa664 kata-deploy: Fix static shim-v2 build on arm64 2c4f8077fd2e Revert "shim-v2: Bump Ubuntu container image to 22.04" afaccf924d93 Revert "workflows: Push the builder image to quay.io" 4c39c4ef9f42 devguide: Add link to the contribution guidelines 76e926453a02 osbuilder: Include minimal set of device nodes in ubuntu initrd 697ec8e578f3 kata-deploy: Fix kata static firecracker arm64 package build error ced3c9989559 dragonball: config_manager: preserve device when update da8a6417aa21 runtime-rs: remove all remaining unsafe impl 0301194851c0 dragonball: use crossbeam_channel in VmmService instead of mpsc::channel 9d78bf90861b shim-v2: Bump Ubuntu container image to 22.04 3cfce5a7090f utils: improved unsupported distro message. 
919d19f41542 feat(runtime): make static resource management consistent with 2.0 b835c40bbdc1 workflows: Push the builder image to quay.io 781ed2986a25 packaging: Allow passing a container builder to the scripts 45668fae15ac packaging: Use existing image to build td-shim e8c6bfbdeb8f packaging: Use existing image to build td-shim 3fa24f7acce5 packaging: Add infra to push the OVMF builder image f076fa4c770f packaging: Use existing image to build OVMF c7f515172dc2 packaging: Add infra to push the QEMU builder image fb7b86b8e0e3 packaging: Use existing image to build QEMU d0181bb26261 packaging: Add infra to push the virtiofsd builder image 7c93428a1889 packaging: Use existing image to build virtiofsd 8c227e247185 virtiofsd: Pass the expected toolchain to the build container 7ee00d8e5764 packaging: Add infra to push the shim-v2 builder image 24767d82aa5b packaging: Use existing image to build the shim-v2 e84af6a6205e virtiofsd: update to a valid path on ppc64le 6c3c771a52a6 packaging: Add infra to push the kernel builder image b9b23112bf6f packaging: Use existing image to build the kernel 869827d77f62 packaging: Add push_to_registry() e69a6f57493d packaging: Add get_last_modification() 6c05e5c67a0b packaging: Add and export BUILDER_REGISTRY 1047840cf81a utils: always check some dependencies. 95e3364493bd runtime-rs: remove unnecessary Send/Sync trait implement a96ba9923918 actions: Use `git-diff` to get changes in kernel dir 619ef544525d docs: Change the order of release step a161d119208e versions: Use ubuntu as the default distro for the rootfs-image be40683bc592 runtime-rs: Add a generic powerpc64le-options.mk 47c058599a39 packaging/shim-v2: Install the target depending on the arch/libc b582c0db86b3 kata-ctl/exec: add new command exec to enter guest VM. 07802a19dc54 runtime-rs: handle sys_dir bind volume 04e930073c70 sandbox: set the dns for the sandbox 32ebe1895bc2 agent: fix the issue of creating the dns file 44aaec9020f9 github-action: Replace deprecated command with environment file a68c5004f859 packaging/shim-v2: Only change the config if the file exists ee76b398b32b release: Revert kata-deploy changes after 3.1.0-rc0 release bbc733d6c8e6 docs: runtime-rs: Add CH status details 37b594c0d217 runtime-rs: Add basic CH implementation 545151829d51 kata-types: Add Cloud Hypervisor (CH) definitions 2dd2421ad0c7 runtime-rs: cleanup kata host share path 0a21ad78b12d osbuilder: fix default build target in makefile 9a01d4e4469a dragonball: add more unit test for virtio-blk device. Signed-off-by: Greg Kurz --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index a36373c3b..2f81ab203 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.1.0-rc0 +3.2.0-alpha0 From 0f7351556117b57dba1e86a02c8148682bf9e574 Mon Sep 17 00:00:00 2001 From: Miao Xia Date: Mon, 20 Mar 2023 14:53:40 +0800 Subject: [PATCH 021/137] runtime: add filter metrics with specific names The kata monitor metrics API returns a huge size response, if containers or sandboxs are a large number, focus on what we need will be harder. 
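For example, requesting `/metrics?filter_family=kata_shim,kata_hypervisor`
from the kata-monitor listen address (the prefixes here are illustrative)
restricts the response to the metric families whose names start with
`kata_shim` or `kata_hypervisor`.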
Fixes: #6500

Signed-off-by: Miao Xia
---
 src/runtime/cmd/kata-monitor/README.md      |  2 +
 src/runtime/pkg/kata-monitor/metrics.go     | 51 ++++++++++++++-------
 src/runtime/pkg/kata-monitor/shim_client.go |  9 ++++
 3 files changed, 46 insertions(+), 16 deletions(-)

diff --git a/src/runtime/cmd/kata-monitor/README.md b/src/runtime/cmd/kata-monitor/README.md
index 5ebbc8cda..f6fcec1d3 100644
--- a/src/runtime/cmd/kata-monitor/README.md
+++ b/src/runtime/cmd/kata-monitor/README.md
@@ -52,6 +52,8 @@ The **log-level** allows the chose how verbose the logs should be. The default i
 
 **NOTE: The debug endpoints are available only if the [Kata Containers configuration file](https://github.com/kata-containers/kata-containers/blob/9d5b03a1b70bbd175237ec4b9f821d6ccee0a1f6/src/runtime/config/configuration-qemu.toml.in#L590-L592) includes** `enable_pprof = true` **in the** `[runtime]` **section**.
 
+The `/metrics` endpoint accepts a `filter_family` query parameter, which filters the Kata sandbox metrics by name. If `filter_family` is set to `A` (or to several prefixes separated by `,`, e.g. `A,B`), only the metrics whose names start with `A` (or `B`) are returned.
+
 The `/sandboxes` endpoint lists the _sandbox ID_ of all the detected Kata runtimes. If accessed via a web browser, it provides html links to the endpoints available for each sandbox.
 
 In order to retrieve data for a specific Kata workload, the _sandbox ID_ should be passed in the query string using the _sandbox_ key. The `/agent-url`, and all the `/debug/`* endpoints require `sandbox_id` to be specified in the query string.
diff --git a/src/runtime/pkg/kata-monitor/metrics.go b/src/runtime/pkg/kata-monitor/metrics.go
index 98ecb68f0..e45a8f19d 100644
--- a/src/runtime/pkg/kata-monitor/metrics.go
+++ b/src/runtime/pkg/kata-monitor/metrics.go
@@ -114,25 +114,32 @@ func (km *KataMonitor) ProcessMetricsRequest(w http.ResponseWriter, r *http.Requ
 		writer = gz
 	}
 
-	// create encoder to encode metrics.
-	encoder := expfmt.NewEncoder(writer, contentType)
-
-	// gather metrics collected for management agent.
-	mfs, err := prometheus.DefaultGatherer.Gather()
+	filterFamilies, err := getFilterFamilyFromReq(r)
 	if err != nil {
-		monitorLog.WithError(err).Error("failed to Gather metrics from prometheus.DefaultGatherer")
-		w.WriteHeader(http.StatusInternalServerError)
-		w.Write([]byte(err.Error()))
 		return
 	}
 
-	// encode metric gathered in current process
-	if err := encodeMetricFamily(mfs, encoder); err != nil {
-		monitorLog.WithError(err).Warnf("failed to encode metrics")
+	// create encoder to encode metrics.
+	encoder := expfmt.NewEncoder(writer, contentType)
+
+	if len(filterFamilies) == 0 {
+		// gather metrics collected for management agent.
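+		// Note: this branch only runs when no filter_family was supplied;
+		// when a filter is given, the monitor's own process metrics are
+		// skipped and the filtering is applied to the per-sandbox metrics
+		// in aggregateSandboxMetrics below.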
+ mfs, err := prometheus.DefaultGatherer.Gather() + if err != nil { + monitorLog.WithError(err).Error("failed to Gather metrics from prometheus.DefaultGatherer") + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(err.Error())) + return + } + + // encode metric gathered in current process + if err := encodeMetricFamily(mfs, encoder); err != nil { + monitorLog.WithError(err).Warnf("failed to encode metrics") + } } // aggregate sandboxes metrics and write to response by encoder - if err := km.aggregateSandboxMetrics(encoder); err != nil { + if err := km.aggregateSandboxMetrics(encoder, filterFamilies); err != nil { monitorLog.WithError(err).Errorf("failed aggregateSandboxMetrics") scrapeFailedCount.Inc() } @@ -155,7 +162,7 @@ func encodeMetricFamily(mfs []*dto.MetricFamily, encoder expfmt.Encoder) error { } // aggregateSandboxMetrics will get metrics from one sandbox and do some process -func (km *KataMonitor) aggregateSandboxMetrics(encoder expfmt.Encoder) error { +func (km *KataMonitor) aggregateSandboxMetrics(encoder expfmt.Encoder, filterFamilies []string) error { // get all kata sandboxes from cache sandboxes := km.sandboxCache.getSandboxList() // save running kata pods as a metrics. @@ -230,9 +237,21 @@ func (km *KataMonitor) aggregateSandboxMetrics(encoder expfmt.Encoder) error { } // write metrics to response. - for _, mf := range metricsMap { - if err := encoder.Encode(mf); err != nil { - return err + if len(filterFamilies) > 0 { + for _, filterName := range filterFamilies { + for fullName, mf := range metricsMap { + if strings.HasPrefix(fullName, filterName) { + if err := encoder.Encode(mf); err != nil { + return err + } + } + } + } + } else { + for _, mf := range metricsMap { + if err := encoder.Encode(mf); err != nil { + return err + } } } return nil diff --git a/src/runtime/pkg/kata-monitor/shim_client.go b/src/runtime/pkg/kata-monitor/shim_client.go index 388ac6fff..3730c8af0 100644 --- a/src/runtime/pkg/kata-monitor/shim_client.go +++ b/src/runtime/pkg/kata-monitor/shim_client.go @@ -8,6 +8,7 @@ package katamonitor import ( "fmt" "net/http" + "strings" "time" shim "github.com/kata-containers/kata-containers/src/runtime/pkg/containerd-shim-v2" @@ -36,3 +37,11 @@ func getSandboxIDFromReq(r *http.Request) (string, error) { func getSandboxFS() string { return shim.GetSandboxesStoragePath() } + +func getFilterFamilyFromReq(r *http.Request) ([]string, error) { + filterFamilies := r.URL.Query().Get("filter_family") + if filterFamilies != "" { + return strings.Split(filterFamilies, ","), nil + } + return nil, nil +} From a914283ce03869b3ef78ea833c684932d366052d Mon Sep 17 00:00:00 2001 From: Archana Shinde Date: Wed, 8 Feb 2023 00:33:43 -0800 Subject: [PATCH 022/137] kata-ctl: add function to get platform protection. This function checks for TDX, SEV or SNP protection on the x86 platform.
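As a rough illustration of how a caller could consume the new check (a sketch only; the module paths and printed strings below are assumptions, not part of this patch):

    // Hypothetical caller: report which guest protection the host offers.
    use crate::arch::available_guest_protection;
    use crate::check::GuestProtection;

    fn report_guest_protection() {
        // The check reads privileged sysfs/KVM files, so it must run as root.
        match available_guest_protection() {
            Ok(GuestProtection::Tdx) => println!("Intel TDX available"),
            Ok(GuestProtection::Sev) => println!("AMD SEV available"),
            Ok(GuestProtection::Snp) => println!("AMD SEV-SNP available"),
            Ok(_) => println!("no guest protection available"),
            Err(e) => eprintln!("unable to check guest protection: {}", e),
        }
    }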
Fixes: #1000 Signed-off-by: Archana Shinde --- src/tools/kata-ctl/Cargo.toml | 1 + src/tools/kata-ctl/src/arch/aarch64/mod.rs | 7 + .../kata-ctl/src/arch/powerpc64le/mod.rs | 16 ++ src/tools/kata-ctl/src/arch/s390x/mod.rs | 115 +++++++++++++ src/tools/kata-ctl/src/arch/x86_64/mod.rs | 151 ++++++++++++++++++ src/tools/kata-ctl/src/check.rs | 25 +++ src/tools/kata-ctl/src/utils.rs | 2 +- 7 files changed, 316 insertions(+), 1 deletion(-) diff --git a/src/tools/kata-ctl/Cargo.toml b/src/tools/kata-ctl/Cargo.toml index cc32054d2..48a0b2794 100644 --- a/src/tools/kata-ctl/Cargo.toml +++ b/src/tools/kata-ctl/Cargo.toml @@ -40,4 +40,5 @@ reqwest = { version = "0.11", default-features = false, features = ["json", "blo [dev-dependencies] semver = "1.0.12" tempfile = "3.1.0" +nix = "0.25.0" test-utils = { path = "../../libs/test-utils" } diff --git a/src/tools/kata-ctl/src/arch/aarch64/mod.rs b/src/tools/kata-ctl/src/arch/aarch64/mod.rs index 7966123bd..65e3ed93f 100644 --- a/src/tools/kata-ctl/src/arch/aarch64/mod.rs +++ b/src/tools/kata-ctl/src/arch/aarch64/mod.rs @@ -7,6 +7,7 @@ pub use arch_specific::*; mod arch_specific { + use crate::check; use crate::types::*; use anyhow::Result; use std::path::Path; @@ -39,4 +40,10 @@ mod arch_specific { pub fn get_checks() -> Option<&'static [CheckItem<'static>]> { Some(CHECK_LIST) } + + #[allow(dead_code)] + // Guest protection is not supported on ARM64. + pub fn available_guest_protection() -> Result<check::GuestProtection, check::ProtectionError> { + Ok(check::GuestProtection::NoProtection) + } } diff --git a/src/tools/kata-ctl/src/arch/powerpc64le/mod.rs b/src/tools/kata-ctl/src/arch/powerpc64le/mod.rs index 1cc49d70c..8290dbb13 100644 --- a/src/tools/kata-ctl/src/arch/powerpc64le/mod.rs +++ b/src/tools/kata-ctl/src/arch/powerpc64le/mod.rs @@ -8,6 +8,7 @@ use crate::types::*; pub use arch_specific::*; mod arch_specific { + use crate::check; use anyhow::Result; + use nix::unistd::Uid; + use std::fs; pub const ARCH_CPU_VENDOR_FIELD: &str = ""; @@ -20,4 +21,19 @@ mod arch_specific { pub fn get_checks() -> Option<&'static [CheckItem<'static>]> { None } + + const PEF_SYS_FIRMWARE_DIR: &str = "/sys/firmware/ultravisor/"; + + pub fn available_guest_protection() -> Result<check::GuestProtection, check::ProtectionError> { + if !Uid::effective().is_root() { + return Err(check::ProtectionError::NoPerms); + } + + let metadata = fs::metadata(PEF_SYS_FIRMWARE_DIR); + if metadata.is_ok() && metadata.unwrap().is_dir() { + return Ok(check::GuestProtection::Pef); + } + + Ok(check::GuestProtection::NoProtection) + } } diff --git a/src/tools/kata-ctl/src/arch/s390x/mod.rs b/src/tools/kata-ctl/src/arch/s390x/mod.rs index b3196547b..276c75a9e 100644 --- a/src/tools/kata-ctl/src/arch/s390x/mod.rs +++ b/src/tools/kata-ctl/src/arch/s390x/mod.rs @@ -11,6 +11,10 @@ mod arch_specific { use crate::check; use crate::types::*; use anyhow::{anyhow, Result}; + use nix::unistd::Uid; + use std::collections::HashMap; + use std::io::BufRead; + use std::io::BufReader; const CPUINFO_DELIMITER: &str = "processor "; const CPUINFO_FEATURES_TAG: &str = "features"; @@ -65,4 +69,115 @@ mod arch_specific { pub fn get_checks() -> Option<&'static [CheckItem<'static>]> { Some(CHECK_LIST) } + + #[allow(dead_code)] + fn retrieve_cpu_facilities() -> Result<HashMap<i32, bool>> { + let f = std::fs::File::open(check::PROC_CPUINFO)?; + let mut reader = BufReader::new(f); + let mut contents = String::new(); + let facilities_field = "facilities"; + let mut facilities = HashMap::new(); + + while reader.read_line(&mut contents)?
> 0 { + let fields: Vec<&str> = contents.split_whitespace().collect(); + if fields.len() < 2 { + contents.clear(); + continue; + } + + if !fields[0].starts_with(facilities_field) { + contents.clear(); + continue; + } + + let mut start = 1; + if fields[1] == ":" { + start = 2; + } + + for field in fields.iter().skip(start) { + let bit = field.parse::<i32>()?; + facilities.insert(bit, true); + } + return Ok(facilities); + } + + Ok(facilities) + } + + #[allow(dead_code)] + pub fn check_cmd_line( + kernel_cmdline_path: &str, + search_param: &str, + search_values: &[&str], + ) -> Result<bool> { + let f = std::fs::File::open(kernel_cmdline_path)?; + let reader = BufReader::new(f); + + let check_fn = if search_values.is_empty() { + |param: &str, search_param: &str, _search_values: &[&str]| { + return param.eq_ignore_ascii_case(search_param); + } + } else { + |param: &str, search_param: &str, search_values: &[&str]| { + let split: Vec<&str> = param.splitn(2, "=").collect(); + if split.len() < 2 || split[0] != search_param { + return false; + } + + for value in search_values { + if value.eq_ignore_ascii_case(split[1]) { + return true; + } + } + false + } + }; + + for line in reader.lines() { + for field in line?.split_whitespace() { + if check_fn(field, search_param, search_values) { + return Ok(true); + } + } + } + Ok(false) + } + + #[allow(dead_code)] + // Check for Secure Execution (protected virtualization) support on s390x. + pub fn available_guest_protection() -> Result<check::GuestProtection, check::ProtectionError> { + if !Uid::effective().is_root() { + return Err(check::ProtectionError::NoPerms)?; + } + + let facilities = retrieve_cpu_facilities().map_err(|err| { + check::ProtectionError::CheckFailed(format!( + "Error retrieving cpu facilities file : {}", + err.to_string() + )) + })?; + + // Secure Execution + // https://www.kernel.org/doc/html/latest/virt/kvm/s390-pv.html + let se_cpu_facility_bit: i32 = 158; + if !facilities.contains_key(&se_cpu_facility_bit) { + return Ok(check::GuestProtection::NoProtection); + } + + let cmd_line_values = vec!["1", "on", "y", "yes"]; + let se_cmdline_param = "prot_virt"; + + let se_cmdline_present = + check_cmd_line("/proc/cmdline", se_cmdline_param, &cmd_line_values) + .map_err(|err| check::ProtectionError::CheckFailed(err.to_string()))?; + + if !se_cmdline_present { + return Err(check::ProtectionError::InvalidValue(String::from( + "Protected Virtualization is not enabled on kernel command line!", + ))); + } + + Ok(check::GuestProtection::Se) + } } diff --git a/src/tools/kata-ctl/src/arch/x86_64/mod.rs b/src/tools/kata-ctl/src/arch/x86_64/mod.rs index 924536a13..9c8782fa7 100644 --- a/src/tools/kata-ctl/src/arch/x86_64/mod.rs +++ b/src/tools/kata-ctl/src/arch/x86_64/mod.rs @@ -3,13 +3,19 @@ // SPDX-License-Identifier: Apache-2.0 // + +#![allow(dead_code)] + #[cfg(target_arch = "x86_64")] pub use arch_specific::*; mod arch_specific { use crate::check; + use crate::check::{GuestProtection, ProtectionError}; use crate::types::*; use anyhow::{anyhow, Result}; + use nix::unistd::Uid; + use std::fs; + use std::path::Path; const CPUINFO_DELIMITER: &str = "\nprocessor"; const CPUINFO_FLAGS_TAG: &str = "flags"; @@ -61,4 +67,149 @@ mod arch_specific { Ok(()) } + + fn retrieve_cpu_flags() -> Result<String> { + let cpu_info = check::get_single_cpu_info(check::PROC_CPUINFO, CPUINFO_DELIMITER)?; + + let cpu_flags = check::get_cpu_flags(&cpu_info, CPUINFO_FLAGS_TAG).map_err(|e| { + anyhow!( + "Error parsing CPU flags, file {:?}, {:?}", + check::PROC_CPUINFO, + e + ) + })?; + + Ok(cpu_flags) + } + + pub const TDX_SYS_FIRMWARE_DIR: &str =
"/sys/firmware/tdx_seam/"; + pub const TDX_CPU_FLAG: &str = "tdx"; + pub const SEV_KVM_PARAMETER_PATH: &str = "/sys/module/kvm_amd/parameters/sev"; + pub const SNP_KVM_PARAMETER_PATH: &str = "/sys/module/kvm_amd/parameters/sev_snp"; + + pub fn available_guest_protection() -> Result { + if !Uid::effective().is_root() { + return Err(ProtectionError::NoPerms); + } + + arch_guest_protection( + TDX_SYS_FIRMWARE_DIR, + TDX_CPU_FLAG, + SEV_KVM_PARAMETER_PATH, + SNP_KVM_PARAMETER_PATH, + ) + } + + pub fn arch_guest_protection( + tdx_path: &str, + tdx_flag: &str, + sev_path: &str, + snp_path: &str, + ) -> Result { + let flags = + retrieve_cpu_flags().map_err(|err| ProtectionError::CheckFailed(err.to_string()))?; + + let metadata = fs::metadata(tdx_path); + + if metadata.is_ok() && metadata.unwrap().is_dir() && flags.contains(tdx_flag) { + return Ok(GuestProtection::Tdx); + } + + let check_contents = |file_name: &str| -> Result { + let file_path = Path::new(file_name); + if !file_path.exists() { + return Ok(false); + } + + let contents = fs::read_to_string(file_name).map_err(|err| { + ProtectionError::CheckFailed(format!("Error reading file {} : {}", file_name, err)) + })?; + + if contents == "Y" { + return Ok(true); + } + Ok(false) + }; + + if check_contents(snp_path)? { + return Ok(GuestProtection::Snp); + } + + if check_contents(sev_path)? { + return Ok(GuestProtection::Sev); + } + + Ok(GuestProtection::NoProtection) + } +} + +#[cfg(target_arch = "x86_64")] +#[cfg(test)] +mod tests { + use super::*; + use crate::check; + use nix::unistd::Uid; + use std::fs; + use std::io::Write; + use tempfile::tempdir; + + #[test] + fn test_available_guest_protection_no_privileges() { + if !Uid::effective().is_root() { + let res = available_guest_protection(); + assert!(res.is_err()); + assert_eq!( + "No permission to check guest protection", + res.unwrap_err().to_string() + ); + } + } + + fn test_arch_guest_protection_snp() { + // Test snp + let dir = tempdir().unwrap(); + let snp_file_path = dir.path().join("sev_snp"); + let path = snp_file_path.clone(); + let mut snp_file = fs::File::create(snp_file_path).unwrap(); + writeln!(snp_file, "Y").unwrap(); + + let actual = + arch_guest_protection("/xyz/tmp", TDX_CPU_FLAG, "/xyz/tmp", path.to_str().unwrap()); + assert!(actual.is_ok()); + assert_eq!(actual.unwrap(), check::GuestProtection::Snp); + + writeln!(snp_file, "N").unwrap(); + let actual = + arch_guest_protection("/xyz/tmp", TDX_CPU_FLAG, "/xyz/tmp", path.to_str().unwrap()); + assert!(actual.is_ok()); + assert_eq!(actual.unwrap(), check::GuestProtection::NoProtection); + } + + fn test_arch_guest_protection_sev() { + // Test sev + let dir = tempdir().unwrap(); + let sev_file_path = dir.path().join("sev"); + let sev_path = sev_file_path.clone(); + let mut sev_file = fs::File::create(sev_file_path).unwrap(); + writeln!(sev_file, "Y").unwrap(); + + let actual = arch_guest_protection( + "/xyz/tmp", + TDX_CPU_FLAG, + sev_path.to_str().unwrap(), + "/xyz/tmp", + ); + assert!(actual.is_ok()); + assert_eq!(actual.unwrap(), check::GuestProtection::Sev); + + writeln!(sev_file, "N").unwrap(); + let actual = arch_guest_protection( + "/xyz/tmp", + TDX_CPU_FLAG, + sev_path.to_str().unwrap(), + "/xyz/tmp", + ); + assert!(actual.is_ok()); + assert_eq!(actual.unwrap(), check::GuestProtection::NoProtection); + } } diff --git a/src/tools/kata-ctl/src/check.rs b/src/tools/kata-ctl/src/check.rs index 81b9b83a7..a39c3e61f 100644 --- a/src/tools/kata-ctl/src/check.rs +++ b/src/tools/kata-ctl/src/check.rs @@ -8,6 +8,7 @@ use 
anyhow::{anyhow, Result}; use reqwest::header::{CONTENT_TYPE, USER_AGENT}; use serde::{Deserialize, Serialize}; +use thiserror::Error; #[derive(Debug, Deserialize, Serialize, PartialEq)] struct Release { tag_name: String, @@ -118,6 +119,30 @@ pub fn check_cpu_attribs( Ok(missing_attribs) } +#[allow(dead_code)] +#[derive(Debug, PartialEq)] +pub enum GuestProtection { + NoProtection, + Tdx, + Sev, + Snp, + Pef, + Se, +} + +#[allow(dead_code)] +#[derive(Error, Debug)] +pub enum ProtectionError { + #[error("No permission to check guest protection")] + NoPerms, + + #[error("Failed to check guest protection: {0}")] + CheckFailed(String), + + #[error("Invalid guest protection value: {0}")] + InvalidValue(String), +} + pub fn run_network_checks() -> Result<()> { Ok(()) } diff --git a/src/tools/kata-ctl/src/utils.rs b/src/tools/kata-ctl/src/utils.rs index 9c92f82bf..6252271c8 100644 --- a/src/tools/kata-ctl/src/utils.rs +++ b/src/tools/kata-ctl/src/utils.rs @@ -305,7 +305,7 @@ mod tests { let dir = tempdir().unwrap(); let file_path = dir.path().join("vhost-vsock"); let path = file_path.clone(); - let _dir = fs::create_dir(file_path).unwrap(); + fs::create_dir(file_path).unwrap(); let res = supports_vsocks(path.to_str().unwrap()).unwrap(); assert!(!res); } From 41fdda1d84ea72b3ae2dbadc3152346d418fa73f Mon Sep 17 00:00:00 2001 From: Christophe de Dinechin Date: Thu, 30 Mar 2023 16:09:13 +0200 Subject: [PATCH 023/137] rustjail: Do not unwrap potential error with cgroup manager There can be an error while connecting to the cgroup manager, for example an `ENOENT` if a file is not found. Make sure that this is reported through the proper channels instead of causing a `panic()` that does not provide much information. Fixes: #6561 Signed-off-by: Christophe de Dinechin Reported-by: Greg Kurz --- src/agent/rustjail/src/cgroups/systemd/manager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/agent/rustjail/src/cgroups/systemd/manager.rs b/src/agent/rustjail/src/cgroups/systemd/manager.rs index c52e727e4..c34aaf910 100644 --- a/src/agent/rustjail/src/cgroups/systemd/manager.rs +++ b/src/agent/rustjail/src/cgroups/systemd/manager.rs @@ -41,7 +41,7 @@ pub struct Manager { impl CgroupManager for Manager { fn apply(&self, pid: pid_t) -> Result<()> { let unit_name = self.unit_name.as_str(); - if self.dbus_client.unit_exist(unit_name).unwrap() { + if self.dbus_client.unit_exist(unit_name)? { self.dbus_client.add_process(pid, self.unit_name.as_str())?; } else { self.dbus_client.start_unit( From 7796e6ccc64246576b441824a8617cd1607125b4 Mon Sep 17 00:00:00 2001 From: Christophe de Dinechin Date: Thu, 30 Mar 2023 16:13:37 +0200 Subject: [PATCH 024/137] rustjail: Fix minor grammatical error in function name Rename the `unit_exist` function to `unit_exists` to match English grammar rules.
Fixes: #6561 Signed-off-by: Christophe de Dinechin --- src/agent/rustjail/src/cgroups/systemd/dbus_client.rs | 4 ++-- src/agent/rustjail/src/cgroups/systemd/manager.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/agent/rustjail/src/cgroups/systemd/dbus_client.rs b/src/agent/rustjail/src/cgroups/systemd/dbus_client.rs index 09c46f24d..fd3b9bf8f 100644 --- a/src/agent/rustjail/src/cgroups/systemd/dbus_client.rs +++ b/src/agent/rustjail/src/cgroups/systemd/dbus_client.rs @@ -26,7 +26,7 @@ pub trait SystemdInterface { fn get_version(&self) -> Result<String>; - fn unit_exist(&self, unit_name: &str) -> Result<bool>; + fn unit_exists(&self, unit_name: &str) -> Result<bool>; fn add_process(&self, pid: i32, unit_name: &str) -> Result<()>; } @@ -108,7 +108,7 @@ impl SystemdInterface for DBusClient { Ok(systemd_version) } - fn unit_exist(&self, unit_name: &str) -> Result<bool> { + fn unit_exists(&self, unit_name: &str) -> Result<bool> { let proxy = self.build_proxy()?; Ok(proxy.get_unit(unit_name).is_ok()) diff --git a/src/agent/rustjail/src/cgroups/systemd/manager.rs b/src/agent/rustjail/src/cgroups/systemd/manager.rs index c34aaf910..dcbc65a2c 100644 --- a/src/agent/rustjail/src/cgroups/systemd/manager.rs +++ b/src/agent/rustjail/src/cgroups/systemd/manager.rs @@ -41,7 +41,7 @@ pub struct Manager { impl CgroupManager for Manager { fn apply(&self, pid: pid_t) -> Result<()> { let unit_name = self.unit_name.as_str(); - if self.dbus_client.unit_exist(unit_name)? { + if self.dbus_client.unit_exists(unit_name)? { self.dbus_client.add_process(pid, self.unit_name.as_str())?; } else { self.dbus_client.start_unit( From a552a1953ac222533c22f40fd803ea4b9b3b355f Mon Sep 17 00:00:00 2001 From: Gabriela Cervantes Date: Thu, 30 Mar 2023 16:20:33 +0000 Subject: [PATCH 025/137] docs: Update CNM URL in networking document This PR updates the URL for the Container Network Model in the networking document. Fixes #6563 Signed-off-by: Gabriela Cervantes --- docs/design/architecture/networking.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/design/architecture/networking.md b/docs/design/architecture/networking.md index ab056849c..1550f0ad0 100644 --- a/docs/design/architecture/networking.md +++ b/docs/design/architecture/networking.md @@ -36,7 +36,7 @@ compatibility, and performance on par with MACVTAP. Kata Containers has deprecated support for bridge due to lacking performance relative to TC-filter and MACVTAP. Kata Containers supports both -[CNM](https://github.com/docker/libnetwork/blob/master/docs/design.md#the-container-network-model) +[CNM](https://github.com/moby/libnetwork/blob/master/docs/design.md#the-container-network-model) and [CNI](https://github.com/containernetworking/cni) for networking management. ## Network Hotplug From 56331bd7bcfbc6aabd8a732a10c544ba3df84622 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 30 Mar 2023 19:17:55 +0200 Subject: [PATCH 026/137] gha: Split payload-after-push-*.yaml MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's split those actions into two different ones: * Build the kata-static tarball * Publish the kata-deploy payload We're doing this as, later in this series, we'll start taking advantage of both pieces.
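After the split, a caller workflow can chain the two pieces roughly like this (a sketch of the intended usage, mirroring what payload-after-push.yaml ends up doing; the job names are illustrative):

    jobs:
      build-assets-amd64:
        uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml

      publish-amd64:
        needs: build-assets-amd64
        uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml
        with:
          registry: quay.io/kata-containers/kata-deploy-ci
          tag: kata-containers-amd64
        secrets: inherit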
Signed-off-by: Fabiano Fidêncio --- ...l => build-kata-static-tarball-amd64.yaml} | 43 +++-------------- ...l => build-kata-static-tarball-arm64.yaml} | 47 +++---------------- ...l => build-kata-static-tarball-s390x.yaml} | 47 +++---------------- .github/workflows/payload-after-push.yaml | 39 ++++++++++----- .../publish-kata-deploy-payload-amd64.yaml | 38 +++++++++++++++ .../publish-kata-deploy-payload-arm64.yaml | 42 +++++++++++++++++ .../publish-kata-deploy-payload-s390x.yaml | 42 +++++++++++++++++ 7 files changed, 167 insertions(+), 131 deletions(-) rename .github/workflows/{payload-after-push-amd64.yaml => build-kata-static-tarball-amd64.yaml} (57%) rename .github/workflows/{payload-after-push-arm64.yaml => build-kata-static-tarball-arm64.yaml} (58%) rename .github/workflows/{payload-after-push-s390x.yaml => build-kata-static-tarball-s390x.yaml} (57%) create mode 100644 .github/workflows/publish-kata-deploy-payload-amd64.yaml create mode 100644 .github/workflows/publish-kata-deploy-payload-arm64.yaml create mode 100644 .github/workflows/publish-kata-deploy-payload-s390x.yaml diff --git a/.github/workflows/payload-after-push-amd64.yaml b/.github/workflows/build-kata-static-tarball-amd64.yaml similarity index 57% rename from .github/workflows/payload-after-push-amd64.yaml rename to .github/workflows/build-kata-static-tarball-amd64.yaml index ef2f976eb..1a14d145e 100644 --- a/.github/workflows/payload-after-push-amd64.yaml +++ b/.github/workflows/build-kata-static-tarball-amd64.yaml @@ -1,9 +1,9 @@ -name: CI | Publish kata-deploy payload for amd64 +name: CI | Build kata-static tarball for amd64 on: workflow_call: inputs: - target-arch: - required: true + tarball-suffix: + required: false type: string jobs: @@ -23,13 +23,6 @@ jobs: - shim-v2 - virtiofsd steps: - - name: Login to Kata Containers quay.io - uses: docker/login-action@v2 - with: - registry: quay.io - username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} - password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - - uses: actions/checkout@v3 with: fetch-depth: 0 # This is needed in order to keep the commit ids history @@ -47,7 +40,7 @@ jobs: - name: store-artifact ${{ matrix.asset }} uses: actions/upload-artifact@v3 with: - name: kata-artifacts-amd64 + name: kata-artifacts-amd64${{ inputs.tarball-suffix }} path: kata-build/kata-static-${{ matrix.asset }}.tar.xz retention-days: 1 if-no-files-found: error @@ -60,7 +53,7 @@ jobs: - name: get-artifacts uses: actions/download-artifact@v3 with: - name: kata-artifacts-amd64 + name: kata-artifacts-amd64${{ inputs.tarball-suffix }} path: kata-artifacts - name: merge-artifacts run: | @@ -68,31 +61,7 @@ jobs: - name: store-artifacts uses: actions/upload-artifact@v3 with: - name: kata-static-tarball-amd64 + name: kata-static-tarball-amd64${{ inputs.tarball-suffix }} path: kata-static.tar.xz retention-days: 1 if-no-files-found: error - - kata-payload: - needs: create-kata-tarball - runs-on: ubuntu-latest - steps: - - name: Login to Kata Containers quay.io - uses: docker/login-action@v2 - with: - registry: quay.io - username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} - password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - - - uses: actions/checkout@v3 - - name: get-kata-tarball - uses: actions/download-artifact@v3 - with: - name: kata-static-tarball-amd64 - - - name: build-and-push-kata-payload - id: build-and-push-kata-payload - run: | - ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ - $(pwd)/kata-static.tar.xz "quay.io/kata-containers/kata-deploy-ci" \ - "kata-containers-${{ 
inputs.target-arch }}" diff --git a/.github/workflows/payload-after-push-arm64.yaml b/.github/workflows/build-kata-static-tarball-arm64.yaml similarity index 58% rename from .github/workflows/payload-after-push-arm64.yaml rename to .github/workflows/build-kata-static-tarball-arm64.yaml index e25cd60f0..f7b040b4a 100644 --- a/.github/workflows/payload-after-push-arm64.yaml +++ b/.github/workflows/build-kata-static-tarball-arm64.yaml @@ -1,9 +1,9 @@ -name: CI | Publish kata-deploy payload for arm64 +name: CI | Build kata-static tarball for arm64 on: workflow_call: inputs: - target-arch: - required: true + tarball-suffix: + required: false type: string jobs: @@ -23,13 +23,6 @@ jobs: - shim-v2 - virtiofsd steps: - - name: Login to Kata Containers quay.io - uses: docker/login-action@v2 - with: - registry: quay.io - username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} - password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - - name: Adjust a permission for repo run: | sudo chown -R $USER:$USER $GITHUB_WORKSPACE @@ -51,7 +44,7 @@ jobs: - name: store-artifact ${{ matrix.asset }} uses: actions/upload-artifact@v3 with: - name: kata-artifacts-arm64 + name: kata-artifacts-arm64${{ inputs.tarball-suffix }} path: kata-build/kata-static-${{ matrix.asset }}.tar.xz retention-days: 1 if-no-files-found: error @@ -68,7 +61,7 @@ jobs: - name: get-artifacts uses: actions/download-artifact@v3 with: - name: kata-artifacts-arm64 + name: kata-artifacts-arm64${{ inputs.tarball-suffix }} path: kata-artifacts - name: merge-artifacts run: | @@ -76,35 +69,7 @@ jobs: - name: store-artifacts uses: actions/upload-artifact@v3 with: - name: kata-static-tarball-arm64 + name: kata-static-tarball-arm64${{ inputs.tarball-suffix }} path: kata-static.tar.xz retention-days: 1 if-no-files-found: error - - kata-payload: - needs: create-kata-tarball - runs-on: arm64 - steps: - - name: Login to Kata Containers quay.io - uses: docker/login-action@v2 - with: - registry: quay.io - username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} - password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - - - name: Adjust a permission for repo - run: | - sudo chown -R $USER:$USER $GITHUB_WORKSPACE - - - uses: actions/checkout@v3 - - name: get-kata-tarball - uses: actions/download-artifact@v3 - with: - name: kata-static-tarball-arm64 - - - name: build-and-push-kata-payload - id: build-and-push-kata-payload - run: | - ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ - $(pwd)/kata-static.tar.xz "quay.io/kata-containers/kata-deploy-ci" \ - "kata-containers-${{ inputs.target-arch }}" diff --git a/.github/workflows/payload-after-push-s390x.yaml b/.github/workflows/build-kata-static-tarball-s390x.yaml similarity index 57% rename from .github/workflows/payload-after-push-s390x.yaml rename to .github/workflows/build-kata-static-tarball-s390x.yaml index df9afa9f9..c00795fe3 100644 --- a/.github/workflows/payload-after-push-s390x.yaml +++ b/.github/workflows/build-kata-static-tarball-s390x.yaml @@ -1,9 +1,9 @@ -name: CI | Publish kata-deploy payload for s390x +name: CI | Build kata-static tarball for s390x on: workflow_call: inputs: - target-arch: - required: true + tarball-suffix: + required: false type: string jobs: @@ -19,13 +19,6 @@ jobs: - shim-v2 - virtiofsd steps: - - name: Login to Kata Containers quay.io - uses: docker/login-action@v2 - with: - registry: quay.io - username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} - password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - - name: Adjust a permission for repo run: | sudo chown -R $USER:$USER 
$GITHUB_WORKSPACE @@ -48,7 +41,7 @@ jobs: - name: store-artifact ${{ matrix.asset }} uses: actions/upload-artifact@v3 with: - name: kata-artifacts-s390x + name: kata-artifacts-s390x${{ inputs.tarball-suffix }} path: kata-build/kata-static-${{ matrix.asset }}.tar.xz retention-days: 1 if-no-files-found: error @@ -65,7 +58,7 @@ jobs: - name: get-artifacts uses: actions/download-artifact@v3 with: - name: kata-artifacts-s390x + name: kata-artifacts-s390x${{ inputs.tarball-suffix }} path: kata-artifacts - name: merge-artifacts run: | @@ -73,35 +66,7 @@ jobs: - name: store-artifacts uses: actions/upload-artifact@v3 with: - name: kata-static-tarball-s390x + name: kata-static-tarball-s390x${{ inputs.tarball-suffix }} path: kata-static.tar.xz retention-days: 1 if-no-files-found: error - - kata-payload: - needs: create-kata-tarball - runs-on: s390x - steps: - - name: Login to Kata Containers quay.io - uses: docker/login-action@v2 - with: - registry: quay.io - username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} - password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - - - name: Adjust a permission for repo - run: | - sudo chown -R $USER:$USER $GITHUB_WORKSPACE - - - uses: actions/checkout@v3 - - name: get-kata-tarball - uses: actions/download-artifact@v3 - with: - name: kata-static-tarball-s390x - - - name: build-and-push-kata-payload - id: build-and-push-kata-payload - run: | - ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ - $(pwd)/kata-static.tar.xz "quay.io/kata-containers/kata-deploy-ci" \ - "kata-containers-${{ inputs.target-arch }}" diff --git a/.github/workflows/payload-after-push.yaml b/.github/workflows/payload-after-push.yaml index f07c8859c..a03d4e543 100644 --- a/.github/workflows/payload-after-push.yaml +++ b/.github/workflows/payload-after-push.yaml @@ -7,26 +7,41 @@ on: jobs: build-assets-amd64: - uses: ./.github/workflows/payload-after-push-amd64.yaml - with: - target-arch: amd64 - secrets: inherit + uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml build-assets-arm64: - uses: ./.github/workflows/payload-after-push-arm64.yaml - with: - target-arch: arm64 - secrets: inherit + uses: ./.github/workflows/build-kata-static-tarball-arm64.yaml build-assets-s390x: - uses: ./.github/workflows/payload-after-push-s390x.yaml + uses: ./.github/workflows/build-kata-static-tarball-s390x.yaml + + publish-kata-deploy-payload-amd64: + needs: build-assets-amd64 + uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml with: - target-arch: s390x + registry: quay.io/kata-containers/kata-deploy-ci + tag: kata-containers-amd64 secrets: inherit - publish: + publish-kata-deploy-payload-arm64: + needs: build-assets-arm64 + uses: ./.github/workflows/publish-kata-deploy-payload-arm64.yaml + with: + registry: quay.io/kata-containers/kata-deploy-ci + tag: kata-containers-arm64 + secrets: inherit + + publish-kata-deploy-payload-s390x: + needs: build-assets-s390x + uses: ./.github/workflows/publish-kata-deploy-payload-s390x.yaml + with: + registry: quay.io/kata-containers/kata-deploy-ci + tag: kata-containers-s390x + secrets: inherit + + publish-manifest: runs-on: ubuntu-latest - needs: [build-assets-amd64, build-assets-arm64, build-assets-s390x] + needs: [publish-kata-deploy-payload-amd64, publish-kata-deploy-payload-arm64, publish-kata-deploy-payload-s390x] steps: - name: Checkout repository uses: actions/checkout@v3 diff --git a/.github/workflows/publish-kata-deploy-payload-amd64.yaml b/.github/workflows/publish-kata-deploy-payload-amd64.yaml new file mode 100644 
index 000000000..27c0ade46 --- /dev/null +++ b/.github/workflows/publish-kata-deploy-payload-amd64.yaml @@ -0,0 +1,38 @@ +name: CI | Publish kata-deploy payload for amd64 +on: + workflow_call: + inputs: + tarball-suffix: + required: false + type: string + registry: + required: true + type: string + tag: + required: true + type: string + +jobs: + kata-payload: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: get-kata-tarball + uses: actions/download-artifact@v3 + with: + name: kata-static-tarball-amd64${{ inputs.tarball-suffix }} + + - name: Login to Kata Containers quay.io + uses: docker/login-action@v2 + with: + registry: quay.io + username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} + password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} + + - name: build-and-push-kata-payload + id: build-and-push-kata-payload + run: | + ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ + $(pwd)/kata-static.tar.xz \ + ${{ inputs.registry }} ${{ inputs.tag }} diff --git a/.github/workflows/publish-kata-deploy-payload-arm64.yaml b/.github/workflows/publish-kata-deploy-payload-arm64.yaml new file mode 100644 index 000000000..0834b7bf5 --- /dev/null +++ b/.github/workflows/publish-kata-deploy-payload-arm64.yaml @@ -0,0 +1,42 @@ +name: CI | Publish kata-deploy payload for arm64 +on: + workflow_call: + inputs: + tarball-suffix: + required: false + type: string + registry: + required: true + type: string + tag: + required: true + type: string + +jobs: + kata-payload: + runs-on: arm64 + steps: + - name: Adjust a permission for repo + run: | + sudo chown -R $USER:$USER $GITHUB_WORKSPACE + + - uses: actions/checkout@v3 + + - name: get-kata-tarball + uses: actions/download-artifact@v3 + with: + name: kata-static-tarball-arm64${{ inputs.tarball-suffix }} + + - name: Login to Kata Containers quay.io + uses: docker/login-action@v2 + with: + registry: quay.io + username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} + password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} + + - name: build-and-push-kata-payload + id: build-and-push-kata-payload + run: | + ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ + $(pwd)/kata-static.tar.xz \ + ${{ inputs.registry }} ${{ inputs.tag }} diff --git a/.github/workflows/publish-kata-deploy-payload-s390x.yaml b/.github/workflows/publish-kata-deploy-payload-s390x.yaml new file mode 100644 index 000000000..6c96df801 --- /dev/null +++ b/.github/workflows/publish-kata-deploy-payload-s390x.yaml @@ -0,0 +1,42 @@ +name: CI | Publish kata-deploy payload for s390x +on: + workflow_call: + inputs: + tarball-suffix: + required: false + type: string + registry: + required: true + type: string + tag: + required: true + type: string + +jobs: + kata-payload: + runs-on: s390x + steps: + - name: Adjust a permission for repo + run: | + sudo chown -R $USER:$USER $GITHUB_WORKSPACE + + - uses: actions/checkout@v3 + + - name: get-kata-tarball + uses: actions/download-artifact@v3 + with: + name: kata-static-tarball-s390x${{ inputs.tarball-suffix }} + + - name: Login to Kata Containers quay.io + uses: docker/login-action@v2 + with: + registry: quay.io + username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} + password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} + + - name: build-and-push-kata-payload + id: build-and-push-kata-payload + run: | + ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ + $(pwd)/kata-static.tar.xz \ + ${{ inputs.registry }} ${{ inputs.tag }} From
d38d7fbf1a208866dad8d9c173c0a3170a7c27bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 30 Mar 2023 19:27:57 +0200 Subject: [PATCH 027/137] gha: Remove code duplication from release.yaml MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can easily re-use the newly added build-kata-static-tarball-*.yaml as part of the release.yaml file. By doing this we consolidate how we build the components across our actions. Signed-off-by: Fabiano Fidêncio --- .github/workflows/release.yaml | 59 +++------------------------------- 1 file changed, 4 insertions(+), 55 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 87a5992c1..80be95daf 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -5,69 +5,18 @@ on: - '[0-9]+.[0-9]+.[0-9]+*' jobs: - build-asset: - runs-on: ubuntu-latest - strategy: - matrix: - asset: - - cloud-hypervisor - - firecracker - - kernel - - kernel-dragonball-experimental - - nydus - - qemu - - rootfs-image - - rootfs-initrd - - shim-v2 - - virtiofsd - steps: - - uses: actions/checkout@v2 - - name: Build ${{ matrix.asset }} - run: | - ./tools/packaging/kata-deploy/local-build/kata-deploy-copy-yq-installer.sh - ./tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh --build="${KATA_ASSET}" - build_dir=$(readlink -f build) - # store-artifact does not work with symlink - sudo cp -r "${build_dir}" "kata-build" - env: - KATA_ASSET: ${{ matrix.asset }} - TAR_OUTPUT: ${{ matrix.asset }}.tar.gz - - - name: store-artifact ${{ matrix.asset }} - uses: actions/upload-artifact@v2 - with: - name: kata-artifacts - path: kata-build/kata-static-${{ matrix.asset }}.tar.xz - if-no-files-found: error - - create-kata-tarball: - runs-on: ubuntu-latest - needs: build-asset - steps: - - uses: actions/checkout@v2 - - name: get-artifacts - uses: actions/download-artifact@v2 - with: - name: kata-artifacts - path: kata-artifacts - - name: merge-artifacts - run: | - ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts - - name: store-artifacts - uses: actions/upload-artifact@v2 - with: - name: kata-static-tarball - path: kata-static.tar.xz + build-kata-static-tarball-amd64: + uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml kata-deploy: - needs: create-kata-tarball + needs: build-kata-static-tarball-amd64 runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: get-kata-tarball uses: actions/download-artifact@v2 with: - name: kata-static-tarball + name: kata-static-tarball-amd64 - name: build-and-push-kata-deploy-ci id: build-and-push-kata-deploy-ci run: | From 73be4bd3f971b3e1ce57f0745f7913da2c01c551 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 30 Mar 2023 20:15:58 +0200 Subject: [PATCH 028/137] gha: Update actions for release.yaml MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit checkout@v2 should not be used anymore; please see: https://github.blog/changelog/2022-09-22-github-actions-all-actions-will-begin-running-on-node16-instead-of-node12/ Signed-off-by: Fabiano Fidêncio --- .github/workflows/release.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 80be95daf..a642fa36f 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -12,9 +12,9 @@ jobs: needs:
build-kata-static-tarball-amd64 runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: get-kata-tarball - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: kata-static-tarball-amd64 - name: build-and-push-kata-deploy-ci @@ -61,9 +61,9 @@ jobs: needs: kata-deploy runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: download-artifacts - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: kata-static-tarball - name: install hub @@ -85,7 +85,7 @@ jobs: needs: upload-static-tarball runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: generate-and-upload-tarball run: | tag=$(echo $GITHUB_REF | cut -d/ -f3-) @@ -99,7 +99,7 @@ jobs: needs: upload-cargo-vendored-tarball runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: download-and-upload-tarball env: GITHUB_TOKEN: ${{ secrets.GIT_UPLOAD_TOKEN }} From 11e0099fb5d3f307ed23a1e6eaab17ed5b7a47ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 30 Mar 2023 22:14:25 +0200 Subject: [PATCH 029/137] tests: Move k8s tests to this repo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The first part of simplifying things to have all our tests using GitHub actions is moving the k8s tests to this repo, as those will be the first vict^W targets to be migrated to GitHub actions. Those tests have been slightly adapted, mainly related to what they load / import, so they are more self-contained and do not require us to bring a lot of scripts from the tests repo here. A few scripts were also dropped along the way, as we no longer plan to deploy kubernetes as part of every single run, but rather assume there will always be k8s running whenever we land to run those tests. It's important to mention that a few tests were not added here: * k8s-block-volume: * k8s-file-volume: * k8s-volume: * k8s-ro-volume: These tests depend on some sort of volume being created on the kubernetes node where the test will run, and this won't fly as the tests will run from a GitHub runner, targeting a different machine where kubernetes will be running. * https://github.com/kata-containers/kata-containers/issues/6566 * k8s-hugepages: This test depends a whole lot on the host where it lands and right now we cannot assume anything about that anymore, as the tests will run from a GitHub runner, targeting a different machine where kubernetes will be running. * https://github.com/kata-containers/kata-containers/issues/6567 * k8s-expose-ip: This is simply hanging when running on AKS and has to be debugged in order to figure out the root cause of that, and then adapted to also work on AKS. * https://github.com/kata-containers/kata-containers/issues/6578 Till those issues are solved, we'll keep running a jenkins job with those tests to avoid any possible regression. Last but not least, I've decided to **not** keep the history when bringing those tests here, otherwise we'd end up polluting the history of this repo a lot, without any clear benefit in doing so.
Signed-off-by: Fabiano Fidêncio --- tests/common.bash | 47 +++++++++++ .../integration/kubernetes/filter_k8s_test.sh | 48 +++++++++++ .../filter_out_per_arch/aarch64.yaml | 23 +++++ .../filter_out_per_arch/ppc64le.yaml | 11 +++ .../kubernetes/filter_out_per_arch/s390x.yaml | 8 ++ .../kubernetes/k8s-attach-handlers.bats | 42 ++++++++++ tests/integration/kubernetes/k8s-caps.bats | 55 ++++++++++++ .../integration/kubernetes/k8s-configmap.bats | 43 ++++++++++ .../integration/kubernetes/k8s-copy-file.bats | 83 +++++++++++++++++++ tests/integration/kubernetes/k8s-cpu-ns.bats | 76 +++++++++++++++++ .../kubernetes/k8s-credentials-secrets.bats | 62 ++++++++++++++ .../kubernetes/k8s-custom-dns.bats | 34 ++++++++ .../kubernetes/k8s-empty-dirs.bats | 74 +++++++++++++++++ tests/integration/kubernetes/k8s-env.bats | 40 +++++++++ tests/integration/kubernetes/k8s-exec.bats | 65 +++++++++++++++ .../integration/kubernetes/k8s-footloose.bats | 58 +++++++++++++ tests/integration/kubernetes/k8s-inotify.bats | 46 ++++++++++ tests/integration/kubernetes/k8s-job.bats | 49 +++++++++++ .../k8s-kill-all-process-in-container.bats | 37 +++++++++ .../kubernetes/k8s-limit-range.bats | 41 +++++++++ .../kubernetes/k8s-liveness-probes.bats | 80 ++++++++++++++++++ tests/integration/kubernetes/k8s-memory.bats | 56 +++++++++++++ .../k8s-nested-configmap-secret.bats | 39 +++++++++ .../kubernetes/k8s-nginx-connectivity.bats | 53 ++++++++++++ .../kubernetes/k8s-number-cpus.bats | 47 +++++++++++ tests/integration/kubernetes/k8s-oom.bats | 37 +++++++++ .../k8s-optional-empty-configmap.bats | 39 +++++++++ .../kubernetes/k8s-optional-empty-secret.bats | 39 +++++++++ .../integration/kubernetes/k8s-parallel.bats | 48 +++++++++++ tests/integration/kubernetes/k8s-pid-ns.bats | 48 +++++++++++ .../integration/kubernetes/k8s-pod-quota.bats | 37 +++++++++ .../kubernetes/k8s-port-forward.bats | 71 ++++++++++++++++ .../kubernetes/k8s-projected-volume.bats | 63 ++++++++++++++ .../integration/kubernetes/k8s-qos-pods.bats | 58 +++++++++++++ .../kubernetes/k8s-replication.bats | 62 ++++++++++++++ .../kubernetes/k8s-scale-nginx.bats | 36 ++++++++ tests/integration/kubernetes/k8s-seccomp.bats | 35 ++++++++ .../kubernetes/k8s-security-context.bats | 35 ++++++++ .../kubernetes/k8s-shared-volume.bats | 51 ++++++++++++ tests/integration/kubernetes/k8s-sysctls.bats | 34 ++++++++ .../kubernetes/run_kubernetes_tests.sh | 68 +++++++++++++++ .../runtimeclass_workloads/busybox-pod.yaml | 32 +++++++ .../busybox-template.yaml | 19 +++++ .../runtimeclass_workloads/configmap.yaml | 12 +++ .../footloose-configmap.yaml | 12 +++ .../initContainer-shared-volume.yaml | 29 +++++++ .../initcontainer-shareprocesspid.yaml | 26 ++++++ .../runtimeclass_workloads/inject_secret.yaml | 12 +++ .../inotify-configmap-pod.yaml | 32 +++++++ .../inotify-configmap.yaml | 13 +++ .../inotify-updated-configmap.yaml | 14 ++++ .../runtimeclass_workloads/job-template.yaml | 25 ++++++ .../runtimeclass_workloads/job.yaml | 20 +++++ .../kata-runtimeclass.yaml | 9 ++ .../lifecycle-events.yaml | 23 +++++ .../runtimeclass_workloads/limit-range.yaml | 16 ++++ .../nginx-deployment.yaml | 26 ++++++ .../pod-besteffort.yaml | 16 ++++ .../runtimeclass_workloads/pod-burstable.yaml | 21 +++++ .../runtimeclass_workloads/pod-caps.yaml | 18 ++++ .../runtimeclass_workloads/pod-configmap.yaml | 28 +++++++ .../pod-cpu-defaults.yaml | 16 ++++ .../runtimeclass_workloads/pod-cpu.yaml | 23 +++++ .../pod-custom-dns.yaml | 23 +++++ .../pod-empty-dir-fsgroup.yaml | 44 ++++++++++ 
.../runtimeclass_workloads/pod-empty-dir.yaml | 28 +++++++ .../runtimeclass_workloads/pod-env.yaml | 46 ++++++++++ .../pod-file-volume.yaml | 26 ++++++ .../runtimeclass_workloads/pod-footloose.yaml | 59 +++++++++++++ .../pod-guaranteed.yaml | 23 +++++ .../pod-http-liveness.yaml | 25 ++++++ .../runtimeclass_workloads/pod-hugepage.yaml | 30 +++++++ .../runtimeclass_workloads/pod-liveness.yaml | 28 +++++++ .../pod-memory-limit.yaml | 23 +++++ .../pod-nested-configmap-secret.yaml | 44 ++++++++++ .../pod-number-cpu.yaml | 27 ++++++ .../runtimeclass_workloads/pod-oom.yaml | 25 ++++++ .../pod-optional-empty-configmap.yaml | 30 +++++++ .../pod-optional-empty-secret.yaml | 30 +++++++ .../pod-projected-volume.yaml | 28 +++++++ .../pod-quota-deployment.yaml | 26 ++++++ .../pod-readonly-volume.yaml | 27 ++++++ .../runtimeclass_workloads/pod-seccomp.yaml | 22 +++++ .../pod-secret-env.yaml | 27 ++++++ .../runtimeclass_workloads/pod-secret.yaml | 25 ++++++ .../pod-security-context.yaml | 18 ++++ .../pod-shared-volume.yaml | 31 +++++++ .../runtimeclass_workloads/pod-sysctl.yaml | 28 +++++++ .../pod-tcp-liveness.yaml | 31 +++++++ .../redis-master-deployment.yaml | 36 ++++++++ .../redis-master-service.yaml | 21 +++++ .../replication-controller.yaml | 26 ++++++ .../resource-quota.yaml | 20 +++++ .../runtimeclass_workloads/stress/Dockerfile | 13 +++ .../runtimeclass_workloads/vfio.yaml | 24 ++++++ tests/integration/kubernetes/tests_common.sh | 38 +++++++++ 96 files changed, 3372 insertions(+) create mode 100644 tests/common.bash create mode 100755 tests/integration/kubernetes/filter_k8s_test.sh create mode 100644 tests/integration/kubernetes/filter_out_per_arch/aarch64.yaml create mode 100644 tests/integration/kubernetes/filter_out_per_arch/ppc64le.yaml create mode 100644 tests/integration/kubernetes/filter_out_per_arch/s390x.yaml create mode 100644 tests/integration/kubernetes/k8s-attach-handlers.bats create mode 100644 tests/integration/kubernetes/k8s-caps.bats create mode 100644 tests/integration/kubernetes/k8s-configmap.bats create mode 100644 tests/integration/kubernetes/k8s-copy-file.bats create mode 100644 tests/integration/kubernetes/k8s-cpu-ns.bats create mode 100644 tests/integration/kubernetes/k8s-credentials-secrets.bats create mode 100644 tests/integration/kubernetes/k8s-custom-dns.bats create mode 100644 tests/integration/kubernetes/k8s-empty-dirs.bats create mode 100644 tests/integration/kubernetes/k8s-env.bats create mode 100644 tests/integration/kubernetes/k8s-exec.bats create mode 100644 tests/integration/kubernetes/k8s-footloose.bats create mode 100644 tests/integration/kubernetes/k8s-inotify.bats create mode 100644 tests/integration/kubernetes/k8s-job.bats create mode 100644 tests/integration/kubernetes/k8s-kill-all-process-in-container.bats create mode 100644 tests/integration/kubernetes/k8s-limit-range.bats create mode 100644 tests/integration/kubernetes/k8s-liveness-probes.bats create mode 100644 tests/integration/kubernetes/k8s-memory.bats create mode 100644 tests/integration/kubernetes/k8s-nested-configmap-secret.bats create mode 100644 tests/integration/kubernetes/k8s-nginx-connectivity.bats create mode 100644 tests/integration/kubernetes/k8s-number-cpus.bats create mode 100644 tests/integration/kubernetes/k8s-oom.bats create mode 100644 tests/integration/kubernetes/k8s-optional-empty-configmap.bats create mode 100644 tests/integration/kubernetes/k8s-optional-empty-secret.bats create mode 100644 tests/integration/kubernetes/k8s-parallel.bats create mode 100644 
tests/integration/kubernetes/k8s-pid-ns.bats create mode 100644 tests/integration/kubernetes/k8s-pod-quota.bats create mode 100644 tests/integration/kubernetes/k8s-port-forward.bats create mode 100644 tests/integration/kubernetes/k8s-projected-volume.bats create mode 100644 tests/integration/kubernetes/k8s-qos-pods.bats create mode 100644 tests/integration/kubernetes/k8s-replication.bats create mode 100644 tests/integration/kubernetes/k8s-scale-nginx.bats create mode 100644 tests/integration/kubernetes/k8s-seccomp.bats create mode 100644 tests/integration/kubernetes/k8s-security-context.bats create mode 100644 tests/integration/kubernetes/k8s-shared-volume.bats create mode 100644 tests/integration/kubernetes/k8s-sysctls.bats create mode 100755 tests/integration/kubernetes/run_kubernetes_tests.sh create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/busybox-pod.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/busybox-template.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/configmap.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/footloose-configmap.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/initContainer-shared-volume.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/initcontainer-shareprocesspid.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/inject_secret.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/inotify-configmap-pod.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/inotify-configmap.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/inotify-updated-configmap.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/job-template.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/job.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/kata-runtimeclass.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/lifecycle-events.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/limit-range.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/nginx-deployment.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-besteffort.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-burstable.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-caps.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-configmap.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-cpu-defaults.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-cpu.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-custom-dns.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-empty-dir-fsgroup.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-empty-dir.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-env.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-file-volume.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-footloose.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-guaranteed.yaml create mode 100644 
tests/integration/kubernetes/runtimeclass_workloads/pod-http-liveness.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-hugepage.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-liveness.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-memory-limit.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-nested-configmap-secret.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-number-cpu.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-oom.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-optional-empty-configmap.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-optional-empty-secret.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-projected-volume.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-quota-deployment.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-readonly-volume.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-seccomp.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-secret-env.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-secret.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-security-context.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-shared-volume.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-sysctl.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/pod-tcp-liveness.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/redis-master-deployment.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/redis-master-service.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/replication-controller.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/resource-quota.yaml create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/stress/Dockerfile create mode 100644 tests/integration/kubernetes/runtimeclass_workloads/vfio.yaml create mode 100644 tests/integration/kubernetes/tests_common.sh diff --git a/tests/common.bash b/tests/common.bash new file mode 100644 index 000000000..a29b29b87 --- /dev/null +++ b/tests/common.bash @@ -0,0 +1,47 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +# This file contains common functions that +# are being used by our metrics and integration tests + +die() { + local msg="$*" + echo -e "[$(basename $0):${BASH_LINENO[0]}] ERROR: $msg" >&2 + exit 1 +} + +warn() { + local msg="$*" + echo -e "[$(basename $0):${BASH_LINENO[0]}] WARNING: $msg" +} + +info() { + local msg="$*" + echo -e "[$(basename $0):${BASH_LINENO[0]}] INFO: $msg" +} + +handle_error() { + local exit_code="${?}" + local line_number="${1:-}" + echo -e "[$(basename $0):$line_number] ERROR: $(eval echo "$BASH_COMMAND")" + exit "${exit_code}" +} +trap 'handle_error $LINENO' ERR + +waitForProcess() { + wait_time="$1" + sleep_time="$2" + cmd="$3" + while [ "$wait_time" -gt 0 ]; do + if eval "$cmd"; then + return 0 + else + sleep "$sleep_time" + wait_time=$((wait_time-sleep_time)) + fi + done + return 1 +} diff --git a/tests/integration/kubernetes/filter_k8s_test.sh 
b/tests/integration/kubernetes/filter_k8s_test.sh new file mode 100755 index 000000000..2b90076d9 --- /dev/null +++ b/tests/integration/kubernetes/filter_k8s_test.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# +# Copyright (c) 2019 ARM Limited +# +# SPDX-License-Identifier: Apache-2.0 + +set -o errexit +set -o nounset +set -o pipefail + +GOPATH_LOCAL="${GOPATH%%:*}" +KATA_DIR="${GOPATH_LOCAL}/src/github.com/kata-containers" +TEST_DIR="${KATA_DIR}/tests" +CI_DIR="${TEST_DIR}/.ci" + +K8S_FILTER_FLAG="kubernetes" + +source "${CI_DIR}/lib.sh" + +main() +{ + local K8S_CONFIG_FILE="$1" + local K8S_TEST_UNION="$2" + local result=() + + mapfile -d " " -t _K8S_TEST_UNION <<< "${K8S_TEST_UNION}" + + # install yq if it does not exist + ${CI_DIR}/install_yq.sh > /dev/null + + local K8S_SKIP_UNION=$("${GOPATH_LOCAL}/bin/yq" read "${K8S_CONFIG_FILE}" "${K8S_FILTER_FLAG}") + [ "${K8S_SKIP_UNION}" == "null" ] && return + mapfile -t _K8S_SKIP_UNION <<< "${K8S_SKIP_UNION}" + + for TEST_ENTRY in "${_K8S_TEST_UNION[@]}" + do + local flag="false" + for SKIP_ENTRY in "${_K8S_SKIP_UNION[@]}" + do + SKIP_ENTRY="${SKIP_ENTRY#- }.bats" + [ "$SKIP_ENTRY" == "$TEST_ENTRY" ] && flag="true" + done + [ "$flag" == "false" ] && result+=("$TEST_ENTRY") + done + echo ${result[@]} +} + +main "$@" diff --git a/tests/integration/kubernetes/filter_out_per_arch/aarch64.yaml b/tests/integration/kubernetes/filter_out_per_arch/aarch64.yaml new file mode 100644 index 000000000..8474a67fc --- /dev/null +++ b/tests/integration/kubernetes/filter_out_per_arch/aarch64.yaml @@ -0,0 +1,23 @@ +# +# Copyright (c) 2018 ARM Limited +# +# SPDX-License-Identifier: Apache-2.0 + +# for now, not all integration test suites fully pass on aarch64. +# some still need to be tested, and some need to be refined. +# the sequence of 'test' holds the supported integration test components.
+test: + - functional + - kubernetes + - cri-containerd + +kubernetes: + - k8s-cpu-ns + - k8s-limit-range + - k8s-number-cpus + - k8s-expose-ip + - k8s-oom + - k8s-block-volume + - k8s-inotify + - k8s-qos-pods + - k8s-footloose diff --git a/tests/integration/kubernetes/filter_out_per_arch/ppc64le.yaml b/tests/integration/kubernetes/filter_out_per_arch/ppc64le.yaml new file mode 100644 index 000000000..d8644e019 --- /dev/null +++ b/tests/integration/kubernetes/filter_out_per_arch/ppc64le.yaml @@ -0,0 +1,11 @@ +# +# Copyright (c) 2019 IBM +# +# SPDX-License-Identifier: Apache-2.0 + +kubernetes: + - k8s-block-volume + - k8s-limit-range + - k8s-number-cpus + - k8s-oom + - k8s-inotify diff --git a/tests/integration/kubernetes/filter_out_per_arch/s390x.yaml b/tests/integration/kubernetes/filter_out_per_arch/s390x.yaml new file mode 100644 index 000000000..224539d8b --- /dev/null +++ b/tests/integration/kubernetes/filter_out_per_arch/s390x.yaml @@ -0,0 +1,8 @@ +# +# Copyright (c) 2021 IBM +# +# SPDX-License-Identifier: Apache-2.0 + +kubernetes: + - k8s-caps + - k8s-inotify diff --git a/tests/integration/kubernetes/k8s-attach-handlers.bats b/tests/integration/kubernetes/k8s-attach-handlers.bats new file mode 100644 index 000000000..10a7a0f19 --- /dev/null +++ b/tests/integration/kubernetes/k8s-attach-handlers.bats @@ -0,0 +1,42 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + nginx_version="${docker_images_nginx_version}" + nginx_image="nginx:$nginx_version" + + pod_name="handlers" + + get_pod_config_dir +} + +@test "Running with postStart and preStop handlers" { + # Create yaml + sed -e "s/\${nginx_version}/${nginx_image}/" \ + "${pod_config_dir}/lifecycle-events.yaml" > "${pod_config_dir}/test-lifecycle-events.yaml" + + # Create the pod with postStart and preStop handlers + kubectl create -f "${pod_config_dir}/test-lifecycle-events.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name + + # Check postStart message + display_message="cat /usr/share/message" + check_postStart=$(kubectl exec $pod_name -- sh -c "$display_message" | grep "Hello from the postStart handler") +} + +teardown(){ + # Debugging information + kubectl describe "pod/$pod_name" + + rm -f "${pod_config_dir}/test-lifecycle-events.yaml" + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-caps.bats b/tests/integration/kubernetes/k8s-caps.bats new file mode 100644 index 000000000..3126af640 --- /dev/null +++ b/tests/integration/kubernetes/k8s-caps.bats @@ -0,0 +1,55 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2021 Apple Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="pod-caps" + get_pod_config_dir +# We expect the capabilities mask to vary per distribution and runtime +# configuration. Even for this, we should expect a few common items to +# not be set in the mask unless we are failing to apply capabilities.
If +# we fail to configure, we'll see all bits set for permitted: 0x03fffffffff +# We do expect certain parts of the mask to be common when we set appropriately: +# b20..b23 should be cleared for all (no CAP_SYS_{PACCT, ADMIN, NICE, BOOT}) +# b0..b11 are consistent across the distros: +# 0x5fb: 0101 1111 1011 +# | | \- should be cleared (CAP_DAC_READ_SEARCH) +# | \- should be cleared (CAP_LINUX_IMMUTABLE) +# \- should be cleared (CAP_NET_BROADCAST) +# Example match: +# CapPrm: 00000000a80425fb + expected="CapPrm.*..0..5fb$" +} + +@test "Check capabilities of pod" { + # Create pod + kubectl create -f "${pod_config_dir}/pod-caps.yaml" + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Verify expected capabilities for the running container. Add retry to ensure + # that the container had time to execute: + wait_time=5 + sleep_time=1 + cmd="kubectl logs $pod_name | grep -q $expected" + waitForProcess "$wait_time" "$sleep_time" "$cmd" + + # Verify expected capabilities from exec context: + kubectl exec "$pod_name" -- sh -c "cat /proc/self/status" | grep -q "$expected" +} + +teardown() { + # Debugging information + echo "expected capability mask:" + echo "$expected" + echo "observed: " + kubectl logs "pod/$pod_name" + kubectl exec "$pod_name" -- sh -c "cat /proc/self/status | grep Cap" + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-configmap.bats b/tests/integration/kubernetes/k8s-configmap.bats new file mode 100644 index 000000000..6809ba130 --- /dev/null +++ b/tests/integration/kubernetes/k8s-configmap.bats @@ -0,0 +1,43 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir +} + +@test "ConfigMap for a pod" { + config_name="test-configmap" + pod_name="config-env-test-pod" + + # Create ConfigMap + kubectl create -f "${pod_config_dir}/configmap.yaml" + + # View the values of the keys + kubectl get configmaps $config_name -o yaml | grep -q "data-" + + # Create a pod that consumes the ConfigMap + kubectl create -f "${pod_config_dir}/pod-configmap.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check env + cmd="env" + kubectl exec $pod_name -- sh -c $cmd | grep "KUBE_CONFIG_1=value-1" + kubectl exec $pod_name -- sh -c $cmd | grep "KUBE_CONFIG_2=value-2" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" + kubectl delete configmap "$config_name" +} diff --git a/tests/integration/kubernetes/k8s-copy-file.bats b/tests/integration/kubernetes/k8s-copy-file.bats new file mode 100644 index 000000000..0106e12c3 --- /dev/null +++ b/tests/integration/kubernetes/k8s-copy-file.bats @@ -0,0 +1,83 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir + file_name="file.txt" + content="Hello" +} + +@test "Copy file in a pod" { + # Create pod + pod_name="pod-copy-file-from-host" + ctr_name="ctr-copy-file-from-host" + + pod_config=$(mktemp --tmpdir pod_config.XXXXXX.yaml) + cp "$pod_config_dir/busybox-template.yaml" "$pod_config" + sed -i "s/POD_NAME/$pod_name/" "$pod_config" + sed -i "s/CTR_NAME/$ctr_name/" "$pod_config" + + kubectl create -f 
"${pod_config}" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name + + # Create a file + echo "$content" > "$file_name" + + # Copy file into a pod + kubectl cp "$file_name" $pod_name:/tmp + + # Print environment variables + kubectl exec $pod_name -- sh -c "cat /tmp/$file_name | grep $content" +} + +@test "Copy from pod to host" { + # Create pod + pod_name="pod-copy-file-to-host" + ctr_name="ctr-copy-file-to-host" + + pod_config=$(mktemp --tmpdir pod_config.XXXXXX.yaml) + cp "$pod_config_dir/busybox-template.yaml" "$pod_config" + sed -i "s/POD_NAME/$pod_name/" "$pod_config" + sed -i "s/CTR_NAME/$ctr_name/" "$pod_config" + + kubectl create -f "${pod_config}" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name + + kubectl logs "$pod_name" || true + kubectl describe pod "$pod_name" || true + kubectl get pods --all-namespaces + + # Create a file in the pod + kubectl exec "$pod_name" -- sh -c "cd /tmp && echo $content > $file_name" + + kubectl logs "$pod_name" || true + kubectl describe pod "$pod_name" || true + kubectl get pods --all-namespaces + + # Copy file from pod to host + kubectl cp "$pod_name":/tmp/"$file_name" "$file_name" + + # Verify content + cat "$file_name" | grep "$content" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + rm -f "$file_name" + kubectl delete pod "$pod_name" + + rm -f "$pod_config" +} diff --git a/tests/integration/kubernetes/k8s-cpu-ns.bats b/tests/integration/kubernetes/k8s-cpu-ns.bats new file mode 100644 index 000000000..289dfc667 --- /dev/null +++ b/tests/integration/kubernetes/k8s-cpu-ns.bats @@ -0,0 +1,76 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + pod_name="constraints-cpu-test" + container_name="first-cpu-container" + sharessyspath="/sys/fs/cgroup/cpu/cpu.shares" + quotasyspath="/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + periodsyspath="/sys/fs/cgroup/cpu/cpu.cfs_period_us" + total_cpus=2 + total_requests=512 + total_cpu_container=1 + + get_pod_config_dir +} + +@test "Check CPU constraints" { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + # Create the pod + kubectl create -f "${pod_config_dir}/pod-cpu.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + retries="10" + + num_cpus_cmd='grep -e "^processor" /proc/cpuinfo |wc -l' + # Check the total of cpus + for _ in $(seq 1 "$retries"); do + # Get number of cpus + total_cpus_container=$(kubectl exec pod/"$pod_name" -c "$container_name" \ + -- sh -c "$num_cpus_cmd") + # Verify number of cpus + [ "$total_cpus_container" -le "$total_cpus" ] + [ "$total_cpus_container" -eq "$total_cpus" ] && break + sleep 1 + done + [ "$total_cpus_container" -eq "$total_cpus" ] + + # Check the total of requests + total_requests_container=$(kubectl exec $pod_name -c $container_name \ + -- sh -c "cat $sharessyspath") + + [ "$total_requests_container" -eq "$total_requests" ] + + # Check the cpus inside the container + + total_cpu_quota=$(kubectl exec $pod_name -c $container_name \ + -- sh -c "cat $quotasyspath") + + total_cpu_period=$(kubectl exec $pod_name -c $container_name \ + -- sh -c "cat $periodsyspath") + + division_quota_period=$(echo 
$((total_cpu_quota/total_cpu_period))) + + [ "$division_quota_period" -eq "$total_cpu_container" ] +} + +teardown() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-credentials-secrets.bats b/tests/integration/kubernetes/k8s-credentials-secrets.bats new file mode 100644 index 000000000..51d2ba995 --- /dev/null +++ b/tests/integration/kubernetes/k8s-credentials-secrets.bats @@ -0,0 +1,62 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + get_pod_config_dir +} + +@test "Credentials using secrets" { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + secret_name="test-secret" + pod_name="secret-test-pod" + second_pod_name="secret-envars-test-pod" + + # Create the secret + kubectl create -f "${pod_config_dir}/inject_secret.yaml" + + # View information about the secret + kubectl get secret "${secret_name}" -o yaml | grep "type: Opaque" + + # Create a pod that has access to the secret through a volume + kubectl create -f "${pod_config_dir}/pod-secret.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # List the files + cmd="ls /tmp/secret-volume" + kubectl exec $pod_name -- sh -c "$cmd" | grep -w "password" + kubectl exec $pod_name -- sh -c "$cmd" | grep -w "username" + + # Create a pod that has access to the secret data through environment variables + kubectl create -f "${pod_config_dir}/pod-secret-env.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$second_pod_name" + + # Display environment variables + second_cmd="printenv" + kubectl exec $second_pod_name -- sh -c "$second_cmd" | grep -w "SECRET_USERNAME" + kubectl exec $second_pod_name -- sh -c "$second_cmd" | grep -w "SECRET_PASSWORD" +} + +teardown() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + # Debugging information + kubectl describe "pod/$pod_name" + kubectl describe "pod/$second_pod_name" + + kubectl delete pod "$pod_name" "$second_pod_name" + kubectl delete secret "$secret_name" +} diff --git a/tests/integration/kubernetes/k8s-custom-dns.bats b/tests/integration/kubernetes/k8s-custom-dns.bats new file mode 100644 index 000000000..aa2532364 --- /dev/null +++ b/tests/integration/kubernetes/k8s-custom-dns.bats @@ -0,0 +1,34 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="custom-dns-test" + file_name="/etc/resolv.conf" + get_pod_config_dir +} + +@test "Check custom dns" { + # Create the pod + kubectl create -f "${pod_config_dir}/pod-custom-dns.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name + + # Check dns config at /etc/resolv.conf + kubectl exec "$pod_name" -- cat "$file_name" | grep -q "nameserver 1.2.3.4" + kubectl exec "$pod_name" -- cat "$file_name" | grep -q "search dns.test.search" +} + +teardown() { + # Debugging information + kubectl describe 
"pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-empty-dirs.bats b/tests/integration/kubernetes/k8s-empty-dirs.bats new file mode 100644 index 000000000..0bf901caa --- /dev/null +++ b/tests/integration/kubernetes/k8s-empty-dirs.bats @@ -0,0 +1,74 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +assert_equal() { + local expected=$1 + local actual=$2 + if [[ "$expected" != "$actual" ]]; then + echo "expected: $expected, got: $actual" + return 1 + fi +} + +setup() { + pod_name="sharevol-kata" + get_pod_config_dir + pod_logs_file="" +} + +@test "Empty dir volumes" { + # Create the pod + kubectl create -f "${pod_config_dir}/pod-empty-dir.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check volume mounts + cmd="mount | grep cache" + kubectl exec $pod_name -- sh -c "$cmd" | grep "/tmp/cache type tmpfs" + + # Check it can write up to the volume limit (50M) + cmd="dd if=/dev/zero of=/tmp/cache/file1 bs=1M count=50; echo $?" + kubectl exec $pod_name -- sh -c "$cmd" | tail -1 | grep 0 +} + +@test "Empty dir volume when FSGroup is specified with non-root container" { + # This is a reproducer of k8s e2e "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is non-root" test + pod_file="${pod_config_dir}/pod-empty-dir-fsgroup.yaml" + agnhost_name="${container_images_agnhost_name}" + agnhost_version="${container_images_agnhost_version}" + image="${agnhost_name}:${agnhost_version}" + + # Try to avoid timeout by prefetching the image. + sed -e "s#\${agnhost_image}#${image}#" "$pod_file" |\ + kubectl create -f - + cmd="kubectl get pods ${pod_name} | grep Completed" + waitForProcess "${wait_time}" "${sleep_time}" "${cmd}" + + pod_logs_file="$(mktemp)" + for container in mounttest-container mounttest-container-2; do + kubectl logs "$pod_name" "$container" > "$pod_logs_file" + # Check owner UID of file + uid=$(cat $pod_logs_file | grep 'owner UID of' | sed 's/.*:\s//') + assert_equal "1001" "$uid" + # Check owner GID of file + gid=$(cat $pod_logs_file | grep 'owner GID of' | sed 's/.*:\s//') + assert_equal "123" "$gid" + done +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" + + [ ! 
-f "$pod_logs_file" ] || rm -f "$pod_logs_file" +} diff --git a/tests/integration/kubernetes/k8s-env.bats b/tests/integration/kubernetes/k8s-env.bats new file mode 100644 index 000000000..ee09d10f2 --- /dev/null +++ b/tests/integration/kubernetes/k8s-env.bats @@ -0,0 +1,40 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="test-env" + get_pod_config_dir +} + +@test "Environment variables" { + # Create pod + kubectl create -f "${pod_config_dir}/pod-env.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Print environment variables + cmd="printenv" + kubectl exec $pod_name -- sh -c $cmd | grep "MY_POD_NAME=$pod_name" + kubectl exec $pod_name -- sh -c $cmd | \ + grep "HOST_IP=\([0-9]\+\(\.\|$\)\)\{4\}" + # Requested 32Mi of memory + kubectl exec $pod_name -- sh -c $cmd | \ + grep "MEMORY_REQUESTS=$((1024 * 1024 * 32))" + # Memory limits allocated by the node + kubectl exec $pod_name -- sh -c $cmd | grep "MEMORY_LIMITS=[1-9]\+" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-exec.bats b/tests/integration/kubernetes/k8s-exec.bats new file mode 100644 index 000000000..aa14d7160 --- /dev/null +++ b/tests/integration/kubernetes/k8s-exec.bats @@ -0,0 +1,65 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2020 Ant Financial +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir + pod_name="busybox" + first_container_name="first-test-container" + second_container_name="second-test-container" +} + +@test "Kubectl exec" { + # Create the pod + kubectl create -f "${pod_config_dir}/busybox-pod.yaml" + + # Get pod specification + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Run commands in Pod + ## Cases for -it options + # TODO: enable -i option after updated to new CRI-O + # see: https://github.com/kata-containers/tests/issues/2770 + # kubectl exec -i "$pod_name" -- ls -tl / + # kubectl exec -it "$pod_name" -- ls -tl / + kubectl exec "$pod_name" -- date + + ## Case for stdin + kubectl exec -i "$pod_name" -- sh <<-EOF +echo abc > /tmp/abc.txt +grep abc /tmp/abc.txt +exit +EOF + + ## Case for return value + ### Command return non-zero code + run bash -c "kubectl exec -i $pod_name -- sh <<-EOF +exit 123 +EOF" + echo "run status: $status" 1>&2 + echo "run output: $output" 1>&2 + [ "$status" -eq 123 ] + + ## Cases for target container + ### First container + container_name=$(kubectl exec $pod_name -c $first_container_name -- env | grep CONTAINER_NAME) + [ "$container_name" == "CONTAINER_NAME=$first_container_name" ] + + ### Second container + container_name=$(kubectl exec $pod_name -c $second_container_name -- env | grep CONTAINER_NAME) + [ "$container_name" == "CONTAINER_NAME=$second_container_name" ] + +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-footloose.bats b/tests/integration/kubernetes/k8s-footloose.bats new file mode 100644 index 000000000..b8b10db7a --- /dev/null +++ b/tests/integration/kubernetes/k8s-footloose.bats @@ -0,0 +1,58 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="footubuntu" + config_name="ssh-config-map" + get_pod_config_dir + + # Creates ssh-key + key_path=$(mktemp --tmpdir) + public_key_path="${key_path}.pub" + echo -e 'y\n' | sudo ssh-keygen -t rsa -N "" -f "$key_path" + + # Create ConfigMap.yaml + configmap_yaml="${pod_config_dir}/footloose-rsa-configmap.yaml" + sed -e "/\${ssh_key}/r ${public_key_path}" -e "/\${ssh_key}/d" \ + "${pod_config_dir}/footloose-configmap.yaml" > "$configmap_yaml" + sed -i 's/ssh-rsa/ ssh-rsa/' "$configmap_yaml" +} + +@test "Footloose pod" { + cmd="uname -r" + sleep_connect="10" + + # Create ConfigMap + kubectl create -f "$configmap_yaml" + + # Create pod + kubectl create -f "${pod_config_dir}/pod-footloose.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Get pod ip + pod_ip=$(kubectl get pod $pod_name --template={{.status.podIP}}) + + # Exec to the pod + kubectl exec $pod_name -- sh -c "$cmd" + + # Connect to the VM + sleep "$sleep_connect" + ssh -i "$key_path" -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no 2>/dev/null root@"$pod_ip" "$cmd" +} + +teardown() { + kubectl delete pod "$pod_name" + kubectl delete configmap "$config_name" + sudo rm -rf "$public_key_path" + sudo rm -rf "$key_path" + sudo rm -rf "$configmap_yaml" +} diff --git a/tests/integration/kubernetes/k8s-inotify.bats b/tests/integration/kubernetes/k8s-inotify.bats new file mode 100644 index 000000000..f3dbc073f --- /dev/null +++ b/tests/integration/kubernetes/k8s-inotify.bats @@ -0,0 +1,46 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2021 Apple Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + get_pod_config_dir +} + +@test "configmap update works, and preserves symlinks" { + pod_name="inotify-configmap-testing" + + # Create configmap for my deployment + kubectl apply -f "${pod_config_dir}"/inotify-configmap.yaml + + # Create deployment that expects identity-certs + kubectl apply -f "${pod_config_dir}"/inotify-configmap-pod.yaml + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Update configmap + kubectl apply -f "${pod_config_dir}"/inotify-updated-configmap.yaml + + # Ideally we'd wait for the pod to complete... 
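+    # A sketch of one possible wait, assuming the container exits once
+    # inotifywait observes the update (so the pod shows Completed):
+    #   cmd="kubectl get pods $pod_name | grep Completed"
+    #   waitForProcess "$wait_time" "$sleep_time" "$cmd"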
+ sleep 120 + + # Verify we saw the update + result=$(kubectl get pod "$pod_name" --output="jsonpath={.status.containerStatuses[]}") + echo $result | grep -vq Error + + kubectl delete configmap cm +} + + + +teardown() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + # Debugging information + kubectl describe "pod/$pod_name" + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-job.bats b/tests/integration/kubernetes/k8s-job.bats new file mode 100644 index 000000000..e1fd3cc38 --- /dev/null +++ b/tests/integration/kubernetes/k8s-job.bats @@ -0,0 +1,49 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir +} + +@test "Run a job to completion" { + job_name="job-pi-test" + + # Create job + kubectl apply -f "${pod_config_dir}/job.yaml" + + # Verify job + kubectl describe jobs/"$job_name" | grep "SuccessfulCreate" + + # List pods that belong to the job + pod_name=$(kubectl get pods --selector=job-name=$job_name --output=jsonpath='{.items[*].metadata.name}') + + # Verify that the job is completed + cmd="kubectl get pods -o jsonpath='{.items[*].status.phase}' | grep Succeeded" + waitForProcess "$wait_time" "$sleep_time" "$cmd" + + # Verify the output of the pod + pi_number="3.14" + kubectl logs "$pod_name" | grep "$pi_number" +} + +teardown() { + kubectl delete pod "$pod_name" + # Verify that pod is not running + run kubectl get pods + echo "$output" + [[ "$output" =~ "No resources found" ]] + + + kubectl delete jobs/"$job_name" + # Verify that the job is not running + run kubectl get jobs + echo "$output" + [[ "$output" =~ "No resources found" ]] +} diff --git a/tests/integration/kubernetes/k8s-kill-all-process-in-container.bats b/tests/integration/kubernetes/k8s-kill-all-process-in-container.bats new file mode 100644 index 000000000..5081b8d7d --- /dev/null +++ b/tests/integration/kubernetes/k8s-kill-all-process-in-container.bats @@ -0,0 +1,37 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2022 AntGroup Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="busybox" + first_container_name="first-test-container" + + get_pod_config_dir +} + +@test "Check PID namespaces" { + # Create the pod + kubectl create -f "${pod_config_dir}/initcontainer-shareprocesspid.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name + + # Check PID from first container + first_pid_container=$(kubectl exec $pod_name -c $first_container_name \ + -- ps | grep "tail" || true) + # Verify that the tail process didn't exist + [ -z $first_pid_container ] || die "found processes pid: $first_pid_container" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-limit-range.bats b/tests/integration/kubernetes/k8s-limit-range.bats new file mode 100644 index 000000000..7e5686c36 --- /dev/null +++ b/tests/integration/kubernetes/k8s-limit-range.bats @@ -0,0 +1,41 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir + 
namespace_name="default-cpu-example" + pod_name="default-cpu-test" +} + +@test "Limit range for storage" { + # Create namespace + kubectl create namespace "$namespace_name" + + # Create the LimitRange in the namespace + kubectl create -f "${pod_config_dir}/limit-range.yaml" --namespace=${namespace_name} + + # Create the pod + kubectl create -f "${pod_config_dir}/pod-cpu-defaults.yaml" --namespace=${namespace_name} + + # Get pod specification + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" --namespace="$namespace_name" + + # Check limits + # Find the 500 millicpus specified at the yaml + kubectl describe pod "$pod_name" --namespace="$namespace_name" | grep "500m" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" + kubectl delete namespaces "$namespace_name" +} diff --git a/tests/integration/kubernetes/k8s-liveness-probes.bats b/tests/integration/kubernetes/k8s-liveness-probes.bats new file mode 100644 index 000000000..5c8a736e7 --- /dev/null +++ b/tests/integration/kubernetes/k8s-liveness-probes.bats @@ -0,0 +1,80 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + sleep_liveness=20 + agnhost_name="${container_images_agnhost_name}" + agnhost_version="${container_images_agnhost_version}" + + get_pod_config_dir +} + +@test "Liveness probe" { + pod_name="liveness-exec" + + # Create pod + kubectl create -f "${pod_config_dir}/pod-liveness.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check liveness probe returns a success code + kubectl describe pod "$pod_name" | grep -E "Liveness|#success=1" + + # Sleep necessary to check liveness probe returns a failure code + sleep "$sleep_liveness" + kubectl describe pod "$pod_name" | grep "Liveness probe failed" +} + +@test "Liveness http probe" { + pod_name="liveness-http" + + # Create pod + sed -e "s#\${agnhost_image}#${agnhost_name}:${agnhost_version}#" \ + "${pod_config_dir}/pod-http-liveness.yaml" |\ + kubectl create -f - + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check liveness probe returns a success code + kubectl describe pod "$pod_name" | grep -E "Liveness|#success=1" + + # Sleep necessary to check liveness probe returns a failure code + sleep "$sleep_liveness" + kubectl describe pod "$pod_name" | grep "Started container" +} + + +@test "Liveness tcp probe" { + pod_name="tcptest" + + # Create pod + sed -e "s#\${agnhost_image}#${agnhost_name}:${agnhost_version}#" \ + "${pod_config_dir}/pod-tcp-liveness.yaml" |\ + kubectl create -f - + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check liveness probe returns a success code + kubectl describe pod "$pod_name" | grep -E "Liveness|#success=1" + + # Sleep necessary to check liveness probe returns a failure code + sleep "$sleep_liveness" + kubectl describe pod "$pod_name" | grep "Started container" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-memory.bats b/tests/integration/kubernetes/k8s-memory.bats new file mode 100644 index 000000000..5bcffaab4 --- /dev/null +++ b/tests/integration/kubernetes/k8s-memory.bats @@ -0,0 +1,56 @@ +#!/usr/bin/env bats +# +# 
Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="memory-test" + get_pod_config_dir +} + +setup_yaml() { + sed \ + -e "s/\${memory_size}/${memory_limit_size}/" \ + -e "s/\${memory_allocated}/${allocated_size}/" \ + "${pod_config_dir}/pod-memory-limit.yaml" +} + + +@test "Exceeding memory constraints" { + memory_limit_size="50Mi" + allocated_size="250M" + # Create test .yaml + setup_yaml > "${pod_config_dir}/test_exceed_memory.yaml" + + # Create the pod exceeding memory constraints + run kubectl create -f "${pod_config_dir}/test_exceed_memory.yaml" + [ "$status" -ne 0 ] + + rm -f "${pod_config_dir}/test_exceed_memory.yaml" +} + +@test "Running within memory constraints" { + memory_limit_size="600Mi" + allocated_size="150M" + # Create test .yaml + setup_yaml > "${pod_config_dir}/test_within_memory.yaml" + + # Create the pod within memory constraints + kubectl create -f "${pod_config_dir}/test_within_memory.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + rm -f "${pod_config_dir}/test_within_memory.yaml" + kubectl delete pod "$pod_name" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" || true +} diff --git a/tests/integration/kubernetes/k8s-nested-configmap-secret.bats b/tests/integration/kubernetes/k8s-nested-configmap-secret.bats new file mode 100644 index 000000000..b84fb89cc --- /dev/null +++ b/tests/integration/kubernetes/k8s-nested-configmap-secret.bats @@ -0,0 +1,39 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2021 IBM Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + get_pod_config_dir + + pod_name="nested-configmap-secret-pod" +} + +@test "Nested mount of a secret volume in a configmap volume for a pod" { + # Creates a configmap, secret and pod that mounts the secret inside the configmap + kubectl create -f "${pod_config_dir}/pod-nested-configmap-secret.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check config/secret value are correct + [ "myconfig" == $(kubectl exec $pod_name -- cat /config/config_key) ] + [ "mysecret" == $(kubectl exec $pod_name -- cat /config/secret/secret_key) ] +} + +teardown() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + # Debugging information + kubectl describe "pod/$pod_name" + + # Delete the configmap, secret, and pod used for testing + kubectl delete -f "${pod_config_dir}/pod-nested-configmap-secret.yaml" +} diff --git a/tests/integration/kubernetes/k8s-nginx-connectivity.bats b/tests/integration/kubernetes/k8s-nginx-connectivity.bats new file mode 100644 index 000000000..bc7271dc8 --- /dev/null +++ b/tests/integration/kubernetes/k8s-nginx-connectivity.bats @@ -0,0 +1,53 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + nginx_version="${docker_images_nginx_version}" + nginx_image="nginx:$nginx_version" + busybox_image="busybox" + deployment="nginx-deployment" + + get_pod_config_dir +} + +@test "Verify nginx connectivity between 
pods" { + + # Create test .yaml + sed -e "s/\${nginx_version}/${nginx_image}/" \ + "${pod_config_dir}/${deployment}.yaml" > "${pod_config_dir}/test-${deployment}.yaml" + + kubectl create -f "${pod_config_dir}/test-${deployment}.yaml" + kubectl wait --for=condition=Available --timeout=$timeout deployment/${deployment} + kubectl expose deployment/${deployment} + + busybox_pod="test-nginx" + kubectl run $busybox_pod --restart=Never -it --image="$busybox_image" \ + -- sh -c 'i=1; while [ $i -le '"$wait_time"' ]; do wget --timeout=5 '"$deployment"' && break; sleep 1; i=$(expr $i + 1); done' + + # check pod's status, it should be Succeeded. + # or {.status.containerStatuses[0].state.terminated.reason} = "Completed" + [ $(kubectl get pods/$busybox_pod -o jsonpath="{.status.phase}") = "Succeeded" ] + kubectl logs "$busybox_pod" | grep "index.html" +} + +teardown() { + # Debugging information + kubectl describe "pod/$busybox_pod" + kubectl get "pod/$busybox_pod" -o yaml + kubectl logs "$busybox_pod" + kubectl get deployment/${deployment} -o yaml + kubectl get service/${deployment} -o yaml + kubectl get endpoints/${deployment} -o yaml + + rm -f "${pod_config_dir}/test-${deployment}.yaml" + kubectl delete deployment "$deployment" + kubectl delete service "$deployment" + kubectl delete pod "$busybox_pod" +} diff --git a/tests/integration/kubernetes/k8s-number-cpus.bats b/tests/integration/kubernetes/k8s-number-cpus.bats new file mode 100644 index 000000000..338963f6d --- /dev/null +++ b/tests/integration/kubernetes/k8s-number-cpus.bats @@ -0,0 +1,47 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="cpu-test" + container_name="c1" + get_pod_config_dir +} + +# Skip on aarch64 due to missing cpu hotplug related functionality. 
+@test "Check number of cpus" { + # Create pod + kubectl create -f "${pod_config_dir}/pod-number-cpu.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + retries="10" + max_number_cpus="3" + + num_cpus_cmd='cat /proc/cpuinfo |grep processor|wc -l' + for _ in $(seq 1 "$retries"); do + # Get number of cpus + number_cpus=$(kubectl exec pod/"$pod_name" -c "$container_name" \ + -- sh -c "$num_cpus_cmd") + if [[ "$number_cpus" =~ ^[0-9]+$ ]]; then + # Verify number of cpus + [ "$number_cpus" -le "$max_number_cpus" ] + [ "$number_cpus" -eq "$max_number_cpus" ] && break + fi + sleep 1 + done +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-oom.bats b/tests/integration/kubernetes/k8s-oom.bats new file mode 100644 index 000000000..f89b761f8 --- /dev/null +++ b/tests/integration/kubernetes/k8s-oom.bats @@ -0,0 +1,37 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2020 Ant Group +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + pod_name="pod-oom" + get_pod_config_dir +} + +@test "Test OOM events for pods" { + # Create pod + kubectl create -f "${pod_config_dir}/$pod_name.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check if OOMKilled + cmd="kubectl get pods "$pod_name" -o jsonpath='{.status.containerStatuses[0].state.terminated.reason}' | grep OOMKilled" + + waitForProcess "$wait_time" "$sleep_time" "$cmd" + + rm -f "${pod_config_dir}/test_pod_oom.yaml" +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + kubectl get "pod/$pod_name" -o yaml + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-optional-empty-configmap.bats b/tests/integration/kubernetes/k8s-optional-empty-configmap.bats new file mode 100644 index 000000000..05c779b77 --- /dev/null +++ b/tests/integration/kubernetes/k8s-optional-empty-configmap.bats @@ -0,0 +1,39 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2021 IBM Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir +} + +@test "Optional and Empty ConfigMap Volume for a pod" { + config_name="empty-config" + pod_name="optional-empty-config-test-pod" + + # Create Empty ConfigMap + kubectl create configmap "$config_name" + + # Create a pod that consumes the "empty-config" and "optional-missing-config" ConfigMaps as volumes + kubectl create -f "${pod_config_dir}/pod-optional-empty-configmap.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check configmap folders exist + kubectl exec $pod_name -- sh -c ls /empty-config + kubectl exec $pod_name -- sh -c ls /optional-missing-config +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" + kubectl delete configmap "$config_name" +} diff --git a/tests/integration/kubernetes/k8s-optional-empty-secret.bats b/tests/integration/kubernetes/k8s-optional-empty-secret.bats new file mode 100644 index 000000000..958603416 --- /dev/null +++ b/tests/integration/kubernetes/k8s-optional-empty-secret.bats @@ -0,0 +1,39 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2021 IBM Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + 
+load "${BATS_TEST_DIRNAME}/../../common.bash"
+load "${BATS_TEST_DIRNAME}/tests_common.sh"
+
+setup() {
+    get_pod_config_dir
+}
+
+@test "Optional and Empty Secret Volume for a pod" {
+    secret_name="empty-secret"
+    pod_name="optional-empty-secret-test-pod"
+
+    # Create Empty Secret
+    kubectl create secret generic "$secret_name"
+
+    # Create a pod that consumes the "empty-secret" and "optional-missing-secret" Secrets as volumes
+    kubectl create -f "${pod_config_dir}/pod-optional-empty-secret.yaml"
+
+    # Check pod creation
+    kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
+
+    # Check secret folders exist
+    kubectl exec $pod_name -- sh -c "ls /empty-secret"
+    kubectl exec $pod_name -- sh -c "ls /optional-missing-secret"
+}
+
+teardown() {
+    # Debugging information
+    kubectl describe "pod/$pod_name"
+
+    kubectl delete pod "$pod_name"
+    kubectl delete secret "$secret_name"
+}
diff --git a/tests/integration/kubernetes/k8s-parallel.bats b/tests/integration/kubernetes/k8s-parallel.bats
new file mode 100644
index 000000000..4408ea5e5
--- /dev/null
+++ b/tests/integration/kubernetes/k8s-parallel.bats
@@ -0,0 +1,48 @@
+#!/usr/bin/env bats
+#
+# Copyright (c) 2018 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+load "${BATS_TEST_DIRNAME}/../../common.bash"
+load "${BATS_TEST_DIRNAME}/tests_common.sh"
+
+setup() {
+    get_pod_config_dir
+    job_name="jobtest"
+    names=( "test1" "test2" "test3" )
+}
+
+@test "Parallel jobs" {
+    # Create yaml files
+    for i in "${names[@]}"; do
+        sed "s/\$ITEM/$i/" "${pod_config_dir}/job-template.yaml" > "${pod_config_dir}/job-$i.yaml"
+    done
+
+    # Create the jobs
+    for i in "${names[@]}"; do
+        kubectl create -f "${pod_config_dir}/job-$i.yaml"
+    done
+
+    # Check the jobs
+    kubectl get jobs -l jobgroup=${job_name}
+
+    # Check the pods
+    kubectl wait --for=condition=Ready --timeout=$timeout pod -l jobgroup=${job_name}
+
+    # Check output of the jobs
+    for i in $(kubectl get pods -l jobgroup=${job_name} -o name); do
+        kubectl logs ${i}
+    done
+}
+
+teardown() {
+    # Delete jobs
+    kubectl delete jobs -l jobgroup=${job_name}
+
+    # Remove generated yaml files
+    for i in "${names[@]}"; do
+        rm -f "${pod_config_dir}/job-$i.yaml"
+    done
+}
diff --git a/tests/integration/kubernetes/k8s-pid-ns.bats b/tests/integration/kubernetes/k8s-pid-ns.bats
new file mode 100644
index 000000000..8726af48b
--- /dev/null
+++ b/tests/integration/kubernetes/k8s-pid-ns.bats
@@ -0,0 +1,48 @@
+#!/usr/bin/env bats
+#
+# Copyright (c) 2018 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+load "${BATS_TEST_DIRNAME}/../../common.bash"
+load "${BATS_TEST_DIRNAME}/tests_common.sh"
+
+setup() {
+    pod_name="busybox"
+    first_container_name="first-test-container"
+    second_container_name="second-test-container"
+
+    get_pod_config_dir
+}
+
+@test "Check PID namespaces" {
+    # Create the pod
+    kubectl create -f "${pod_config_dir}/busybox-pod.yaml"
+
+    # Check pod creation
+    kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
+
+    # Check PID from first container
+    first_pid_container=$(kubectl exec $pod_name -c $first_container_name \
+        -- ps | grep "/pause")
+    # Verify that it is not empty
+    check_first_pid=$(echo $first_pid_container | wc -l)
+    [ "$check_first_pid" == "1" ]
+
+    # Check PID from second container
+    second_pid_container=$(kubectl exec $pod_name -c $second_container_name \
+        -- ps | grep "/pause")
+    # Verify that it is not empty
+    check_second_pid=$(echo $second_pid_container | wc -l)
+    [ "$check_second_pid" == "1" ]
+
+    [ "$first_pid_container" ==
"$second_pid_container" ] +} + +teardown() { + # Debugging information + kubectl describe "pod/$pod_name" + + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-pod-quota.bats b/tests/integration/kubernetes/k8s-pod-quota.bats new file mode 100644 index 000000000..addc37bb3 --- /dev/null +++ b/tests/integration/kubernetes/k8s-pod-quota.bats @@ -0,0 +1,37 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + get_pod_config_dir +} + +@test "Pod quota" { + resource_name="pod-quota" + deployment_name="deploymenttest" + namespace="test-quota-ns" + + # Create the resourcequota + kubectl create -f "${pod_config_dir}/resource-quota.yaml" + + # View information about resourcequota + kubectl get -n "$namespace" resourcequota "$resource_name" \ + --output=yaml | grep 'pods: "2"' + + # Create deployment + kubectl create -f "${pod_config_dir}/pod-quota-deployment.yaml" + + # View deployment + kubectl wait --for=condition=Available --timeout=$timeout \ + -n "$namespace" deployment/${deployment_name} +} + +teardown() { + kubectl delete -n "$namespace" deployment "$deployment_name" + kubectl delete -f "${pod_config_dir}/resource-quota.yaml" +} diff --git a/tests/integration/kubernetes/k8s-port-forward.bats b/tests/integration/kubernetes/k8s-port-forward.bats new file mode 100644 index 000000000..d46c15f42 --- /dev/null +++ b/tests/integration/kubernetes/k8s-port-forward.bats @@ -0,0 +1,71 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" +source "/etc/os-release" || source "/usr/lib/os-release" + +issue="https://github.com/kata-containers/runtime/issues/1834" + +setup() { + skip "test not working see: ${issue}" + get_pod_config_dir +} + +@test "Port forwarding" { + skip "test not working see: ${issue}" + deployment_name="redis-master" + + # Create deployment + kubectl apply -f "${pod_config_dir}/redis-master-deployment.yaml" + + # Check deployment + kubectl wait --for=condition=Available --timeout=$timeout deployment/"$deployment_name" + kubectl expose deployment/"$deployment_name" + + # Get pod name + pod_name=$(kubectl get pods --output=jsonpath={.items..metadata.name}) + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # View replicaset + kubectl get rs + + # Create service + kubectl apply -f "${pod_config_dir}/redis-master-service.yaml" + + # Check service + kubectl get svc | grep redis + + # Check redis service + port_redis=$(kubectl get pods $pod_name --template='{{(index (index .spec.containers 0).ports 0).containerPort}}{{"\n"}}') + + # Verify that redis is running in the pod and listening on port + port=6379 + [ "$port_redis" -eq "$port" ] + + # Forward a local port to a port on the pod + (2&>1 kubectl port-forward "$pod_name" 7000:"$port"> /dev/null) & + + # Run redis-cli + retries="10" + ok="0" + + for _ in $(seq 1 "$retries"); do + if sudo -E redis-cli -p 7000 ping | grep -q "PONG" ; then + ok="1" + break; + fi + sleep 1 + done + + [ "$ok" -eq "1" ] +} + +teardown() { + skip "test not working see: ${issue}" + kubectl delete -f "${pod_config_dir}/redis-master-deployment.yaml" + kubectl delete -f "${pod_config_dir}/redis-master-service.yaml" +} diff --git a/tests/integration/kubernetes/k8s-projected-volume.bats 
b/tests/integration/kubernetes/k8s-projected-volume.bats new file mode 100644 index 000000000..33788e475 --- /dev/null +++ b/tests/integration/kubernetes/k8s-projected-volume.bats @@ -0,0 +1,63 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + get_pod_config_dir +} + +@test "Projected volume" { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + password="1f2d1e2e67df" + username="admin" + pod_name="test-projected-volume" + + TMP_FILE=$(mktemp username.XXXX) + SECOND_TMP_FILE=$(mktemp password.XXXX) + + # Create files containing the username and password + echo "$username" > $TMP_FILE + echo "$password" > $SECOND_TMP_FILE + + # Package these files into secrets + kubectl create secret generic user --from-file=$TMP_FILE + kubectl create secret generic pass --from-file=$SECOND_TMP_FILE + + # Create pod + kubectl create -f "${pod_config_dir}/pod-projected-volume.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check that the projected sources exists + cmd="ls /projected-volume | grep username" + kubectl exec $pod_name -- sh -c "$cmd" + sec_cmd="ls /projected-volume | grep password" + kubectl exec $pod_name -- sh -c "$sec_cmd" + + # Check content of the projected sources + check_cmd="cat /projected-volume/username*" + kubectl exec $pod_name -- sh -c "$check_cmd" | grep "$username" + sec_check_cmd="cat /projected-volume/password*" + kubectl exec $pod_name -- sh -c "$sec_check_cmd" | grep "$password" +} + +teardown() { + [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + + # Debugging information + kubectl describe "pod/$pod_name" + + rm -f $TMP_FILE $SECOND_TMP_FILE + kubectl delete pod "$pod_name" + kubectl delete secret pass user +} diff --git a/tests/integration/kubernetes/k8s-qos-pods.bats b/tests/integration/kubernetes/k8s-qos-pods.bats new file mode 100644 index 000000000..6f1df43ef --- /dev/null +++ b/tests/integration/kubernetes/k8s-qos-pods.bats @@ -0,0 +1,58 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" +TEST_INITRD="${TEST_INITRD:-no}" + +# Not working on ARM CI see https://github.com/kata-containers/tests/issues/4727 +setup() { + get_pod_config_dir +} + +@test "Guaranteed QoS" { + pod_name="qos-test" + + # Create pod + kubectl create -f "${pod_config_dir}/pod-guaranteed.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check pod class + kubectl get pod "$pod_name" --output=yaml | grep "qosClass: Guaranteed" +} + +@test "Burstable QoS" { + pod_name="burstable-test" + + # Create pod + kubectl create -f "${pod_config_dir}/pod-burstable.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + # Check pod class + kubectl get pod "$pod_name" --output=yaml | grep "qosClass: Burstable" +} + +@test "BestEffort QoS" { + pod_name="besteffort-test" + + # Create pod + kubectl create -f "${pod_config_dir}/pod-besteffort.yaml" + + # Check pod creation + kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name" + + 
# Check pod class + kubectl get pod "$pod_name" --output=yaml | grep "qosClass: BestEffort" +} + +teardown() { + kubectl delete pod "$pod_name" +} diff --git a/tests/integration/kubernetes/k8s-replication.bats b/tests/integration/kubernetes/k8s-replication.bats new file mode 100644 index 000000000..e8f14e4dd --- /dev/null +++ b/tests/integration/kubernetes/k8s-replication.bats @@ -0,0 +1,62 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + nginx_version="${docker_images_nginx_version}" + nginx_image="nginx:$nginx_version" + + get_pod_config_dir +} + +@test "Replication controller" { + replication_name="replicationtest" + + # Create yaml + sed -e "s/\${nginx_version}/${nginx_image}/" \ + "${pod_config_dir}/replication-controller.yaml" > "${pod_config_dir}/test-replication-controller.yaml" + + # Create replication controller + kubectl create -f "${pod_config_dir}/test-replication-controller.yaml" + + # Check replication controller + local cmd="kubectl describe replicationcontrollers/$replication_name | grep replication-controller" + waitForProcess "$wait_time" "$sleep_time" "$cmd" + + number_of_replicas=$(kubectl get replicationcontrollers/"$replication_name" \ + --output=jsonpath='{.spec.replicas}') + [ "${number_of_replicas}" -gt 0 ] + + # The replicas pods can be in running, waiting, succeeded or failed + # status. We need them all on running state before proceed. + cmd="kubectl describe rc/\"${replication_name}\"" + cmd+="| grep \"Pods Status\" | grep \"${number_of_replicas} Running\"" + waitForProcess "$wait_time" "$sleep_time" "$cmd" + + # Check number of pods created for the + # replication controller is equal to the + # number of replicas that we defined + launched_pods=($(kubectl get pods --selector=app=nginx-rc-test \ + --output=jsonpath={.items..metadata.name})) + [ "${#launched_pods[@]}" -eq "$number_of_replicas" ] + + # Check pod creation + for pod_name in ${launched_pods[@]}; do + cmd="kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name" + waitForProcess "$wait_time" "$sleep_time" "$cmd" + done +} + +teardown() { + # Debugging information + kubectl describe replicationcontrollers/"$replication_name" + + rm -f "${pod_config_dir}/test-replication-controller.yaml" + kubectl delete rc "$replication_name" +} diff --git a/tests/integration/kubernetes/k8s-scale-nginx.bats b/tests/integration/kubernetes/k8s-scale-nginx.bats new file mode 100644 index 000000000..3f11236f3 --- /dev/null +++ b/tests/integration/kubernetes/k8s-scale-nginx.bats @@ -0,0 +1,36 @@ +#!/usr/bin/env bats +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +load "${BATS_TEST_DIRNAME}/../../common.bash" +load "${BATS_TEST_DIRNAME}/tests_common.sh" + +setup() { + nginx_version="${docker_images_nginx_version}" + nginx_image="nginx:$nginx_version" + replicas="3" + deployment="nginx-deployment" + get_pod_config_dir +} + +@test "Scale nginx deployment" { + + sed -e "s/\${nginx_version}/${nginx_image}/" \ + "${pod_config_dir}/${deployment}.yaml" > "${pod_config_dir}/test-${deployment}.yaml" + + kubectl create -f "${pod_config_dir}/test-${deployment}.yaml" + kubectl wait --for=condition=Available --timeout=$timeout deployment/${deployment} + kubectl expose deployment/${deployment} + kubectl scale deployment/${deployment} --replicas=${replicas} + cmd="kubectl get deployment/${deployment} -o yaml | grep 
'availableReplicas: ${replicas}'"
+    waitForProcess "$wait_time" "$sleep_time" "$cmd"
+}
+
+teardown() {
+    rm -f "${pod_config_dir}/test-${deployment}.yaml"
+    kubectl delete deployment "$deployment"
+    kubectl delete service "$deployment"
+}
diff --git a/tests/integration/kubernetes/k8s-seccomp.bats b/tests/integration/kubernetes/k8s-seccomp.bats
new file mode 100644
index 000000000..c6a840cb3
--- /dev/null
+++ b/tests/integration/kubernetes/k8s-seccomp.bats
@@ -0,0 +1,35 @@
+#!/usr/bin/env bats
+#
+# Copyright (c) 2021 Red Hat
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+load "${BATS_TEST_DIRNAME}/../../common.bash"
+load "${BATS_TEST_DIRNAME}/tests_common.sh"
+
+setup() {
+    pod_name="seccomp-container"
+    get_pod_config_dir
+}
+
+@test "Support seccomp runtime/default profile" {
+    expected_seccomp_mode="2"
+    # Create pod
+    kubectl create -f "${pod_config_dir}/pod-seccomp.yaml"
+
+    # Wait for it to complete
+    cmd="kubectl get pods ${pod_name} | grep Completed"
+    waitForProcess "${wait_time}" "${sleep_time}" "${cmd}"
+
+    # Expect Seccomp on mode 2 (filter)
+    seccomp_mode="$(kubectl logs ${pod_name} | sed 's/Seccomp:\s*\([0-9]\)/\1/')"
+    [ "$seccomp_mode" -eq "$expected_seccomp_mode" ]
+}
+
+teardown() {
+    # For debugging purposes
+    echo "seccomp mode is ${seccomp_mode}, expected $expected_seccomp_mode"
+    kubectl describe "pod/${pod_name}"
+
+    kubectl delete -f "${pod_config_dir}/pod-seccomp.yaml" || true
+}
diff --git a/tests/integration/kubernetes/k8s-security-context.bats b/tests/integration/kubernetes/k8s-security-context.bats
new file mode 100644
index 000000000..a8f9d7ba9
--- /dev/null
+++ b/tests/integration/kubernetes/k8s-security-context.bats
@@ -0,0 +1,35 @@
+#!/usr/bin/env bats
+#
+# Copyright (c) 2018 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+load "${BATS_TEST_DIRNAME}/../../common.bash"
+load "${BATS_TEST_DIRNAME}/tests_common.sh"
+
+setup() {
+    get_pod_config_dir
+}
+
+@test "Security context" {
+    pod_name="security-context-test"
+
+    # Create pod
+    kubectl create -f "${pod_config_dir}/pod-security-context.yaml"
+
+    # Check pod creation
+    kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
+
+    # Check user
+    cmd="ps --user 1000 -f"
+    process="tail -f /dev/null"
+    kubectl exec $pod_name -- sh -c "$cmd" | grep "$process"
+}
+
+teardown() {
+    # Debugging information
+    kubectl describe "pod/$pod_name"
+
+    kubectl delete pod "$pod_name"
+}
diff --git a/tests/integration/kubernetes/k8s-shared-volume.bats b/tests/integration/kubernetes/k8s-shared-volume.bats
new file mode 100644
index 000000000..5e3b6a270
--- /dev/null
+++ b/tests/integration/kubernetes/k8s-shared-volume.bats
@@ -0,0 +1,51 @@
+#!/usr/bin/env bats
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+load "${BATS_TEST_DIRNAME}/../../common.bash"
+load "${BATS_TEST_DIRNAME}/tests_common.sh"
+
+setup() {
+    get_pod_config_dir
+}
+
+@test "Containers with shared volume" {
+    pod_name="test-shared-volume"
+    first_container_name="busybox-first-container"
+    second_container_name="busybox-second-container"
+
+    # Create pod
+    kubectl create -f "${pod_config_dir}/pod-shared-volume.yaml"
+
+    # Check pods
+    kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
+
+    # Communicate containers
+    cmd="cat /tmp/pod-data"
+    msg="Hello from the $second_container_name"
+    kubectl exec "$pod_name" -c "$first_container_name" -- sh -c "$cmd" | grep "$msg"
+}
+
+@test "initContainer with shared volume" {
+    pod_name="initcontainer-shared-volume"
+    last_container="last"
+
+    # Create
pod
+    kubectl create -f "${pod_config_dir}/initContainer-shared-volume.yaml"
+
+    # Check pods
+    kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
+
+    cmd='test $(cat /volume/initContainer) -lt $(cat /volume/container)'
+    kubectl exec "$pod_name" -c "$last_container" -- sh -c "$cmd"
+}
+
+teardown() {
+    # Debugging information
+    kubectl describe "pod/$pod_name"
+
+    kubectl delete pod "$pod_name"
+}
diff --git a/tests/integration/kubernetes/k8s-sysctls.bats b/tests/integration/kubernetes/k8s-sysctls.bats
new file mode 100644
index 000000000..aca6c50d1
--- /dev/null
+++ b/tests/integration/kubernetes/k8s-sysctls.bats
@@ -0,0 +1,34 @@
+#!/usr/bin/env bats
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+load "${BATS_TEST_DIRNAME}/../../common.bash"
+load "${BATS_TEST_DIRNAME}/tests_common.sh"
+
+setup() {
+    pod_name="sysctl-test"
+    get_pod_config_dir
+}
+
+@test "Setting sysctl" {
+    # Create pod
+    kubectl apply -f "${pod_config_dir}/pod-sysctl.yaml"
+
+    # Check pod creation
+    kubectl wait --for=condition=Ready --timeout=$timeout pod $pod_name
+
+    # Check sysctl configuration
+    cmd="cat /proc/sys/kernel/shm_rmid_forced"
+    result=$(kubectl exec $pod_name -- sh -c "$cmd")
+    [ "${result}" = 0 ]
+}
+
+teardown() {
+    # Debugging information
+    kubectl describe "pod/$pod_name"
+
+    kubectl delete pod "$pod_name"
+}
diff --git a/tests/integration/kubernetes/run_kubernetes_tests.sh b/tests/integration/kubernetes/run_kubernetes_tests.sh
new file mode 100755
index 000000000..db1e16633
--- /dev/null
+++ b/tests/integration/kubernetes/run_kubernetes_tests.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+#
+# Copyright (c) 2018 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set -e
+
+kubernetes_dir=$(dirname "$(readlink -f "$0")")
+# common.bash provides helpers used below, such as info()
+source "${kubernetes_dir}/../../common.bash"
+
+TARGET_ARCH="${TARGET_ARCH:-x86_64}"
+KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
+K8S_TEST_DEBUG="${K8S_TEST_DEBUG:-false}"
+
+if [ -n "${K8S_TEST_UNION:-}" ]; then
+    K8S_TEST_UNION=($K8S_TEST_UNION)
+else
+    K8S_TEST_UNION=( \
+        "k8s-attach-handlers.bats" \
+        "k8s-caps.bats" \
+        "k8s-configmap.bats" \
+        "k8s-copy-file.bats" \
+        "k8s-cpu-ns.bats" \
+        "k8s-credentials-secrets.bats" \
+        "k8s-custom-dns.bats" \
+        "k8s-empty-dirs.bats" \
+        "k8s-env.bats" \
+        "k8s-exec.bats" \
+        "k8s-inotify.bats" \
+        "k8s-job.bats" \
+        "k8s-kill-all-process-in-container.bats" \
+        "k8s-limit-range.bats" \
+        "k8s-liveness-probes.bats" \
+        "k8s-memory.bats" \
+        "k8s-nested-configmap-secret.bats" \
+        "k8s-number-cpus.bats" \
+        "k8s-oom.bats" \
+        "k8s-optional-empty-configmap.bats" \
+        "k8s-optional-empty-secret.bats" \
+        "k8s-parallel.bats" \
+        "k8s-pid-ns.bats" \
+        "k8s-pod-quota.bats" \
+        "k8s-port-forward.bats" \
+        "k8s-projected-volume.bats" \
+        "k8s-qos-pods.bats" \
+        "k8s-replication.bats" \
+        "k8s-scale-nginx.bats" \
+        "k8s-seccomp.bats" \
+        "k8s-sysctls.bats" \
+        "k8s-security-context.bats" \
+        "k8s-shared-volume.bats" \
+        "k8s-nginx-connectivity.bats" \
+    )
+fi
+
+# we may need to skip a few test cases when running on non-x86_64 arch
+arch_config_file="${kubernetes_dir}/filter_out_per_arch/${TARGET_ARCH}.yaml"
+if [ -f "${arch_config_file}" ]; then
+    arch_k8s_test_union=$("${kubernetes_dir}/filter_k8s_test.sh" "${arch_config_file}" "${K8S_TEST_UNION[*]}")
+    mapfile -d " " -t K8S_TEST_UNION <<< "${arch_k8s_test_union}"
+fi
+
+info "Run tests"
+for K8S_TEST_ENTRY in "${K8S_TEST_UNION[@]}"
+do
+    bats "${K8S_TEST_ENTRY}"
+done
diff --git a/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod.yaml
b/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod.yaml new file mode 100644 index 000000000..9b5bb530c --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod.yaml @@ -0,0 +1,32 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: busybox +spec: + terminationGracePeriodSeconds: 0 + shareProcessNamespace: true + runtimeClassName: kata + containers: + - name: first-test-container + image: quay.io/prometheus/busybox:latest + env: + - name: CONTAINER_NAME + value: "first-test-container" + command: + - sleep + - "30" + - name: second-test-container + image: quay.io/prometheus/busybox:latest + env: + - name: CONTAINER_NAME + value: "second-test-container" + command: + - sleep + - "30" + stdin: true + tty: true diff --git a/tests/integration/kubernetes/runtimeclass_workloads/busybox-template.yaml b/tests/integration/kubernetes/runtimeclass_workloads/busybox-template.yaml new file mode 100644 index 000000000..a849e5c29 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/busybox-template.yaml @@ -0,0 +1,19 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: POD_NAME +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + shareProcessNamespace: true + containers: + - name: CTR_NAME + image: quay.io/prometheus/busybox:latest + command: + - sleep + - "120" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/configmap.yaml b/tests/integration/kubernetes/runtimeclass_workloads/configmap.yaml new file mode 100644 index 000000000..9d62e8ace --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/configmap.yaml @@ -0,0 +1,12 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-configmap +data: + data-1: value-1 + data-2: value-2 diff --git a/tests/integration/kubernetes/runtimeclass_workloads/footloose-configmap.yaml b/tests/integration/kubernetes/runtimeclass_workloads/footloose-configmap.yaml new file mode 100644 index 000000000..40a8bb881 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/footloose-configmap.yaml @@ -0,0 +1,12 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +data: + authorized_keys: | + ${ssh_key} +kind: ConfigMap +metadata: + name: ssh-config-map diff --git a/tests/integration/kubernetes/runtimeclass_workloads/initContainer-shared-volume.yaml b/tests/integration/kubernetes/runtimeclass_workloads/initContainer-shared-volume.yaml new file mode 100644 index 000000000..508261b33 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/initContainer-shared-volume.yaml @@ -0,0 +1,29 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: initcontainer-shared-volume +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + initContainers: + - name: first + image: quay.io/prometheus/busybox:latest + command: [ "sh", "-c", "echo ${EPOCHREALTIME//.} > /volume/initContainer" ] + volumeMounts: + - mountPath: /volume + name: volume + containers: + - name: last + image: quay.io/prometheus/busybox:latest + command: [ "sh", "-c", "echo ${EPOCHREALTIME//.} > /volume/container; tail -f /dev/null" ] + volumeMounts: + - mountPath: /volume + name: volume + 
volumes: + - name: volume + emptyDir: {} diff --git a/tests/integration/kubernetes/runtimeclass_workloads/initcontainer-shareprocesspid.yaml b/tests/integration/kubernetes/runtimeclass_workloads/initcontainer-shareprocesspid.yaml new file mode 100644 index 000000000..a3f20fae0 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/initcontainer-shareprocesspid.yaml @@ -0,0 +1,26 @@ +# +# Copyright (c) 2022 AntGroup Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: busybox +spec: + terminationGracePeriodSeconds: 0 + shareProcessNamespace: true + runtimeClassName: kata + initContainers: + - name: first + image: quay.io/prometheus/busybox:latest + command: [ "sh", "-c", "echo 'nohup tail -f /dev/null >/dev/null 2>&1 &' > /init.sh && chmod +x /init.sh && /init.sh" ] + containers: + - name: first-test-container + image: quay.io/prometheus/busybox:latest + env: + - name: CONTAINER_NAME + value: "first-test-container" + command: + - sleep + - "300" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/inject_secret.yaml b/tests/integration/kubernetes/runtimeclass_workloads/inject_secret.yaml new file mode 100644 index 000000000..ec42d7c6f --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/inject_secret.yaml @@ -0,0 +1,12 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Secret +metadata: + name: test-secret +data: + username: bXktYXBw + password: Mzk1MjgkdmRnN0pi diff --git a/tests/integration/kubernetes/runtimeclass_workloads/inotify-configmap-pod.yaml b/tests/integration/kubernetes/runtimeclass_workloads/inotify-configmap-pod.yaml new file mode 100644 index 000000000..c85240c94 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/inotify-configmap-pod.yaml @@ -0,0 +1,32 @@ +# +# Copyright (c) 2021 Apple Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: v1 +kind: Pod +metadata: + name: inotify-configmap-testing +spec: + containers: + - name: c1 + image: quay.io/kata-containers/fsnotify:latest + command: ["bash"] + args: ["-c", "inotifywait --timeout 120 -r /config/ && [[ -L /config/config.toml ]] && echo success" ] + resources: + requests: + cpu: 1 + memory: 50Mi + limits: + cpu: 1 + memory: 1024Mi + volumeMounts: + - name: config + mountPath: /config + runtimeClassName: kata + restartPolicy: Never + volumes: + - name: config + configMap: + name: cm diff --git a/tests/integration/kubernetes/runtimeclass_workloads/inotify-configmap.yaml b/tests/integration/kubernetes/runtimeclass_workloads/inotify-configmap.yaml new file mode 100644 index 000000000..02c01d749 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/inotify-configmap.yaml @@ -0,0 +1,13 @@ +# +# Copyright (c) 2021 Apple Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: v1 +data: + config.toml: |- + foo original... +kind: ConfigMap +metadata: + name: cm diff --git a/tests/integration/kubernetes/runtimeclass_workloads/inotify-updated-configmap.yaml b/tests/integration/kubernetes/runtimeclass_workloads/inotify-updated-configmap.yaml new file mode 100644 index 000000000..5442bdd17 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/inotify-updated-configmap.yaml @@ -0,0 +1,14 @@ +# +# Copyright (c) 2021 Apple Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +--- +apiVersion: v1 +data: + config.toml: |- + foo original... + ... 
updated +kind: ConfigMap +metadata: + name: cm diff --git a/tests/integration/kubernetes/runtimeclass_workloads/job-template.yaml b/tests/integration/kubernetes/runtimeclass_workloads/job-template.yaml new file mode 100644 index 000000000..1e7760d95 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/job-template.yaml @@ -0,0 +1,25 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: batch/v1 +kind: Job +metadata: + name: process-item-$ITEM + labels: + jobgroup: jobtest +spec: + template: + metadata: + name: jobtest + labels: + jobgroup: jobtest + spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + restartPolicy: Never diff --git a/tests/integration/kubernetes/runtimeclass_workloads/job.yaml b/tests/integration/kubernetes/runtimeclass_workloads/job.yaml new file mode 100644 index 000000000..688667fc2 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/job.yaml @@ -0,0 +1,20 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: batch/v1 +kind: Job +metadata: + name: job-pi-test +spec: + template: + spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: pi + image: quay.io/prometheus/busybox:latest + command: ["/bin/sh", "-c", "echo 'scale=5; 4*a(1)' | bc -l"] + restartPolicy: Never + backoffLimit: 4 diff --git a/tests/integration/kubernetes/runtimeclass_workloads/kata-runtimeclass.yaml b/tests/integration/kubernetes/runtimeclass_workloads/kata-runtimeclass.yaml new file mode 100644 index 000000000..83bdfd2de --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/kata-runtimeclass.yaml @@ -0,0 +1,9 @@ +kind: RuntimeClass +apiVersion: node.k8s.io/v1 +metadata: + name: kata +handler: kata +overhead: + podFixed: + memory: "160Mi" + cpu: "250m" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/lifecycle-events.yaml b/tests/integration/kubernetes/runtimeclass_workloads/lifecycle-events.yaml new file mode 100644 index 000000000..7a8c731dc --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/lifecycle-events.yaml @@ -0,0 +1,23 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +apiVersion: v1 +kind: Pod +metadata: + name: handlers +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: handlers-container + image: quay.io/sjenning/${nginx_version} + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] + preStop: + exec: + command: ["/usr/sbin/nginx","-s","quit"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/limit-range.yaml b/tests/integration/kubernetes/runtimeclass_workloads/limit-range.yaml new file mode 100644 index 000000000..8f774a277 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/limit-range.yaml @@ -0,0 +1,16 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: LimitRange +metadata: + name: cpu-limit-range +spec: + limits: + - default: + cpu: 1 + defaultRequest: + cpu: 0.5 + type: Container diff --git a/tests/integration/kubernetes/runtimeclass_workloads/nginx-deployment.yaml b/tests/integration/kubernetes/runtimeclass_workloads/nginx-deployment.yaml new file mode 100644 index 000000000..5a63b09c5 
--- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/nginx-deployment.yaml @@ -0,0 +1,26 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 2 + template: + metadata: + labels: + app: nginx + spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: nginx + image: quay.io/sjenning/${nginx_version} + ports: + - containerPort: 80 diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-besteffort.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-besteffort.yaml new file mode 100644 index 000000000..49280f85c --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-besteffort.yaml @@ -0,0 +1,16 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: besteffort-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: qos-besteffort + image: quay.io/prometheus/busybox:latest + command: ["/bin/sh", "-c", "tail -f /dev/null"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-burstable.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-burstable.yaml new file mode 100644 index 000000000..aed6df794 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-burstable.yaml @@ -0,0 +1,21 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: burstable-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: qos-burstable + image: quay.io/prometheus/busybox:latest + command: ["/bin/sh", "-c", "tail -f /dev/null"] + resources: + limits: + memory: "200Mi" + requests: + memory: "100Mi" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-caps.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-caps.yaml new file mode 100644 index 000000000..1493315d6 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-caps.yaml @@ -0,0 +1,18 @@ +# +# Copyright (c) 2021 Apple Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: pod-caps +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-container + image: quay.io/prometheus/busybox:latest + command: ["sh"] + args: ["-c", "cat /proc/self/status | grep Cap && sleep infinity"] + restartPolicy: Never diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-configmap.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-configmap.yaml new file mode 100644 index 000000000..f0deed156 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-configmap.yaml @@ -0,0 +1,28 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: config-env-test-pod +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-container + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + env: + - name: KUBE_CONFIG_1 + valueFrom: + configMapKeyRef: + name: test-configmap + key: data-1 + - name: KUBE_CONFIG_2 + valueFrom: + configMapKeyRef: + name: test-configmap + key: data-2 + restartPolicy: Never diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-cpu-defaults.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-cpu-defaults.yaml new file mode 100644 index 000000000..5121c7459 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-cpu-defaults.yaml @@ -0,0 +1,16 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: default-cpu-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: default-cpu-demo-ctr + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-cpu.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-cpu.yaml new file mode 100644 index 000000000..cf04c4b50 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-cpu.yaml @@ -0,0 +1,23 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: constraints-cpu-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: first-cpu-container + image: quay.io/prometheus/busybox:latest + command: + - sleep + - "30" + resources: + limits: + cpu: "1" + requests: + cpu: "500m" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-custom-dns.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-custom-dns.yaml new file mode 100644 index 000000000..680577a5f --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-custom-dns.yaml @@ -0,0 +1,23 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + namespace: default + name: custom-dns-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + dnsPolicy: "None" + dnsConfig: + nameservers: + - 1.2.3.4 + searches: + - dns.test.search diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-empty-dir-fsgroup.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-empty-dir-fsgroup.yaml new file mode 100644 index 
000000000..e887cc92c --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-empty-dir-fsgroup.yaml @@ -0,0 +1,44 @@ +# +# Copyright (c) 2021 Red Hat, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: sharevol-kata +spec: + runtimeClassName: kata + restartPolicy: Never + securityContext: + runAsUser: 1001 + fsGroup: 123 + containers: + - name: mounttest-container + image: ${agnhost_image} + args: + - mounttest + - --fs_type=/test-volume + - --new_file_0660=/test-volume/test-file + - --file_perm=/test-volume/test-file + - --file_owner=/test-volume/test-file + volumeMounts: + - name: emptydir-volume + mountPath: /test-volume + - name: mounttest-container-2 + image: ${agnhost_image} + args: + - mounttest + - --fs_type=/test-volume-2 + - --new_file_0660=/test-volume-2/test-file + - --file_perm=/test-volume-2/test-file + - --file_owner=/test-volume-2/test-file + volumeMounts: + - name: mem-emptydir-volume + mountPath: /test-volume-2 + volumes: + - name: emptydir-volume + emptyDir: {} + - name: mem-emptydir-volume + emptyDir: + medium: Memory diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-empty-dir.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-empty-dir.yaml new file mode 100644 index 000000000..20dc02242 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-empty-dir.yaml @@ -0,0 +1,28 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: sharevol-kata +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + volumeMounts: + - name: host-empty-vol + mountPath: "/host/cache" + - name: memory-empty-vol + mountPath: "/tmp/cache" + volumes: + - name: host-empty-vol + emptyDir: {} + - name: memory-empty-vol + emptyDir: + medium: Memory + sizeLimit: "50M" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-env.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-env.yaml new file mode 100644 index 000000000..96c4ca60d --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-env.yaml @@ -0,0 +1,46 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: test-env +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-container + image: quay.io/prometheus/busybox:latest + command: [ "sh", "-c"] + args: + - while true; do + echo -en '\n'; + printenv MY_POD_NAME; + printenv HOST_IP; + printenv MEMORY_REQUESTS; + printenv MEMORY_LIMITS; + sleep 1; + done; + resources: + requests: + memory: "32Mi" + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: MEMORY_REQUESTS + valueFrom: + resourceFieldRef: + resource: requests.memory + - name: MEMORY_LIMITS + valueFrom: + resourceFieldRef: + resource: limits.memory + restartPolicy: Never diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-file-volume.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-file-volume.yaml new file mode 100644 index 000000000..4784b1477 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-file-volume.yaml @@ -0,0 +1,26 @@ +# +# Copyright (c) 2022 Ant Group +# +# SPDX-License-Identifier: 
Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: test-file-volume +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + restartPolicy: Never + volumes: + - name: shared-file + hostPath: + path: HOST_FILE + type: File + containers: + - name: busybox-file-volume-container + image: busybox + volumeMounts: + - name: shared-file + mountPath: MOUNT_PATH + command: ["/bin/sh"] + args: ["-c", "tail -f /dev/null"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-footloose.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-footloose.yaml new file mode 100644 index 000000000..9f427b27d --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-footloose.yaml @@ -0,0 +1,59 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: footubuntu +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + volumes: + - name: runv + emptyDir: + medium: "Memory" + - name: runlockv + emptyDir: + medium: "Memory" + - name: tmpv + emptyDir: + medium: "Memory" + - name: fakecgroup + hostPath: + path: /sys/fs/cgroup + - name: ssh-dir + emptyDir: + medium: "Memory" + - name: ssh-config-map + configMap: + name: ssh-config-map + defaultMode: 384 + containers: + - name: vmcontainer + image: quay.io/footloose/ubuntu18.04:latest + command: ["/sbin/init"] + volumeMounts: + - name: runv + mountPath: /run + - name: runlockv + mountPath: /run/lock + - name: tmpv + mountPath: /tmp + - name: fakecgroup + readOnly: true + mountPath: /sys/fs/cgroup + - name: ssh-dir + mountPath: /root/.ssh + - name: ssh-config-map + mountPath: /root/.ssh/authorized_keys + subPath: authorized_keys + # These containers are run during pod initialization + initContainers: + - name: install + image: quay.io/prometheus/busybox:latest + command: ["sh", "-c", "chmod 700 /root/.ssh"] + volumeMounts: + - name: ssh-dir + mountPath: /root/.ssh diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-guaranteed.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-guaranteed.yaml new file mode 100644 index 000000000..ee8893a1e --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-guaranteed.yaml @@ -0,0 +1,23 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: qos-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: qos-guaranteed + image: quay.io/prometheus/busybox:latest + command: ["/bin/sh", "-c", "tail -f /dev/null"] + resources: + limits: + memory: "200Mi" + cpu: "700m" + requests: + memory: "200Mi" + cpu: "700m" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-http-liveness.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-http-liveness.yaml new file mode 100644 index 000000000..3d336761f --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-http-liveness.yaml @@ -0,0 +1,25 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + labels: + test: liveness-test + name: liveness-http +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: liveness + image: ${agnhost_image} + args: + - liveness + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 3 + periodSeconds: 3 diff --git 
a/tests/integration/kubernetes/runtimeclass_workloads/pod-hugepage.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-hugepage.yaml new file mode 100644 index 000000000..8156f7bcb --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-hugepage.yaml @@ -0,0 +1,30 @@ +# +# Copyright (c) 2022 Ant Group +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: hugepage-pod +spec: + runtimeClassName: kata + containers: + - name: hugepage-container + image: quay.io/prometheus/busybox:latest + command: ["/bin/sh"] + args: ["-c", "tail -f /dev/null"] + volumeMounts: + - mountPath: /hugepages + name: hugepage + resources: + limits: + hugepages-${hugepages_size}: 512Mi + memory: 512Mi + requests: + hugepages-${hugepages_size}: 512Mi + memory: 512Mi + volumes: + - name: hugepage + emptyDir: + medium: HugePages diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-liveness.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-liveness.yaml new file mode 100644 index 000000000..fe2371c4a --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-liveness.yaml @@ -0,0 +1,28 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + labels: + test: liveness + name: liveness-exec +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: liveness + image: quay.io/prometheus/busybox:latest + args: + - /bin/sh + - -c + - touch /tmp/healthy; echo "Check status"; sleep 6; rm -rf /tmp/healthy; echo "Check dead"; sleep 12 + livenessProbe: + exec: + command: + - cat + - /tmp/healthy + initialDelaySeconds: 3 + periodSeconds: 3 diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-memory-limit.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-memory-limit.yaml new file mode 100644 index 000000000..fb8fbca48 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-memory-limit.yaml @@ -0,0 +1,23 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: memory-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: memory-test-ctr + image: quay.io/kata-containers/sysbench-kata:latest + imagePullPolicy: IfNotPresent + resources: + limits: + memory: "${memory_size}" + requests: + memory: "500Mi" + command: ["stress"] + args: ["--vm", "1", "--vm-bytes", "${memory_allocated}", "--vm-hang", "1"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-nested-configmap-secret.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-nested-configmap-secret.yaml new file mode 100644 index 000000000..4d76ab65b --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-nested-configmap-secret.yaml @@ -0,0 +1,44 @@ +# +# Copyright (c) 2021 IBM Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: ConfigMap +metadata: + name: config +data: + config_key: myconfig +--- +apiVersion: v1 +data: + secret_key: bXlzZWNyZXQ= #mysecret +kind: Secret +metadata: + name: secret +type: Opaque +--- +apiVersion: v1 +kind: Pod +metadata: + name: nested-configmap-secret-pod +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-container + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + volumeMounts: + - mountPath: /config + name: config + - 
mountPath: /config/secret + name: secret + volumes: + - name: secret + secret: + secretName: secret + - name: config + configMap: + name: config + restartPolicy: Never diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-number-cpu.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-number-cpu.yaml new file mode 100644 index 000000000..55f9597b2 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-number-cpu.yaml @@ -0,0 +1,27 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: cpu-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: c1 + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + resources: + limits: + cpu: "500m" + - name: c2 + image: quay.io/prometheus/busybox:latest + command: + - sleep + - "10" + resources: + limits: + cpu: "500m" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-oom.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-oom.yaml new file mode 100644 index 000000000..672c54e68 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-oom.yaml @@ -0,0 +1,25 @@ +# +# Copyright (c) 2020 Ant Group +# +# SPDX-License-Identifier: Apache-2.0 +# + +apiVersion: v1 +kind: Pod +metadata: + name: pod-oom + namespace: default +spec: + runtimeClassName: kata + restartPolicy: Never + containers: + - image: quay.io/kata-containers/sysbench-kata:latest + imagePullPolicy: IfNotPresent + name: oom-test + command: ["/bin/sh"] + args: ["-c", "sleep 2; stress --vm 2 --vm-bytes 400M --timeout 30s"] + resources: + limits: + memory: 500Mi + requests: + memory: 500Mi diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-optional-empty-configmap.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-optional-empty-configmap.yaml new file mode 100644 index 000000000..73008cf6b --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-optional-empty-configmap.yaml @@ -0,0 +1,30 @@ +# +# Copyright (c) 2021 IBM Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: optional-empty-config-test-pod +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-container + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + volumeMounts: + - mountPath: /empty-config + name: empty-config + - mountPath: /optional-missing-config + name: optional-missing-config + volumes: + - name: empty-config + configMap: + name: empty-config + - name: optional-missing-config + configMap: + name: optional-missing-config + optional: true + restartPolicy: Never \ No newline at end of file diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-optional-empty-secret.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-optional-empty-secret.yaml new file mode 100644 index 000000000..931db13ef --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-optional-empty-secret.yaml @@ -0,0 +1,30 @@ +# +# Copyright (c) 2021 IBM Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: optional-empty-secret-test-pod +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-container + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + volumeMounts: + - mountPath: 
/empty-secret + name: empty-secret + - mountPath: /optional-missing-secret + name: optional-missing-secret + volumes: + - name: empty-secret + secret: + secretName: empty-secret + - name: optional-missing-secret + secret: + secretName: optional-missing-secret + optional: true + restartPolicy: Never \ No newline at end of file diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-projected-volume.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-projected-volume.yaml new file mode 100644 index 000000000..66d954d2d --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-projected-volume.yaml @@ -0,0 +1,28 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: test-projected-volume +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-projected-volume + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + volumeMounts: + - name: all-in-one + mountPath: "/projected-volume" + readOnly: true + volumes: + - name: all-in-one + projected: + sources: + - secret: + name: user + - secret: + name: pass diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-quota-deployment.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-quota-deployment.yaml new file mode 100644 index 000000000..ecdaf5e64 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-quota-deployment.yaml @@ -0,0 +1,26 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: apps/v1 +kind: Deployment +metadata: + name: deploymenttest + namespace: test-quota-ns +spec: + selector: + matchLabels: + purpose: quota-demo + replicas: 2 + template: + metadata: + labels: + purpose: quota-demo + spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: pod-quota-demo + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-readonly-volume.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-readonly-volume.yaml new file mode 100644 index 000000000..8835bae99 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-readonly-volume.yaml @@ -0,0 +1,27 @@ +# +# Copyright (c) 2021 Ant Group +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: test-readonly-volume +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + restartPolicy: Never + volumes: + - name: shared-data + hostPath: + path: /tmp + type: Directory + containers: + - name: busybox-ro-volume-container + image: busybox + volumeMounts: + - name: shared-data + mountPath: /tmp + readOnly: true + command: ["/bin/sh"] + args: ["-c", "tail -f /dev/null"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-seccomp.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-seccomp.yaml new file mode 100644 index 000000000..5a00b7fca --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-seccomp.yaml @@ -0,0 +1,22 @@ +# +# Copyright (c) 2021 Red Hat +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: seccomp-container + annotations: + io.katacontainers.config.runtime.disable_guest_seccomp: "false" +spec: + runtimeClassName: kata + terminationGracePeriodSeconds: 0 + restartPolicy: Never + containers: + - name: busybox + image: 
quay.io/prometheus/busybox:latest + command: ["grep", "Seccomp:", "/proc/self/status"] + securityContext: + seccompProfile: + type: RuntimeDefault diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-secret-env.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-secret-env.yaml new file mode 100644 index 000000000..59ef3d264 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-secret-env.yaml @@ -0,0 +1,27 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: secret-envars-test-pod +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: envars-test-container + image: quay.io/prometheus/busybox:latest + command: ["/bin/sh", "-c", "tail -f /dev/null"] + env: + - name: SECRET_USERNAME + valueFrom: + secretKeyRef: + name: test-secret + key: username + - name: SECRET_PASSWORD + valueFrom: + secretKeyRef: + name: test-secret + key: password diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-secret.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-secret.yaml new file mode 100644 index 000000000..c5350ae3d --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-secret.yaml @@ -0,0 +1,25 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: secret-test-pod +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: test-container + image: quay.io/prometheus/busybox:latest + command: ["/bin/sh", "-c", "tail -f /dev/null"] + volumeMounts: + # name must match the volume name below + - name: secret-volume + mountPath: /tmp/secret-volume + # The secret data is exposed to Containers in the Pod through a Volume. 
+ volumes: + - name: secret-volume + secret: + secretName: test-secret diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-security-context.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-security-context.yaml new file mode 100644 index 000000000..60b92b79b --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-security-context.yaml @@ -0,0 +1,18 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: security-context-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + securityContext: + runAsUser: 1000 + containers: + - name: sec-text + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-shared-volume.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-shared-volume.yaml new file mode 100644 index 000000000..1f795e46f --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-shared-volume.yaml @@ -0,0 +1,31 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: test-shared-volume +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + restartPolicy: Never + volumes: + - name: shared-data + emptyDir: {} + containers: + - name: busybox-first-container + image: quay.io/prometheus/busybox:latest + volumeMounts: + - name: shared-data + mountPath: /tmp + command: ["/bin/sh"] + args: ["-c", "tail -f /dev/null"] + - name: busybox-second-container + image: quay.io/prometheus/busybox:latest + volumeMounts: + - name: shared-data + mountPath: /tmp + command: ["/bin/sh"] + args: ["-c", "echo Hello from the busybox-second-container > /tmp/pod-data && tail -f /dev/null"] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-sysctl.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-sysctl.yaml new file mode 100644 index 000000000..36a1e99bd --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-sysctl.yaml @@ -0,0 +1,28 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: sysctl-test +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + securityContext: + sysctls: + - name: kernel.shm_rmid_forced + value: "0" + containers: + - name: test + securityContext: + privileged: true + image: quay.io/prometheus/busybox:latest + command: ["tail", "-f", "/dev/null"] + initContainers: + - name: init-sys + securityContext: + privileged: true + image: quay.io/prometheus/busybox:latest + command: ['sh', '-c', 'echo "64000" > /proc/sys/vm/max_map_count'] diff --git a/tests/integration/kubernetes/runtimeclass_workloads/pod-tcp-liveness.yaml b/tests/integration/kubernetes/runtimeclass_workloads/pod-tcp-liveness.yaml new file mode 100644 index 000000000..6d5343cfe --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/pod-tcp-liveness.yaml @@ -0,0 +1,31 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: tcptest + labels: + app: tcp-liveness +spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: tcp-liveness + image: ${agnhost_image} + args: + - liveness + ports: + - containerPort: 8080 + readinessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 10 
+ livenessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 15 + periodSeconds: 20 diff --git a/tests/integration/kubernetes/runtimeclass_workloads/redis-master-deployment.yaml b/tests/integration/kubernetes/runtimeclass_workloads/redis-master-deployment.yaml new file mode 100644 index 000000000..7dcc8cda9 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/redis-master-deployment.yaml @@ -0,0 +1,36 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-master + labels: + app: redis +spec: + selector: + matchLabels: + app: redis + role: master + tier: backend + replicas: 1 + template: + metadata: + labels: + app: redis + role: master + tier: backend + spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: master + image: quay.io/libpod/redis + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 diff --git a/tests/integration/kubernetes/runtimeclass_workloads/redis-master-service.yaml b/tests/integration/kubernetes/runtimeclass_workloads/redis-master-service.yaml new file mode 100644 index 000000000..cb32ac1a2 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/redis-master-service.yaml @@ -0,0 +1,21 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Service +metadata: + name: redis-master + labels: + app: redis + role: master + tier: backend +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: redis + role: master + tier: backend diff --git a/tests/integration/kubernetes/runtimeclass_workloads/replication-controller.yaml b/tests/integration/kubernetes/runtimeclass_workloads/replication-controller.yaml new file mode 100644 index 000000000..a971d5a98 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/replication-controller.yaml @@ -0,0 +1,26 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: ReplicationController +metadata: + name: replicationtest +spec: + replicas: 1 + selector: + app: nginx-rc-test + template: + metadata: + name: nginx + labels: + app: nginx-rc-test + spec: + terminationGracePeriodSeconds: 0 + runtimeClassName: kata + containers: + - name: nginxtest + image: quay.io/sjenning/${nginx_version} + ports: + - containerPort: 80 diff --git a/tests/integration/kubernetes/runtimeclass_workloads/resource-quota.yaml b/tests/integration/kubernetes/runtimeclass_workloads/resource-quota.yaml new file mode 100644 index 000000000..a8d84d9ad --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/resource-quota.yaml @@ -0,0 +1,20 @@ +# +# Copyright (c) 2018 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: List +items: +- apiVersion: v1 + kind: Namespace + metadata: + name: test-quota-ns +- apiVersion: v1 + kind: ResourceQuota + metadata: + name: pod-quota + namespace: test-quota-ns + spec: + hard: + pods: "2" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/stress/Dockerfile b/tests/integration/kubernetes/runtimeclass_workloads/stress/Dockerfile new file mode 100644 index 000000000..3609eb4a4 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/stress/Dockerfile @@ -0,0 +1,13 @@ +# +# Copyright (c) 2021 IBM Corp. 
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# The image has only the 'latest' tag so it needs to ignore DL3007
+#hadolint ignore=DL3007
+FROM quay.io/libpod/ubuntu:latest
+RUN apt-get -y update && \
+    apt-get -y upgrade && \
+    apt-get -y --no-install-recommends install stress && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
diff --git a/tests/integration/kubernetes/runtimeclass_workloads/vfio.yaml b/tests/integration/kubernetes/runtimeclass_workloads/vfio.yaml
new file mode 100644
index 000000000..33ea60b7a
--- /dev/null
+++ b/tests/integration/kubernetes/runtimeclass_workloads/vfio.yaml
@@ -0,0 +1,24 @@
+#
+# Copyright (c) 2020 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+apiVersion: v1
+kind: Pod
+metadata:
+  name: vfio
+spec:
+  terminationGracePeriodSeconds: 0
+  runtimeClassName: kata
+  containers:
+  - name: c1
+    image: quay.io/prometheus/busybox:latest
+    command:
+      - sh
+    tty: true
+    stdin: true
+    resources:
+      limits:
+        intel.com/virtio_net: "1"
+      requests:
+        intel.com/virtio_net: "1"
diff --git a/tests/integration/kubernetes/tests_common.sh b/tests/integration/kubernetes/tests_common.sh
new file mode 100644
index 000000000..0f3e7d98d
--- /dev/null
+++ b/tests/integration/kubernetes/tests_common.sh
@@ -0,0 +1,38 @@
+#
+# Copyright (c) 2021 Red Hat, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# This script is invoked within an OpenShift Build to produce the binary image,
+# which will contain the Kata Containers installation in a given destination
+# directory.
+#
+# This contains variables and functions common to all e2e tests.
+
+# Variables used by the kubernetes tests
+export docker_images_nginx_version="1.15-alpine"
+export container_images_agnhost_name="k8s.gcr.io/e2e-test-images/agnhost"
+export container_images_agnhost_version="2.21"
+
+# Timeout options, mainly for use with waitForProcess(). Use them unless the
+# operation needs to wait longer.
+wait_time=90
+sleep_time=3
+
+# Timeout for use with `kubectl wait`, unless it needs to wait longer.
+# Note: try to keep timeout and wait_time equal.
+timeout=90s
+
+# Issues that can't be tested yet.
+fc_limitations="https://github.com/kata-containers/documentation/issues/351"
+
+# Path to the kubeconfig file which is used by kubectl and other tools.
+# Note: the init script sets that variable but if you want to run the tests in
+# your own provisioned cluster and you know what you are doing then you should
+# overwrite it.
+export KUBECONFIG="${KUBECONFIG:-$HOME/.kube/config}"
+
+get_pod_config_dir() {
+	pod_config_dir="${BATS_TEST_DIRNAME}/runtimeclass_workloads"
+	info "k8s configured to use runtimeclass"
+}

From c444c24bc559fa820c290f5308a6b4bbb9d9c197 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Fri, 31 Mar 2023 07:37:37 +0200
Subject: [PATCH 030/137] gha: aks: Add snippets to create / delete aks clusters
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Those will be shortly used as part of a newly added GitHub action for
running k8s tests on Azure.

They've been created using the secrets we already have exposed as part
of our GitHub organization, and they follow a similar way to
authenticate to Azure / create an AKS cluster as done in the
`/test-kata-deploy` action.
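For reference, the cluster lifecycle these two workflows wrap boils
down to roughly the az invocations below. This is only a sketch:
"$CLUSTER_NAME" stands in for the workflow's `name` input, while the
resource group, VM size and node count mirror the values hard-coded in
the workflows themselves.

    # Log in with the service principal kept in the repository secrets
    az login --service-principal \
        -u "$AZ_APPID" -p "$AZ_PASSWORD" --tenant "$AZ_TENANT_ID"

    # Create a single-node cluster in the kataCI resource group...
    az aks create -g "kataCI" -n "$CLUSTER_NAME" \
        -s "Standard_D4s_v3" --node-count 1 --generate-ssh-keys

    # ...and tear it down again once it is no longer needed
    az aks delete -g "kataCI" -n "$CLUSTER_NAME" --yes --no-wait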
Signed-off-by: Fabiano Fidêncio --- .github/workflows/create-aks.yaml | 32 +++++++++++++++++++++++++++++++ .github/workflows/delete-aks.yaml | 31 ++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 .github/workflows/create-aks.yaml create mode 100644 .github/workflows/delete-aks.yaml diff --git a/.github/workflows/create-aks.yaml b/.github/workflows/create-aks.yaml new file mode 100644 index 000000000..b2b6c76e7 --- /dev/null +++ b/.github/workflows/create-aks.yaml @@ -0,0 +1,32 @@ +name: CI | Create AKS cluster +on: + workflow_call: + inputs: + name: + required: true + type: string + +jobs: + create-aks: + runs-on: ubuntu-latest + steps: + - name: Download Azure CLI + run: | + curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + + - name: Log into the Azure account + run: | + az login \ + --service-principal \ + -u "${{ secrets.AZ_APPID }}" \ + -p "${{ secrets.AZ_PASSWORD }}" \ + --tenant "${{ secrets.AZ_TENANT_ID }}" + + - name: Create AKS cluster + run: | + az aks create \ + -g "kataCI" \ + -n "${{ inputs.name }}" \ + -s "Standard_D4s_v3" \ + --node-count 1 \ + --generate-ssh-keys diff --git a/.github/workflows/delete-aks.yaml b/.github/workflows/delete-aks.yaml new file mode 100644 index 000000000..2c9e6d21a --- /dev/null +++ b/.github/workflows/delete-aks.yaml @@ -0,0 +1,31 @@ +name: CI | Delete AKS cluster +on: + workflow_call: + inputs: + name: + required: true + type: string + +jobs: + delete-aks: + runs-on: ubuntu-latest + steps: + - name: Download Azure CLI + run: | + curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + + - name: Log into the Azure account + run: | + az login \ + --service-principal \ + -u "${{ secrets.AZ_APPID }}" \ + -p "${{ secrets.AZ_PASSWORD }}" \ + --tenant "${{ secrets.AZ_TENANT_ID }}" + + - name: Delete AKS cluster + run: | + az aks delete \ + -g "kataCI" \ + -n "${{ inputs.name }}" \ + --yes \ + --no-wait From 53b526b6bd43ea9da4913df0c76f2c2a1022cb2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Fri, 31 Mar 2023 08:11:29 +0200 Subject: [PATCH 031/137] gha: k8s: Add snippet to run k8s tests on aks clusters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This will be shortly used as part of a newly created GitHub action which will replace our Jenkins CI. 
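Stripped of the Azure plumbing, the heart of the job is the sequence
sketched below. The paths and the pod label are taken from the workflow
itself; the kata-deploy-ci image tag is whatever the caller passes in,
and qemu stands in for one entry of the hypervisor matrix.

    # Install kata-deploy on the AKS cluster and wait for it to be ready
    kubectl apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml
    kubectl apply -f tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml
    kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
    kubectl apply -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml

    # Then drive the bats suites against the chosen hypervisor
    KATA_HYPERVISOR=qemu bash tests/integration/kubernetes/run_kubernetes_tests.sh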
Signed-off-by: Fabiano Fidêncio --- .github/workflows/run-k8s-tests-on-aks.yaml | 85 +++++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 .github/workflows/run-k8s-tests-on-aks.yaml diff --git a/.github/workflows/run-k8s-tests-on-aks.yaml b/.github/workflows/run-k8s-tests-on-aks.yaml new file mode 100644 index 000000000..09ff68092 --- /dev/null +++ b/.github/workflows/run-k8s-tests-on-aks.yaml @@ -0,0 +1,85 @@ +name: CI | Run kubernetes tests on AKS +on: + workflow_call: + inputs: + image-tag: + required: true + type: string + +jobs: + create-aks: + strategy: + matrix: + vmm: + - clh + - qemu + uses: ./.github/workflows/create-aks.yaml + with: + name: ${{ github.event.pull_request.number }}-${{ github.sha }}-${{ matrix.vmm }}-amd64 + secrets: inherit + + run-k8s-tests: + strategy: + fail-fast: false + matrix: + vmm: + - clh + - qemu + runs-on: ubuntu-latest + needs: create-aks + steps: + - uses: actions/checkout@v3 + - name: Install `bats` + run: | + sudo apt-get update + sudo apt-get -y install bats + + - name: Install `kubectl` + run: | + sudo az aks install-cli + + - name: Log into the Azure account + run: | + az login \ + --service-principal \ + -u "${{ secrets.AZ_APPID }}" \ + -p "${{ secrets.AZ_PASSWORD }}" \ + --tenant "${{ secrets.AZ_TENANT_ID }}" + + - name: Download credentials for the Kubernetes CLI to use them + run: | + az aks get-credentials -g "kataCI" -n ${{ github.event.pull_request.number }}-${{ github.sha }}-${{ matrix.vmm }}-amd64 + + - name: Deploy kata-deploy + run: | + sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|quay.io/kata-containers/kata-deploy-ci:${{ inputs.image-tag }}|g" tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + cat tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + cat tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml | grep "quay.io/kata-containers/kata-deploy-ci:${{ inputs.image-tag }}" || die "Failed to setup the tests image" + + kubectl apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml + kubectl apply -f tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod + kubectl apply -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml + + - name: Run tests + timeout-minutes: 30 + run: | + pushd tests/integration/kubernetes + sed -i -e 's|runtimeClassName: kata|runtimeClassName: kata-${{ matrix.vmm }}|' runtimeclass_workloads/*.yaml + bash run_kubernetes_tests.sh + popd + env: + KATA_HYPERVISOR: ${{ matrix.vmm }} + + delete-aks: + strategy: + matrix: + vmm: + - clh + - qemu + needs: run-k8s-tests + if: always() + uses: ./.github/workflows/delete-aks.yaml + with: + name: ${{ github.event.pull_request.number }}-${{ github.sha }}-${{ matrix.vmm }}-amd64 + secrets: inherit From cab9ca0436e17b9ec783a5e1f648343af6035d80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Fri, 31 Mar 2023 08:30:10 +0200 Subject: [PATCH 032/137] gha: Add a CI pipeline for Kata Containers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is the very first step to replacing the Jenkins CI, and I've decided to start with an x86_64 approach only (although easily expansible for other arches as soon as they're ready to switch), and to start running our kubernetes tests (now running on AKS). 
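Sketched as a chain, the pipeline wires up three reusable workflows per
pull request (the names are the ones defined under .github/workflows/):

    # build-kata-static-tarball-amd64    -> kata-static tarball artifact
    # publish-kata-deploy-payload-amd64  -> per-PR kata-deploy-ci payload image
    # run-k8s-tests-on-aks               -> installs that image on AKS, runs the bats suites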
Fixes: #6541 Signed-off-by: Fabiano Fidêncio --- .github/workflows/ci-on-push.yaml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 .github/workflows/ci-on-push.yaml diff --git a/.github/workflows/ci-on-push.yaml b/.github/workflows/ci-on-push.yaml new file mode 100644 index 000000000..10cf5aeb3 --- /dev/null +++ b/.github/workflows/ci-on-push.yaml @@ -0,0 +1,25 @@ +name: Kata Containers CI +on: + pull_request + +jobs: + build-kata-static-tarball-amd64: + uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml + with: + tarball-suffix: -${{ github.event.pull_request.number}}-${{ github.sha }} + + publish-kata-deploy-payload-amd64: + needs: build-kata-static-tarball-amd64 + uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml + with: + tarball-suffix: -${{ github.event.pull_request.number}}-${{ github.sha }} + repo: kata-deploy-ci + tag: ${{ github.event.pull_request.number }}-${{ github.sha }}-amd64 + secrets: inherit + + run-k8s-tests-on-aks: + needs: publish-kata-deploy-payload-amd64 + uses: ./.github/workflows/run-k8s-tests-on-aks.yaml + with: + image-tag: ${{ github.event.pull_request.number }}-${{ github.sha }}-amd64 + secrets: inherit From 43894e94591d4c6ac7d683d1e013429f21bdc06e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Fri, 31 Mar 2023 08:36:33 +0200 Subject: [PATCH 033/137] gha: Remove kata-deploy-push.yaml MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This becomes redundant now that its steps are covered as part of the `ci-on-push.yaml`. Signed-off-by: Fabiano Fidêncio --- .github/workflows/kata-deploy-push.yaml | 80 ------------------------- 1 file changed, 80 deletions(-) delete mode 100644 .github/workflows/kata-deploy-push.yaml diff --git a/.github/workflows/kata-deploy-push.yaml b/.github/workflows/kata-deploy-push.yaml deleted file mode 100644 index ce45ab5ab..000000000 --- a/.github/workflows/kata-deploy-push.yaml +++ /dev/null @@ -1,80 +0,0 @@ -name: kata deploy build - -on: - pull_request: - types: - - opened - - edited - - reopened - - synchronize - paths: - - tools/** - - versions.yaml - -jobs: - build-asset: - runs-on: ubuntu-latest - strategy: - matrix: - asset: - - kernel - - kernel-dragonball-experimental - - shim-v2 - - qemu - - cloud-hypervisor - - firecracker - - rootfs-image - - rootfs-initrd - - virtiofsd - - nydus - steps: - - uses: actions/checkout@v2 - - name: Build ${{ matrix.asset }} - if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} - run: | - make "${KATA_ASSET}-tarball" - build_dir=$(readlink -f build) - # store-artifact does not work with symlink - sudo cp -r --preserve=all "${build_dir}" "kata-build" - env: - KATA_ASSET: ${{ matrix.asset }} - - - name: store-artifact ${{ matrix.asset }} - if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} - uses: actions/upload-artifact@v2 - with: - name: kata-artifacts - path: kata-build/kata-static-${{ matrix.asset }}.tar.xz - if-no-files-found: error - - create-kata-tarball: - runs-on: ubuntu-latest - needs: build-asset - steps: - - uses: actions/checkout@v2 - - name: get-artifacts - if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} - uses: actions/download-artifact@v2 - with: - name: kata-artifacts - path: build - - name: merge-artifacts - if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} - run: | - make merge-builds - - name: store-artifacts - if: ${{ 
!contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} - uses: actions/upload-artifact@v2 - with: - name: kata-static-tarball - path: kata-static.tar.xz - - make-kata-tarball: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: make kata-tarball - if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} - run: | - make kata-tarball - sudo make install-tarball From 60c62c3b69a2e2a43fd2ad94aee21bdb5645990f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Fri, 31 Mar 2023 08:37:29 +0200 Subject: [PATCH 034/137] gha: Remove kata-deploy-test.yaml MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This workflow becomes redundant as we're already testing kubernetes using kata-deploy, and also testing it on AKS. Signed-off-by: Fabiano Fidêncio --- .github/workflows/kata-deploy-test.yaml | 164 ------------------------ 1 file changed, 164 deletions(-) delete mode 100644 .github/workflows/kata-deploy-test.yaml diff --git a/.github/workflows/kata-deploy-test.yaml b/.github/workflows/kata-deploy-test.yaml deleted file mode 100644 index 6b30109c2..000000000 --- a/.github/workflows/kata-deploy-test.yaml +++ /dev/null @@ -1,164 +0,0 @@ -on: - workflow_dispatch: # this is used to trigger the workflow on non-main branches - inputs: - pr: - description: 'PR number from the selected branch to test' - type: string - required: true - issue_comment: - types: [created, edited] - -name: test-kata-deploy - -jobs: - check-comment-and-membership: - runs-on: ubuntu-latest - if: | - github.event.issue.pull_request - && github.event_name == 'issue_comment' - && github.event.action == 'created' - && startsWith(github.event.comment.body, '/test_kata_deploy') - || github.event_name == 'workflow_dispatch' - steps: - - name: Check membership on comment or dispatch - uses: kata-containers/is-organization-member@1.0.1 - id: is_organization_member - with: - organization: kata-containers - username: ${{ github.event.comment.user.login || github.event.sender.login }} - token: ${{ secrets.GITHUB_TOKEN }} - - name: Fail if not member - run: | - result=${{ steps.is_organization_member.outputs.result }} - if [ $result == false ]; then - user=${{ github.event.comment.user.login || github.event.sender.login }} - echo Either ${user} is not part of the kata-containers organization - echo or ${user} has its Organization Visibility set to Private at - echo https://github.com/orgs/kata-containers/people?query=${user} - echo - echo Ensure you change your Organization Visibility to Public and - echo trigger the test again. 
- exit 1 - fi - - build-asset: - runs-on: ubuntu-latest - needs: check-comment-and-membership - strategy: - matrix: - asset: - - cloud-hypervisor - - firecracker - - kernel - - kernel-dragonball-experimental - - nydus - - qemu - - rootfs-image - - rootfs-initrd - - shim-v2 - - virtiofsd - steps: - - name: get-PR-ref - id: get-PR-ref - run: | - if [ ${{ github.event_name }} == 'issue_comment' ]; then - ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#') - else # workflow_dispatch - ref="refs/pull/${{ github.event.inputs.pr }}/merge" - fi - echo "reference for PR: " ${ref} "event:" ${{ github.event_name }} - echo "pr-ref=${ref}" >> $GITHUB_OUTPUT - - uses: actions/checkout@v2 - with: - ref: ${{ steps.get-PR-ref.outputs.pr-ref }} - - - name: Build ${{ matrix.asset }} - run: | - make "${KATA_ASSET}-tarball" - build_dir=$(readlink -f build) - # store-artifact does not work with symlink - sudo cp -r "${build_dir}" "kata-build" - env: - KATA_ASSET: ${{ matrix.asset }} - TAR_OUTPUT: ${{ matrix.asset }}.tar.gz - - - name: store-artifact ${{ matrix.asset }} - uses: actions/upload-artifact@v2 - with: - name: kata-artifacts - path: kata-build/kata-static-${{ matrix.asset }}.tar.xz - if-no-files-found: error - - create-kata-tarball: - runs-on: ubuntu-latest - needs: build-asset - steps: - - name: get-PR-ref - id: get-PR-ref - run: | - if [ ${{ github.event_name }} == 'issue_comment' ]; then - ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#') - else # workflow_dispatch - ref="refs/pull/${{ github.event.inputs.pr }}/merge" - fi - echo "reference for PR: " ${ref} "event:" ${{ github.event_name }} - echo "pr-ref=${ref}" >> $GITHUB_OUTPUT - - uses: actions/checkout@v2 - with: - ref: ${{ steps.get-PR-ref.outputs.pr-ref }} - - name: get-artifacts - uses: actions/download-artifact@v2 - with: - name: kata-artifacts - path: kata-artifacts - - name: merge-artifacts - run: | - ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts - - name: store-artifacts - uses: actions/upload-artifact@v2 - with: - name: kata-static-tarball - path: kata-static.tar.xz - - kata-deploy: - needs: create-kata-tarball - runs-on: ubuntu-latest - steps: - - name: get-PR-ref - id: get-PR-ref - run: | - if [ ${{ github.event_name }} == 'issue_comment' ]; then - ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#') - else # workflow_dispatch - ref="refs/pull/${{ github.event.inputs.pr }}/merge" - fi - echo "reference for PR: " ${ref} "event:" ${{ github.event_name }} - echo "pr-ref=${ref}" >> $GITHUB_OUTPUT - - uses: actions/checkout@v2 - with: - ref: ${{ steps.get-PR-ref.outputs.pr-ref }} - - name: get-kata-tarball - uses: actions/download-artifact@v2 - with: - name: kata-static-tarball - - name: build-and-push-kata-deploy-ci - id: build-and-push-kata-deploy-ci - run: | - PR_SHA=$(git log --format=format:%H -n1) - mv kata-static.tar.xz $GITHUB_WORKSPACE/tools/packaging/kata-deploy/kata-static.tar.xz - docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t quay.io/kata-containers/kata-deploy-ci:$PR_SHA $GITHUB_WORKSPACE/tools/packaging/kata-deploy - docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io - docker push quay.io/kata-containers/kata-deploy-ci:$PR_SHA - mkdir -p packaging/kata-deploy - ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action 
packaging/kata-deploy/action
-          echo "PKG_SHA=${PR_SHA}" >> $GITHUB_OUTPUT
-      - name: test-kata-deploy-ci-in-aks
-        uses: ./packaging/kata-deploy/action
-        with:
-          packaging-sha: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
-        env:
-          PKG_SHA: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
-          AZ_APPID: ${{ secrets.AZ_APPID }}
-          AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
-          AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
-          AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}

From b661e0cf3f27f59ec91670e1e45aaf5aa2191e9b Mon Sep 17 00:00:00 2001
From: Christophe de Dinechin
Date: Fri, 31 Mar 2023 10:49:24 +0200
Subject: [PATCH 035/137] rustjail: Add anyhow context for D-Bus connections

In cases where the D-Bus connection fails, add a little additional
context about the origin of the error.

Fixes: #6561

Signed-off-by: Christophe de Dinechin
Suggested-by: Archana Shinde
Spell-checked-by: Greg Kurz
---
 src/agent/rustjail/src/cgroups/systemd/dbus_client.rs | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/src/agent/rustjail/src/cgroups/systemd/dbus_client.rs b/src/agent/rustjail/src/cgroups/systemd/dbus_client.rs
index fd3b9bf8f..0ff606930 100644
--- a/src/agent/rustjail/src/cgroups/systemd/dbus_client.rs
+++ b/src/agent/rustjail/src/cgroups/systemd/dbus_client.rs
@@ -36,8 +36,9 @@ pub struct DBusClient {}

 impl DBusClient {
     fn build_proxy(&self) -> Result> {
-        let connection = zbus::blocking::Connection::system()?;
-        let proxy = SystemManager::new(&connection)?;
+        let connection =
+            zbus::blocking::Connection::system().context("Establishing a D-Bus connection")?;
+        let proxy = SystemManager::new(&connection).context("Building a D-Bus proxy manager")?;
         Ok(proxy)
     }
 }
@@ -109,7 +110,9 @@ impl SystemdInterface for DBusClient {
     }

     fn unit_exists(&self, unit_name: &str) -> Result {
-        let proxy = self.build_proxy()?;
+        let proxy = self
+            .build_proxy()
+            .with_context(|| format!("Checking if systemd unit {} exists", unit_name))?;

         Ok(proxy.get_unit(unit_name).is_ok())
     }

From d17dfe4cdd276b70f4f6aa878568eea001a59b20 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Mon, 3 Apr 2023 15:28:01 +0200
Subject: [PATCH 036/137] gha: Use ghcr.io for the k8s CI
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Let's switch to using the `ghcr.io` registry for the k8s CI, as this
will save us some trouble when running the CI with PRs coming from
forked repos.
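For illustration, the ghcr.io login added below boils down to the
following manual invocation (a sketch only; GITHUB_ACTOR and
GITHUB_TOKEN are the identity and the ephemeral token GitHub injects
into every workflow run, which is why no extra repository secret is
needed for PRs coming from forks):

    # Log in to ghcr.io with the workflow's own ephemeral credentials;
    # unlike quay.io, no long-lived deployer secret is required.
    echo "${GITHUB_TOKEN}" | docker login ghcr.io \
        -u "${GITHUB_ACTOR}" --password-stdin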
Fixes: #6587 Signed-off-by: Fabiano Fidêncio --- .github/workflows/ci-on-push.yaml | 8 ++++++-- .github/workflows/payload-after-push.yaml | 3 ++- .../publish-kata-deploy-payload-amd64.yaml | 17 ++++++++++++++++- .../publish-kata-deploy-payload-arm64.yaml | 18 +++++++++++++++++- .../publish-kata-deploy-payload-s390x.yaml | 19 +++++++++++++++++-- .github/workflows/run-k8s-tests-on-aks.yaml | 12 +++++++++--- 6 files changed, 67 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci-on-push.yaml b/.github/workflows/ci-on-push.yaml index 10cf5aeb3..8e9d6d078 100644 --- a/.github/workflows/ci-on-push.yaml +++ b/.github/workflows/ci-on-push.yaml @@ -13,13 +13,17 @@ jobs: uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml with: tarball-suffix: -${{ github.event.pull_request.number}}-${{ github.sha }} - repo: kata-deploy-ci + registry: ghcr.io + repo: ${{ github.repository_owner }}/kata-deploy-ci tag: ${{ github.event.pull_request.number }}-${{ github.sha }}-amd64 + quay-io-login-continue-on-error: true secrets: inherit run-k8s-tests-on-aks: needs: publish-kata-deploy-payload-amd64 uses: ./.github/workflows/run-k8s-tests-on-aks.yaml with: - image-tag: ${{ github.event.pull_request.number }}-${{ github.sha }}-amd64 + registry: ghcr.io + repo: ${{ github.repository_owner }}/kata-deploy-ci + tag: ${{ github.event.pull_request.number }}-${{ github.sha }}-amd64 secrets: inherit diff --git a/.github/workflows/payload-after-push.yaml b/.github/workflows/payload-after-push.yaml index a03d4e543..741af5902 100644 --- a/.github/workflows/payload-after-push.yaml +++ b/.github/workflows/payload-after-push.yaml @@ -19,7 +19,8 @@ jobs: needs: build-assets-amd64 uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml with: - registry: quay.io/kata-containers/kata-deploy-ci + registry: quay.io + repo: kata-containers/kata-deploy-ci tag: kata-containers-amd64 secrets: inherit diff --git a/.github/workflows/publish-kata-deploy-payload-amd64.yaml b/.github/workflows/publish-kata-deploy-payload-amd64.yaml index 27c0ade46..fea62765c 100644 --- a/.github/workflows/publish-kata-deploy-payload-amd64.yaml +++ b/.github/workflows/publish-kata-deploy-payload-amd64.yaml @@ -5,12 +5,19 @@ on: tarball-suffix: required: false type: string + registry: + required: true + type: string repo: required: true type: string tag: required: true type: string + quay-io-login-continue-on-error: + required: false + type: boolean + default: false jobs: kata-payload: @@ -29,10 +36,18 @@ jobs: registry: quay.io username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} + continue-on-error: ${{ inputs.quay-io-login-continue-on-error }} + + - name: Login to Kata Containers ghcr.io + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: build-and-push-kata-payload id: build-and-push-kata-payload run: | ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ $(pwd)/kata-static.tar.xz \ - quay.io/kata-containers/${{ inputs.repo }} ${{ inputs.tag }} + ${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }} diff --git a/.github/workflows/publish-kata-deploy-payload-arm64.yaml b/.github/workflows/publish-kata-deploy-payload-arm64.yaml index 0834b7bf5..9b8e736dc 100644 --- a/.github/workflows/publish-kata-deploy-payload-arm64.yaml +++ b/.github/workflows/publish-kata-deploy-payload-arm64.yaml @@ -5,12 +5,19 @@ on: tarball-suffix: required: false type: string + registry: + 
required: true + type: string repo: required: true type: string tag: required: true type: string + quay-io-login-continue-on-error: + required: false + type: boolean + default: false jobs: kata-payload: @@ -33,10 +40,19 @@ jobs: registry: quay.io username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} + continue-on-error: ${{ inputs.quay-io-login-continue-on-error }} + + - name: Login to Kata Containers ghcr.io + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: build-and-push-kata-payload id: build-and-push-kata-payload run: | ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ $(pwd)/kata-static.tar.xz \ - quay.io/kata-containers/$${ inputs.repo }} ${{ inputs.tag }} + ${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }} + diff --git a/.github/workflows/publish-kata-deploy-payload-s390x.yaml b/.github/workflows/publish-kata-deploy-payload-s390x.yaml index 6c96df801..89efbd58e 100644 --- a/.github/workflows/publish-kata-deploy-payload-s390x.yaml +++ b/.github/workflows/publish-kata-deploy-payload-s390x.yaml @@ -6,11 +6,18 @@ on: required: false type: string registry: - repo: true + required: true + type: string + repo: + required: true type: string tag: required: true type: string + quay-io-login-continue-on-error: + required: false + type: boolean + default: false jobs: kata-payload: @@ -33,10 +40,18 @@ jobs: registry: quay.io username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} + continue-on-error: ${{ inputs.quay-io-login-continue-on-error }} + + - name: Login to Kata Containers ghcr.io + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: build-and-push-kata-payload id: build-and-push-kata-payload run: | ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \ $(pwd)/kata-static.tar.xz \ - quay.io/kata-containers/${{ inputs.repo }} ${{ inputs.tag }} + ${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }} diff --git a/.github/workflows/run-k8s-tests-on-aks.yaml b/.github/workflows/run-k8s-tests-on-aks.yaml index 09ff68092..83dd4ab7b 100644 --- a/.github/workflows/run-k8s-tests-on-aks.yaml +++ b/.github/workflows/run-k8s-tests-on-aks.yaml @@ -2,7 +2,13 @@ name: CI | Run kubernetes tests on AKS on: workflow_call: inputs: - image-tag: + registry: + required: true + type: string + repo: + required: true + type: string + tag: required: true type: string @@ -52,9 +58,9 @@ jobs: - name: Deploy kata-deploy run: | - sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|quay.io/kata-containers/kata-deploy-ci:${{ inputs.image-tag }}|g" tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}|g" tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml cat tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml - cat tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml | grep "quay.io/kata-containers/kata-deploy-ci:${{ inputs.image-tag }}" || die "Failed to setup the tests image" + cat tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml | grep "${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}" || die "Failed to setup the tests image" kubectl apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml kubectl 
apply -f tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml

From 3215860a47f1fff1436a94669e24523bf9627b02 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Mon, 3 Apr 2023 18:16:52 +0200
Subject: [PATCH 037/137] gha: Set ci-on-push to run on `pull_request_target`
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is less secure than running the PR on `pull_request`, and will
require using an additional `ok-to-test` label to make sure someone
deliberately ran the actions coming from a forked repo.

Signed-off-by: Fabiano Fidêncio
---
 .github/workflows/ci-on-push.yaml | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/ci-on-push.yaml b/.github/workflows/ci-on-push.yaml
index 10cf5aeb3..cbab26837 100644
--- a/.github/workflows/ci-on-push.yaml
+++ b/.github/workflows/ci-on-push.yaml
@@ -1,14 +1,21 @@
 name: Kata Containers CI
 on:
-  pull_request
+  pull_request_target:
+    types:
+      - opened
+      - reopened
+      - labeled
+      - synchronize

 jobs:
   build-kata-static-tarball-amd64:
+    if: contains(github.event.pull_request.labels.*.name, 'ok-to-test')
     uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml
     with:
       tarball-suffix: -${{ github.event.pull_request.number}}-${{ github.sha }}

   publish-kata-deploy-payload-amd64:
+    if: contains(github.event.pull_request.labels.*.name, 'ok-to-test')
     needs: build-kata-static-tarball-amd64
     uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml
     with:
@@ -20,6 +27,7 @@ jobs:
     secrets: inherit

   run-k8s-tests-on-aks:
+    if: contains(github.event.pull_request.labels.*.name, 'ok-to-test')
     needs: publish-kata-deploy-payload-amd64
     uses: ./.github/workflows/run-k8s-tests-on-aks.yaml
     with:

From fe86c08a634ed3effff1915447c2d8a1197dc960 Mon Sep 17 00:00:00 2001
From: Zvonko Kaiser
Date: Mon, 3 Apr 2023 13:40:59 +0000
Subject: [PATCH 038/137] tools: Avoid building the kernel twice

Two different kernel build targets (build, install) both contained
instructions to build the kernel, hence it was executed twice.
Install should only do install and build should only do build.
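For reference, a simplified sketch of the resulting dispatch in
build-kernel.sh (variable names approximated; the actual diff follows
below): each target now maps to exactly one action.

    case "${subcmd}" in
        build)
            build_kernel "${kernel_path}"
            ;;
        install)
            # No implicit rebuild anymore: install only installs.
            install_kata "${kernel_path}"
            ;;
    esac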
Fixes: #6588

Signed-off-by: Zvonko Kaiser
---
 tools/packaging/kernel/build-kernel.sh     | 3 ++-
 tools/packaging/kernel/kata_config_version | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/tools/packaging/kernel/build-kernel.sh b/tools/packaging/kernel/build-kernel.sh
index af7e36e6b..4de27ca33 100755
--- a/tools/packaging/kernel/build-kernel.sh
+++ b/tools/packaging/kernel/build-kernel.sh
@@ -419,6 +419,8 @@ install_kata() {
 	local kernel_path=${1:-}
 	[ -n "${kernel_path}" ] || die "kernel_path not provided"
 	[ -d "${kernel_path}" ] || die "path to kernel does not exist, use ${script_name} setup"
+	[ -n "${arch_target}" ] || arch_target="$(uname -m)"
+	arch_target=$(arch_to_kernel "${arch_target}")
 	pushd "${kernel_path}" >>/dev/null
 	config_version=$(get_config_version)
 	[ -n "${config_version}" ] || die "failed to get config version"
@@ -593,7 +595,6 @@ main() {
 			build_kernel "${kernel_path}"
 			;;
 		install)
-			build_kernel "${kernel_path}"
 			install_kata "${kernel_path}"
 			;;
 		setup)
diff --git a/tools/packaging/kernel/kata_config_version b/tools/packaging/kernel/kata_config_version
index 257e56326..a9c8fe829 100644
--- a/tools/packaging/kernel/kata_config_version
+++ b/tools/packaging/kernel/kata_config_version
@@ -1 +1 @@
-102
+103

From 8086c75f61a3a3134f861fce27a07518add5f86a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Mon, 3 Apr 2023 14:30:59 +0200
Subject: [PATCH 039/137] gha: Also run k8s tests on AKS with dragonball
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

As already done for Cloud Hypervisor and QEMU, let's make sure we can
run the AKS tests using dragonball.

Fixes: #6583

Signed-off-by: Fabiano Fidêncio
---
 .github/workflows/run-k8s-tests-on-aks.yaml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.github/workflows/run-k8s-tests-on-aks.yaml b/.github/workflows/run-k8s-tests-on-aks.yaml
index 83dd4ab7b..0cad6581e 100644
--- a/.github/workflows/run-k8s-tests-on-aks.yaml
+++ b/.github/workflows/run-k8s-tests-on-aks.yaml
@@ -18,6 +18,7 @@ jobs:
       matrix:
         vmm:
           - clh
+          - dragonball
           - qemu
     uses: ./.github/workflows/create-aks.yaml
     with:
@@ -30,6 +31,7 @@ jobs:
       matrix:
         vmm:
           - clh
+          - dragonball
           - qemu
     runs-on: ubuntu-latest
     needs: create-aks
@@ -82,6 +84,7 @@ jobs:
       matrix:
         vmm:
           - clh
+          - dragonball
           - qemu
     needs: run-k8s-tests
     if: always()

From a159ffdba7835011abc8c6369e07dd4a52010bbf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Wed, 5 Apr 2023 09:41:48 +0200
Subject: [PATCH 040/137] gha: ci-on-push: Depend on Commit Message Check
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Let's make this workflow dependent on the commit message check, and
only start it if the commit message check passes.

As a side effect, this allows us to run this specific workflow using
secrets, without having to rely on `pull_request_target`.
Signed-off-by: Fabiano Fidêncio
---
 .github/workflows/ci-on-push.yaml | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/ci-on-push.yaml b/.github/workflows/ci-on-push.yaml
index cbab26837..37f4b5080 100644
--- a/.github/workflows/ci-on-push.yaml
+++ b/.github/workflows/ci-on-push.yaml
@@ -1,21 +1,19 @@
 name: Kata Containers CI
 on:
-  pull_request_target:
+  workflow_run:
+    workflows:
+      - Commit Message Check
     types:
-      - opened
-      - reopened
-      - labeled
-      - synchronize
+      - completed

 jobs:
   build-kata-static-tarball-amd64:
-    if: contains(github.event.pull_request.labels.*.name, 'ok-to-test')
+    if: ${{ github.event.workflow_run.conclusion == 'success' }}
     uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml
     with:
       tarball-suffix: -${{ github.event.pull_request.number}}-${{ github.sha }}

   publish-kata-deploy-payload-amd64:
-    if: contains(github.event.pull_request.labels.*.name, 'ok-to-test')
     needs: build-kata-static-tarball-amd64
     uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml
     with:
@@ -27,7 +25,6 @@ jobs:
     secrets: inherit

   run-k8s-tests-on-aks:
-    if: contains(github.event.pull_request.labels.*.name, 'ok-to-test')
     needs: publish-kata-deploy-payload-amd64
     uses: ./.github/workflows/run-k8s-tests-on-aks.yaml
     with:

From 3a760a157a05a38327468b92e2c3c1b30283b303 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Wed, 5 Apr 2023 10:24:31 +0200
Subject: [PATCH 041/137] gha: ci-on-push: Adjust to using workflow_run
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The approach previously used to get the PR's commit sha only works
with `pull_request*` kinds of events. Let's adapt it to `workflow_run`
now that we're using it.

With this change we ended up dropping the PR number from the tarball
suffix, as that's not straightforward to get and, to be honest, not a
unique differentiator that would justify the effort.
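To illustrate why it is not worth the effort: recovering the PR number
from a head sha would need an extra API round-trip, roughly along the
lines of the following sketch (OWNER, REPO and HEAD_SHA are
placeholders; the endpoint is GitHub's "list pull requests associated
with a commit"):

    # Sketch only: resolve a head sha back to its PR number.
    gh api "repos/${OWNER}/${REPO}/commits/${HEAD_SHA}/pulls" \
        --jq '.[0].number'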
Signed-off-by: Fabiano Fidêncio --- .github/workflows/ci-on-push.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-on-push.yaml b/.github/workflows/ci-on-push.yaml index 37f4b5080..940be3a60 100644 --- a/.github/workflows/ci-on-push.yaml +++ b/.github/workflows/ci-on-push.yaml @@ -11,16 +11,16 @@ jobs: if: ${{ github.event.workflow_run.conclusion == 'success' }} uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml with: - tarball-suffix: -${{ github.event.pull_request.number}}-${{ github.sha }} + tarball-suffix: -${{ github.event.workflow_run.head_sha }} publish-kata-deploy-payload-amd64: needs: build-kata-static-tarball-amd64 uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml with: - tarball-suffix: -${{ github.event.pull_request.number}}-${{ github.sha }} + tarball-suffix: -${{ github.event.workflow_run.head_sha }} registry: ghcr.io repo: ${{ github.repository_owner }}/kata-deploy-ci - tag: ${{ github.event.pull_request.number }}-${{ github.sha }}-amd64 + tag: ${{ github.event.workflow_run.head_sha }}-amd64 quay-io-login-continue-on-error: true secrets: inherit @@ -30,5 +30,5 @@ jobs: with: registry: ghcr.io repo: ${{ github.repository_owner }}/kata-deploy-ci - tag: ${{ github.event.pull_request.number }}-${{ github.sha }}-amd64 + tag: ${{ github.event.workflow_run.head_sha }}-amd64 secrets: inherit From 7855b43062c1001de372253d33ebacaf120f24d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Wed, 5 Apr 2023 10:33:47 +0200 Subject: [PATCH 042/137] gha: ci-on-push: Adapt chained jobs to workflow_run MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As we're using the `workflow_run` event, the checkout action would pull the **current target branch** instead of the PR one. 
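In plain git terms, passing the PR's head sha as `checkout-ref` makes
the checkout step behave roughly like the sketch below, instead of
checking out the target-branch commit the `workflow_run` event points
at (CHECKOUT_REF stands for the input value):

    # Fetch and check out the PR head instead of the default
    # (target-branch) commit.
    git fetch origin "${CHECKOUT_REF}"
    git checkout FETCH_HEAD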
Signed-off-by: Fabiano Fidêncio --- .github/workflows/build-kata-static-tarball-amd64.yaml | 7 +++++++ .github/workflows/build-kata-static-tarball-arm64.yaml | 7 +++++++ .github/workflows/build-kata-static-tarball-s390x.yaml | 7 +++++++ .github/workflows/ci-on-push.yaml | 3 +++ .github/workflows/publish-kata-deploy-payload-amd64.yaml | 6 ++++++ .github/workflows/publish-kata-deploy-payload-arm64.yaml | 6 ++++++ .github/workflows/publish-kata-deploy-payload-s390x.yaml | 6 ++++++ .github/workflows/run-k8s-tests-on-aks.yaml | 6 ++++++ 8 files changed, 48 insertions(+) diff --git a/.github/workflows/build-kata-static-tarball-amd64.yaml b/.github/workflows/build-kata-static-tarball-amd64.yaml index 1a14d145e..f668d16fb 100644 --- a/.github/workflows/build-kata-static-tarball-amd64.yaml +++ b/.github/workflows/build-kata-static-tarball-amd64.yaml @@ -2,6 +2,10 @@ name: CI | Build kata-static tarball for amd64 on: workflow_call: inputs: + checkout-ref: + required: false + type: string + default: ${{ github.sha }} tarball-suffix: required: false type: string @@ -25,6 +29,7 @@ jobs: steps: - uses: actions/checkout@v3 with: + ref: ${{ inputs.checkout-ref }} fetch-depth: 0 # This is needed in order to keep the commit ids history - name: Build ${{ matrix.asset }} run: | @@ -50,6 +55,8 @@ jobs: needs: build-asset steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - name: get-artifacts uses: actions/download-artifact@v3 with: diff --git a/.github/workflows/build-kata-static-tarball-arm64.yaml b/.github/workflows/build-kata-static-tarball-arm64.yaml index f7b040b4a..e797dc824 100644 --- a/.github/workflows/build-kata-static-tarball-arm64.yaml +++ b/.github/workflows/build-kata-static-tarball-arm64.yaml @@ -2,6 +2,10 @@ name: CI | Build kata-static tarball for arm64 on: workflow_call: inputs: + checkout-ref: + required: false + type: string + default: ${{ github.sha }} tarball-suffix: required: false type: string @@ -29,6 +33,7 @@ jobs: - uses: actions/checkout@v3 with: + ref: ${{ inputs.checkout-ref }} fetch-depth: 0 # This is needed in order to keep the commit ids history - name: Build ${{ matrix.asset }} run: | @@ -58,6 +63,8 @@ jobs: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - name: get-artifacts uses: actions/download-artifact@v3 with: diff --git a/.github/workflows/build-kata-static-tarball-s390x.yaml b/.github/workflows/build-kata-static-tarball-s390x.yaml index c00795fe3..cf22379b3 100644 --- a/.github/workflows/build-kata-static-tarball-s390x.yaml +++ b/.github/workflows/build-kata-static-tarball-s390x.yaml @@ -2,6 +2,10 @@ name: CI | Build kata-static tarball for s390x on: workflow_call: inputs: + checkout-ref: + required: false + type: string + default: ${{ github.sha }} tarball-suffix: required: false type: string @@ -25,6 +29,7 @@ jobs: - uses: actions/checkout@v3 with: + ref: ${{ inputs.checkout-ref }} fetch-depth: 0 # This is needed in order to keep the commit ids history - name: Build ${{ matrix.asset }} run: | @@ -55,6 +60,8 @@ jobs: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - name: get-artifacts uses: actions/download-artifact@v3 with: diff --git a/.github/workflows/ci-on-push.yaml b/.github/workflows/ci-on-push.yaml index 940be3a60..da870d7d2 100644 --- a/.github/workflows/ci-on-push.yaml +++ b/.github/workflows/ci-on-push.yaml @@ -11,12 +11,14 @@ jobs: if: ${{ github.event.workflow_run.conclusion == 'success' }} uses: 
./.github/workflows/build-kata-static-tarball-amd64.yaml with: + checkout-ref: ${{ github.event.workflow_run.head_sha }} tarball-suffix: -${{ github.event.workflow_run.head_sha }} publish-kata-deploy-payload-amd64: needs: build-kata-static-tarball-amd64 uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml with: + checkout-ref: ${{ github.event.workflow_run.head_sha }} tarball-suffix: -${{ github.event.workflow_run.head_sha }} registry: ghcr.io repo: ${{ github.repository_owner }}/kata-deploy-ci @@ -28,6 +30,7 @@ jobs: needs: publish-kata-deploy-payload-amd64 uses: ./.github/workflows/run-k8s-tests-on-aks.yaml with: + checkout-ref: ${{ github.event.workflow_run.head_sha }} registry: ghcr.io repo: ${{ github.repository_owner }}/kata-deploy-ci tag: ${{ github.event.workflow_run.head_sha }}-amd64 diff --git a/.github/workflows/publish-kata-deploy-payload-amd64.yaml b/.github/workflows/publish-kata-deploy-payload-amd64.yaml index fea62765c..bcee25eea 100644 --- a/.github/workflows/publish-kata-deploy-payload-amd64.yaml +++ b/.github/workflows/publish-kata-deploy-payload-amd64.yaml @@ -2,6 +2,10 @@ name: CI | Publish kata-deploy payload for amd64 on: workflow_call: inputs: + checkout-ref: + required: false + type: string + default: ${{ github.sha }} tarball-suffix: required: false type: string @@ -24,6 +28,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - name: get-kata-tarball uses: actions/download-artifact@v3 diff --git a/.github/workflows/publish-kata-deploy-payload-arm64.yaml b/.github/workflows/publish-kata-deploy-payload-arm64.yaml index 9b8e736dc..285ac97e4 100644 --- a/.github/workflows/publish-kata-deploy-payload-arm64.yaml +++ b/.github/workflows/publish-kata-deploy-payload-arm64.yaml @@ -2,6 +2,10 @@ name: CI | Publish kata-deploy payload for arm64 on: workflow_call: inputs: + checkout-ref: + required: false + type: string + default: ${{ github.sha }} tarball-suffix: required: false type: string @@ -28,6 +32,8 @@ jobs: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - name: get-kata-tarball uses: actions/download-artifact@v3 diff --git a/.github/workflows/publish-kata-deploy-payload-s390x.yaml b/.github/workflows/publish-kata-deploy-payload-s390x.yaml index 89efbd58e..4341e4397 100644 --- a/.github/workflows/publish-kata-deploy-payload-s390x.yaml +++ b/.github/workflows/publish-kata-deploy-payload-s390x.yaml @@ -2,6 +2,10 @@ name: CI | Publish kata-deploy payload for s390x on: workflow_call: inputs: + checkout-ref: + required: false + type: string + default: ${{ github.sha }} tarball-suffix: required: false type: string @@ -28,6 +32,8 @@ jobs: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - name: get-kata-tarball uses: actions/download-artifact@v3 diff --git a/.github/workflows/run-k8s-tests-on-aks.yaml b/.github/workflows/run-k8s-tests-on-aks.yaml index 83dd4ab7b..ae70f6ce9 100644 --- a/.github/workflows/run-k8s-tests-on-aks.yaml +++ b/.github/workflows/run-k8s-tests-on-aks.yaml @@ -2,6 +2,10 @@ name: CI | Run kubernetes tests on AKS on: workflow_call: inputs: + checkout-ref: + required: false + type: string + default: ${{ github.sha }} registry: required: true type: string @@ -35,6 +39,8 @@ jobs: needs: create-aks steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - name: Install `bats` run: | sudo apt-get update From 
41026f003e1d3551712636f334c28ac5f3a0c9eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Wed, 5 Apr 2023 12:24:48 +0200 Subject: [PATCH 043/137] gha: payload-after-push: Pass registry / repo as inputs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We made registry / repo mandatory, but we only adapted that to the amd64 job. Let's fix it now and make sure this is also passed to the arm64 and s390x jobs. Signed-off-by: Fabiano Fidêncio --- .github/workflows/payload-after-push.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/payload-after-push.yaml b/.github/workflows/payload-after-push.yaml index 741af5902..b828c57aa 100644 --- a/.github/workflows/payload-after-push.yaml +++ b/.github/workflows/payload-after-push.yaml @@ -28,7 +28,8 @@ jobs: needs: build-assets-arm64 uses: ./.github/workflows/publish-kata-deploy-payload-arm64.yaml with: - registry: quay.io/kata-containers/kata-deploy-ci + registry: quay.io + repo: kata-containers/kata-deploy-ci tag: kata-containers-arm64 secrets: inherit @@ -36,7 +37,8 @@ jobs: needs: build-assets-s390x uses: ./.github/workflows/publish-kata-deploy-payload-s390x.yaml with: - registry: quay.io/kata-containers/kata-deploy-ci + registry: quay.io + repo: kata-containers/kata-deploy-ci tag: kata-containers-s390x secrets: inherit From 13929fc610d30a31a6ba5379c92deaf06af90f6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Wed, 5 Apr 2023 12:30:19 +0200 Subject: [PATCH 044/137] gha: publish-kata-deploy-payload: Improve registry login MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's only try to login to the registry that's being passed as an input argument. 
Signed-off-by: Fabiano Fidêncio --- .github/workflows/ci-on-push.yaml | 1 - .github/workflows/publish-kata-deploy-payload-amd64.yaml | 7 ++----- .github/workflows/publish-kata-deploy-payload-arm64.yaml | 7 ++----- .github/workflows/publish-kata-deploy-payload-s390x.yaml | 7 ++----- 4 files changed, 6 insertions(+), 16 deletions(-) diff --git a/.github/workflows/ci-on-push.yaml b/.github/workflows/ci-on-push.yaml index da870d7d2..f2cd7e055 100644 --- a/.github/workflows/ci-on-push.yaml +++ b/.github/workflows/ci-on-push.yaml @@ -23,7 +23,6 @@ jobs: registry: ghcr.io repo: ${{ github.repository_owner }}/kata-deploy-ci tag: ${{ github.event.workflow_run.head_sha }}-amd64 - quay-io-login-continue-on-error: true secrets: inherit run-k8s-tests-on-aks: diff --git a/.github/workflows/publish-kata-deploy-payload-amd64.yaml b/.github/workflows/publish-kata-deploy-payload-amd64.yaml index bcee25eea..697fdb433 100644 --- a/.github/workflows/publish-kata-deploy-payload-amd64.yaml +++ b/.github/workflows/publish-kata-deploy-payload-amd64.yaml @@ -18,10 +18,6 @@ on: tag: required: true type: string - quay-io-login-continue-on-error: - required: false - type: boolean - default: false jobs: kata-payload: @@ -37,14 +33,15 @@ jobs: name: kata-static-tarball-amd64${{ inputs.tarball-suffix }} - name: Login to Kata Containers quay.io + if: ${{ inputs.registry == 'quay.io' }} uses: docker/login-action@v2 with: registry: quay.io username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - continue-on-error: ${{ inputs.quay-io-login-continue-on-error }} - name: Login to Kata Containers ghcr.io + if: ${{ inputs.registry == 'ghcr.io' }} uses: docker/login-action@v2 with: registry: ghcr.io diff --git a/.github/workflows/publish-kata-deploy-payload-arm64.yaml b/.github/workflows/publish-kata-deploy-payload-arm64.yaml index 285ac97e4..f1de4abac 100644 --- a/.github/workflows/publish-kata-deploy-payload-arm64.yaml +++ b/.github/workflows/publish-kata-deploy-payload-arm64.yaml @@ -18,10 +18,6 @@ on: tag: required: true type: string - quay-io-login-continue-on-error: - required: false - type: boolean - default: false jobs: kata-payload: @@ -41,14 +37,15 @@ jobs: name: kata-static-tarball-arm64${{ inputs.tarball-suffix }} - name: Login to Kata Containers quay.io + if: ${{ inputs.registry == 'quay.io' }} uses: docker/login-action@v2 with: registry: quay.io username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - continue-on-error: ${{ inputs.quay-io-login-continue-on-error }} - name: Login to Kata Containers ghcr.io + if: ${{ inputs.registry == 'ghcr.io' }} uses: docker/login-action@v2 with: registry: ghcr.io diff --git a/.github/workflows/publish-kata-deploy-payload-s390x.yaml b/.github/workflows/publish-kata-deploy-payload-s390x.yaml index 4341e4397..498b8cd98 100644 --- a/.github/workflows/publish-kata-deploy-payload-s390x.yaml +++ b/.github/workflows/publish-kata-deploy-payload-s390x.yaml @@ -18,10 +18,6 @@ on: tag: required: true type: string - quay-io-login-continue-on-error: - required: false - type: boolean - default: false jobs: kata-payload: @@ -41,14 +37,15 @@ jobs: name: kata-static-tarball-s390x${{ inputs.tarball-suffix }} - name: Login to Kata Containers quay.io + if: ${{ inputs.registry == 'quay.io' }} uses: docker/login-action@v2 with: registry: quay.io username: ${{ secrets.QUAY_DEPLOYER_USERNAME }} password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }} - continue-on-error: ${{ inputs.quay-io-login-continue-on-error }} - name: Login to 
Kata Containers ghcr.io
+        if: ${{ inputs.registry == 'ghcr.io' }}
         uses: docker/login-action@v2
         with:
           registry: ghcr.io

From e81b8b8ee5a60e4cc7822f2df5803c8ddd07cff4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Wed, 5 Apr 2023 12:32:46 +0200
Subject: [PATCH 045/137] local-build: build-and-upload-payload is not quay.io
 specific
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Let's just print "to the registry" instead of printing "to quay.io",
as the registry used is not tied to quay.io.

Signed-off-by: Fabiano Fidêncio
---
 .../local-build/kata-deploy-build-and-upload-payload.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh
index c4ff1abf5..b0cb5676b 100755
--- a/tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh
+++ b/tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh
@@ -20,7 +20,7 @@ IMAGE_TAG="${REGISTRY}:kata-containers-$(git rev-parse HEAD)-$(uname -m)"
 echo "Building the image"
 docker build --tag ${IMAGE_TAG} .

-echo "Pushing the image to quay.io"
+echo "Pushing the image to the registry"
 docker push ${IMAGE_TAG}

 if [ -n "${TAG}" ]; then
@@ -30,7 +30,7 @@ if [ -n "${TAG}" ]; then

 	docker build --tag ${ADDITIONAL_TAG} .

-	echo "Pushing the image ${ADDITIONAL_TAG} to quay.io"
+	echo "Pushing the image ${ADDITIONAL_TAG} to the registry"
 	docker push ${ADDITIONAL_TAG}
 fi

From 2550d4462dded7927b353b345166e10f4b04f6bc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Tue, 4 Apr 2023 10:50:39 +0200
Subject: [PATCH 046/137] gha: build-kata-static-tarball: Only push to
 registry after merge
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

56331bd7bcfbc6aabd8a732a10c544ba3df84622 overlooked the fact that we
mistakenly tried to push the build containers to the registry for a
PR, rather than doing so only when the code is merged.

As the workflow is now shared between different actions, let's
introduce an input variable to specify in which cases we actually need
to push to the registry.
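On the consumer side, the build scripts are expected to honour the
flag along these lines (a hedged sketch, not the exact implementation;
IMAGE_TAG is illustrative):

    # Only push the freshly built builder image when explicitly asked
    # to, i.e. after the code is merged, never for a PR build.
    if [ "${PUSH_TO_REGISTRY:-no}" = "yes" ]; then
        docker push "${IMAGE_TAG}"
    fi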
Fixes: #6592 Signed-off-by: Fabiano Fidêncio --- .github/workflows/build-kata-static-tarball-amd64.yaml | 6 +++++- .github/workflows/build-kata-static-tarball-arm64.yaml | 6 +++++- .github/workflows/build-kata-static-tarball-s390x.yaml | 6 +++++- .github/workflows/payload-after-push.yaml | 6 ++++++ 4 files changed, 21 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-kata-static-tarball-amd64.yaml b/.github/workflows/build-kata-static-tarball-amd64.yaml index f668d16fb..d2c902af2 100644 --- a/.github/workflows/build-kata-static-tarball-amd64.yaml +++ b/.github/workflows/build-kata-static-tarball-amd64.yaml @@ -9,6 +9,10 @@ on: tarball-suffix: required: false type: string + push-to-registry: + required: false + type: string + default: no jobs: build-asset: @@ -40,7 +44,7 @@ jobs: env: KATA_ASSET: ${{ matrix.asset }} TAR_OUTPUT: ${{ matrix.asset }}.tar.gz - PUSH_TO_REGISTRY: yes + PUSH_TO_REGISTRY: ${{ inputs.push-to-registry }} - name: store-artifact ${{ matrix.asset }} uses: actions/upload-artifact@v3 diff --git a/.github/workflows/build-kata-static-tarball-arm64.yaml b/.github/workflows/build-kata-static-tarball-arm64.yaml index e797dc824..a50862231 100644 --- a/.github/workflows/build-kata-static-tarball-arm64.yaml +++ b/.github/workflows/build-kata-static-tarball-arm64.yaml @@ -9,6 +9,10 @@ on: tarball-suffix: required: false type: string + push-to-registry: + required: false + type: string + default: no jobs: build-asset: @@ -44,7 +48,7 @@ jobs: env: KATA_ASSET: ${{ matrix.asset }} TAR_OUTPUT: ${{ matrix.asset }}.tar.gz - PUSH_TO_REGISTRY: yes + PUSH_TO_REGISTRY: ${{ inputs.push-to-registry }} - name: store-artifact ${{ matrix.asset }} uses: actions/upload-artifact@v3 diff --git a/.github/workflows/build-kata-static-tarball-s390x.yaml b/.github/workflows/build-kata-static-tarball-s390x.yaml index cf22379b3..064ed006b 100644 --- a/.github/workflows/build-kata-static-tarball-s390x.yaml +++ b/.github/workflows/build-kata-static-tarball-s390x.yaml @@ -9,6 +9,10 @@ on: tarball-suffix: required: false type: string + push-to-registry: + required: false + type: string + default: no jobs: build-asset: @@ -41,7 +45,7 @@ jobs: env: KATA_ASSET: ${{ matrix.asset }} TAR_OUTPUT: ${{ matrix.asset }}.tar.gz - PUSH_TO_REGISTRY: yes + PUSH_TO_REGISTRY: ${{ inputs.push-to-registry }} - name: store-artifact ${{ matrix.asset }} uses: actions/upload-artifact@v3 diff --git a/.github/workflows/payload-after-push.yaml b/.github/workflows/payload-after-push.yaml index b828c57aa..25a7a18c2 100644 --- a/.github/workflows/payload-after-push.yaml +++ b/.github/workflows/payload-after-push.yaml @@ -8,12 +8,18 @@ on: jobs: build-assets-amd64: uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml + with: + push-to-registry: yes build-assets-arm64: uses: ./.github/workflows/build-kata-static-tarball-arm64.yaml + with: + push-to-registry: yes build-assets-s390x: uses: ./.github/workflows/build-kata-static-tarball-s390x.yaml + with: + push-to-registry: yes publish-kata-deploy-payload-amd64: needs: build-assets-amd64 From 108d80a86dc50ce6c988484186c3ae3ca6ed8328 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Wed, 5 Apr 2023 15:53:03 +0200 Subject: [PATCH 047/137] gha: Add the ability to also test Dragonball MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With the changes proposed as part of this PR, an AKS cluster will be created but no tests will be performed. 
The reason we have to do this is because GitHub Actions will only run
the tests using the workflows that are part of the **target** branch,
instead of using the ones coming from the PR, and we haven't yet found
a way to work around this.

Once this commit is in, we'll actually change the tests themselves
(not the yaml files for the actions), as those will be the ones we
want, since the checkout action helps us in this case.

Fixes: #6583

Signed-off-by: Fabiano Fidêncio
---
 tests/integration/kubernetes/run_kubernetes_tests.sh | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tests/integration/kubernetes/run_kubernetes_tests.sh b/tests/integration/kubernetes/run_kubernetes_tests.sh
index db1e16633..2d11f744f 100755
--- a/tests/integration/kubernetes/run_kubernetes_tests.sh
+++ b/tests/integration/kubernetes/run_kubernetes_tests.sh
@@ -54,6 +54,10 @@ else
 	)
 fi

+if [ ${KATA_HYPERVISOR} == "dragonball" ]; then
+	exit 0
+fi
+
 # we may need to skip a few test cases when running on non-x86_64 arch
 arch_config_file="${kubernetes_dir}/filter_out_per_arch/${TARGET_ARCH}.yaml"
 if [ -f "${arch_config_file}" ]; then

From 1688e4f3f09dec53de40d8c8a09579b6d8372912 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Wed, 5 Apr 2023 16:02:17 +0200
Subject: [PATCH 048/137] gha: aks: Use D4s_v5 instance
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

It's been pointed out that D4s_v5 instances are more powerful than the
D4s_v3 ones, and have the very same price.

With this in mind, let's switch to the newer machines.

Fixes: #6606

Signed-off-by: Fabiano Fidêncio
---
 .github/workflows/create-aks.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/create-aks.yaml b/.github/workflows/create-aks.yaml
index b2b6c76e7..cfd08f5cd 100644
--- a/.github/workflows/create-aks.yaml
+++ b/.github/workflows/create-aks.yaml
@@ -27,6 +27,6 @@ jobs:
           az aks create \
             -g "kataCI" \
             -n "${{ inputs.name }}" \
-            -s "Standard_D4s_v3" \
+            -s "Standard_D4s_v5" \
             --node-count 1 \
             --generate-ssh-keys

From 85cc5bb5343cdb46f3abc80a9134820a3f694474 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Thu, 6 Apr 2023 08:41:51 +0200
Subject: [PATCH 049/137] gha: k8s-on-aks: Fix cluster name
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This was missed from the last series, as GHA will use the "target
branch" yaml file to start the workflow.

Basically we changed the name of the cluster created to stop relying
on the PR number, as that's not easily accessible on `workflow_run`.
Fixes: #6611

Signed-off-by: Fabiano Fidêncio
---
 .github/workflows/run-k8s-tests-on-aks.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/run-k8s-tests-on-aks.yaml b/.github/workflows/run-k8s-tests-on-aks.yaml
index 5beca17a4..371fd05a8 100644
--- a/.github/workflows/run-k8s-tests-on-aks.yaml
+++ b/.github/workflows/run-k8s-tests-on-aks.yaml
@@ -26,7 +26,7 @@ jobs:
           - qemu
     uses: ./.github/workflows/create-aks.yaml
     with:
-      name: ${{ github.event.pull_request.number }}-${{ github.sha }}-${{ matrix.vmm }}-amd64
+      name: ${{ inputs.checkout-ref }}-${{ matrix.vmm }}-amd64
     secrets: inherit

   run-k8s-tests:
@@ -62,7 +62,7 @@ jobs:
       - name: Download credentials for the Kubernetes CLI to use them
         run: |
-          az aks get-credentials -g "kataCI" -n ${{ github.event.pull_request.number }}-${{ github.sha }}-${{ matrix.vmm }}-amd64
+          az aks get-credentials -g "kataCI" -n ${{ inputs.checkout-ref }}-${{ matrix.vmm }}-amd64

       - name: Deploy kata-deploy
         run: |
@@ -96,5 +96,5 @@ jobs:
     if: always()
     uses: ./.github/workflows/delete-aks.yaml
     with:
-      name: ${{ github.event.pull_request.number }}-${{ github.sha }}-${{ matrix.vmm }}-amd64
+      name: ${{ inputs.checkout-ref }}-${{ matrix.vmm }}-amd64
     secrets: inherit

From dc6569dbbc8399a4db43baae8f226420386f8e83 Mon Sep 17 00:00:00 2001
From: "alex.lyn"
Date: Thu, 6 Apr 2023 16:31:02 +0800
Subject: [PATCH 050/137] runtime-rs/virtio-fs: add extra handler support for
 cache mode.

Add support for the virtiofsd cache mode options that users specify
via virtio_fs_extra_args (e.g. "-o cache=auto,...").

Fixes: #6615

Signed-off-by: alex.lyn
---
 .../crates/hypervisor/src/dragonball/inner_device.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/runtime-rs/crates/hypervisor/src/dragonball/inner_device.rs b/src/runtime-rs/crates/hypervisor/src/dragonball/inner_device.rs
index d6f7baecb..48d9a3508 100644
--- a/src/runtime-rs/crates/hypervisor/src/dragonball/inner_device.rs
+++ b/src/runtime-rs/crates/hypervisor/src/dragonball/inner_device.rs
@@ -188,6 +188,9 @@ impl DragonballInner {
                 let args: Vec<&str> = opt_list.split(',').collect();
                 for arg in args {
                     match arg {
+                        "cache=none" => fs_cfg.cache_policy = String::from("none"),
+                        "cache=auto" => fs_cfg.cache_policy = String::from("auto"),
+                        "cache=always" => fs_cfg.cache_policy = String::from("always"),
                         "no_open" => fs_cfg.no_open = true,
                         "open" => fs_cfg.no_open = false,
                         "writeback_cache" => fs_cfg.writeback_cache = true,

From 13d857a56dbd3a64215c5d31a69d7cb26e5eeeb1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Thu, 6 Apr 2023 16:50:55 +0200
Subject: [PATCH 051/137] gha: k8s-on-aks: Set {create,delete}_aks as steps
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We're currently using {create,delete}_aks as jobs. However, this means
that if the tests fail we'll delete the AKS cluster (as expected) but
have no way to recreate it without re-running all jobs, which is a
waste of resources.
Fixes: #6628

Signed-off-by: Fabiano Fidêncio
---
 .github/workflows/run-k8s-tests-on-aks.yaml | 36 ++++++---------------
 1 file changed, 10 insertions(+), 26 deletions(-)

diff --git a/.github/workflows/run-k8s-tests-on-aks.yaml b/.github/workflows/run-k8s-tests-on-aks.yaml
index 371fd05a8..5c5e51071 100644
--- a/.github/workflows/run-k8s-tests-on-aks.yaml
+++ b/.github/workflows/run-k8s-tests-on-aks.yaml
@@ -17,18 +17,6 @@ on:
       type: string

 jobs:
-  create-aks:
-    strategy:
-      matrix:
-        vmm:
-          - clh
-          - dragonball
-          - qemu
-    uses: ./.github/workflows/create-aks.yaml
-    with:
-      name: ${{ inputs.checkout-ref }}-${{ matrix.vmm }}-amd64
-    secrets: inherit
-
   run-k8s-tests:
     strategy:
       fail-fast: false
@@ -38,8 +26,12 @@ jobs:
           - dragonball
           - qemu
     runs-on: ubuntu-latest
-    needs: create-aks
     steps:
+      - name: Create AKS cluster to test ${{ matrix.vmm }}
+        uses: ./.github/workflows-create-aks.yaml
+        with:
+          name: ${{ inputs.checkout-ref }}-${{ matrix.vmm }}-amd64
+
       - uses: actions/checkout@v3
         with:
           ref: ${{ inputs.checkout-ref }}
@@ -85,16 +77,8 @@ jobs:
         env:
           KATA_HYPERVISOR: ${{ matrix.vmm }}

-  delete-aks:
-    strategy:
-      matrix:
-        vmm:
-          - clh
-          - dragonball
-          - qemu
-    needs: run-k8s-tests
-    if: always()
-    uses: ./.github/workflows/delete-aks.yaml
-    with:
-      name: ${{ inputs.checkout-ref }}-${{ matrix.vmm }}-amd64
-    secrets: inherit
+      - name: Delete AKS cluster used to test ${{ matrix.vmm }}
+        if: always()
+        uses: ./.github/workflows/delete-aks.yaml
+        with:
+          name: ${{ inputs.checkout-ref }}-${{ matrix.vmm }}-amd64

From 5d4d720647065dd2132205146d4e015acc22b7cb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Thu, 6 Apr 2023 18:37:21 +0200
Subject: [PATCH 052/137] Revert "gha: k8s-on-aks: Fix cluster name"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit 85cc5bb5343cdb46f3abc80a9134820a3f694474.

Unfortunately we have to revert the PRs related to the switch done to
using `workflow_run` instead of `pull_request_target`. The reason for
that being that we can only mark jobs as required if they are
targeting PRs.
Signed-off-by: Fabiano Fidêncio
---
 .github/workflows/run-k8s-tests-on-aks.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/run-k8s-tests-on-aks.yaml b/.github/workflows/run-k8s-tests-on-aks.yaml
index 5c5e51071..b444d8db9 100644
--- a/.github/workflows/run-k8s-tests-on-aks.yaml
+++ b/.github/workflows/run-k8s-tests-on-aks.yaml
@@ -30,7 +30,7 @@ jobs:
       - name: Create AKS cluster to test ${{ matrix.vmm }}
         uses: ./.github/workflows-create-aks.yaml
         with:
-          name: ${{ inputs.checkout-ref }}-${{ matrix.vmm }}-amd64
+          name: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-${{ matrix.vmm }}-amd64

       - uses: actions/checkout@v3
         with:
@@ -54,7 +54,7 @@ jobs:
       - name: Download credentials for the Kubernetes CLI to use them
         run: |
-          az aks get-credentials -g "kataCI" -n ${{ inputs.checkout-ref }}-${{ matrix.vmm }}-amd64
+          az aks get-credentials -g "kataCI" -n ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-${{ matrix.vmm }}-amd64

       - name: Deploy kata-deploy
         run: |
@@ -81,4 +81,4 @@ jobs:
         if: always()
         uses: ./.github/workflows/delete-aks.yaml
         with:
-          name: ${{ inputs.checkout-ref }}-${{ matrix.vmm }}-amd64
+          name: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-${{ matrix.vmm }}-amd64

From c7ee45f7e53c2a1fac31fbc92f11505915485ddb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Thu, 6 Apr 2023 18:32:06 +0200
Subject: [PATCH 053/137] Revert "gha: ci-on-push: Adapt chained jobs to
 workflow_run"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit 7855b43062c1001de372253d33ebacaf120f24d1.

Unfortunately we have to revert the PRs related to the switch done to
using `workflow_run` instead of `pull_request_target`. The reason for
that being that we can only mark jobs as required if they are
targeting PRs.
Signed-off-by: Fabiano Fidêncio --- .github/workflows/build-kata-static-tarball-amd64.yaml | 8 ++------ .github/workflows/build-kata-static-tarball-arm64.yaml | 8 ++------ .github/workflows/build-kata-static-tarball-s390x.yaml | 8 ++------ .github/workflows/ci-on-push.yaml | 3 --- .github/workflows/publish-kata-deploy-payload-amd64.yaml | 6 +----- .github/workflows/publish-kata-deploy-payload-arm64.yaml | 6 +----- .github/workflows/publish-kata-deploy-payload-s390x.yaml | 6 +----- .github/workflows/run-k8s-tests-on-aks.yaml | 6 +----- 8 files changed, 10 insertions(+), 41 deletions(-) diff --git a/.github/workflows/build-kata-static-tarball-amd64.yaml b/.github/workflows/build-kata-static-tarball-amd64.yaml index d2c902af2..8432e7714 100644 --- a/.github/workflows/build-kata-static-tarball-amd64.yaml +++ b/.github/workflows/build-kata-static-tarball-amd64.yaml @@ -2,10 +2,6 @@ name: CI | Build kata-static tarball for amd64 on: workflow_call: inputs: - checkout-ref: - required: false - type: string - default: ${{ github.sha }} tarball-suffix: required: false type: string @@ -33,7 +29,7 @@ jobs: steps: - uses: actions/checkout@v3 with: - ref: ${{ inputs.checkout-ref }} + ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # This is needed in order to keep the commit ids history - name: Build ${{ matrix.asset }} run: | @@ -60,7 +56,7 @@ jobs: steps: - uses: actions/checkout@v3 with: - ref: ${{ inputs.checkout-ref }} + ref: ${{ github.event.pull_request.head.sha }} - name: get-artifacts uses: actions/download-artifact@v3 with: diff --git a/.github/workflows/build-kata-static-tarball-arm64.yaml b/.github/workflows/build-kata-static-tarball-arm64.yaml index a50862231..753bcf13a 100644 --- a/.github/workflows/build-kata-static-tarball-arm64.yaml +++ b/.github/workflows/build-kata-static-tarball-arm64.yaml @@ -2,10 +2,6 @@ name: CI | Build kata-static tarball for arm64 on: workflow_call: inputs: - checkout-ref: - required: false - type: string - default: ${{ github.sha }} tarball-suffix: required: false type: string @@ -37,7 +33,7 @@ jobs: - uses: actions/checkout@v3 with: - ref: ${{ inputs.checkout-ref }} + ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # This is needed in order to keep the commit ids history - name: Build ${{ matrix.asset }} run: | @@ -68,7 +64,7 @@ jobs: - uses: actions/checkout@v3 with: - ref: ${{ inputs.checkout-ref }} + ref: ${{ github.event.pull_request.head.sha }} - name: get-artifacts uses: actions/download-artifact@v3 with: diff --git a/.github/workflows/build-kata-static-tarball-s390x.yaml b/.github/workflows/build-kata-static-tarball-s390x.yaml index 064ed006b..95e4a5ff5 100644 --- a/.github/workflows/build-kata-static-tarball-s390x.yaml +++ b/.github/workflows/build-kata-static-tarball-s390x.yaml @@ -2,10 +2,6 @@ name: CI | Build kata-static tarball for s390x on: workflow_call: inputs: - checkout-ref: - required: false - type: string - default: ${{ github.sha }} tarball-suffix: required: false type: string @@ -33,7 +29,7 @@ jobs: - uses: actions/checkout@v3 with: - ref: ${{ inputs.checkout-ref }} + ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # This is needed in order to keep the commit ids history - name: Build ${{ matrix.asset }} run: | @@ -65,7 +61,7 @@ jobs: - uses: actions/checkout@v3 with: - ref: ${{ inputs.checkout-ref }} + ref: ${{ github.event.pull_request.head.sha }} - name: get-artifacts uses: actions/download-artifact@v3 with: diff --git a/.github/workflows/ci-on-push.yaml b/.github/workflows/ci-on-push.yaml index 
f2cd7e055..1093992f6 100644 --- a/.github/workflows/ci-on-push.yaml +++ b/.github/workflows/ci-on-push.yaml @@ -11,14 +11,12 @@ jobs: if: ${{ github.event.workflow_run.conclusion == 'success' }} uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml with: - checkout-ref: ${{ github.event.workflow_run.head_sha }} tarball-suffix: -${{ github.event.workflow_run.head_sha }} publish-kata-deploy-payload-amd64: needs: build-kata-static-tarball-amd64 uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml with: - checkout-ref: ${{ github.event.workflow_run.head_sha }} tarball-suffix: -${{ github.event.workflow_run.head_sha }} registry: ghcr.io repo: ${{ github.repository_owner }}/kata-deploy-ci @@ -29,7 +27,6 @@ jobs: needs: publish-kata-deploy-payload-amd64 uses: ./.github/workflows/run-k8s-tests-on-aks.yaml with: - checkout-ref: ${{ github.event.workflow_run.head_sha }} registry: ghcr.io repo: ${{ github.repository_owner }}/kata-deploy-ci tag: ${{ github.event.workflow_run.head_sha }}-amd64 diff --git a/.github/workflows/publish-kata-deploy-payload-amd64.yaml b/.github/workflows/publish-kata-deploy-payload-amd64.yaml index 697fdb433..91c7a0612 100644 --- a/.github/workflows/publish-kata-deploy-payload-amd64.yaml +++ b/.github/workflows/publish-kata-deploy-payload-amd64.yaml @@ -2,10 +2,6 @@ name: CI | Publish kata-deploy payload for amd64 on: workflow_call: inputs: - checkout-ref: - required: false - type: string - default: ${{ github.sha }} tarball-suffix: required: false type: string @@ -25,7 +21,7 @@ jobs: steps: - uses: actions/checkout@v3 with: - ref: ${{ inputs.checkout-ref }} + ref: ${{ github.event.pull_request.head.sha }} - name: get-kata-tarball uses: actions/download-artifact@v3 diff --git a/.github/workflows/publish-kata-deploy-payload-arm64.yaml b/.github/workflows/publish-kata-deploy-payload-arm64.yaml index f1de4abac..c4fd32477 100644 --- a/.github/workflows/publish-kata-deploy-payload-arm64.yaml +++ b/.github/workflows/publish-kata-deploy-payload-arm64.yaml @@ -2,10 +2,6 @@ name: CI | Publish kata-deploy payload for arm64 on: workflow_call: inputs: - checkout-ref: - required: false - type: string - default: ${{ github.sha }} tarball-suffix: required: false type: string @@ -29,7 +25,7 @@ jobs: - uses: actions/checkout@v3 with: - ref: ${{ inputs.checkout-ref }} + ref: ${{ github.event.pull_request.head.sha }} - name: get-kata-tarball uses: actions/download-artifact@v3 diff --git a/.github/workflows/publish-kata-deploy-payload-s390x.yaml b/.github/workflows/publish-kata-deploy-payload-s390x.yaml index 498b8cd98..2a0ea8071 100644 --- a/.github/workflows/publish-kata-deploy-payload-s390x.yaml +++ b/.github/workflows/publish-kata-deploy-payload-s390x.yaml @@ -2,10 +2,6 @@ name: CI | Publish kata-deploy payload for s390x on: workflow_call: inputs: - checkout-ref: - required: false - type: string - default: ${{ github.sha }} tarball-suffix: required: false type: string @@ -29,7 +25,7 @@ jobs: - uses: actions/checkout@v3 with: - ref: ${{ inputs.checkout-ref }} + ref: ${{ github.event.pull_request.head.sha }} - name: get-kata-tarball uses: actions/download-artifact@v3 diff --git a/.github/workflows/run-k8s-tests-on-aks.yaml b/.github/workflows/run-k8s-tests-on-aks.yaml index b444d8db9..715814818 100644 --- a/.github/workflows/run-k8s-tests-on-aks.yaml +++ b/.github/workflows/run-k8s-tests-on-aks.yaml @@ -2,10 +2,6 @@ name: CI | Run kubernetes tests on AKS on: workflow_call: inputs: - checkout-ref: - required: false - type: string - default: ${{ github.sha }} registry: 
required: true
         type: string
@@ -34,7 +30,7 @@ jobs:

       - uses: actions/checkout@v3
         with:
-          ref: ${{ inputs.checkout-ref }}
+          ref: ${{ github.event.pull_request.head.sha }}
       - name: Install `bats`
         run: |
           sudo apt-get update

From 0d96d496331606909630dee84028cde998888a97 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Thu, 6 Apr 2023 18:33:31 +0200
Subject: [PATCH 054/137] Revert "gha: ci-on-push: Adjust to using
 workflow_run"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit 3a760a157a05a38327468b92e2c3c1b30283b303.

Unfortunately we have to revert the PRs related to the switch done to
using `workflow_run` instead of `pull_request_target`. The reason for
that being that we can only mark jobs as required if they are
targeting PRs.

Signed-off-by: Fabiano Fidêncio
---
 .github/workflows/ci-on-push.yaml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/ci-on-push.yaml b/.github/workflows/ci-on-push.yaml
index 1093992f6..242c62c17 100644
--- a/.github/workflows/ci-on-push.yaml
+++ b/.github/workflows/ci-on-push.yaml
@@ -11,16 +11,16 @@ jobs:
     if: ${{ github.event.workflow_run.conclusion == 'success' }}
     uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml
     with:
-      tarball-suffix: -${{ github.event.workflow_run.head_sha }}
+      tarball-suffix: -${{ github.event.pull_request.number}}-${{ github.event.pull_request.head.sha }}

   publish-kata-deploy-payload-amd64:
     needs: build-kata-static-tarball-amd64
     uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml
     with:
-      tarball-suffix: -${{ github.event.workflow_run.head_sha }}
+      tarball-suffix: -${{ github.event.pull_request.number}}-${{ github.event.pull_request.head.sha }}
       registry: ghcr.io
       repo: ${{ github.repository_owner }}/kata-deploy-ci
-      tag: ${{ github.event.workflow_run.head_sha }}-amd64
+      tag: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-amd64
     secrets: inherit

   run-k8s-tests-on-aks:
@@ -29,5 +29,5 @@ jobs:
     with:
       registry: ghcr.io
       repo: ${{ github.repository_owner }}/kata-deploy-ci
-      tag: ${{ github.event.workflow_run.head_sha }}-amd64
+      tag: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-amd64
     secrets: inherit

From e7bd2545ef680ad89e1a6abed948649ddbe874c1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Thu, 6 Apr 2023 18:33:48 +0200
Subject: [PATCH 055/137] Revert "gha: ci-on-push: Depend on Commit Message
 Check"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit a159ffdba7835011abc8c6369e07dd4a52010bbf.

Unfortunately we have to revert the PRs related to the switch done to
using `workflow_run` instead of `pull_request_target`. The reason for
that being that we can only mark jobs as required if they are
targeting PRs.
Signed-off-by: Fabiano Fidêncio
---
 .github/workflows/ci-on-push.yaml | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/.github/workflows/ci-on-push.yaml b/.github/workflows/ci-on-push.yaml
index 242c62c17..8a08d6b6e 100644
--- a/.github/workflows/ci-on-push.yaml
+++ b/.github/workflows/ci-on-push.yaml
@@ -1,14 +1,9 @@
 name: Kata Containers CI
 on:
-  workflow_run:
-    workflows:
-      - Commit Message Check
-    types:
-      - completed
+  pull_request_target:

 jobs:
   build-kata-static-tarball-amd64:
-    if: ${{ github.event.workflow_run.conclusion == 'success' }}
     uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml
     with:
       tarball-suffix: -${{ github.event.pull_request.number}}-${{ github.event.pull_request.head.sha }}

From 2f35b4d4e5d6eb0249affde29521a4779015aae4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Thu, 6 Apr 2023 18:40:23 +0200
Subject: [PATCH 056/137] gha: ci-on-push: Only run on `main` branch
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Let's ensure we're only running this workflow when PRs are opened
against the main branch.

Signed-off-by: Fabiano Fidêncio
---
 .github/workflows/ci-on-push.yaml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/workflows/ci-on-push.yaml b/.github/workflows/ci-on-push.yaml
index 8a08d6b6e..2dfd6c728 100644
--- a/.github/workflows/ci-on-push.yaml
+++ b/.github/workflows/ci-on-push.yaml
@@ -1,6 +1,8 @@
 name: Kata Containers CI
 on:
   pull_request_target:
+    branches:
+      - 'main'

 jobs:
   build-kata-static-tarball-amd64:

From 79f3047f061093e19d84b3016674577e8b9cbaa1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Thu, 6 Apr 2023 22:34:33 +0200
Subject: [PATCH 057/137] gha: k8s-on-aks: {create,delete} AKS must be a
 coded-in step
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

I should have seen this coming, but currently the "create" and
"delete" AKS workflows cannot be imported and used as a job's step,
resulting in an error trying to find the corresponding action.yaml
file for those.
Fixes: #6630 Signed-off-by: Fabiano Fidêncio --- .github/workflows/create-aks.yaml | 32 --------------- .github/workflows/delete-aks.yaml | 31 --------------- .github/workflows/run-k8s-tests-on-aks.yaml | 43 +++++++++++++-------- 3 files changed, 27 insertions(+), 79 deletions(-) delete mode 100644 .github/workflows/create-aks.yaml delete mode 100644 .github/workflows/delete-aks.yaml diff --git a/.github/workflows/create-aks.yaml b/.github/workflows/create-aks.yaml deleted file mode 100644 index cfd08f5cd..000000000 --- a/.github/workflows/create-aks.yaml +++ /dev/null @@ -1,32 +0,0 @@ -name: CI | Create AKS cluster -on: - workflow_call: - inputs: - name: - required: true - type: string - -jobs: - create-aks: - runs-on: ubuntu-latest - steps: - - name: Download Azure CLI - run: | - curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash - - - name: Log into the Azure account - run: | - az login \ - --service-principal \ - -u "${{ secrets.AZ_APPID }}" \ - -p "${{ secrets.AZ_PASSWORD }}" \ - --tenant "${{ secrets.AZ_TENANT_ID }}" - - - name: Create AKS cluster - run: | - az aks create \ - -g "kataCI" \ - -n "${{ inputs.name }}" \ - -s "Standard_D4s_v5" \ - --node-count 1 \ - --generate-ssh-keys diff --git a/.github/workflows/delete-aks.yaml b/.github/workflows/delete-aks.yaml deleted file mode 100644 index 2c9e6d21a..000000000 --- a/.github/workflows/delete-aks.yaml +++ /dev/null @@ -1,31 +0,0 @@ -name: CI | Delete AKS cluster -on: - workflow_call: - inputs: - name: - required: true - type: string - -jobs: - delete-aks: - runs-on: ubuntu-latest - steps: - - name: Download Azure CLI - run: | - curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash - - - name: Log into the Azure account - run: | - az login \ - --service-principal \ - -u "${{ secrets.AZ_APPID }}" \ - -p "${{ secrets.AZ_PASSWORD }}" \ - --tenant "${{ secrets.AZ_TENANT_ID }}" - - - name: Delete AKS cluster - run: | - az aks delete \ - -g "kataCI" \ - -n "${{ inputs.name }}" \ - --yes \ - --no-wait diff --git a/.github/workflows/run-k8s-tests-on-aks.yaml b/.github/workflows/run-k8s-tests-on-aks.yaml index 715814818..96b6a9307 100644 --- a/.github/workflows/run-k8s-tests-on-aks.yaml +++ b/.github/workflows/run-k8s-tests-on-aks.yaml @@ -23,22 +23,13 @@ jobs: - qemu runs-on: ubuntu-latest steps: - - name: Create AKS cluster to test ${{ matrix.vmm }} - uses: ./.github/workflows-create-aks.yaml - with: - name: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-${{ matrix.vmm }}-amd64 - - uses: actions/checkout@v3 with: ref: ${{ github.event.pull_request.head.sha }} - - name: Install `bats` - run: | - sudo apt-get update - sudo apt-get -y install bats - - name: Install `kubectl` + - name: Download Azure CLI run: | - sudo az aks install-cli + curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash - name: Log into the Azure account run: | @@ -48,6 +39,24 @@ jobs: -p "${{ secrets.AZ_PASSWORD }}" \ --tenant "${{ secrets.AZ_TENANT_ID }}" + - name: Create AKS cluster + run: | + az aks create \ + -g "kataCI" \ + -n "${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-${{ matrix.vmm }}-amd64" \ + -s "Standard_D4s_v5" \ + --node-count 1 \ + --generate-ssh-keys + + - name: Install `bats` + run: | + sudo apt-get update + sudo apt-get -y install bats + + - name: Install `kubectl` + run: | + sudo az aks install-cli + - name: Download credentials for the Kubernetes CLI to use them run: | az aks get-credentials -g "kataCI" -n ${{ github.event.pull_request.number }}-${{ 
github.event.pull_request.head.sha }}-${{ matrix.vmm }}-amd64 @@ -73,8 +82,10 @@ jobs: env: KATA_HYPERVISOR: ${{ matrix.vmm }} - - name: Delete AKS cluster used to test ${{ matrix.vmm }} - if: always() - uses: ./.github/workflows/delete-aks.yaml - with: - name: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-${{ matrix.vmm }}-amd64 + - name: Delete AKS cluster + run: | + az aks delete \ + -g "kataCI" \ + -n "${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-${{ matrix.vmm }}-amd64" \ + --yes \ + --no-wait From 375187e045fc6d58a442aca569f9bee6bbe13a40 Mon Sep 17 00:00:00 2001 From: Bo Chen Date: Thu, 6 Apr 2023 14:14:32 -0700 Subject: [PATCH 058/137] versions: Upgrade to Cloud Hypervisor v31.0 Details of this release can be found in our new roadmap project as iteration v31.0: https://github.com/orgs/cloud-hypervisor/projects/6. Fixes: #6632 Signed-off-by: Bo Chen --- versions.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/versions.yaml b/versions.yaml index a71d29dfb..e109a1c83 100644 --- a/versions.yaml +++ b/versions.yaml @@ -75,7 +75,7 @@ assets: url: "https://github.com/cloud-hypervisor/cloud-hypervisor" uscan-url: >- https://github.com/cloud-hypervisor/cloud-hypervisor/tags.*/v?(\d\S+)\.tar\.gz - version: "v30.0" + version: "v31.0" firecracker: description: "Firecracker micro-VMM" From c1fbaae8d6c72cb64e8a4681012cac9c3f5ccdf1 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Fri, 7 Apr 2023 17:44:41 +0200 Subject: [PATCH 059/137] rustjail: Use CPUWeight with systemd and CgroupsV2 The CPU shares property belongs to CgroupsV1. CgroupsV2 uses CPU weight instead. The correct value is computed in the latter case but it is passed to systemd using the legacy property. Systemd rejects the request and the agent exits with the following error: Value specified in CPUShares is out of range: unknown Replace the "shares" wording with "weight" in the CgroupsV2 code to avoid confusion. Use the "CPUWeight" property since this is what systemd expects in this case.
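For reference, a sketch of the conversion the helper performs — the function body is untouched by this diff, so the formula shown here is the linear mapping documented by crun (see the references below), taking the CgroupsV1 shares range [2-262144] onto the CgroupsV2 weight range [1-10000]:

```
// Sketch, following the crun-documented mapping referenced below;
// this patch only renames the function, it does not change its body.
fn shares_to_weight(shares: u64) -> u64 {
    if shares == 0 {
        // No shares requested: fall back to the default CgroupsV2 weight.
        return 100;
    }
    1 + ((shares - 2) * 9999) / 262142
}
// e.g. the common default of 1024 shares maps to a weight of 39.
```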
Fixes #6636 References: https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#CPUWeight=weight https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#systemd%20252 https://github.com/containers/crun/blob/main/crun.1.md#cpu-controller Signed-off-by: Greg Kurz --- src/agent/rustjail/src/cgroups/systemd/subsystem/cpu.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/agent/rustjail/src/cgroups/systemd/subsystem/cpu.rs b/src/agent/rustjail/src/cgroups/systemd/subsystem/cpu.rs index 6735b4d3c..7f7667fcd 100644 --- a/src/agent/rustjail/src/cgroups/systemd/subsystem/cpu.rs +++ b/src/agent/rustjail/src/cgroups/systemd/subsystem/cpu.rs @@ -71,7 +71,7 @@ impl Cpu { } // v2: - // cpu.shares <-> CPUShares + // cpu.shares <-> CPUWeight // cpu.period <-> CPUQuotaPeriodUSec // cpu.period & cpu.quota <-> CPUQuotaPerSecUSec fn unified_apply( @@ -80,8 +80,8 @@ impl Cpu { systemd_version: &str, ) -> Result<()> { if let Some(shares) = cpu_resources.shares { - let unified_shares = get_unified_cpushares(shares); - properties.push(("CPUShares", Value::U64(unified_shares))); + let weight = shares_to_weight(shares); + properties.push(("CPUWeight", Value::U64(weight))); } if let Some(period) = cpu_resources.period { @@ -104,7 +104,7 @@ impl Cpu { // ref: https://github.com/containers/crun/blob/main/crun.1.md#cgroup-v2 // [2-262144] to [1-10000] -fn get_unified_cpushares(shares: u64) -> u64 { +fn shares_to_weight(shares: u64) -> u64 { if shares == 0 { return 100; } From 3bfaafbf444631e8d3b180ddb67e9c324fddbf14 Mon Sep 17 00:00:00 2001 From: Zhongtao Hu Date: Sun, 9 Apr 2023 20:43:16 +0800 Subject: [PATCH 060/137] fix: oci hook 1. when doing the deserialization for the OCI hook, we should use camel case for createRuntime 2.
we should pass the bundle directory instead of the path of config.json Fixes: #4693 Signed-off-by: Zhongtao Hu --- src/libs/oci/src/lib.rs | 31 +++++++++++++++++-- src/runtime-rs/crates/runtimes/src/manager.rs | 2 +- .../runtimes/virt_container/src/sandbox.rs | 6 ++-- 3 files changed, 32 insertions(+), 7 deletions(-) diff --git a/src/libs/oci/src/lib.rs b/src/libs/oci/src/lib.rs index 1c70410cf..d48ad4040 100644 --- a/src/libs/oci/src/lib.rs +++ b/src/libs/oci/src/lib.rs @@ -192,11 +192,23 @@ pub struct Hook { pub struct Hooks { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub prestart: Vec, - #[serde(default, skip_serializing_if = "Vec::is_empty")] + #[serde( + rename = "createRuntime", + default, + skip_serializing_if = "Vec::is_empty" + )] pub create_runtime: Vec, - #[serde(default, skip_serializing_if = "Vec::is_empty")] + #[serde( + rename = "createContainer", + default, + skip_serializing_if = "Vec::is_empty" + )] pub create_container: Vec, - #[serde(default, skip_serializing_if = "Vec::is_empty")] + #[serde( + rename = "startContainer", + default, + skip_serializing_if = "Vec::is_empty" + )] pub start_container: Vec, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub poststart: Vec, @@ -837,6 +849,8 @@ pub struct State { #[cfg(test)] mod tests { + use std::vec; + use super::*; #[test] @@ -1027,6 +1041,11 @@ mod tests { "path": "/usr/bin/setup-network" } ], + "createRuntime": [ + { + "path": "/usr/local/bin/nerdctl" + } + ], "poststart": [ { "path": "/usr/bin/notify-start", @@ -1395,6 +1414,12 @@ mod tests { timeout: None, }, ], + create_runtime: vec![crate::Hook { + path: "/usr/local/bin/nerdctl".to_string(), + args: vec![], + env: vec![], + timeout: None, + }], poststart: vec![crate::Hook { path: "/usr/bin/notify-start".to_string(), args: vec![], diff --git a/src/runtime-rs/crates/runtimes/src/manager.rs b/src/runtime-rs/crates/runtimes/src/manager.rs index d8aad3a0a..9649224ca 100644 --- a/src/runtime-rs/crates/runtimes/src/manager.rs +++ b/src/runtime-rs/crates/runtimes/src/manager.rs @@ -236,7 +236,7 @@ impl RuntimeHandlerManager { id: container_config.container_id.to_string(), status: oci::ContainerState::Creating, pid: 0, - bundle: bundler_path, + bundle: container_config.bundle.clone(), annotations: spec.annotations.clone(), }; diff --git a/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs b/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs index f996c5747..d464a7a00 100644 --- a/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs +++ b/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs @@ -188,9 +188,9 @@ impl Sandbox for VirtSandbox { info!(sl!(), "start vm"); // execute pre-start hook functions, including Prestart Hooks and CreateRuntime Hooks - let (prestart_hooks, create_runtime_hooks) = match spec.hooks.as_ref() { - Some(hooks) => (hooks.prestart.clone(), hooks.create_runtime.clone()), - None => (Vec::new(), Vec::new()), + let (prestart_hooks, create_runtime_hooks, _has_oci_hook) = match spec.hooks.as_ref() { + Some(hooks) => (hooks.prestart.clone(), hooks.create_runtime.clone(), true), + None => (Vec::new(), Vec::new(), false), }; self.execute_oci_hook_functions(&prestart_hooks, &create_runtime_hooks, state) .await?; From f3595e48b0f03e255e0ee4c91c4e60bc13c0200e Mon Sep 17 00:00:00 2001 From: "alex.lyn" Date: Thu, 6 Apr 2023 19:59:00 +0800 Subject: [PATCH 061/137] nydus_rootfs/prefetch_files: add prefetch_files for RAFS A sandbox annotation is used to specify the prefetch_files.list path for the container image being
used, and runtime will pass it to Hypervisor to search for corresponding prefetch file: format looks like: "io.katacontainers.config.hypervisor.prefetch_files.list" = /path/to//xyz.com/fedora:36/prefetch_file.list Fixes: #6582 Signed-off-by: alex.lyn --- src/libs/kata-types/src/annotations/mod.rs | 11 +++ .../kata-types/src/config/hypervisor/mod.rs | 15 ++++ .../resource/src/rootfs/nydus_rootfs.rs | 73 ++++++++++++++++++- 3 files changed, 97 insertions(+), 2 deletions(-) diff --git a/src/libs/kata-types/src/annotations/mod.rs b/src/libs/kata-types/src/annotations/mod.rs index c8d63125b..16af5ab28 100644 --- a/src/libs/kata-types/src/annotations/mod.rs +++ b/src/libs/kata-types/src/annotations/mod.rs @@ -308,6 +308,14 @@ pub const KATA_ANNO_CFG_DISABLE_NEW_NETNS: &str = /// A sandbox annotation to specify how attached VFIO devices should be treated. pub const KATA_ANNO_CFG_VFIO_MODE: &str = "io.katacontainers.config.runtime.vfio_mode"; +/// A sandbox annotation used to specify prefetch_files.list host path container image +/// being used, +/// and runtime will pass it to Hypervisor to search for corresponding prefetch list file. +/// "io.katacontainers.config.hypervisor.prefetch_files.list" +/// = "/path/to//xyz.com/fedora:36/prefetch_file.list" +pub const KATA_ANNO_CFG_HYPERVISOR_PREFETCH_FILES_LIST: &str = + "io.katacontainers.config.hypervisor.prefetch_files.list"; + /// A helper structure to query configuration information by check annotations. #[derive(Debug, Default, Deserialize)] pub struct Annotation { @@ -673,6 +681,9 @@ impl Annotation { hv.machine_info.validate_entropy_source(value)?; hv.machine_info.entropy_source = value.to_string(); } + KATA_ANNO_CFG_HYPERVISOR_PREFETCH_FILES_LIST => { + hv.prefetch_list_path = value.to_string(); + } // Hypervisor Memory related annotations KATA_ANNO_CFG_HYPERVISOR_DEFAULT_MEMORY => { match byte_unit::Byte::from_str(value) { diff --git a/src/libs/kata-types/src/config/hypervisor/mod.rs b/src/libs/kata-types/src/config/hypervisor/mod.rs index 98ae2cc79..7818b897c 100644 --- a/src/libs/kata-types/src/config/hypervisor/mod.rs +++ b/src/libs/kata-types/src/config/hypervisor/mod.rs @@ -979,6 +979,13 @@ pub struct Hypervisor { #[serde(default, flatten)] pub shared_fs: SharedFsInfo, + /// A sandbox annotation used to specify prefetch_files.list host path container image + /// being used, and runtime will pass it to Hypervisor to search for corresponding + /// prefetch list file: + /// prefetch_list_path = /path/to//xyz.com/fedora:36/prefetch_file.list + #[serde(default)] + pub prefetch_list_path: String, + /// Vendor customized runtime configuration. 
#[serde(default, flatten)] pub vendor: HypervisorVendor, @@ -1022,6 +1029,10 @@ impl ConfigOps for Hypervisor { hv.network_info.adjust_config()?; hv.security_info.adjust_config()?; hv.shared_fs.adjust_config()?; + resolve_path!( + hv.prefetch_list_path, + "prefetch_list_path `{}` is invalid: {}" + )?; } else { return Err(eother!("Can not find plugin for hypervisor {}", hypervisor)); } @@ -1056,6 +1067,10 @@ impl ConfigOps for Hypervisor { "Hypervisor control executable `{}` is invalid: {}" )?; validate_path!(hv.jailer_path, "Hypervisor jailer path `{}` is invalid: {}")?; + validate_path!( + hv.prefetch_list_path, + "prefetch_files.list path `{}` is invalid: {}" + )?; } else { return Err(eother!("Can not find plugin for hypervisor {}", hypervisor)); } diff --git a/src/runtime-rs/crates/resource/src/rootfs/nydus_rootfs.rs b/src/runtime-rs/crates/resource/src/rootfs/nydus_rootfs.rs index 16f9c48dd..008443b87 100644 --- a/src/runtime-rs/crates/resource/src/rootfs/nydus_rootfs.rs +++ b/src/runtime-rs/crates/resource/src/rootfs/nydus_rootfs.rs @@ -3,7 +3,7 @@ // // SPDX-License-Identifier: Apache-2.0 // -use std::{fs, sync::Arc}; +use std::{fs, path::Path, sync::Arc}; use super::{Rootfs, TYPE_OVERLAY_FS}; use crate::{ @@ -28,6 +28,8 @@ const NYDUS_ROOTFS_V6: &str = "v6"; const SNAPSHOT_DIR: &str = "snapshotdir"; const KATA_OVERLAY_DEV_TYPE: &str = "overlayfs"; +// nydus prefetch file list name +const NYDUS_PREFETCH_FILE_LIST: &str = "prefetch_file.list"; pub(crate) struct NydusRootfs { guest_path: String, @@ -42,6 +44,9 @@ impl NydusRootfs { cid: &str, rootfs: &Mount, ) -> Result { + let prefetch_list_path = + get_nydus_prefetch_files(h.hypervisor_config().await.prefetch_list_path).await; + let share_fs_mount = share_fs.get_share_fs_mount(); let extra_options = NydusExtraOptions::new(rootfs).context("failed to parse nydus extra options")?; @@ -59,7 +64,7 @@ impl NydusRootfs { rafs_meta.to_string(), rafs_mnt, extra_options.config.clone(), - None, + prefetch_list_path, ) .await .context("failed to do rafs mount")?; @@ -151,3 +156,67 @@ impl Rootfs for NydusRootfs { Ok(()) } } + +// Check prefetch files list path, and if invalid, discard it directly. +// As the result of caller `rafs_mount`, it returns `Option`. +async fn get_nydus_prefetch_files(nydus_prefetch_path: String) -> Option { + // nydus_prefetch_path is an annotation and pod with it will indicate + // that prefetch_files will be included. + if nydus_prefetch_path.is_empty() { + info!(sl!(), "nydus prefetch files path not set, just skip it."); + + return None; + } + + // Ensure the string ends with "/prefetch_files.list" + if !nydus_prefetch_path.ends_with(format!("/{}", NYDUS_PREFETCH_FILE_LIST).as_str()) { + info!( + sl!(), + "nydus prefetch file path no {:?} file exist.", NYDUS_PREFETCH_FILE_LIST + ); + + return None; + } + + // ensure the prefetch_list_path is a regular file. 
+ let prefetch_list_path = Path::new(nydus_prefetch_path.as_str()); + if !prefetch_list_path.is_file() { + info!( + sl!(), + "nydus prefetch list file {:?} not a regular file", &prefetch_list_path + ); + + return None; + } + + return Some(prefetch_list_path.display().to_string()); +} + +#[cfg(test)] +mod tests { + use super::*; + use std::{fs::File, path::PathBuf}; + use tempfile::tempdir; + + #[tokio::test] + async fn test_get_nydus_prefetch_files() { + let temp_dir = tempdir().unwrap(); + let prefetch_list_path01 = temp_dir.path().join("nydus_prefetch_files"); + // /tmp_dir/nydus_prefetch_files/ + std::fs::create_dir_all(prefetch_list_path01.clone()).unwrap(); + // /tmp_dir/nydus_prefetch_files/prefetch_file.list + let prefetch_list_path02 = prefetch_list_path01 + .as_path() + .join(NYDUS_PREFETCH_FILE_LIST); + let file = File::create(prefetch_list_path02.clone()); + assert!(file.is_ok()); + + let prefetch_file = + get_nydus_prefetch_files(prefetch_list_path02.as_path().display().to_string()).await; + assert!(prefetch_file.is_some()); + assert_eq!(PathBuf::from(prefetch_file.unwrap()), prefetch_list_path02); + + drop(file); + temp_dir.close().unwrap_or_default(); + } +} From d1f550bd1e3c76a12b86a0e0e47994e565a31157 Mon Sep 17 00:00:00 2001 From: Qingyuan Hou Date: Wed, 29 Mar 2023 05:02:59 +0000 Subject: [PATCH 062/137] docs: update the rust version from versions.yaml Fixes: #6539 Signed-off-by: Qingyuan Hou --- .../kata-containers-3.0-rust-runtime-installation-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/install/kata-containers-3.0-rust-runtime-installation-guide.md b/docs/install/kata-containers-3.0-rust-runtime-installation-guide.md index d8150bf3d..f83e4ea02 100644 --- a/docs/install/kata-containers-3.0-rust-runtime-installation-guide.md +++ b/docs/install/kata-containers-3.0-rust-runtime-installation-guide.md @@ -49,7 +49,7 @@ Follow the [`kata-deploy`](../../tools/packaging/kata-deploy/README.md). * Download `Rustup` and install `Rust` > **Notes:** - > Rust version 1.62.0 is needed + > For Rust version, please see [`versions.yaml`](../../versions.yaml) file's rust section. Example for `x86_64` ``` From e2a770df55f52abc32cee713f646ecd47d50ded2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Wed, 5 Apr 2023 22:51:29 +0200 Subject: [PATCH 063/137] gha: ci-on-push: Run k8s tests with dragonball MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that the infra for running dragonball tests has been enabled, let's actually make sure to have them running on each PR. 
The tests skipped are: * `k8s-cpu-ns.bats`, as CPU resize doesn't seem to be yet properly supported on runtime-rs * https://github.com/kata-containers/kata-containers/issues/6621 Fixes: #6605 Signed-off-by: Fabiano Fidêncio --- tests/integration/kubernetes/k8s-cpu-ns.bats | 3 +++ tests/integration/kubernetes/run_kubernetes_tests.sh | 4 ---- tests/integration/kubernetes/tests_common.sh | 1 + 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/kubernetes/k8s-cpu-ns.bats b/tests/integration/kubernetes/k8s-cpu-ns.bats index 289dfc667..0089e1c06 100644 --- a/tests/integration/kubernetes/k8s-cpu-ns.bats +++ b/tests/integration/kubernetes/k8s-cpu-ns.bats @@ -10,6 +10,7 @@ load "${BATS_TEST_DIRNAME}/tests_common.sh" setup() { [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + [ "${KATA_HYPERVISOR}" == "dragonball" ] && skip "test not working see: ${dragonball_limitations}" pod_name="constraints-cpu-test" container_name="first-cpu-container" @@ -25,6 +26,7 @@ setup() { @test "Check CPU constraints" { [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + [ "${KATA_HYPERVISOR}" == "dragonball" ] && skip "test not working see: ${dragonball_limitations}" # Create the pod kubectl create -f "${pod_config_dir}/pod-cpu.yaml" @@ -68,6 +70,7 @@ setup() { teardown() { [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" + [ "${KATA_HYPERVISOR}" == "dragonball" ] && skip "test not working see: ${dragonball_limitations}" # Debugging information kubectl describe "pod/$pod_name" diff --git a/tests/integration/kubernetes/run_kubernetes_tests.sh b/tests/integration/kubernetes/run_kubernetes_tests.sh index 2d11f744f..db1e16633 100755 --- a/tests/integration/kubernetes/run_kubernetes_tests.sh +++ b/tests/integration/kubernetes/run_kubernetes_tests.sh @@ -54,10 +54,6 @@ else ) fi -if [ ${KATA_HYPERVISOR} == "dragonball" ]; then - exit 0 -fi - # we may need to skip a few test cases when running on non-x86_64 arch arch_config_file="${kubernetes_dir}/filter_out_per_arch/${TARGET_ARCH}.yaml" if [ -f "${arch_config_file}" ]; then diff --git a/tests/integration/kubernetes/tests_common.sh b/tests/integration/kubernetes/tests_common.sh index 0f3e7d98d..481cf4a57 100644 --- a/tests/integration/kubernetes/tests_common.sh +++ b/tests/integration/kubernetes/tests_common.sh @@ -25,6 +25,7 @@ timeout=90s # issues that can't test yet. fc_limitations="https://github.com/kata-containers/documentation/issues/351" +dragonball_limitations="https://github.com/kata-containers/kata-containers/issues/6621" # Path to the kubeconfig file which is used by kubectl and other tools. # Note: the init script sets that variable but if you want to run the tests in From 49ce685ebfb02c980c1545817a13dea4d5d71b81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Tue, 11 Apr 2023 13:40:40 +0200 Subject: [PATCH 064/137] gha: k8s-on-aks: Always delete the AKS cluster MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Regardless of the tests succeeding or failing, the AKS cluster **must be deleted**. 
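The fix is the standard `if: always()` pattern — a sketch of the behaviour, with illustrative step names and scripts:

```
steps:
  - name: Run the tests
    run: ./run-tests.sh          # may fail or be cancelled...

  - name: Delete AKS cluster
    if: always()                 # ...but this step still runs,
    run: ./delete-cluster.sh     # so the cluster is always cleaned up
```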
Signed-off-by: Fabiano Fidêncio --- .github/workflows/run-k8s-tests-on-aks.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/run-k8s-tests-on-aks.yaml b/.github/workflows/run-k8s-tests-on-aks.yaml index 96b6a9307..f9a26debb 100644 --- a/.github/workflows/run-k8s-tests-on-aks.yaml +++ b/.github/workflows/run-k8s-tests-on-aks.yaml @@ -83,6 +83,7 @@ jobs: KATA_HYPERVISOR: ${{ matrix.vmm }} - name: Delete AKS cluster + if: always() run: | az aks delete \ -g "kataCI" \ From 1d851b4be392b9e343ecba27c0453de2369a0a27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 08:45:11 +0100 Subject: [PATCH 065/137] local-build: Cosmetic changes in build targets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a simple cosmetic change, adding a space between the function call and the `;;`. Signed-off-by: Fabiano Fidêncio --- .../packaging/kata-deploy/local-build/kata-deploy-binaries.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index 1da2d4f8e..e696849da 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -382,9 +382,9 @@ handle_build() { nydus) install_nydus ;; - kernel-dragonball-experimental) install_dragonball_experimental_kernel;; + kernel-dragonball-experimental) install_dragonball_experimental_kernel ;; - kernel-experimental) install_experimental_kernel;; + kernel-experimental) install_experimental_kernel ;; qemu) install_qemu ;; From 73e108136a6df8530d45d819d672be160ea834ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 08:48:09 +0100 Subject: [PATCH 066/137] local-build: Rename non vanilla kernel build functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to make it easier to read, let's just rename the install_dragonball_experimental_kernel and install_experimental_kernel to install_kernel_dragonball_experimental and install_kernel_experimental, respectively. This allows us to quickly get to those functions when looking for `install_kernel`. 
Signed-off-by: Fabiano Fidêncio --- .../kata-deploy/local-build/kata-deploy-binaries.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index e696849da..3d5e57228 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -188,7 +188,7 @@ install_kernel() { } #Install dragonball experimental kernel asset -install_dragonball_experimental_kernel() { +install_kernel_dragonball_experimental() { info "build dragonball experimental kernel" export kernel_version="$(yq r $versions_yaml assets.kernel-dragonball-experimental.version)" local kernel_kata_config_version="$(cat ${repo_root_dir}/tools/packaging/kernel/kata_config_version)" @@ -207,7 +207,7 @@ install_dragonball_experimental_kernel() { } #Install experimental kernel asset -install_experimental_kernel() { +install_kernel_experimental() { info "build experimental kernel" export kernel_version="$(yq r $versions_yaml assets.kernel-experimental.tag)" local kernel_kata_config_version="$(cat ${repo_root_dir}/tools/packaging/kernel/kata_config_version)" @@ -382,9 +382,9 @@ handle_build() { nydus) install_nydus ;; - kernel-dragonball-experimental) install_dragonball_experimental_kernel ;; + kernel-dragonball-experimental) install_kernel_dragonball_experimental ;; - kernel-experimental) install_experimental_kernel ;; + kernel-experimental) install_kernel_experimental ;; qemu) install_qemu ;; From 1315bb45f9e9d5edba9512d61f15dc9653aceaa4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 08:50:43 +0100 Subject: [PATCH 067/137] local-build: Add dragonball kernel to the `all` target MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As the dragonball kernel is shipped as part of our releases, it must be added to the `all` target. Signed-off-by: Fabiano Fidêncio --- tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index 3d5e57228..6f2b6173b 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -368,6 +368,7 @@ handle_build() { install_image install_initrd install_kernel + install_kernel_dragonball_experimental install_nydus install_qemu install_shimv2 From 800ee5cd8868f7d59e14c481df814fb405cd2164 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 08:59:12 +0100 Subject: [PATCH 068/137] versions: Move QEMU TDX to its own experimental entry MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Although we've been providing users a way to build QEMU with TDX support, this must be moved to its own experimental entry instead of how it currently is. The reason for that is because the patches are not yet merged into QEMU, and this is still an experimental build of the project. 
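With its own entry, the TDX build can be resolved the same way the other assets are — e.g., using the `yq r` style the packaging scripts in this series already use:

```
$ yq r versions.yaml assets.hypervisor.qemu-tdx-experimental.url
https://github.com/kata-containers/qemu
$ yq r versions.yaml assets.hypervisor.qemu-tdx-experimental.tag
TDX-v3.1
```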
Signed-off-by: Fabiano Fidêncio --- versions.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/versions.yaml b/versions.yaml index a71d29dfb..a906b5307 100644 --- a/versions.yaml +++ b/versions.yaml @@ -98,10 +98,6 @@ assets: uscan-url: >- https://github.com/qemu/qemu/tags .*/v?(\d\S+)\.tar\.gz - tdx: - description: "VMM that uses KVM and supports TDX" - url: "https://github.com/kata-containers/qemu" - tag: "TDX-v3.1" snp: description: "VMM that uses KVM and supports AMD SEV-SNP" url: "https://github.com/AMDESE/qemu" @@ -113,6 +109,11 @@ assets: url: "https://github.com/qemu/qemu" version: "7a800cf9496fddddf71b21a00991e0ec757a170a" + qemu-tdx-experimental: + description: "VMM that uses KVM and supports TDX" + url: "https://github.com/kata-containers/qemu" + tag: "TDX-v3.1" + image: description: | Root filesystem disk image used to boot the guest virtual From 3018c9ad517874779a0017ab38a9796ab1c79da9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 09:03:16 +0100 Subject: [PATCH 069/137] versions: Update QEMU TDX version MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's update the QEMU TDX version to what's the latest tested release of the Intel TDX tools with Kata Containers. In order to do such update, we had to relax the checks on the QEMU version for some of the configuration options, as those were removed right after the window was open for the 7.1.0 development (thus the 7.0.50 check). Signed-off-by: Fabiano Fidêncio --- .../no_patches.txt | 0 tools/packaging/scripts/configure-hypervisor.sh | 7 +++++-- versions.yaml | 5 +++-- 3 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 tools/packaging/qemu/patches/tag_patches/ad4c7f529a279685da84297773b4ec8080153c2d-plus-TDX-v1.3/no_patches.txt diff --git a/tools/packaging/qemu/patches/tag_patches/ad4c7f529a279685da84297773b4ec8080153c2d-plus-TDX-v1.3/no_patches.txt b/tools/packaging/qemu/patches/tag_patches/ad4c7f529a279685da84297773b4ec8080153c2d-plus-TDX-v1.3/no_patches.txt new file mode 100644 index 000000000..e69de29bb diff --git a/tools/packaging/scripts/configure-hypervisor.sh b/tools/packaging/scripts/configure-hypervisor.sh index 61e479237..751b2866f 100755 --- a/tools/packaging/scripts/configure-hypervisor.sh +++ b/tools/packaging/scripts/configure-hypervisor.sh @@ -242,9 +242,12 @@ generate_qemu_options() { # Disable graphical network access qemu_options+=(size:--disable-vnc) qemu_options+=(size:--disable-vnc-jpeg) - if ! gt_eq "${qemu_version}" "7.2.0" ; then + if ! gt_eq "${qemu_version}" "7.0.50" ; then qemu_options+=(size:--disable-vnc-png) + else + qemu_options+=(size:--disable-png) fi + qemu_options+=(size:--disable-vnc-sasl) # Disable PAM authentication: it's a feature used together with VNC access @@ -358,7 +361,7 @@ generate_qemu_options() { qemu_options+=(size:--disable-vde) # Don't build other options which can't be depent on build server. - if ! gt_eq "${qemu_version}" "7.2.0" ; then + if ! 
gt_eq "${qemu_version}" "7.0.50" ; then qemu_options+=(size:--disable-xfsctl) qemu_options+=(size:--disable-libxml2) fi diff --git a/versions.yaml b/versions.yaml index a906b5307..b6c84b3f3 100644 --- a/versions.yaml +++ b/versions.yaml @@ -110,9 +110,10 @@ assets: version: "7a800cf9496fddddf71b21a00991e0ec757a170a" qemu-tdx-experimental: - description: "VMM that uses KVM and supports TDX" + # yamllint disable-line rule:line-length + description: "QEMU with TDX support - based on https://github.com/intel/tdx-tools/releases/tag/2023ww01" url: "https://github.com/kata-containers/qemu" - tag: "TDX-v3.1" + tag: "ad4c7f529a279685da84297773b4ec8080153c2d-plus-TDX-v1.3" image: description: | From f7b7c187ec1a947ac56ea4b21e11b9ec4fae3913 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 09:05:50 +0100 Subject: [PATCH 070/137] static-build: Improve qemu-experimental build script MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's make sure the `qemu_suffix` and `qemu_tarball_name` can be specified. With this we make it really easy to reuse this script for any addition flavour of an experimental QEMU that ends up having to be built (specifically looking at the ones for Confidential Containers here). Signed-off-by: Fabiano Fidêncio --- .../static-build/qemu/build-static-qemu-experimental.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/packaging/static-build/qemu/build-static-qemu-experimental.sh b/tools/packaging/static-build/qemu/build-static-qemu-experimental.sh index be50fb977..1e0541c54 100755 --- a/tools/packaging/static-build/qemu/build-static-qemu-experimental.sh +++ b/tools/packaging/static-build/qemu/build-static-qemu-experimental.sh @@ -14,6 +14,8 @@ source "${script_dir}/../../scripts/lib.sh" qemu_repo="${qemu_repo:-}" qemu_version="${qemu_version:-}" +qemu_suffix="${qemu_suffix:-experimental}" +qemu_tarball_name="${qemu_tarball_name:-kata-static-qemu-experimental.tar.gz}" if [ -z "$qemu_repo" ]; then info "Get qemu information from runtime versions.yaml" @@ -26,4 +28,4 @@ fi [ -n "$qemu_version" ] || qemu_version=$(get_from_kata_deps "assets.hypervisor.qemu-experimental.version") [ -n "$qemu_version" ] || die "failed to get qemu version" -"${script_dir}/build-base-qemu.sh" "${qemu_repo}" "${qemu_version}" "experimental" "kata-static-qemu-experimental.tar.gz" +"${script_dir}/build-base-qemu.sh" "${qemu_repo}" "${qemu_version}" "${qemu_suffix}" "${qemu_tarball_name}" From eceaae30a5119c80844c4f8cd7dca5fc8de92b5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 09:10:14 +0100 Subject: [PATCH 071/137] local-build: Add support to build QEMU for TDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's add the needed targets and modifications to be able to build qemu-tdx-experimental as part of the local-build scripts. 
Signed-off-by: Fabiano Fidêncio --- .../kata-deploy/local-build/Makefile | 4 ++ .../local-build/kata-deploy-binaries.sh | 48 +++++++++++++++---- 2 files changed, 43 insertions(+), 9 deletions(-) diff --git a/tools/packaging/kata-deploy/local-build/Makefile b/tools/packaging/kata-deploy/local-build/Makefile index 3e391f48e..80069258f 100644 --- a/tools/packaging/kata-deploy/local-build/Makefile +++ b/tools/packaging/kata-deploy/local-build/Makefile @@ -27,6 +27,7 @@ all: serial-targets \ kernel-dragonball-experimental-tarball \ nydus-tarball \ qemu-tarball \ + qemu-tdx-experimental-tarball \ shim-v2-tarball \ virtiofsd-tarball @@ -60,6 +61,9 @@ nydus-tarball: qemu-tarball: ${MAKE} $@-build +qemu-tdx-experimental-tarball: + ${MAKE} $@-build + rootfs-image-tarball: ${MAKE} $@-build diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index 6f2b6173b..4a047114c 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -26,6 +26,7 @@ readonly clh_builder="${static_build_dir}/cloud-hypervisor/build-static-clh.sh" readonly firecracker_builder="${static_build_dir}/firecracker/build-static-firecracker.sh" readonly kernel_builder="${static_build_dir}/kernel/build.sh" readonly qemu_builder="${static_build_dir}/qemu/build-static-qemu.sh" +readonly qemu_experimental_builder="${static_build_dir}/qemu/build-static-qemu-experimental.sh" readonly shimv2_builder="${static_build_dir}/shim-v2/build.sh" readonly virtiofsd_builder="${static_build_dir}/virtiofsd/build.sh" readonly nydus_builder="${static_build_dir}/nydus/build.sh" @@ -81,6 +82,7 @@ options: kernel-experimental nydus qemu + qemu-tdx-experimental rootfs-image rootfs-initrd shim-v2 @@ -225,23 +227,48 @@ install_kernel_experimental() { DESTDIR="${destdir}" PREFIX="${prefix}" "${kernel_builder}" -f -b experimental -v ${kernel_version} } -# Install static qemu asset -install_qemu() { - export qemu_repo="$(yq r $versions_yaml assets.hypervisor.qemu.url)" - export qemu_version="$(yq r $versions_yaml assets.hypervisor.qemu.version)" +install_qemu_helper() { + local qemu_repo_yaml_path="${1}" + local qemu_version_yaml_path="${2}" + local qemu_name="${3}" + local builder="${4}" + local qemu_tarball_name="${qemu_tarball_name:-kata-static-qemu.tar.gz}" + + export qemu_repo="$(get_from_kata_deps ${qemu_repo_yaml_path})" + export qemu_version="$(get_from_kata_deps ${qemu_version_yaml_path})" install_cached_tarball_component \ - "QEMU" \ - "${jenkins_url}/job/kata-containers-main-qemu-$(uname -m)/${cached_artifacts_path}" \ + "${qemu_name}" \ + "${jenkins_url}/job/kata-containers-main-${qemu_name}-$(uname -m)/${cached_artifacts_path}" \ "${qemu_version}-$(calc_qemu_files_sha256sum)" \ "$(get_qemu_image_name)" \ "${final_tarball_name}" \ "${final_tarball_path}" \ && return 0 - info "build static qemu" - "${qemu_builder}" - tar xvf "${builddir}/kata-static-qemu.tar.gz" -C "${destdir}" + info "build static ${qemu_name}" + "${builder}" + tar xvf "${qemu_tarball_name}" -C "${destdir}" +} + +# Install static qemu asset +install_qemu() { + install_qemu_helper \ + "assets.hypervisor.qemu.url" \ + "assets.hypervisor.qemu.version" \ + "qemu" \ + "${qemu_builder}" +} + +install_qemu_tdx_experimental() { + export qemu_suffix="tdx-experimental" + export qemu_tarball_name="kata-static-qemu-${qemu_suffix}.tar.gz" + + install_qemu_helper \ + "assets.hypervisor.qemu-${qemu_suffix}.url" \ + 
"assets.hypervisor.qemu-${qemu_suffix}.tag" \ + "qemu-${qemu_suffix}" \ + "${qemu_experimental_builder}" } # Install static firecracker asset @@ -371,6 +398,7 @@ handle_build() { install_kernel_dragonball_experimental install_nydus install_qemu + install_qemu_tdx_experimental install_shimv2 install_virtiofsd ;; @@ -389,6 +417,8 @@ handle_build() { qemu) install_qemu ;; + qemu-tdx-experimental) install_qemu_tdx_experimental ;; + rootfs-image) install_image ;; rootfs-initrd) install_initrd ;; From 33dc6c65aae89ba57bdf84678b55989e8c371c68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 09:15:42 +0100 Subject: [PATCH 072/137] gha: Build and ship QEMU for TDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's build QEMU TDX as part of our tests, and let's ship it as part of our releases. Signed-off-by: Fabiano Fidêncio --- .github/workflows/build-kata-static-tarball-amd64.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build-kata-static-tarball-amd64.yaml b/.github/workflows/build-kata-static-tarball-amd64.yaml index 8432e7714..f71dfc03f 100644 --- a/.github/workflows/build-kata-static-tarball-amd64.yaml +++ b/.github/workflows/build-kata-static-tarball-amd64.yaml @@ -22,6 +22,7 @@ jobs: - kernel-dragonball-experimental - nydus - qemu + - qemu-tdx-experimental - rootfs-image - rootfs-initrd - shim-v2 From 3d9ce3982b29f6607b05f98642c1b19e4f96e178 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 10:33:47 +0100 Subject: [PATCH 073/137] cache: Allow specifying the QEMU_FLAVOUR MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's do what we already did when caching the kernel, and allow passing a FLAVOUR of the project to build. By doing this we can re-use the same function used to cache QEMU to also cache any kind of experimental QEMU that we may happen to have. 
Signed-off-by: Fabiano Fidêncio --- tools/packaging/static-build/cache_components_main.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/packaging/static-build/cache_components_main.sh b/tools/packaging/static-build/cache_components_main.sh index e447ab4bf..e6bc2fe79 100755 --- a/tools/packaging/static-build/cache_components_main.sh +++ b/tools/packaging/static-build/cache_components_main.sh @@ -13,6 +13,7 @@ script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" source "${script_dir}/../scripts/lib.sh" KERNEL_FLAVOUR="${KERNEL_FLAVOUR:-kernel}" # kernel | kernel-experimental | kernel-arm-experimetnal | kernel-dragonball-experimental +QEMU_FLAVOUR="${QEMU_FLAVOUR:-qemu}" # qemu | qemu-tdx-experimental ROOTFS_IMAGE_TYPE="${ROOTFS_IMAGE_TYPE:-image}" # image | initrd cache_clh_artifacts() { @@ -42,8 +43,8 @@ cache_nydus_artifacts() { } cache_qemu_artifacts() { - local qemu_tarball_name="kata-static-qemu.tar.xz" - local current_qemu_version=$(get_from_kata_deps "assets.hypervisor.qemu.version") + local qemu_tarball_name="kata-static-${QEMU_FLAVOUR}.tar.xz" + local current_qemu_version=$(get_from_kata_deps "assets.hypervisor.${QEMU_FLAVOUR}.version") local qemu_sha=$(calc_qemu_files_sha256sum) local current_qemu_image="$(get_qemu_image_name)" create_cache_asset "${qemu_tarball_name}" "${current_qemu_version}-${qemu_sha}" "${current_qemu_image}" @@ -109,6 +110,8 @@ Usage: $0 "[options]" The default KERNEL_FLAVOUR value is "kernel" -n Nydus cache -q QEMU cache + * Export QEMU_FLAVOUR="qemu | qemu-tdx-experimental" for a specific build + The default QEMU_FLAVOUR value is "qemu" -r RootFS cache * Export ROOTFS_IMAGE_TYPE="image|initrd" for one of those two types The default ROOTFS_IMAGE_TYPE value is "image" From 20ab2c24207867e0de95c3edac9fa1b444c29fc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 09:26:59 +0100 Subject: [PATCH 074/137] versions: Move Kernel TDX to its own experimental entry MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Although we've been providing users a way to build kernel with TDX support, this must be moved to its own experimental entry instead of how it currently is. The reason for that is because the patches are not yet merged into kernel, and this is still an experimental build of the project. 
Signed-off-by: Fabiano Fidêncio --- versions.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/versions.yaml b/versions.yaml index b6c84b3f3..425ae1031 100644 --- a/versions.yaml +++ b/versions.yaml @@ -161,10 +161,6 @@ assets: description: "Linux kernel optimised for virtual machines" url: "https://cdn.kernel.org/pub/linux/kernel/v5.x/" version: "v5.19.2" - tdx: - description: "Linux kernel that supports TDX" - url: "https://github.com/kata-containers/linux/archive/refs/tags" - tag: "5.15-plus-TDX" sev: description: "Linux kernel that supports SEV" url: "https://cdn.kernel.org/pub/linux/kernel/v5.x/" @@ -189,6 +185,11 @@ assets: url: "https://cdn.kernel.org/pub/linux/kernel/v5.x/" version: "v5.10.25" + kernel-tdx-experimental: + description: "Linux kernel that supports TDX" + url: "https://github.com/kata-containers/linux/archive/refs/tags" + tag: "5.15-plus-TDX" + externals: description: "Third-party projects used by the system" From f33345c3110074609ee741c1caeea402ba6ba3fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 09:29:06 +0100 Subject: [PATCH 075/137] versions: Update Kernel TDX version MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's update the Kernel TDX version to what's the latest tested release of the Intel TDX tools with Kata Containers. Signed-off-by: Fabiano Fidêncio --- tools/packaging/kernel/configs/fragments/x86_64/tdx/tdx.conf | 4 ---- tools/packaging/kernel/kata_config_version | 2 +- tools/packaging/kernel/patches/5.19-TDX-v2.x/no_patches.txt | 0 versions.yaml | 5 +++-- 4 files changed, 4 insertions(+), 7 deletions(-) create mode 100644 tools/packaging/kernel/patches/5.19-TDX-v2.x/no_patches.txt diff --git a/tools/packaging/kernel/configs/fragments/x86_64/tdx/tdx.conf b/tools/packaging/kernel/configs/fragments/x86_64/tdx/tdx.conf index 1b1f8751e..2f877a5c9 100644 --- a/tools/packaging/kernel/configs/fragments/x86_64/tdx/tdx.conf +++ b/tools/packaging/kernel/configs/fragments/x86_64/tdx/tdx.conf @@ -5,13 +5,9 @@ CONFIG_DMA_RESTRICTED_POOL=y CONFIG_EFI=y CONFIG_EFI_STUB=y CONFIG_INTEL_IOMMU_SVM=y -CONFIG_INTEL_TDX_ATTESTATION=y -CONFIG_INTEL_TDX_FIXES=y CONFIG_INTEL_TDX_GUEST=y CONFIG_OF=y CONFIG_OF_RESERVED_MEM=y CONFIG_X86_5LEVEL=y CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y -CONFIG_X86_MEM_ENCRYPT_COMMON=y CONFIG_X86_PLATFORM_DEVICES=y -CONFIG_X86_PLATFORM_DRIVERS_INTEL=y diff --git a/tools/packaging/kernel/kata_config_version b/tools/packaging/kernel/kata_config_version index a9c8fe829..b16e5f75e 100644 --- a/tools/packaging/kernel/kata_config_version +++ b/tools/packaging/kernel/kata_config_version @@ -1 +1 @@ -103 +104 diff --git a/tools/packaging/kernel/patches/5.19-TDX-v2.x/no_patches.txt b/tools/packaging/kernel/patches/5.19-TDX-v2.x/no_patches.txt new file mode 100644 index 000000000..e69de29bb diff --git a/versions.yaml b/versions.yaml index 425ae1031..ef0288a4d 100644 --- a/versions.yaml +++ b/versions.yaml @@ -186,9 +186,10 @@ assets: version: "v5.10.25" kernel-tdx-experimental: - description: "Linux kernel that supports TDX" + # yamllint disable-line rule:line-length + description: "Linux kernel with TDX support -- based on https://github.com/intel/tdx-tools/releases/tag/2023ww01" url: "https://github.com/kata-containers/linux/archive/refs/tags" - tag: "5.15-plus-TDX" + version: "5.19-TDX-v2.2" externals: description: "Third-party projects used by the system" From b2585eecffc606ccb9d615abb8150d29718d2c83 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 09:31:47 +0100 Subject: [PATCH 076/137] local-build: Avoid code duplication building the kernel MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's create a `install_kernel_helper()` function, as it was already done for QEMU, and rely on that when calling `install_kernel` and `install_kernel_dragonball_experimental`. This helps us to reduce the code duplication by a fair amount. Signed-off-by: Fabiano Fidêncio --- .../local-build/kata-deploy-binaries.sh | 59 ++++++++----------- 1 file changed, 25 insertions(+), 34 deletions(-) diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index 4a047114c..b73df7d75 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -173,58 +173,49 @@ install_initrd() { } #Install kernel asset -install_kernel() { - export kernel_version="$(yq r $versions_yaml assets.kernel.version)" +install_kernel_helper() { + local kernel_version_yaml_path="${1}" + local kernel_name="${2}" + local extra_cmd=${3} + + export kernel_version="$(get_from_kata_deps ${kernel_version_yaml_path})" local kernel_kata_config_version="$(cat ${repo_root_dir}/tools/packaging/kernel/kata_config_version)" install_cached_tarball_component \ - "kernel" \ - "${jenkins_url}/job/kata-containers-main-kernel-$(uname -m)/${cached_artifacts_path}" \ + "${kernel_name}" \ + "${jenkins_url}/job/kata-containers-main-${kernel_name}-$(uname -m)/${cached_artifacts_path}" \ "${kernel_version}-${kernel_kata_config_version}" \ "$(get_kernel_image_name)" \ "${final_tarball_name}" \ "${final_tarball_path}" \ && return 0 - DESTDIR="${destdir}" PREFIX="${prefix}" "${kernel_builder}" -f -v "${kernel_version}" + info "build ${kernel_name}" + info "Kernel version ${kernel_version}" + DESTDIR="${destdir}" PREFIX="${prefix}" "${kernel_builder}" -v "${kernel_version}" ${extra_cmd} +} + +#Install kernel asset +install_kernel() { + install_kernel_helper \ + "assets.kernel.version" \ + "kernel" \ + "-f" } -#Install dragonball experimental kernel asset install_kernel_dragonball_experimental() { - info "build dragonball experimental kernel" - export kernel_version="$(yq r $versions_yaml assets.kernel-dragonball-experimental.version)" - local kernel_kata_config_version="$(cat ${repo_root_dir}/tools/packaging/kernel/kata_config_version)" - - install_cached_tarball_component \ + install_kernel_helper \ + "assets.kernel-dragonball-experimental.version" \ "kernel-dragonball-experimental" \ - "${jenkins_url}/job/kata-containers-main-kernel-dragonball-experimental-$(uname -m)/${cached_artifacts_path}" \ - "${kernel_version}-${kernel_kata_config_version}" \ - "$(get_kernel_image_name)" \ - "${final_tarball_name}" \ - "${final_tarball_path}" \ - && return 0 - - info "kernel version ${kernel_version}" - DESTDIR="${destdir}" PREFIX="${prefix}" "${kernel_builder}" -e -t dragonball -v ${kernel_version} + "-e -t dragonball" } #Install experimental kernel asset install_kernel_experimental() { - info "build experimental kernel" - export kernel_version="$(yq r $versions_yaml assets.kernel-experimental.tag)" - local kernel_kata_config_version="$(cat ${repo_root_dir}/tools/packaging/kernel/kata_config_version)" - - install_cached_tarball_component \ + install_kernel_helper \ + "assets.kernel-experimental.version" \ "kernel-experimental" \ - 
"${jenkins_url}/job/kata-containers-main-kernel-experimental-$(uname -m)/${cached_artifacts_path}" \ - "${kernel_version}-${kernel_kata_config_version}" \ - "$(get_kernel_image_name)" \ - "${final_tarball_name}" \ - "${final_tarball_path}" \ - && return 0 - - info "Kernel version ${kernel_version}" - DESTDIR="${destdir}" PREFIX="${prefix}" "${kernel_builder}" -f -b experimental -v ${kernel_version} + "-f -b experimental" } install_qemu_helper() { From 502844ced95ae1d65c1c10a6aaccd6287d250eb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 09:33:46 +0100 Subject: [PATCH 077/137] local-build: Add support to build Kernel for TDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's add the needed targets and modifications to be able to build kernel-tdx-experimental as part of the local-build scripts. Signed-off-by: Fabiano Fidêncio --- tools/packaging/kata-deploy/local-build/Makefile | 4 ++++ .../local-build/kata-deploy-binaries.sh | 14 ++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/tools/packaging/kata-deploy/local-build/Makefile b/tools/packaging/kata-deploy/local-build/Makefile index 80069258f..49f46cc80 100644 --- a/tools/packaging/kata-deploy/local-build/Makefile +++ b/tools/packaging/kata-deploy/local-build/Makefile @@ -25,6 +25,7 @@ all: serial-targets \ firecracker-tarball \ kernel-tarball \ kernel-dragonball-experimental-tarball \ + kernel-tdx-experimental-tarball \ nydus-tarball \ qemu-tarball \ qemu-tdx-experimental-tarball \ @@ -55,6 +56,9 @@ kernel-dragonball-experimental-tarball: kernel-experimental-tarball: ${MAKE} $@-build +kernel-tdx-experimental-tarball: + ${MAKE} $@-build + nydus-tarball: ${MAKE} $@-build diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index b73df7d75..a31dd9c65 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -80,6 +80,7 @@ options: kernel kernel-dragonball-experimental kernel-experimental + kernel-tdx-experimental nydus qemu qemu-tdx-experimental @@ -218,6 +219,16 @@ install_kernel_experimental() { "-f -b experimental" } +#Install experimental TDX kernel asset +install_kernel_tdx_experimental() { + local kernel_url="$(get_from_kata_deps assets.kernel-tdx-experimental.url)" + + install_kernel_helper \ + "assets.kernel-tdx-experimental.version" \ + "kernel-tdx-experimental" \ + "-x tdx -u ${kernel_url}" +} + install_qemu_helper() { local qemu_repo_yaml_path="${1}" local qemu_version_yaml_path="${2}" @@ -387,6 +398,7 @@ handle_build() { install_initrd install_kernel install_kernel_dragonball_experimental + install_kernel_tdx_experimental install_nydus install_qemu install_qemu_tdx_experimental @@ -406,6 +418,8 @@ handle_build() { kernel-experimental) install_kernel_experimental ;; + kernel-tdx-experimental) install_kernel_tdx_experimental ;; + qemu) install_qemu ;; qemu-tdx-experimental) install_qemu_tdx_experimental ;; From fc22ed0a8ac39ff6ee8595f960fe14a55e3a9157 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 09:35:53 +0100 Subject: [PATCH 078/137] gha: Build and ship the Kernel for TDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's build the kernel with TDX support as part of our tests, and let's ship it as part of our releases. 
Signed-off-by: Fabiano Fidêncio --- .github/workflows/build-kata-static-tarball-amd64.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build-kata-static-tarball-amd64.yaml b/.github/workflows/build-kata-static-tarball-amd64.yaml index f71dfc03f..d1b3dfde4 100644 --- a/.github/workflows/build-kata-static-tarball-amd64.yaml +++ b/.github/workflows/build-kata-static-tarball-amd64.yaml @@ -20,6 +20,7 @@ jobs: - firecracker - kernel - kernel-dragonball-experimental + - kernel-tdx-experimental - nydus - qemu - qemu-tdx-experimental From 6e4726e4542c4e0609162971199ca4f0fcb524e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 10:35:25 +0100 Subject: [PATCH 079/137] cache: Fix typos MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's just fix a few simple typos: * kernek -> kernel * experimetnal -> experimental Signed-off-by: Fabiano Fidêncio --- tools/packaging/static-build/cache_components_main.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/packaging/static-build/cache_components_main.sh b/tools/packaging/static-build/cache_components_main.sh index e6bc2fe79..1a1806499 100755 --- a/tools/packaging/static-build/cache_components_main.sh +++ b/tools/packaging/static-build/cache_components_main.sh @@ -12,7 +12,7 @@ script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" source "${script_dir}/../scripts/lib.sh" -KERNEL_FLAVOUR="${KERNEL_FLAVOUR:-kernel}" # kernel | kernel-experimental | kernel-arm-experimetnal | kernel-dragonball-experimental +KERNEL_FLAVOUR="${KERNEL_FLAVOUR:-kernel}" # kernel | kernel-experimental | kernel-arm-experimental | kernel-dragonball-experimental QEMU_FLAVOUR="${QEMU_FLAVOUR:-qemu}" # qemu | qemu-tdx-experimental ROOTFS_IMAGE_TYPE="${ROOTFS_IMAGE_TYPE:-image}" # image | initrd @@ -106,7 +106,7 @@ Usage: $0 "[options]" -c Cloud hypervisor cache -F Firecracker cache -k Kernel cache - * Export KERNEL_FLAVOUR="kernel|kernek-experimental|kernel-arm-experimental|kernel-dragonball-experimental" for a specific build + * Export KERNEL_FLAVOUR="kernel|kernel-experimental|kernel-arm-experimental|kernel-dragonball-experimental" for a specific build The default KERNEL_FLAVOUR value is "kernel" -n Nydus cache -q QEMU cache From 5d79e96966bb2f3e72b1757ba57a215c05e3acae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 10:40:03 +0100 Subject: [PATCH 080/137] cache: Add a space to ease the reading of the kernel flavours MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Right now it's quite hard to read those, let's improve it a little bit. 
Signed-off-by: Fabiano Fidêncio --- tools/packaging/static-build/cache_components_main.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/packaging/static-build/cache_components_main.sh b/tools/packaging/static-build/cache_components_main.sh index 1a1806499..df3f93ab6 100755 --- a/tools/packaging/static-build/cache_components_main.sh +++ b/tools/packaging/static-build/cache_components_main.sh @@ -106,7 +106,7 @@ Usage: $0 "[options]" -c Cloud hypervisor cache -F Firecracker cache -k Kernel cache - * Export KERNEL_FLAVOUR="kernel|kernel-experimental|kernel-arm-experimental|kernel-dragonball-experimental" for a specific build + * Export KERNEL_FLAVOUR="kernel | kernel-experimental | kernel-arm-experimental | kernel-dragonball-experimental" for a specific build The default KERNEL_FLAVOUR value is "kernel" -n Nydus cache -q QEMU cache From fbf03d7aca511203cbbf73e1c74aa2f938db14da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 10:41:01 +0100 Subject: [PATCH 081/137] cache: Document kernel-tdx-experimental MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's make users aware of the cache_components_main.sh that they can also cache the kernel-tdx-experimental builds. Signed-off-by: Fabiano Fidêncio --- tools/packaging/static-build/cache_components_main.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/packaging/static-build/cache_components_main.sh b/tools/packaging/static-build/cache_components_main.sh index df3f93ab6..1b3f8ce23 100755 --- a/tools/packaging/static-build/cache_components_main.sh +++ b/tools/packaging/static-build/cache_components_main.sh @@ -12,7 +12,7 @@ script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" source "${script_dir}/../scripts/lib.sh" -KERNEL_FLAVOUR="${KERNEL_FLAVOUR:-kernel}" # kernel | kernel-experimental | kernel-arm-experimental | kernel-dragonball-experimental +KERNEL_FLAVOUR="${KERNEL_FLAVOUR:-kernel}" # kernel | kernel-experimental | kernel-arm-experimental | kernel-dragonball-experimental | kernel-tdx-experimental QEMU_FLAVOUR="${QEMU_FLAVOUR:-qemu}" # qemu | qemu-tdx-experimental ROOTFS_IMAGE_TYPE="${ROOTFS_IMAGE_TYPE:-image}" # image | initrd @@ -106,7 +106,7 @@ Usage: $0 "[options]" -c Cloud hypervisor cache -F Firecracker cache -k Kernel cache - * Export KERNEL_FLAVOUR="kernel | kernel-experimental | kernel-arm-experimental | kernel-dragonball-experimental" for a specific build + * Export KERNEL_FLAVOUR="kernel | kernel-experimental | kernel-arm-experimental | kernel-dragonball-experimental | kernel-tdx-experimental" for a specific build The default KERNEL_FLAVOUR value is "kernel" -n Nydus cache -q QEMU cache From 800fb49da1a729b6ea1d684980a4847e9961ef02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 10:20:13 +0100 Subject: [PATCH 082/137] packaging: Add get_ovmf_image_name() helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As we'll be using this from different places in the near future, let's create a helper function as part of the libs.sh. 
Signed-off-by: Fabiano Fidêncio --- tools/packaging/scripts/lib.sh | 5 +++++ tools/packaging/static-build/ovmf/build.sh | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/packaging/scripts/lib.sh b/tools/packaging/scripts/lib.sh index aa101b870..b6874bf64 100644 --- a/tools/packaging/scripts/lib.sh +++ b/tools/packaging/scripts/lib.sh @@ -183,6 +183,11 @@ get_shim_v2_image_name() { echo "${BUILDER_REGISTRY}:shim-v2-go-$(get_from_kata_deps "languages.golang.meta.newest-version")-rust-$(get_from_kata_deps "languages.rust.meta.newest-version")-$(get_last_modification ${shim_v2_script_dir})-$(uname -m)" } +get_ovmf_image_name() { + ovmf_script_dir="${repo_root_dir}/tools/packaging/static-build/ovmf" + echo "${BUILDER_REGISTRY}:ovmf-$(get_last_modification ${ovmf_script_dir})-$(uname -m)" +} + get_virtiofsd_image_name() { ARCH=$(uname -m) case ${ARCH} in diff --git a/tools/packaging/static-build/ovmf/build.sh b/tools/packaging/static-build/ovmf/build.sh index 2dfbe5a20..53444254b 100755 --- a/tools/packaging/static-build/ovmf/build.sh +++ b/tools/packaging/static-build/ovmf/build.sh @@ -15,7 +15,7 @@ source "${script_dir}/../../scripts/lib.sh" DESTDIR=${DESTDIR:-${PWD}} PREFIX=${PREFIX:-/opt/kata} -container_image="${OVMF_CONTAINER_BUILDER:-${BUILDER_REGISTRY}:ovmf-$(get_last_modification ${script_dir})-$(uname -m)}" +container_image="${OVMF_CONTAINER_BUILDER:-$(get_ovmf_image_name)}" ovmf_build="${ovmf_build:-x86_64}" kata_version="${kata_version:-}" ovmf_repo="${ovmf_repo:-}" From 054174d3e6330f7f3821983957b4bacfc6847dae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Mon, 20 Mar 2023 22:48:45 +0100 Subject: [PATCH 083/137] versions: Bump OVMF for TDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's update the OVMF for TDX version to what's the latest tested release of the Intel TDX tools with Kata Containers. This change requires a newer version of `nasm` than the one provided by the container used to build the project. This change will also be needed for SEV-SNP and was originally done by Alex Carter (thanks!). 
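For reference, the edk2 build invocation assembled for the TDX flavour
roughly expands to the following sketch; the -b/-t/-a values are
illustrative defaults, while the -D defines and the IntelTdx package
path are the ones introduced by this change:

  build -b RELEASE -t GCC5 -a X64 -p OvmfPkg/IntelTdx/IntelTdxX64.dsc \
      -D DEBUG_ON_SERIAL_PORT=FALSE -D TDX_MEM_PARTIAL_ACCEPT=512 \
      -D TDX_EMULATION_ENABLE=FALSE -D SECURE_BOOT_ENABLE=TRUE \
      -D TDX_ACCEPT_PAGE_SIZE=2M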
Signed-off-by: Fabiano Fidêncio Signed-off-by: Alex Carter --- tools/packaging/static-build/ovmf/Dockerfile | 10 ++++++++-- tools/packaging/static-build/ovmf/build-ovmf.sh | 4 +--- tools/packaging/static-build/ovmf/build.sh | 6 +----- versions.yaml | 10 +++++----- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/tools/packaging/static-build/ovmf/Dockerfile b/tools/packaging/static-build/ovmf/Dockerfile index a9a148a75..78acaccfe 100644 --- a/tools/packaging/static-build/ovmf/Dockerfile +++ b/tools/packaging/static-build/ovmf/Dockerfile @@ -14,9 +14,15 @@ RUN apt-get update && \ git \ iasl \ make \ - nasm \ python \ python3 \ python3-distutils \ uuid-dev && \ - apt-get clean && rm -rf /var/lib/lists/ + apt-get clean && rm -rf /var/lib/lists/ && \ + cd /tmp && curl -fsLO https://www.nasm.us/pub/nasm/releasebuilds/2.15.05/nasm-2.15.05.tar.gz && \ + tar xf nasm-2.15.05.tar.gz && \ + cd nasm-2.15.05 && \ + ./configure && \ + make -j"$(nproc)" && \ + make install && \ + cd /tmp && rm -r nasm-2.15.05* diff --git a/tools/packaging/static-build/ovmf/build-ovmf.sh b/tools/packaging/static-build/ovmf/build-ovmf.sh index 936b53be3..ebdf0669f 100755 --- a/tools/packaging/static-build/ovmf/build-ovmf.sh +++ b/tools/packaging/static-build/ovmf/build-ovmf.sh @@ -56,7 +56,7 @@ fi info "Building ovmf" build_cmd="build -b ${build_target} -t ${toolchain} -a ${architecture} -p ${ovmf_package}" if [ "${ovmf_build}" == "tdx" ]; then - build_cmd+=" -D DEBUG_ON_SERIAL_PORT=TRUE -D TDX_MEM_PARTIAL_ACCEPT=512 -D TDX_EMULATION_ENABLE=FALSE -D TDX_ACCEPT_PAGE_SIZE=2M" + build_cmd+=" -D DEBUG_ON_SERIAL_PORT=FALSE -D TDX_MEM_PARTIAL_ACCEPT=512 -D TDX_EMULATION_ENABLE=FALSE -D SECURE_BOOT_ENABLE=TRUE -D TDX_ACCEPT_PAGE_SIZE=2M" fi eval "${build_cmd}" @@ -70,7 +70,6 @@ if [ "${ovmf_build}" == "tdx" ]; then build_path_arch="${build_path_target_toolchain}/X64" stat "${build_path_fv}/OVMF_CODE.fd" stat "${build_path_fv}/OVMF_VARS.fd" - stat "${build_path_arch}/DumpTdxEventLog.efi" fi #need to leave tmp dir @@ -87,7 +86,6 @@ install $build_root/$ovmf_dir/"${build_path_fv}"/OVMF.fd "${install_dir}" if [ "${ovmf_build}" == "tdx" ]; then install $build_root/$ovmf_dir/"${build_path_fv}"/OVMF_CODE.fd ${install_dir} install $build_root/$ovmf_dir/"${build_path_fv}"/OVMF_VARS.fd ${install_dir} - install $build_root/$ovmf_dir/"${build_path_arch}"/DumpTdxEventLog.efi ${install_dir} fi local_dir=${PWD} diff --git a/tools/packaging/static-build/ovmf/build.sh b/tools/packaging/static-build/ovmf/build.sh index 53444254b..4640e2ac7 100755 --- a/tools/packaging/static-build/ovmf/build.sh +++ b/tools/packaging/static-build/ovmf/build.sh @@ -24,11 +24,7 @@ ovmf_package="${ovmf_package:-}" package_output_dir="${package_output_dir:-}" if [ -z "$ovmf_repo" ]; then - if [ "${ovmf_build}" == "tdx" ]; then - ovmf_repo=$(get_from_kata_deps "externals.ovmf.tdx.url" "${kata_version}") - else - ovmf_repo=$(get_from_kata_deps "externals.ovmf.url" "${kata_version}") - fi + ovmf_repo=$(get_from_kata_deps "externals.ovmf.url" "${kata_version}") fi [ -n "$ovmf_repo" ] || die "failed to get ovmf repo" diff --git a/versions.yaml b/versions.yaml index ef0288a4d..1c92412cb 100644 --- a/versions.yaml +++ b/versions.yaml @@ -279,11 +279,11 @@ externals: package: "OvmfPkg/AmdSev/AmdSevX64.dsc" package_output_dir: "AmdSev" tdx: - url: "https://github.com/tianocore/edk2-staging" - description: "TDVF build needed for TDX measured direct boot." 
- version: "2022-tdvf-ww28.5" - package: "OvmfPkg/OvmfPkgX64.dsc" - package_output_dir: "OvmfX64" + # yamllint disable-line rule:line-length + description: "QEMU with TDX support - based on https://github.com/intel/tdx-tools/releases/tag/2023ww01" + version: "edk2-stable202211" + package: "OvmfPkg/IntelTdx/IntelTdxX64.dsc" + package_output_dir: "IntelTdx" td-shim: description: "Confidential Containers Shim Firmware" From 39c3fab7b1679d826ce6e3766d8be118d8dd9a61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Wed, 22 Mar 2023 23:27:18 +0100 Subject: [PATCH 084/137] local-build: Add support to build OVMF for TDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's add the needed targets and modifications to be able to build OVMF for TDX as part of the local-build scripts. Signed-off-by: Fabiano Fidêncio --- .../kata-deploy/local-build/Makefile | 4 +++ .../local-build/kata-deploy-binaries.sh | 30 +++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/tools/packaging/kata-deploy/local-build/Makefile b/tools/packaging/kata-deploy/local-build/Makefile index 49f46cc80..2ad324b78 100644 --- a/tools/packaging/kata-deploy/local-build/Makefile +++ b/tools/packaging/kata-deploy/local-build/Makefile @@ -30,6 +30,7 @@ all: serial-targets \ qemu-tarball \ qemu-tdx-experimental-tarball \ shim-v2-tarball \ + tdvf-tarball \ virtiofsd-tarball serial-targets: @@ -77,6 +78,9 @@ rootfs-initrd-tarball: shim-v2-tarball: ${MAKE} $@-build +tdvf-tarball: + ${MAKE} $@-build + virtiofsd-tarball: ${MAKE} $@-build diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index a31dd9c65..816c8e01e 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -25,6 +25,7 @@ readonly versions_yaml="${repo_root_dir}/versions.yaml" readonly clh_builder="${static_build_dir}/cloud-hypervisor/build-static-clh.sh" readonly firecracker_builder="${static_build_dir}/firecracker/build-static-firecracker.sh" readonly kernel_builder="${static_build_dir}/kernel/build.sh" +readonly ovmf_builder="${static_build_dir}/ovmf/build.sh" readonly qemu_builder="${static_build_dir}/qemu/build-static-qemu.sh" readonly qemu_experimental_builder="${static_build_dir}/qemu/build-static-qemu-experimental.sh" readonly shimv2_builder="${static_build_dir}/shim-v2/build.sh" @@ -87,6 +88,7 @@ options: rootfs-image rootfs-initrd shim-v2 + tdvf virtiofsd EOF @@ -375,6 +377,31 @@ install_shimv2() { DESTDIR="${destdir}" PREFIX="${prefix}" "${shimv2_builder}" } +install_ovmf() { + ovmf_type="${1:-x86_64}" + tarball_name="${2:-edk2.tar.xz}" + + local component_name="ovmf" + local component_version="$(get_from_kata_deps "externals.ovmf.${ovmf_type}.version")" + [ "${ovmf_type}" == "tdx" ] && component_name="tdvf" + install_cached_tarball_component \ + "${component_name}" \ + "${jenkins_url}/job/kata-containers-main-ovmf-${ovmf_type}-$(uname -m)/${cached_artifacts_path}" \ + "${component_version}" \ + "$(get_ovmf_image_name)" \ + "${final_tarball_name}" \ + "${final_tarball_path}" \ + && return 0 + + DESTDIR="${destdir}" PREFIX="${prefix}" ovmf_build="${ovmf_type}" "${ovmf_builder}" + tar xvf "${builddir}/${tarball_name}" -C "${destdir}" +} + +# Install TDVF +install_tdvf() { + install_ovmf "tdx" "edk2-tdx.tar.gz" +} + get_kata_version() { local v v=$(cat "${version_file}") @@ -403,6 +430,7 @@ handle_build() { 
install_qemu install_qemu_tdx_experimental install_shimv2 + install_tdvf install_virtiofsd ;; @@ -430,6 +458,8 @@ handle_build() { shim-v2) install_shimv2 ;; + tdvf) install_tdvf ;; + virtiofsd) install_virtiofsd ;; *) From ce8d982512b72f1983d1798ef898432ef5636475 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 10:01:39 +0100 Subject: [PATCH 085/137] gha: Build and ship the OVMF for TDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's build the OVMF with TDX support as part of our tests, and let's ship it as part of our releases. Signed-off-by: Fabiano Fidêncio --- .github/workflows/build-kata-static-tarball-amd64.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build-kata-static-tarball-amd64.yaml b/.github/workflows/build-kata-static-tarball-amd64.yaml index d1b3dfde4..a7f3bdc19 100644 --- a/.github/workflows/build-kata-static-tarball-amd64.yaml +++ b/.github/workflows/build-kata-static-tarball-amd64.yaml @@ -27,6 +27,7 @@ jobs: - rootfs-image - rootfs-initrd - shim-v2 + - tdvf - virtiofsd steps: - uses: actions/checkout@v3 From 9feec533cedac64e5fd861e757ee8381da98f500 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 11:29:57 +0100 Subject: [PATCH 086/137] cache: Add ability to cache OVMF MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's add the ability to cache OVMF, which right now we're only building and shipping it for TDX. Signed-off-by: Fabiano Fidêncio --- .../static-build/cache_components_main.sh | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/tools/packaging/static-build/cache_components_main.sh b/tools/packaging/static-build/cache_components_main.sh index 1b3f8ce23..804df1cbf 100755 --- a/tools/packaging/static-build/cache_components_main.sh +++ b/tools/packaging/static-build/cache_components_main.sh @@ -13,6 +13,7 @@ script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" source "${script_dir}/../scripts/lib.sh" KERNEL_FLAVOUR="${KERNEL_FLAVOUR:-kernel}" # kernel | kernel-experimental | kernel-arm-experimental | kernel-dragonball-experimental | kernel-tdx-experimental +OVMF_FLAVOUR="${OVMF_FLAVOUR:-x86_64}" # x86_64 | tdx QEMU_FLAVOUR="${QEMU_FLAVOUR:-qemu}" # qemu | qemu-tdx-experimental ROOTFS_IMAGE_TYPE="${ROOTFS_IMAGE_TYPE:-image}" # image | initrd @@ -42,6 +43,13 @@ cache_nydus_artifacts() { create_cache_asset "${nydus_tarball_name}" "${current_nydus_version}" "" } +cache_ovmf_artifacts() { + local ovmf_tarball_name="kata-static-${OVMF_FLAVOUR}.tar.xz" + local current_ovmf_version="$(get_from_kata_deps "externals.ovmf.${OVMF_FLAVOUR}.version")" + local current_ovmf_image="$(get_ovmf_image_name)" + create_cache_asset "${ovmf_tarball_name}" "${current_ovmf_version}" "${current_ovmf_image}" +} + cache_qemu_artifacts() { local qemu_tarball_name="kata-static-${QEMU_FLAVOUR}.tar.xz" local current_qemu_version=$(get_from_kata_deps "assets.hypervisor.${QEMU_FLAVOUR}.version") @@ -127,12 +135,13 @@ main() { local firecracker_component="${firecracker_component:-}" local kernel_component="${kernel_component:-}" local nydus_component="${nydus_component:-}" + local ovmf_component="${ovmf_component:-}" local qemu_component="${qemu_component:-}" local rootfs_component="${rootfs_component:-}" local shim_v2_component="${shim_v2_component:-}" local virtiofsd_component="${virtiofsd_component:-}" local OPTIND - while getopts ":cFknqrsvh:" opt + while getopts 
":cFknoqrsvh:" opt do case "$opt" in c) @@ -147,6 +156,9 @@ main() { n) nydus_component="1" ;; + o) + ovmf_component="1" + ;; q) qemu_component="1" ;; @@ -176,6 +188,7 @@ main() { [[ -z "${firecracker_component}" ]] && \ [[ -z "${kernel_component}" ]] && \ [[ -z "${nydus_component}" ]] && \ + [[ -z "${ovmf_component}" ]] && \ [[ -z "${qemu_component}" ]] && \ [[ -z "${rootfs_component}" ]] && \ [[ -z "${shim_v2_component}" ]] && \ @@ -190,6 +203,7 @@ main() { [ "${firecracker_component}" == "1" ] && cache_firecracker_artifacts [ "${kernel_component}" == "1" ] && cache_kernel_artifacts [ "${nydus_component}" == "1" ] && cache_nydus_artifacts + [ "${ovmf_component}" == "1" ] && cache_ovmf_artifacts [ "${qemu_component}" == "1" ] && cache_qemu_artifacts [ "${rootfs_component}" == "1" ] && cache_rootfs_artifacts [ "${shim_v2_component}" == "1" ] && cache_shim_v2_artifacts From 01bdacb4e4f94fd1c1d6aeebf3091b548d705c22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Fri, 13 Jan 2023 13:31:04 +0100 Subject: [PATCH 087/137] virtcontainers: Also check /sys/firmwares/tdx for TDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's make sure we also check /sys/firmwares/tdx for TDX guest protection, as the location may depend on whether TDX Seam is being used or not. Signed-off-by: Fabiano Fidêncio --- .../virtcontainers/hypervisor_linux_amd64.go | 25 ++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/src/runtime/virtcontainers/hypervisor_linux_amd64.go b/src/runtime/virtcontainers/hypervisor_linux_amd64.go index 8cfc9aca9..304d0446a 100644 --- a/src/runtime/virtcontainers/hypervisor_linux_amd64.go +++ b/src/runtime/virtcontainers/hypervisor_linux_amd64.go @@ -8,7 +8,9 @@ package virtcontainers import "os" const ( - tdxSysFirmwareDir = "/sys/firmware/tdx_seam/" + tdxSeamSysFirmwareDir = "/sys/firmware/tdx_seam/" + + tdxSysFirmwareDir = "/sys/firmware/tdx/" tdxCPUFlag = "tdx" @@ -17,6 +19,23 @@ const ( snpKvmParameterPath = "/sys/module/kvm_amd/parameters/sev_snp" ) +// TDX is supported and properly loaded when the firmware directory (either tdx or tdx_seam) exists or `tdx` is part of the CPU flag +func checkTdxGuestProtection(flags map[string]bool) bool { + if flags[tdxCPUFlag] { + return true + } + + if d, err := os.Stat(tdxSysFirmwareDir); err == nil && d.IsDir() { + return true + } + + if d, err := os.Stat(tdxSeamSysFirmwareDir); err == nil && d.IsDir() { + return true + } + + return false +} + // Implementation of this function is architecture specific func availableGuestProtection() (guestProtection, error) { flags, err := CPUFlags(procCPUInfo) @@ -24,10 +43,10 @@ func availableGuestProtection() (guestProtection, error) { return noneProtection, err } - // TDX is supported and properly loaded when the firmware directory exists or `tdx` is part of the CPU flags - if d, err := os.Stat(tdxSysFirmwareDir); (err == nil && d.IsDir()) || flags[tdxCPUFlag] { + if checkTdxGuestProtection(flags) { return tdxProtection, nil } + // SEV-SNP is supported and enabled when the kvm module `sev_snp` parameter is set to `Y` // SEV-SNP support infers SEV (-ES) support if _, err := os.Stat(snpKvmParameterPath); err == nil { From 25b3cdd38c12f03aab115fe6b5681e6d26ea49d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 10:03:54 +0100 Subject: [PATCH 088/137] virtcontainers: Drop check for the `tdx` CPU flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit In the recent kernels provided by Intel the `tdx` CPU flag is not present anymore. Signed-off-by: Fabiano Fidêncio --- src/runtime/virtcontainers/hypervisor_linux_amd64.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/runtime/virtcontainers/hypervisor_linux_amd64.go b/src/runtime/virtcontainers/hypervisor_linux_amd64.go index 304d0446a..043b36c9f 100644 --- a/src/runtime/virtcontainers/hypervisor_linux_amd64.go +++ b/src/runtime/virtcontainers/hypervisor_linux_amd64.go @@ -12,8 +12,6 @@ const ( tdxSysFirmwareDir = "/sys/firmware/tdx/" - tdxCPUFlag = "tdx" - sevKvmParameterPath = "/sys/module/kvm_amd/parameters/sev" snpKvmParameterPath = "/sys/module/kvm_amd/parameters/sev_snp" @@ -21,10 +19,6 @@ const ( // TDX is supported and properly loaded when the firmware directory (either tdx or tdx_seam) exists or `tdx` is part of the CPU flag func checkTdxGuestProtection(flags map[string]bool) bool { - if flags[tdxCPUFlag] { - return true - } - if d, err := os.Stat(tdxSysFirmwareDir); err == nil && d.IsDir() { return true } From ed145365ecadf35b6a5050b4bf58f71c3c4a5d66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Feb 2023 14:30:24 +0100 Subject: [PATCH 089/137] runtime/qemu: Drop "kvm-type=tdx" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is not supported since 22ww49. Signed-off-by: Fabiano Fidêncio --- src/runtime/virtcontainers/qemu_amd64.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/runtime/virtcontainers/qemu_amd64.go b/src/runtime/virtcontainers/qemu_amd64.go index ff8104b8e..4ecf0804a 100644 --- a/src/runtime/virtcontainers/qemu_amd64.go +++ b/src/runtime/virtcontainers/qemu_amd64.go @@ -233,7 +233,7 @@ func (q *qemuAmd64) enableProtection() error { if q.qemuMachine.Options != "" { q.qemuMachine.Options += "," } - q.qemuMachine.Options += "kvm-type=tdx,confidential-guest-support=tdx" + q.qemuMachine.Options += "confidential-guest-support=tdx" logger.Info("Enabling TDX guest protection") return nil case sevProtection: From 3c5ffb0c8518c7cde7c89f4bc075dfb2440c0514 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Feb 2023 14:31:51 +0100 Subject: [PATCH 090/137] govmm: Set "sept-ve-disable=on" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is needed since 22ww49. Signed-off-by: Fabiano Fidêncio --- src/runtime/pkg/govmm/qemu/qemu.go | 1 + 1 file changed, 1 insertion(+) diff --git a/src/runtime/pkg/govmm/qemu/qemu.go b/src/runtime/pkg/govmm/qemu/qemu.go index 43707c65e..520978b06 100644 --- a/src/runtime/pkg/govmm/qemu/qemu.go +++ b/src/runtime/pkg/govmm/qemu/qemu.go @@ -343,6 +343,7 @@ func (object Object) QemuParams(config *Config) []string { case TDXGuest: objectParams = append(objectParams, string(object.Type)) + objectParams = append(objectParams, "sept-ve-disable=on") objectParams = append(objectParams, fmt.Sprintf("id=%s", object.ID)) if object.Debug { objectParams = append(objectParams, "debug=on") From 3e158001993cc2356d6ac084e6c82714210c9f24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 21:21:49 +0100 Subject: [PATCH 091/137] govmm: Directly pass the firmware using -bios with TDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since TDX doesn't support readonly memslot, TDVF cannot be mapped as pflash device and it actually works as RAM. 
"-bios" option is chosen to load TDVF. OVMF is the opensource firmware that implements the TDVF support. Thus the command line to specify and load TDVF is ``-bios OVMF.fd`` Signed-off-by: Fabiano Fidêncio --- src/runtime/pkg/govmm/qemu/qemu.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/runtime/pkg/govmm/qemu/qemu.go b/src/runtime/pkg/govmm/qemu/qemu.go index 520978b06..06d7e5895 100644 --- a/src/runtime/pkg/govmm/qemu/qemu.go +++ b/src/runtime/pkg/govmm/qemu/qemu.go @@ -348,12 +348,7 @@ func (object Object) QemuParams(config *Config) []string { if object.Debug { objectParams = append(objectParams, "debug=on") } - deviceParams = append(deviceParams, string(object.Driver)) - deviceParams = append(deviceParams, fmt.Sprintf("id=%s", object.DeviceID)) - deviceParams = append(deviceParams, fmt.Sprintf("file=%s", object.File)) - if object.FirmwareVolume != "" { - deviceParams = append(deviceParams, fmt.Sprintf("config-firmware-volume=%s", object.FirmwareVolume)) - } + config.Bios = object.File case SEVGuest: fallthrough case SNPGuest: From 98682805be4e9f7d440de3ef97a2087f059cc15f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 23 Mar 2023 08:42:06 +0100 Subject: [PATCH 092/137] config: Add configuration for QEMU TDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As the QEMU configuration for TDX differs quite a lot from the normal QEMU configuration, let's add a new configuration file for the QEMU TDX. Signed-off-by: Fabiano Fidêncio --- src/runtime/Makefile | 38 + src/runtime/arch/amd64-options.mk | 2 + .../config/configuration-qemu-tdx.toml.in | 686 ++++++++++++++++++ 3 files changed, 726 insertions(+) create mode 100644 src/runtime/config/configuration-qemu-tdx.toml.in diff --git a/src/runtime/Makefile b/src/runtime/Makefile index 95efaff78..6f719f1cb 100644 --- a/src/runtime/Makefile +++ b/src/runtime/Makefile @@ -95,6 +95,7 @@ GENERATED_VARS = \ VERSION \ CONFIG_ACRN_IN \ CONFIG_QEMU_IN \ + CONFIG_QEMU_TDX_IN \ CONFIG_CLH_IN \ CONFIG_FC_IN \ $(USER_VARS) @@ -121,6 +122,9 @@ DEFROOTFSTYPE := $(ROOTFSTYPE_EXT4) FIRMWAREPATH := FIRMWAREVOLUMEPATH := +FIRMWARETDVFPATH := $(PREFIXDEPS)/share/tdvf/OVMF.fd +FIRMWARETDVFVOLUMEPATH := + # Name of default configuration file the runtime will use. 
CONFIG_FILE = configuration.toml @@ -138,6 +142,9 @@ HYPERVISORS := $(HYPERVISOR_ACRN) $(HYPERVISOR_FC) $(HYPERVISOR_QEMU) $(HYPERVIS QEMUPATH := $(QEMUBINDIR)/$(QEMUCMD) QEMUVALIDHYPERVISORPATHS := [\"$(QEMUPATH)\"] +QEMUTDXPATH := $(QEMUBINDIR)/$(QEMUTDXCMD) +QEMUTDXVALIDHYPERVISORPATHS := [\"$(QEMUTDXPATH)\"] + QEMUVIRTIOFSPATH := $(QEMUBINDIR)/$(QEMUVIRTIOFSCMD) CLHPATH := $(CLHBINDIR)/$(CLHCMD) @@ -195,6 +202,7 @@ DEFVALIDENTROPYSOURCES := [\"/dev/urandom\",\"/dev/random\",\"\"] DEFDISABLEBLOCK := false DEFSHAREDFS_CLH_VIRTIOFS := virtio-fs DEFSHAREDFS_QEMU_VIRTIOFS := virtio-fs +DEFSHAREDFS_QEMU_TDX_VIRTIOFS := virtio-9p DEFVIRTIOFSDAEMON := $(LIBEXECDIR)/virtiofsd ifeq ($(ARCH),ppc64le) DEFVIRTIOFSDAEMON := $(LIBEXECDIR)/qemu/virtiofsd @@ -265,13 +273,30 @@ ifneq (,$(QEMUCMD)) CONFIGS += $(CONFIG_QEMU) + CONFIG_FILE_QEMU_TDX = configuration-qemu-tdx.toml + CONFIG_QEMU_TDX = config/$(CONFIG_FILE_QEMU_TDX) + CONFIG_QEMU_TDX_IN = $(CONFIG_QEMU_TDX).in + + CONFIG_PATH_QEMU_TDX = $(abspath $(CONFDIR)/$(CONFIG_FILE_QEMU_TDX)) + CONFIG_PATHS += $(CONFIG_PATH_QEMU_TDX) + + SYSCONFIG_QEMU_TDX = $(abspath $(SYSCONFDIR)/$(CONFIG_FILE_QEMU_TDX)) + SYSCONFIG_PATHS_TDX += $(SYSCONFIG_QEMU_TDX) + + CONFIGS += $(CONFIG_QEMU_TDX) + # qemu-specific options (all should be suffixed by "_QEMU") DEFBLOCKSTORAGEDRIVER_QEMU := virtio-scsi DEFBLOCKDEVICEAIO_QEMU := io_uring DEFNETWORKMODEL_QEMU := tcfilter + KERNELTYPE = uncompressed KERNELNAME = $(call MAKE_KERNEL_NAME,$(KERNELTYPE)) KERNELPATH = $(KERNELDIR)/$(KERNELNAME) + + KERNELTDXTYPE = compressed + KERNELTDXNAME = $(call MAKE_KERNEL_TDX_NAME,$(KERNELTDXTYPE)) + KERNELTDXPATH = $(KERNELDIR)/$(KERNELTDXNAME) endif ifneq (,$(CLHCMD)) @@ -427,15 +452,20 @@ USER_VARS += KERNELTYPE_ACRN USER_VARS += KERNELTYPE_CLH USER_VARS += KERNELPATH_ACRN USER_VARS += KERNELPATH +USER_VARS += KERNELTDXPATH USER_VARS += KERNELPATH_CLH USER_VARS += KERNELPATH_FC USER_VARS += KERNELVIRTIOFSPATH USER_VARS += FIRMWAREPATH +USER_VARS += FIRMWARETDVFPATH USER_VARS += FIRMWAREVOLUMEPATH +USER_VARS += FIRMWARETDVFVOLUMEPATH USER_VARS += MACHINEACCELERATORS USER_VARS += CPUFEATURES +USER_VARS += TDXCPUFEATURES USER_VARS += DEFMACHINETYPE_CLH USER_VARS += KERNELPARAMS +USER_VARS += KERNELTDXPARAMS USER_VARS += LIBEXECDIR USER_VARS += LOCALSTATEDIR USER_VARS += PKGDATADIR @@ -451,8 +481,11 @@ USER_VARS += PROJECT_TYPE USER_VARS += PROJECT_URL USER_VARS += QEMUBINDIR USER_VARS += QEMUCMD +USER_VARS += QEMUTDXCMD USER_VARS += QEMUPATH +USER_VARS += QEMUTDXPATH USER_VARS += QEMUVALIDHYPERVISORPATHS +USER_VARS += QEMUTDXVALIDHYPERVISORPATHS USER_VARS += QEMUVIRTIOFSCMD USER_VARS += QEMUVIRTIOFSPATH USER_VARS += RUNTIME_NAME @@ -482,6 +515,7 @@ USER_VARS += DEFBLOCKSTORAGEDRIVER_QEMU USER_VARS += DEFBLOCKDEVICEAIO_QEMU USER_VARS += DEFSHAREDFS_CLH_VIRTIOFS USER_VARS += DEFSHAREDFS_QEMU_VIRTIOFS +USER_VARS += DEFSHAREDFS_QEMU_TDX_VIRTIOFS USER_VARS += DEFVIRTIOFSDAEMON USER_VARS += DEFVALIDVIRTIOFSDAEMONPATHS USER_VARS += DEFVIRTIOFSCACHESIZE @@ -587,6 +621,10 @@ define MAKE_KERNEL_VIRTIOFS_NAME $(if $(findstring uncompressed,$1),vmlinux-virtiofs.container,vmlinuz-virtiofs.container) endef +define MAKE_KERNEL_TDX_NAME +$(if $(findstring uncompressed,$1),vmlinux-tdx.container,vmlinuz-tdx.container) +endef + GENERATED_FILES += pkg/katautils/config-settings.go $(RUNTIME_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST) | show-summary diff --git a/src/runtime/arch/amd64-options.mk b/src/runtime/arch/amd64-options.mk index 6c1c9e967..ab82539ba 100644 --- 
a/src/runtime/arch/amd64-options.mk +++ b/src/runtime/arch/amd64-options.mk @@ -11,6 +11,8 @@ MACHINEACCELERATORS := CPUFEATURES := pmu=off QEMUCMD := qemu-system-x86_64 +QEMUTDXCMD := qemu-system-x86_64-tdx-experimental +TDXCPUFEATURES := -vmx-rdseed-exit,pmu=off # Firecracker binary name FCCMD := firecracker diff --git a/src/runtime/config/configuration-qemu-tdx.toml.in b/src/runtime/config/configuration-qemu-tdx.toml.in new file mode 100644 index 000000000..6cecabdba --- /dev/null +++ b/src/runtime/config/configuration-qemu-tdx.toml.in @@ -0,0 +1,686 @@ +# Copyright (c) 2017-2019 Intel Corporation +# Copyright (c) 2021 Adobe Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# XXX: WARNING: this file is auto-generated. +# XXX: +# XXX: Source file: "@CONFIG_QEMU_IN@" +# XXX: Project: +# XXX: Name: @PROJECT_NAME@ +# XXX: Type: @PROJECT_TYPE@ + +[hypervisor.qemu] +path = "@QEMUTDXPATH@" +kernel = "@KERNELTDXPATH@" +image = "@IMAGEPATH@" +# initrd = "@INITRDPATH@" +machine_type = "@MACHINETYPE@" + +# rootfs filesystem type: +# - ext4 (default) +# - xfs +# - erofs +rootfs_type=@DEFROOTFSTYPE@ + +# Enable confidential guest support. +# Toggling that setting may trigger different hardware features, ranging +# from memory encryption to both memory and CPU-state encryption and integrity. +# The Kata Containers runtime dynamically detects the available feature set and +# aims at enabling the largest possible one, returning an error if none is +# available, or none is supported by the hypervisor. +# +# Known limitations: +# * Does not work by design: +# - CPU Hotplug +# - Memory Hotplug +# - NVDIMM devices +# +# Default false +confidential_guest = true + +# Enable running QEMU VMM as a non-root user. +# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as +# a non-root random user. See documentation for the limitations of this mode. +# rootless = true + +# List of valid annotation names for the hypervisor +# Each member of the list is a regular expression, which is the base name +# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path" +enable_annotations = @DEFENABLEANNOTATIONS@ + +# List of valid annotations values for the hypervisor +# Each member of the list is a path pattern as described by glob(3). +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: @QEMUVALIDHYPERVISORPATHS@ +valid_hypervisor_paths = @QEMUTDXVALIDHYPERVISORPATHS@ + +# Optional space-separated list of options to pass to the guest kernel. +# For example, use `kernel_params = "vsyscall=emulate"` if you are having +# trouble running pre-2.15 glibc. +# +# WARNING: - any parameter specified here will take priority over the default +# parameter value of the same name used to start the virtual machine. +# Do not set values here unless you understand the impact of doing so as you +# may stop the virtual machine from booting. +# To see the list of default parameters, enable hypervisor debug, create a +# container and look for 'default-kernel-parameters' log entries. +kernel_params = "@KERNELTDXPARAMS@" + +# Path to the firmware. +# If you want that qemu uses the default firmware leave this option empty +firmware = "@FIRMWARETDVFPATH@" + +# Path to the firmware volume. +# firmware TDVF or OVMF can be split into FIRMWARE_VARS.fd (UEFI variables +# as configuration) and FIRMWARE_CODE.fd (UEFI program image). UEFI variables +# can be customized per each user while UEFI code is kept same. 
+firmware_volume = "@FIRMWARETDVFVOLUMEPATH@" + +# Machine accelerators +# comma-separated list of machine accelerators to pass to the hypervisor. +# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"` +machine_accelerators="@MACHINEACCELERATORS@" + +# Qemu seccomp sandbox feature +# comma-separated list of seccomp sandbox features to control the syscall access. +# For example, `seccompsandbox= "on,obsolete=deny,spawn=deny,resourcecontrol=deny"` +# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox +# Another note: enabling this feature may reduce performance, you may enable +# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html +#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@" + +# CPU features +# comma-separated list of cpu features to pass to the cpu +# For example, `cpu_features = "pmu=off,vmx=off" +cpu_features="@TDXCPUFEATURES@" + +# Default number of vCPUs per SB/VM: +# unspecified or 0 --> will be set to @DEFVCPUS@ +# < 0 --> will be set to the actual number of physical cores +# > 0 <= number of physical cores --> will be set to the specified number +# > number of physical cores --> will be set to the actual number of physical cores +default_vcpus = 1 + +# Default maximum number of vCPUs per SB/VM: +# unspecified or == 0 --> will be set to the actual number of physical cores or to the maximum number +# of vCPUs supported by KVM if that number is exceeded +# > 0 <= number of physical cores --> will be set to the specified number +# > number of physical cores --> will be set to the actual number of physical cores or to the maximum number +# of vCPUs supported by KVM if that number is exceeded +# WARNING: Depending of the architecture, the maximum number of vCPUs supported by KVM is used when +# the actual number of physical cores is greater than it. +# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU +# the hotplug functionality. For example, `default_maxvcpus = 240` specifies that until 240 vCPUs +# can be added to a SB/VM, but the memory footprint will be big. Another example, with +# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of +# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable, +# unless you know what are you doing. +# NOTICE: on arm platform with gicv2 interrupt controller, set it to 8. +default_maxvcpus = @DEFMAXVCPUS@ + +# Bridges can be used to hot plug devices. +# Limitations: +# * Currently only pci bridges are supported +# * Until 30 devices per bridge can be hot plugged. +# * Until 5 PCI bridges can be cold plugged per VM. +# This limitation could be a bug in qemu or in the kernel +# Default number of bridges per SB/VM: +# unspecified or 0 --> will be set to @DEFBRIDGES@ +# > 1 <= 5 --> will be set to the specified number +# > 5 --> will be set to 5 +default_bridges = @DEFBRIDGES@ + +# Default memory size in MiB for SB/VM. +# If unspecified then it will be set @DEFMEMSZ@ MiB. +default_memory = @DEFMEMSZ@ +# +# Default memory slots per SB/VM. +# If unspecified then it will be set @DEFMEMSLOTS@. +# This is will determine the times that memory will be hotadded to sandbox/VM. 
+#memory_slots = @DEFMEMSLOTS@ + +# Default maximum memory in MiB per SB / VM +# unspecified or == 0 --> will be set to the actual amount of physical RAM +# > 0 <= amount of physical RAM --> will be set to the specified number +# > amount of physical RAM --> will be set to the actual amount of physical RAM +default_maxmemory = @DEFMAXMEMSZ@ + +# The size in MiB will be plused to max memory of hypervisor. +# It is the memory address space for the NVDIMM devie. +# If set block storage driver (block_device_driver) to "nvdimm", +# should set memory_offset to the size of block device. +# Default 0 +#memory_offset = 0 + +# Specifies virtio-mem will be enabled or not. +# Please note that this option should be used with the command +# "echo 1 > /proc/sys/vm/overcommit_memory". +# Default false +#enable_virtio_mem = true + +# Disable block device from being used for a container's rootfs. +# In case of a storage driver like devicemapper where a container's +# root file system is backed by a block device, the block device is passed +# directly to the hypervisor for performance reasons. +# This flag prevents the block device from being passed to the hypervisor, +# virtio-fs is used instead to pass the rootfs. +disable_block_device_use = @DEFDISABLEBLOCK@ + +# Shared file system type: +# - virtio-fs (default) +# - virtio-9p +# - virtio-fs-nydus +shared_fs = "@DEFSHAREDFS_QEMU_TDX_VIRTIOFS@" + +# Path to vhost-user-fs daemon. +virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@" + +# List of valid annotations values for the virtiofs daemon +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: @DEFVALIDVIRTIOFSDAEMONPATHS@ +valid_virtio_fs_daemon_paths = @DEFVALIDVIRTIOFSDAEMONPATHS@ + +# Default size of DAX cache in MiB +virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@ + +# Default size of virtqueues +virtio_fs_queue_size = @DEFVIRTIOFSQUEUESIZE@ + +# Extra args for virtiofsd daemon +# +# Format example: +# ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"] +# Examples: +# Set virtiofsd log level to debug : ["-o", "log_level=debug"] or ["-d"] +# +# see `virtiofsd -h` for possible options. +virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@ + +# Cache mode: +# +# - never +# Metadata, data, and pathname lookup are not cached in guest. They are +# always fetched from host and any changes are immediately pushed to host. +# +# - auto +# Metadata and pathname lookup cache expires after a configured amount of +# time (default is 1 second). Data is cached while the file is open (close +# to open consistency). +# +# - always +# Metadata, data, and pathname lookup are cached in guest and never expire. +virtio_fs_cache = "@DEFVIRTIOFSCACHE@" + +# Block storage driver to be used for the hypervisor in case the container +# rootfs is backed by a block device. This is virtio-scsi, virtio-blk +# or nvdimm. +block_device_driver = "@DEFBLOCKSTORAGEDRIVER_QEMU@" + +# aio is the I/O mechanism used by qemu +# Options: +# +# - threads +# Pthread based disk I/O. +# +# - native +# Native Linux I/O. +# +# - io_uring +# Linux io_uring API. This provides the fastest I/O operations on Linux, requires kernel>5.1 and +# qemu >=5.0. +block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@" + +# Specifies cache-related options will be set to block devices or not. +# Default false +#block_device_cache_set = true + +# Specifies cache-related options for block devices. +# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled. 
+# Default false +#block_device_cache_direct = true + +# Specifies cache-related options for block devices. +# Denotes whether flush requests for the device are ignored. +# Default false +#block_device_cache_noflush = true + +# Enable iothreads (data-plane) to be used. This causes IO to be +# handled in a separate IO thread. This is currently only implemented +# for SCSI. +# +enable_iothreads = @DEFENABLEIOTHREADS@ + +# Enable pre allocation of VM RAM, default false +# Enabling this will result in lower container density +# as all of the memory will be allocated and locked +# This is useful when you want to reserve all the memory +# upfront or in the cases where you want memory latencies +# to be very predictable +# Default false +#enable_mem_prealloc = true + +# Enable huge pages for VM RAM, default false +# Enabling this will result in the VM memory +# being allocated using huge pages. +# This is useful when you want to use vhost-user network +# stacks within the container. This will automatically +# result in memory pre allocation +#enable_hugepages = true + +# Enable vhost-user storage device, default false +# Enabling this will result in some Linux reserved block type +# major range 240-254 being chosen to represent vhost-user devices. +enable_vhost_user_store = @DEFENABLEVHOSTUSERSTORE@ + +# The base directory specifically used for vhost-user devices. +# Its sub-path "block" is used for block devices; "block/sockets" is +# where we expect vhost-user sockets to live; "block/devices" is where +# simulated block device nodes for vhost-user devices to live. +vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@" + +# Enable vIOMMU, default false +# Enabling this will result in the VM having a vIOMMU device +# This will also add the following options to the kernel's +# command line: intel_iommu=on,iommu=pt +#enable_iommu = true + +# Enable IOMMU_PLATFORM, default false +# Enabling this will result in the VM device having iommu_platform=on set +#enable_iommu_platform = true + +# List of valid annotations values for the vhost user store path +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: @DEFVALIDVHOSTUSERSTOREPATHS@ +valid_vhost_user_store_paths = @DEFVALIDVHOSTUSERSTOREPATHS@ + +# The timeout for reconnecting on non-server spdk sockets when the remote end goes away. +# qemu will delay this many seconds and then attempt to reconnect. +# Zero disables reconnecting, and the default is zero. +vhost_user_reconnect_timeout_sec = 0 + +# Enable file based guest memory support. The default is an empty string which +# will disable this feature. In the case of virtio-fs, this is enabled +# automatically and '/dev/shm' is used as the backing folder. +# This option will be ignored if VM templating is enabled. +#file_mem_backend = "@DEFFILEMEMBACKEND@" + +# List of valid annotations values for the file_mem_backend annotation +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: @DEFVALIDFILEMEMBACKENDS@ +valid_file_mem_backends = @DEFVALIDFILEMEMBACKENDS@ + +# -pflash can add image file to VM. The arguments of it should be in format +# of ["/path/to/flash0.img", "/path/to/flash1.img"] +pflashes = [] + +# This option changes the default hypervisor and kernel parameters +# to enable debug output where available. And Debug also enable the hmp socket. +# +# Default false +#enable_debug = true + +# Disable the customizations done in the runtime when it detects +# that it is running on top a VMM. 
This will result in the runtime +# behaving as it would when running on bare metal. +# +#disable_nesting_checks = true + +# This is the msize used for 9p shares. It is the number of bytes +# used for 9p packet payload. +#msize_9p = @DEFMSIZE9P@ + +# If false and nvdimm is supported, use nvdimm device to plug guest image. +# Otherwise virtio-block device is used. +# +# nvdimm is not supported when `confidential_guest = true`. +# +# Default is false +#disable_image_nvdimm = true + +# VFIO devices are hotplugged on a bridge by default. +# Enable hotplugging on root bus. This may be required for devices with +# a large PCI bar, as this is a current limitation with hotplugging on +# a bridge. +# Default false +#hotplug_vfio_on_root_bus = true + +# Before hot plugging a PCIe device, you need to add a pcie_root_port device. +# Use this parameter when using some large PCI bar devices, such as Nvidia GPU +# The value means the number of pcie_root_port +# This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35" +# Default 0 +#pcie_root_port = 2 + +# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off +# security (vhost-net runs ring0) for network I/O performance. +#disable_vhost_net = true + +# +# Default entropy source. +# The path to a host source of entropy (including a real hardware RNG) +# /dev/urandom and /dev/random are two main options. +# Be aware that /dev/random is a blocking source of entropy. If the host +# runs out of entropy, the VMs boot time will increase leading to get startup +# timeouts. +# The source of entropy /dev/urandom is non-blocking and provides a +# generally acceptable source of entropy. It should work well for pretty much +# all practical purposes. +#entropy_source= "@DEFENTROPYSOURCE@" + +# List of valid annotations values for entropy_source +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: @DEFVALIDENTROPYSOURCES@ +valid_entropy_sources = @DEFVALIDENTROPYSOURCES@ + +# Path to OCI hook binaries in the *guest rootfs*. +# This does not affect host-side hooks which must instead be added to +# the OCI spec passed to the runtime. +# +# You can create a rootfs with hooks by customizing the osbuilder scripts: +# https://github.com/kata-containers/kata-containers/tree/main/tools/osbuilder +# +# Hooks must be stored in a subdirectory of guest_hook_path according to their +# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}". +# The agent will scan these directories for executable files and add them, in +# lexicographical order, to the lifecycle of the guest container. +# Hooks are executed in the runtime namespace of the guest. See the official documentation: +# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks +# Warnings will be logged if any error is encountered while scanning for hooks, +# but it will not abort container execution. +#guest_hook_path = "/usr/share/oci/hooks" +# +# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM). +# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic. +# Default 0-sized value means unlimited rate. +#rx_rate_limiter_max_rate = 0 +# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM). +# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block) +# to discipline traffic. +# Default 0-sized value means unlimited rate. 
+#tx_rate_limiter_max_rate = 0 + +# Set where to save the guest memory dump file. +# If set, when a GUEST_PANICKED event occurs, +# guest memory will be dumped to host filesystem under guest_memory_dump_path, +# This directory will be created automatically if it does not exist. +# +# The dumped file (also called vmcore) can be processed with crash or gdb. +# +# WARNING: +# Dumping the guest's memory can take very long depending on the amount of guest memory +# and can use a lot of disk space. +#guest_memory_dump_path="/var/crash/kata" + +# Whether to enable paging. +# Basically, if you want to use "gdb" rather than "crash", +# or need the guest-virtual addresses in the ELF vmcore, +# then you should enable paging. +# +# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details +#guest_memory_dump_paging=false + +# Enable swap in the guest. Default false. +# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device +# if the swappiness of a container (set by annotation "io.katacontainers.container.resource.swappiness") +# is bigger than 0. +# The size of the swap device should be +# swap_in_bytes (set by annotation "io.katacontainers.container.resource.swap_in_bytes") - memory_limit_in_bytes. +# If swap_in_bytes is not set, the size should be memory_limit_in_bytes. +# If swap_in_bytes and memory_limit_in_bytes is not set, the size should +# be default_memory. +#enable_guest_swap = true + +# use legacy serial for guest console if available and implemented for architecture. Default false +#use_legacy_serial = true + +# disable applying SELinux on the VMM process (default false) +disable_selinux=@DEFDISABLESELINUX@ + +# disable applying SELinux on the container process +# If set to false, the type `container_t` is applied to the container process by default. +# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built +# with `SELINUX=yes`. +# (default: true) +disable_guest_selinux=@DEFDISABLEGUESTSELINUX@ + + +[factory] +# VM templating support. Once enabled, new VMs are created from template +# using vm cloning. They will share the same initial kernel, initramfs and +# agent memory by mapping it readonly. It helps speed up new container +# creation and saves a lot of memory if there are many kata containers running +# on the same host. +# +# When disabled, new VMs are created from scratch. +# +# Note: Requires "initrd=" to be set ("image=" is not supported). +# +# Default false +#enable_template = true + +# Specifies the path of template. +# +# Default "/run/vc/vm/template" +#template_path = "/run/vc/vm/template" + +# The number of caches of VMCache: +# unspecified or == 0 --> VMCache is disabled +# > 0 --> will be set to the specified number +# +# VMCache is a function that creates VMs as caches before using it. +# It helps speed up new container creation. +# The function consists of a server and some clients communicating +# through Unix socket. The protocol is gRPC in protocols/cache/cache.proto. +# The VMCache server will create some VMs and cache them by factory cache. +# It will convert the VM to gRPC format and transport it when it gets a +# request from clients. +# Factory grpccache is the VMCache client. It will request gRPC format +# VM and convert it back to a VM. If VMCache function is enabled, +# kata-runtime will request VM from factory grpccache when it creates +# a new sandbox. +# +# Default 0 +#vm_cache_number = 0 + +# Specify the address of the Unix socket that is used by VMCache. 
+# +# Default /var/run/kata-containers/cache.sock +#vm_cache_endpoint = "/var/run/kata-containers/cache.sock" + +[agent.@PROJECT_TYPE@] +# If enabled, make the agent display debug-level messages. +# (default: disabled) +#enable_debug = true + +# Enable agent tracing. +# +# If enabled, the agent will generate OpenTelemetry trace spans. +# +# Notes: +# +# - If the runtime also has tracing enabled, the agent spans will be +# associated with the appropriate runtime parent span. +# - If enabled, the runtime will wait for the container to shutdown, +# increasing the container shutdown time slightly. +# +# (default: disabled) +#enable_tracing = true + +# Comma separated list of kernel modules and their parameters. +# These modules will be loaded in the guest kernel using modprobe(8). +# The following example can be used to load two kernel modules with parameters +# - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"] +# The first word is considered as the module name and the rest as its parameters. +# Container will not be started when: +# * A kernel module is specified and the modprobe command is not installed in the guest +# or it fails loading the module. +# * The module is not available in the guest or it doesn't met the guest kernel +# requirements, like architecture and version. +# +kernel_modules=[] + +# Enable debug console. + +# If enabled, user can connect guest OS running inside hypervisor +# through "kata-runtime exec " command + +#debug_console_enabled = true + +# Agent connection dialing timeout value in seconds +# (default: 30) +#dial_timeout = 30 + +[runtime] +# If enabled, the runtime will log additional debug messages to the +# system log +# (default: disabled) +#enable_debug = true +# +# Internetworking model +# Determines how the VM should be connected to the +# the container network interface +# Options: +# +# - macvtap +# Used when the Container network interface can be bridged using +# macvtap. +# +# - none +# Used when customize network. Only creates a tap device. No veth pair. +# +# - tcfilter +# Uses tc filter rules to redirect traffic from the network interface +# provided by plugin to a tap interface connected to the VM. +# +internetworking_model="@DEFNETWORKMODEL_QEMU@" + +# disable guest seccomp +# Determines whether container seccomp profiles are passed to the virtual +# machine and applied by the kata agent. If set to true, seccomp is not applied +# within the guest +# (default: true) +disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@ + +# vCPUs pinning settings +# if enabled, each vCPU thread will be scheduled to a fixed CPU +# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet) +# enable_vcpus_pinning = false + +# Apply a custom SELinux security policy to the container process inside the VM. +# This is used when you want to apply a type other than the default `container_t`, +# so general users should not uncomment and apply it. +# (format: "user:role:type") +# Note: You cannot specify MCS policy with the label because the sensitivity levels and +# categories are determined automatically by high-level container runtimes such as containerd. +#guest_selinux_label="@DEFGUESTSELINUXLABEL@" + +# If enabled, the runtime will create opentracing.io traces and spans. +# (See https://www.jaegertracing.io/docs/getting-started). +# (default: disabled) +#enable_tracing = true + +# Set the full url to the Jaeger HTTP Thrift collector. 
+# The default if not set will be "http://localhost:14268/api/traces" +#jaeger_endpoint = "" + +# Sets the username to be used if basic auth is required for Jaeger. +#jaeger_user = "" + +# Sets the password to be used if basic auth is required for Jaeger. +#jaeger_password = "" + +# If enabled, the runtime will not create a network namespace for shim and hypervisor processes. +# This option may have some potential impacts to your host. It should only be used when you know what you're doing. +# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only +# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge +# (like OVS) directly. +# (default: false) +#disable_new_netns = true + +# if enabled, the runtime will add all the kata processes inside one dedicated cgroup. +# The container cgroups in the host are not created, just one single cgroup per sandbox. +# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox. +# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation. +# The sandbox cgroup is constrained if there is no container type annotation. +# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType +sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@ + +# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In +# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful +# when a hardware architecture or hypervisor solutions is utilized which does not support CPU and/or memory hotplug. +# Compatibility for determining appropriate sandbox (VM) size: +# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O +# does not yet support sandbox sizing annotations. +# - When running single containers using a tool like ctr, container sizing information will be available. +static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT@ + +# If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path. +# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory. +# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts` +# These will not be exposed to the container workloads, and are only provided for potential guest services. +sandbox_bind_mounts=@DEFBINDMOUNTS@ + +# VFIO Mode +# Determines how VFIO devices should be be presented to the container. +# Options: +# +# - vfio +# Matches behaviour of OCI runtimes (e.g. runc) as much as +# possible. VFIO devices will appear in the container as VFIO +# character devices under /dev/vfio. The exact names may differ +# from the host (they need to match the VM's IOMMU group numbers +# rather than the host's) +# +# - guest-kernel +# This is a Kata-specific behaviour that's useful in certain cases. +# The VFIO device is managed by whatever driver in the VM kernel +# claims it. This means it will appear as one or more device nodes +# or network interfaces depending on the nature of the device. +# Using this mode requires specially built workloads that know how +# to locate the relevant device interfaces within the VM. 
+# +vfio_mode="@DEFVFIOMODE@" + +# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will +# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest. +disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@ + +# Enabled experimental feature list, format: ["a", "b"]. +# Experimental features are features not stable enough for production, +# they may break compatibility, and are prepared for a big version bump. +# Supported experimental features: +# (default: []) +experimental=@DEFAULTEXPFEATURES@ + +# If enabled, user can run pprof tools with shim v2 process through kata-monitor. +# (default: false) +# enable_pprof = true + +# WARNING: All the options in the following section have not been implemented yet. +# This section was added as a placeholder. DO NOT USE IT! +[image] +# Container image service. +# +# Offload the CRI image management service to the Kata agent. +# (default: false) +#service_offload = true + +# Container image decryption keys provisioning. +# Applies only if service_offload is true. +# Keys can be provisioned locally (e.g. through a special command or +# a local file) or remotely (usually after the guest is remotely attested). +# The provision setting is a complete URL that lets the Kata agent decide +# which method to use in order to fetch the keys. +# +# Keys can be stored in a local file, in a measured and attested initrd: +#provision=data:///local/key/file +# +# Keys could be fetched through a special command or binary from the +# initrd (guest) image, e.g. a firmware call: +#provision=file:///path/to/bin/fetcher/in/guest +# +# Keys can be remotely provisioned. The Kata agent fetches them from e.g. +# a HTTPS URL: +#provision=https://my-key-broker.foo/tenant/ From 5a0727ecb4ed17a80bf5ca05ea853db770bf2930 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Mon, 27 Mar 2023 14:54:04 +0200 Subject: [PATCH 093/137] kata-deploy: Ship kata-qemu-tdx runtimeClass MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's make sure we configure containerd for the kata-qemu-tdx handler and ship the kata-qemu-tdx runtime class for kubernetes. 
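Once this is deployed, a workload can opt in to the new handler by
setting the runtime class on its pod spec; a minimal sketch (the pod and
image names are made up, and --overrides is just one convenient way to
set the field):

  $ kubectl run tdx-test --image=busybox --restart=Never \
      --overrides='{"apiVersion": "v1", "spec": {"runtimeClassName": "kata-qemu-tdx"}}' \
      -- sleep 3600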
Fixes: #6537 Signed-off-by: Fabiano Fidêncio --- .../runtimeclasses/kata-runtimeClasses.yaml | 13 +++++++++++++ tools/packaging/kata-deploy/scripts/kata-deploy.sh | 1 + 2 files changed, 14 insertions(+) diff --git a/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml b/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml index d3260d4a8..daa4d1e2f 100644 --- a/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml +++ b/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml @@ -14,6 +14,19 @@ scheduling: --- kind: RuntimeClass apiVersion: node.k8s.io/v1 +metadata: + name: kata-qemu-tdx +handler: kata-qemu-tdx +overhead: + podFixed: + memory: "160Mi" + cpu: "250m" +scheduling: + nodeSelector: + katacontainers.io/kata-runtime: "true" +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1 metadata: name: kata-clh handler: kata-clh diff --git a/tools/packaging/kata-deploy/scripts/kata-deploy.sh b/tools/packaging/kata-deploy/scripts/kata-deploy.sh index e4e48732b..8991e04fc 100755 --- a/tools/packaging/kata-deploy/scripts/kata-deploy.sh +++ b/tools/packaging/kata-deploy/scripts/kata-deploy.sh @@ -16,6 +16,7 @@ containerd_conf_file_backup="${containerd_conf_file}.bak" shims=( "fc" "qemu" + "qemu-tdx" "clh" "dragonball" ) From 69d7a959c8f4736c38556fc39d10e2a73eae61df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Tue, 11 Apr 2023 10:03:11 +0200 Subject: [PATCH 094/137] gha: ci-on-push: Run tests on TDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we've added a TDX capable external runner, let's make sure we also run the basic tests using TDX. Signed-off-by: Fabiano Fidêncio --- .github/workflows/ci-on-push.yaml | 8 ++++ .github/workflows/run-k8s-tests-on-tdx.yaml | 50 +++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 .github/workflows/run-k8s-tests-on-tdx.yaml diff --git a/.github/workflows/ci-on-push.yaml b/.github/workflows/ci-on-push.yaml index 2dfd6c728..6db1cda72 100644 --- a/.github/workflows/ci-on-push.yaml +++ b/.github/workflows/ci-on-push.yaml @@ -28,3 +28,11 @@ jobs: repo: ${{ github.repository_owner }}/kata-deploy-ci tag: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-amd64 secrets: inherit + + run-k8s-tests-on-tdx: + needs: publish-kata-deploy-payload-amd64 + uses: ./.github/workflows/run-k8s-tests-on-tdx.yaml + with: + registry: ghcr.io + repo: ${{ github.repository_owner }}/kata-deploy-ci + tag: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}-amd64 diff --git a/.github/workflows/run-k8s-tests-on-tdx.yaml b/.github/workflows/run-k8s-tests-on-tdx.yaml new file mode 100644 index 000000000..78e5d5a89 --- /dev/null +++ b/.github/workflows/run-k8s-tests-on-tdx.yaml @@ -0,0 +1,50 @@ +name: CI | Run kubernetes tests on TDX +on: + workflow_call: + inputs: + registry: + required: true + type: string + repo: + required: true + type: string + tag: + required: true + type: string + +jobs: + run-k8s-tests: + strategy: + fail-fast: false + matrix: + vmm: + - qemu-tdx + runs-on: tdx + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: Deploy kata-deploy + run: | + sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}|g" tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + cat tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + cat 
tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml | grep "${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}" || die "Failed to setup the tests image" + + kubectl apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml + kubectl apply -f tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod + kubectl apply -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml + env: + KUBECONFIG: /etc/rancher/k3s/k3s.yaml + + - name: Run tests + timeout-minutes: 30 + run: | + pushd tests/integration/kubernetes + sed -i -e 's|runtimeClassName: kata|runtimeClassName: kata-${{ matrix.vmm }}|' runtimeclass_workloads/*.yaml + bash run_kubernetes_tests.sh + popd + env: + KATA_HYPERVISOR: ${{ matrix.vmm }} + KUBECONFIG: /etc/rancher/k3s/k3s.yaml From b31f103d1254379cdff870a4d4cb937b70f19030 Mon Sep 17 00:00:00 2001 From: Zhongtao Hu Date: Sun, 9 Apr 2023 21:44:45 +0800 Subject: [PATCH 095/137] runtime-rs: enable nerdctl cni plugin 1. when we use nerdctl to set up the network for kata, no netns is created by nerdctl, so kata needs to create the netns on its own 2. after starting the VM, nerdctl will call the cni plugin via an oci hook; we need to rescan the netns after the interfaces have been created, and hotplug the network device into the VM Fixes:#4693 Signed-off-by: Zhongtao Hu --- src/runtime-rs/Cargo.lock | 13 +++ src/runtime-rs/crates/resource/Cargo.toml | 1 + src/runtime-rs/crates/resource/src/manager.rs | 6 ++ .../crates/resource/src/manager_inner.rs | 63 ++++++------ .../crates/resource/src/network/mod.rs | 2 +- .../src/network/network_with_netns.rs | 1 + .../resource/src/network/utils/netns.rs | 25 +++++ src/runtime-rs/crates/runtimes/Cargo.toml | 3 + .../crates/runtimes/common/src/lib.rs | 2 +- .../crates/runtimes/common/src/sandbox.rs | 8 +- src/runtime-rs/crates/runtimes/src/manager.rs | 67 ++++++++----- .../runtimes/virt_container/src/sandbox.rs | 96 +++++++++++++------ 12 files changed, 205 insertions(+), 82 deletions(-) diff --git a/src/runtime-rs/Cargo.lock b/src/runtime-rs/Cargo.lock index 1a92b385d..db556e9a7 100644 --- a/src/runtime-rs/Cargo.lock +++ b/src/runtime-rs/Cargo.lock @@ -1734,6 +1734,16 @@ dependencies = [ "tokio", ] +[[package]] +name = "netns-rs" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23541694f1d7d18cd1a0da3a1352a6ea48b01cbb4a8e7a6e547963823fd5276e" +dependencies = [ + "nix 0.23.2", + "thiserror", +] + [[package]] name = "nix" version = "0.23.2" @@ -2401,6 +2411,7 @@ dependencies = [ "byte-unit 4.0.18", "cgroups-rs", "futures 0.3.26", + "hex", "hypervisor", "kata-sys-util", "kata-types", @@ -2464,9 +2475,11 @@ dependencies = [ "lazy_static", "linux_container", "logging", + "netns-rs", "nix 0.25.1", "oci", "persist", + "resource", "serde_json", "shim-interface", "slog", diff --git a/src/runtime-rs/crates/resource/Cargo.toml b/src/runtime-rs/crates/resource/Cargo.toml index 73b577c5a..c7acfb584 100644 --- a/src/runtime-rs/crates/resource/Cargo.toml +++ b/src/runtime-rs/crates/resource/Cargo.toml @@ -16,6 +16,7 @@ bitflags = "1.2.1" byte-unit = "4.0.14" cgroups-rs = "0.3.2" futures = "0.3.11" +hex = "0.4.3" lazy_static = "1.4.0" libc = ">=0.2.39" netlink-sys = "0.8.3" diff --git a/src/runtime-rs/crates/resource/src/manager.rs b/src/runtime-rs/crates/resource/src/manager.rs index 0a0082512..9514b6013 100644 --- a/src/runtime-rs/crates/resource/src/manager.rs +++ 
b/src/runtime-rs/crates/resource/src/manager.rs @@ -4,6 +4,7 @@ // SPDX-License-Identifier: Apache-2.0 // +use crate::network::NetworkConfig; use crate::resource_persist::ResourceState; use crate::{manager_inner::ResourceManagerInner, rootfs::Rootfs, volume::Volume, ResourceConfig}; use agent::{Agent, Storage}; @@ -55,6 +56,11 @@ impl ResourceManager { inner.prepare_before_start_vm(device_configs).await } + pub async fn handle_network(&self, network_config: NetworkConfig) -> Result<()> { + let mut inner = self.inner.write().await; + inner.handle_network(network_config).await + } + pub async fn setup_after_start_vm(&self) -> Result<()> { let mut inner = self.inner.write().await; inner.setup_after_start_vm().await diff --git a/src/runtime-rs/crates/resource/src/manager_inner.rs b/src/runtime-rs/crates/resource/src/manager_inner.rs index e8623e37c..6c6e4067a 100644 --- a/src/runtime-rs/crates/resource/src/manager_inner.rs +++ b/src/runtime-rs/crates/resource/src/manager_inner.rs @@ -6,7 +6,7 @@ use std::{sync::Arc, thread}; -use crate::resource_persist::ResourceState; +use crate::{network::NetworkConfig, resource_persist::ResourceState}; use agent::{Agent, Storage}; use anyhow::{anyhow, Context, Ok, Result}; use async_trait::async_trait; @@ -89,32 +89,9 @@ impl ResourceManagerInner { }; } ResourceConfig::Network(c) => { - // 1. When using Rust asynchronous programming, we use .await to - // allow other task to run instead of waiting for the completion of the current task. - // 2. Also, when handling the pod network, we need to set the shim threads - // into the network namespace to perform those operations. - // However, as the increase of the I/O intensive tasks, two issues could be caused by the two points above: - // a. When the future is blocked, the current thread (which is in the pod netns) - // might be take over by other tasks. After the future is finished, the thread take over - // the current task might not be in the pod netns. But the current task still need to run in pod netns - // b. When finish setting up the network, the current thread will be set back to the host namespace. - // In Rust Async, if the current thread is taken over by other task, the netns is dropped on another thread, - // but it is not in netns. So, the previous thread would still remain in the pod netns. - // The solution is to block the future on the current thread, it is enabled by spawn an os thread, create a - // tokio runtime, and block the task on it. - let hypervisor = self.hypervisor.clone(); - let network = thread::spawn(move || -> Result> { - let rt = runtime::Builder::new_current_thread().enable_io().build()?; - let d = rt.block_on(network::new(&c)).context("new network")?; - rt.block_on(d.setup(hypervisor.as_ref())) - .context("setup network")?; - Ok(d) - }) - .join() - .map_err(|e| anyhow!("{:?}", e)) - .context("Couldn't join on the associated thread")? - .context("failed to set up network")?; - self.network = Some(network); + self.handle_network(c) + .await + .context("failed to handle network")?; } }; } @@ -122,6 +99,38 @@ impl ResourceManagerInner { Ok(()) } + pub async fn handle_network(&mut self, network_config: NetworkConfig) -> Result<()> { + // 1. When using Rust asynchronous programming, we use .await to + // allow other task to run instead of waiting for the completion of the current task. + // 2. Also, when handling the pod network, we need to set the shim threads + // into the network namespace to perform those operations. 
+ // However, as the increase of the I/O intensive tasks, two issues could be caused by the two points above: + // a. When the future is blocked, the current thread (which is in the pod netns) + // might be take over by other tasks. After the future is finished, the thread take over + // the current task might not be in the pod netns. But the current task still need to run in pod netns + // b. When finish setting up the network, the current thread will be set back to the host namespace. + // In Rust Async, if the current thread is taken over by other task, the netns is dropped on another thread, + // but it is not in netns. So, the previous thread would still remain in the pod netns. + // The solution is to block the future on the current thread, it is enabled by spawn an os thread, create a + // tokio runtime, and block the task on it. + let hypervisor = self.hypervisor.clone(); + let network = thread::spawn(move || -> Result> { + let rt = runtime::Builder::new_current_thread().enable_io().build()?; + let d = rt + .block_on(network::new(&network_config)) + .context("new network")?; + rt.block_on(d.setup(hypervisor.as_ref())) + .context("setup network")?; + Ok(d) + }) + .join() + .map_err(|e| anyhow!("{:?}", e)) + .context("Couldn't join on the associated thread")? + .context("failed to set up network")?; + self.network = Some(network); + Ok(()) + } + async fn handle_interfaces(&self, network: &dyn Network) -> Result<()> { for i in network.interfaces().await.context("get interfaces")? { // update interface diff --git a/src/runtime-rs/crates/resource/src/network/mod.rs b/src/runtime-rs/crates/resource/src/network/mod.rs index a85c2213d..6a83db8bb 100644 --- a/src/runtime-rs/crates/resource/src/network/mod.rs +++ b/src/runtime-rs/crates/resource/src/network/mod.rs @@ -18,7 +18,7 @@ use network_with_netns::NetworkWithNetns; mod network_pair; use network_pair::NetworkPair; mod utils; -pub use utils::netns::NetnsGuard; +pub use utils::netns::{generate_netns_name, NetnsGuard}; use std::sync::Arc; diff --git a/src/runtime-rs/crates/resource/src/network/network_with_netns.rs b/src/runtime-rs/crates/resource/src/network/network_with_netns.rs index 809897eba..651c2497c 100644 --- a/src/runtime-rs/crates/resource/src/network/network_with_netns.rs +++ b/src/runtime-rs/crates/resource/src/network/network_with_netns.rs @@ -33,6 +33,7 @@ pub struct NetworkWithNetNsConfig { pub network_model: String, pub netns_path: String, pub queues: usize, + pub network_created: bool, } struct NetworkWithNetnsInner { diff --git a/src/runtime-rs/crates/resource/src/network/utils/netns.rs b/src/runtime-rs/crates/resource/src/network/utils/netns.rs index bb0343dff..f2dc2ae6f 100644 --- a/src/runtime-rs/crates/resource/src/network/utils/netns.rs +++ b/src/runtime-rs/crates/resource/src/network/utils/netns.rs @@ -9,6 +9,7 @@ use std::{fs::File, os::unix::io::AsRawFd}; use anyhow::{Context, Result}; use nix::sched::{setns, CloneFlags}; use nix::unistd::{getpid, gettid}; +use rand::Rng; pub struct NetnsGuard { old_netns: Option, @@ -50,6 +51,20 @@ impl Drop for NetnsGuard { } } +// generate the network namespace name +pub fn generate_netns_name() -> String { + let mut rng = rand::thread_rng(); + let random_bytes: [u8; 16] = rng.gen(); + format!( + "cnitest-{}-{}-{}-{}-{}", + hex::encode(&random_bytes[..4]), + hex::encode(&random_bytes[4..6]), + hex::encode(&random_bytes[6..8]), + hex::encode(&random_bytes[8..10]), + hex::encode(&random_bytes[10..]) + ) +} + #[cfg(test)] mod tests { use super::*; @@ -67,4 +82,14 @@ mod tests { 
let empty_path = ""; assert!(NetnsGuard::new(empty_path).unwrap().old_netns.is_none()); } + + #[test] + fn test_generate_netns_name() { + let name1 = generate_netns_name(); + let name2 = generate_netns_name(); + let name3 = generate_netns_name(); + assert_ne!(name1, name2); + assert_ne!(name2, name3); + assert_ne!(name1, name3); + } } diff --git a/src/runtime-rs/crates/runtimes/Cargo.toml b/src/runtime-rs/crates/runtimes/Cargo.toml index 3a6ab0a1b..768122684 100644 --- a/src/runtime-rs/crates/runtimes/Cargo.toml +++ b/src/runtime-rs/crates/runtimes/Cargo.toml @@ -8,6 +8,7 @@ license = "Apache-2.0" [dependencies] anyhow = "^1.0" lazy_static = "1.4.0" +netns-rs = "0.1.0" slog = "2.5.2" slog-scope = "4.4.0" tokio = { version = "1.8.0", features = ["rt-multi-thread"] } @@ -26,6 +27,8 @@ oci = { path = "../../../libs/oci" } shim-interface = { path = "../../../libs/shim-interface" } persist = { path = "../persist" } hypervisor = { path = "../hypervisor" } +resource = { path = "../resource" } + # runtime handler linux_container = { path = "./linux_container", optional = true } virt_container = { path = "./virt_container", optional = true } diff --git a/src/runtime-rs/crates/runtimes/common/src/lib.rs b/src/runtime-rs/crates/runtimes/common/src/lib.rs index 36977964a..adb5ca002 100644 --- a/src/runtime-rs/crates/runtimes/common/src/lib.rs +++ b/src/runtime-rs/crates/runtimes/common/src/lib.rs @@ -11,5 +11,5 @@ pub mod message; mod runtime_handler; pub use runtime_handler::{RuntimeHandler, RuntimeInstance}; mod sandbox; -pub use sandbox::Sandbox; +pub use sandbox::{Sandbox, SandboxNetworkEnv}; pub mod types; diff --git a/src/runtime-rs/crates/runtimes/common/src/sandbox.rs b/src/runtime-rs/crates/runtimes/common/src/sandbox.rs index 0aee04922..efe06fa43 100644 --- a/src/runtime-rs/crates/runtimes/common/src/sandbox.rs +++ b/src/runtime-rs/crates/runtimes/common/src/sandbox.rs @@ -7,14 +7,20 @@ use anyhow::Result; use async_trait::async_trait; +#[derive(Clone)] +pub struct SandboxNetworkEnv { + pub netns: Option, + pub network_created: bool, +} + #[async_trait] pub trait Sandbox: Send + Sync { async fn start( &self, - netns: Option, dns: Vec, spec: &oci::Spec, state: &oci::State, + network_env: SandboxNetworkEnv, ) -> Result<()>; async fn stop(&self) -> Result<()>; async fn cleanup(&self) -> Result<()>; diff --git a/src/runtime-rs/crates/runtimes/src/manager.rs b/src/runtime-rs/crates/runtimes/src/manager.rs index 9649224ca..f97861f23 100644 --- a/src/runtime-rs/crates/runtimes/src/manager.rs +++ b/src/runtime-rs/crates/runtimes/src/manager.rs @@ -4,20 +4,21 @@ // SPDX-License-Identifier: Apache-2.0 // -use std::{str::from_utf8, sync::Arc}; - -use anyhow::{anyhow, Context, Result}; +use std::{path::PathBuf, str::from_utf8, sync::Arc}; use crate::{shim_mgmt::server::MgmtServer, static_resource::StaticResourceManager}; +use anyhow::{anyhow, Context, Result}; use common::{ message::Message, types::{Request, Response}, - RuntimeHandler, RuntimeInstance, Sandbox, + RuntimeHandler, RuntimeInstance, Sandbox, SandboxNetworkEnv, }; use hypervisor::Param; use kata_types::{ annotations::Annotation, config::default::DEFAULT_GUEST_DNS_FILE, config::TomlConfig, }; +use netns_rs::NetNs; +use resource::network::generate_netns_name; #[cfg(feature = "linux")] use linux_container::LinuxContainer; @@ -53,7 +54,7 @@ impl RuntimeHandlerManagerInner { &mut self, spec: &oci::Spec, state: &oci::State, - netns: Option, + network_env: SandboxNetworkEnv, dns: Vec, config: Arc, ) -> Result<()> { @@ -77,7 +78,7 @@ impl 
RuntimeHandlerManagerInner { // start sandbox runtime_instance .sandbox - .start(netns, dns, spec, state) + .start(dns, spec, state, network_env) .await .context("start sandbox")?; self.runtime_instance = Some(Arc::new(runtime_instance)); @@ -104,23 +105,6 @@ impl RuntimeHandlerManagerInner { #[cfg(feature = "virt")] VirtContainer::init().context("init virt container")?; - let netns = if let Some(linux) = &spec.linux { - let mut netns = None; - for ns in &linux.namespaces { - if ns.r#type.as_str() != oci::NETWORKNAMESPACE { - continue; - } - - if !ns.path.is_empty() { - netns = Some(ns.path.clone()); - break; - } - } - netns - } else { - None - }; - for m in &spec.mounts { if m.destination == DEFAULT_GUEST_DNS_FILE { let contents = fs::read_to_string(&m.source).await?; @@ -129,7 +113,42 @@ impl RuntimeHandlerManagerInner { } let config = load_config(spec, options).context("load config")?; - self.init_runtime_handler(spec, state, netns, dns, Arc::new(config)) + + let mut network_created = false; + // set netns to None if we want no network for the VM + let netns = if config.runtime.disable_new_netns { + None + } else { + let mut netns_path = None; + if let Some(linux) = &spec.linux { + for ns in &linux.namespaces { + if ns.r#type.as_str() != oci::NETWORKNAMESPACE { + continue; + } + // get netns path from oci spec + if !ns.path.is_empty() { + netns_path = Some(ns.path.clone()); + } + // if we get empty netns from oci spec, we need to create netns for the VM + else { + let ns_name = generate_netns_name(); + let netns = NetNs::new(ns_name)?; + let path = PathBuf::from(netns.path()).to_str().map(|s| s.to_string()); + info!(sl!(), "the netns path is {:?}", path); + netns_path = path; + network_created = true; + } + break; + } + } + netns_path + }; + + let network_env = SandboxNetworkEnv { + netns, + network_created, + }; + self.init_runtime_handler(spec, state, network_env, dns, Arc::new(config)) .await .context("init runtime handler")?; diff --git a/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs b/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs index d464a7a00..881b5f78b 100644 --- a/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs +++ b/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs @@ -14,7 +14,7 @@ use anyhow::{anyhow, Context, Result}; use async_trait::async_trait; use common::{ message::{Action, Message}, - Sandbox, + Sandbox, SandboxNetworkEnv, }; use containerd_shim_protos::events::task::TaskOOM; use hypervisor::{dragonball::Dragonball, Hypervisor, HYPERVISOR_DRAGONBALL}; @@ -86,30 +86,21 @@ impl VirtSandbox { }) } - async fn prepare_for_start_sandbox( + async fn prepare_config_for_sandbox( &self, _id: &str, - netns: Option, + network_env: SandboxNetworkEnv, ) -> Result> { let mut resource_configs = vec![]; - - let config = self.resource_manager.config().await; - if let Some(netns_path) = netns { - let network_config = ResourceConfig::Network(NetworkConfig::NetworkResourceWithNetNs( - NetworkWithNetNsConfig { - network_model: config.runtime.internetworking_model.clone(), - netns_path, - queues: self - .hypervisor - .hypervisor_config() - .await - .network_info - .network_queues as usize, - }, - )); - resource_configs.push(network_config); + if !network_env.network_created { + if let Some(netns_path) = network_env.netns { + let network_config = ResourceConfig::Network( + self.prepare_network_config(netns_path, network_env.network_created) + .await, + ); + resource_configs.push(network_config); + } } - let hypervisor_config = 
self.hypervisor.hypervisor_config().await; let virtio_fs_config = ResourceConfig::ShareFs(hypervisor_config.shared_fs); resource_configs.push(virtio_fs_config); @@ -149,16 +140,43 @@ impl VirtSandbox { Ok(()) } + + async fn prepare_network_config( + &self, + netns_path: String, + network_created: bool, + ) -> NetworkConfig { + let config = self.resource_manager.config().await; + NetworkConfig::NetworkResourceWithNetNs(NetworkWithNetNsConfig { + network_model: config.runtime.internetworking_model.clone(), + netns_path, + queues: self + .hypervisor + .hypervisor_config() + .await + .network_info + .network_queues as usize, + network_created, + }) + } + + fn has_prestart_hooks( + &self, + prestart_hooks: Vec, + create_runtime_hooks: Vec, + ) -> bool { + !prestart_hooks.is_empty() || !create_runtime_hooks.is_empty() + } } #[async_trait] impl Sandbox for VirtSandbox { async fn start( &self, - netns: Option, dns: Vec, spec: &oci::Spec, state: &oci::State, + network_env: SandboxNetworkEnv, ) -> Result<()> { let id = &self.sid; @@ -171,13 +189,15 @@ impl Sandbox for VirtSandbox { } self.hypervisor - .prepare_vm(id, netns.clone()) + .prepare_vm(id, network_env.netns.clone()) .await .context("prepare vm")?; // generate device and setup before start vm // should after hypervisor.prepare_vm - let resources = self.prepare_for_start_sandbox(id, netns).await?; + let resources = self + .prepare_config_for_sandbox(id, network_env.clone()) + .await?; self.resource_manager .prepare_before_start_vm(resources) .await @@ -188,15 +208,35 @@ impl Sandbox for VirtSandbox { info!(sl!(), "start vm"); // execute pre-start hook functions, including Prestart Hooks and CreateRuntime Hooks - let (prestart_hooks, create_runtime_hooks, _has_oci_hook) = match spec.hooks.as_ref() { - Some(hooks) => (hooks.prestart.clone(), hooks.create_runtime.clone(), true), - None => (Vec::new(), Vec::new(), false), + let (prestart_hooks, create_runtime_hooks) = match spec.hooks.as_ref() { + Some(hooks) => (hooks.prestart.clone(), hooks.create_runtime.clone()), + None => (Vec::new(), Vec::new()), }; self.execute_oci_hook_functions(&prestart_hooks, &create_runtime_hooks, state) .await?; - // TODO: if prestart_hooks is not empty, rescan the network endpoints(rely on hotplug endpoints). - // see: https://github.com/kata-containers/kata-containers/issues/6378 + // 1. if there are pre-start hook functions, network config might have been changed. + // We need to rescan the netns to handle the change. + // 2. Do not scan the netns if we want no network for the VM. + // TODO In case of vm factory, scan the netns to hotplug interfaces after the VM is started. 
+ if self.has_prestart_hooks(prestart_hooks, create_runtime_hooks) + && !self + .resource_manager + .config() + .await + .runtime + .disable_new_netns + { + if let Some(netns_path) = network_env.netns { + let network_resource = self + .prepare_network_config(netns_path, network_env.network_created) + .await; + self.resource_manager + .handle_network(network_resource) + .await + .context("set up device after start vm")?; + } + } // connect agent // set agent socket From 69ba2098f8cad92e53b585a27c959f96cb4cce80 Mon Sep 17 00:00:00 2001 From: Zhongtao Hu Date: Sun, 9 Apr 2023 21:57:58 +0800 Subject: [PATCH 096/137] runtime-rs: remove network entities and netns remove network entities and netns Fixes:#4693 Signed-off-by: Zhongtao Hu --- src/runtime-rs/Cargo.lock | 1 + src/runtime-rs/crates/resource/Cargo.toml | 1 + .../crates/resource/src/network/mod.rs | 1 + .../src/network/network_with_netns.rs | 32 +++++++++++++++++-- 4 files changed, 32 insertions(+), 3 deletions(-) diff --git a/src/runtime-rs/Cargo.lock b/src/runtime-rs/Cargo.lock index db556e9a7..89ea2abf1 100644 --- a/src/runtime-rs/Cargo.lock +++ b/src/runtime-rs/Cargo.lock @@ -2420,6 +2420,7 @@ dependencies = [ "logging", "netlink-packet-route", "netlink-sys", + "netns-rs", "nix 0.24.3", "oci", "persist", diff --git a/src/runtime-rs/crates/resource/Cargo.toml b/src/runtime-rs/crates/resource/Cargo.toml index c7acfb584..baafd28b7 100644 --- a/src/runtime-rs/crates/resource/Cargo.toml +++ b/src/runtime-rs/crates/resource/Cargo.toml @@ -19,6 +19,7 @@ futures = "0.3.11" hex = "0.4.3" lazy_static = "1.4.0" libc = ">=0.2.39" +netns-rs = "0.1.0" netlink-sys = "0.8.3" netlink-packet-route = "0.13.0" nix = "0.24.2" diff --git a/src/runtime-rs/crates/resource/src/network/mod.rs b/src/runtime-rs/crates/resource/src/network/mod.rs index 6a83db8bb..0fe3aa294 100644 --- a/src/runtime-rs/crates/resource/src/network/mod.rs +++ b/src/runtime-rs/crates/resource/src/network/mod.rs @@ -38,6 +38,7 @@ pub trait Network: Send + Sync { async fn routes(&self) -> Result>; async fn neighs(&self) -> Result>; async fn save(&self) -> Option>; + async fn remove(&self, h: &dyn Hypervisor) -> Result<()>; } pub async fn new(config: &NetworkConfig) -> Result> { diff --git a/src/runtime-rs/crates/resource/src/network/network_with_netns.rs b/src/runtime-rs/crates/resource/src/network/network_with_netns.rs index 651c2497c..bb5273ffc 100644 --- a/src/runtime-rs/crates/resource/src/network/network_with_netns.rs +++ b/src/runtime-rs/crates/resource/src/network/network_with_netns.rs @@ -4,9 +4,12 @@ // SPDX-License-Identifier: Apache-2.0 // -use std::sync::{ - atomic::{AtomicU32, Ordering}, - Arc, +use std::{ + fs, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, + }, }; use super::endpoint::endpoint_persist::EndpointState; @@ -14,6 +17,7 @@ use anyhow::{anyhow, Context, Result}; use async_trait::async_trait; use futures::stream::TryStreamExt; use hypervisor::Hypervisor; +use netns_rs::get_from_path; use scopeguard::defer; use tokio::sync::RwLock; @@ -39,6 +43,7 @@ pub struct NetworkWithNetNsConfig { struct NetworkWithNetnsInner { netns_path: String, entity_list: Vec, + network_created: bool, } impl NetworkWithNetnsInner { @@ -55,6 +60,7 @@ impl NetworkWithNetnsInner { Ok(Self { netns_path: config.netns_path.to_string(), entity_list, + network_created: config.network_created, }) } } @@ -121,6 +127,26 @@ impl Network for NetworkWithNetns { } Some(endpoint) } + + async fn remove(&self, h: &dyn Hypervisor) -> Result<()> { + let inner = self.inner.read().await; + // The network 
namespace would have been deleted at this point + // if it has not been created by virtcontainers. + if !inner.network_created { + return Ok(()); + } + { + let _netns_guard = + netns::NetnsGuard::new(&inner.netns_path).context("net netns guard")?; + for e in &inner.entity_list { + e.endpoint.detach(h).await.context("detach")?; + } + } + let netns = get_from_path(inner.netns_path.clone())?; + netns.remove()?; + fs::remove_dir_all(inner.netns_path.clone()).context("failed to remove netns path")?; + Ok(()) + } } async fn get_entity_from_netns(config: &NetworkWithNetNsConfig) -> Result> { From e4b3b08871d6c2017ce0f2cad5ed9c7388b622a7 Mon Sep 17 00:00:00 2001 From: Zvonko Kaiser Date: Wed, 12 Apr 2023 10:08:03 +0000 Subject: [PATCH 097/137] gpu: Add proper CONFIG_LOCALVERSION depending on TEE If conf_guest is set we need to update the CONFIG_LOCALVERSION to match the suffix created in install_kata (-nvidia-gpu-{snp|tdx}); the linux headers will be named the very same if built with make deb-pkg for TDX or SNP. Signed-off-by: Zvonko Kaiser --- tools/packaging/kernel/build-kernel.sh | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/tools/packaging/kernel/build-kernel.sh b/tools/packaging/kernel/build-kernel.sh index 4de27ca33..4cbaabd9c 100755 --- a/tools/packaging/kernel/build-kernel.sh +++ b/tools/packaging/kernel/build-kernel.sh @@ -239,8 +239,23 @@ get_kernel_frag_path() { if [[ "${gpu_vendor}" != "" ]];then info "Add kernel config for GPU due to '-g ${gpu_vendor}'" - local gpu_configs="$(ls ${gpu_path}/${gpu_vendor}.conf)" - all_configs="${all_configs} ${gpu_configs}" + # If conf_guest is set we need to update the CONFIG_LOCALVERSION + # to match the suffix created in install_kata + # -nvidia-gpu-{snp|tdx}, the linux headers will be named the very + # same if build with make deb-pkg for TDX or SNP. + if [[ "${conf_guest}" != "" ]];then + local gpu_cc_configs=$(mktemp).conf + local gpu_subst_configs="$(ls ${gpu_path}/${gpu_vendor}.conf.in)" + + export CONF_GUEST_SUFFIX="-${conf_guest}" + envsubst <${gpu_subst_configs} >${gpu_cc_configs} + unset CONF_GUEST_SUFFIX + + all_configs="${all_configs} ${gpu_cc_configs}" + else + local gpu_configs="$(ls ${gpu_path}/${gpu_vendor}.conf)" + all_configs="${all_configs} ${gpu_configs}" + fi fi if [[ "${conf_guest}" != "" ]];then @@ -545,7 +560,7 @@ main() { if [ -n "$kernel_version" ]; then kernel_major_version=$(get_major_kernel_version "${kernel_version}") if [[ ${kernel_major_version} != "5.10" ]]; then - info "dragonball-experimental kernel patches are only tested on 5.10.x kernel now, other kernel version may cause confliction" + info "dragonball-experimental kernel patches are only tested on 5.10.x kernel now, other kernel version may cause confliction" fi fi fi From 6d315719f025f24a643340d0f8c5e9cea32ed668 Mon Sep 17 00:00:00 2001 From: Jianyong Wu Date: Thu, 6 Apr 2023 17:16:57 +0800 Subject: [PATCH 098/137] snap: fix docker start failure In Arm baseline CI, docker fails to start with the error: "no sockets found via socket activation: make sure the service was started by systemd". I found a solution in [1] to fix it. 
[1] https://forums.docker.com/t/failed-to-load-listeners-no-sockets-found-via-socket-activation-make-sure-the-service-was-started-by-systemd/62505 Fixes: #6619 Signed-off-by: Jianyong Wu --- snap/snapcraft.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 8baf8d7e1..4bbb8e0f1 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -63,6 +63,9 @@ parts: echo "Adding $USER into docker group" sudo -E gpasswd -a $USER docker echo "Starting docker" + # docker may fail to start using "fd://" in docker.service + sudo sed -i 's/fd:\/\//unix:\/\//g' /lib/systemd/system/docker.service + sudo systemctl daemon-reload sudo -E systemctl start docker || true image: From db2cac34d8b743c18ff391d33a68ca6dca8ce031 Mon Sep 17 00:00:00 2001 From: Alexandru Matei Date: Thu, 6 Apr 2023 14:00:27 +0300 Subject: [PATCH 099/137] runtime: Don't create socket file in /run/kata The socket file for shim management is created in /run/kata and it isn't deleted after the container is stopped. After running and stopping thousands of containers /run folder will run out of space. Fixes #6622 Signed-off-by: Alexandru Matei Co-authored-by: Greg Kurz --- .../pkg/containerd-shim-v2/shim_management.go | 34 ++++++++++++++++--- src/runtime/pkg/kata-monitor/pprof.go | 2 +- .../shimclient/shim_management_client.go | 2 +- 3 files changed, 31 insertions(+), 7 deletions(-) diff --git a/src/runtime/pkg/containerd-shim-v2/shim_management.go b/src/runtime/pkg/containerd-shim-v2/shim_management.go index f9c31b8b2..0c6d5c6e2 100644 --- a/src/runtime/pkg/containerd-shim-v2/shim_management.go +++ b/src/runtime/pkg/containerd-shim-v2/shim_management.go @@ -243,7 +243,7 @@ func (s *service) genericIPTablesHandler(w http.ResponseWriter, r *http.Request, func (s *service) startManagementServer(ctx context.Context, ociSpec *specs.Spec) { // metrics socket will under sandbox's bundle path - metricsAddress := SocketAddress(s.id) + metricsAddress := ServerSocketAddress(s.id) listener, err := cdshim.NewSocket(metricsAddress) if err != nil { @@ -312,14 +312,38 @@ func GetSandboxesStoragePathRust() string { return "/run/kata" } -// SocketAddress returns the address of the unix domain socket for communicating with the +// SocketPath returns the path of the socket using the given storagePath +func SocketPath(id string, storagePath string) string { + return filepath.Join(string(filepath.Separator), storagePath, id, "shim-monitor.sock") +} + +// SocketPathGo returns the path of the socket to be used with the go runtime +func SocketPathGo(id string) string { + return SocketPath(id, GetSandboxesStoragePath()) +} + +// SocketPathRust returns the path of the socket to be used with the rust runtime +func SocketPathRust(id string) string { + return SocketPath(id, GetSandboxesStoragePathRust()) +} + +// ServerSocketAddress returns the address of the unix domain socket the shim management endpoint +// should listen. +// NOTE: this code is only called by the go shim management implementation. +func ServerSocketAddress(id string) string { + return fmt.Sprintf("unix://%s", SocketPathGo(id)) +} + +// ClientSocketAddress returns the address of the unix domain socket for communicating with the // shim management endpoint -func SocketAddress(id string) string { +// NOTE: this code allows various go clients, e.g. kata-runtime or kata-monitor commands, to +// connect to the rust shim management implementation. 
+func ClientSocketAddress(id string) string { // get the go runtime uds path - socketPath := filepath.Join(string(filepath.Separator), GetSandboxesStoragePath(), id, "shim-monitor.sock") + socketPath := SocketPathGo(id) // if the path not exist, use the rust runtime uds path instead if _, err := os.Stat(socketPath); err != nil { - return fmt.Sprintf("unix://%s", filepath.Join(string(filepath.Separator), GetSandboxesStoragePathRust(), id, "shim-monitor.sock")) + socketPath = SocketPathRust(id) } return fmt.Sprintf("unix://%s", socketPath) } diff --git a/src/runtime/pkg/kata-monitor/pprof.go b/src/runtime/pkg/kata-monitor/pprof.go index 0d768e428..afaae8556 100644 --- a/src/runtime/pkg/kata-monitor/pprof.go +++ b/src/runtime/pkg/kata-monitor/pprof.go @@ -32,7 +32,7 @@ func (km *KataMonitor) composeSocketAddress(r *http.Request) (string, error) { return "", err } - return shim.SocketAddress(sandbox), nil + return shim.ClientSocketAddress(sandbox), nil } func (km *KataMonitor) proxyRequest(w http.ResponseWriter, r *http.Request, diff --git a/src/runtime/pkg/utils/shimclient/shim_management_client.go b/src/runtime/pkg/utils/shimclient/shim_management_client.go index 1b9635c17..28ef3708d 100644 --- a/src/runtime/pkg/utils/shimclient/shim_management_client.go +++ b/src/runtime/pkg/utils/shimclient/shim_management_client.go @@ -19,7 +19,7 @@ import ( // BuildShimClient builds and returns an http client for communicating with the provided sandbox func BuildShimClient(sandboxID string, timeout time.Duration) (*http.Client, error) { - return buildUnixSocketClient(shim.SocketAddress(sandboxID), timeout) + return buildUnixSocketClient(shim.ClientSocketAddress(sandboxID), timeout) } // buildUnixSocketClient build http client for Unix socket From da35241a91ab5856dc5dac8a8671745ac9fc7a30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Wed, 12 Apr 2023 12:58:51 +0200 Subject: [PATCH 100/137] tests: k8s: Skip k8s-cpu-ns when testing TDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit TEEs do not support CPU / memory hotplug, thus this test must be skipped. 
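As a side note, the same guard could be centralized rather than repeated in every test; the helper below is a hypothetical sketch, not part of this patch:

    # Hypothetical helper for tests_common.sh: detect TEE hypervisors in one place.
    is_tee_hypervisor() {
        [[ "${KATA_HYPERVISOR}" =~ ^qemu-(tdx|snp|sev)$ ]]
    }
    # usage in setup()/teardown():
    # is_tee_hypervisor && skip "TEEs do not support memory / CPU hotplug"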
Signed-off-by: Fabiano Fidêncio --- tests/integration/kubernetes/k8s-cpu-ns.bats | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/integration/kubernetes/k8s-cpu-ns.bats b/tests/integration/kubernetes/k8s-cpu-ns.bats index 0089e1c06..4d5f2e883 100644 --- a/tests/integration/kubernetes/k8s-cpu-ns.bats +++ b/tests/integration/kubernetes/k8s-cpu-ns.bats @@ -11,6 +11,7 @@ load "${BATS_TEST_DIRNAME}/tests_common.sh" setup() { [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" [ "${KATA_HYPERVISOR}" == "dragonball" ] && skip "test not working see: ${dragonball_limitations}" + [ "${KATA_HYPERVISOR}" == "qemu-tdx" ] && skip "TEEs do not support memory / CPU hotplug" pod_name="constraints-cpu-test" container_name="first-cpu-container" @@ -27,6 +28,7 @@ setup() { @test "Check CPU constraints" { [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" [ "${KATA_HYPERVISOR}" == "dragonball" ] && skip "test not working see: ${dragonball_limitations}" + [ "${KATA_HYPERVISOR}" == "qemu-tdx" ] && skip "TEEs do not support memory / CPU hotplug" # Create the pod kubectl create -f "${pod_config_dir}/pod-cpu.yaml" @@ -71,6 +73,7 @@ setup() { teardown() { [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}" [ "${KATA_HYPERVISOR}" == "dragonball" ] && skip "test not working see: ${dragonball_limitations}" + [ "${KATA_HYPERVISOR}" == "qemu-tdx" ] && skip "TEEs do not support memory / CPU hotplug" # Debugging information kubectl describe "pod/$pod_name" From d7fdf19e9bdc96bc006030df058d66fad8ec0e1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Wed, 12 Apr 2023 13:01:25 +0200 Subject: [PATCH 101/137] gha: tdx: Delete kata-deploy after the tests finish MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We must ensure that no kata-deploy is left behind after the tests finish, otherwise it may interfere with the next run. 
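A quick way to check that the node really is clean after the teardown is something like the sketch below (illustrative verification commands, not part of this patch):

    # Confirm no kata-deploy pods or kata runtime classes survived the cleanup.
    kubectl -n kube-system get pods -l name=kata-deploy --no-headers 2>/dev/null | wc -l
    kubectl get runtimeclass -o name 2>/dev/null | grep kata- || echo "no kata runtime classes left"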
Fixes: #6647 Signed-off-by: Fabiano Fidêncio --- .github/workflows/run-k8s-tests-on-tdx.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/.github/workflows/run-k8s-tests-on-tdx.yaml b/.github/workflows/run-k8s-tests-on-tdx.yaml index 78e5d5a89..a842e2659 100644 --- a/.github/workflows/run-k8s-tests-on-tdx.yaml +++ b/.github/workflows/run-k8s-tests-on-tdx.yaml @@ -48,3 +48,21 @@ jobs: env: KATA_HYPERVISOR: ${{ matrix.vmm }} KUBECONFIG: /etc/rancher/k3s/k3s.yaml + + - name: Delete kata-deploy + if: always() + run: | + kubectl delete -f tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod + + sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}|g" tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml + cat tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml + cat tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml | grep "${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}" || die "Failed to setup the tests image" + kubectl apply -f tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml + sleep 180s + + kubectl delete -f tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml + kubectl delete -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml + kubectl delete -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml + env: + KUBECONFIG: /etc/rancher/k3s/k3s.yaml From 542bb0f3f305b7f91f1ec2798fbb1b096508f603 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Wed, 12 Apr 2023 14:57:03 +0200 Subject: [PATCH 102/137] gha: tdx: Set KUBECONFIG env at the job level MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit By doing this we avoid having to set it up on every step. 
Signed-off-by: Fabiano Fidêncio --- .github/workflows/run-k8s-tests-on-tdx.yaml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/.github/workflows/run-k8s-tests-on-tdx.yaml b/.github/workflows/run-k8s-tests-on-tdx.yaml index a842e2659..e1d9ba764 100644 --- a/.github/workflows/run-k8s-tests-on-tdx.yaml +++ b/.github/workflows/run-k8s-tests-on-tdx.yaml @@ -20,6 +20,8 @@ jobs: vmm: - qemu-tdx runs-on: tdx + env: + KUBECONFIG: /etc/rancher/k3s/k3s.yaml steps: - uses: actions/checkout@v3 with: ref: ${{ github.event.pull_request.head.sha }} @@ -35,8 +37,6 @@ jobs: kubectl apply -f tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod kubectl apply -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml - env: - KUBECONFIG: /etc/rancher/k3s/k3s.yaml - name: Run tests timeout-minutes: 30 @@ -47,7 +47,6 @@ jobs: popd env: KATA_HYPERVISOR: ${{ matrix.vmm }} - KUBECONFIG: /etc/rancher/k3s/k3s.yaml - name: Delete kata-deploy if: always() @@ -64,5 +63,3 @@ jobs: kubectl delete -f tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml kubectl delete -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml kubectl delete -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml From e31efc861cb347000d4109d53071f0b183caa15e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Wed, 12 Apr 2023 19:12:08 +0200 Subject: [PATCH 103/137] gha: tdx: Use the k3s overlay MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As the TDX machine is using k3s, let's make sure we're deploying kata-deploy using the k3s overlay. Signed-off-by: Fabiano Fidêncio --- .github/workflows/run-k8s-tests-on-tdx.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/run-k8s-tests-on-tdx.yaml b/.github/workflows/run-k8s-tests-on-tdx.yaml index e1d9ba764..4c38b9d23 100644 --- a/.github/workflows/run-k8s-tests-on-tdx.yaml +++ b/.github/workflows/run-k8s-tests-on-tdx.yaml @@ -34,7 +34,7 @@ jobs: cat tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml | grep "${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}" || die "Failed to setup the tests image" kubectl apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml - kubectl apply -f tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + kubectl apply -k tools/packaging/kata-deploy/kata-deploy/overlay/k3s kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod kubectl apply -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml @@ -51,7 +51,7 @@ jobs: - name: Delete kata-deploy if: always() run: | - kubectl delete -f tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml + kubectl delete -k tools/packaging/kata-deploy/kata-deploy/overlay/k3s kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}|g" tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml From ea386700fe703da6c4aef81f7291175a98da2b1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Wed, 12 Apr 2023 21:29:12 +0200 Subject: [PATCH 104/137] kata-deploy: Update podOverhead for TDX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As TEEs cannot hotplug memory / CPU, we *must* 
consider the default values for those as part of the podOverhead. Signed-off-by: Fabiano Fidêncio --- .../kata-deploy/runtimeclasses/kata-runtimeClasses.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml b/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml index daa4d1e2f..dc8644957 100644 --- a/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml +++ b/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml @@ -19,8 +19,8 @@ metadata: handler: kata-qemu-tdx overhead: podFixed: - memory: "160Mi" - cpu: "250m" + memory: "2048Mi" + cpu: "1.0" scheduling: nodeSelector: katacontainers.io/kata-runtime: "true" From 5ec9ae0f0498b7366fc85ed1448d36e3c9b6ac35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Wed, 12 Apr 2023 21:13:41 +0200 Subject: [PATCH 105/137] kata-deploy: Use readinessProbe to ensure everything is ready MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit readinessProbe will help us to only have the kata-deploy pod marked as Ready when it finishes all the needed configurations in the node. Related: #6649 Signed-off-by: Fabiano Fidêncio --- .../packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml | 3 +++ tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml | 3 +++ tools/packaging/kata-deploy/scripts/kata-deploy.sh | 2 ++ 3 files changed, 8 insertions(+) diff --git a/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml b/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml index 095876b73..23c3efe02 100644 --- a/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml +++ b/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml @@ -21,6 +21,9 @@ spec: image: quay.io/kata-containers/kata-deploy:latest imagePullPolicy: Always command: [ "bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh reset" ] + readinessProbe: + exec: + command: [ "bash", "-c", "[ -f /opt/kata/kata-deployed ]", "&&", "bash", "-c", "[ $? == 1 ]" ] env: - name: NODE_NAME valueFrom: diff --git a/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml b/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml index 97e98ee74..5b5d835b6 100644 --- a/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml +++ b/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml @@ -18,6 +18,9 @@ spec: - name: kube-kata image: quay.io/kata-containers/kata-deploy:latest imagePullPolicy: Always + readinessProbe: + exec: + command: [ "bash", "-c", "[ -f /opt/kata/kata-deployed ]", "&&", "bash", "-c", "[ $? 
== 0 ]" ] lifecycle: preStop: exec: diff --git a/tools/packaging/kata-deploy/scripts/kata-deploy.sh b/tools/packaging/kata-deploy/scripts/kata-deploy.sh index 8991e04fc..f6206bc96 100755 --- a/tools/packaging/kata-deploy/scripts/kata-deploy.sh +++ b/tools/packaging/kata-deploy/scripts/kata-deploy.sh @@ -310,11 +310,13 @@ function main() { install_artifacts configure_cri_runtime "$runtime" kubectl label node "$NODE_NAME" --overwrite katacontainers.io/kata-runtime=true + touch /opt/kata/kata-deployed ;; cleanup) cleanup_cri_runtime "$runtime" kubectl label node "$NODE_NAME" --overwrite katacontainers.io/kata-runtime=cleanup remove_artifacts + rm /opt/kata/kata-deployed ;; reset) reset_runtime $runtime From 3b76abb3664980b83b134b490f5ce200af09e49d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Wed, 12 Apr 2023 15:39:49 +0200 Subject: [PATCH 106/137] kata-deploy: Ensure node is ready after CRI Engine restart MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's ensure the node is ready after the CRI Engine restart, otherwise we may proceed and scripts may simply fail if they try to deploy a pod while the CRI Engine is not yet restarted (and, consequently, the node is not Ready). Related: #6649 Signed-off-by: Fabiano Fidêncio --- tools/packaging/kata-deploy/scripts/kata-deploy.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tools/packaging/kata-deploy/scripts/kata-deploy.sh b/tools/packaging/kata-deploy/scripts/kata-deploy.sh index f6206bc96..cc36e6367 100755 --- a/tools/packaging/kata-deploy/scripts/kata-deploy.sh +++ b/tools/packaging/kata-deploy/scripts/kata-deploy.sh @@ -63,6 +63,15 @@ function install_artifacts() { chmod +x /opt/kata/runtime-rs/bin/* } +function wait_till_node_is_ready() { + local ready="False" + + while ! [[ "${ready}" == "True" ]]; do + sleep 2s + ready=$(kubectl get node $NODE_NAME -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') + done +} + function configure_cri_runtime() { configure_different_shims_base @@ -76,6 +85,8 @@ function configure_cri_runtime() { esac systemctl daemon-reload systemctl restart "$1" + + wait_till_node_is_ready } function configure_different_shims_base() { @@ -266,6 +277,8 @@ function reset_runtime() { if [ "$1" == "crio" ] || [ "$1" == "containerd" ]; then systemctl restart kubelet fi + + wait_till_node_is_ready } function main() { From f478b9115ea15f7ddeacf4997a49a669bd784738 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 13 Apr 2023 07:06:36 +0200 Subject: [PATCH 107/137] clh: tdx: Update timeouts for confidential guest MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Booting up TDX takes more time than booting up a normal VM. Those values are being already used as part of the CCv0 branch, and we're just bringing them to the `main` branch as well. Signed-off-by: Fabiano Fidêncio --- src/runtime/virtcontainers/clh.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/runtime/virtcontainers/clh.go b/src/runtime/virtcontainers/clh.go index 1cf40c4d2..6ae99d673 100644 --- a/src/runtime/virtcontainers/clh.go +++ b/src/runtime/virtcontainers/clh.go @@ -73,12 +73,12 @@ const ( // Values based on: clhTimeout = 10 clhAPITimeout = 1 - clhAPITimeoutConfidentialGuest = 10 + clhAPITimeoutConfidentialGuest = 20 // Timeout for hot-plug - hotplug devices can take more time, than usual API calls // Use longer time timeout for it. 
clhHotPlugAPITimeout = 5 clhStopSandboxTimeout = 3 - clhStopSandboxTimeoutConfidentialGuest = 5 + clhStopSandboxTimeoutConfidentialGuest = 10 clhSocket = "clh.sock" clhAPISocket = "clh-api.sock" virtioFsSocket = "virtiofsd.sock" From eb1762e813c52d5b593bacb04932d6fbf0ca3a19 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Thu, 13 Apr 2023 13:11:08 +0200 Subject: [PATCH 108/137] osbuilder: Enable dbus in the dracut case The agent now offloads cgroup configuration to systemd when possible. This requires enabling D-Bus in order to communicate with systemd. Fixes #6657 Signed-off-by: Greg Kurz --- tools/osbuilder/dracut/dracut.conf.d/05-base.conf | 2 +- tools/osbuilder/rootfs-builder/rootfs.sh | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/osbuilder/dracut/dracut.conf.d/05-base.conf b/tools/osbuilder/dracut/dracut.conf.d/05-base.conf index 1dd41c4a1..88591dda9 100644 --- a/tools/osbuilder/dracut/dracut.conf.d/05-base.conf +++ b/tools/osbuilder/dracut/dracut.conf.d/05-base.conf @@ -14,4 +14,4 @@ hostonly_cmdline="no" # create reproducible images reproducible="yes" # dracut modules to include (NOTE: these are NOT kernel modules) -dracutmodules="kernel-modules udev-rules syslog systemd" +dracutmodules="kernel-modules udev-rules syslog systemd dbus" diff --git a/tools/osbuilder/rootfs-builder/rootfs.sh b/tools/osbuilder/rootfs-builder/rootfs.sh index 43c79fd7d..6dfb8734e 100755 --- a/tools/osbuilder/rootfs-builder/rootfs.sh +++ b/tools/osbuilder/rootfs-builder/rootfs.sh @@ -475,6 +475,8 @@ prepare_overlay() # Kata systemd unit file mkdir -p ./etc/systemd/system/basic.target.wants/ ln -sf /usr/lib/systemd/system/kata-containers.target ./etc/systemd/system/basic.target.wants/kata-containers.target + mkdir -p ./etc/systemd/system/kata-containers.target.wants/ + ln -sf /usr/lib/systemd/system/dbus.socket ./etc/systemd/system/kata-containers.target.wants/dbus.socket popd > /dev/null } From dc662333df06646c22ebe747a5f338d579ee11fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 13 Apr 2023 22:42:52 +0200 Subject: [PATCH 109/137] runtime: Increase the dial_timeout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When testing on AKS, we've been hitting the dial_timeout every now and then. Let's increase it to 45 seconds (instead of 30) for all the VMMs, and to 60 seconds in case of TEEs. 
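For anyone tuning this on an already-installed system, the change amounts to editing the agent section of the hypervisor config, e.g. (a sketch that assumes a kata-deploy style /opt/kata install path; adjust for your distribution):

    # Bump the agent connection dialing timeout to 60s for the TDX config.
    sudo sed -i 's/^#\?dial_timeout = .*/dial_timeout = 60/' \
        /opt/kata/share/defaults/kata-containers/configuration-qemu-tdx.toml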
Signed-off-by: Fabiano Fidêncio --- src/runtime-rs/config/configuration-dragonball.toml.in | 4 ++-- src/runtime/config/configuration-acrn.toml.in | 4 ++-- src/runtime/config/configuration-clh.toml.in | 4 ++-- src/runtime/config/configuration-fc.toml.in | 4 ++-- src/runtime/config/configuration-qemu-tdx.toml.in | 4 ++-- src/runtime/config/configuration-qemu.toml.in | 4 ++-- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/runtime-rs/config/configuration-dragonball.toml.in b/src/runtime-rs/config/configuration-dragonball.toml.in index 8b963e12d..174f270e7 100644 --- a/src/runtime-rs/config/configuration-dragonball.toml.in +++ b/src/runtime-rs/config/configuration-dragonball.toml.in @@ -206,8 +206,8 @@ container_pipe_size=@PIPESIZE@ #debug_console_enabled = true # Agent connection dialing timeout value in seconds -# (default: 30) -#dial_timeout = 30 +# (default: 45) +dial_timeout = 45 [runtime] # If enabled, the runtime will log additional debug messages to the diff --git a/src/runtime/config/configuration-acrn.toml.in b/src/runtime/config/configuration-acrn.toml.in index 2d2b7065e..ef0207589 100644 --- a/src/runtime/config/configuration-acrn.toml.in +++ b/src/runtime/config/configuration-acrn.toml.in @@ -154,8 +154,8 @@ disable_selinux=@DEFDISABLESELINUX@ #debug_console_enabled = true # Agent connection dialing timeout value in seconds -# (default: 30) -#dial_timeout = 30 +# (default: 45) +dial_timeout = 45 [runtime] # If enabled, the runtime will log additional debug messages to the diff --git a/src/runtime/config/configuration-clh.toml.in b/src/runtime/config/configuration-clh.toml.in index d79770487..d6653bce9 100644 --- a/src/runtime/config/configuration-clh.toml.in +++ b/src/runtime/config/configuration-clh.toml.in @@ -305,8 +305,8 @@ block_device_driver = "virtio-blk" #debug_console_enabled = true # Agent connection dialing timeout value in seconds -# (default: 30) -#dial_timeout = 30 +# (default: 45) +dial_timeout = 45 [runtime] # If enabled, the runtime will log additional debug messages to the diff --git a/src/runtime/config/configuration-fc.toml.in b/src/runtime/config/configuration-fc.toml.in index 10dc17700..e28316cfa 100644 --- a/src/runtime/config/configuration-fc.toml.in +++ b/src/runtime/config/configuration-fc.toml.in @@ -284,8 +284,8 @@ kernel_modules=[] #debug_console_enabled = true # Agent connection dialing timeout value in seconds -# (default: 30) -#dial_timeout = 30 +# (default: 45) +dial_timeout = 45 [runtime] # If enabled, the runtime will log additional debug messages to the diff --git a/src/runtime/config/configuration-qemu-tdx.toml.in b/src/runtime/config/configuration-qemu-tdx.toml.in index 6cecabdba..b9c130e65 100644 --- a/src/runtime/config/configuration-qemu-tdx.toml.in +++ b/src/runtime/config/configuration-qemu-tdx.toml.in @@ -529,8 +529,8 @@ kernel_modules=[] #debug_console_enabled = true # Agent connection dialing timeout value in seconds -# (default: 30) -#dial_timeout = 30 +# (default: 60) +dial_timeout = 60 [runtime] # If enabled, the runtime will log additional debug messages to the diff --git a/src/runtime/config/configuration-qemu.toml.in b/src/runtime/config/configuration-qemu.toml.in index 4fb5a8ba0..6446b0d0d 100644 --- a/src/runtime/config/configuration-qemu.toml.in +++ b/src/runtime/config/configuration-qemu.toml.in @@ -535,8 +535,8 @@ kernel_modules=[] #debug_console_enabled = true # Agent connection dialing timeout value in seconds -# (default: 30) -#dial_timeout = 30 +# (default: 45) +dial_timeout = 45 [runtime] # If 
enabled, the runtime will log additional debug messages to the From aca6ff7289f8eae304966a320b209c545115be92 Mon Sep 17 00:00:00 2001 From: Zvonko Kaiser Date: Mon, 3 Apr 2023 10:10:47 +0000 Subject: [PATCH 110/137] gpu: Build and Ship a GPU enabled Kernel With each release make sure we ship a GPU and TEE enabled kernel Fixes: #6553 Signed-off-by: Zvonko Kaiser --- .../build-kata-static-tarball-amd64.yaml | 2 + .github/workflows/release.yaml | 2 +- .../kata-deploy/local-build/Makefile | 8 ++++ .../local-build/kata-deploy-binaries.sh | 26 +++++++++++++ tools/packaging/kernel/build-kernel.sh | 39 +++++++++++++++++-- tools/packaging/kernel/kata_config_version | 2 +- .../packaging/static-build/kernel/Dockerfile | 6 ++- tools/packaging/static-build/kernel/build.sh | 6 +++ 8 files changed, 84 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build-kata-static-tarball-amd64.yaml b/.github/workflows/build-kata-static-tarball-amd64.yaml index a7f3bdc19..f86c8b125 100644 --- a/.github/workflows/build-kata-static-tarball-amd64.yaml +++ b/.github/workflows/build-kata-static-tarball-amd64.yaml @@ -21,6 +21,8 @@ jobs: - kernel - kernel-dragonball-experimental - kernel-tdx-experimental + - kernel-gpu-snp + - kernel-gpu-tdx - nydus - qemu - qemu-tdx-experimental diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index a642fa36f..f9e810aad 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -92,7 +92,7 @@ jobs: tarball="kata-containers-$tag-vendor.tar.gz" pushd $GITHUB_WORKSPACE bash -c "tools/packaging/release/generate_vendor.sh ${tarball}" - GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}" + GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}" popd upload-libseccomp-tarball: diff --git a/tools/packaging/kata-deploy/local-build/Makefile b/tools/packaging/kata-deploy/local-build/Makefile index 2ad324b78..02c36e417 100644 --- a/tools/packaging/kata-deploy/local-build/Makefile +++ b/tools/packaging/kata-deploy/local-build/Makefile @@ -26,6 +26,8 @@ all: serial-targets \ kernel-tarball \ kernel-dragonball-experimental-tarball \ kernel-tdx-experimental-tarball \ + kernel-gpu-snp-tarball \ + kernel-gpu-tdx-tarball \ nydus-tarball \ qemu-tarball \ qemu-tdx-experimental-tarball \ @@ -54,6 +56,12 @@ kernel-tarball: kernel-dragonball-experimental-tarball: ${MAKE} $@-build +kernel-gpu-snp-tarball: + ${MAKE} $@-build + +kernel-gpu-tdx-tarball: + ${MAKE} $@-build + kernel-experimental-tarball: ${MAKE} $@-build diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index 816c8e01e..534b7cfd0 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -82,6 +82,8 @@ options: kernel-dragonball-experimental kernel-experimental kernel-tdx-experimental + kernel-gpu-snp + kernel-gpu-tdx nydus qemu qemu-tdx-experimental @@ -213,6 +215,26 @@ install_kernel_dragonball_experimental() { "-e -t dragonball" } +#Install GPU and SNP enabled kernel asset +install_kernel_gpu_snp() { + local kernel_url="$(get_from_kata_deps assets.kernel.snp.url)" + + install_kernel_helper \ + "assets.kernel.snp.version" \ + "kernel-gpu-snp" \ + "-x snp -g nvidia -u ${kernel_url} -H deb" +} + +#Install GPU and TDX enabled kernel asset +install_kernel_gpu_tdx() { + local kernel_url="$(get_from_kata_deps 
assets.kernel-tdx-experimental.url)" + + install_kernel_helper \ + "assets.kernel-tdx-experimental.version" \ + "kernel-gpu-tdx" \ + "-x tdx -g nvidia -u ${kernel_url} -H deb" +} + #Install experimental kernel asset install_kernel_experimental() { install_kernel_helper \ @@ -448,6 +470,10 @@ handle_build() { kernel-tdx-experimental) install_kernel_tdx_experimental ;; + kernel-gpu-snp) install_kernel_gpu_snp;; + + kernel-gpu-tdx) install_kernel_gpu_tdx;; + qemu) install_qemu ;; qemu-tdx-experimental) install_qemu_tdx_experimental ;; diff --git a/tools/packaging/kernel/build-kernel.sh b/tools/packaging/kernel/build-kernel.sh index 4cbaabd9c..2f3e0e353 100755 --- a/tools/packaging/kernel/build-kernel.sh +++ b/tools/packaging/kernel/build-kernel.sh @@ -61,6 +61,8 @@ DESTDIR="${DESTDIR:-/}" PREFIX="${PREFIX:-/usr}" #Kernel URL kernel_url="" +#Linux headers for GPU guest fs module building +linux_headers="" packaging_scripts_dir="${script_dir}/../scripts" source "${packaging_scripts_dir}/lib.sh" @@ -239,6 +241,8 @@ get_kernel_frag_path() { if [[ "${gpu_vendor}" != "" ]];then info "Add kernel config for GPU due to '-g ${gpu_vendor}'" + local gpu_configs="$(ls ${gpu_path}/${gpu_vendor}.conf)" + all_configs="${all_configs} ${gpu_configs}" # If conf_guest is set we need to update the CONFIG_LOCALVERSION # to match the suffix created in install_kata # -nvidia-gpu-{snp|tdx}, the linux headers will be named the very @@ -430,6 +434,24 @@ build_kernel() { popd >>/dev/null } +build_kernel_headers() { + local kernel_path=${1:-} + [ -n "${kernel_path}" ] || die "kernel_path not provided" + [ -d "${kernel_path}" ] || die "path to kernel does not exist, use ${script_name} setup" + [ -n "${arch_target}" ] || arch_target="$(uname -m)" + arch_target=$(arch_to_kernel "${arch_target}") + pushd "${kernel_path}" >>/dev/null + + if [ "$linux_headers" == "deb" ]; then + make -j $(nproc ${CI:+--ignore 1}) deb-pkg ARCH="${arch_target}" + fi + if [ "$linux_headers" == "rpm" ]; then + make -j $(nproc ${CI:+--ignore 1}) rpm-pkg ARCH="${arch_target}" + fi + + popd >>/dev/null +} + install_kata() { local kernel_path=${1:-} [ -n "${kernel_path}" ] || die "kernel_path not provided" @@ -445,14 +467,15 @@ install_kata() { if [[ ${build_type} != "" ]]; then suffix="-${build_type}" fi - if [[ ${gpu_vendor} != "" ]];then - suffix="-${gpu_vendor}-gpu${suffix}" - fi if [[ ${conf_guest} != "" ]];then suffix="-${conf_guest}${suffix}" fi + if [[ ${gpu_vendor} != "" ]];then + suffix="-${gpu_vendor}-gpu${suffix}" + fi + vmlinuz="vmlinuz-${kernel_version}-${config_version}${suffix}" vmlinux="vmlinux-${kernel_version}-${config_version}${suffix}" @@ -487,10 +510,12 @@ install_kata() { ls -la "${install_path}/vmlinux${suffix}.container" ls -la "${install_path}/vmlinuz${suffix}.container" popd >>/dev/null + + set +x } main() { - while getopts "a:b:c:deEfg:hk:p:t:u:v:x:" opt; do + while getopts "a:b:c:deEfg:hH:k:p:t:u:v:x:" opt; do case "$opt" in a) arch_target="${OPTARG}" @@ -521,6 +546,9 @@ main() { h) usage 0 ;; + H) + linux_headers="${OPTARG}" + ;; k) kernel_path="$(realpath ${OPTARG})" ;; @@ -609,6 +637,9 @@ main() { build) build_kernel "${kernel_path}" ;; + build-headers) + build_kernel_headers "${kernel_path}" + ;; install) install_kata "${kernel_path}" ;; diff --git a/tools/packaging/kernel/kata_config_version b/tools/packaging/kernel/kata_config_version index b16e5f75e..f96ac0672 100644 --- a/tools/packaging/kernel/kata_config_version +++ b/tools/packaging/kernel/kata_config_version @@ -1 +1 @@ -104 +105 diff --git 
a/tools/packaging/static-build/kernel/Dockerfile b/tools/packaging/static-build/kernel/Dockerfile index 4ccf2c0df..b4c232972 100644 --- a/tools/packaging/static-build/kernel/Dockerfile +++ b/tools/packaging/static-build/kernel/Dockerfile @@ -2,7 +2,7 @@ # # SPDX-License-Identifier: Apache-2.0 -FROM ubuntu:20.04 +FROM ubuntu:22.04 ENV DEBIAN_FRONTEND=noninteractive # kernel deps @@ -18,6 +18,10 @@ RUN apt-get update && \ iptables \ kmod \ libelf-dev \ + libssl-dev \ + gettext \ + rsync \ + cpio \ patch && \ if [ "$(uname -m)" = "s390x" ]; then apt-get install -y --no-install-recommends libssl-dev; fi && \ apt-get clean && rm -rf /var/lib/lists/ diff --git a/tools/packaging/static-build/kernel/build.sh b/tools/packaging/static-build/kernel/build.sh index d9f6ccd90..091f76cef 100755 --- a/tools/packaging/static-build/kernel/build.sh +++ b/tools/packaging/static-build/kernel/build.sh @@ -38,3 +38,9 @@ sudo docker run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \ --env DESTDIR="${DESTDIR}" --env PREFIX="${PREFIX}" \ "${container_image}" \ bash -c "${kernel_builder} $* install" + +sudo docker run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \ + -w "${PWD}" \ + --env DESTDIR="${DESTDIR}" --env PREFIX="${PREFIX}" \ + "${container_image}" \ + bash -c "${kernel_builder} $* build-headers" From 87ea43cd4e38552222b5e387e3e1297a0596651d Mon Sep 17 00:00:00 2001 From: Zvonko Kaiser Date: Fri, 14 Apr 2023 07:46:52 +0000 Subject: [PATCH 111/137] gpu: Add configuration fragment Add a configuration fragment for the kernel; depending on the TEE, update the LOCALVERSION accordingly. Signed-off-by: Zvonko Kaiser --- .../kernel/configs/fragments/gpu/nvidia.conf.in | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 tools/packaging/kernel/configs/fragments/gpu/nvidia.conf.in diff --git a/tools/packaging/kernel/configs/fragments/gpu/nvidia.conf.in b/tools/packaging/kernel/configs/fragments/gpu/nvidia.conf.in new file mode 100644 index 000000000..73cce6173 --- /dev/null +++ b/tools/packaging/kernel/configs/fragments/gpu/nvidia.conf.in @@ -0,0 +1,14 @@ +# Support mmconfig PCI config space access. +# It's used to enable the MMIO access method for PCIe devices. +CONFIG_PCI_MMCONFIG=y + +# Support for loading modules. +# It is used to support loading GPU drivers. +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y + +# CRYPTO_FIPS requires this config when loading modules is enabled. +CONFIG_MODULE_SIG=y + +# Linux kernel version suffix +CONFIG_LOCALVERSION="-nvidia-gpu${CONF_GUEST_SUFFIX}" From 80e3a2d408c81757e03574723088587bf2950ee6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Sat, 15 Apr 2023 13:27:34 +0200 Subject: [PATCH 112/137] cache-components: Fix TDX QEMU caching MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit TDX QEMU caching is not working as expected, as we're checking for its version looking at "assets.hypervisor.${QEMU_FLAVOUR}.version", which is correct for standard QEMU.
However, for TDX QEMU we should be checking for "assets.hypervisor.${QEMU_FLAVOUR}.tag" Fixes: #6668 Signed-off-by: Fabiano Fidêncio --- tools/packaging/static-build/cache_components_main.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/packaging/static-build/cache_components_main.sh b/tools/packaging/static-build/cache_components_main.sh index 804df1cbf..656a906d8 100755 --- a/tools/packaging/static-build/cache_components_main.sh +++ b/tools/packaging/static-build/cache_components_main.sh @@ -53,6 +53,7 @@ cache_ovmf_artifacts() { cache_qemu_artifacts() { local qemu_tarball_name="kata-static-${QEMU_FLAVOUR}.tar.xz" local current_qemu_version=$(get_from_kata_deps "assets.hypervisor.${QEMU_FLAVOUR}.version") + [ -z "${current_qemu_version}" ] && current_qemu_version=$(get_from_kata_deps "assets.hypervisor.${QEMU_FLAVOUR}.tag") local qemu_sha=$(calc_qemu_files_sha256sum) local current_qemu_image="$(get_qemu_image_name)" create_cache_asset "${qemu_tarball_name}" "${current_qemu_version}-${qemu_sha}" "${current_qemu_image}" From 3fa0890e5e372dcc8006bf81267d65ea65407ab1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Sat, 15 Apr 2023 14:02:18 +0200 Subject: [PATCH 113/137] cache-components: Fix TDVF caching MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit TDVF caching is not working as the tarball name is incorrect. The result expected is kata-static-tdvf.tar.xz, but it's looking for kata-static-tdx.tar.xz. This happens as a logic to convert tdx -> tdvf has been added as part of the building scripts, but I missed doing this as part of the caching scripts. Fixes: #6669 Signed-off-by: Fabiano Fidêncio --- tools/packaging/static-build/cache_components_main.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/packaging/static-build/cache_components_main.sh b/tools/packaging/static-build/cache_components_main.sh index 656a906d8..0e8a0120f 100755 --- a/tools/packaging/static-build/cache_components_main.sh +++ b/tools/packaging/static-build/cache_components_main.sh @@ -44,8 +44,9 @@ cache_nydus_artifacts() { } cache_ovmf_artifacts() { - local ovmf_tarball_name="kata-static-${OVMF_FLAVOUR}.tar.xz" local current_ovmf_version="$(get_from_kata_deps "externals.ovmf.${OVMF_FLAVOUR}.version")" + [ "${OVMF_FLAVOUR}" == "tdx" ] && OVMF_FLAVOUR="tdvf" + local ovmf_tarball_name="kata-static-${OVMF_FLAVOUR}.tar.xz" local current_ovmf_image="$(get_ovmf_image_name)" create_cache_asset "${ovmf_tarball_name}" "${current_ovmf_version}" "${current_ovmf_image}" } From a1272bcf1dd03e920db92d6e7666d8f31521842c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Sat, 15 Apr 2023 15:00:06 +0200 Subject: [PATCH 114/137] gha: tdx: Fix typo overlay -> overlays MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The beauty of GHA not allowing us to easily test changes in the yaml files as part of the PR has hit us again. :-/ The correct path for the k3s deployment is tools/packaging/kata-deploy/kata-deploy/overlays/k3s instead of tools/packaging/kata-deploy/kata-deploy/overlay/k3s. 
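Stepping back to the two caching fixes above (patches 112 and 113): both reduce to one pattern, namely deriving the cache key from whichever field the flavour actually pins, and normalizing flavour names that the build scripts rename. A minimal sketch of that decision logic, written in Rust purely for illustration (the real logic lives in bash in cache_components_main.sh, and the function names and values here are hypothetical):

fn qemu_cache_key(version: Option<&str>, tag: Option<&str>, files_sha: &str) -> Option<String> {
    // Standard QEMU pins "version"; TDX QEMU pins "tag", so fall back to it.
    let pinned = version.filter(|v| !v.is_empty()).or(tag)?;
    Some(format!("{pinned}-{files_sha}"))
}

fn ovmf_tarball_name(flavour: &str) -> String {
    // The build scripts rename the "tdx" flavour to "tdvf", so the cache must
    // look for kata-static-tdvf.tar.xz rather than kata-static-tdx.tar.xz.
    let flavour = if flavour == "tdx" { "tdvf" } else { flavour };
    format!("kata-static-{flavour}.tar.xz")
}

fn main() {
    assert_eq!(ovmf_tarball_name("tdx"), "kata-static-tdvf.tar.xz");
    assert_eq!(
        qemu_cache_key(None, Some("v7.2.0-tdx"), "abc123").as_deref(),
        Some("v7.2.0-tdx-abc123")
    );
}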
Signed-off-by: Fabiano Fidêncio --- .github/workflows/run-k8s-tests-on-tdx.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/run-k8s-tests-on-tdx.yaml b/.github/workflows/run-k8s-tests-on-tdx.yaml index 4c38b9d23..1777a16c8 100644 --- a/.github/workflows/run-k8s-tests-on-tdx.yaml +++ b/.github/workflows/run-k8s-tests-on-tdx.yaml @@ -34,7 +34,7 @@ jobs: cat tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml | grep "${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}" || die "Failed to setup the tests image" kubectl apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml - kubectl apply -k tools/packaging/kata-deploy/kata-deploy/overlay/k3s + kubectl apply -k tools/packaging/kata-deploy/kata-deploy/overlays/k3s kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod kubectl apply -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml @@ -51,7 +51,7 @@ jobs: - name: Delete kata-deploy if: always() run: | - kubectl delete -k tools/packaging/kata-deploy/kata-deploy/overlay/k3s + kubectl delete -k tools/packaging/kata-deploy/kata-deploy/overlays/k3s kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${{ inputs.registry }}/${{ inputs.repo }}:${{ inputs.tag }}|g" tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml From e4ee07f7d45f657acb19508abbdd019a418792c3 Mon Sep 17 00:00:00 2001 From: Zvonko Kaiser Date: Mon, 17 Apr 2023 09:26:54 +0000 Subject: [PATCH 115/137] gpu: Add GPU TDX experimental kernel With each release, make sure we ship a GPU and TEE enabled kernel. This adds tdx-experimental kernel support. Signed-off-by: Zvonko Kaiser --- .github/workflows/build-kata-static-tarball-amd64.yaml | 2 +- .github/workflows/release.yaml | 2 +- tools/packaging/kata-deploy/local-build/Makefile | 6 +++--- .../kata-deploy/local-build/kata-deploy-binaries.sh | 9 +++++---- tools/packaging/kernel/build-kernel.sh | 4 +--- 5 files changed, 11 insertions(+), 12 deletions(-) diff --git a/.github/workflows/build-kata-static-tarball-amd64.yaml b/.github/workflows/build-kata-static-tarball-amd64.yaml index f86c8b125..47d264060 100644 --- a/.github/workflows/build-kata-static-tarball-amd64.yaml +++ b/.github/workflows/build-kata-static-tarball-amd64.yaml @@ -22,7 +22,7 @@ jobs: - kernel-dragonball-experimental - kernel-tdx-experimental - kernel-gpu-snp - - kernel-gpu-tdx + - kernel-gpu-tdx-experimental - nydus - qemu - qemu-tdx-experimental diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index f9e810aad..f31261d51 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -78,7 +78,7 @@ jobs: mv kata-static.tar.xz "$GITHUB_WORKSPACE/${tarball}" pushd $GITHUB_WORKSPACE echo "uploading asset '${tarball}' for tag: ${tag}" - GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}" + GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}" popd upload-cargo-vendored-tarball: diff --git a/tools/packaging/kata-deploy/local-build/Makefile b/tools/packaging/kata-deploy/local-build/Makefile index 02c36e417..8451ae7ce 100644 --- a/tools/packaging/kata-deploy/local-build/Makefile +++ b/tools/packaging/kata-deploy/local-build/Makefile @@ -27,7 +27,7 @@ all: serial-targets \ kernel-dragonball-experimental-tarball \ kernel-tdx-experimental-tarball \ kernel-gpu-snp-tarball \ -
kernel-gpu-tdx-tarball \ + kernel-gpu-tdx-experimental-tarball \ nydus-tarball \ qemu-tarball \ qemu-tdx-experimental-tarball \ @@ -59,8 +59,8 @@ kernel-dragonball-experimental-tarball: kernel-gpu-snp-tarball: ${MAKE} $@-build -kernel-gpu-tdx-tarball: - ${MAKE} $@-build +kernel-gpu-tdx-experimental-tarball: + ${MAKE} $@-build kernel-experimental-tarball: ${MAKE} $@-build diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index 534b7cfd0..350a7d6ae 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -82,8 +82,9 @@ options: kernel-dragonball-experimental kernel-experimental kernel-tdx-experimental + kernel-gpu kernel-gpu-snp - kernel-gpu-tdx + kernel-gpu-tdx-experimental nydus qemu qemu-tdx-experimental @@ -225,8 +226,8 @@ install_kernel_gpu_snp() { "-x snp -g nvidia -u ${kernel_url} -H deb" } -#Install GPU and TDX enabled kernel asset -install_kernel_gpu_tdx() { +#Install GPU and TDX experimental enabled kernel asset +install_kernel_gpu_tdx_experimental() { local kernel_url="$(get_from_kata_deps assets.kernel-tdx-experimental.url)" install_kernel_helper \ @@ -472,7 +473,7 @@ handle_build() { kernel-gpu-snp) install_kernel_gpu_snp;; - kernel-gpu-tdx) install_kernel_gpu_tdx;; + kernel-gpu-tdx-experimental) install_kernel_gpu_tdx_experimental;; qemu) install_qemu ;; diff --git a/tools/packaging/kernel/build-kernel.sh b/tools/packaging/kernel/build-kernel.sh index 2f3e0e353..67c4c8f5c 100755 --- a/tools/packaging/kernel/build-kernel.sh +++ b/tools/packaging/kernel/build-kernel.sh @@ -510,8 +510,6 @@ install_kata() { ls -la "${install_path}/vmlinux${suffix}.container" ls -la "${install_path}/vmlinuz${suffix}.container" popd >>/dev/null - - set +x } main() { @@ -588,7 +586,7 @@ main() { if [ -n "$kernel_version" ]; then kernel_major_version=$(get_major_kernel_version "${kernel_version}") if [[ ${kernel_major_version} != "5.10" ]]; then - info "dragonball-experimental kernel patches are only tested on 5.10.x kernel now, other kernel version may cause confliction" + info "dragonball-experimental kernel patches are only tested on 5.10.x kernel now, other kernel version may cause confliction" fi fi fi From 825e769483abd6621ec717e02551e1964d034f8d Mon Sep 17 00:00:00 2001 From: Zvonko Kaiser Date: Mon, 17 Apr 2023 09:48:00 +0000 Subject: [PATCH 116/137] gpu: Add GPU support to default kernel without any TEE With each release make sure we ship a GPU enabled kernel Signed-off-by: Zvonko Kaiser --- .../workflows/build-kata-static-tarball-amd64.yaml | 1 + tools/packaging/kata-deploy/local-build/Makefile | 4 ++++ .../kata-deploy/local-build/kata-deploy-binaries.sh | 13 +++++++++++++ 3 files changed, 18 insertions(+) diff --git a/.github/workflows/build-kata-static-tarball-amd64.yaml b/.github/workflows/build-kata-static-tarball-amd64.yaml index 47d264060..5942a5d79 100644 --- a/.github/workflows/build-kata-static-tarball-amd64.yaml +++ b/.github/workflows/build-kata-static-tarball-amd64.yaml @@ -21,6 +21,7 @@ jobs: - kernel - kernel-dragonball-experimental - kernel-tdx-experimental + - kernel-gpu - kernel-gpu-snp - kernel-gpu-tdx-experimental - nydus diff --git a/tools/packaging/kata-deploy/local-build/Makefile b/tools/packaging/kata-deploy/local-build/Makefile index 8451ae7ce..82356f1fa 100644 --- a/tools/packaging/kata-deploy/local-build/Makefile +++ b/tools/packaging/kata-deploy/local-build/Makefile @@ -26,6 +26,7 @@ 
all: serial-targets \ kernel-tarball \ kernel-dragonball-experimental-tarball \ kernel-tdx-experimental-tarball \ + kernel-gpu \ kernel-gpu-snp-tarball \ kernel-gpu-tdx-experimental-tarball \ nydus-tarball \ @@ -56,6 +57,9 @@ kernel-tarball: kernel-dragonball-experimental-tarball: ${MAKE} $@-build +kernel-gpu-tarball: + ${MAKE} $@-build + kernel-gpu-snp-tarball: ${MAKE} $@-build diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index 350a7d6ae..55fcc0624 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -81,6 +81,7 @@ options: kernel kernel-dragonball-experimental kernel-experimental + kernel-gpu kernel-tdx-experimental kernel-gpu kernel-gpu-snp kernel-gpu-tdx-experimental nydus qemu qemu-tdx-experimental @@ -216,6 +217,16 @@ install_kernel_dragonball_experimental() { "-e -t dragonball" } +#Install GPU enabled kernel asset +install_kernel_gpu() { + local kernel_url="$(get_from_kata_deps assets.kernel.url)" + + install_kernel_helper \ + "assets.kernel.version" \ + "kernel-gpu" \ + "-g nvidia -u ${kernel_url} -H deb" +} + #Install GPU and SNP enabled kernel asset install_kernel_gpu_snp() { local kernel_url="$(get_from_kata_deps assets.kernel.snp.url)" @@ -471,6 +482,8 @@ handle_build() { kernel-tdx-experimental) install_kernel_tdx_experimental ;; + kernel-gpu) install_kernel_gpu ;; + kernel-gpu-snp) install_kernel_gpu_snp;; kernel-gpu-tdx-experimental) install_kernel_gpu_tdx_experimental;; From f4f958d53cafbafdf529e0b54235194b9e6cef98 Mon Sep 17 00:00:00 2001 From: Zvonko Kaiser Date: Fri, 14 Apr 2023 10:49:34 +0000 Subject: [PATCH 117/137] gpu: Do not pass-through PCI (Host) Bridges On some systems a GPU is in an IOMMU group with a PCI Bridge and a PCI Host Bridge. By default, no PCI Bridge needs to be passed through. When scanning the IOMMU group, ignore devices with a 0x06 class ID prefix.
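To make the mask arithmetic concrete: sysfs reports a 24-bit class code such as 0x060400; dropping the prog-if byte leaves 0x0604 (PCI bridge), and both 0x0604 and 0x0600 (Host bridge) carry the 0x0600 bits. A minimal sketch of that check, in Rust for illustration only; the actual implementation is the Go helper checkIgnorePCIClass() in the diff below:

fn is_pci_bridge_class(sysfs_class: &str) -> Result<bool, std::num::ParseIntError> {
    // sysfs exposes e.g. "0x060400": class 0x06, subclass 0x04, prog-if 0x00.
    let class_id = u64::from_str_radix(sysfs_class.trim_start_matches("0x"), 16)?;
    // Drop the trailing prog-if byte, keeping the 16-bit class/subclass pair.
    let class_id = class_id >> 8;
    // 0x0600 is a Host bridge and 0x0604 a PCI bridge; both match the mask.
    Ok((class_id & 0x0600) == 0x0600)
}

fn main() -> Result<(), std::num::ParseIntError> {
    assert!(is_pci_bridge_class("0x060400")?); // PCI bridge: skipped
    assert!(is_pci_bridge_class("0x060000")?); // Host bridge: skipped
    assert!(!is_pci_bridge_class("0x030200")?); // 3D controller (GPU): kept
    Ok(())
}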
Fixes: #6663 Signed-off-by: Zvonko Kaiser --- src/runtime/pkg/device/drivers/vfio.go | 34 +++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/src/runtime/pkg/device/drivers/vfio.go b/src/runtime/pkg/device/drivers/vfio.go index 94139aaa2..1099f8f0b 100644 --- a/src/runtime/pkg/device/drivers/vfio.go +++ b/src/runtime/pkg/device/drivers/vfio.go @@ -54,6 +54,25 @@ func NewVFIODevice(devInfo *config.DeviceInfo) *VFIODevice { } } +// Ignore specific PCI devices, supply the pciClass and the bitmask to check +// against the device class, deviceBDF for meaningful info message +func (device *VFIODevice) checkIgnorePCIClass(pciClass string, deviceBDF string, bitmask uint64) (bool, error) { + if pciClass == "" { + return false, nil + } + pciClassID, err := strconv.ParseUint(pciClass, 0, 32) + if err != nil { + return false, err + } + // ClassID is 16 bits, remove the two trailing zeros + pciClassID = pciClassID >> 8 + if pciClassID&bitmask == bitmask { + deviceLogger().Infof("Ignoring PCI (Host) Bridge deviceBDF %v Class %x", deviceBDF, pciClassID) + return true, nil + } + return false, nil +} + // Attach is standard interface of api.Device, it's used to add device to some // DeviceReceiver func (device *VFIODevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (retErr error) { @@ -88,6 +107,18 @@ func (device *VFIODevice) Attach(ctx context.Context, devReceiver api.DeviceRece } id := utils.MakeNameID("vfio", device.DeviceInfo.ID+strconv.Itoa(i), maxDevIDSize) + pciClass := getPCIDeviceProperty(deviceBDF, PCISysFsDevicesClass) + // We need to ignore Host or PCI Bridges that are in the same IOMMU group as the + // passed-through devices. One CANNOT pass-through a PCI bridge or Host bridge. + // Class 0x0604 is PCI bridge, 0x0600 is Host bridge + ignorePCIDevice, err := device.checkIgnorePCIClass(pciClass, deviceBDF, 0x0600) + if err != nil { + return err + } + if ignorePCIDevice { + continue + } + var vfio config.VFIODev switch vfioDeviceType { @@ -100,7 +131,7 @@ func (device *VFIODevice) Attach(ctx context.Context, devReceiver api.DeviceRece BDF: deviceBDF, SysfsDev: deviceSysfsDev, IsPCIe: isPCIe, - Class: getPCIDeviceProperty(deviceBDF, PCISysFsDevicesClass), + Class: pciClass, } if isPCIe { vfioPCI.Bus = fmt.Sprintf("%s%d", pcieRootPortPrefix, len(AllPCIeDevs)) @@ -121,6 +152,7 @@ func (device *VFIODevice) Attach(ctx context.Context, devReceiver api.DeviceRece default: return fmt.Errorf("Failed to append device: VFIO device type unrecognized") } + device.VfioDevs = append(device.VfioDevs, &vfio) } From 392732e2132c67e85977812a154e6e64afc79a68 Mon Sep 17 00:00:00 2001 From: Tim Zhang Date: Wed, 12 Apr 2023 16:45:21 +0800 Subject: [PATCH 118/137] protocols: Bump ttrpc from 0.6.0 to 0.7.1 Fixes: #6646 Signed-off-by: Tim Zhang --- src/libs/Cargo.lock | 85 ++++++--- src/libs/protocols/.gitignore | 14 +- src/libs/protocols/Cargo.toml | 7 +- src/libs/protocols/build.rs | 10 +- src/libs/protocols/src/lib.rs | 1 + src/libs/protocols/src/trans.rs | 297 ++++++++++++++------------------ 6 files changed, 210 insertions(+), 204 deletions(-) diff --git a/src/libs/Cargo.lock b/src/libs/Cargo.lock index 16bdfda6f..2f03109f8 100644 --- a/src/libs/Cargo.lock +++ b/src/libs/Cargo.lock @@ -703,9 +703,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.9.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" +checksum =
"b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "parking_lot" @@ -845,9 +845,16 @@ name = "protobuf" version = "2.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" + +[[package]] +name = "protobuf" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55bad9126f378a853655831eb7363b7b01b81d19f8cb1218861086ca4a1a61e" dependencies = [ - "serde", - "serde_derive", + "once_cell", + "protobuf-support", + "thiserror", ] [[package]] @@ -856,17 +863,47 @@ version = "2.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aec1632b7c8f2e620343439a7dfd1f3c47b18906c4be58982079911482b5d707" dependencies = [ - "protobuf", + "protobuf 2.27.1", ] [[package]] -name = "protobuf-codegen-pure" -version = "2.27.1" +name = "protobuf-codegen" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f8122fdb18e55190c796b088a16bdb70cd7acdcd48f7a8b796b58c62e532cc6" +checksum = "0dd418ac3c91caa4032d37cb80ff0d44e2ebe637b2fb243b6234bf89cdac4901" dependencies = [ - "protobuf", - "protobuf-codegen", + "anyhow", + "once_cell", + "protobuf 3.2.0", + "protobuf-parse", + "regex", + "tempfile", + "thiserror", +] + +[[package]] +name = "protobuf-parse" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d39b14605eaa1f6a340aec7f320b34064feb26c93aec35d6a9a2272a8ddfa49" +dependencies = [ + "anyhow", + "indexmap", + "log", + "protobuf 3.2.0", + "protobuf-support", + "tempfile", + "thiserror", + "which", +] + +[[package]] +name = "protobuf-support" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d4d7b8601c814cfb36bcebb79f0e61e45e1e93640cf778837833bbed05c372" +dependencies = [ + "thiserror", ] [[package]] @@ -875,7 +912,7 @@ version = "0.1.0" dependencies = [ "async-trait", "oci", - "protobuf", + "protobuf 3.2.0", "serde", "serde_json", "ttrpc", @@ -1314,9 +1351,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "ttrpc" -version = "0.6.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ecfff459a859c6ba6668ff72b34c2f1d94d9d58f7088414c2674ad0f31cc7d8" +checksum = "a35f22a2964bea14afee161665bb260b83cb48e665e0260ca06ec0e775c8b06c" dependencies = [ "async-trait", "byteorder", @@ -1324,8 +1361,8 @@ dependencies = [ "libc", "log", "nix 0.23.1", - "protobuf", - "protobuf-codegen-pure", + "protobuf 3.2.0", + "protobuf-codegen 3.2.0", "thiserror", "tokio", "tokio-vsock", @@ -1333,28 +1370,28 @@ dependencies = [ [[package]] name = "ttrpc-codegen" -version = "0.2.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809eda4e459820237104e4b61d6b41bbe6c9e1ce6adf4057955e6e6722a90408" +checksum = "94d7f7631d7a9ebed715a47cd4cb6072cbc7ae1d4ec01598971bbec0024340c2" dependencies = [ - "protobuf", - "protobuf-codegen", - "protobuf-codegen-pure", + "protobuf 2.27.1", + "protobuf-codegen 3.2.0", + "protobuf-support", "ttrpc-compiler", ] [[package]] name = "ttrpc-compiler" -version = "0.4.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2978ed3fa047d8fd55cbeb4d4a61d461fb3021a90c9618519c73ce7e5bb66c15" +checksum = "ec3cb5dbf1f0865a34fe3f722290fe776cacb16f50428610b779467b76ddf647" dependencies = [ "derive-new", "prost", 
"prost-build", "prost-types", - "protobuf", - "protobuf-codegen", + "protobuf 2.27.1", + "protobuf-codegen 2.27.1", "tempfile", ] diff --git a/src/libs/protocols/.gitignore b/src/libs/protocols/.gitignore index 0a83b1689..5b5d2f3df 100644 --- a/src/libs/protocols/.gitignore +++ b/src/libs/protocols/.gitignore @@ -1,11 +1,5 @@ Cargo.lock -src/agent.rs -src/agent_ttrpc.rs -src/agent_ttrpc_async.rs -src/csi.rs -src/empty.rs -src/health.rs -src/health_ttrpc.rs -src/health_ttrpc_async.rs -src/oci.rs -src/types.rs + +src/*.rs +!src/lib.rs +!src/trans.rs diff --git a/src/libs/protocols/Cargo.toml b/src/libs/protocols/Cargo.toml index 03b9c8b3d..9c0033d17 100644 --- a/src/libs/protocols/Cargo.toml +++ b/src/libs/protocols/Cargo.toml @@ -11,12 +11,13 @@ with-serde = [ "serde", "serde_json" ] async = ["ttrpc/async", "async-trait"] [dependencies] -ttrpc = { version = "0.6.0" } +ttrpc = { version = "0.7.1" } async-trait = { version = "0.1.42", optional = true } -protobuf = { version = "2.27.0", features = ["with-serde"] } +protobuf = { version = "3.2.0" } serde = { version = "1.0.130", features = ["derive"], optional = true } serde_json = { version = "1.0.68", optional = true } oci = { path = "../oci" } [build-dependencies] -ttrpc-codegen = "0.2.0" +ttrpc-codegen = "0.4.2" +protobuf = { version = "3.2.0" } diff --git a/src/libs/protocols/build.rs b/src/libs/protocols/build.rs index 8c0341762..12818b057 100644 --- a/src/libs/protocols/build.rs +++ b/src/libs/protocols/build.rs @@ -7,6 +7,7 @@ use std::fs::{self, File}; use std::io::{BufRead, BufReader, Read, Write}; use std::path::Path; use std::process::exit; + use ttrpc_codegen::{Codegen, Customize, ProtobufCustomize}; fn replace_text_in_file(file_name: &str, from: &str, to: &str) -> Result<(), std::io::Error> { @@ -103,10 +104,10 @@ fn codegen(path: &str, protos: &[&str], async_all: bool) -> Result<(), std::io:: ..Default::default() }; - let protobuf_options = ProtobufCustomize { - serde_derive: Some(true), - ..Default::default() - }; + let protobuf_options = ProtobufCustomize::default() + .gen_mod_rs(false) + .generate_getter(true) + .generate_accessors(true); let out_dir = Path::new("src"); @@ -147,6 +148,7 @@ fn real_main() -> Result<(), std::io::Error> { "src", &[ "protos/google/protobuf/empty.proto", + "protos/gogo/protobuf/gogoproto/gogo.proto", "protos/oci.proto", "protos/types.proto", "protos/csi.proto", diff --git a/src/libs/protocols/src/lib.rs b/src/libs/protocols/src/lib.rs index 0c62b8a93..801b70060 100644 --- a/src/libs/protocols/src/lib.rs +++ b/src/libs/protocols/src/lib.rs @@ -11,6 +11,7 @@ pub mod agent_ttrpc; pub mod agent_ttrpc_async; pub mod csi; pub mod empty; +mod gogo; pub mod health; pub mod health_ttrpc; #[cfg(feature = "async")] diff --git a/src/libs/protocols/src/trans.rs b/src/libs/protocols/src/trans.rs index 1e1514788..f05af8395 100644 --- a/src/libs/protocols/src/trans.rs +++ b/src/libs/protocols/src/trans.rs @@ -15,19 +15,19 @@ use oci::{ }; // translate from interface to ttprc tools -fn from_option>(from: Option) -> ::protobuf::SingularPtrField { +fn from_option>(from: Option) -> protobuf::MessageField { match from { - Some(f) => ::protobuf::SingularPtrField::from_option(Some(T::from(f))), - None => ::protobuf::SingularPtrField::none(), + Some(f) => protobuf::MessageField::from_option(Some(f.into())), + None => protobuf::MessageField::none(), } } -fn from_vec>(from: Vec) -> ::protobuf::RepeatedField { +fn from_vec>(from: Vec) -> Vec { let mut to: Vec = vec![]; for data in from { - to.push(T::from(data)); + 
to.push(data.into()); } - ::protobuf::RepeatedField::from_vec(to) + to } impl From for crate::oci::Box { @@ -35,8 +35,7 @@ impl From for crate::oci::Box { crate::oci::Box { Height: from.height, Width: from.width, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -48,8 +47,7 @@ impl From for crate::oci::User { GID: from.gid, AdditionalGids: from.additional_gids, Username: from.username, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -57,13 +55,12 @@ impl From for crate::oci::User { impl From for crate::oci::LinuxCapabilities { fn from(from: LinuxCapabilities) -> Self { crate::oci::LinuxCapabilities { - Bounding: from_vec(from.bounding), - Effective: from_vec(from.effective), - Inheritable: from_vec(from.inheritable), - Permitted: from_vec(from.permitted), - Ambient: from_vec(from.ambient), - unknown_fields: Default::default(), - cached_size: Default::default(), + Bounding: from.bounding, + Effective: from.effective, + Inheritable: from.inheritable, + Permitted: from.permitted, + Ambient: from.ambient, + ..Default::default() } } } @@ -74,8 +71,7 @@ impl From for crate::oci::POSIXRlimit { Type: from.r#type, Hard: from.hard, Soft: from.soft, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -86,8 +82,8 @@ impl From for crate::oci::Process { Terminal: from.terminal, ConsoleSize: from_option(from.console_size), User: from_option(Some(from.user)), - Args: from_vec(from.args), - Env: from_vec(from.env), + Args: from.args, + Env: from.env, Cwd: from.cwd, Capabilities: from_option(from.capabilities), Rlimits: from_vec(from.rlimits), @@ -95,8 +91,7 @@ impl From for crate::oci::Process { ApparmorProfile: from.apparmor_profile, OOMScoreAdj: from.oom_score_adj.map_or(0, |t| t as i64), SelinuxLabel: from.selinux_label, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -109,8 +104,7 @@ impl From for crate::oci::LinuxDeviceCgroup { Major: from.major.map_or(0, |t| t), Minor: from.minor.map_or(0, |t| t), Access: from.access, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -125,8 +119,7 @@ impl From for crate::oci::LinuxMemory { KernelTCP: from.kernel_tcp.map_or(0, |t| t), Swappiness: from.swappiness.map_or(0, |t| t), DisableOOMKiller: from.disable_oom_killer.map_or(false, |t| t), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -141,8 +134,7 @@ impl From for crate::oci::LinuxCPU { RealtimePeriod: from.realtime_period.map_or(0, |t| t), Cpus: from.cpus, Mems: from.mems, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -151,8 +143,7 @@ impl From for crate::oci::LinuxPids { fn from(from: LinuxPids) -> Self { crate::oci::LinuxPids { Limit: from.limit, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -165,8 +156,7 @@ impl From for crate::oci::LinuxWeightDevice { Minor: 0, Weight: from.weight.map_or(0, |t| t as u32), LeafWeight: from.leaf_weight.map_or(0, |t| t as u32), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -178,8 +168,7 @@ impl From for crate::oci::LinuxThrottleDevice { Major: 0, Minor: 0, Rate: from.rate, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ 
-194,8 +183,7 @@ impl From for crate::oci::LinuxBlockIO { ThrottleWriteBpsDevice: from_vec(from.throttle_write_bps_device), ThrottleReadIOPSDevice: from_vec(from.throttle_read_iops_device), ThrottleWriteIOPSDevice: from_vec(from.throttle_write_iops_device), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -205,8 +193,7 @@ impl From for crate::oci::LinuxHugepageLimit { crate::oci::LinuxHugepageLimit { Pagesize: from.page_size, Limit: from.limit, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -216,8 +203,7 @@ impl From for crate::oci::LinuxInterfacePriority { crate::oci::LinuxInterfacePriority { Name: from.name, Priority: from.priority, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -227,8 +213,7 @@ impl From for crate::oci::LinuxNetwork { crate::oci::LinuxNetwork { ClassID: from.class_id.map_or(0, |t| t), Priorities: from_vec(from.priorities), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -243,8 +228,7 @@ impl From for crate::oci::LinuxResources { BlockIO: from_option(from.block_io), HugepageLimits: from_vec(from.hugepage_limits), Network: from_option(from.network), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -254,8 +238,7 @@ impl From for crate::oci::Root { crate::oci::Root { Path: from.path, Readonly: from.readonly, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -265,10 +248,9 @@ impl From for crate::oci::Mount { crate::oci::Mount { destination: from.destination, source: from.source, - field_type: from.r#type, - options: from_vec(from.options), - unknown_fields: Default::default(), - cached_size: Default::default(), + type_: from.r#type, + options: from.options, + ..Default::default() } } } @@ -281,11 +263,10 @@ impl From for crate::oci::Hook { } crate::oci::Hook { Path: from.path, - Args: from_vec(from.args), - Env: from_vec(from.env), + Args: from.args, + Env: from.env, Timeout: timeout, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -299,8 +280,7 @@ impl From for crate::oci::Hooks { StartContainer: from_vec(from.start_container), Poststart: from_vec(from.poststart), Poststop: from_vec(from.poststop), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -311,8 +291,7 @@ impl From for crate::oci::LinuxIDMapping { HostID: from.host_id, ContainerID: from.container_id, Size: from.size, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -322,8 +301,7 @@ impl From for crate::oci::LinuxNamespace { crate::oci::LinuxNamespace { Type: from.r#type, Path: from.path, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -338,8 +316,7 @@ impl From for crate::oci::LinuxDevice { FileMode: from.file_mode.map_or(0, |v| v), UID: from.uid.map_or(0, |v| v), GID: from.gid.map_or(0, |v| v), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -351,8 +328,7 @@ impl From for crate::oci::LinuxSeccompArg { Value: from.value, ValueTwo: from.value_two, Op: from.op, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -360,14 +336,13 @@ impl From for 
crate::oci::LinuxSeccompArg { impl From for crate::oci::LinuxSyscall { fn from(from: LinuxSyscall) -> Self { crate::oci::LinuxSyscall { - Names: from_vec(from.names), + Names: from.names, Action: from.action, Args: from_vec(from.args), - ErrnoRet: Some(crate::oci::LinuxSyscall_oneof_ErrnoRet::errnoret( + ErrnoRet: Some(crate::oci::linux_syscall::ErrnoRet::Errnoret( from.errno_ret, )), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -376,11 +351,10 @@ impl From for crate::oci::LinuxSeccomp { fn from(from: LinuxSeccomp) -> Self { crate::oci::LinuxSeccomp { DefaultAction: from.default_action, - Architectures: from_vec(from.architectures), + Architectures: from.architectures, Syscalls: from_vec(from.syscalls), - Flags: from_vec(from.flags), - unknown_fields: Default::default(), - cached_size: Default::default(), + Flags: from.flags, + ..Default::default() } } } @@ -389,8 +363,7 @@ impl From for crate::oci::LinuxIntelRdt { fn from(from: LinuxIntelRdt) -> Self { crate::oci::LinuxIntelRdt { L3CacheSchema: from.l3_cache_schema, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -407,12 +380,11 @@ impl From for crate::oci::Linux { Devices: from_vec(from.devices), Seccomp: from_option(from.seccomp), RootfsPropagation: from.rootfs_propagation, - MaskedPaths: from_vec(from.masked_paths), - ReadonlyPaths: from_vec(from.readonly_paths), + MaskedPaths: from.masked_paths, + ReadonlyPaths: from.readonly_paths, MountLabel: from.mount_label, IntelRdt: from_option(from.intel_rdt), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -430,8 +402,7 @@ impl From for crate::oci::Spec { Linux: from_option(from.linux), Solaris: Default::default(), Windows: Default::default(), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -449,7 +420,7 @@ impl From for oci::Mount { fn from(mut from: crate::oci::Mount) -> Self { let options = from.take_options().to_vec(); Self { - r#type: from.take_field_type(), + r#type: from.take_type_(), destination: from.take_destination(), source: from.take_source(), options, @@ -460,9 +431,9 @@ impl From for oci::Mount { impl From for oci::LinuxIdMapping { fn from(from: crate::oci::LinuxIDMapping) -> Self { LinuxIdMapping { - container_id: from.get_ContainerID(), - host_id: from.get_HostID(), - size: from.get_Size(), + container_id: from.ContainerID(), + host_id: from.HostID(), + size: from.Size(), } } } @@ -470,17 +441,17 @@ impl From for oci::LinuxIdMapping { impl From for oci::LinuxDeviceCgroup { fn from(mut from: crate::oci::LinuxDeviceCgroup) -> Self { let mut major = None; - if from.get_Major() > 0 { - major = Some(from.get_Major()); + if from.Major() > 0 { + major = Some(from.Major()); } let mut minor = None; - if from.get_Minor() > 0 { - minor = Some(from.get_Minor()) + if from.Minor() > 0 { + minor = Some(from.Minor()) } oci::LinuxDeviceCgroup { - allow: from.get_Allow(), + allow: from.Allow(), r#type: from.take_Type(), major, minor, @@ -492,36 +463,36 @@ impl From for oci::LinuxDeviceCgroup { impl From for oci::LinuxMemory { fn from(from: crate::oci::LinuxMemory) -> Self { let mut limit = None; - if from.get_Limit() > 0 { - limit = Some(from.get_Limit()); + if from.Limit() > 0 { + limit = Some(from.Limit()); } let mut reservation = None; - if from.get_Reservation() > 0 { - reservation = Some(from.get_Reservation()); + if from.Reservation() > 0 { + reservation = 
Some(from.Reservation()); } let mut swap = None; - if from.get_Swap() > 0 { - swap = Some(from.get_Swap()); + if from.Swap() > 0 { + swap = Some(from.Swap()); } let mut kernel = None; - if from.get_Kernel() > 0 { - kernel = Some(from.get_Kernel()); + if from.Kernel() > 0 { + kernel = Some(from.Kernel()); } let mut kernel_tcp = None; - if from.get_KernelTCP() > 0 { - kernel_tcp = Some(from.get_KernelTCP()); + if from.KernelTCP() > 0 { + kernel_tcp = Some(from.KernelTCP()); } let mut swappiness = None; - if from.get_Swappiness() > 0 { - swappiness = Some(from.get_Swappiness()); + if from.Swappiness() > 0 { + swappiness = Some(from.Swappiness()); } - let disable_oom_killer = Some(from.get_DisableOOMKiller()); + let disable_oom_killer = Some(from.DisableOOMKiller()); oci::LinuxMemory { limit, @@ -538,28 +509,28 @@ impl From for oci::LinuxMemory { impl From for oci::LinuxCpu { fn from(mut from: crate::oci::LinuxCPU) -> Self { let mut shares = None; - if from.get_Shares() > 0 { - shares = Some(from.get_Shares()); + if from.Shares() > 0 { + shares = Some(from.Shares()); } let mut quota = None; - if from.get_Quota() > 0 { - quota = Some(from.get_Quota()); + if from.Quota() > 0 { + quota = Some(from.Quota()); } let mut period = None; - if from.get_Period() > 0 { - period = Some(from.get_Period()); + if from.Period() > 0 { + period = Some(from.Period()); } let mut realtime_runtime = None; - if from.get_RealtimeRuntime() > 0 { - realtime_runtime = Some(from.get_RealtimeRuntime()); + if from.RealtimeRuntime() > 0 { + realtime_runtime = Some(from.RealtimeRuntime()); } let mut realtime_period = None; - if from.get_RealtimePeriod() > 0 { - realtime_period = Some(from.get_RealtimePeriod()); + if from.RealtimePeriod() > 0 { + realtime_period = Some(from.RealtimePeriod()); } let cpus = from.take_Cpus(); @@ -580,7 +551,7 @@ impl From for oci::LinuxCpu { impl From for oci::LinuxPids { fn from(from: crate::oci::LinuxPids) -> Self { oci::LinuxPids { - limit: from.get_Limit(), + limit: from.Limit(), } } } @@ -588,35 +559,35 @@ impl From for oci::LinuxPids { impl From for oci::LinuxBlockIo { fn from(from: crate::oci::LinuxBlockIO) -> Self { let mut weight = None; - if from.get_Weight() > 0 { - weight = Some(from.get_Weight() as u16); + if from.Weight() > 0 { + weight = Some(from.Weight() as u16); } let mut leaf_weight = None; - if from.get_LeafWeight() > 0 { - leaf_weight = Some(from.get_LeafWeight() as u16); + if from.LeafWeight() > 0 { + leaf_weight = Some(from.LeafWeight() as u16); } let mut weight_device = Vec::new(); - for wd in from.get_WeightDevice() { + for wd in from.WeightDevice() { weight_device.push(wd.clone().into()); } let mut throttle_read_bps_device = Vec::new(); - for td in from.get_ThrottleReadBpsDevice() { + for td in from.ThrottleReadBpsDevice() { throttle_read_bps_device.push(td.clone().into()); } let mut throttle_write_bps_device = Vec::new(); - for td in from.get_ThrottleWriteBpsDevice() { + for td in from.ThrottleWriteBpsDevice() { throttle_write_bps_device.push(td.clone().into()); } let mut throttle_read_iops_device = Vec::new(); - for td in from.get_ThrottleReadIOPSDevice() { + for td in from.ThrottleReadIOPSDevice() { throttle_read_iops_device.push(td.clone().into()); } let mut throttle_write_iops_device = Vec::new(); - for td in from.get_ThrottleWriteIOPSDevice() { + for td in from.ThrottleWriteIOPSDevice() { throttle_write_iops_device.push(td.clone().into()); } @@ -661,7 +632,7 @@ impl From for oci::LinuxInterfacePriority { fn from(mut from: crate::oci::LinuxInterfacePriority) -> Self 
{ oci::LinuxInterfacePriority { name: from.take_Name(), - priority: from.get_Priority(), + priority: from.Priority(), } } } @@ -669,11 +640,11 @@ impl From for oci::LinuxInterfacePriority { impl From for oci::LinuxNetwork { fn from(mut from: crate::oci::LinuxNetwork) -> Self { let mut class_id = None; - if from.get_ClassID() > 0 { - class_id = Some(from.get_ClassID()); + if from.ClassID() > 0 { + class_id = Some(from.ClassID()); } let mut priorities = Vec::new(); - for p in from.take_Priorities().to_vec() { + for p in from.take_Priorities() { priorities.push(p.into()) } @@ -688,7 +659,7 @@ impl From for oci::LinuxHugepageLimit { fn from(mut from: crate::oci::LinuxHugepageLimit) -> Self { oci::LinuxHugepageLimit { page_size: from.take_Pagesize(), - limit: from.get_Limit(), + limit: from.Limit(), } } } @@ -696,7 +667,7 @@ impl From for oci::LinuxHugepageLimit { impl From for oci::LinuxResources { fn from(mut from: crate::oci::LinuxResources) -> Self { let mut devices = Vec::new(); - for d in from.take_Devices().to_vec() { + for d in from.take_Devices() { devices.push(d.into()); } @@ -712,16 +683,16 @@ impl From for oci::LinuxResources { let mut pids = None; if from.has_Pids() { - pids = Some(from.get_Pids().clone().into()) + pids = Some(from.Pids().clone().into()) } let mut block_io = None; if from.has_BlockIO() { - block_io = Some(from.get_BlockIO().clone().into()); + block_io = Some(from.BlockIO().clone().into()); } let mut hugepage_limits = Vec::new(); - for hl in from.get_HugepageLimits() { + for hl in from.HugepageLimits() { hugepage_limits.push(hl.clone().into()); } @@ -750,11 +721,11 @@ impl From for oci::LinuxDevice { oci::LinuxDevice { path: from.take_Path(), r#type: from.take_Type(), - major: from.get_Major(), - minor: from.get_Minor(), - file_mode: Some(from.get_FileMode()), - uid: Some(from.get_UID()), - gid: Some(from.get_GID()), + major: from.Major(), + minor: from.Minor(), + file_mode: Some(from.FileMode()), + uid: Some(from.UID()), + gid: Some(from.GID()), } } } @@ -762,9 +733,9 @@ impl From for oci::LinuxDevice { impl From for oci::LinuxSeccompArg { fn from(mut from: crate::oci::LinuxSeccompArg) -> Self { oci::LinuxSeccompArg { - index: from.get_Index() as u32, - value: from.get_Value(), - value_two: from.get_ValueTwo(), + index: from.Index() as u32, + value: from.Value(), + value_two: from.ValueTwo(), op: from.take_Op(), } } @@ -773,14 +744,14 @@ impl From for oci::LinuxSeccompArg { impl From for oci::LinuxSyscall { fn from(mut from: crate::oci::LinuxSyscall) -> Self { let mut args = Vec::new(); - for ag in from.take_Args().to_vec() { + for ag in from.take_Args() { args.push(ag.into()); } oci::LinuxSyscall { names: from.take_Names().to_vec(), action: from.take_Action(), args, - errno_ret: from.get_errnoret(), + errno_ret: from.errnoret(), } } } @@ -788,7 +759,7 @@ impl From for oci::LinuxSyscall { impl From for oci::LinuxSeccomp { fn from(mut from: crate::oci::LinuxSeccomp) -> Self { let mut syscalls = Vec::new(); - for s in from.take_Syscalls().to_vec() { + for s in from.take_Syscalls() { syscalls.push(s.into()); } @@ -813,16 +784,16 @@ impl From for oci::LinuxNamespace { impl From for oci::Linux { fn from(mut from: crate::oci::Linux) -> Self { let mut uid_mappings = Vec::new(); - for id_map in from.take_UIDMappings().to_vec() { + for id_map in from.take_UIDMappings() { uid_mappings.push(id_map.into()) } let mut gid_mappings = Vec::new(); - for id_map in from.take_GIDMappings().to_vec() { + for id_map in from.take_GIDMappings() { gid_mappings.push(id_map.into()) } - let 
sysctl = from.get_Sysctl().clone(); + let sysctl = from.Sysctl().clone(); let mut resources = None; if from.has_Resources() { resources = Some(from.take_Resources().into()); @@ -830,12 +801,12 @@ impl From for oci::Linux { let cgroups_path = from.take_CgroupsPath(); let mut namespaces = Vec::new(); - for ns in from.take_Namespaces().to_vec() { + for ns in from.take_Namespaces() { namespaces.push(ns.into()) } let mut devices = Vec::new(); - for d in from.take_Devices().to_vec() { + for d in from.take_Devices() { devices.push(d.into()); } @@ -874,8 +845,8 @@ impl From for oci::PosixRlimit { fn from(mut from: crate::oci::POSIXRlimit) -> Self { oci::PosixRlimit { r#type: from.take_Type(), - hard: from.get_Hard(), - soft: from.get_Soft(), + hard: from.Hard(), + soft: from.Soft(), } } } @@ -895,8 +866,8 @@ impl From for oci::LinuxCapabilities { impl From for oci::User { fn from(mut from: crate::oci::User) -> Self { oci::User { - uid: from.get_UID(), - gid: from.get_GID(), + uid: from.UID(), + gid: from.GID(), additional_gids: from.take_AdditionalGids().to_vec(), username: from.take_Username(), } @@ -906,8 +877,8 @@ impl From for oci::User { impl From for oci::Box { fn from(from: crate::oci::Box) -> Self { oci::Box { - height: from.get_Height(), - width: from.get_Width(), + height: from.Height(), + width: from.Width(), } } } @@ -920,22 +891,22 @@ impl From for oci::Process { } let user = from.take_User().into(); - let args = from.take_Args().into_vec(); - let env = from.take_Env().into_vec(); + let args = from.take_Args(); + let env = from.take_Env(); let cwd = from.take_Cwd(); let mut capabilities = None; if from.has_Capabilities() { capabilities = Some(from.take_Capabilities().into()); } let mut rlimits = Vec::new(); - for rl in from.take_Rlimits().to_vec() { + for rl in from.take_Rlimits() { rlimits.push(rl.into()); } - let no_new_privileges = from.get_NoNewPrivileges(); + let no_new_privileges = from.NoNewPrivileges(); let apparmor_profile = from.take_ApparmorProfile(); let mut oom_score_adj = None; - if from.get_OOMScoreAdj() != 0 { - oom_score_adj = Some(from.get_OOMScoreAdj() as i32); + if from.OOMScoreAdj() != 0 { + oom_score_adj = Some(from.OOMScoreAdj() as i32); } let selinux_label = from.take_SelinuxLabel(); @@ -959,8 +930,8 @@ impl From for oci::Process { impl From for oci::Hook { fn from(mut from: crate::oci::Hook) -> Self { let mut timeout = None; - if from.get_Timeout() > 0 { - timeout = Some(from.get_Timeout() as i32); + if from.Timeout() > 0 { + timeout = Some(from.Timeout() as i32); } oci::Hook { path: from.take_Path(), @@ -1020,7 +991,7 @@ impl From for oci::Spec { } let mut mounts = Vec::new(); - for m in from.take_Mounts().into_vec() { + for m in from.take_Mounts() { mounts.push(m.into()) } From 009b42dbff8850675d287b0df60df597390c7f51 Mon Sep 17 00:00:00 2001 From: Tim Zhang Date: Fri, 14 Apr 2023 10:53:46 +0800 Subject: [PATCH 119/137] protocols: Fix unit test Fixes: #6646 Signed-off-by: Tim Zhang --- src/libs/protocols/src/trans.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libs/protocols/src/trans.rs b/src/libs/protocols/src/trans.rs index f05af8395..db59b7c2a 100644 --- a/src/libs/protocols/src/trans.rs +++ b/src/libs/protocols/src/trans.rs @@ -1056,7 +1056,7 @@ mod tests { #[test] fn test_from_vec_len_0() { let from: Vec = vec![]; - let to: ::protobuf::RepeatedField = from_vec(from.clone()); + let to: Vec = from_vec(from.clone()); assert_eq!(from.len(), to.len()); } @@ -1065,7 +1065,7 @@ mod tests { let from: Vec = vec![TestA { from: 
"a".to_string(), }]; - let to: ::protobuf::RepeatedField = from_vec(from.clone()); + let to: Vec = from_vec(from.clone()); assert_eq!(from.len(), to.len()); assert_eq!(from[0].from, to[0].to); From 8af6fc77cd0d0705de768398fb83112e82d0c87b Mon Sep 17 00:00:00 2001 From: Tim Zhang Date: Wed, 12 Apr 2023 21:39:15 +0800 Subject: [PATCH 120/137] agent: Bump ttrpc from 0.6.0 to 0.7.1 Fixes: #6646 Signed-off-by: Tim Zhang --- src/agent/Cargo.lock | 87 +++++++++++++++------- src/agent/Cargo.toml | 4 +- src/agent/rustjail/Cargo.toml | 2 +- src/agent/rustjail/src/cgroups/fs/mod.rs | 91 ++++++++++-------------- src/agent/rustjail/src/cgroups/mock.rs | 13 ++-- src/agent/rustjail/src/container.rs | 4 +- src/agent/rustjail/src/lib.rs | 34 ++++----- src/agent/src/device.rs | 10 +-- src/agent/src/mount.rs | 6 +- src/agent/src/netlink.rs | 25 ++++--- src/agent/src/rpc.rs | 67 +++++++++-------- 11 files changed, 181 insertions(+), 162 deletions(-) diff --git a/src/agent/Cargo.lock b/src/agent/Cargo.lock index 8a352fae0..d5c557a3d 100644 --- a/src/agent/Cargo.lock +++ b/src/agent/Cargo.lock @@ -819,7 +819,7 @@ dependencies = [ "opentelemetry", "procfs", "prometheus", - "protobuf", + "protobuf 3.2.0", "protocols", "regex", "rtnetlink", @@ -1431,7 +1431,7 @@ dependencies = [ "memchr", "parking_lot 0.12.1", "procfs", - "protobuf", + "protobuf 2.27.1", "thiserror", ] @@ -1491,9 +1491,16 @@ name = "protobuf" version = "2.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" + +[[package]] +name = "protobuf" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55bad9126f378a853655831eb7363b7b01b81d19f8cb1218861086ca4a1a61e" dependencies = [ - "serde", - "serde_derive", + "once_cell", + "protobuf-support", + "thiserror", ] [[package]] @@ -1502,17 +1509,47 @@ version = "2.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aec1632b7c8f2e620343439a7dfd1f3c47b18906c4be58982079911482b5d707" dependencies = [ - "protobuf", + "protobuf 2.27.1", ] [[package]] -name = "protobuf-codegen-pure" -version = "2.27.1" +name = "protobuf-codegen" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f8122fdb18e55190c796b088a16bdb70cd7acdcd48f7a8b796b58c62e532cc6" +checksum = "0dd418ac3c91caa4032d37cb80ff0d44e2ebe637b2fb243b6234bf89cdac4901" dependencies = [ - "protobuf", - "protobuf-codegen", + "anyhow", + "once_cell", + "protobuf 3.2.0", + "protobuf-parse", + "regex", + "tempfile", + "thiserror", +] + +[[package]] +name = "protobuf-parse" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d39b14605eaa1f6a340aec7f320b34064feb26c93aec35d6a9a2272a8ddfa49" +dependencies = [ + "anyhow", + "indexmap", + "log", + "protobuf 3.2.0", + "protobuf-support", + "tempfile", + "thiserror", + "which", +] + +[[package]] +name = "protobuf-support" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d4d7b8601c814cfb36bcebb79f0e61e45e1e93640cf778837833bbed05c372" +dependencies = [ + "thiserror", ] [[package]] @@ -1521,7 +1558,7 @@ version = "0.1.0" dependencies = [ "async-trait", "oci", - "protobuf", + "protobuf 3.2.0", "ttrpc", "ttrpc-codegen", ] @@ -1705,7 +1742,7 @@ dependencies = [ "nix 0.24.2", "oci", "path-absolutize", - "protobuf", + "protobuf 3.2.0", "protocols", "regex", "rlimit", @@ -2217,9 +2254,9 @@ dependencies = [ [[package]] name = 
"ttrpc" -version = "0.6.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ecfff459a859c6ba6668ff72b34c2f1d94d9d58f7088414c2674ad0f31cc7d8" +checksum = "a35f22a2964bea14afee161665bb260b83cb48e665e0260ca06ec0e775c8b06c" dependencies = [ "async-trait", "byteorder", @@ -2227,8 +2264,8 @@ dependencies = [ "libc", "log", "nix 0.23.1", - "protobuf", - "protobuf-codegen-pure", + "protobuf 3.2.0", + "protobuf-codegen 3.2.0", "thiserror", "tokio", "tokio-vsock", @@ -2236,28 +2273,28 @@ dependencies = [ [[package]] name = "ttrpc-codegen" -version = "0.2.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809eda4e459820237104e4b61d6b41bbe6c9e1ce6adf4057955e6e6722a90408" +checksum = "94d7f7631d7a9ebed715a47cd4cb6072cbc7ae1d4ec01598971bbec0024340c2" dependencies = [ - "protobuf", - "protobuf-codegen", - "protobuf-codegen-pure", + "protobuf 2.27.1", + "protobuf-codegen 3.2.0", + "protobuf-support", "ttrpc-compiler", ] [[package]] name = "ttrpc-compiler" -version = "0.4.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2978ed3fa047d8fd55cbeb4d4a61d461fb3021a90c9618519c73ce7e5bb66c15" +checksum = "ec3cb5dbf1f0865a34fe3f722290fe776cacb16f50428610b779467b76ddf647" dependencies = [ "derive-new", "prost", "prost-build", "prost-types", - "protobuf", - "protobuf-codegen", + "protobuf 2.27.1", + "protobuf-codegen 2.27.1", "tempfile", ] diff --git a/src/agent/Cargo.toml b/src/agent/Cargo.toml index 4b4a71c8f..d8ba20db3 100644 --- a/src/agent/Cargo.toml +++ b/src/agent/Cargo.toml @@ -10,8 +10,8 @@ oci = { path = "../libs/oci" } rustjail = { path = "rustjail" } protocols = { path = "../libs/protocols", features = ["async"] } lazy_static = "1.3.0" -ttrpc = { version = "0.6.0", features = ["async"], default-features = false } -protobuf = "2.27.0" +ttrpc = { version = "0.7.1", features = ["async"], default-features = false } +protobuf = "3.2.0" libc = "0.2.58" nix = "0.24.2" capctl = "0.2.0" diff --git a/src/agent/rustjail/Cargo.toml b/src/agent/rustjail/Cargo.toml index 8c9c2230d..19602bee2 100644 --- a/src/agent/rustjail/Cargo.toml +++ b/src/agent/rustjail/Cargo.toml @@ -18,7 +18,7 @@ scopeguard = "1.0.0" capctl = "0.2.0" lazy_static = "1.3.0" libc = "0.2.58" -protobuf = "2.27.0" +protobuf = "3.2.0" slog = "2.5.2" slog-scope = "4.1.2" scan_fmt = "0.2.6" diff --git a/src/agent/rustjail/src/cgroups/fs/mod.rs b/src/agent/rustjail/src/cgroups/fs/mod.rs index 6eaa9870d..fc023ac61 100644 --- a/src/agent/rustjail/src/cgroups/fs/mod.rs +++ b/src/agent/rustjail/src/cgroups/fs/mod.rs @@ -27,7 +27,7 @@ use oci::{ LinuxNetwork, LinuxPids, LinuxResources, }; -use protobuf::{CachedSize, RepeatedField, SingularPtrField, UnknownFields}; +use protobuf::MessageField; use protocols::agent::{ BlkioStats, BlkioStatsEntry, CgroupStats, CpuStats, CpuUsage, HugetlbStats, MemoryData, MemoryStats, PidsStats, ThrottlingData, @@ -50,7 +50,7 @@ macro_rules! 
get_controller_or_return_singular_none { ($cg:ident) => { match $cg.controller_of() { Some(c) => c, - None => return SingularPtrField::none(), + None => return MessageField::none(), } }; } @@ -134,11 +134,10 @@ impl CgroupManager for Manager { let throttling_data = get_cpu_stats(&self.cgroup); - let cpu_stats = SingularPtrField::some(CpuStats { + let cpu_stats = MessageField::some(CpuStats { cpu_usage, throttling_data, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }); // Memorystats @@ -160,8 +159,7 @@ impl CgroupManager for Manager { pids_stats, blkio_stats, hugetlb_stats, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }) } @@ -446,14 +444,14 @@ fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool let memstat = get_memory_stats(cg) .into_option() .ok_or_else(|| anyhow!("failed to get the cgroup memory stats"))?; - let memusage = memstat.get_usage(); + let memusage = memstat.usage(); // When update memory limit, the kernel would check the current memory limit // set against the new swap setting, if the current memory limit is large than // the new swap, then set limit first, otherwise the kernel would complain and // refused to set; on the other hand, if the current memory limit is smaller than // the new swap, then we should set the swap first and then set the memor limit. - if swap == -1 || memusage.get_limit() < swap as u64 { + if swap == -1 || memusage.limit() < swap as u64 { mem_controller.set_memswap_limit(swap)?; set_resource!(mem_controller, set_limit, memory, limit); } else { @@ -657,21 +655,20 @@ lazy_static! { }; } -fn get_cpu_stats(cg: &cgroups::Cgroup) -> SingularPtrField { +fn get_cpu_stats(cg: &cgroups::Cgroup) -> MessageField { let cpu_controller: &CpuController = get_controller_or_return_singular_none!(cg); let stat = cpu_controller.cpu().stat; let h = lines_to_map(&stat); - SingularPtrField::some(ThrottlingData { + MessageField::some(ThrottlingData { periods: *h.get("nr_periods").unwrap_or(&0), throttled_periods: *h.get("nr_throttled").unwrap_or(&0), throttled_time: *h.get("throttled_time").unwrap_or(&0), - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }) } -fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField { +fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> MessageField { if let Some(cpuacct_controller) = cg.controller_of::() { let cpuacct = cpuacct_controller.cpuacct(); @@ -685,13 +682,12 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField { let percpu_usage = line_to_vec(&cpuacct.usage_percpu); - return SingularPtrField::some(CpuUsage { + return MessageField::some(CpuUsage { total_usage, percpu_usage, usage_in_kernelmode, usage_in_usermode, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }); } @@ -704,17 +700,16 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField { let total_usage = *h.get("usage_usec").unwrap_or(&0); let percpu_usage = vec![]; - SingularPtrField::some(CpuUsage { + MessageField::some(CpuUsage { total_usage, percpu_usage, usage_in_kernelmode, usage_in_usermode, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }) } -fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField { +fn get_memory_stats(cg: &cgroups::Cgroup) -> MessageField { let memory_controller: &MemController = 
get_controller_or_return_singular_none!(cg); // cache from memory stat @@ -726,52 +721,48 @@ fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField { let use_hierarchy = value == 1; // get memory data - let usage = SingularPtrField::some(MemoryData { + let usage = MessageField::some(MemoryData { usage: memory.usage_in_bytes, max_usage: memory.max_usage_in_bytes, failcnt: memory.fail_cnt, limit: memory.limit_in_bytes as u64, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }); // get swap usage let memswap = memory_controller.memswap(); - let swap_usage = SingularPtrField::some(MemoryData { + let swap_usage = MessageField::some(MemoryData { usage: memswap.usage_in_bytes, max_usage: memswap.max_usage_in_bytes, failcnt: memswap.fail_cnt, limit: memswap.limit_in_bytes as u64, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }); // get kernel usage let kmem_stat = memory_controller.kmem_stat(); - let kernel_usage = SingularPtrField::some(MemoryData { + let kernel_usage = MessageField::some(MemoryData { usage: kmem_stat.usage_in_bytes, max_usage: kmem_stat.max_usage_in_bytes, failcnt: kmem_stat.fail_cnt, limit: kmem_stat.limit_in_bytes as u64, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }); - SingularPtrField::some(MemoryStats { + MessageField::some(MemoryStats { cache, usage, swap_usage, kernel_usage, use_hierarchy, stats: memory.stat.raw, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }) } -fn get_pids_stats(cg: &cgroups::Cgroup) -> SingularPtrField { +fn get_pids_stats(cg: &cgroups::Cgroup) -> MessageField { let pid_controller: &PidController = get_controller_or_return_singular_none!(cg); let current = pid_controller.get_pid_current().unwrap_or(0); @@ -785,11 +776,10 @@ fn get_pids_stats(cg: &cgroups::Cgroup) -> SingularPtrField { }, } as u64; - SingularPtrField::some(PidsStats { + MessageField::some(PidsStats { current, limit, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }) } @@ -825,8 +815,8 @@ https://github.com/opencontainers/runc/blob/a5847db387ae28c0ca4ebe4beee1a76900c8 Total 0 */ -fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> RepeatedField { - let mut m = RepeatedField::new(); +fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> Vec { + let mut m = Vec::new(); if blkiodata.is_empty() { return m; } @@ -839,16 +829,15 @@ fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> RepeatedField RepeatedField { - let mut m = RepeatedField::new(); +fn get_blkio_stat_ioservice(services: &[IoService]) -> Vec { + let mut m = Vec::new(); if services.is_empty() { return m; @@ -872,17 +861,16 @@ fn build_blkio_stats_entry(major: i16, minor: i16, op: &str, value: u64) -> Blki minor: minor as u64, op: op.to_string(), value, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() } } -fn get_blkio_stats_v2(cg: &cgroups::Cgroup) -> SingularPtrField { +fn get_blkio_stats_v2(cg: &cgroups::Cgroup) -> MessageField { let blkio_controller: &BlkIoController = get_controller_or_return_singular_none!(cg); let blkio = blkio_controller.blkio(); let mut resp = BlkioStats::new(); - let mut blkio_stats = RepeatedField::new(); + let mut blkio_stats = Vec::new(); let stat = blkio.io_stat; for s in stat { @@ -898,10 +886,10 @@ fn get_blkio_stats_v2(cg: 
&cgroups::Cgroup) -> SingularPtrField { resp.io_service_bytes_recursive = blkio_stats; - SingularPtrField::some(resp) + MessageField::some(resp) } -fn get_blkio_stats(cg: &cgroups::Cgroup) -> SingularPtrField { +fn get_blkio_stats(cg: &cgroups::Cgroup) -> MessageField { if cg.v2() { return get_blkio_stats_v2(cg); } @@ -934,7 +922,7 @@ fn get_blkio_stats(cg: &cgroups::Cgroup) -> SingularPtrField { m.sectors_recursive = get_blkio_stat_blkiodata(&blkio.sectors_recursive); } - SingularPtrField::some(m) + MessageField::some(m) } fn get_hugetlb_stats(cg: &cgroups::Cgroup) -> HashMap { @@ -958,8 +946,7 @@ fn get_hugetlb_stats(cg: &cgroups::Cgroup) -> HashMap { usage, max_usage, failcnt, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }, ); } diff --git a/src/agent/rustjail/src/cgroups/mock.rs b/src/agent/rustjail/src/cgroups/mock.rs index 3bcc99955..8ac77c63b 100644 --- a/src/agent/rustjail/src/cgroups/mock.rs +++ b/src/agent/rustjail/src/cgroups/mock.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 // -use protobuf::{CachedSize, SingularPtrField, UnknownFields}; +use protobuf::MessageField; use crate::cgroups::Manager as CgroupManager; use crate::protocols::agent::{BlkioStats, CgroupStats, CpuStats, MemoryStats, PidsStats}; @@ -33,13 +33,12 @@ impl CgroupManager for Manager { fn get_stats(&self) -> Result { Ok(CgroupStats { - cpu_stats: SingularPtrField::some(CpuStats::default()), - memory_stats: SingularPtrField::some(MemoryStats::new()), - pids_stats: SingularPtrField::some(PidsStats::new()), - blkio_stats: SingularPtrField::some(BlkioStats::new()), + cpu_stats: MessageField::some(CpuStats::default()), + memory_stats: MessageField::some(MemoryStats::new()), + pids_stats: MessageField::some(PidsStats::new()), + blkio_stats: MessageField::some(BlkioStats::new()), hugetlb_stats: HashMap::new(), - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), + ..Default::default() }) } diff --git a/src/agent/rustjail/src/container.rs b/src/agent/rustjail/src/container.rs index 60b936557..b1d7499cd 100644 --- a/src/agent/rustjail/src/container.rs +++ b/src/agent/rustjail/src/container.rs @@ -48,7 +48,7 @@ use nix::unistd::{self, fork, ForkResult, Gid, Pid, Uid, User}; use std::os::unix::fs::MetadataExt; use std::os::unix::io::AsRawFd; -use protobuf::SingularPtrField; +use protobuf::MessageField; use oci::State as OCIState; use regex::Regex; @@ -875,7 +875,7 @@ impl BaseContainer for LinuxContainer { // what about network interface stats? 
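        // protobuf 3.x wraps message-type fields in `protobuf::MessageField`
        // (rust-protobuf 2.x used `SingularPtrField`), and the generated
        // structs' remaining bookkeeping members (formerly `unknown_fields`
        // and `cached_size`) are covered by `..Default::default()`.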
Ok(StatsContainerResponse { - cgroup_stats: SingularPtrField::some(self.cgroup_manager.as_ref().get_stats()?), + cgroup_stats: MessageField::some(self.cgroup_manager.as_ref().get_stats()?), ..Default::default() }) } diff --git a/src/agent/rustjail/src/lib.rs b/src/agent/rustjail/src/lib.rs index 18ee0c333..fcfa3e029 100644 --- a/src/agent/rustjail/src/lib.rs +++ b/src/agent/rustjail/src/lib.rs @@ -82,11 +82,11 @@ pub fn process_grpc_to_oci(p: &grpc::Process) -> oci::Process { let cap = p.Capabilities.as_ref().unwrap(); Some(oci::LinuxCapabilities { - bounding: cap.Bounding.clone().into_vec(), - effective: cap.Effective.clone().into_vec(), - inheritable: cap.Inheritable.clone().into_vec(), - permitted: cap.Permitted.clone().into_vec(), - ambient: cap.Ambient.clone().into_vec(), + bounding: cap.Bounding.clone(), + effective: cap.Effective.clone(), + inheritable: cap.Inheritable.clone(), + permitted: cap.Permitted.clone(), + ambient: cap.Ambient.clone(), }) } else { None @@ -108,8 +108,8 @@ pub fn process_grpc_to_oci(p: &grpc::Process) -> oci::Process { terminal: p.Terminal, console_size, user, - args: p.Args.clone().into_vec(), - env: p.Env.clone().into_vec(), + args: p.Args.clone(), + env: p.Env.clone(), cwd: p.Cwd.clone(), capabilities, rlimits, @@ -130,9 +130,9 @@ fn root_grpc_to_oci(root: &grpc::Root) -> oci::Root { fn mount_grpc_to_oci(m: &grpc::Mount) -> oci::Mount { oci::Mount { destination: m.destination.clone(), - r#type: m.field_type.clone(), + r#type: m.type_.clone(), source: m.source.clone(), - options: m.options.clone().into_vec(), + options: m.options.clone(), } } @@ -143,8 +143,8 @@ fn hook_grpc_to_oci(h: &[grpcHook]) -> Vec { for e in h.iter() { r.push(oci::Hook { path: e.Path.clone(), - args: e.Args.clone().into_vec(), - env: e.Env.clone().into_vec(), + args: e.Args.clone(), + env: e.Env.clone(), timeout: Some(e.Timeout as i32), }); } @@ -359,7 +359,7 @@ fn seccomp_grpc_to_oci(sec: &grpc::LinuxSeccomp) -> oci::LinuxSeccomp { let mut args = Vec::new(); let errno_ret: u32 = if sys.has_errnoret() { - sys.get_errnoret() + sys.errnoret() } else { libc::EPERM as u32 }; @@ -374,7 +374,7 @@ fn seccomp_grpc_to_oci(sec: &grpc::LinuxSeccomp) -> oci::LinuxSeccomp { } r.push(oci::LinuxSyscall { - names: sys.Names.clone().into_vec(), + names: sys.Names.clone(), action: sys.Action.clone(), errno_ret, args, @@ -385,8 +385,8 @@ fn seccomp_grpc_to_oci(sec: &grpc::LinuxSeccomp) -> oci::LinuxSeccomp { oci::LinuxSeccomp { default_action: sec.DefaultAction.clone(), - architectures: sec.Architectures.clone().into_vec(), - flags: sec.Flags.clone().into_vec(), + architectures: sec.Architectures.clone(), + flags: sec.Flags.clone(), syscalls, } } @@ -456,8 +456,8 @@ fn linux_grpc_to_oci(l: &grpc::Linux) -> oci::Linux { devices, seccomp, rootfs_propagation: l.RootfsPropagation.clone(), - masked_paths: l.MaskedPaths.clone().into_vec(), - readonly_paths: l.ReadonlyPaths.clone().into_vec(), + masked_paths: l.MaskedPaths.clone(), + readonly_paths: l.ReadonlyPaths.clone(), mount_label: l.MountLabel.clone(), intel_rdt, } diff --git a/src/agent/src/device.rs b/src/agent/src/device.rs index 535a729f6..67d35651b 100644 --- a/src/agent/src/device.rs +++ b/src/agent/src/device.rs @@ -759,7 +759,7 @@ async fn vfio_pci_device_handler( device: &Device, sandbox: &Arc>, ) -> Result { - let vfio_in_guest = device.field_type != DRIVER_VFIO_PCI_GK_TYPE; + let vfio_in_guest = device.type_ != DRIVER_VFIO_PCI_GK_TYPE; let mut pci_fixups = Vec::<(pci::Address, pci::Address)>::new(); let mut group = None; @@ -874,9 +874,9 @@ 
pub async fn add_devices( async fn add_device(device: &Device, sandbox: &Arc>) -> Result { // log before validation to help with debugging gRPC protocol version differences. info!(sl!(), "device-id: {}, device-type: {}, device-vm-path: {}, device-container-path: {}, device-options: {:?}", - device.id, device.field_type, device.vm_path, device.container_path, device.options); + device.id, device.type_, device.vm_path, device.container_path, device.options); - if device.field_type.is_empty() { + if device.type_.is_empty() { return Err(anyhow!("invalid type for device {:?}", device)); } @@ -888,7 +888,7 @@ async fn add_device(device: &Device, sandbox: &Arc>) -> Result virtio_blk_device_handler(device, sandbox).await, DRIVER_BLK_CCW_TYPE => virtio_blk_ccw_device_handler(device, sandbox).await, DRIVER_MMIO_BLK_TYPE => virtiommio_blk_device_handler(device, sandbox).await, @@ -898,7 +898,7 @@ async fn add_device(device: &Device, sandbox: &Arc>) -> Result vfio_ap_device_handler(device, sandbox).await, - _ => Err(anyhow!("Unknown device type {}", device.field_type)), + _ => Err(anyhow!("Unknown device type {}", device.type_)), } } diff --git a/src/agent/src/mount.rs b/src/agent/src/mount.rs index bc13a6896..a863c3a31 100644 --- a/src/agent/src/mount.rs +++ b/src/agent/src/mount.rs @@ -214,7 +214,7 @@ async fn ephemeral_storage_handler( if storage.options.len() > 0 { // ephemeral_storage didn't support mount options except fsGroup. let mut new_storage = storage.clone(); - new_storage.options = protobuf::RepeatedField::default(); + new_storage.options = Default::default(); common_storage_handler(logger, &new_storage)?; let opts_vec: Vec = storage.options.to_vec(); @@ -654,7 +654,7 @@ pub fn set_ownership(logger: &Logger, storage: &Storage) -> Result<()> { if storage.fs_group.is_none() { return Ok(()); } - let fs_group = storage.get_fs_group(); + let fs_group = storage.fs_group(); let mut read_only = false; let opts_vec: Vec = storage.options.to_vec(); @@ -671,7 +671,7 @@ pub fn set_ownership(logger: &Logger, storage: &Storage) -> Result<()> { err })?; - if fs_group.group_change_policy == FSGroupChangePolicy::OnRootMismatch + if fs_group.group_change_policy == FSGroupChangePolicy::OnRootMismatch.into() && metadata.gid() == fs_group.group_id { let mut mask = if read_only { RO_MASK } else { RW_MASK }; diff --git a/src/agent/src/netlink.rs b/src/agent/src/netlink.rs index 29785fc43..f5e9d271b 100644 --- a/src/agent/src/netlink.rs +++ b/src/agent/src/netlink.rs @@ -7,7 +7,6 @@ use anyhow::{anyhow, Context, Result}; use futures::{future, StreamExt, TryStreamExt}; use ipnetwork::{IpNetwork, Ipv4Network, Ipv6Network}; use nix::errno::Errno; -use protobuf::RepeatedField; use protocols::types::{ARPNeighbor, IPAddress, IPFamily, Interface, Route}; use rtnetlink::{new_connection, packet, IpVersion}; use std::convert::{TryFrom, TryInto}; @@ -83,8 +82,8 @@ impl Handle { // Add new ip addresses from request for ip_address in &iface.IPAddresses { - let ip = IpAddr::from_str(ip_address.get_address())?; - let mask = ip_address.get_mask().parse::()?; + let ip = IpAddr::from_str(ip_address.address())?; + let mask = ip_address.mask().parse::()?; self.add_addresses(link.index(), std::iter::once(IpNetwork::new(ip, mask)?)) .await?; @@ -152,7 +151,7 @@ impl Handle { .map(|p| p.try_into()) .collect::>>()?; - iface.IPAddresses = RepeatedField::from_vec(ips); + iface.IPAddresses = ips; list.push(iface); } @@ -334,7 +333,7 @@ impl Handle { // `rtnetlink` offers a separate request builders for different IP versions (IP v4 and 
v6). // This if branch is a bit clumsy because it does almost the same. - if route.get_family() == IPFamily::v6 { + if route.family() == IPFamily::v6 { let dest_addr = if !route.dest.is_empty() { Ipv6Network::from_str(&route.dest)? } else { @@ -368,9 +367,9 @@ impl Handle { if Errno::from_i32(message.code.abs()) != Errno::EEXIST { return Err(anyhow!( "Failed to add IP v6 route (src: {}, dst: {}, gtw: {},Err: {})", - route.get_source(), - route.get_dest(), - route.get_gateway(), + route.source(), + route.dest(), + route.gateway(), message )); } @@ -409,9 +408,9 @@ impl Handle { if Errno::from_i32(message.code.abs()) != Errno::EEXIST { return Err(anyhow!( "Failed to add IP v4 route (src: {}, dst: {}, gtw: {},Err: {})", - route.get_source(), - route.get_dest(), - route.get_gateway(), + route.source(), + route.dest(), + route.gateway(), message )); } @@ -506,7 +505,7 @@ impl Handle { self.add_arp_neighbor(&neigh).await.map_err(|err| { anyhow!( "Failed to add ARP neighbor {}: {:?}", - neigh.get_toIPAddress().get_address(), + neigh.toIPAddress().address(), err ) })?; @@ -725,7 +724,7 @@ impl TryFrom
for IPAddress { let mask = format!("{}", value.0.header.prefix_len); Ok(IPAddress { - family, + family: family.into(), address, mask, ..Default::default() diff --git a/src/agent/src/rpc.rs b/src/agent/src/rpc.rs index 3a927b3b5..dbcb4bd13 100644 --- a/src/agent/src/rpc.rs +++ b/src/agent/src/rpc.rs @@ -21,17 +21,20 @@ use ttrpc::{ use anyhow::{anyhow, Context, Result}; use cgroups::freezer::FreezerState; use oci::{LinuxNamespace, Root, Spec}; -use protobuf::{Message, RepeatedField, SingularPtrField}; +use protobuf::{MessageDyn, MessageField}; use protocols::agent::{ AddSwapRequest, AgentDetails, CopyFileRequest, GetIPTablesRequest, GetIPTablesResponse, GuestDetailsResponse, Interfaces, Metrics, OOMEvent, ReadStreamResponse, Routes, SetIPTablesRequest, SetIPTablesResponse, StatsContainerResponse, VolumeStatsRequest, WaitProcessResponse, WriteStreamResponse, }; -use protocols::csi::{VolumeCondition, VolumeStatsResponse, VolumeUsage, VolumeUsage_Unit}; +use protocols::csi::{ + volume_usage::Unit as VolumeUsage_Unit, VolumeCondition, VolumeStatsResponse, VolumeUsage, +}; use protocols::empty::Empty; use protocols::health::{ - HealthCheckResponse, HealthCheckResponse_ServingStatus, VersionCheckResponse, + health_check_response::ServingStatus as HealthCheckResponse_ServingStatus, HealthCheckResponse, + VersionCheckResponse, }; use protocols::types::Interface; use protocols::{agent_ttrpc_async as agent_ttrpc, health_ttrpc_async as health_ttrpc}; @@ -124,11 +127,11 @@ macro_rules! is_allowed { if !AGENT_CONFIG .read() .await - .is_allowed_endpoint($req.descriptor().name()) + .is_allowed_endpoint($req.descriptor_dyn().name()) { return Err(ttrpc_error!( ttrpc::Code::UNIMPLEMENTED, - format!("{} is blocked", $req.descriptor().name()), + format!("{} is blocked", $req.descriptor_dyn().name()), )); } }; @@ -151,7 +154,7 @@ impl AgentService { kata_sys_util::validate::verify_id(&cid)?; let mut oci_spec = req.OCI.clone(); - let use_sandbox_pidns = req.get_sandbox_pidns(); + let use_sandbox_pidns = req.sandbox_pidns(); let sandbox; let mut s; @@ -785,7 +788,7 @@ impl agent_ttrpc::AgentService for AgentService { ) -> ttrpc::Result { trace_rpc_call!(ctx, "pause_container", req); is_allowed!(req); - let cid = req.get_container_id(); + let cid = req.container_id(); let s = Arc::clone(&self.sandbox); let mut sandbox = s.lock().await; @@ -809,7 +812,7 @@ impl agent_ttrpc::AgentService for AgentService { ) -> ttrpc::Result { trace_rpc_call!(ctx, "resume_container", req); is_allowed!(req); - let cid = req.get_container_id(); + let cid = req.container_id(); let s = Arc::clone(&self.sandbox); let mut sandbox = s.lock().await; @@ -964,16 +967,12 @@ impl agent_ttrpc::AgentService for AgentService { trace_rpc_call!(ctx, "update_routes", req); is_allowed!(req); - let new_routes = req - .routes - .into_option() - .map(|r| r.Routes.into_vec()) - .ok_or_else(|| { - ttrpc_error!( - ttrpc::Code::INVALID_ARGUMENT, - "empty update routes request".to_string(), - ) - })?; + let new_routes = req.routes.into_option().map(|r| r.Routes).ok_or_else(|| { + ttrpc_error!( + ttrpc::Code::INVALID_ARGUMENT, + "empty update routes request".to_string(), + ) + })?; let mut sandbox = self.sandbox.lock().await; @@ -992,7 +991,7 @@ impl agent_ttrpc::AgentService for AgentService { })?; Ok(protocols::agent::Routes { - Routes: RepeatedField::from_vec(list), + Routes: list, ..Default::default() }) } @@ -1191,7 +1190,7 @@ impl agent_ttrpc::AgentService for AgentService { })?; Ok(protocols::agent::Interfaces { - Interfaces: 
RepeatedField::from_vec(list), + Interfaces: list, ..Default::default() }) } @@ -1214,7 +1213,7 @@ impl agent_ttrpc::AgentService for AgentService { .map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, format!("list routes: {:?}", e)))?; Ok(protocols::agent::Routes { - Routes: RepeatedField::from_vec(list), + Routes: list, ..Default::default() }) } @@ -1330,7 +1329,7 @@ impl agent_ttrpc::AgentService for AgentService { let neighs = req .neighbors .into_option() - .map(|n| n.ARPNeighbors.into_vec()) + .map(|n| n.ARPNeighbors) .ok_or_else(|| { ttrpc_error!( ttrpc::Code::INVALID_ARGUMENT, @@ -1414,7 +1413,7 @@ impl agent_ttrpc::AgentService for AgentService { // to get agent details let detail = get_agent_details(); - resp.agent_details = SingularPtrField::some(detail); + resp.agent_details = MessageField::some(detail); Ok(resp) } @@ -1539,8 +1538,8 @@ impl agent_ttrpc::AgentService for AgentService { .map(|u| usage_vec.push(u)) .map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))?; - resp.usage = RepeatedField::from_vec(usage_vec); - resp.volume_condition = SingularPtrField::some(condition); + resp.usage = usage_vec; + resp.volume_condition = MessageField::some(condition); Ok(resp) } @@ -1644,7 +1643,7 @@ fn get_volume_capacity_stats(path: &str) -> Result { usage.total = stat.blocks() * block_size; usage.available = stat.blocks_free() * block_size; usage.used = usage.total - usage.available; - usage.unit = VolumeUsage_Unit::BYTES; + usage.unit = VolumeUsage_Unit::BYTES.into(); Ok(usage) } @@ -1656,7 +1655,7 @@ fn get_volume_inode_stats(path: &str) -> Result { usage.total = stat.files(); usage.available = stat.files_free(); usage.used = usage.total - usage.available; - usage.unit = VolumeUsage_Unit::INODES; + usage.unit = VolumeUsage_Unit::INODES.into(); Ok(usage) } @@ -1676,14 +1675,12 @@ fn get_agent_details() -> AgentDetails { detail.set_supports_seccomp(have_seccomp()); detail.init_daemon = unistd::getpid() == Pid::from_raw(1); - detail.device_handlers = RepeatedField::new(); - detail.storage_handlers = RepeatedField::from_vec( - STORAGE_HANDLER_LIST - .to_vec() - .iter() - .map(|x| x.to_string()) - .collect(), - ); + detail.device_handlers = Vec::new(); + detail.storage_handlers = STORAGE_HANDLER_LIST + .to_vec() + .iter() + .map(|x| x.to_string()) + .collect(); detail } From a81fff706fcb7f025662b8626d4f2c9181c33d5b Mon Sep 17 00:00:00 2001 From: Zvonko Kaiser Date: Mon, 17 Apr 2023 10:38:58 +0000 Subject: [PATCH 121/137] gpu: Adding a GPU enabled configuration We need to set hotplug on pci root port and enable at least one root port. 
Also set the guest-hooks-dir to the correct path Fixes: #6675 Signed-off-by: Zvonko Kaiser --- src/runtime/Makefile | 7 + .../config/configuration-qemu-gpu.toml.in | 692 ++++++++++++++++++ 2 files changed, 699 insertions(+) create mode 100644 src/runtime/config/configuration-qemu-gpu.toml.in diff --git a/src/runtime/Makefile b/src/runtime/Makefile index 6f719f1cb..aa03ce852 100644 --- a/src/runtime/Makefile +++ b/src/runtime/Makefile @@ -96,6 +96,7 @@ GENERATED_VARS = \ CONFIG_ACRN_IN \ CONFIG_QEMU_IN \ CONFIG_QEMU_TDX_IN \ + CONFIG_QEMU_GPU_IN \ CONFIG_CLH_IN \ CONFIG_FC_IN \ $(USER_VARS) @@ -285,6 +286,12 @@ ifneq (,$(QEMUCMD)) CONFIGS += $(CONFIG_QEMU_TDX) + CONFIG_FILE_QEMU_GPU = configuration-qemu-gpu.toml + CONFIG_QEMU_GPU = config/$(CONFIG_FILE_QEMU_GPU) + CONFIG_QEMU_GPU_IN = $(CONFIG_QEMU_GPU).in + + CONFIGS += $(CONFIG_QEMU_GPU) + # qemu-specific options (all should be suffixed by "_QEMU") DEFBLOCKSTORAGEDRIVER_QEMU := virtio-scsi DEFBLOCKDEVICEAIO_QEMU := io_uring diff --git a/src/runtime/config/configuration-qemu-gpu.toml.in b/src/runtime/config/configuration-qemu-gpu.toml.in new file mode 100644 index 000000000..33574b17d --- /dev/null +++ b/src/runtime/config/configuration-qemu-gpu.toml.in @@ -0,0 +1,692 @@ +# Copyright (c) 2017-2019 Intel Corporation +# Copyright (c) 2021 Adobe Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# XXX: WARNING: this file is auto-generated. +# XXX: +# XXX: Source file: "@CONFIG_QEMU_IN@" +# XXX: Project: +# XXX: Name: @PROJECT_NAME@ +# XXX: Type: @PROJECT_TYPE@ + +[hypervisor.qemu] +path = "@QEMUPATH@" +kernel = "@KERNELPATH@" +image = "@IMAGEPATH@" +# initrd = "@INITRDPATH@" +machine_type = "@MACHINETYPE@" + +# rootfs filesystem type: +# - ext4 (default) +# - xfs +# - erofs +rootfs_type=@DEFROOTFSTYPE@ + +# Enable confidential guest support. +# Toggling that setting may trigger different hardware features, ranging +# from memory encryption to both memory and CPU-state encryption and integrity. +# The Kata Containers runtime dynamically detects the available feature set and +# aims at enabling the largest possible one, returning an error if none is +# available, or none is supported by the hypervisor. +# +# Known limitations: +# * Does not work by design: +# - CPU Hotplug +# - Memory Hotplug +# - NVDIMM devices +# +# Default false +# confidential_guest = true + +# Choose AMD SEV-SNP confidential guests +# In case of using confidential guests on AMD hardware that supports both SEV +# and SEV-SNP, the following enables SEV-SNP guests. SEV guests are default. +# Default false +# sev_snp_guest = true + +# Enable running QEMU VMM as a non-root user. +# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as +# a non-root random user. See documentation for the limitations of this mode. +# rootless = true + +# List of valid annotation names for the hypervisor +# Each member of the list is a regular expression, which is the base name +# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path" +enable_annotations = @DEFENABLEANNOTATIONS@ + +# List of valid annotations values for the hypervisor +# Each member of the list is a path pattern as described by glob(3). +# The default if not set is empty (all annotations rejected.) +# Your distribution recommends: @QEMUVALIDHYPERVISORPATHS@ +valid_hypervisor_paths = @QEMUVALIDHYPERVISORPATHS@ + +# Optional space-separated list of options to pass to the guest kernel. 
+# For example, use `kernel_params = "vsyscall=emulate"` if you are having
+# trouble running pre-2.15 glibc.
+#
+# WARNING: - any parameter specified here will take priority over the default
+# parameter value of the same name used to start the virtual machine.
+# Do not set values here unless you understand the impact of doing so as you
+# may stop the virtual machine from booting.
+# To see the list of default parameters, enable hypervisor debug, create a
+# container and look for 'default-kernel-parameters' log entries.
+kernel_params = "@KERNELPARAMS@"
+
+# Path to the firmware.
+# If you want qemu to use the default firmware, leave this option empty.
+firmware = "@FIRMWAREPATH@"
+
+# Path to the firmware volume.
+# firmware TDVF or OVMF can be split into FIRMWARE_VARS.fd (UEFI variables
+# as configuration) and FIRMWARE_CODE.fd (UEFI program image). UEFI variables
+# can be customized per user while the UEFI code is kept the same.
+firmware_volume = "@FIRMWAREVOLUMEPATH@"
+
+# Machine accelerators
+# comma-separated list of machine accelerators to pass to the hypervisor.
+# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
+machine_accelerators="@MACHINEACCELERATORS@"
+
+# Qemu seccomp sandbox feature
+# comma-separated list of seccomp sandbox features to control the syscall access.
+# For example, `seccompsandbox= "on,obsolete=deny,spawn=deny,resourcecontrol=deny"`
+# Note: "elevateprivileges=deny" doesn't work with the daemonize option, so it's removed from the seccomp sandbox
+# Another note: enabling this feature may reduce performance; you may enable
+# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html
+#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@"
+
+# CPU features
+# comma-separated list of cpu features to pass to the cpu
+# For example, `cpu_features = "pmu=off,vmx=off"`
+cpu_features="@CPUFEATURES@"
+
+# Default number of vCPUs per SB/VM:
+# unspecified or 0 --> will be set to @DEFVCPUS@
+# < 0 --> will be set to the actual number of physical cores
+# > 0 <= number of physical cores --> will be set to the specified number
+# > number of physical cores --> will be set to the actual number of physical cores
+default_vcpus = 1
+
+# Default maximum number of vCPUs per SB/VM:
+# unspecified or == 0 --> will be set to the actual number of physical cores or to the maximum number
+# of vCPUs supported by KVM if that number is exceeded
+# > 0 <= number of physical cores --> will be set to the specified number
+# > number of physical cores --> will be set to the actual number of physical cores or to the maximum number
+# of vCPUs supported by KVM if that number is exceeded
+# WARNING: Depending on the architecture, the maximum number of vCPUs supported by KVM is used when
+# the actual number of physical cores is greater than it.
+# WARNING: Be aware that this value impacts the virtual machine's memory footprint and the CPU
+# hotplug functionality. For example, `default_maxvcpus = 240` specifies that up to 240 vCPUs
+# can be added to a SB/VM, but the memory footprint will be big. Another example, with
+# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
+# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable,
+# unless you know what you are doing.
+# NOTICE: on arm platform with gicv2 interrupt controller, set it to 8.
+default_maxvcpus = @DEFMAXVCPUS@
+
+# Bridges can be used to hot plug devices.
+# Limitations:
+# * Currently only pci bridges are supported
+# * Up to 30 devices per bridge can be hot plugged.
+# * Up to 5 PCI bridges can be cold plugged per VM.
+# This limitation could be a bug in qemu or in the kernel
+# Default number of bridges per SB/VM:
+# unspecified or 0 --> will be set to @DEFBRIDGES@
+# > 1 <= 5 --> will be set to the specified number
+# > 5 --> will be set to 5
+default_bridges = @DEFBRIDGES@
+
+# Default memory size in MiB for SB/VM.
+# If unspecified then it will be set to @DEFMEMSZ@ MiB.
+default_memory = @DEFMEMSZ@
+#
+# Default memory slots per SB/VM.
+# If unspecified then it will be set to @DEFMEMSLOTS@.
+# This determines how many times memory can be hot-added to the sandbox/VM.
+#memory_slots = @DEFMEMSLOTS@
+
+# Default maximum memory in MiB per SB / VM
+# unspecified or == 0 --> will be set to the actual amount of physical RAM
+# > 0 <= amount of physical RAM --> will be set to the specified number
+# > amount of physical RAM --> will be set to the actual amount of physical RAM
+default_maxmemory = @DEFMAXMEMSZ@
+
+# This size in MiB will be added to the hypervisor's maximum memory.
+# It is the memory address space for the NVDIMM device.
+# If the block storage driver (block_device_driver) is set to "nvdimm",
+# memory_offset should be set to the size of the block device.
+# Default 0
+#memory_offset = 0
+
+# Specifies whether virtio-mem will be enabled.
+# Please note that this option should be used with the command
+# "echo 1 > /proc/sys/vm/overcommit_memory".
+# Default false
+#enable_virtio_mem = true
+
+# Disable block device from being used for a container's rootfs.
+# In case of a storage driver like devicemapper where a container's
+# root file system is backed by a block device, the block device is passed
+# directly to the hypervisor for performance reasons.
+# This flag prevents the block device from being passed to the hypervisor;
+# virtio-fs is used instead to pass the rootfs.
+disable_block_device_use = @DEFDISABLEBLOCK@
+
+# Shared file system type:
+# - virtio-fs (default)
+# - virtio-9p
+# - virtio-fs-nydus
+shared_fs = "@DEFSHAREDFS_QEMU_VIRTIOFS@"
+
+# Path to vhost-user-fs daemon.
+virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@"
+
+# List of valid annotations values for the virtiofs daemon
+# The default if not set is empty (all annotations rejected.)
+# Your distribution recommends: @DEFVALIDVIRTIOFSDAEMONPATHS@
+valid_virtio_fs_daemon_paths = @DEFVALIDVIRTIOFSDAEMONPATHS@
+
+# Default size of DAX cache in MiB
+virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@
+
+# Default size of virtqueues
+virtio_fs_queue_size = @DEFVIRTIOFSQUEUESIZE@
+
+# Extra args for virtiofsd daemon
+#
+# Format example:
+# ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"]
+# Examples:
+# Set virtiofsd log level to debug : ["-o", "log_level=debug"] or ["-d"]
+#
+# see `virtiofsd -h` for possible options.
+virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
+
+# Cache mode:
+#
+# - never
+# Metadata, data, and pathname lookup are not cached in guest. They are
+# always fetched from host and any changes are immediately pushed to host.
+#
+# - auto
+# Metadata and pathname lookup cache expires after a configured amount of
+# time (default is 1 second). Data is cached while the file is open (close
+# to open consistency).
+#
+# - always
+# Metadata, data, and pathname lookup are cached in guest and never expire.
+virtio_fs_cache = "@DEFVIRTIOFSCACHE@"
+
+# Block storage driver to be used for the hypervisor in case the container
+# rootfs is backed by a block device.
+# This is virtio-scsi, virtio-blk
+# or nvdimm.
+block_device_driver = "@DEFBLOCKSTORAGEDRIVER_QEMU@"
+
+# aio is the I/O mechanism used by qemu
+# Options:
+#
+# - threads
+# Pthread based disk I/O.
+#
+# - native
+# Native Linux I/O.
+#
+# - io_uring
+# Linux io_uring API. This provides the fastest I/O operations on Linux, requires kernel >5.1 and
+# qemu >=5.0.
+block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"
+
+# Specifies whether cache-related options will be set for block devices.
+# Default false
+#block_device_cache_set = true
+
+# Specifies cache-related options for block devices.
+# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
+# Default false
+#block_device_cache_direct = true
+
+# Specifies cache-related options for block devices.
+# Denotes whether flush requests for the device are ignored.
+# Default false
+#block_device_cache_noflush = true
+
+# Enable iothreads (data-plane) to be used. This causes IO to be
+# handled in a separate IO thread. This is currently only implemented
+# for SCSI.
+#
+enable_iothreads = @DEFENABLEIOTHREADS@
+
+# Enable pre-allocation of VM RAM, default false
+# Enabling this will result in lower container density
+# as all of the memory will be allocated and locked
+# This is useful when you want to reserve all the memory
+# upfront or in the cases where you want memory latencies
+# to be very predictable
+# Default false
+#enable_mem_prealloc = true
+
+# Enable huge pages for VM RAM, default false
+# Enabling this will result in the VM memory
+# being allocated using huge pages.
+# This is useful when you want to use vhost-user network
+# stacks within the container. This will automatically
+# result in memory pre-allocation
+#enable_hugepages = true
+
+# Enable vhost-user storage device, default false
+# Enabling this will result in some Linux reserved block type
+# major range 240-254 being chosen to represent vhost-user devices.
+enable_vhost_user_store = @DEFENABLEVHOSTUSERSTORE@
+
+# The base directory specifically used for vhost-user devices.
+# Its sub-path "block" is used for block devices; "block/sockets" is
+# where we expect vhost-user sockets to live; "block/devices" is where
+# simulated block device nodes for vhost-user devices live.
+vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
+
+# Enable vIOMMU, default false
+# Enabling this will result in the VM having a vIOMMU device
+# This will also add the following options to the kernel's
+# command line: intel_iommu=on,iommu=pt
+#enable_iommu = true
+
+# Enable IOMMU_PLATFORM, default false
+# Enabling this will result in the VM device having iommu_platform=on set
+#enable_iommu_platform = true
+
+# List of valid annotations values for the vhost user store path
+# The default if not set is empty (all annotations rejected.)
+# Your distribution recommends: @DEFVALIDVHOSTUSERSTOREPATHS@
+valid_vhost_user_store_paths = @DEFVALIDVHOSTUSERSTOREPATHS@
+
+# The timeout for reconnecting on non-server spdk sockets when the remote end goes away.
+# qemu will delay this many seconds and then attempt to reconnect.
+# Zero disables reconnecting, and the default is zero.
+vhost_user_reconnect_timeout_sec = 0
+
+# Enable file based guest memory support. The default is an empty string which
+# will disable this feature. In the case of virtio-fs, this is enabled
+# automatically and '/dev/shm' is used as the backing folder.
+# This option will be ignored if VM templating is enabled.
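+# For example (illustrative value, not a distribution default), guest memory
+# could be backed by hugepages with:
+#   file_mem_backend = "/dev/hugepages"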
+#file_mem_backend = "@DEFFILEMEMBACKEND@"
+
+# List of valid annotations values for the file_mem_backend annotation
+# The default if not set is empty (all annotations rejected.)
+# Your distribution recommends: @DEFVALIDFILEMEMBACKENDS@
+valid_file_mem_backends = @DEFVALIDFILEMEMBACKENDS@
+
+# -pflash can add image files to the VM. The arguments should be in the format
+# of ["/path/to/flash0.img", "/path/to/flash1.img"]
+pflashes = []
+
+# This option changes the default hypervisor and kernel parameters
+# to enable debug output where available. Debug also enables the HMP socket.
+#
+# Default false
+#enable_debug = true
+
+# Disable the customizations done in the runtime when it detects
+# that it is running on top of a VMM. This will result in the runtime
+# behaving as it would when running on bare metal.
+#
+#disable_nesting_checks = true
+
+# This is the msize used for 9p shares. It is the number of bytes
+# used for 9p packet payload.
+#msize_9p = @DEFMSIZE9P@
+
+# If false and nvdimm is supported, use nvdimm device to plug guest image.
+# Otherwise virtio-block device is used.
+#
+# nvdimm is not supported when `confidential_guest = true`.
+#
+# Default is false
+#disable_image_nvdimm = true
+
+# VFIO devices are hotplugged on a bridge by default.
+# Enable hotplugging on the root bus. This may be required for devices with
+# a large PCI bar, as this is a current limitation with hotplugging on
+# a bridge.
+# Default false
+hotplug_vfio_on_root_bus = true
+
+# Before hot plugging a PCIe device, you need to add a pcie_root_port device.
+# Use this parameter when using some large PCI bar devices, such as an NVIDIA GPU.
+# The value is the number of pcie_root_port devices.
+# This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35"
+# Default 0
+pcie_root_port = 1
+
+# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
+# security (vhost-net runs ring0) for network I/O performance.
+#disable_vhost_net = true
+
+#
+# Default entropy source.
+# The path to a host source of entropy (including a real hardware RNG).
+# /dev/urandom and /dev/random are two main options.
+# Be aware that /dev/random is a blocking source of entropy. If the host
+# runs out of entropy, the VM's boot time will increase, leading to startup
+# timeouts.
+# The source of entropy /dev/urandom is non-blocking and provides a
+# generally acceptable source of entropy. It should work well for pretty much
+# all practical purposes.
+#entropy_source= "@DEFENTROPYSOURCE@"
+
+# List of valid annotations values for entropy_source
+# The default if not set is empty (all annotations rejected.)
+# Your distribution recommends: @DEFVALIDENTROPYSOURCES@
+valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
+
+# Path to OCI hook binaries in the *guest rootfs*.
+# This does not affect host-side hooks which must instead be added to
+# the OCI spec passed to the runtime.
+#
+# You can create a rootfs with hooks by customizing the osbuilder scripts:
+# https://github.com/kata-containers/kata-containers/tree/main/tools/osbuilder
+#
+# Hooks must be stored in a subdirectory of guest_hook_path according to their
+# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
+# The agent will scan these directories for executable files and add them, in
+# lexicographical order, to the lifecycle of the guest container.
+# Hooks are executed in the runtime namespace of the guest.
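+# For example, with guest_hook_path = "/etc/oci/hooks.d" (as set below), a
+# prestart hook shipped in the guest rootfs would be an executable at a path
+# like (file name purely illustrative):
+#   /etc/oci/hooks.d/prestart/nvidia-container-toolkit-hook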
+# See the official documentation:
+# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
+# Warnings will be logged if any error is encountered while scanning for hooks,
+# but it will not abort container execution.
+guest_hook_path = "/etc/oci/hooks.d"
+#
+# Use rx Rate Limiter to control network I/O inbound bandwidth (size in bits/sec for SB/VM).
+# In Qemu, we use classful qdiscs HTB (Hierarchy Token Bucket) to discipline traffic.
+# Default 0-sized value means unlimited rate.
+#rx_rate_limiter_max_rate = 0
+# Use tx Rate Limiter to control network I/O outbound bandwidth (size in bits/sec for SB/VM).
+# In Qemu, we use classful qdiscs HTB (Hierarchy Token Bucket) and ifb (Intermediate Functional Block)
+# to discipline traffic.
+# Default 0-sized value means unlimited rate.
+#tx_rate_limiter_max_rate = 0
+
+# Set where to save the guest memory dump file.
+# If set, when a GUEST_PANICKED event occurs,
+# guest memory will be dumped to the host filesystem under guest_memory_dump_path.
+# This directory will be created automatically if it does not exist.
+#
+# The dumped file (also called vmcore) can be processed with crash or gdb.
+#
+# WARNING:
+# Dumping the guest's memory can take very long depending on the amount of guest memory,
+# and can use much disk space.
+#guest_memory_dump_path="/var/crash/kata"
+
+# Whether to enable paging.
+# Basically, if you want to use "gdb" rather than "crash",
+# or need the guest-virtual addresses in the ELF vmcore,
+# then you should enable paging.
+#
+# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
+#guest_memory_dump_paging=false
+
+# Enable swap in the guest. Default false.
+# When enable_guest_swap is enabled, a raw file is inserted into the guest as the swap device
+# if the swappiness of a container (set by annotation "io.katacontainers.container.resource.swappiness")
+# is bigger than 0.
+# The size of the swap device should be
+# swap_in_bytes (set by annotation "io.katacontainers.container.resource.swap_in_bytes") - memory_limit_in_bytes.
+# If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
+# If swap_in_bytes and memory_limit_in_bytes are not set, the size should
+# be default_memory.
+#enable_guest_swap = true
+
+# Use legacy serial for the guest console if available and implemented for the architecture. Default false
+#use_legacy_serial = true
+
+# disable applying SELinux on the VMM process (default false)
+disable_selinux=@DEFDISABLESELINUX@
+
+# disable applying SELinux on the container process
+# If set to false, the type `container_t` is applied to the container process by default.
+# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
+# with `SELINUX=yes`.
+# (default: true)
+disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
+
+
+[factory]
+# VM templating support. Once enabled, new VMs are created from template
+# using vm cloning. They will share the same initial kernel, initramfs and
+# agent memory by mapping it readonly. It helps speed up new container
+# creation and saves a lot of memory if there are many kata containers running
+# on the same host.
+#
+# When disabled, new VMs are created from scratch.
+#
+# Note: Requires "initrd=" to be set ("image=" is not supported).
+#
+# Default false
+#enable_template = true
+
+# Specifies the path of template.
+#
+# Default "/run/vc/vm/template"
+#template_path = "/run/vc/vm/template"
+
+# The number of caches of VMCache:
+# unspecified or == 0 --> VMCache is disabled
+# > 0 --> will be set to the specified number
+#
+# VMCache is a function that creates VMs as caches before they are used.
+# It helps speed up new container creation.
+# The function consists of a server and some clients communicating
+# through a Unix socket. The protocol is gRPC in protocols/cache/cache.proto.
+# The VMCache server will create some VMs and cache them by factory cache.
+# It will convert a VM to gRPC format and transport it when it gets
+# a request from a client.
+# Factory grpccache is the VMCache client. It will request a gRPC-format
+# VM and convert it back to a VM. If the VMCache function is enabled,
+# kata-runtime will request a VM from factory grpccache when it creates
+# a new sandbox.
+#
+# Default 0
+#vm_cache_number = 0
+
+# Specify the address of the Unix socket that is used by VMCache.
+#
+# Default /var/run/kata-containers/cache.sock
+#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
+
+[agent.@PROJECT_TYPE@]
+# If enabled, make the agent display debug-level messages.
+# (default: disabled)
+#enable_debug = true
+
+# Enable agent tracing.
+#
+# If enabled, the agent will generate OpenTelemetry trace spans.
+#
+# Notes:
+#
+# - If the runtime also has tracing enabled, the agent spans will be
+# associated with the appropriate runtime parent span.
+# - If enabled, the runtime will wait for the container to shutdown,
+# increasing the container shutdown time slightly.
+#
+# (default: disabled)
+#enable_tracing = true
+
+# Comma separated list of kernel modules and their parameters.
+# These modules will be loaded in the guest kernel using modprobe(8).
+# The following example can be used to load two kernel modules with parameters:
+# - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
+# The first word is considered as the module name and the rest as its parameters.
+# The container will not be started when:
+# * A kernel module is specified and the modprobe command is not installed in the guest
+# or it fails loading the module.
+# * The module is not available in the guest or it doesn't meet the guest kernel
+# requirements, like architecture and version.
+#
+kernel_modules=[]
+
+# Enable debug console.
+
+# If enabled, users can connect to the guest OS running inside the hypervisor
+# through the "kata-runtime exec " command
+
+#debug_console_enabled = true
+
+# Agent connection dialing timeout value in seconds
+# (default: 30)
+#dial_timeout = 30
+
+[runtime]
+# If enabled, the runtime will log additional debug messages to the
+# system log
+# (default: disabled)
+#enable_debug = true
+#
+# Internetworking model
+# Determines how the VM should be connected to
+# the container network interface
+# Options:
+#
+# - macvtap
+# Used when the Container network interface can be bridged using
+# macvtap.
+#
+# - none
+# Used when customizing the network. Only creates a tap device. No veth pair.
+#
+# - tcfilter
+# Uses tc filter rules to redirect traffic from the network interface
+# provided by the plugin to a tap interface connected to the VM.
+#
+internetworking_model="@DEFNETWORKMODEL_QEMU@"
+
+# disable guest seccomp
+# Determines whether container seccomp profiles are passed to the virtual
+# machine and applied by the kata agent.
+# If set to true, seccomp is not applied
+# within the guest
+# (default: true)
+disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
+
+# vCPU pinning settings
+# If enabled, each vCPU thread will be scheduled to a fixed CPU.
+# Qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
+# enable_vcpus_pinning = false
+
+# Apply a custom SELinux security policy to the container process inside the VM.
+# This is used when you want to apply a type other than the default `container_t`,
+# so general users should not uncomment and apply it.
+# (format: "user:role:type")
+# Note: You cannot specify MCS policy with the label because the sensitivity levels and
+# categories are determined automatically by high-level container runtimes such as containerd.
+#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
+
+# If enabled, the runtime will create opentracing.io traces and spans.
+# (See https://www.jaegertracing.io/docs/getting-started).
+# (default: disabled)
+#enable_tracing = true
+
+# Set the full url to the Jaeger HTTP Thrift collector.
+# The default if not set will be "http://localhost:14268/api/traces"
+#jaeger_endpoint = ""
+
+# Sets the username to be used if basic auth is required for Jaeger.
+#jaeger_user = ""
+
+# Sets the password to be used if basic auth is required for Jaeger.
+#jaeger_password = ""
+
+# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
+# This option may have potential impacts on your host. It should only be used when you know what you're doing.
+# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
+# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
+# (like OVS) directly.
+# (default: false)
+#disable_new_netns = true
+
+# If enabled, the runtime will add all the kata processes inside one dedicated cgroup.
+# The container cgroups in the host are not created, just one single cgroup per sandbox.
+# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
+# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
+# The sandbox cgroup is constrained if there is no container type annotation.
+# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
+sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
+
+# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
+# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
+# when a hardware architecture or hypervisor solution is utilized which does not support CPU and/or memory hotplug.
+# Compatibility for determining appropriate sandbox (VM) size:
+# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
+# does not yet support sandbox sizing annotations.
+# - When running single containers using a tool like ctr, container sizing information will be available.
+static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT@
+
+# If specified, sandbox_bind_mounts identifies host paths to be mounted (ro) into the sandbox's shared path.
+# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
+# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
+# These will not be exposed to the container workloads, and are only provided for potential guest services.
+sandbox_bind_mounts=@DEFBINDMOUNTS@
+
+# VFIO Mode
+# Determines how VFIO devices should be presented to the container.
+# Options:
+#
+# - vfio
+# Matches behaviour of OCI runtimes (e.g. runc) as much as
+# possible. VFIO devices will appear in the container as VFIO
+# character devices under /dev/vfio. The exact names may differ
+# from the host (they need to match the VM's IOMMU group numbers
+# rather than the host's)
+#
+# - guest-kernel
+# This is a Kata-specific behaviour that's useful in certain cases.
+# The VFIO device is managed by whatever driver in the VM kernel
+# claims it. This means it will appear as one or more device nodes
+# or network interfaces depending on the nature of the device.
+# Using this mode requires specially built workloads that know how
+# to locate the relevant device interfaces within the VM.
+#
+vfio_mode="@DEFVFIOMODE@"
+
+# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
+# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
+disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
+
+# Enabled experimental feature list, format: ["a", "b"].
+# Experimental features are features not stable enough for production,
+# they may break compatibility, and are prepared for a big version bump.
+# Supported experimental features:
+# (default: [])
+experimental=@DEFAULTEXPFEATURES@
+
+# If enabled, users can run pprof tools with the shim v2 process through kata-monitor.
+# (default: false)
+# enable_pprof = true
+
+# WARNING: All the options in the following section have not been implemented yet.
+# This section was added as a placeholder. DO NOT USE IT!
+[image]
+# Container image service.
+#
+# Offload the CRI image management service to the Kata agent.
+# (default: false)
+#service_offload = true
+
+# Container image decryption keys provisioning.
+# Applies only if service_offload is true.
+# Keys can be provisioned locally (e.g. through a special command or
+# a local file) or remotely (usually after the guest is remotely attested).
+# The provision setting is a complete URL that lets the Kata agent decide
+# which method to use in order to fetch the keys.
+#
+# Keys can be stored in a local file, in a measured and attested initrd:
+#provision=data:///local/key/file
+#
+# Keys could be fetched through a special command or binary from the
+# initrd (guest) image, e.g. a firmware call:
+#provision=file:///path/to/bin/fetcher/in/guest
+#
+# Keys can be remotely provisioned. The Kata agent fetches them from e.g.
+# an HTTPS URL:
+#provision=https://my-key-broker.foo/tenant/

From a0cc8a75f2ed81fe3b9babb678451878583b59b5 Mon Sep 17 00:00:00 2001
From: Zvonko Kaiser
Date: Mon, 17 Apr 2023 10:42:04 +0000
Subject: [PATCH 122/137] gpu: Add a kube runtime class

With the added configuration, add the corresponding kube runtime class.
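A pod selects this class via its runtimeClassName; for example (illustrative
manifest, the pod and image names are placeholders):

  apiVersion: v1
  kind: Pod
  metadata:
    name: gpu-test
  spec:
    runtimeClassName: kata-qemu-gpu
    containers:
    - name: cuda
      image: nvcr.io/nvidia/cuda:12.1.0-base-ubuntu22.04
      resources:
        limits:
          nvidia.com/gpu: 1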
Signed-off-by: Zvonko Kaiser
---
 .../runtimeclasses/kata-runtimeClasses.yaml | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml b/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml
index daa4d1e2f..2b72becd7 100644
--- a/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml
+++ b/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml
@@ -63,3 +63,16 @@ overhead:
 scheduling:
   nodeSelector:
     katacontainers.io/kata-runtime: "true"
+---
+kind: RuntimeClass
+apiVersion: node.k8s.io/v1
+metadata:
+  name: kata-qemu-gpu
+handler: kata-qemu-gpu
+overhead:
+  podFixed:
+    memory: "160Mi"
+    cpu: "250m"
+scheduling:
+  nodeSelector:
+    katacontainers.io/kata-runtime: "true"

From ac7c63bc66b4ddea821ed2e6d39841aaf6fd6cc1 Mon Sep 17 00:00:00 2001
From: Zvonko Kaiser
Date: Mon, 17 Apr 2023 10:45:04 +0000
Subject: [PATCH 123/137] gpu: Add containerd shim for qemu-gpu

Last but not least, add the containerd shim configuration pointing to the
correct configuration-.toml

Signed-off-by: Zvonko Kaiser
---
 tools/packaging/kata-deploy/scripts/kata-deploy.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tools/packaging/kata-deploy/scripts/kata-deploy.sh b/tools/packaging/kata-deploy/scripts/kata-deploy.sh
index 8991e04fc..e1bb862a0 100755
--- a/tools/packaging/kata-deploy/scripts/kata-deploy.sh
+++ b/tools/packaging/kata-deploy/scripts/kata-deploy.sh
@@ -17,6 +17,7 @@ shims=(
 "fc"
 "qemu"
 "qemu-tdx"
+"qemu-gpu"
 "clh"
 "dragonball"
 )

From a6b4d92c84f65506691998a181fa381f8adead8a Mon Sep 17 00:00:00 2001
From: Tim Zhang
Date: Wed, 12 Apr 2023 21:39:51 +0800
Subject: [PATCH 124/137] runtime-rs: Bump ttrpc from 0.6.0 to 0.7.1

Fixes: #6646

Signed-off-by: Tim Zhang
---
 src/runtime-rs/Cargo.lock                     | 95 +++++---
 src/runtime-rs/crates/agent/Cargo.toml        |  4 +-
 src/runtime-rs/crates/agent/src/kata/agent.rs |  4 +-
 src/runtime-rs/crates/agent/src/kata/trans.rs | 227 +++++++-----------
 .../crates/runtimes/common/Cargo.toml         |  6 +-
 .../common/src/types/trans_from_agent.rs      | 10 +-
 .../common/src/types/trans_from_shim.rs       | 21 +-
 .../common/src/types/trans_into_shim.rs       | 35 ++-
 .../crates/runtimes/virt_container/Cargo.toml |  4 +-
 src/runtime-rs/crates/service/Cargo.toml      |  4 +-
 src/runtime-rs/crates/service/src/manager.rs  |  2 +-
 src/runtime-rs/crates/shim/Cargo.toml         |  4 +-
 src/runtime-rs/crates/shim/src/shim_delete.rs |  4 +-
 13 files changed, 189 insertions(+), 231 deletions(-)

diff --git a/src/runtime-rs/Cargo.lock b/src/runtime-rs/Cargo.lock
index 89ea2abf1..a68b485d1 100644
--- a/src/runtime-rs/Cargo.lock
+++ b/src/runtime-rs/Cargo.lock
@@ -50,7 +50,7 @@ dependencies = [
 "logging",
 "nix 0.24.3",
 "oci",
- "protobuf",
+ "protobuf 3.2.0",
 "protocols",
 "serde",
 "serde_json",
@@ -475,7 +475,7 @@ dependencies = [
 "nix 0.24.3",
 "oci",
 "persist",
- "protobuf",
+ "protobuf 3.2.0",
 "serde_json",
 "slog",
 "slog-scope",
@@ -508,13 +508,14 @@ checksum = "f3ad85c1f65dc7b37604eb0e89748faf0b9653065f2a8ef69f96a687ec1e9279"

[[package]]
name = "containerd-shim-protos"
-version = "0.2.0"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "077ec778a0835d9d85502e8535362130187759b69eddabe2bdb3a68ffb575bd0"
+checksum = "ef45f1c71aa587d8f657c546d8da38ea04f113dd05da0ef993c4515fa25fbdd1"
dependencies = [
 "async-trait",
- "protobuf",
+ "protobuf 3.2.0",
 "ttrpc",
+ "ttrpc-codegen",
]

[[package]]
name = "protobuf"
version = "2.28.0"
source =
"registry+https://github.com/rust-lang/crates.io-index" checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + +[[package]] +name = "protobuf" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55bad9126f378a853655831eb7363b7b01b81d19f8cb1218861086ca4a1a61e" dependencies = [ - "serde", - "serde_derive", + "once_cell", + "protobuf-support", + "thiserror", ] [[package]] @@ -2203,36 +2211,47 @@ version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "033460afb75cf755fcfc16dfaed20b86468082a2ea24e05ac35ab4a099a017d6" dependencies = [ - "protobuf", + "protobuf 2.28.0", ] [[package]] -name = "protobuf-codegen-pure" -version = "2.28.0" +name = "protobuf-codegen" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a29399fc94bcd3eeaa951c715f7bea69409b2445356b00519740bcd6ddd865" +checksum = "0dd418ac3c91caa4032d37cb80ff0d44e2ebe637b2fb243b6234bf89cdac4901" dependencies = [ - "protobuf", - "protobuf-codegen", + "anyhow", + "once_cell", + "protobuf 3.2.0", + "protobuf-parse", + "regex", + "tempfile", + "thiserror", ] [[package]] -name = "protobuf-codegen-pure3" -version = "2.28.2" +name = "protobuf-parse" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b351add14db0721ad0842f4858aec11a5088684112ef163fc50f113c63e69b2e" +checksum = "9d39b14605eaa1f6a340aec7f320b34064feb26c93aec35d6a9a2272a8ddfa49" dependencies = [ - "protobuf", - "protobuf-codegen3", + "anyhow", + "indexmap", + "log", + "protobuf 3.2.0", + "protobuf-support", + "tempfile", + "thiserror", + "which", ] [[package]] -name = "protobuf-codegen3" -version = "2.28.2" +name = "protobuf-support" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73c5878d0fa872bd7d61782c6aa2d2d56761ba4ed4514eb6992f5f83162f1d2f" +checksum = "a5d4d7b8601c814cfb36bcebb79f0e61e45e1e93640cf778837833bbed05c372" dependencies = [ - "protobuf", + "thiserror", ] [[package]] @@ -2241,7 +2260,7 @@ version = "0.1.0" dependencies = [ "async-trait", "oci", - "protobuf", + "protobuf 3.2.0", "ttrpc", "ttrpc-codegen", ] @@ -2676,7 +2695,7 @@ dependencies = [ "logging", "nix 0.24.3", "oci", - "protobuf", + "protobuf 3.2.0", "rand 0.8.5", "serial_test", "service", @@ -3136,9 +3155,9 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "ttrpc" -version = "0.6.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ecfff459a859c6ba6668ff72b34c2f1d94d9d58f7088414c2674ad0f31cc7d8" +checksum = "a35f22a2964bea14afee161665bb260b83cb48e665e0260ca06ec0e775c8b06c" dependencies = [ "async-trait", "byteorder", @@ -3146,8 +3165,8 @@ dependencies = [ "libc", "log", "nix 0.23.2", - "protobuf", - "protobuf-codegen-pure", + "protobuf 3.2.0", + "protobuf-codegen 3.2.0", "thiserror", "tokio", "tokio-vsock", @@ -3155,28 +3174,28 @@ dependencies = [ [[package]] name = "ttrpc-codegen" -version = "0.2.3" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2365c9179ad82b29bda1b0162c7542ab5861a7844abfedd8cfdf8bd7e12358f9" +checksum = "94d7f7631d7a9ebed715a47cd4cb6072cbc7ae1d4ec01598971bbec0024340c2" dependencies = [ - "protobuf", - "protobuf-codegen-pure3", - "protobuf-codegen3", + "protobuf 2.28.0", + "protobuf-codegen 3.2.0", + "protobuf-support", "ttrpc-compiler", ] [[package]] name = "ttrpc-compiler" -version = "0.4.3" 
+version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed57c2d6669099791507b8b491b2940f2e8975b52a73fe82efad24257d10e9bc" +checksum = "ec3cb5dbf1f0865a34fe3f722290fe776cacb16f50428610b779467b76ddf647" dependencies = [ "derive-new", "prost", "prost-build", "prost-types", - "protobuf", - "protobuf-codegen3", + "protobuf 2.28.0", + "protobuf-codegen 2.28.0", "tempfile", ] @@ -3297,7 +3316,7 @@ dependencies = [ "nix 0.24.3", "oci", "persist", - "protobuf", + "protobuf 3.2.0", "resource", "serde", "serde_derive", diff --git a/src/runtime-rs/crates/agent/Cargo.toml b/src/runtime-rs/crates/agent/Cargo.toml index 0deca014d..4475c6d47 100644 --- a/src/runtime-rs/crates/agent/Cargo.toml +++ b/src/runtime-rs/crates/agent/Cargo.toml @@ -12,12 +12,12 @@ futures = "0.1.27" anyhow = "1.0.26" async-trait = "0.1.48" log = "0.4.14" -protobuf = "2.27.0" +protobuf = "3.2.0" serde = { version = "^1.0", features = ["derive"] } serde_json = ">=1.0.9" slog = "2.5.2" slog-scope = "4.4.0" -ttrpc = { version = "0.6.1" } +ttrpc = { version = "0.7.1" } tokio = { version = "1.8.0", features = ["fs", "rt"] } url = "2.2.2" nix = "0.24.2" diff --git a/src/runtime-rs/crates/agent/src/kata/agent.rs b/src/runtime-rs/crates/agent/src/kata/agent.rs index aa0df0857..d06da15ea 100644 --- a/src/runtime-rs/crates/agent/src/kata/agent.rs +++ b/src/runtime-rs/crates/agent/src/kata/agent.rs @@ -56,7 +56,7 @@ macro_rules! impl_health_service { impl HealthService for KataAgent { $(async fn $name(&self, req: $req) -> Result<$resp> { let r = req.into(); - let (mut client, timeout, _) = self.get_health_client().await.context("get health client")?; + let (client, timeout, _) = self.get_health_client().await.context("get health client")?; let resp = client.$name(new_ttrpc_ctx(timeout * MILLISECOND_TO_NANOSECOND), &r).await?; Ok(resp.into()) })* @@ -75,7 +75,7 @@ macro_rules! 
impl_agent { impl Agent for KataAgent { $(async fn $name(&self, req: $req) -> Result<$resp> { let r = req.into(); - let (mut client, mut timeout, _) = self.get_agent_client().await.context("get client")?; + let (client, mut timeout, _) = self.get_agent_client().await.context("get client")?; // update new timeout if let Some(v) = $new_timeout { diff --git a/src/runtime-rs/crates/agent/src/kata/trans.rs b/src/runtime-rs/crates/agent/src/kata/trans.rs index 7d33a0992..172095ceb 100644 --- a/src/runtime-rs/crates/agent/src/kata/trans.rs +++ b/src/runtime-rs/crates/agent/src/kata/trans.rs @@ -30,30 +30,18 @@ use crate::{ OomEventResponse, WaitProcessResponse, WriteStreamResponse, }; -fn from_vec<F: Into<T>, T: Sized>(from: Vec<F>) -> ::protobuf::RepeatedField<T> { - let mut to: Vec<T> = vec![]; - for data in from { - to.push(data.into()); - } - ::protobuf::RepeatedField::from_vec(to) +fn trans_vec<F: Sized, T: From<F>>(from: Vec<F>) -> Vec<T> { + from.into_iter().map(|f| f.into()).collect() } -fn into_vec<F: Sized, T: From<F>>(from: ::protobuf::RepeatedField<F>) -> Vec<T> { - let mut to: Vec<T> = vec![]; - for data in from.to_vec() { - to.push(data.into()); - } - to -} - -fn from_option<F: Sized, T: From<F>>(from: Option<F>) -> ::protobuf::SingularPtrField<T> { +fn from_option<F: Sized, T: From<F>>(from: Option<F>) -> protobuf::MessageField<T> { match from { - Some(f) => ::protobuf::SingularPtrField::from_option(Some(T::from(f))), - None => ::protobuf::SingularPtrField::none(), + Some(f) => protobuf::MessageField::from_option(Some(T::from(f))), + None => protobuf::MessageField::none(), } } -fn into_option<F: Into<T>, T: Sized>(from: ::protobuf::SingularPtrField<F>) -> Option<T> { +fn into_option<F: Into<T>, T: Sized>(from: protobuf::MessageField<F>) -> Option<T> { from.into_option().map(|f| f.into()) } @@ -84,9 +72,8 @@ impl From<FSGroup> for agent::FSGroup { Self { group_id: from.group_id, - group_change_policy: policy, - unknown_fields: Default::default(), - cached_size: Default::default(), + group_change_policy: policy.into(), + ..Default::default() } } } @@ -96,9 +83,8 @@ impl From<StringUser> for agent::StringUser { Self { uid: from.uid, gid: from.gid, - additionalGids: ::protobuf::RepeatedField::from_vec(from.additional_gids), - unknown_fields: Default::default(), - cached_size: Default::default(), + additionalGids: from.additional_gids, + ..Default::default() } } } @@ -107,12 +93,11 @@ impl From<Device> for agent::Device { fn from(from: Device) -> Self { Self { id: from.id, - field_type: from.field_type, + type_: from.field_type, vm_path: from.vm_path, container_path: from.container_path, - options: from_vec(from.options), + options: trans_vec(from.options), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -121,14 +106,13 @@ impl From<Storage> for agent::Storage { fn from(from: Storage) -> Self { Self { driver: from.driver, - driver_options: from_vec(from.driver_options), + driver_options: trans_vec(from.driver_options), source: from.source, fstype: from.fs_type, fs_group: from_option(from.fs_group), - options: from_vec(from.options), + options: trans_vec(from.options), mount_point: from.mount_point, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -137,9 +121,8 @@ impl From<KernelModule> for agent::KernelModule { fn from(from: KernelModule) -> Self { Self { name: from.name, - parameters: from_vec(from.parameters), + parameters: trans_vec(from.parameters), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -166,11 +149,10 @@ impl From<types::IPFamily> for IPFamily { impl From<IPAddress> for types::IPAddress { fn from(from: IPAddress) -> Self { Self { - family:
from.family.into(), + family: protobuf::EnumOrUnknown::new(from.family.into()), address: from.address, mask: from.mask, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -178,7 +160,7 @@ impl From for types::IPAddress { impl From for IPAddress { fn from(src: types::IPAddress) -> Self { Self { - family: src.family.into(), + family: src.family.unwrap().into(), address: "".to_string(), mask: "".to_string(), } @@ -190,14 +172,13 @@ impl From for types::Interface { Self { device: from.device, name: from.name, - IPAddresses: from_vec(from.ip_addresses), + IPAddresses: trans_vec(from.ip_addresses), mtu: from.mtu, hwAddr: from.hw_addr, pciPath: from.pci_addr, - field_type: from.field_type, + type_: from.field_type, raw_flags: from.raw_flags, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -207,11 +188,11 @@ impl From for Interface { Self { device: src.device, name: src.name, - ip_addresses: into_vec(src.IPAddresses), + ip_addresses: trans_vec(src.IPAddresses), mtu: src.mtu, hw_addr: src.hwAddr, pci_addr: src.pciPath, - field_type: src.field_type, + field_type: src.type_, raw_flags: src.raw_flags, } } @@ -220,7 +201,7 @@ impl From for Interface { impl From for Interfaces { fn from(src: agent::Interfaces) -> Self { Self { - interfaces: into_vec(src.Interfaces), + interfaces: trans_vec(src.Interfaces), } } } @@ -233,9 +214,8 @@ impl From for types::Route { device: from.device, source: from.source, scope: from.scope, - family: from.family.into(), - unknown_fields: Default::default(), - cached_size: Default::default(), + family: protobuf::EnumOrUnknown::new(from.family.into()), + ..Default::default() } } } @@ -248,7 +228,7 @@ impl From for Route { device: src.device, source: src.source, scope: src.scope, - family: src.family.into(), + family: src.family.unwrap().into(), } } } @@ -256,9 +236,8 @@ impl From for Route { impl From for agent::Routes { fn from(from: Routes) -> Self { Self { - Routes: from_vec(from.routes), - unknown_fields: Default::default(), - cached_size: Default::default(), + Routes: trans_vec(from.routes), + ..Default::default() } } } @@ -266,7 +245,7 @@ impl From for agent::Routes { impl From for Routes { fn from(src: agent::Routes) -> Self { Self { - routes: into_vec(src.Routes), + routes: trans_vec(src.Routes), } } } @@ -277,12 +256,11 @@ impl From for agent::CreateContainerRequest { container_id: from.process_id.container_id(), exec_id: from.process_id.exec_id(), string_user: from_option(from.string_user), - devices: from_vec(from.devices), - storages: from_vec(from.storages), + devices: trans_vec(from.devices), + storages: trans_vec(from.storages), OCI: from_option(from.oci), sandbox_pidns: from.sandbox_pidns, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -292,8 +270,7 @@ impl From for agent::RemoveContainerRequest { Self { container_id: from.container_id, timeout: from.timeout, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -302,8 +279,7 @@ impl From for agent::StartContainerRequest { fn from(from: ContainerID) -> Self { Self { container_id: from.container_id, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -312,8 +288,7 @@ impl From for agent::StatsContainerRequest { fn from(from: ContainerID) -> Self { Self { container_id: from.container_id, - unknown_fields: Default::default(), - cached_size: 
Default::default(), + ..Default::default() } } } @@ -322,8 +297,7 @@ impl From for agent::PauseContainerRequest { fn from(from: ContainerID) -> Self { Self { container_id: from.container_id, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -332,8 +306,7 @@ impl From for agent::ResumeContainerRequest { fn from(from: ContainerID) -> Self { Self { container_id: from.container_id, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -344,8 +317,7 @@ impl From for agent::SignalProcessRequest { container_id: from.process_id.container_id(), exec_id: from.process_id.exec_id(), signal: from.signal, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -355,8 +327,7 @@ impl From for agent::WaitProcessRequest { Self { container_id: from.process_id.container_id(), exec_id: from.process_id.exec_id(), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -366,8 +337,7 @@ impl From for agent::UpdateContainerRequest { Self { container_id: from.container_id, resources: from_option(Some(from.resources)), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -378,8 +348,7 @@ impl From for agent::WriteStreamRequest { container_id: from.process_id.container_id(), exec_id: from.process_id.exec_id(), data: from.data, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -402,7 +371,7 @@ impl From for agent::GetIPTablesRequest { impl From for GetIPTablesResponse { fn from(from: agent::GetIPTablesResponse) -> Self { Self { - data: from.get_data().to_vec(), + data: from.data().to_vec(), } } } @@ -420,7 +389,7 @@ impl From for agent::SetIPTablesRequest { impl From for SetIPTablesResponse { fn from(from: agent::SetIPTablesResponse) -> Self { Self { - data: from.get_data().to_vec(), + data: from.data().to_vec(), } } } @@ -432,8 +401,7 @@ impl From for agent::ExecProcessRequest { exec_id: from.process_id.exec_id(), string_user: from_option(from.string_user), process: from_option(from.process), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -515,14 +483,14 @@ impl From for BlkioStatsEntry { impl From for BlkioStats { fn from(src: agent::BlkioStats) -> Self { Self { - io_service_bytes_recursive: into_vec(src.io_service_bytes_recursive), - io_serviced_recursive: into_vec(src.io_serviced_recursive), - io_queued_recursive: into_vec(src.io_queued_recursive), - io_service_time_recursive: into_vec(src.io_service_time_recursive), - io_wait_time_recursive: into_vec(src.io_wait_time_recursive), - io_merged_recursive: into_vec(src.io_merged_recursive), - io_time_recursive: into_vec(src.io_time_recursive), - sectors_recursive: into_vec(src.sectors_recursive), + io_service_bytes_recursive: trans_vec(src.io_service_bytes_recursive), + io_serviced_recursive: trans_vec(src.io_serviced_recursive), + io_queued_recursive: trans_vec(src.io_queued_recursive), + io_service_time_recursive: trans_vec(src.io_service_time_recursive), + io_wait_time_recursive: trans_vec(src.io_wait_time_recursive), + io_merged_recursive: trans_vec(src.io_merged_recursive), + io_time_recursive: trans_vec(src.io_time_recursive), + sectors_recursive: trans_vec(src.sectors_recursive), } } } @@ -570,7 +538,7 @@ impl From for StatsContainerResponse { fn from(src: agent::StatsContainerResponse) -> Self { 
Self { cgroup_stats: into_option(src.cgroup_stats), - network_stats: into_vec(src.network_stats), + network_stats: trans_vec(src.network_stats), } } } @@ -581,8 +549,7 @@ impl From for agent::ReadStreamRequest { container_id: from.process_id.container_id(), exec_id: from.process_id.exec_id(), len: from.len, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -598,8 +565,7 @@ impl From for agent::CloseStdinRequest { Self { container_id: from.process_id.container_id(), exec_id: from.process_id.exec_id(), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -611,8 +577,7 @@ impl From for agent::TtyWinResizeRequest { exec_id: from.process_id.exec_id(), row: from.row, column: from.column, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -621,8 +586,7 @@ impl From for agent::UpdateInterfaceRequest { fn from(from: UpdateInterfaceRequest) -> Self { Self { interface: from_option(from.interface), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -630,8 +594,7 @@ impl From for agent::UpdateInterfaceRequest { impl From for agent::ListInterfacesRequest { fn from(_: Empty) -> Self { Self { - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -640,8 +603,7 @@ impl From for agent::UpdateRoutesRequest { fn from(from: UpdateRoutesRequest) -> Self { Self { routes: from_option(from.route), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -649,8 +611,7 @@ impl From for agent::UpdateRoutesRequest { impl From for agent::ListRoutesRequest { fn from(_: Empty) -> Self { Self { - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -663,8 +624,7 @@ impl From for types::ARPNeighbor { lladdr: from.ll_addr, state: from.state, flags: from.flags, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -672,9 +632,8 @@ impl From for types::ARPNeighbor { impl From for agent::ARPNeighbors { fn from(from: ARPNeighbors) -> Self { Self { - ARPNeighbors: from_vec(from.neighbors), - unknown_fields: Default::default(), - cached_size: Default::default(), + ARPNeighbors: trans_vec(from.neighbors), + ..Default::default() } } } @@ -683,8 +642,7 @@ impl From for agent::AddARPNeighborsRequest { fn from(from: AddArpNeighborRequest) -> Self { Self { neighbors: from_option(from.neighbors), - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -693,14 +651,13 @@ impl From for agent::CreateSandboxRequest { fn from(from: CreateSandboxRequest) -> Self { Self { hostname: from.hostname, - dns: from_vec(from.dns), - storages: from_vec(from.storages), + dns: trans_vec(from.dns), + storages: trans_vec(from.storages), sandbox_pidns: from.sandbox_pidns, sandbox_id: from.sandbox_id, guest_hook_path: from.guest_hook_path, - kernel_modules: from_vec(from.kernel_modules), - unknown_fields: Default::default(), - cached_size: Default::default(), + kernel_modules: trans_vec(from.kernel_modules), + ..Default::default() } } } @@ -708,8 +665,7 @@ impl From for agent::CreateSandboxRequest { impl From for agent::DestroySandboxRequest { fn from(_: Empty) -> Self { Self { - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -720,8 +676,7 @@ impl 
From for agent::OnlineCPUMemRequest { wait: from.wait, nb_cpus: from.nb_cpus, cpu_only: from.cpu_only, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -730,8 +685,7 @@ impl From for agent::ReseedRandomDevRequest { fn from(from: ReseedRandomDevRequest) -> Self { Self { data: from.data, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -740,8 +694,7 @@ impl From for agent::MemHotplugByProbeRequest { fn from(from: MemHotplugByProbeRequest) -> Self { Self { memHotplugProbeAddr: from.mem_hotplug_probe_addr, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -751,8 +704,7 @@ impl From for agent::SetGuestDateTimeRequest { Self { Sec: from.sec, Usec: from.usec, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -762,8 +714,8 @@ impl From for AgentDetails { Self { version: src.version, init_daemon: src.init_daemon, - device_handlers: into_vec(src.device_handlers), - storage_handlers: into_vec(src.storage_handlers), + device_handlers: trans_vec(src.device_handlers), + storage_handlers: trans_vec(src.storage_handlers), supports_seccomp: src.supports_seccomp, } } @@ -790,8 +742,7 @@ impl From for agent::CopyFileRequest { gid: from.gid, offset: from.offset, data: from.data, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -807,8 +758,7 @@ impl From for WaitProcessResponse { impl From for agent::GetOOMEventRequest { fn from(_: Empty) -> Self { Self { - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -817,8 +767,7 @@ impl From for health::CheckRequest { fn from(from: CheckRequest) -> Self { Self { service: from.service, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -826,7 +775,7 @@ impl From for health::CheckRequest { impl From for HealthCheckResponse { fn from(from: health::HealthCheckResponse) -> Self { Self { - status: from.status as u32, + status: from.status.value() as u32, } } } @@ -852,8 +801,7 @@ impl From for agent::VolumeStatsRequest { fn from(from: VolumeStatsRequest) -> Self { Self { volume_guest_path: from.volume_guest_path, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } @@ -862,8 +810,8 @@ impl From for VolumeStatsResponse { fn from(from: csi::VolumeStatsResponse) -> Self { let result: String = format!( "Usage: {:?} Volume Condition: {:?}", - from.get_usage(), - from.get_volume_condition() + from.usage(), + from.volume_condition() ); Self { data: result } } @@ -874,8 +822,7 @@ impl From for agent::ResizeVolumeRequest { Self { volume_guest_path: from.volume_guest_path, size: from.size, - unknown_fields: Default::default(), - cached_size: Default::default(), + ..Default::default() } } } diff --git a/src/runtime-rs/crates/runtimes/common/Cargo.toml b/src/runtime-rs/crates/runtimes/common/Cargo.toml index 78a640e95..440db1486 100644 --- a/src/runtime-rs/crates/runtimes/common/Cargo.toml +++ b/src/runtime-rs/crates/runtimes/common/Cargo.toml @@ -10,17 +10,17 @@ license = "Apache-2.0" [dependencies] anyhow = "^1.0" async-trait = "0.1.48" -containerd-shim-protos = { version = "0.2.0", features = ["async"]} +containerd-shim-protos = { version = "0.3.0", features = ["async"]} lazy_static = "1.4.0" nix = "0.24.2" -protobuf = "2.27.0" +protobuf = "3.2.0" serde_json 
= "1.0.39" slog = "2.5.2" slog-scope = "4.4.0" strum = { version = "0.24.0", features = ["derive"] } thiserror = "^1.0" tokio = { version = "1.8.0", features = ["rt-multi-thread", "process", "fs"] } -ttrpc = { version = "0.6.1" } +ttrpc = { version = "0.7.1" } persist = {path = "../../persist"} agent = { path = "../../agent" } kata-sys-util = { path = "../../../../libs/kata-sys-util" } diff --git a/src/runtime-rs/crates/runtimes/common/src/types/trans_from_agent.rs b/src/runtime-rs/crates/runtimes/common/src/types/trans_from_agent.rs index 887777122..f28f50582 100644 --- a/src/runtime-rs/crates/runtimes/common/src/types/trans_from_agent.rs +++ b/src/runtime-rs/crates/runtimes/common/src/types/trans_from_agent.rs @@ -151,7 +151,7 @@ impl From> for StatsInfo { } if !cg_stats.hugetlb_stats.is_empty() { - let mut p_huge = ::protobuf::RepeatedField::new(); + let mut p_huge = Vec::new(); for (k, v) in cg_stats.hugetlb_stats { let mut h = metrics::HugetlbStat::new(); h.set_pagesize(k); @@ -166,7 +166,7 @@ impl From> for StatsInfo { let net_stats = stats.network_stats; if !net_stats.is_empty() { - let mut p_net = ::protobuf::RepeatedField::new(); + let mut p_net = Vec::new(); for v in net_stats.iter() { let mut h = metrics::NetworkStat::new(); h.set_name(v.name.clone()); @@ -195,10 +195,8 @@ impl From> for StatsInfo { } } -fn copy_blkio_entry( - entry: &[agent::BlkioStatsEntry], -) -> ::protobuf::RepeatedField { - let mut p_entry = ::protobuf::RepeatedField::new(); +fn copy_blkio_entry(entry: &[agent::BlkioStatsEntry]) -> Vec { + let mut p_entry = Vec::new(); for e in entry.iter() { let mut blk = metrics::BlkIOEntry::new(); diff --git a/src/runtime-rs/crates/runtimes/common/src/types/trans_from_shim.rs b/src/runtime-rs/crates/runtimes/common/src/types/trans_from_shim.rs index 4d5d7ddf1..29a4a676c 100644 --- a/src/runtime-rs/crates/runtimes/common/src/types/trans_from_shim.rs +++ b/src/runtime-rs/crates/runtimes/common/src/types/trans_from_shim.rs @@ -16,7 +16,7 @@ use std::{ path::PathBuf, }; -fn trans_from_shim_mount(from: api::Mount) -> Mount { +fn trans_from_shim_mount(from: &api::Mount) -> Mount { let options = from.options.to_vec(); let mut read_only = false; for o in &options { @@ -29,7 +29,7 @@ fn trans_from_shim_mount(from: api::Mount) -> Mount { Mount { source: from.source.clone(), destination: PathBuf::from(&from.target), - fs_type: from.field_type, + fs_type: from.type_.clone(), options, device_id: None, host_shared_fs_path: None, @@ -41,19 +41,14 @@ impl TryFrom for Request { type Error = anyhow::Error; fn try_from(from: api::CreateTaskRequest) -> Result { let options = if from.has_options() { - Some(from.get_options().get_value().to_vec()) + Some(from.options().value.to_vec()) } else { None }; Ok(Request::CreateContainer(ContainerConfig { container_id: from.id.clone(), bundle: from.bundle.clone(), - rootfs_mounts: from - .rootfs - .to_vec() - .into_iter() - .map(trans_from_shim_mount) - .collect(), + rootfs_mounts: from.rootfs.iter().map(trans_from_shim_mount).collect(), terminal: from.terminal, options, stdin: (!from.stdin.is_empty()).then(|| from.stdin.clone()), @@ -84,15 +79,15 @@ impl TryFrom for Request { impl TryFrom for Request { type Error = anyhow::Error; fn try_from(from: api::ExecProcessRequest) -> Result { - let spec = from.get_spec(); + let spec = from.spec(); Ok(Request::ExecProcess(ExecProcessRequest { process: ContainerProcess::new(&from.id, &from.exec_id).context("new process id")?, terminal: from.terminal, stdin: (!from.stdin.is_empty()).then(|| from.stdin.clone()), 
stdout: (!from.stdout.is_empty()).then(|| from.stdout.clone()), stderr: (!from.stderr.is_empty()).then(|| from.stderr.clone()), - spec_type_url: spec.get_type_url().to_string(), - spec_value: spec.get_value().to_vec(), + spec_type_url: spec.type_url.to_string(), + spec_value: spec.value.to_vec(), })) } } @@ -182,7 +177,7 @@ impl TryFrom for Request { fn try_from(from: api::UpdateTaskRequest) -> Result { Ok(Request::UpdateContainer(UpdateRequest { container_id: from.id.to_string(), - value: from.get_resources().get_value().to_vec(), + value: from.resources().value.to_vec(), })) } } diff --git a/src/runtime-rs/crates/runtimes/common/src/types/trans_into_shim.rs b/src/runtime-rs/crates/runtimes/common/src/types/trans_into_shim.rs index 345e02d93..841805bb0 100644 --- a/src/runtime-rs/crates/runtimes/common/src/types/trans_into_shim.rs +++ b/src/runtime-rs/crates/runtimes/common/src/types/trans_into_shim.rs @@ -16,24 +16,24 @@ use containerd_shim_protos::api; use super::{ProcessExitStatus, ProcessStateInfo, ProcessStatus, Response}; use crate::error::Error; -fn system_time_into(time: time::SystemTime) -> ::protobuf::well_known_types::Timestamp { - let mut proto_time = ::protobuf::well_known_types::Timestamp::new(); - proto_time.set_seconds( - time.duration_since(time::UNIX_EPOCH) - .unwrap_or_default() - .as_secs() - .try_into() - .unwrap_or_default(), - ); +fn system_time_into(time: time::SystemTime) -> ::protobuf::well_known_types::timestamp::Timestamp { + let mut proto_time = ::protobuf::well_known_types::timestamp::Timestamp::new(); + proto_time.seconds = time + .duration_since(time::UNIX_EPOCH) + .unwrap_or_default() + .as_secs() + .try_into() + .unwrap_or_default(); + proto_time } fn option_system_time_into( time: Option, -) -> ::protobuf::SingularPtrField<::protobuf::well_known_types::Timestamp> { +) -> protobuf::MessageField { match time { - Some(v) => ::protobuf::SingularPtrField::some(system_time_into(v)), - None => ::protobuf::SingularPtrField::none(), + Some(v) => ::protobuf::MessageField::some(system_time_into(v)), + None => ::protobuf::MessageField::none(), } } @@ -66,7 +66,7 @@ impl From for api::StateResponse { id: from.container_id.clone(), bundle: from.bundle.clone(), pid: from.pid.pid, - status: from.status.into(), + status: protobuf::EnumOrUnknown::new(from.status.into()), stdin: from.stdin.unwrap_or_default(), stdout: from.stdout.unwrap_or_default(), stderr: from.stderr.unwrap_or_default(), @@ -164,13 +164,13 @@ impl TryFrom for api::StateResponse { impl TryFrom for api::StatsResponse { type Error = anyhow::Error; fn try_from(from: Response) -> Result { - let mut any = ::protobuf::well_known_types::Any::new(); + let mut any = ::protobuf::well_known_types::any::Any::new(); let mut response = api::StatsResponse::new(); match from { Response::StatsContainer(resp) => { if let Some(value) = resp.value { - any.set_type_url(value.type_url); - any.set_value(value.value); + any.type_url = value.type_url; + any.value = value.value; response.set_stats(any); } Ok(response) @@ -193,8 +193,7 @@ impl TryFrom for api::PidsResponse { let mut res = api::PidsResponse::new(); p_info.set_pid(resp.pid); processes.push(p_info); - let v = protobuf::RepeatedField::::from_vec(processes); - res.set_processes(v); + res.set_processes(processes); Ok(res) } _ => Err(anyhow!(Error::UnexpectedResponse( diff --git a/src/runtime-rs/crates/runtimes/virt_container/Cargo.toml b/src/runtime-rs/crates/runtimes/virt_container/Cargo.toml index 6dea5e762..f3d8d9375 100644 --- 
a/src/runtime-rs/crates/runtimes/virt_container/Cargo.toml +++ b/src/runtime-rs/crates/runtimes/virt_container/Cargo.toml @@ -9,12 +9,12 @@ license = "Apache-2.0" anyhow = "^1.0" async-trait = "0.1.48" awaitgroup = "0.6.0" -containerd-shim-protos = { version = "0.2.0", features = ["async"]} +containerd-shim-protos = { version = "0.3.0", features = ["async"]} futures = "0.3.19" lazy_static = "1.4.0" libc = ">=0.2.39" nix = "0.24.2" -protobuf = "2.27.0" +protobuf = "3.2.0" serde = { version = "1.0.100", features = ["derive"] } serde_derive = "1.0.27" serde_json = "1.0.82" diff --git a/src/runtime-rs/crates/service/Cargo.toml b/src/runtime-rs/crates/service/Cargo.toml index 82e2c4fbc..cb414abe3 100644 --- a/src/runtime-rs/crates/service/Cargo.toml +++ b/src/runtime-rs/crates/service/Cargo.toml @@ -11,10 +11,10 @@ async-trait = "0.1.48" slog = "2.5.2" slog-scope = "4.4.0" tokio = { version = "1.8.0", features = ["rt-multi-thread"] } -ttrpc = { version = "0.6.1" } +ttrpc = { version = "0.7.1" } common = { path = "../runtimes/common" } -containerd-shim-protos = { version = "0.2.0", features = ["async"]} +containerd-shim-protos = { version = "0.3.0", features = ["async"]} logging = { path = "../../../libs/logging"} shim-interface = { path = "../../../libs/shim-interface" } runtimes = { path = "../runtimes" } diff --git a/src/runtime-rs/crates/service/src/manager.rs b/src/runtime-rs/crates/service/src/manager.rs index fe31c179b..ff0fd997c 100644 --- a/src/runtime-rs/crates/service/src/manager.rs +++ b/src/runtime-rs/crates/service/src/manager.rs @@ -14,7 +14,7 @@ use std::{ use anyhow::{Context, Result}; use common::message::{Action, Event, Message}; use containerd_shim_protos::{ - protobuf::{well_known_types::Any, Message as ProtobufMessage}, + protobuf::{well_known_types::any::Any, Message as ProtobufMessage}, shim_async, }; use runtimes::RuntimeHandlerManager; diff --git a/src/runtime-rs/crates/shim/Cargo.toml b/src/runtime-rs/crates/shim/Cargo.toml index 76abe1e9f..84521eb00 100644 --- a/src/runtime-rs/crates/shim/Cargo.toml +++ b/src/runtime-rs/crates/shim/Cargo.toml @@ -15,12 +15,12 @@ path = "src/bin/main.rs" [dependencies] anyhow = "^1.0" backtrace = {version = ">=0.3.35", features = ["libunwind", "libbacktrace", "std"], default-features = false} -containerd-shim-protos = { version = "0.2.0", features = ["async"]} +containerd-shim-protos = { version = "0.3.0", features = ["async"]} go-flag = "0.1.0" libc = "0.2.108" log = "0.4.14" nix = "0.24.2" -protobuf = "2.27.0" +protobuf = "3.2.0" sha2 = "=0.9.3" slog = {version = "2.5.2", features = ["std", "release_max_level_trace", "max_level_trace"]} slog-async = "2.5.2" diff --git a/src/runtime-rs/crates/shim/src/shim_delete.rs b/src/runtime-rs/crates/shim/src/shim_delete.rs index e1053927f..412fc8be6 100644 --- a/src/runtime-rs/crates/shim/src/shim_delete.rs +++ b/src/runtime-rs/crates/shim/src/shim_delete.rs @@ -26,12 +26,12 @@ impl ShimExecutor { async fn do_cleanup(&self) -> Result { let mut rsp = api::DeleteResponse::new(); rsp.set_exit_status(128 + libc::SIGKILL as u32); - let mut exited_time = protobuf::well_known_types::Timestamp::new(); + let mut exited_time = protobuf::well_known_types::timestamp::Timestamp::new(); let seconds = std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .map_err(Error::SystemTime)? 
.as_secs() as i64; - exited_time.set_seconds(seconds); + exited_time.seconds = seconds; rsp.set_exited_at(exited_time); let address = self From 59568c79ddd42ed3f089ffe59c82d59bba76fe4c Mon Sep 17 00:00:00 2001 From: Tim Zhang Date: Fri, 14 Apr 2023 10:27:55 +0800 Subject: [PATCH 125/137] protocols: add support for Serde rust-protobuf@3 no longer supports Serde natively, so we need to implement the support ourselves. Signed-off-by: Tim Zhang --- src/libs/protocols/.gitignore | 1 + src/libs/protocols/build.rs | 40 +++++++++++++++++++++++++- src/libs/protocols/src/lib.rs | 8 ++++++ src/libs/protocols/src/serde_config.rs | 38 ++++++++++++++++++++++++ 4 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 src/libs/protocols/src/serde_config.rs diff --git a/src/libs/protocols/.gitignore b/src/libs/protocols/.gitignore index 5b5d2f3df..bc7e10bf3 100644 --- a/src/libs/protocols/.gitignore +++ b/src/libs/protocols/.gitignore @@ -3,3 +3,4 @@ Cargo.lock src/*.rs !src/lib.rs !src/trans.rs +!src/serde_config.rs diff --git a/src/libs/protocols/build.rs b/src/libs/protocols/build.rs index 12818b057..1ec2da982 100644 --- a/src/libs/protocols/build.rs +++ b/src/libs/protocols/build.rs @@ -8,7 +8,45 @@ use std::io::{BufRead, BufReader, Read, Write}; use std::path::Path; use std::process::exit; -use ttrpc_codegen::{Codegen, Customize, ProtobufCustomize}; +use protobuf::{ + descriptor::field_descriptor_proto::Type, + reflect::{EnumDescriptor, FieldDescriptor, MessageDescriptor, OneofDescriptor}, +}; +use ttrpc_codegen::{Codegen, Customize, ProtobufCustomize, ProtobufCustomizeCallback}; + +struct GenSerde; + +impl ProtobufCustomizeCallback for GenSerde { + fn message(&self, _message: &MessageDescriptor) -> ProtobufCustomize { + ProtobufCustomize::default().before("#[cfg_attr(feature = \"with-serde\", derive(::serde::Serialize, ::serde::Deserialize))]") + } + + fn enumeration(&self, _enum_type: &EnumDescriptor) -> ProtobufCustomize { + ProtobufCustomize::default().before("#[cfg_attr(feature = \"with-serde\", derive(::serde::Serialize, ::serde::Deserialize))]") + } + + fn oneof(&self, _oneof: &OneofDescriptor) -> ProtobufCustomize { + ProtobufCustomize::default().before("#[cfg_attr(feature = \"with-serde\", derive(::serde::Serialize, ::serde::Deserialize))]") + } + + fn field(&self, field: &FieldDescriptor) -> ProtobufCustomize { + if field.proto().type_() == Type::TYPE_ENUM { + ProtobufCustomize::default().before( + "#[cfg_attr(feature = \"with-serde\", serde(serialize_with = \"crate::serialize_enum_or_unknown\", deserialize_with = \"crate::deserialize_enum_or_unknown\"))]", + ) + } else if field.proto().type_() == Type::TYPE_MESSAGE && field.is_singular() { + ProtobufCustomize::default().before( + "#[cfg_attr(feature = \"with-serde\", serde(serialize_with = \"crate::serialize_message_field\", deserialize_with = \"crate::deserialize_message_field\"))]", + ) + } else { + ProtobufCustomize::default() + } + } + + fn special_field(&self, _message: &MessageDescriptor, _field: &str) -> ProtobufCustomize { + ProtobufCustomize::default().before("#[cfg_attr(feature = \"with-serde\", serde(skip))]") + } +} fn replace_text_in_file(file_name: &str, from: &str, to: &str) -> Result<(), std::io::Error> { let mut src = File::open(file_name)?; diff --git a/src/libs/protocols/src/lib.rs b/src/libs/protocols/src/lib.rs index 801b70060..33f75ca0e 100644 --- a/src/libs/protocols/src/lib.rs +++ b/src/libs/protocols/src/lib.rs @@ -17,5 +17,13 @@ pub mod health_ttrpc; #[cfg(feature = "async")] pub mod health_ttrpc_async; pub mod
oci; +#[cfg(feature = "with-serde")] +mod serde_config; pub mod trans; pub mod types; + +#[cfg(feature = "with-serde")] +pub use serde_config::{ + deserialize_enum_or_unknown, deserialize_message_field, serialize_enum_or_unknown, + serialize_message_field, +}; diff --git a/src/libs/protocols/src/serde_config.rs b/src/libs/protocols/src/serde_config.rs new file mode 100644 index 000000000..c1a1d2b7c --- /dev/null +++ b/src/libs/protocols/src/serde_config.rs @@ -0,0 +1,38 @@ +// Copyright (c) 2023 Ant Group +// +// SPDX-License-Identifier: Apache-2.0 +// + +use protobuf::{EnumOrUnknown, MessageField}; +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "with-serde")] +pub fn serialize_enum_or_unknown<E: Serialize, S: serde::Serializer>( + e: &protobuf::EnumOrUnknown<E>, + s: S, +) -> Result<S::Ok, S::Error> { + e.value().serialize(s) +} + +pub fn serialize_message_field<E: Serialize, S: serde::Serializer>( + e: &protobuf::MessageField<E>, + s: S, +) -> Result<S::Ok, S::Error> { + if e.is_some() { + e.as_ref().unwrap().serialize(s) + } else { + s.serialize_unit() + } +} + +pub fn deserialize_enum_or_unknown<'de, E: Deserialize<'de>, D: serde::Deserializer<'de>>( + d: D, +) -> Result<EnumOrUnknown<E>, D::Error> { + i32::deserialize(d).map(EnumOrUnknown::from_i32) +} + +pub fn deserialize_message_field<'de, E: Deserialize<'de>, D: serde::Deserializer<'de>>( + d: D, +) -> Result<MessageField<E>, D::Error> { + Option::deserialize(d).map(MessageField::from_option) +} From eb3d20dccb30f93f69756a125c3992bf71271117 Mon Sep 17 00:00:00 2001 From: Tim Zhang Date: Fri, 14 Apr 2023 10:54:28 +0800 Subject: [PATCH 126/137] protocols: Add ut for Serde Fixes: #6646 Signed-off-by: Tim Zhang --- src/libs/protocols/src/serde_config.rs | 30 ++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/src/libs/protocols/src/serde_config.rs b/src/libs/protocols/src/serde_config.rs index c1a1d2b7c..064379cd1 100644 --- a/src/libs/protocols/src/serde_config.rs +++ b/src/libs/protocols/src/serde_config.rs @@ -36,3 +36,33 @@ pub fn deserialize_message_field<'de, E: Deserialize<'de>, D: serde::Deserialize ) -> Result<MessageField<E>, D::Error> { Option::deserialize(d).map(MessageField::from_option) } + +#[cfg(test)] +mod tests { + use crate::agent::{ExecProcessRequest, StringUser}; + use crate::health::{health_check_response::ServingStatus, HealthCheckResponse}; + + #[test] + fn test_serde_for_enum_or_unknown() { + let mut hc = HealthCheckResponse::new(); + hc.set_status(ServingStatus::SERVING); + + let json = serde_json::to_string(&hc).unwrap(); + let from_json: HealthCheckResponse = serde_json::from_str(&json).unwrap(); + + assert_eq!(from_json, hc); + } + + #[test] + fn test_serde_for_message_field() { + let mut epr = ExecProcessRequest::new(); + let mut str_user = StringUser::new(); + str_user.uid = "Someone's id".to_string(); + epr.set_string_user(str_user); + + let json = serde_json::to_string(&epr).unwrap(); + let from_json: ExecProcessRequest = serde_json::from_str(&json).unwrap(); + + assert_eq!(from_json, epr); + } +} From 76d2e30547d4e3e0f559ec670b1db6f2adecd7fe Mon Sep 17 00:00:00 2001 From: Tim Zhang Date: Fri, 14 Apr 2023 10:31:32 +0800 Subject: [PATCH 127/137] agent-ctl: Bump ttrpc from 0.6.0 to 0.7.1 Fixes: #6646 Signed-off-by: Tim Zhang --- src/libs/protocols/build.rs | 1 + src/tools/agent-ctl/Cargo.lock | 174 ++++++++++++++++++++----------- src/tools/agent-ctl/Cargo.toml | 4 +- src/tools/agent-ctl/src/utils.rs | 169 +++++++++++++----------------- src/tools/kata-ctl/Cargo.lock | 104 +++++++++++++----- 5 files changed, 268 insertions(+), 184 deletions(-) diff --git a/src/libs/protocols/build.rs b/src/libs/protocols/build.rs index
1ec2da982..bc34c07a0 100644 --- a/src/libs/protocols/build.rs +++ b/src/libs/protocols/build.rs @@ -156,6 +156,7 @@ fn codegen(path: &str, protos: &[&str], async_all: bool) -> Result<(), std::io:: .customize(ttrpc_options) .rust_protobuf() .rust_protobuf_customize(protobuf_options) + .rust_protobuf_customize_callback(GenSerde) .run()?; let autogen_comment = format!("\n//! Generated by {:?} ({:?})", file!(), module_path!()); diff --git a/src/tools/agent-ctl/Cargo.lock b/src/tools/agent-ctl/Cargo.lock index 234830c3a..bfa0177fb 100644 --- a/src/tools/agent-ctl/Cargo.lock +++ b/src/tools/agent-ctl/Cargo.lock @@ -31,9 +31,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.51" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b26702f315f53b6071259e15dd9d64528213b44d61de1ec926eca7715d62203" +checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" [[package]] name = "arc-swap" @@ -114,7 +114,7 @@ checksum = "d7d78656ba01f1b93024b7c3a0467f1608e4be67d725749fdcd7d2c7678fd7a2" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -131,7 +131,7 @@ checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -176,7 +176,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd9e32d7420c85055e8107e5b2463c4eeefeaac18b52359fe9f9c08a18f342b2" dependencies = [ "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -340,7 +340,7 @@ checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -351,7 +351,7 @@ checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -398,7 +398,7 @@ checksum = "f58dc3c5e468259f19f2d46304a6b28f1c3d034442e14b322d2b850e36f6d5ae" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -527,7 +527,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -593,9 +593,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "heck" @@ -651,9 +651,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.7.0" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown", @@ -728,7 +728,7 @@ dependencies = [ "logging", "nix 0.23.1", "oci", - "protobuf", + "protobuf 3.2.0", "protocols", "rand 0.8.4", "rustjail", @@ -1084,11 +1084,11 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.34" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f84e92c0f7c9d58328b85a78557813e4bd845130db68d7184635344399423b1" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ - 
"unicode-xid", + "unicode-ident", ] [[package]] @@ -1129,7 +1129,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -1144,31 +1144,68 @@ dependencies = [ [[package]] name = "protobuf" -version = "2.27.1" +version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + +[[package]] +name = "protobuf" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55bad9126f378a853655831eb7363b7b01b81d19f8cb1218861086ca4a1a61e" dependencies = [ - "serde", - "serde_derive", + "once_cell", + "protobuf-support", + "thiserror", ] [[package]] name = "protobuf-codegen" -version = "2.27.1" +version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aec1632b7c8f2e620343439a7dfd1f3c47b18906c4be58982079911482b5d707" +checksum = "033460afb75cf755fcfc16dfaed20b86468082a2ea24e05ac35ab4a099a017d6" dependencies = [ - "protobuf", + "protobuf 2.28.0", ] [[package]] -name = "protobuf-codegen-pure" -version = "2.27.1" +name = "protobuf-codegen" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f8122fdb18e55190c796b088a16bdb70cd7acdcd48f7a8b796b58c62e532cc6" +checksum = "0dd418ac3c91caa4032d37cb80ff0d44e2ebe637b2fb243b6234bf89cdac4901" dependencies = [ - "protobuf", - "protobuf-codegen", + "anyhow", + "once_cell", + "protobuf 3.2.0", + "protobuf-parse", + "regex", + "tempfile", + "thiserror", +] + +[[package]] +name = "protobuf-parse" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d39b14605eaa1f6a340aec7f320b34064feb26c93aec35d6a9a2272a8ddfa49" +dependencies = [ + "anyhow", + "indexmap", + "log", + "protobuf 3.2.0", + "protobuf-support", + "tempfile", + "thiserror", + "which", +] + +[[package]] +name = "protobuf-support" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d4d7b8601c814cfb36bcebb79f0e61e45e1e93640cf778837833bbed05c372" +dependencies = [ + "thiserror", ] [[package]] @@ -1176,7 +1213,7 @@ name = "protocols" version = "0.1.0" dependencies = [ "oci", - "protobuf", + "protobuf 3.2.0", "serde", "serde_json", "ttrpc", @@ -1185,9 +1222,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.10" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -1347,7 +1384,7 @@ dependencies = [ "nix 0.24.2", "oci", "path-absolutize", - "protobuf", + "protobuf 3.2.0", "protocols", "regex", "rlimit", @@ -1386,22 +1423,22 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.132" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9875c23cf305cd1fd7eb77234cbb705f21ea6a72c637a5c6db5fe4b8e7f008" +checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.132" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc0db5cb2556c0e558887d9bbdcf6ac4471e83ff66cf696e5419024d1606276" 
+checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.14", ] [[package]] @@ -1423,7 +1460,7 @@ checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -1552,6 +1589,17 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "syn" +version = "2.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcf316d5356ed6847742d036f8a39c3b8435cac10bd528a4bd461928a6ab34d5" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "take_mut" version = "0.2.2" @@ -1598,7 +1646,7 @@ checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -1645,7 +1693,7 @@ checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -1677,7 +1725,7 @@ checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] [[package]] @@ -1691,43 +1739,43 @@ dependencies = [ [[package]] name = "ttrpc" -version = "0.6.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7d6c992964a013c17814c08d31708d577b0aae44ebadb58755659dd824c2d1" +checksum = "a35f22a2964bea14afee161665bb260b83cb48e665e0260ca06ec0e775c8b06c" dependencies = [ "byteorder", "libc", "log", "nix 0.23.1", - "protobuf", - "protobuf-codegen-pure", + "protobuf 3.2.0", + "protobuf-codegen 3.2.0", "thiserror", ] [[package]] name = "ttrpc-codegen" -version = "0.2.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809eda4e459820237104e4b61d6b41bbe6c9e1ce6adf4057955e6e6722a90408" +checksum = "94d7f7631d7a9ebed715a47cd4cb6072cbc7ae1d4ec01598971bbec0024340c2" dependencies = [ - "protobuf", - "protobuf-codegen", - "protobuf-codegen-pure", + "protobuf 2.28.0", + "protobuf-codegen 3.2.0", + "protobuf-support", "ttrpc-compiler", ] [[package]] name = "ttrpc-compiler" -version = "0.4.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2978ed3fa047d8fd55cbeb4d4a61d461fb3021a90c9618519c73ce7e5bb66c15" +checksum = "ec3cb5dbf1f0865a34fe3f722290fe776cacb16f50428610b779467b76ddf647" dependencies = [ "derive-new", "prost", "prost-build", "prost-types", - "protobuf", - "protobuf-codegen", + "protobuf 2.28.0", + "protobuf-codegen 2.28.0", "tempfile", ] @@ -1741,6 +1789,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "unicode-ident" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" + [[package]] name = "unicode-segmentation" version = "1.8.0" @@ -1804,7 +1858,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn", + "syn 1.0.82", "wasm-bindgen-shared", ] @@ -1826,7 +1880,7 @@ checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.82", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -1981,7 +2035,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn", + "syn 1.0.82", ] [[package]] @@ -2018,5 +2072,5 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.82", ] diff --git 
a/src/tools/agent-ctl/Cargo.toml b/src/tools/agent-ctl/Cargo.toml index 25852d83b..033380396 100644 --- a/src/tools/agent-ctl/Cargo.toml +++ b/src/tools/agent-ctl/Cargo.toml @@ -27,12 +27,12 @@ logging = { path = "../../libs/logging" } slog = "2.7.0" slog-scope = "4.4.0" rand = "0.8.4" -protobuf = "2.27.0" +protobuf = "3.2.0" nix = "0.23.0" libc = "0.2.112" # XXX: Must be the same as the version used by the agent -ttrpc = { version = "0.6.0" } +ttrpc = { version = "0.7.1" } # For parsing timeouts humantime = "2.1.0" diff --git a/src/tools/agent-ctl/src/utils.rs b/src/tools/agent-ctl/src/utils.rs index 6ba10841b..59dba46f0 100644 --- a/src/tools/agent-ctl/src/utils.rs +++ b/src/tools/agent-ctl/src/utils.rs @@ -268,8 +268,7 @@ fn root_oci_to_ttrpc(bundle_dir: &str, root: &ociRoot) -> Result { let ttrpc_root = ttrpcRoot { Path: path, Readonly: root.readonly, - unknown_fields: protobuf::UnknownFields::new(), - cached_size: protobuf::CachedSize::default(), + ..Default::default() }; Ok(ttrpc_root) @@ -281,9 +280,9 @@ fn process_oci_to_ttrpc(p: &ociProcess) -> ttrpcProcess { let mut b = ttrpcBox::new(); b.set_Width(s.width); b.set_Height(s.height); - protobuf::SingularPtrField::some(b) + protobuf::MessageField::some(b) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), }; let oom_score_adj: i64 = match p.oom_score_adj { @@ -298,23 +297,23 @@ fn process_oci_to_ttrpc(p: &ociProcess) -> ttrpcProcess { // FIXME: Implement RLimits OCI spec handling (copy from p.rlimits) //let rlimits = vec![ttrpcPOSIXRlimit::new()]; - let rlimits = protobuf::RepeatedField::new(); + let rlimits = Vec::new(); let capabilities = match &p.capabilities { Some(c) => { let mut gc = ttrpcLinuxCapabilities::new(); - gc.set_Bounding(protobuf::RepeatedField::from_slice(&c.bounding)); - gc.set_Effective(protobuf::RepeatedField::from_slice(&c.effective)); - gc.set_Inheritable(protobuf::RepeatedField::from_slice(&c.inheritable)); - gc.set_Permitted(protobuf::RepeatedField::from_slice(&c.permitted)); - gc.set_Ambient(protobuf::RepeatedField::from_slice(&c.ambient)); + gc.set_Bounding(c.bounding.clone()); + gc.set_Effective(c.effective.clone()); + gc.set_Inheritable(c.inheritable.clone()); + gc.set_Permitted(c.permitted.clone()); + gc.set_Ambient(c.ambient.clone()); - protobuf::SingularPtrField::some(gc) + protobuf::MessageField::some(gc) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), }; - let mut env = protobuf::RepeatedField::new(); + let mut env = Vec::new(); for pair in &p.env { env.push(pair.to_string()); } @@ -322,8 +321,8 @@ fn process_oci_to_ttrpc(p: &ociProcess) -> ttrpcProcess { ttrpcProcess { Terminal: p.terminal, ConsoleSize: console_size, - User: protobuf::SingularPtrField::some(user), - Args: protobuf::RepeatedField::from_vec(p.args.clone()), + User: protobuf::MessageField::some(user), + Args: p.args.clone(), Env: env, Cwd: p.cwd.clone(), Capabilities: capabilities, @@ -332,13 +331,12 @@ fn process_oci_to_ttrpc(p: &ociProcess) -> ttrpcProcess { ApparmorProfile: p.apparmor_profile.clone(), OOMScoreAdj: oom_score_adj, SelinuxLabel: p.selinux_label.clone(), - unknown_fields: protobuf::UnknownFields::new(), - cached_size: protobuf::CachedSize::default(), + ..Default::default() } } fn mount_oci_to_ttrpc(m: &ociMount) -> ttrpcMount { - let mut ttrpc_options = protobuf::RepeatedField::new(); + let mut ttrpc_options = Vec::new(); for op in &m.options { ttrpc_options.push(op.to_string()); } @@ -346,17 +344,14 @@ fn mount_oci_to_ttrpc(m: &ociMount) 
-> ttrpcMount { ttrpcMount { destination: m.destination.clone(), source: m.source.clone(), - field_type: m.r#type.clone(), + type_: m.r#type.clone(), options: ttrpc_options, - unknown_fields: protobuf::UnknownFields::new(), - cached_size: protobuf::CachedSize::default(), + ..Default::default() } } -fn idmaps_oci_to_ttrpc( - res: &[oci::LinuxIdMapping], -) -> protobuf::RepeatedField { - let mut ttrpc_idmaps = protobuf::RepeatedField::new(); +fn idmaps_oci_to_ttrpc(res: &[oci::LinuxIdMapping]) -> Vec { + let mut ttrpc_idmaps = Vec::new(); for m in res.iter() { let mut idmapping = ttrpcLinuxIDMapping::default(); idmapping.set_HostID(m.host_id); @@ -367,10 +362,8 @@ fn idmaps_oci_to_ttrpc( ttrpc_idmaps } -fn devices_oci_to_ttrpc( - res: &[oci::LinuxDeviceCgroup], -) -> protobuf::RepeatedField { - let mut ttrpc_devices = protobuf::RepeatedField::new(); +fn devices_oci_to_ttrpc(res: &[oci::LinuxDeviceCgroup]) -> Vec { + let mut ttrpc_devices = Vec::new(); for d in res.iter() { let mut device = ttrpcLinuxDeviceCgroup::default(); device.set_Major(d.major.unwrap_or(0)); @@ -383,12 +376,10 @@ fn devices_oci_to_ttrpc( ttrpc_devices } -fn memory_oci_to_ttrpc( - res: &Option, -) -> protobuf::SingularPtrField { +fn memory_oci_to_ttrpc(res: &Option) -> protobuf::MessageField { let memory = if res.is_some() { let mem = res.as_ref().unwrap(); - protobuf::SingularPtrField::some(ttrpcLinuxMemory { + protobuf::MessageField::some(ttrpcLinuxMemory { Limit: mem.limit.unwrap_or(0), Reservation: mem.reservation.unwrap_or(0), Swap: mem.swap.unwrap_or(0), @@ -396,16 +387,15 @@ fn memory_oci_to_ttrpc( KernelTCP: mem.kernel_tcp.unwrap_or(0), Swappiness: mem.swappiness.unwrap_or(0), DisableOOMKiller: mem.disable_oom_killer.unwrap_or(false), - unknown_fields: protobuf::UnknownFields::new(), - cached_size: protobuf::CachedSize::default(), + ..Default::default() }) } else { - protobuf::SingularPtrField::none() + protobuf::MessageField::none() }; memory } -fn cpu_oci_to_ttrpc(res: &Option) -> protobuf::SingularPtrField { +fn cpu_oci_to_ttrpc(res: &Option) -> protobuf::MessageField { match &res { Some(s) => { let mut cpu = ttrpcLinuxCPU::default(); @@ -414,27 +404,25 @@ fn cpu_oci_to_ttrpc(res: &Option) -> protobuf::SingularPtrField protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), } } -fn pids_oci_to_ttrpc(res: &Option) -> protobuf::SingularPtrField { +fn pids_oci_to_ttrpc(res: &Option) -> protobuf::MessageField { match &res { Some(s) => { let mut b = ttrpcLinuxPids::new(); b.set_Limit(s.limit); - protobuf::SingularPtrField::some(b) + protobuf::MessageField::some(b) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), } } -fn hugepage_limits_oci_to_ttrpc( - res: &[oci::LinuxHugepageLimit], -) -> protobuf::RepeatedField { - let mut ttrpc_hugepage_limits = protobuf::RepeatedField::new(); +fn hugepage_limits_oci_to_ttrpc(res: &[oci::LinuxHugepageLimit]) -> Vec { + let mut ttrpc_hugepage_limits = Vec::new(); for h in res.iter() { let mut hugepage_limit = ttrpcLinuxHugepageLimit::default(); hugepage_limit.set_Limit(h.limit); @@ -446,28 +434,26 @@ fn hugepage_limits_oci_to_ttrpc( fn network_oci_to_ttrpc( res: &Option, -) -> protobuf::SingularPtrField { +) -> protobuf::MessageField { match &res { Some(s) => { let mut b = ttrpcLinuxNetwork::new(); b.set_ClassID(s.class_id.unwrap_or(0)); - let mut priorities = protobuf::RepeatedField::new(); + let mut priorities = Vec::new(); for pr in s.priorities.iter() { let mut lip = ttrpcLinuxInterfacePriority::new(); 
lip.set_Name(pr.name.clone()); lip.set_Priority(pr.priority); priorities.push(lip); } - protobuf::SingularPtrField::some(b) + protobuf::MessageField::some(b) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), } } -fn weight_devices_oci_to_ttrpc( - res: &[oci::LinuxWeightDevice], -) -> protobuf::RepeatedField { - let mut ttrpc_weight_devices = protobuf::RepeatedField::new(); +fn weight_devices_oci_to_ttrpc(res: &[oci::LinuxWeightDevice]) -> Vec { + let mut ttrpc_weight_devices = Vec::new(); for dev in res.iter() { let mut device = ttrpcLinuxWeightDevice::default(); device.set_Major(dev.blk.major); @@ -489,8 +475,8 @@ fn weight_devices_oci_to_ttrpc( fn throttle_devices_oci_to_ttrpc( res: &[oci::LinuxThrottleDevice], -) -> protobuf::RepeatedField { - let mut ttrpc_throttle_devices = protobuf::RepeatedField::new(); +) -> Vec { + let mut ttrpc_throttle_devices = Vec::new(); for dev in res.iter() { let mut device = ttrpcLinuxThrottleDevice::default(); device.set_Major(dev.blk.major); @@ -503,7 +489,7 @@ fn throttle_devices_oci_to_ttrpc( fn block_io_oci_to_ttrpc( res: &Option, -) -> protobuf::SingularPtrField { +) -> protobuf::MessageField { match &res { Some(s) => { let mut b = ttrpcLinuxBlockIO::new(); @@ -529,9 +515,9 @@ fn block_io_oci_to_ttrpc( b.set_ThrottleWriteIOPSDevice(throttle_devices_oci_to_ttrpc( &s.throttle_write_iops_device, )); - protobuf::SingularPtrField::some(b) + protobuf::MessageField::some(b) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), } } @@ -552,15 +538,12 @@ fn resources_oci_to_ttrpc(res: &oci::LinuxResources) -> ttrpcLinuxResources { BlockIO: block_io, HugepageLimits: hugepage_limits, Network: network, - unknown_fields: protobuf::UnknownFields::new(), - cached_size: protobuf::CachedSize::default(), + ..Default::default() } } -fn namespace_oci_to_ttrpc( - res: &[oci::LinuxNamespace], -) -> protobuf::RepeatedField { - let mut ttrpc_namespace = protobuf::RepeatedField::new(); +fn namespace_oci_to_ttrpc(res: &[oci::LinuxNamespace]) -> Vec { + let mut ttrpc_namespace = Vec::new(); for n in res.iter() { let mut ns = ttrpcLinuxNamespace::default(); ns.set_Path(n.path.clone()); @@ -570,10 +553,8 @@ fn namespace_oci_to_ttrpc( ttrpc_namespace } -fn linux_devices_oci_to_ttrpc( - res: &[oci::LinuxDevice], -) -> protobuf::RepeatedField { - let mut ttrpc_linux_devices = protobuf::RepeatedField::new(); +fn linux_devices_oci_to_ttrpc(res: &[oci::LinuxDevice]) -> Vec { + let mut ttrpc_linux_devices = Vec::new(); for n in res.iter() { let mut ld = ttrpcLinuxDevice::default(); ld.set_FileMode(n.file_mode.unwrap_or(0)); @@ -590,22 +571,22 @@ fn linux_devices_oci_to_ttrpc( fn seccomp_oci_to_ttrpc(sec: &oci::LinuxSeccomp) -> ttrpcLinuxSeccomp { let mut ttrpc_seccomp = ttrpcLinuxSeccomp::default(); - let mut ttrpc_arch = protobuf::RepeatedField::new(); + let mut ttrpc_arch = Vec::new(); for a in &sec.architectures { ttrpc_arch.push(std::string::String::from(a)); } ttrpc_seccomp.set_Architectures(ttrpc_arch); ttrpc_seccomp.set_DefaultAction(sec.default_action.clone()); - let mut ttrpc_flags = protobuf::RepeatedField::new(); + let mut ttrpc_flags = Vec::new(); for f in &sec.flags { ttrpc_flags.push(std::string::String::from(f)); } ttrpc_seccomp.set_Flags(ttrpc_flags); - let mut ttrpc_syscalls = protobuf::RepeatedField::new(); + let mut ttrpc_syscalls = Vec::new(); for sys in &sec.syscalls { let mut ttrpc_sys = ttrpcLinuxSyscall::default(); ttrpc_sys.set_Action(sys.action.clone()); - let mut ttrpc_args = 
protobuf::RepeatedField::new(); + let mut ttrpc_args = Vec::new(); for arg in &sys.args { let mut a = ttrpcLinuxSeccompArg::default(); a.set_Index(arg.index as u64); @@ -632,9 +613,9 @@ fn linux_oci_to_ttrpc(l: &ociLinux) -> ttrpcLinux { let ttrpc_linux_resources = match &l.resources { Some(s) => { let b = resources_oci_to_ttrpc(s); - protobuf::SingularPtrField::some(b) + protobuf::MessageField::some(b) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), }; let ttrpc_namespaces = namespace_oci_to_ttrpc(&l.namespaces); @@ -642,17 +623,17 @@ fn linux_oci_to_ttrpc(l: &ociLinux) -> ttrpcLinux { let ttrpc_seccomp = match &l.seccomp { Some(s) => { let b = seccomp_oci_to_ttrpc(s); - protobuf::SingularPtrField::some(b) + protobuf::MessageField::some(b) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), }; let ttrpc_intel_rdt = match &l.intel_rdt { Some(s) => { let b = intel_rdt_oci_to_ttrpc(s); - protobuf::SingularPtrField::some(b) + protobuf::MessageField::some(b) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), }; ttrpcLinux { @@ -665,38 +646,37 @@ fn linux_oci_to_ttrpc(l: &ociLinux) -> ttrpcLinux { Devices: ttrpc_linux_devices, Seccomp: ttrpc_seccomp, RootfsPropagation: l.rootfs_propagation.clone(), - MaskedPaths: protobuf::RepeatedField::from_slice(&l.masked_paths), - ReadonlyPaths: protobuf::RepeatedField::from_slice(&l.readonly_paths), + MaskedPaths: l.masked_paths.clone(), + ReadonlyPaths: l.readonly_paths.clone(), MountLabel: l.mount_label.clone(), IntelRdt: ttrpc_intel_rdt, - unknown_fields: protobuf::UnknownFields::new(), - cached_size: protobuf::CachedSize::default(), + ..Default::default() } } fn oci_to_ttrpc(bundle_dir: &str, cid: &str, oci: &ociSpec) -> Result { let process = match &oci.process { - Some(p) => protobuf::SingularPtrField::some(process_oci_to_ttrpc(p)), - None => protobuf::SingularPtrField::none(), + Some(p) => protobuf::MessageField::some(process_oci_to_ttrpc(p)), + None => protobuf::MessageField::none(), }; let root = match &oci.root { Some(r) => { let ttrpc_root = root_oci_to_ttrpc(bundle_dir, r)?; - protobuf::SingularPtrField::some(ttrpc_root) + protobuf::MessageField::some(ttrpc_root) } - None => protobuf::SingularPtrField::none(), + None => protobuf::MessageField::none(), }; - let mut mounts = protobuf::RepeatedField::new(); + let mut mounts = Vec::new(); for m in &oci.mounts { mounts.push(mount_oci_to_ttrpc(m)); } let linux = match &oci.linux { - Some(l) => protobuf::SingularPtrField::some(linux_oci_to_ttrpc(l)), - None => protobuf::SingularPtrField::none(), + Some(l) => protobuf::MessageField::some(linux_oci_to_ttrpc(l)), + None => protobuf::MessageField::none(), }; if cid.len() < MIN_HOSTNAME_LEN as usize { @@ -713,13 +693,12 @@ fn oci_to_ttrpc(bundle_dir: &str, cid: &str, oci: &ociSpec) -> Result Root: root, Hostname: hostname, Mounts: mounts, - Hooks: protobuf::SingularPtrField::none(), + Hooks: protobuf::MessageField::none(), Annotations: HashMap::new(), Linux: linux, - Solaris: protobuf::SingularPtrField::none(), - Windows: protobuf::SingularPtrField::none(), - unknown_fields: protobuf::UnknownFields::new(), - cached_size: protobuf::CachedSize::default(), + Solaris: protobuf::MessageField::none(), + Windows: protobuf::MessageField::none(), + ..Default::default() }; Ok(ttrpc_spec) diff --git a/src/tools/kata-ctl/Cargo.lock b/src/tools/kata-ctl/Cargo.lock index 3edafa91d..79a912d41 100644 --- a/src/tools/kata-ctl/Cargo.lock +++ 
b/src/tools/kata-ctl/Cargo.lock @@ -13,14 +13,14 @@ dependencies = [ "logging", "nix 0.24.3", "oci", - "protobuf", + "protobuf 3.2.0", "protocols", "serde", "serde_json", "slog", "slog-scope", "tokio", - "ttrpc", + "ttrpc 0.7.1", "url", ] @@ -657,7 +657,7 @@ dependencies = [ "test-utils", "thiserror", "tokio", - "ttrpc", + "ttrpc 0.6.1", "url", "vmm-sys-util", ] @@ -1108,9 +1108,16 @@ name = "protobuf" version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + +[[package]] +name = "protobuf" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55bad9126f378a853655831eb7363b7b01b81d19f8cb1218861086ca4a1a61e" dependencies = [ - "serde", - "serde_derive", + "once_cell", + "protobuf-support", + "thiserror", ] [[package]] @@ -1119,7 +1126,22 @@ version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "033460afb75cf755fcfc16dfaed20b86468082a2ea24e05ac35ab4a099a017d6" dependencies = [ - "protobuf", + "protobuf 2.28.0", +] + +[[package]] +name = "protobuf-codegen" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dd418ac3c91caa4032d37cb80ff0d44e2ebe637b2fb243b6234bf89cdac4901" +dependencies = [ + "anyhow", + "once_cell", + "protobuf 3.2.0", + "protobuf-parse", + "regex", + "tempfile", + "thiserror", ] [[package]] @@ -1128,18 +1150,33 @@ version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a29399fc94bcd3eeaa951c715f7bea69409b2445356b00519740bcd6ddd865" dependencies = [ - "protobuf", - "protobuf-codegen", + "protobuf 2.28.0", + "protobuf-codegen 2.28.0", ] [[package]] -name = "protobuf-codegen-pure3" -version = "2.28.1" +name = "protobuf-parse" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0a3cf0a7de7570cb67bfb9a9a585b5841b49790a1be0ef104340a2110b91135" +checksum = "9d39b14605eaa1f6a340aec7f320b34064feb26c93aec35d6a9a2272a8ddfa49" dependencies = [ - "protobuf", - "protobuf-codegen", + "anyhow", + "indexmap", + "log", + "protobuf 3.2.0", + "protobuf-support", + "tempfile", + "thiserror", + "which", +] + +[[package]] +name = "protobuf-support" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d4d7b8601c814cfb36bcebb79f0e61e45e1e93640cf778837833bbed05c372" +dependencies = [ + "thiserror", ] [[package]] @@ -1148,8 +1185,8 @@ version = "0.1.0" dependencies = [ "async-trait", "oci", - "protobuf", - "ttrpc", + "protobuf 3.2.0", + "ttrpc 0.7.1", "ttrpc-codegen", ] @@ -1774,6 +1811,21 @@ name = "ttrpc" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ecfff459a859c6ba6668ff72b34c2f1d94d9d58f7088414c2674ad0f31cc7d8" +dependencies = [ + "byteorder", + "libc", + "log", + "nix 0.23.2", + "protobuf 2.28.0", + "protobuf-codegen-pure", + "thiserror", +] + +[[package]] +name = "ttrpc" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a35f22a2964bea14afee161665bb260b83cb48e665e0260ca06ec0e775c8b06c" dependencies = [ "async-trait", "byteorder", @@ -1781,8 +1833,8 @@ dependencies = [ "libc", "log", "nix 0.23.2", - "protobuf", - "protobuf-codegen-pure", + "protobuf 3.2.0", + "protobuf-codegen 3.2.0", "thiserror", "tokio", "tokio-vsock", @@ -1790,28 +1842,26 @@ dependencies = [ [[package]] name = "ttrpc-codegen" -version = "0.2.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "df80affc2cf8c589172b05ba2b8e8a88722ebf4e28b86604615497a8b6fb78c0" +version = "0.4.1" dependencies = [ - "protobuf", - "protobuf-codegen", - "protobuf-codegen-pure3", + "protobuf 2.28.0", + "protobuf-codegen 3.2.0", + "protobuf-support", "ttrpc-compiler", ] [[package]] name = "ttrpc-compiler" -version = "0.4.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8db19ce6af25713061dd805d6733b6f0c45904bd63526ce5d2568c858b7edc71" +checksum = "ec3cb5dbf1f0865a34fe3f722290fe776cacb16f50428610b779467b76ddf647" dependencies = [ "derive-new", "prost", "prost-build", "prost-types", - "protobuf", - "protobuf-codegen", + "protobuf 2.28.0", + "protobuf-codegen 2.28.0", "tempfile", ] From 73253850e641832326ea22a6e1e5fc24de3ac576 Mon Sep 17 00:00:00 2001 From: Tim Zhang Date: Fri, 14 Apr 2023 11:31:47 +0800 Subject: [PATCH 128/137] kata-ctl: remove unused crate ttrpc Remove unused crate ttrpc. Signed-off-by: Tim Zhang --- src/tools/kata-ctl/Cargo.lock | 34 +++++----------------------------- src/tools/kata-ctl/Cargo.toml | 1 - 2 files changed, 5 insertions(+), 30 deletions(-) diff --git a/src/tools/kata-ctl/Cargo.lock b/src/tools/kata-ctl/Cargo.lock index 79a912d41..6acf131f0 100644 --- a/src/tools/kata-ctl/Cargo.lock +++ b/src/tools/kata-ctl/Cargo.lock @@ -20,7 +20,7 @@ dependencies = [ "slog", "slog-scope", "tokio", - "ttrpc 0.7.1", + "ttrpc", "url", ] @@ -657,7 +657,6 @@ dependencies = [ "test-utils", "thiserror", "tokio", - "ttrpc 0.6.1", "url", "vmm-sys-util", ] @@ -1144,16 +1143,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "protobuf-codegen-pure" -version = "2.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a29399fc94bcd3eeaa951c715f7bea69409b2445356b00519740bcd6ddd865" -dependencies = [ - "protobuf 2.28.0", - "protobuf-codegen 2.28.0", -] - [[package]] name = "protobuf-parse" version = "3.2.0" @@ -1186,7 +1175,7 @@ dependencies = [ "async-trait", "oci", "protobuf 3.2.0", - "ttrpc 0.7.1", + "ttrpc", "ttrpc-codegen", ] @@ -1806,21 +1795,6 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" -[[package]] -name = "ttrpc" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ecfff459a859c6ba6668ff72b34c2f1d94d9d58f7088414c2674ad0f31cc7d8" -dependencies = [ - "byteorder", - "libc", - "log", - "nix 0.23.2", - "protobuf 2.28.0", - "protobuf-codegen-pure", - "thiserror", -] - [[package]] name = "ttrpc" version = "0.7.1" @@ -1842,7 +1816,9 @@ dependencies = [ [[package]] name = "ttrpc-codegen" -version = "0.4.1" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94d7f7631d7a9ebed715a47cd4cb6072cbc7ae1d4ec01598971bbec0024340c2" dependencies = [ "protobuf 2.28.0", "protobuf-codegen 3.2.0", diff --git a/src/tools/kata-ctl/Cargo.toml b/src/tools/kata-ctl/Cargo.toml index 233f0e3a0..3de3a532a 100644 --- a/src/tools/kata-ctl/Cargo.toml +++ b/src/tools/kata-ctl/Cargo.toml @@ -37,7 +37,6 @@ libc = "0.2.138" slog = "2.7.0" slog-scope = "4.4.0" hyper = "0.14.20" -ttrpc = "0.6.0" tokio = "1.8.0" [target.'cfg(target_arch = "s390x")'.dependencies] From 0a582f7815b50a915ee3686b662c987ab65afbd9 Mon Sep 17 00:00:00 2001 From: Tim Zhang Date: Fri, 14 Apr 2023 11:34:09 +0800 Subject: [PATCH 129/137] trace-forwarder: remove unused crate protobuf Remove unused crate 
protobuf.

Signed-off-by: Tim Zhang
---
 src/tools/trace-forwarder/Cargo.lock | 7 ------- src/tools/trace-forwarder/Cargo.toml | 1 - 2 files changed, 8 deletions(-)

diff --git a/src/tools/trace-forwarder/Cargo.lock b/src/tools/trace-forwarder/Cargo.lock index ce1f994d2..7a6a7c0ac 100644 --- a/src/tools/trace-forwarder/Cargo.lock +++ b/src/tools/trace-forwarder/Cargo.lock @@ -289,7 +289,6 @@ dependencies = [ "opentelemetry 0.14.0", "opentelemetry-jaeger", "privdrop", - "protobuf", "serde", "serde_json", "slog", @@ -527,12 +526,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "protobuf" -version = "2.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" - [[package]] name = "quote" version = "1.0.10" diff --git a/src/tools/trace-forwarder/Cargo.toml b/src/tools/trace-forwarder/Cargo.toml index fc0b69631..0b6d3d550 100644 --- a/src/tools/trace-forwarder/Cargo.toml +++ b/src/tools/trace-forwarder/Cargo.toml @@ -23,7 +23,6 @@ serde_json = "1.0.44" anyhow = "1.0.31" opentelemetry = { version = "0.14.0", features=["serialize"] } opentelemetry-jaeger = "0.13.0" -protobuf = "2.27.0" tracing-opentelemetry = "0.16.0" tracing = "0.1.29" tracing-subscriber = "0.3.3"

From 4849c56faa0f742075f01a07204f9aa36829efb9 Mon Sep 17 00:00:00 2001
From: Tim Zhang
Date: Mon, 17 Apr 2023 15:27:16 +0800
Subject: [PATCH 130/137] agent: Fix unit test issue caused by protobuf upgrade

Fixes: #6646
Signed-off-by: Tim Zhang
---
 src/agent/rustjail/src/lib.rs | 240 +++++++++++----------------------- src/agent/src/mount.rs | 16 +-- 2 files changed, 79 insertions(+), 177 deletions(-)

diff --git a/src/agent/rustjail/src/lib.rs b/src/agent/rustjail/src/lib.rs index fcfa3e029..de91f81bb 100644 --- a/src/agent/rustjail/src/lib.rs +++ b/src/agent/rustjail/src/lib.rs @@ -558,35 +558,30 @@ mod tests { // All fields specified grpcproc: grpc::Process { Terminal: true, - ConsoleSize: protobuf::SingularPtrField::<grpc::Box>::some(grpc::Box { + ConsoleSize: protobuf::MessageField::<grpc::Box>::some(grpc::Box { Height: 123, Width: 456, ..Default::default() }), - User: protobuf::SingularPtrField::<grpc::User>::some(grpc::User { + User: protobuf::MessageField::<grpc::User>::some(grpc::User { UID: 1234, GID: 5678, AdditionalGids: Vec::from([910, 1112]), Username: String::from("username"), ..Default::default() }), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([String::from("env")])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env")]), Cwd: String::from("cwd"), - Capabilities: protobuf::SingularPtrField::some(grpc::LinuxCapabilities { - Bounding: protobuf::RepeatedField::from(Vec::from([String::from("bnd")])), - Effective: protobuf::RepeatedField::from(Vec::from([String::from("eff")])), - Inheritable: protobuf::RepeatedField::from(Vec::from([String::from( - "inher", - )])), - Permitted: protobuf::RepeatedField::from(Vec::from([String::from("perm")])), - Ambient: protobuf::RepeatedField::from(Vec::from([String::from("amb")])), + Capabilities: protobuf::MessageField::some(grpc::LinuxCapabilities { + Bounding: Vec::from([String::from("bnd")]), + Effective: Vec::from([String::from("eff")]), + Inheritable: Vec::from([String::from("inher")]), + Permitted: Vec::from([String::from("perm")]), + Ambient: Vec::from([String::from("amb")]), ..Default::default() }), - Rlimits: protobuf::RepeatedField::from(Vec::from([
grpc::POSIXRlimit { Type: String::from("r#type"), Hard: 123, @@ -599,7 +594,7 @@ mod tests { Soft: 1011, ..Default::default() }, - ])), + ]), NoNewPrivileges: true, ApparmorProfile: String::from("apparmor profile"), OOMScoreAdj: 123456, @@ -649,7 +644,7 @@ mod tests { TestData { // None ConsoleSize grpcproc: grpc::Process { - ConsoleSize: protobuf::SingularPtrField::::none(), + ConsoleSize: protobuf::MessageField::::none(), OOMScoreAdj: 0, ..Default::default() }, @@ -662,7 +657,7 @@ mod tests { TestData { // None User grpcproc: grpc::Process { - User: protobuf::SingularPtrField::::none(), + User: protobuf::MessageField::::none(), OOMScoreAdj: 0, ..Default::default() }, @@ -680,7 +675,7 @@ mod tests { TestData { // None Capabilities grpcproc: grpc::Process { - Capabilities: protobuf::SingularPtrField::none(), + Capabilities: protobuf::MessageField::none(), OOMScoreAdj: 0, ..Default::default() }, @@ -781,99 +776,57 @@ mod tests { TestData { // All specified grpchooks: grpc::Hooks { - Prestart: protobuf::RepeatedField::from(Vec::from([ + Prestart: Vec::from([ grpc::Hook { Path: String::from("prestartpath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() }, grpc::Hook { Path: String::from("prestartpath2"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg3"), - String::from("arg4"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env3"), - String::from("env4"), - ])), + Args: Vec::from([String::from("arg3"), String::from("arg4")]), + Env: Vec::from([String::from("env3"), String::from("env4")]), Timeout: 25, ..Default::default() }, - ])), - Poststart: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + ]), + Poststart: Vec::from([grpc::Hook { Path: String::from("poststartpath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - Poststop: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + Poststop: Vec::from([grpc::Hook { Path: String::from("poststoppath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - CreateRuntime: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + CreateRuntime: Vec::from([grpc::Hook { Path: String::from("createruntimepath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - CreateContainer: 
protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + CreateContainer: Vec::from([grpc::Hook { Path: String::from("createcontainerpath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - StartContainer: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + StartContainer: Vec::from([grpc::Hook { Path: String::from("startcontainerpath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), + }]), ..Default::default() }, result: oci::Hooks { @@ -926,72 +879,42 @@ mod tests { TestData { // Prestart empty grpchooks: grpc::Hooks { - Prestart: protobuf::RepeatedField::from(Vec::from([])), - Poststart: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + Prestart: Vec::from([]), + Poststart: Vec::from([grpc::Hook { Path: String::from("poststartpath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - Poststop: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + Poststop: Vec::from([grpc::Hook { Path: String::from("poststoppath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - CreateRuntime: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + CreateRuntime: Vec::from([grpc::Hook { Path: String::from("createruntimepath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - CreateContainer: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + CreateContainer: Vec::from([grpc::Hook { Path: String::from("createcontainerpath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), - StartContainer: protobuf::RepeatedField::from(Vec::from([grpc::Hook { + }]), + StartContainer: Vec::from([grpc::Hook { Path: 
String::from("startcontainerpath"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() - }])), + }]), ..Default::default() }, result: oci::Hooks { @@ -1063,11 +986,8 @@ mod tests { grpcmount: grpc::Mount { destination: String::from("destination"), source: String::from("source"), - field_type: String::from("fieldtype"), - options: protobuf::RepeatedField::from(Vec::from([ - String::from("option1"), - String::from("option2"), - ])), + type_: String::from("fieldtype"), + options: Vec::from([String::from("option1"), String::from("option2")]), ..Default::default() }, result: oci::Mount { @@ -1081,8 +1001,8 @@ mod tests { grpcmount: grpc::Mount { destination: String::from("destination"), source: String::from("source"), - field_type: String::from("fieldtype"), - options: protobuf::RepeatedField::from(Vec::new()), + type_: String::from("fieldtype"), + options: Vec::new(), ..Default::default() }, result: oci::Mount { @@ -1096,8 +1016,8 @@ mod tests { grpcmount: grpc::Mount { destination: String::new(), source: String::from("source"), - field_type: String::from("fieldtype"), - options: protobuf::RepeatedField::from(Vec::from([String::from("option1")])), + type_: String::from("fieldtype"), + options: Vec::from([String::from("option1")]), ..Default::default() }, result: oci::Mount { @@ -1111,8 +1031,8 @@ mod tests { grpcmount: grpc::Mount { destination: String::from("destination"), source: String::from("source"), - field_type: String::new(), - options: protobuf::RepeatedField::from(Vec::from([String::from("option1")])), + type_: String::new(), + options: Vec::from([String::from("option1")]), ..Default::default() }, result: oci::Mount { @@ -1172,27 +1092,15 @@ mod tests { grpchook: &[ grpc::Hook { Path: String::from("path"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg1"), - String::from("arg2"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env1"), - String::from("env2"), - ])), + Args: Vec::from([String::from("arg1"), String::from("arg2")]), + Env: Vec::from([String::from("env1"), String::from("env2")]), Timeout: 10, ..Default::default() }, grpc::Hook { Path: String::from("path2"), - Args: protobuf::RepeatedField::from(Vec::from([ - String::from("arg3"), - String::from("arg4"), - ])), - Env: protobuf::RepeatedField::from(Vec::from([ - String::from("env3"), - String::from("env4"), - ])), + Args: Vec::from([String::from("arg3"), String::from("arg4")]), + Env: Vec::from([String::from("env3"), String::from("env4")]), Timeout: 20, ..Default::default() }, diff --git a/src/agent/src/mount.rs b/src/agent/src/mount.rs index a863c3a31..3f59d8f41 100644 --- a/src/agent/src/mount.rs +++ b/src/agent/src/mount.rs @@ -1094,7 +1094,6 @@ fn parse_options(option_list: Vec) -> HashMap { #[cfg(test)] mod tests { use super::*; - use protobuf::RepeatedField; use protocols::agent::FSGroup; use std::fs::File; use std::fs::OpenOptions; @@ -2015,9 +2014,8 @@ mod tests { mount_path: "rw_mount", fs_group: Some(FSGroup { group_id: 3000, - group_change_policy: FSGroupChangePolicy::Always, - unknown_fields: Default::default(), - cached_size: Default::default(), + group_change_policy: FSGroupChangePolicy::Always.into(), + ..Default::default() }), read_only: false, 
expected_group_id: 3000, @@ -2027,9 +2025,8 @@ mod tests { mount_path: "ro_mount", fs_group: Some(FSGroup { group_id: 3000, - group_change_policy: FSGroupChangePolicy::OnRootMismatch, - unknown_fields: Default::default(), - cached_size: Default::default(), + group_change_policy: FSGroupChangePolicy::OnRootMismatch.into(), + ..Default::default() }), read_only: true, expected_group_id: 3000, @@ -2049,10 +2046,7 @@ mod tests { let directory_mode = mount_dir.as_path().metadata().unwrap().permissions().mode(); let mut storage_data = Storage::new(); if d.read_only { - storage_data.set_options(RepeatedField::from_slice(&[ - "foo".to_string(), - "ro".to_string(), - ])); + storage_data.set_options(vec!["foo".to_string(), "ro".to_string()]); } if let Some(fs_group) = d.fs_group.clone() { storage_data.set_fs_group(fs_group);

From 2e3f19af92bd0d3e8bf8a95ff4a4170602e28805 Mon Sep 17 00:00:00 2001
From: Tim Zhang
Date: Mon, 17 Apr 2023 20:15:49 +0800
Subject: [PATCH 131/137] agent: fix clippy warnings caused by protobuf3

Fix warnings introduced by protobuf upgrade.

Signed-off-by: Tim Zhang
---
 src/agent/src/mount.rs | 2 +- src/agent/src/rpc.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/agent/src/mount.rs b/src/agent/src/mount.rs index 3f59d8f41..0eff266f2 100644 --- a/src/agent/src/mount.rs +++ b/src/agent/src/mount.rs @@ -211,7 +211,7 @@ async fn ephemeral_storage_handler( // By now we only support one option field: "fsGroup" which // isn't an valid mount option, thus we should remove it when // do mount. - if storage.options.len() > 0 { + if !storage.options.is_empty() { // ephemeral_storage didn't support mount options except fsGroup. let mut new_storage = storage.clone(); new_storage.options = Default::default(); diff --git a/src/agent/src/rpc.rs b/src/agent/src/rpc.rs index dbcb4bd13..478653904 100644 --- a/src/agent/src/rpc.rs +++ b/src/agent/src/rpc.rs @@ -2034,7 +2034,7 @@ fn load_kernel_module(module: &protocols::agent::KernelModule) -> Result<()> { let mut args = vec!["-v".to_string(), module.name.clone()]; - if module.parameters.len() > 0 { + if !module.parameters.is_empty() { args.extend(module.parameters.to_vec()) }

From 53c749a9de89ead493a91f823d580c6cec9125e0 Mon Sep 17 00:00:00 2001
From: Tim Zhang
Date: Tue, 18 Apr 2023 23:01:30 +0800
Subject: [PATCH 132/137] agent: Fix unit test issue caused by double-closing an fd

Never close the same fd twice, even in a unit test.

A file descriptor is a number that gets reused, so when you close the same number twice, the second close may hit another file descriptor, and there will be an error 'Bad file descriptor (os error 9)' while the wrongly closed fd is being used.

Fixes: #6679
Signed-off-by: Tim Zhang
---
 src/agent/src/main.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/agent/src/main.rs b/src/agent/src/main.rs index 085ea396d..f82904804 100644 --- a/src/agent/src/main.rs +++ b/src/agent/src/main.rs @@ -442,9 +442,8 @@ mod tests { let msg = format!("test[{}]: {:?}", i, d); let (rfd, wfd) = unistd::pipe2(OFlag::O_CLOEXEC).unwrap(); defer!({ - // rfd is closed by the use of PipeStream in the crate_logger_task function, - but we will attempt to close in case of a failure - let _ = unistd::close(rfd); + // XXX: Never try to close rfd, because it will be closed by PipeStream in + // create_logger_task() and it's not safe to close the same fd twice. unistd::close(wfd).unwrap(); });
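The failure mode this patch describes is easy to reproduce in isolation. A minimal sketch, assuming the nix crate at roughly the 0.24 version the agent already uses:

    use nix::unistd::{close, pipe};

    fn main() {
        let (rfd, wfd) = pipe().expect("pipe");

        close(rfd).expect("first close succeeds");

        // Closing the same number again yields EBADF at best ("Bad file
        // descriptor (os error 9)"). At worst, another thread has already
        // been handed the recycled number, and this call closes *that*
        // descriptor, corrupting unrelated I/O.
        assert!(close(rfd).is_err());

        close(wfd).expect("wfd is still valid; close it exactly once");
    }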
From 3e7b902265c6f5d1c4eece2ea778eb2cc3d6faa5 Mon Sep 17 00:00:00 2001
From: Vladimir
Date: Tue, 18 Apr 2023 22:04:01 +0300
Subject: [PATCH 133/137] osbuilder: Fix D-Bus enabling in the dracut case

- D-Bus enabling now occurs only in setup_rootfs (instead of prepare_overlay and setup_rootfs)
- Adjust permissions of / so dbus-broker will be able to traverse the FS

These changes enable kata-agent to successfully communicate with D-Bus.

Fixes #6677
Signed-off-by: Vladimir
---
 tools/osbuilder/rootfs-builder/rootfs.sh | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/tools/osbuilder/rootfs-builder/rootfs.sh b/tools/osbuilder/rootfs-builder/rootfs.sh index 6dfb8734e..dde4a07d6 100755 --- a/tools/osbuilder/rootfs-builder/rootfs.sh +++ b/tools/osbuilder/rootfs-builder/rootfs.sh @@ -472,11 +472,6 @@ prepare_overlay() ln -sf /init ./sbin/init fi - # Kata systemd unit file - mkdir -p ./etc/systemd/system/basic.target.wants/ - ln -sf /usr/lib/systemd/system/kata-containers.target ./etc/systemd/system/basic.target.wants/kata-containers.target - mkdir -p ./etc/systemd/system/kata-containers.target.wants/ - ln -sf /usr/lib/systemd/system/dbus.socket ./etc/systemd/system/kata-containers.target.wants/dbus.socket popd > /dev/null } @@ -625,9 +620,12 @@ EOF if [ "${AGENT_INIT}" == "yes" ]; then setup_agent_init "${AGENT_DEST}" "${init}" else - # Setup systemd service for kata-agent + # Setup systemd-based environment for kata-agent mkdir -p "${ROOTFS_DIR}/etc/systemd/system/basic.target.wants" ln -sf "/usr/lib/systemd/system/kata-containers.target" "${ROOTFS_DIR}/etc/systemd/system/basic.target.wants/kata-containers.target" + mkdir -p "${ROOTFS_DIR}/etc/systemd/system/kata-containers.target.wants" + ln -sf "/usr/lib/systemd/system/dbus.socket" "${ROOTFS_DIR}/etc/systemd/system/kata-containers.target.wants/dbus.socket" + chmod g+rx,o+x "${ROOTFS_DIR}" fi info "Check init is installed"

From b1730e4a67c4c635820079268fd84cc5f37bd378 Mon Sep 17 00:00:00 2001
From: Zvonko Kaiser
Date: Mon, 17 Apr 2023 09:49:00 +0000
Subject: [PATCH 134/137] gpu: Add new kernel build option to usage()

With each release, make sure we ship a GPU-enabled kernel.

Signed-off-by: Zvonko Kaiser
---
 .github/workflows/release.yaml | 4 ++-- .../packaging/kata-deploy/local-build/kata-deploy-binaries.sh | 1 - tools/packaging/kernel/README.md | 1 + tools/packaging/kernel/build-kernel.sh | 1 + tools/packaging/static-build/kernel/Dockerfile | 3 +-- 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index f31261d51..a642fa36f 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -78,7 +78,7 @@ jobs: mv kata-static.tar.xz "$GITHUB_WORKSPACE/${tarball}" pushd $GITHUB_WORKSPACE echo "uploading asset '${tarball}' for tag: ${tag}" - GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}" + GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}" popd upload-cargo-vendored-tarball: @@ -92,7 +92,7 @@ jobs: tarball="kata-containers-$tag-vendor.tar.gz" pushd $GITHUB_WORKSPACE bash -c "tools/packaging/release/generate_vendor.sh ${tarball}" - GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}" + GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}" popd upload-libseccomp-tarball: diff --git 
a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index 55fcc0624..902b067c9 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -81,7 +81,6 @@ options: kernel kernel-dragonball-experimental kernel-experimental - kernel-gpu kernel-tdx-experimental kernel-gpu kernel-gpu-snp diff --git a/tools/packaging/kernel/README.md b/tools/packaging/kernel/README.md index ce4ea30c4..d9b78480a 100644 --- a/tools/packaging/kernel/README.md +++ b/tools/packaging/kernel/README.md @@ -47,6 +47,7 @@ Options: -f : Enable force generate config when setup. -g : GPU vendor, intel or nvidia. -h : Display this help. + -H : Linux headers for guest fs module building. -k : Path to kernel to build. -p : Path to a directory with patches to apply to kernel, only patches in top-level directory are applied. -t : Hypervisor_target. diff --git a/tools/packaging/kernel/build-kernel.sh b/tools/packaging/kernel/build-kernel.sh index 67c4c8f5c..88fb61c18 100755 --- a/tools/packaging/kernel/build-kernel.sh +++ b/tools/packaging/kernel/build-kernel.sh @@ -97,6 +97,7 @@ Options: -f : Enable force generate config when setup. -g : GPU vendor, intel or nvidia. -h : Display this help. + -H : Linux headers for guest fs module building. -k : Path to kernel to build. -p : Path to a directory with patches to apply to kernel. -s : Skip .config checks diff --git a/tools/packaging/static-build/kernel/Dockerfile b/tools/packaging/static-build/kernel/Dockerfile index b4c232972..183f8a47e 100644 --- a/tools/packaging/static-build/kernel/Dockerfile +++ b/tools/packaging/static-build/kernel/Dockerfile @@ -23,5 +23,4 @@ RUN apt-get update && \ rsync \ cpio \ patch && \ - if [ "$(uname -m)" = "s390x" ]; then apt-get install -y --no-install-recommends libssl-dev; fi && \ - apt-get clean && rm -rf /var/lib/lists/ + apt-get clean && apt-get autoclean From 432d4074405a66a9f0fef42e3d5386b6e9e4979a Mon Sep 17 00:00:00 2001 From: David Esparza Date: Wed, 15 Feb 2023 10:07:59 -0600 Subject: [PATCH 135/137] kata-ctl: checks for kvm, kvm_intel modules loaded Ensure that kvm and kvm_intel modules are loaded. 
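In outline, the loaded-module check added below works in three steps: modinfo proves the module exists on the host at all, a successful `modprobe --dry-run --first-time` proves the module is not currently loaded (a first-time insert of an already-loaded module fails), and the live parameter value is then read from sysfs. The following is a condensed sketch of that flow, not the literal kata-ctl code (error handling and the Simple/Predicate parameter validation are simplified away):

    use std::{fs, process::Command};

    fn module_param(module: &str, param: &str) -> Result<String, String> {
        // Step 1: the module must exist on the host.
        let modinfo = Command::new("/sbin/modinfo")
            .arg(module)
            .output()
            .map_err(|e| format!("failed to run modinfo: {}", e))?;
        if !modinfo.status.success() {
            return Err(format!("module {} not found", module));
        }

        // Step 2: a *successful* simulated first-time insert means the
        // module is NOT loaded yet.
        let dry_run = Command::new("/sbin/modprobe")
            .args(["--dry-run", "--first-time", module])
            .output()
            .map_err(|e| format!("failed to run modprobe: {}", e))?;
        if dry_run.status.success() {
            return Err(format!("module {} exists but is not loaded", module));
        }

        // Step 3: read the live parameter value from sysfs.
        fs::read_to_string(format!("/sys/module/{}/parameters/{}", module, param))
            .map(|v| v.trim_end().to_string())
            .map_err(|e| format!("parameter {} not found: {}", param, e))
    }

    fn main() {
        match module_param("kvm", "kvmclock_periodic_sync") {
            Ok(v) => println!("kvm.kvmclock_periodic_sync = {}", v),
            Err(e) => eprintln!("WARNING: {}", e),
        }
    }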
Renames the get_cpu_info() function to read_file_contents() Fixes #5332 Signed-off-by: David Esparza --- src/tools/kata-ctl/src/arch/aarch64/mod.rs | 2 +- src/tools/kata-ctl/src/arch/s390x/mod.rs | 2 +- src/tools/kata-ctl/src/arch/x86_64/mod.rs | 153 +++++++++++++++++++- src/tools/kata-ctl/src/check.rs | 160 ++++++++++++++++++++- src/tools/kata-ctl/src/ops/check_ops.rs | 7 +- src/tools/kata-ctl/src/types.rs | 41 +++++- 6 files changed, 348 insertions(+), 17 deletions(-) diff --git a/src/tools/kata-ctl/src/arch/aarch64/mod.rs b/src/tools/kata-ctl/src/arch/aarch64/mod.rs index 65e3ed93f..a6137856f 100644 --- a/src/tools/kata-ctl/src/arch/aarch64/mod.rs +++ b/src/tools/kata-ctl/src/arch/aarch64/mod.rs @@ -20,7 +20,7 @@ mod arch_specific { // List of check functions static CHECK_LIST: &[CheckItem] = &[CheckItem { - name: CheckType::CheckCpu, + name: CheckType::Cpu, descr: "This parameter performs the host check", fp: check, perm: PermissionType::NonPrivileged, diff --git a/src/tools/kata-ctl/src/arch/s390x/mod.rs b/src/tools/kata-ctl/src/arch/s390x/mod.rs index 276c75a9e..7a9940dcf 100644 --- a/src/tools/kata-ctl/src/arch/s390x/mod.rs +++ b/src/tools/kata-ctl/src/arch/s390x/mod.rs @@ -60,7 +60,7 @@ mod arch_specific { // List of check functions static CHECK_LIST: &[CheckItem] = &[CheckItem { - name: CheckType::CheckCpu, + name: CheckType::Cpu, descr: "This parameter performs the cpu check", fp: check, perm: PermissionType::NonPrivileged, diff --git a/src/tools/kata-ctl/src/arch/x86_64/mod.rs b/src/tools/kata-ctl/src/arch/x86_64/mod.rs index 9c8782fa7..026312624 100644 --- a/src/tools/kata-ctl/src/arch/x86_64/mod.rs +++ b/src/tools/kata-ctl/src/arch/x86_64/mod.rs @@ -21,16 +21,43 @@ mod arch_specific { const CPUINFO_FLAGS_TAG: &str = "flags"; const CPU_FLAGS_INTEL: &[&str] = &["lm", "sse4_1", "vmx"]; const CPU_ATTRIBS_INTEL: &[&str] = &["GenuineIntel"]; + const VMM_FLAGS: &[&str] = &["hypervisor"]; + pub const ARCH_CPU_VENDOR_FIELD: &str = check::GENERIC_CPU_VENDOR_FIELD; pub const ARCH_CPU_MODEL_FIELD: &str = check::GENERIC_CPU_MODEL_FIELD; // List of check functions - static CHECK_LIST: &[CheckItem] = &[CheckItem { - name: CheckType::CheckCpu, - descr: "This parameter performs the cpu check", - fp: check_cpu, - perm: PermissionType::NonPrivileged, - }]; + static CHECK_LIST: &[CheckItem] = &[ + CheckItem { + name: CheckType::Cpu, + descr: "This parameter performs the cpu check", + fp: check_cpu, + perm: PermissionType::NonPrivileged, + }, + CheckItem { + name: CheckType::KernelModules, + descr: "This parameter performs the kvm check", + fp: check_kernel_modules, + perm: PermissionType::NonPrivileged, + }, + ]; + + static MODULE_LIST: &[KernelModule] = &[ + KernelModule { + name: "kvm", + parameter: KernelParam { + name: "kvmclock_periodic_sync", + value: KernelParamType::Simple("Y"), + }, + }, + KernelModule { + name: "kvm_intel", + parameter: KernelParam { + name: "unrestricted_guest", + value: KernelParamType::Predicate(unrestricted_guest_param_check), + }, + }, + ]; pub fn get_checks() -> Option<&'static [CheckItem<'static>]> { Some(CHECK_LIST) @@ -141,6 +168,120 @@ mod arch_specific { Ok(GuestProtection::NoProtection) } + + fn running_on_vmm() -> Result { + match check::get_single_cpu_info(check::PROC_CPUINFO, CPUINFO_DELIMITER) { + Ok(cpu_info) => { + // check if the 'hypervisor' flag exist in the cpu features + let missing_hypervisor_flag = check::check_cpu_attribs(&cpu_info, VMM_FLAGS)?; + + if missing_hypervisor_flag.is_empty() { + return Ok(true); + } + } + Err(e) => { + return 
Err(anyhow!( + "Unable to determine if the OS is running on a VM: {}: {}", + e, + check::PROC_CPUINFO + )); + } + } + + Ok(false) + } + + // check the host kernel parameter value is valid + // and check if we are running inside a VMM + fn unrestricted_guest_param_check( + module: &str, + param_name: &str, + param_value_host: &str, + ) -> Result<()> { + let expected_param_value: char = 'Y'; + + let running_on_vmm_alt = running_on_vmm()?; + + if running_on_vmm_alt { + let msg = format!("You are running in a VM, where the kernel module '{}' parameter '{:}' has a value '{:}'. This causes conflict when running kata.", + module, + param_name, + param_value_host + ); + return Err(anyhow!(msg)); + } + + if param_value_host == expected_param_value.to_string() { + Ok(()) + } else { + let error_msg = format!( + "Kernel Module: '{:}' parameter '{:}' should have value '{:}', but found '{:}.'.", + module, param_name, expected_param_value, param_value_host + ); + + let action_msg = format!("Remove the '{:}' module using `rmmod` and then reload using `modprobe`, setting '{:}={:}'", + module, + param_name, + expected_param_value + ); + + return Err(anyhow!("{} {}", error_msg, action_msg)); + } + } + + fn check_kernel_param( + module: &str, + param_name: &str, + param_value_host: &str, + param_type: KernelParamType, + ) -> Result<()> { + match param_type { + KernelParamType::Simple(param_value_req) => { + if param_value_host != param_value_req { + return Err(anyhow!( + "Kernel module '{}': parameter '{}' should have value '{}', but found '{}'", + module, + param_name, + param_value_req, + param_value_host + )); + } + Ok(()) + } + KernelParamType::Predicate(pred_func) => { + pred_func(module, param_name, param_value_host) + } + } + } + + fn check_kernel_modules(_args: &str) -> Result<()> { + println!("INFO: check kernel modules for: x86_64"); + + for module in MODULE_LIST { + let module_loaded = + check::check_kernel_module_loaded(module.name, module.parameter.name); + + match module_loaded { + Ok(param_value_host) => { + let parameter_check = check_kernel_param( + module.name, + module.parameter.name, + ¶m_value_host, + module.parameter.value.clone(), + ); + + match parameter_check { + Ok(_v) => println!("{} Ok", module.name), + Err(e) => return Err(e), + } + } + Err(err) => { + eprintln!("WARNING {:}", err.replace('\n', "")) + } + } + } + Ok(()) + } } #[cfg(target_arch = "x86_64")] diff --git a/src/tools/kata-ctl/src/check.rs b/src/tools/kata-ctl/src/check.rs index 81298d1a2..dfb9a3b7b 100644 --- a/src/tools/kata-ctl/src/check.rs +++ b/src/tools/kata-ctl/src/check.rs @@ -9,6 +9,10 @@ use anyhow::{anyhow, Result}; use reqwest::header::{CONTENT_TYPE, USER_AGENT}; use serde::{Deserialize, Serialize}; use thiserror::Error; + +#[cfg(any(target_arch = "x86_64"))] +use std::process::{Command, Stdio}; + #[derive(Debug, Deserialize, Serialize, PartialEq)] struct Release { tag_name: String, @@ -17,6 +21,12 @@ struct Release { tarball_url: String, } +#[allow(dead_code)] +const MODPROBE_PATH: &str = "/sbin/modprobe"; + +#[allow(dead_code)] +const MODINFO_PATH: &str = "/sbin/modinfo"; + const KATA_GITHUB_RELEASE_URL: &str = "https://api.github.com/repos/kata-containers/kata-containers/releases"; @@ -29,6 +39,7 @@ const ERR_NO_CPUINFO: &str = "cpu_info string is empty"; #[allow(dead_code)] pub const GENERIC_CPU_VENDOR_FIELD: &str = "vendor_id"; + #[allow(dead_code)] pub const GENERIC_CPU_MODEL_FIELD: &str = "model name"; @@ -36,8 +47,8 @@ pub const GENERIC_CPU_MODEL_FIELD: &str = "model name"; pub const PROC_CPUINFO: 
&str = "/proc/cpuinfo"; #[cfg(any(target_arch = "s390x", target_arch = "x86_64"))] -fn get_cpu_info(cpu_info_file: &str) -> Result { - let contents = std::fs::read_to_string(cpu_info_file)?; +fn read_file_contents(file_path: &str) -> Result { + let contents = std::fs::read_to_string(file_path)?; Ok(contents) } @@ -45,7 +56,7 @@ fn get_cpu_info(cpu_info_file: &str) -> Result { // the specified cpuinfo file by parsing based on a specified delimiter #[cfg(any(target_arch = "s390x", target_arch = "x86_64"))] pub fn get_single_cpu_info(cpu_info_file: &str, substring: &str) -> Result { - let contents = get_cpu_info(cpu_info_file)?; + let contents = read_file_contents(cpu_info_file)?; if contents.is_empty() { return Err(anyhow!(ERR_NO_CPUINFO)); @@ -57,7 +68,6 @@ pub fn get_single_cpu_info(cpu_info_file: &str, substring: &str) -> Result Result { } if cpu_flags_tag.is_empty() { - return Err(anyhow!("cpu flags delimiter string is empty")); + return Err(anyhow!("cpu flags delimiter string is empty"))?; } let subcontents: Vec<&str> = cpu_info.split('\n').collect(); @@ -222,6 +232,86 @@ pub fn check_official_releases() -> Result<()> { Ok(()) } +#[cfg(any(target_arch = "x86_64"))] +pub fn check_kernel_module_loaded(module: &str, parameter: &str) -> Result { + const MODPROBE_PARAMETERS_DRY_RUN: &str = "--dry-run"; + const MODPROBE_PARAMETERS_FIRST_TIME: &str = "--first-time"; + const MODULES_PATH: &str = "/sys/module"; + + let status_modinfo_success; + + // Partial check w/ modinfo + // verifies that the module exists + match Command::new(MODINFO_PATH) + .arg(module) + .stdout(Stdio::piped()) + .output() + { + Ok(v) => { + status_modinfo_success = v.status.success(); + + // The module is already not loaded. + if !status_modinfo_success { + let msg = String::from_utf8_lossy(&v.stderr).replace('\n', ""); + return Err(msg); + } + } + Err(_e) => { + let msg = format!( + "Command {:} not found, verify that `kmod` package is already installed.", + MODINFO_PATH, + ); + return Err(msg); + } + } + + // Partial check w/ modprobe + // check that the module is already loaded + match Command::new(MODPROBE_PATH) + .arg(MODPROBE_PARAMETERS_DRY_RUN) + .arg(MODPROBE_PARAMETERS_FIRST_TIME) + .arg(module) + .stdout(Stdio::piped()) + .output() + { + Ok(v) => { + // a successful simulated modprobe insert, means the module is not already loaded + let status_modprobe_success = v.status.success(); + + if status_modprobe_success && status_modinfo_success { + // This condition is true in the case that the module exist, but is not already loaded + let msg = format!("The kernel module `{:}` exist but is not already loaded. 
Try reloading it using 'modprobe {:}=Y'", + module, module + ); + return Err(msg); + } + } + + Err(_e) => { + let msg = format!( + "Command {:} not found, verify that `kmod` package is already installed.", + MODPROBE_PATH, + ); + return Err(msg); + } + } + + let module_path = format!("{}/{}/parameters/{}", MODULES_PATH, module, parameter); + + // Here the currently loaded kernel parameter value + // is retrieved and returned on success + match read_file_contents(&module_path) { + Ok(result) => Ok(result.replace('\n', "")), + Err(_e) => { + let msg = format!( + "'{:}' kernel module parameter `{:}` not found.", + module, parameter + ); + Err(msg) + } + } +} + #[cfg(any(target_arch = "s390x", target_arch = "x86_64"))] #[cfg(test)] mod tests { @@ -413,4 +503,64 @@ mod tests { assert!(!v.minor.to_string().is_empty()); assert!(!v.patch.to_string().is_empty()); } + + #[cfg(any(target_arch = "x86_64"))] + #[test] + fn check_module_loaded() { + #[allow(dead_code)] + #[derive(Debug)] + struct TestData<'a> { + module_name: &'a str, + param_name: &'a str, + param_value: &'a str, + result: Result, + } + + let tests = &[ + // Failure scenarios + TestData { + module_name: "", + param_name: "", + param_value: "", + result: Err(anyhow!("modinfo: ERROR: Module {} not found.", "")), + }, + TestData { + module_name: "kvm", + param_name: "", + param_value: "", + result: Err(anyhow!( + "'{:}' kernel module parameter `{:}` not found.", + "kvm", + "" + )), + }, + // Success scenarios + TestData { + module_name: "kvm", + param_name: "kvmclock_periodic_sync", + param_value: "Y", + result: Ok("Y".to_string()), + }, + ]; + + for (i, d) in tests.iter().enumerate() { + let msg = format!("test[{}]: {:?}", i, d); + let result = check_kernel_module_loaded(d.module_name, d.param_name); + let msg = format!("{}, result: {:?}", msg, result); + + if d.result.is_ok() { + assert_eq!( + result.as_ref().unwrap(), + d.result.as_ref().unwrap(), + "{}", + msg + ); + continue; + } + + let expected_error = format!("{}", &d.result.as_ref().unwrap_err()); + let actual_error = result.unwrap_err().to_string(); + assert!(actual_error == expected_error, "{}", msg); + } + } } diff --git a/src/tools/kata-ctl/src/ops/check_ops.rs b/src/tools/kata-ctl/src/ops/check_ops.rs index fa3aa688b..f2dbea702 100644 --- a/src/tools/kata-ctl/src/ops/check_ops.rs +++ b/src/tools/kata-ctl/src/ops/check_ops.rs @@ -73,15 +73,18 @@ pub fn handle_check(checkcmd: CheckArgument) -> Result<()> { match command { CheckSubCommand::All => { // run architecture-specific tests - handle_builtin_check(CheckType::CheckCpu, "")?; + handle_builtin_check(CheckType::Cpu, "")?; // run code that uses network checks check::run_network_checks()?; + + // run kernel module checks + handle_builtin_check(CheckType::KernelModules, "")?; } CheckSubCommand::NoNetworkChecks => { // run architecture-specific tests - handle_builtin_check(CheckType::CheckCpu, "")?; + handle_builtin_check(CheckType::Cpu, "")?; } CheckSubCommand::CheckVersionOnly => { diff --git a/src/tools/kata-ctl/src/types.rs b/src/tools/kata-ctl/src/types.rs index 483e5bce7..26f5954d8 100644 --- a/src/tools/kata-ctl/src/types.rs +++ b/src/tools/kata-ctl/src/types.rs @@ -12,8 +12,9 @@ pub type BuiltinCmdFp = fn(args: &str) -> Result<()>; // CheckType encodes the name of each check provided by kata-ctl. 
#[derive(Debug, strum_macros::Display, EnumString, PartialEq)] pub enum CheckType { - CheckCpu, - CheckNetwork, + Cpu, + Network, + KernelModules, } // PermissionType is used to show whether a check needs to run with elevated (super-user) @@ -33,3 +34,39 @@ pub struct CheckItem<'a> { pub fp: BuiltinCmdFp, pub perm: PermissionType, } + +// Builtin module parameter check handler type. +// +// BuiltinModuleParamFp represents a predicate function to determine if a +// kernel parameter _value_ is as expected. If not, the returned Error will +// explain what is wrong. +// +// Parameters: +// +// - module: name of kernel module. +// - param: name of parameter for the kernel module. +// - value: value of the kernel parameter. +pub type BuiltinModuleParamFp = fn(module: &str, param: &str, value: &str) -> Result<()>; + +// KernelParamType encodes the value and a handler +// function for kernel module parameters +#[allow(dead_code)] +#[derive(Clone)] +pub enum KernelParamType<'a> { + Simple(&'a str), + Predicate(BuiltinModuleParamFp), +} + +// Parameters is used to encode the module parameters +#[derive(Clone)] +pub struct KernelParam<'a> { + pub name: &'a str, + pub value: KernelParamType<'a>, +} + +// KernelModule is used to describe a kernel module along with its required parameters. +#[allow(dead_code)] +pub struct KernelModule<'a> { + pub name: &'a str, + pub parameter: KernelParam<'a>, +} From 96e8470dbeb5dedbe2636a7977e7b0469ea58ae6 Mon Sep 17 00:00:00 2001 From: Archana Shinde Date: Thu, 20 Apr 2023 23:08:51 -0700 Subject: [PATCH 136/137] kata-manager: Fix containerd download Newer containerd releases have an additional static package published. Because of this, download_url contains two urls causing curl to fail. To resolve this, pick the first url from the containerd releases to download containerd. Fixes: #6695 Signed-off-by: Archana Shinde --- utils/kata-manager.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/kata-manager.sh b/utils/kata-manager.sh index 8d83d055b..c51fb516c 100755 --- a/utils/kata-manager.sh +++ b/utils/kata-manager.sh @@ -160,6 +160,8 @@ github_get_release_file_url() -r '.[] | select(.tag_name == $version) | .assets[].browser_download_url' |\ grep "/${regex}$") + download_url=$(echo $download_url | awk '{print $1}') + [ -z "$download_url" ] && die "Cannot determine download URL for version $version ($url)" echo "$download_url" From cc8ea3232ef8ad574abf56a19d3fb5e5fa6377ee Mon Sep 17 00:00:00 2001 From: mengze Date: Mon, 17 Apr 2023 16:41:01 +0800 Subject: [PATCH 137/137] runtime-rs: support keep_abnormal in toml config This patch adds keep_abnormal in runtime config. If keep_abnormal = true, it means that 1) if the runtime exits abnormally, the cleanup process will be skipped, and 2) the runtime will not exit even if the health check fails. This option is typically used to retain abnormal information for debugging and should NOT be enabled by default. 
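Of the two behaviours, 2) is the health-check side, implemented in health_check.rs below: with keep_abnormal set, a failed agent health check is logged instead of killing the shim process. A minimal sketch of that gating, with all channel and agent plumbing omitted:

    fn on_health_check_failure(keep_abnormal: bool) {
        if keep_abnormal {
            // Keep the (possibly broken) runtime alive so the abnormal
            // state can be inspected.
            eprintln!("health check failed; keep_abnormal set, not exiting");
        } else {
            // Default behaviour: give up so the host can clean up.
            std::process::exit(1);
        }
    }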
Fixes: #6717
Signed-off-by: mengze
Signed-off-by: quanweiZhou
---
 src/libs/kata-types/src/config/runtime.rs | 6 ++++++ .../config/configuration-dragonball.toml.in | 9 ++++++++- src/runtime-rs/crates/runtimes/src/manager.rs | 14 +++++++++++++- .../runtimes/virt_container/src/health_check.rs | 10 +++++----- .../crates/runtimes/virt_container/src/sandbox.rs | 7 +++++-- 5 files changed, 37 insertions(+), 9 deletions(-)

diff --git a/src/libs/kata-types/src/config/runtime.rs b/src/libs/kata-types/src/config/runtime.rs index 1d7364368..067ff6776 100644 --- a/src/libs/kata-types/src/config/runtime.rs +++ b/src/libs/kata-types/src/config/runtime.rs @@ -130,6 +130,12 @@ pub struct Runtime { /// Vendor customized runtime configuration. #[serde(default, flatten)] pub vendor: RuntimeVendor, + + /// If keep_abnormal is enabled, it means that 1) if the runtime exits abnormally, the cleanup process + /// will be skipped, and 2) the runtime will not exit even if the health check fails. + /// This option is typically used to retain abnormal information for debugging. + #[serde(default)] + pub keep_abnormal: bool, } impl ConfigOps for Runtime { diff --git a/src/runtime-rs/config/configuration-dragonball.toml.in b/src/runtime-rs/config/configuration-dragonball.toml.in index 174f270e7..4c7d3db05 100644 --- a/src/runtime-rs/config/configuration-dragonball.toml.in +++ b/src/runtime-rs/config/configuration-dragonball.toml.in @@ -214,7 +214,14 @@ dial_timeout = 45 # system log # (default: disabled) #enable_debug = true -# +
+# If enabled, it means that 1) if the runtime exits abnormally,
+# the cleanup process will be skipped, and 2) the runtime will not exit
+# even if the health check fails.
+# This option is typically used to retain abnormal information for debugging.
+# (default: false)
+#keep_abnormal = true
+
 # Internetworking model # Determines how the VM should be connected to the # the container network interface diff --git a/src/runtime-rs/crates/runtimes/src/manager.rs b/src/runtime-rs/crates/runtimes/src/manager.rs index f97861f23..b32c36773 100644 --- a/src/runtime-rs/crates/runtimes/src/manager.rs +++ b/src/runtime-rs/crates/runtimes/src/manager.rs @@ -14,6 +14,7 @@ use common::{ RuntimeHandler, RuntimeInstance, Sandbox, SandboxNetworkEnv, }; use hypervisor::Param; +use kata_sys_util::spec::load_oci_spec; use kata_types::{ annotations::Annotation, config::default::DEFAULT_GUEST_DNS_FILE, config::TomlConfig, }; @@ -190,9 +191,16 @@ impl RuntimeHandlerManager { let sender = inner.msg_sender.clone(); let sandbox_state = persist::from_disk::<SandboxState>(&inner.id) .context("failed to load the sandbox state")?; + + let config = if let Ok(spec) = load_oci_spec() { + load_config(&spec, &None).context("load config")? 
+ } else { + TomlConfig::default() + }; + let sandbox_args = SandboxRestoreArgs { sid: inner.id.clone(), - toml_config: TomlConfig::default(), + toml_config: config, sender, }; match sandbox_state.sandbox_type.clone() { @@ -208,6 +216,10 @@ impl RuntimeHandlerManager { } #[cfg(feature = "virt")] name if name == VirtContainer::name() => { + if sandbox_args.toml_config.runtime.keep_abnormal { + info!(sl!(), "skip cleanup for keep_abnormal"); + return Ok(()); + } let sandbox = VirtSandbox::restore(sandbox_args, sandbox_state) .await .context("failed to restore the sandbox")?; diff --git a/src/runtime-rs/crates/runtimes/virt_container/src/health_check.rs b/src/runtime-rs/crates/runtimes/virt_container/src/health_check.rs index f6d60c4c4..81fb3d58b 100644 --- a/src/runtime-rs/crates/runtimes/virt_container/src/health_check.rs +++ b/src/runtime-rs/crates/runtimes/virt_container/src/health_check.rs @@ -21,17 +21,17 @@ const HEALTH_CHECK_STOP_CHANNEL_BUFFER_SIZE: usize = 1; pub struct HealthCheck { pub keep_alive: bool, - keep_vm: bool, + keep_abnormal: bool, stop_tx: mpsc::Sender<()>, stop_rx: Arc>>, } impl HealthCheck { - pub fn new(keep_alive: bool, keep_vm: bool) -> HealthCheck { + pub fn new(keep_alive: bool, keep_abnormal: bool) -> HealthCheck { let (tx, rx) = mpsc::channel(HEALTH_CHECK_STOP_CHANNEL_BUFFER_SIZE); HealthCheck { keep_alive, - keep_vm, + keep_abnormal, stop_tx: tx, stop_rx: Arc::new(Mutex::new(rx)), } @@ -46,7 +46,7 @@ impl HealthCheck { info!(sl!(), "start runtime keep alive"); let stop_rx = self.stop_rx.clone(); - let keep_vm = self.keep_vm; + let keep_abnormal = self.keep_abnormal; let _ = tokio::spawn(async move { let mut version_check_threshold_count = 0; @@ -87,7 +87,7 @@ impl HealthCheck { error!(sl!(), "failed to do {} agent health check: {}", id, e); if let Err(mpsc::error::TryRecvError::Empty) = stop_rx.try_recv() { error!(sl!(), "failed to receive stop monitor signal"); - if !keep_vm { + if !keep_abnormal { ::std::process::exit(1); } } else { diff --git a/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs b/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs index 881b5f78b..c5ec38e46 100644 --- a/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs +++ b/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs @@ -75,6 +75,8 @@ impl VirtSandbox { hypervisor: Arc, resource_manager: Arc, ) -> Result { + let config = resource_manager.config().await; + let keep_abnormal = config.runtime.keep_abnormal; Ok(Self { sid: sid.to_string(), msg_sender: Arc::new(Mutex::new(msg_sender)), @@ -82,7 +84,7 @@ impl VirtSandbox { agent, hypervisor, resource_manager, - monitor: Arc::new(HealthCheck::new(true, false)), + monitor: Arc::new(HealthCheck::new(true, keep_abnormal)), }) } @@ -440,6 +442,7 @@ impl Persist for VirtSandbox { }?; let agent = Arc::new(KataAgent::new(kata_types::config::Agent::default())); let sid = sandbox_args.sid; + let keep_abnormal = config.runtime.keep_abnormal; let args = ManagerArgs { sid: sid.clone(), agent: agent.clone(), @@ -454,7 +457,7 @@ impl Persist for VirtSandbox { agent, hypervisor, resource_manager, - monitor: Arc::new(HealthCheck::new(true, false)), + monitor: Arc::new(HealthCheck::new(true, keep_abnormal)), }) } }
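Behaviour 1) is the cleanup side, visible in the manager.rs hunk above: when the runtime exits abnormally and keep_abnormal is set, the cleanup path returns early instead of restoring and tearing down the sandbox. Schematically (the types below are stand-ins for illustration, not the real runtime-rs signatures):

    struct RuntimeConfig { keep_abnormal: bool }

    fn cleanup(config: &RuntimeConfig, sid: &str) -> Result<(), String> {
        if config.keep_abnormal {
            // Leave the persisted sandbox state and on-disk artifacts in
            // place for debugging.
            println!("skip cleanup for keep_abnormal, sandbox {}", sid);
            return Ok(());
        }
        // ...restore the sandbox from persisted state and tear it down...
        println!("cleaning up sandbox {}", sid);
        Ok(())
    }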