From 168fadf1de2045ffa3d710ffdafcec248b09416e Mon Sep 17 00:00:00 2001 From: Binbin Zhang Date: Tue, 14 Sep 2021 16:10:00 +0800 Subject: [PATCH 01/46] ci: Weekly check whether the docs url is alive Weekly check(at 23:00 every Sunday) whether the docs url is ALIVE, so that we can find the failed url in time Fixes #815 Signed-off-by: Binbin Zhang --- .github/workflows/docs-url-alive-check.yaml | 44 +++++++++++++++++++++ Makefile | 8 +++- ci/docs-url-alive-check.sh | 12 ++++++ ci/lib.sh | 9 +++++ 4 files changed, 72 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/docs-url-alive-check.yaml create mode 100755 ci/docs-url-alive-check.sh diff --git a/.github/workflows/docs-url-alive-check.yaml b/.github/workflows/docs-url-alive-check.yaml new file mode 100644 index 000000000..cf821abb2 --- /dev/null +++ b/.github/workflows/docs-url-alive-check.yaml @@ -0,0 +1,44 @@ +on: + schedule: + - cron: '0 23 * * 0' + +name: Docs URL Alive Check +jobs: + test: + strategy: + matrix: + go-version: [1.17.x] + os: [ubuntu-20.04] + runs-on: ${{ matrix.os }} + env: + target_branch: ${{ github.base_ref }} + steps: + - name: Install Go + if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go-version }} + env: + GOPATH: ${{ runner.workspace }}/kata-containers + - name: Set env + if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} + run: | + echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV + echo "${{ github.workspace }}/bin" >> $GITHUB_PATH + - name: Checkout code + if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} + uses: actions/checkout@v2 + with: + fetch-depth: 0 + path: ./src/github.com/${{ github.repository }} + - name: Setup + if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} + run: | + cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh + env: + GOPATH: ${{ runner.workspace }}/kata-containers + # docs url alive check + - name: Docs URL Alive Check + if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }} + run: | + cd ${GOPATH}/src/github.com/${{ github.repository }} && make docs-url-alive-check diff --git a/Makefile b/Makefile index 5e3695dd7..6bb914271 100644 --- a/Makefile +++ b/Makefile @@ -39,10 +39,16 @@ generate-protocols: static-checks: build bash ci/static-checks.sh +docs-url-alive-check: + bash ci/docs-url-alive-check.sh + .PHONY: \ all \ binary-tarball \ default \ install-binary-tarball \ logging-crate-tests \ - static-checks + static-checks \ + docs-url-alive-check + + diff --git a/ci/docs-url-alive-check.sh b/ci/docs-url-alive-check.sh new file mode 100755 index 000000000..4b5371c34 --- /dev/null +++ b/ci/docs-url-alive-check.sh @@ -0,0 +1,12 @@ +#!/bin/bash +# +# Copyright (c) 2021 Easystack Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 + +set -e + +cidir=$(dirname "$0") +source "${cidir}/lib.sh" + +run_docs_url_alive_check diff --git a/ci/lib.sh b/ci/lib.sh index 3c64db595..5da391ca0 100644 --- a/ci/lib.sh +++ b/ci/lib.sh @@ -44,3 +44,12 @@ run_go_test() clone_tests_repo bash "$tests_repo_dir/.ci/go-test.sh" } + +run_docs_url_alive_check() +{ + clone_tests_repo + # Make sure we have the targeting branch + git remote set-branches --add origin "${branch}" + git fetch -a + bash "$tests_repo_dir/.ci/static-checks.sh" --docs --all "github.com/kata-containers/kata-containers" +} From 5c3e55362442726f9234cd2d1a2382f4fde5dcc4 Mon Sep 17 00:00:00 2001 From: Jakob Naucke Date: Mon, 7 Mar 2022 11:56:45 +0100 Subject: [PATCH 02/46] osbuilder: apk add --no-cache Hadolint DL3019. If you're wondering why this is in this PR, that's because I touch the file later, and we're only triggering the lints for changed files. Signed-off-by: Jakob Naucke --- tools/osbuilder/rootfs-builder/alpine/Dockerfile.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/osbuilder/rootfs-builder/alpine/Dockerfile.in b/tools/osbuilder/rootfs-builder/alpine/Dockerfile.in index 802506d09..45e33a788 100644 --- a/tools/osbuilder/rootfs-builder/alpine/Dockerfile.in +++ b/tools/osbuilder/rootfs-builder/alpine/Dockerfile.in @@ -6,7 +6,7 @@ ARG IMAGE_REGISTRY=docker.io FROM ${IMAGE_REGISTRY}/alpine:3.15 -RUN apk update && apk add \ +RUN apk update && apk add --no-cache \ apk-tools-static \ autoconf \ automake \ From 0072cc2b66ea4423275cb8abea548e045f3e91e0 Mon Sep 17 00:00:00 2001 From: Jakob Naucke Date: Fri, 4 Mar 2022 18:22:19 +0100 Subject: [PATCH 03/46] osbuilder: Remove musl installations Remove a lot of cruft of musl installations -- we needed those for the Go agent, but Rustup just takes care of everything. aarch64 on Debian-based & Alpine is an exception -- create a symlink `aarch64-linux-musl-gcc` to `musl-tools`'s `musl-gcc` or `gcc` on Alpine. This is unified -- arch-specific Dockerfiles are removed. 
Signed-off-by: Jakob Naucke --- ci/install_musl.sh | 24 ----- tools/osbuilder/dracut/Dockerfile.in | 3 - .../rootfs-builder/alpine/Dockerfile.in | 2 + .../rootfs-builder/clearlinux/Dockerfile.in | 1 - .../debian/Dockerfile-aarch64.in | 34 ------ .../rootfs-builder/debian/Dockerfile.in | 4 +- tools/osbuilder/rootfs-builder/rootfs.sh | 9 -- .../template/Dockerfile.template | 1 - .../ubuntu/Dockerfile-aarch64.in | 43 -------- .../rootfs-builder/ubuntu/Dockerfile.in | 4 +- tools/osbuilder/scripts/lib.sh | 101 ++---------------- tools/osbuilder/tests/test_images.sh | 2 - versions.yaml | 13 --- 13 files changed, 13 insertions(+), 228 deletions(-) delete mode 100755 ci/install_musl.sh delete mode 100644 tools/osbuilder/rootfs-builder/debian/Dockerfile-aarch64.in delete mode 100644 tools/osbuilder/rootfs-builder/ubuntu/Dockerfile-aarch64.in diff --git a/ci/install_musl.sh b/ci/install_musl.sh deleted file mode 100755 index 4beec2911..000000000 --- a/ci/install_musl.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) 2020 Ant Group -# -# SPDX-License-Identifier: Apache-2.0 -# - -set -e - -install_aarch64_musl() { - local arch=$(uname -m) - if [ "${arch}" == "aarch64" ]; then - local musl_tar="${arch}-linux-musl-native.tgz" - local musl_dir="${arch}-linux-musl-native" - pushd /tmp - if curl -sLO --fail https://musl.cc/${musl_tar}; then - tar -zxf ${musl_tar} - mkdir -p /usr/local/musl/ - cp -r ${musl_dir}/* /usr/local/musl/ - fi - popd - fi -} - -install_aarch64_musl diff --git a/tools/osbuilder/dracut/Dockerfile.in b/tools/osbuilder/dracut/Dockerfile.in index f84838bc3..e80fa374a 100644 --- a/tools/osbuilder/dracut/Dockerfile.in +++ b/tools/osbuilder/dracut/Dockerfile.in @@ -36,7 +36,4 @@ RUN zypper --non-interactive refresh; \ zypper --non-interactive clean --all; -# This will install the proper golang to build Kata components -@INSTALL_MUSL@ -@INSTALL_GO@ @INSTALL_RUST@ diff --git a/tools/osbuilder/rootfs-builder/alpine/Dockerfile.in b/tools/osbuilder/rootfs-builder/alpine/Dockerfile.in index 45e33a788..c9c4f0fba 100644 --- a/tools/osbuilder/rootfs-builder/alpine/Dockerfile.in +++ b/tools/osbuilder/rootfs-builder/alpine/Dockerfile.in @@ -26,3 +26,5 @@ RUN apk update && apk add --no-cache \ musl-dev \ protoc \ tar +# aarch64 requires this name -- link for all +RUN ln -s /usr/bin/gcc "/usr/bin/$(uname -m)-linux-musl-gcc" diff --git a/tools/osbuilder/rootfs-builder/clearlinux/Dockerfile.in b/tools/osbuilder/rootfs-builder/clearlinux/Dockerfile.in index b38990a9e..ac5b0ff74 100644 --- a/tools/osbuilder/rootfs-builder/clearlinux/Dockerfile.in +++ b/tools/osbuilder/rootfs-builder/clearlinux/Dockerfile.in @@ -37,5 +37,4 @@ RUN dnf -y update && dnf install -y \ dnf clean all # This will install the proper packages to build Kata components -@INSTALL_MUSL@ @INSTALL_RUST@ diff --git a/tools/osbuilder/rootfs-builder/debian/Dockerfile-aarch64.in b/tools/osbuilder/rootfs-builder/debian/Dockerfile-aarch64.in deleted file mode 100644 index 727506f47..000000000 --- a/tools/osbuilder/rootfs-builder/debian/Dockerfile-aarch64.in +++ /dev/null @@ -1,34 +0,0 @@ -# -# Copyright (c) 2020 ARM Limited -# -# SPDX-License-Identifier: Apache-2.0 - -ARG IMAGE_REGISTRY=docker.io -# NOTE: OS_VERSION is set according to config.sh -FROM ${IMAGE_REGISTRY}/debian:@OS_VERSION@ - -# RUN commands -RUN apt-get update && apt-get install -y \ - autoconf \ - automake \ - binutils \ - build-essential \ - chrony \ - coreutils \ - curl \ - debianutils \ - debootstrap \ - g++ \ - gcc \ - git \ - libc-dev \ - 
libstdc++-8-dev \ - m4 \ - make \ - sed \ - systemd \ - tar \ - vim -# This will install the proper packages to build Kata components -@INSTALL_MUSL@ -@INSTALL_RUST@ diff --git a/tools/osbuilder/rootfs-builder/debian/Dockerfile.in b/tools/osbuilder/rootfs-builder/debian/Dockerfile.in index 685dd0f4d..022059857 100644 --- a/tools/osbuilder/rootfs-builder/debian/Dockerfile.in +++ b/tools/osbuilder/rootfs-builder/debian/Dockerfile.in @@ -27,14 +27,14 @@ RUN apt-get update && apt-get --no-install-recommends install -y \ libstdc++-8-dev \ m4 \ make \ - musl \ - musl-dev \ musl-tools \ sed \ systemd \ tar \ vim \ wget +# aarch64 requires this name -- link for all +RUN ln -s /usr/bin/musl-gcc "/usr/bin/$(uname -m)-linux-musl-gcc" # This will install the proper packages to build Kata components @INSTALL_RUST@ diff --git a/tools/osbuilder/rootfs-builder/rootfs.sh b/tools/osbuilder/rootfs-builder/rootfs.sh index 80633a045..07f8ea89e 100755 --- a/tools/osbuilder/rootfs-builder/rootfs.sh +++ b/tools/osbuilder/rootfs-builder/rootfs.sh @@ -14,7 +14,6 @@ script_name="${0##*/}" script_dir="$(dirname $(readlink -f $0))" AGENT_VERSION=${AGENT_VERSION:-} RUST_VERSION="null" -MUSL_VERSION=${MUSL_VERSION:-"null"} AGENT_BIN=${AGENT_BIN:-kata-agent} AGENT_INIT=${AGENT_INIT:-no} KERNEL_MODULES_DIR=${KERNEL_MODULES_DIR:-""} @@ -335,11 +334,6 @@ build_rootfs_distro() echo "Required rust version: $RUST_VERSION" - detect_musl_version || - die "Could not detect the required musl version for AGENT_VERSION='${AGENT_VERSION:-main}'." - - echo "Required musl version: $MUSL_VERSION" - if [ -z "${USE_DOCKER}" ] && [ -z "${USE_PODMAN}" ]; then info "build directly" build_rootfs ${ROOTFS_DIR} @@ -544,7 +538,6 @@ EOT LIBC=gnu echo "WARNING: Forcing LIBC=gnu because $ARCH has no musl Rust target" fi - [ "$LIBC" == "musl" ] && bash ${script_dir}/../../../ci/install_musl.sh test -r "${HOME}/.cargo/env" && source "${HOME}/.cargo/env" # rust agent needs ${arch}-unknown-linux-${LIBC} if ! 
(rustup show | grep -v linux-${LIBC} > /dev/null); then @@ -555,7 +548,6 @@ EOT bash ${script_dir}/../../../ci/install_rust.sh ${RUST_VERSION} fi test -r "${HOME}/.cargo/env" && source "${HOME}/.cargo/env" - [ "$ARCH" == "aarch64" ] && OLD_PATH=$PATH && export PATH=$PATH:/usr/local/musl/bin agent_dir="${script_dir}/../../../src/agent/" @@ -577,7 +569,6 @@ EOT make clean make LIBC=${LIBC} INIT=${AGENT_INIT} SECCOMP=${SECCOMP} make install DESTDIR="${ROOTFS_DIR}" LIBC=${LIBC} INIT=${AGENT_INIT} - [ "$ARCH" == "aarch64" ] && export PATH=$OLD_PATH && rm -rf /usr/local/musl if [ "${SECCOMP}" == "yes" ]; then rm -rf "${libseccomp_install_dir}" "${gperf_install_dir}" fi diff --git a/tools/osbuilder/rootfs-builder/template/Dockerfile.template b/tools/osbuilder/rootfs-builder/template/Dockerfile.template index b881dac43..863cab3e5 100644 --- a/tools/osbuilder/rootfs-builder/template/Dockerfile.template +++ b/tools/osbuilder/rootfs-builder/template/Dockerfile.template @@ -14,5 +14,4 @@ FROM ${IMAGE_REGISTRY}/@distro@:@OS_VERSION@ # RUN commands # This will install the proper packages to build Kata components -@INSTALL_MUSL@ @INSTALL_RUST@ diff --git a/tools/osbuilder/rootfs-builder/ubuntu/Dockerfile-aarch64.in b/tools/osbuilder/rootfs-builder/ubuntu/Dockerfile-aarch64.in deleted file mode 100644 index bad700645..000000000 --- a/tools/osbuilder/rootfs-builder/ubuntu/Dockerfile-aarch64.in +++ /dev/null @@ -1,43 +0,0 @@ -# -# Copyright (c) 2020 ARM Limited -# -# SPDX-License-Identifier: Apache-2.0 - -ARG IMAGE_REGISTRY=docker.io -#ubuntu: docker image to be used to create a rootfs -#@OS_VERSION@: Docker image version to build this dockerfile -FROM ${IMAGE_REGISTRY}/ubuntu:@OS_VERSION@ - -# This dockerfile needs to provide all the componets need to build a rootfs -# Install any package need to create a rootfs (package manager, extra tools) - -# Avoid tzdata setup -ENV DEBIAN_FRONTEND noninteractive - -# RUN commands -RUN apt-get update && apt-get install -y \ - autoconf \ - automake \ - binutils \ - build-essential \ - chrony \ - coreutils \ - curl \ - debianutils \ - debootstrap \ - g++ \ - gcc \ - git \ - libc6-dev \ - libstdc++-8-dev \ - m4 \ - make \ - sed \ - systemd \ - tar \ - vim && \ - apt-get clean && rm -rf /var/lib/apt/lists/ - -# This will install the proper packages to build Kata components -@INSTALL_MUSL@ -@INSTALL_RUST@ diff --git a/tools/osbuilder/rootfs-builder/ubuntu/Dockerfile.in b/tools/osbuilder/rootfs-builder/ubuntu/Dockerfile.in index 07bf30ce8..edc921641 100644 --- a/tools/osbuilder/rootfs-builder/ubuntu/Dockerfile.in +++ b/tools/osbuilder/rootfs-builder/ubuntu/Dockerfile.in @@ -31,8 +31,6 @@ RUN apt-get update && apt-get --no-install-recommends install -y \ libstdc++-8-dev \ m4 \ make \ - musl \ - musl-dev \ musl-tools \ protobuf-compiler \ sed \ @@ -40,6 +38,8 @@ RUN apt-get update && apt-get --no-install-recommends install -y \ tar \ vim \ wget +# aarch64 requires this name -- link for all +RUN ln -s /usr/bin/musl-gcc "/usr/bin/$(uname -m)-linux-musl-gcc" # This will install the proper packages to build Kata components @INSTALL_RUST@ diff --git a/tools/osbuilder/scripts/lib.sh b/tools/osbuilder/scripts/lib.sh index 9f806d039..9e248e86c 100644 --- a/tools/osbuilder/scripts/lib.sh +++ b/tools/osbuilder/scripts/lib.sh @@ -7,7 +7,6 @@ set -e KATA_REPO=${KATA_REPO:-github.com/kata-containers/kata-containers} -MUSL_VERSION=${MUSL_VERSION:-"null"} # Give preference to variable set by CI yq_file="${script_dir}/../../../ci/install_yq.sh" 
kata_versions_file="${script_dir}/../../../versions.yaml" @@ -204,68 +203,12 @@ generate_dockerfile() dir="$1" [ -d "${dir}" ] || die "${dir}: not a directory" - local architecture=$(uname -m) - local rustarch=${architecture} - local muslarch=${architecture} - local libc=musl - case "$(uname -m)" in - "ppc64le") - rustarch=powerpc64le - muslarch=powerpc64 - libc=gnu - ;; - "s390x") - libc=gnu - ;; - - *) - ;; - esac + local rustarch=$(uname -m) + [ "$rustarch" = ppc64le ] && rustarch=powerpc64le [ -n "${http_proxy:-}" ] && readonly set_proxy="RUN sed -i '$ a proxy="${http_proxy:-}"' /etc/dnf/dnf.conf /etc/yum.conf; true" # Rust agent - # rust installer should set path apropiately, just in case - # install musl for compiling rust-agent - local musl_source_url="https://git.zv.io/toolchains/musl-cross-make.git" - local musl_source_dir="musl-cross-make" - install_musl= - if [ "${muslarch}" == "aarch64" ]; then - local musl_tar="${muslarch}-linux-musl-native.tgz" - local musl_dir="${muslarch}-linux-musl-native" - local aarch64_musl_target="aarch64-linux-musl" - install_musl=" -RUN cd /tmp; \ - mkdir -p /usr/local/musl/; \ - if curl -sLO --fail https://musl.cc/${musl_tar}; then \ - tar -zxf ${musl_tar}; \ - cp -r ${musl_dir}/* /usr/local/musl/; \ - else \ - git clone ${musl_source_url}; \ - TARGET=${aarch64_musl_target} make -j$(nproc) -C ${musl_source_dir} install; \ - cp -r ${musl_source_dir}/output/* /usr/local/musl/; \ - cp /usr/local/musl/bin/aarch64-linux-musl-g++ /usr/local/musl/bin/g++; \ - fi -ENV PATH=\$PATH:/usr/local/musl/bin -RUN ln -sf /usr/local/musl/bin/g++ /usr/bin/g++ -" - else - local musl_tar="musl-${MUSL_VERSION}.tar.gz" - local musl_dir="musl-${MUSL_VERSION}" - install_musl=" -RUN pushd /root; \ - curl -sLO https://www.musl-libc.org/releases/${musl_tar}; tar -zxf ${musl_tar}; \ - cd ${musl_dir}; \ - sed -i \"s/^ARCH = .*/ARCH = ${muslarch}/g\" dist/config.mak; \ - ./configure > /dev/null 2>\&1; \ - make > /dev/null 2>\&1; \ - make install > /dev/null 2>\&1; \ - echo \"/usr/local/musl/lib\" > /etc/ld-musl-${muslarch}.path; \ - popd -ENV PATH=\$PATH:/usr/local/musl/bin -" - fi - readonly install_rust=" RUN curl --proto '=https' --tlsv1.2 https://sh.rustup.rs -sSLf --output /tmp/rust-init; \ chmod a+x /tmp/rust-init; \ @@ -280,31 +223,12 @@ RUN . /root/.cargo/env; \ RUN ln -sf /usr/bin/g++ /bin/musl-g++ " pushd "${dir}" - dockerfile_template="Dockerfile.in" - dockerfile_arch_template="Dockerfile-${architecture}.in" - # if arch-specific docker file exists, swap the univesal one with it. 
- if [ -f "${dockerfile_arch_template}" ]; then - dockerfile_template="${dockerfile_arch_template}" - else - [ -f "${dockerfile_template}" ] || die "${dockerfile_template}: file not found" - fi - # ppc64le and s390x have no musl target - if [ "${architecture}" == "ppc64le" ] || [ "${architecture}" == "s390x" ]; then - sed \ - -e "s|@OS_VERSION@|${OS_VERSION:-}|g" \ - -e "s|@INSTALL_MUSL@||g" \ - -e "s|@INSTALL_RUST@|${install_rust//$'\n'/\\n}|g" \ - -e "s|@SET_PROXY@|${set_proxy:-}|g" \ - "${dockerfile_template}" > Dockerfile - else - sed \ - -e "s|@OS_VERSION@|${OS_VERSION:-}|g" \ - -e "s|@INSTALL_MUSL@|${install_musl//$'\n'/\\n}|g" \ - -e "s|@INSTALL_RUST@|${install_rust//$'\n'/\\n}|g" \ - -e "s|@SET_PROXY@|${set_proxy:-}|g" \ - "${dockerfile_template}" > Dockerfile - fi + sed \ + -e "s#@OS_VERSION@#${OS_VERSION:-}#g" \ + -e "s#@INSTALL_RUST@#${install_rust//$'\n'/\\n}#g" \ + -e "s#@SET_PROXY@#${set_proxy:-}#g" \ + Dockerfile.in > Dockerfile popd } @@ -345,17 +269,6 @@ detect_rust_version() [ -n "$RUST_VERSION" ] } -detect_musl_version() -{ - info "Detecting musl version" - local yq_path="externals.musl.version" - - info "Get musl version from ${kata_versions_file}" - MUSL_VERSION="$(get_package_version_from_kata_yaml "$yq_path")" - - [ -n "$MUSL_VERSION" ] -} - before_starting_container() { return 0 } diff --git a/tools/osbuilder/tests/test_images.sh b/tools/osbuilder/tests/test_images.sh index fbbe5d590..fcbc360a2 100755 --- a/tools/osbuilder/tests/test_images.sh +++ b/tools/osbuilder/tests/test_images.sh @@ -640,8 +640,6 @@ test_dracut() die "Could not detect the required Go version for AGENT_VERSION='${AGENT_VERSION:-master}'." detect_rust_version || die "Could not detect the required rust version for AGENT_VERSION='${AGENT_VERSION:-master}'." - detect_musl_version || - die "Could not detect the required musl version for AGENT_VERSION='${AGENT_VERSION:-master}'." generate_dockerfile ${dracut_dir} info "Creating container for dracut" diff --git a/versions.yaml b/versions.yaml index 2412053e5..5c7bb5e62 100644 --- a/versions.yaml +++ b/versions.yaml @@ -233,19 +233,6 @@ externals: .*/v?(\d\S+)\.tar\.gz version: "v1.0.1" - musl: - description: | - The musl library is used to build the rust agent. - url: "https://www.musl-libc.org/" - uscan-url: >- - https://www.musl-libc.org/releases/ - musl-([\d\.]+)\.tar\.gz - version: "1.1.23" - meta: - description: | - 'newest-version' is the latest version known to work. - newest-version: "1.1.23" - nydus: description: "Nydus image acceleration service" url: "https://github.com/dragonflyoss/image-service" From 2c86b956fadc9f1240f08437097d8d623b122536 Mon Sep 17 00:00:00 2001 From: Jakob Naucke Date: Fri, 4 Mar 2022 18:23:19 +0100 Subject: [PATCH 04/46] osbuilder: Simplify Rust installation no double export, direct target Signed-off-by: Jakob Naucke --- tools/osbuilder/scripts/lib.sh | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/tools/osbuilder/scripts/lib.sh b/tools/osbuilder/scripts/lib.sh index 9e248e86c..9db989900 100644 --- a/tools/osbuilder/scripts/lib.sh +++ b/tools/osbuilder/scripts/lib.sh @@ -210,17 +210,11 @@ generate_dockerfile() # Rust agent readonly install_rust=" -RUN curl --proto '=https' --tlsv1.2 https://sh.rustup.rs -sSLf --output /tmp/rust-init; \ - chmod a+x /tmp/rust-init; \ - export http_proxy=${http_proxy:-}; \ - export https_proxy=${http_proxy:-}; \ - /tmp/rust-init -y --default-toolchain ${RUST_VERSION} -RUN . 
/root/.cargo/env; \ - export http_proxy=${http_proxy:-}; \ - export https_proxy=${http_proxy:-}; \ - cargo install cargo-when; \ - rustup target install ${rustarch}-unknown-linux-${libc} -RUN ln -sf /usr/bin/g++ /bin/musl-g++ +ENV http_proxy=${http_proxy:-} +ENV https_proxy=${http_proxy:-} +RUN curl --proto '=https' --tlsv1.2 https://sh.rustup.rs -sSLf | \ + sh -s -- -y --default-toolchain ${RUST_VERSION} -t ${rustarch}-unknown-linux-${LIBC} +RUN . /root/.cargo/env; cargo install cargo-when " pushd "${dir}" From 0a313eda1c6e62a2b7f1fb93ac1ea222b05032e0 Mon Sep 17 00:00:00 2001 From: Jakob Naucke Date: Tue, 1 Mar 2022 15:20:35 +0100 Subject: [PATCH 05/46] osbuilder: Fix use of LIBC in rootfs.sh - Add a doc comment - Pass to build container, e.g. to build x86_64 with glibc (would always use musl) Signed-off-by: Jakob Naucke --- tools/osbuilder/rootfs-builder/rootfs.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/osbuilder/rootfs-builder/rootfs.sh b/tools/osbuilder/rootfs-builder/rootfs.sh index 07f8ea89e..6bc0f8b7d 100755 --- a/tools/osbuilder/rootfs-builder/rootfs.sh +++ b/tools/osbuilder/rootfs-builder/rootfs.sh @@ -124,6 +124,9 @@ KERNEL_MODULES_DIR Path to a directory containing kernel modules to include in the rootfs. Default value: +LIBC libc the agent is built against (gnu or musl). + Default value: ${LIBC} (varies with architecture) + ROOTFS_DIR Path to the directory that is populated with the rootfs. Default value: <${script_name} path>/rootfs- @@ -407,6 +410,7 @@ build_rootfs_distro() --env AGENT_INIT="${AGENT_INIT}" \ --env CI="${CI}" \ --env KERNEL_MODULES_DIR="${KERNEL_MODULES_DIR}" \ + --env LIBC="${LIBC}" \ --env EXTRA_PKGS="${EXTRA_PKGS}" \ --env OSBUILDER_VERSION="${OSBUILDER_VERSION}" \ --env OS_VERSION="${OS_VERSION}" \ From df511bf17934a2bb41403438d0a23784bf0d2fb5 Mon Sep 17 00:00:00 2001 From: Jakob Naucke Date: Tue, 15 Feb 2022 19:12:41 +0100 Subject: [PATCH 06/46] packaging: Enable cross-building agent Requires setting ARCH and CC. - Add CC linker option for building agent. - Set host for building libseccomp. Fixes: #3681 Signed-off-by: Jakob Naucke --- ci/install_libseccomp.sh | 7 ++++--- utils.mk | 11 ++++++++++- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/ci/install_libseccomp.sh b/ci/install_libseccomp.sh index 893343886..4c4a42cb9 100755 --- a/ci/install_libseccomp.sh +++ b/ci/install_libseccomp.sh @@ -19,7 +19,7 @@ source "${tests_repo_dir}/.ci/lib.sh" # fail. So let's ensure they are unset here. 
unset PREFIX DESTDIR -arch=$(uname -m) +arch=${ARCH:-$(uname -m)} workdir="$(mktemp -d --tmpdir build-libseccomp.XXXXX)" # Variables for libseccomp @@ -70,7 +70,8 @@ build_and_install_gperf() { curl -sLO "${gperf_tarball_url}" tar -xf "${gperf_tarball}" pushd "gperf-${gperf_version}" - ./configure --prefix="${gperf_install_dir}" + # Unset $CC for configure, we will always use native for gperf + CC= ./configure --prefix="${gperf_install_dir}" make make install export PATH=$PATH:"${gperf_install_dir}"/bin @@ -84,7 +85,7 @@ build_and_install_libseccomp() { curl -sLO "${libseccomp_tarball_url}" tar -xf "${libseccomp_tarball}" pushd "libseccomp-${libseccomp_version}" - ./configure --prefix="${libseccomp_install_dir}" CFLAGS="${cflags}" --enable-static + ./configure --prefix="${libseccomp_install_dir}" CFLAGS="${cflags}" --enable-static --host="${arch}" make make install popd diff --git a/utils.mk b/utils.mk index e833b40d7..c87da0c06 100644 --- a/utils.mk +++ b/utils.mk @@ -113,7 +113,8 @@ endef BUILD_TYPE = release ##VAR ARCH=arch target to build (format: uname -m) -ARCH = $(shell uname -m) +HOST_ARCH = $(shell uname -m) +ARCH ?= $(HOST_ARCH) ##VAR LIBC=musl|gnu LIBC ?= musl ifneq ($(LIBC),musl) @@ -142,6 +143,14 @@ ifeq ($(ARCH), aarch64) $(warning "WARNING: aarch64-musl needs extra symbols from libgcc") endif +ifneq ($(HOST_ARCH),$(ARCH)) + ifeq ($(CC),) + CC = gcc + $(warning "WARNING: A foreign ARCH was passed, but no CC alternative. Using gcc.") + endif + override EXTRA_RUSTFLAGS += -C linker=$(CC) +endif + TRIPLE = $(ARCH)-unknown-linux-$(LIBC) CWD := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) From 72f7e9e3002db2540d605af3249ae87316711fd5 Mon Sep 17 00:00:00 2001 From: Jakob Naucke Date: Tue, 15 Feb 2022 19:12:41 +0100 Subject: [PATCH 07/46] osbuilder: Multistrap Ubuntu Use `multistrap` for building Ubuntu rootfs. Adds support for building for foreign architectures using the `ARCH` environment variable. In the process, the Ubuntu rootfs workflow is vastly simplified. Signed-off-by: Jakob Naucke --- tools/osbuilder/rootfs-builder/rootfs.sh | 18 ++-- .../rootfs-builder/ubuntu/Dockerfile.in | 42 +++------ .../osbuilder/rootfs-builder/ubuntu/config.sh | 48 +++++----- .../rootfs-builder/ubuntu/rootfs_lib.sh | 93 +++++-------------- tools/osbuilder/scripts/lib.sh | 5 +- 5 files changed, 71 insertions(+), 135 deletions(-) diff --git a/tools/osbuilder/rootfs-builder/rootfs.sh b/tools/osbuilder/rootfs-builder/rootfs.sh index 6bc0f8b7d..9bd485fe4 100755 --- a/tools/osbuilder/rootfs-builder/rootfs.sh +++ b/tools/osbuilder/rootfs-builder/rootfs.sh @@ -39,7 +39,11 @@ handle_error() { trap 'handle_error $LINENO' ERR # Default architecture -ARCH=$(uname -m) +export ARCH=${ARCH:-$(uname -m)} +if [ "$ARCH" == "ppc64le" ] || [ "$ARCH" == "s390x" ]; then + LIBC=gnu + echo "WARNING: Forcing LIBC=gnu because $ARCH has no musl Rust target" +fi # distro-specific config file typeset -r CONFIG_SH="config.sh" @@ -103,6 +107,11 @@ AGENT_SOURCE_BIN Path to the directory of agent binary. AGENT_VERSION Version of the agent to include in the rootfs. Default value: ${AGENT_VERSION:-} +ARCH Target architecture (according to \`uname -m\`). + Foreign bootstraps are currently only supported for Ubuntu + and glibc agents. + Default value: $(uname -m) + DISTRO_REPO Use host repositories to install guest packages. 
Default value: @@ -408,6 +417,7 @@ build_rootfs_distro() --env ROOTFS_DIR="/rootfs" \ --env AGENT_BIN="${AGENT_BIN}" \ --env AGENT_INIT="${AGENT_INIT}" \ + --env ARCH="${ARCH}" \ --env CI="${CI}" \ --env KERNEL_MODULES_DIR="${KERNEL_MODULES_DIR}" \ --env LIBC="${LIBC}" \ @@ -538,10 +548,6 @@ EOT AGENT_DEST="${AGENT_DIR}/${AGENT_BIN}" if [ -z "${AGENT_SOURCE_BIN}" ] ; then - if [ "$ARCH" == "ppc64le" ] || [ "$ARCH" == "s390x" ]; then - LIBC=gnu - echo "WARNING: Forcing LIBC=gnu because $ARCH has no musl Rust target" - fi test -r "${HOME}/.cargo/env" && source "${HOME}/.cargo/env" # rust agent needs ${arch}-unknown-linux-${LIBC} if ! (rustup show | grep -v linux-${LIBC} > /dev/null); then @@ -559,7 +565,7 @@ EOT info "Set up libseccomp" libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX) gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX) - bash ${script_dir}/../../../ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}" + ${script_dir}/../../../ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}" echo "Set environment variables for the libseccomp crate to link the libseccomp library statically" export LIBSECCOMP_LINK_TYPE=static export LIBSECCOMP_LIB_PATH="${libseccomp_install_dir}/lib" diff --git a/tools/osbuilder/rootfs-builder/ubuntu/Dockerfile.in b/tools/osbuilder/rootfs-builder/ubuntu/Dockerfile.in index edc921641..de3c31ed9 100644 --- a/tools/osbuilder/rootfs-builder/ubuntu/Dockerfile.in +++ b/tools/osbuilder/rootfs-builder/ubuntu/Dockerfile.in @@ -1,45 +1,29 @@ -# -# Copyright (c) 2018 Yash Jain +# Copyright (c) 2018 Yash Jain, 2022 IBM Corp. # # SPDX-License-Identifier: Apache-2.0 ARG IMAGE_REGISTRY=docker.io -#ubuntu: docker image to be used to create a rootfs -#@OS_VERSION@: Docker image version to build this dockerfile FROM ${IMAGE_REGISTRY}/ubuntu:@OS_VERSION@ +@SET_PROXY@ -# This dockerfile needs to provide all the componets need to build a rootfs -# Install any package need to create a rootfs (package manager, extra tools) - -# RUN commands -RUN apt-get update && apt-get --no-install-recommends install -y \ - apt-utils \ - autoconf \ - automake \ - binutils \ - build-essential \ +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive \ + apt-get --no-install-recommends -y install \ ca-certificates \ - chrony \ - coreutils \ curl \ - debianutils \ - debootstrap \ g++ \ - gcc \ + $(gcc_arch="@ARCH@" && [ "$(uname -m)" != "$gcc_arch" ] && ( \ + libc_arch="$gcc_arch" && \ + [ "$gcc_arch" = aarch64 ] && libc_arch=arm64; \ + [ "$gcc_arch" = ppc64le ] && gcc_arch=powerpc64le && libc_arch=ppc64el; \ + [ "$gcc_arch" = x86_64 ] && gcc_arch=x86-64 && libc_arch=amd64; \ + echo "gcc-$gcc_arch-linux-gnu libc6-dev-$libc_arch-cross")) \ git \ - libc6-dev \ - libstdc++-8-dev \ - m4 \ make \ + multistrap \ musl-tools \ - protobuf-compiler \ - sed \ - systemd \ - tar \ - vim \ - wget + protobuf-compiler # aarch64 requires this name -- link for all RUN ln -s /usr/bin/musl-gcc "/usr/bin/$(uname -m)-linux-musl-gcc" -# This will install the proper packages to build Kata components @INSTALL_RUST@ diff --git a/tools/osbuilder/rootfs-builder/ubuntu/config.sh b/tools/osbuilder/rootfs-builder/ubuntu/config.sh index 14ae93ece..8afc34804 100644 --- a/tools/osbuilder/rootfs-builder/ubuntu/config.sh +++ b/tools/osbuilder/rootfs-builder/ubuntu/config.sh @@ -1,34 +1,28 @@ -# This is a configuration file add extra variables to -# -# Copyright (c) 2018 Yash Jain +# Copyright (c) 2018 Yash Jain, 2022 IBM Corp. 
# # SPDX-License-Identifier: Apache-2.0 -# be used by build_rootfs() from rootfs_lib.sh the variables will be -# loaded just before call the function. For more information see the -# rootfs-builder/README.md file. -OS_VERSION=${OS_VERSION:-20.04} +OS_NAME=ubuntu # This should be Ubuntu's code name, e.g. "focal" (Focal Fossa) for 20.04 -OS_NAME=${OS_NAME:-"focal"} +OS_VERSION=${OS_VERSION:-focal} +PACKAGES=chrony +[ "$AGENT_INIT" = no ] && PACKAGES+=" init" +[ "$SECCOMP" = yes ] && PACKAGES+=" libseccomp2" +REPO_URL=http://ports.ubuntu.com -# packages to be installed by default -PACKAGES="systemd coreutils init kmod" -EXTRA_PKGS+=" chrony" - -DEBOOTSTRAP=${PACKAGE_MANAGER:-"debootstrap"} - -case $(uname -m) in - x86_64) ARCHITECTURE="amd64";; - ppc64le) ARCHITECTURE="ppc64el";; - aarch64) ARCHITECTURE="arm64";; - s390x) ARCHITECTURE="s390x";; - (*) die "$(uname -m) not supported " +case "$ARCH" in + aarch64) DEB_ARCH=arm64;; + ppc64le) DEB_ARCH=ppc64el;; + s390x) DEB_ARCH="$ARCH";; + x86_64) DEB_ARCH=amd64; REPO_URL=http://archive.ubuntu.com/ubuntu;; + *) die "$ARCH not supported" esac -# Init process must be one of {systemd,kata-agent} -INIT_PROCESS=systemd -# List of zero or more architectures to exclude from build, -# as reported by `uname -m` -ARCH_EXCLUDE_LIST=() - -[ "$SECCOMP" = "yes" ] && PACKAGES+=" libseccomp2" || true +if [ "$(uname -m)" != "$ARCH" ]; then + case "$ARCH" in + ppc64le) cc_arch=powerpc64le;; + x86_64) cc_arch=x86-64;; + *) cc_arch="$ARCH" + esac + export CC="$cc_arch-linux-gnu-gcc" +fi diff --git a/tools/osbuilder/rootfs-builder/ubuntu/rootfs_lib.sh b/tools/osbuilder/rootfs-builder/ubuntu/rootfs_lib.sh index e94df355d..4261c7ff0 100644 --- a/tools/osbuilder/rootfs-builder/ubuntu/rootfs_lib.sh +++ b/tools/osbuilder/rootfs-builder/ubuntu/rootfs_lib.sh @@ -1,78 +1,29 @@ -# - Arguments -# -# Copyright (c) 2018 Yash Jain +# Copyright (c) 2018 Yash Jain, 2022 IBM Corp. # # SPDX-License-Identifier: Apache-2.0 -# -# -# rootfs_dir=$1 -# -# - Optional environment variables -# -# EXTRA_PKGS: Variable to add extra PKGS provided by the user -# -# BIN_AGENT: Name of the Kata-Agent binary -# -# REPO_URL: URL to distribution repository ( should be configured in -# config.sh file) -# -# Any other configuration variable for a specific distro must be added -# and documented on its own config.sh -# -# - Expected result -# -# rootfs_dir populated with rootfs pkgs -# It must provide a binary in /sbin/init -# + build_rootfs() { - # Mandatory - local ROOTFS_DIR=$1 + local rootfs_dir=$1 + local multistrap_conf=multistrap.conf - # Name of the Kata-Agent binary - local BIN_AGENT=${BIN_AGENT} + # For simplicity's sake, use multistrap for foreign and native bootstraps. 
+ cat > "$multistrap_conf" << EOF +[General] +cleanup=true +aptsources=Ubuntu +bootstrap=Ubuntu - # In case of support EXTRA packages, use it to allow - # users to add more packages to the base rootfs - local EXTRA_PKGS=${EXTRA_PKGS:-} +[Ubuntu] +source=$REPO_URL +keyring=ubuntu-keyring +suite=focal +packages=$PACKAGES $EXTRA_PKGS +EOF + multistrap -a "$DEB_ARCH" -d "$rootfs_dir" -f "$multistrap_conf" + rm -rf "$rootfs_dir/var/run" + ln -s /run "$rootfs_dir/var/run" + cp --remove-destination /etc/resolv.conf "$rootfs_dir/etc" - # In case rootfs is created using repositories allow user to modify - # the default URL - local REPO_URL=${REPO_URL:-YOUR_REPO} - - # PATH where files this script is placed - # Use it to refer to files in the same directory - # Example: ${CONFIG_DIR}/foo - local CONFIG_DIR=${CONFIG_DIR} - - - # Populate ROOTFS_DIR - # Must provide /sbin/init and /bin/${BIN_AGENT} - DEBOOTSTRAP="debootstrap" - check_root - mkdir -p "${ROOTFS_DIR}" - if [ -n "${PKG_MANAGER}" ]; then - info "debootstrap path provided by user: ${PKG_MANAGER}" - elif check_program $DEBOOTSTRAP ; then - PKG_MANAGER=$DEBOOTSTRAP - else - die "$DEBOOTSTRAP is not installed" - fi - # trim whitespace - PACKAGES=$(echo $PACKAGES |xargs ) - # add comma as debootstrap needs , separated package names. - # Don't change $PACKAGES in config.sh to include ',' - # This is done to maintain consistency - PACKAGES=$(echo $PACKAGES | sed -e 's/ /,/g' ) - - ${PKG_MANAGER} --variant=minbase \ - --arch=${ARCHITECTURE}\ - --include="$PACKAGES" \ - ${OS_NAME} \ - ${ROOTFS_DIR} - - [ -n "${EXTRA_PKGS}" ] && chroot $ROOTFS_DIR apt-get install -y ${EXTRA_PKGS} - - # Reduce image size and memory footprint - # removing not needed files and directories. - chroot $ROOTFS_DIR rm -rf /usr/share/{bash-completion,bug,doc,info,lintian,locale,man,menu,misc,pixmaps,terminfo,zoneinfo,zsh} + # Reduce image size and memory footprint by removing unnecessary files and directories. + rm -rf $rootfs_dir/usr/share/{bash-completion,bug,doc,info,lintian,locale,man,menu,misc,pixmaps,terminfo,zsh} } diff --git a/tools/osbuilder/scripts/lib.sh b/tools/osbuilder/scripts/lib.sh index 9db989900..5a6a1be1e 100644 --- a/tools/osbuilder/scripts/lib.sh +++ b/tools/osbuilder/scripts/lib.sh @@ -203,8 +203,8 @@ generate_dockerfile() dir="$1" [ -d "${dir}" ] || die "${dir}: not a directory" - local rustarch=$(uname -m) - [ "$rustarch" = ppc64le ] && rustarch=powerpc64le + local rustarch="$ARCH" + [ "$ARCH" = ppc64le ] && rustarch=powerpc64le [ -n "${http_proxy:-}" ] && readonly set_proxy="RUN sed -i '$ a proxy="${http_proxy:-}"' /etc/dnf/dnf.conf /etc/yum.conf; true" @@ -220,6 +220,7 @@ RUN . /root/.cargo/env; cargo install cargo-when sed \ -e "s#@OS_VERSION@#${OS_VERSION:-}#g" \ + -e "s#@ARCH@#$ARCH#g" \ -e "s#@INSTALL_RUST@#${install_rust//$'\n'/\\n}#g" \ -e "s#@SET_PROXY@#${set_proxy:-}#g" \ Dockerfile.in > Dockerfile From a2f5c1768e49883560125713b157be7dcf41210e Mon Sep 17 00:00:00 2001 From: Miao Xia Date: Thu, 17 Feb 2022 11:15:36 +0800 Subject: [PATCH 08/46] runtime/virtcontainers: Pass the hugepages resources to agent The hugepages resources claimed by containers should be limited by cgroup in the guest OS. 
Fixes: #3695 Signed-off-by: Miao Xia --- src/runtime/virtcontainers/kata_agent.go | 1 - src/runtime/virtcontainers/kata_agent_test.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/runtime/virtcontainers/kata_agent.go b/src/runtime/virtcontainers/kata_agent.go index 822e84f97..859a72527 100644 --- a/src/runtime/virtcontainers/kata_agent.go +++ b/src/runtime/virtcontainers/kata_agent.go @@ -1050,7 +1050,6 @@ func (k *kataAgent) constrainGRPCSpec(grpcSpec *grpc.Spec, passSeccomp bool, str grpcSpec.Linux.Resources.Devices = nil grpcSpec.Linux.Resources.Pids = nil grpcSpec.Linux.Resources.BlockIO = nil - grpcSpec.Linux.Resources.HugepageLimits = nil grpcSpec.Linux.Resources.Network = nil if grpcSpec.Linux.Resources.CPU != nil { grpcSpec.Linux.Resources.CPU.Cpus = "" diff --git a/src/runtime/virtcontainers/kata_agent_test.go b/src/runtime/virtcontainers/kata_agent_test.go index 6475e8b06..71cf992bb 100644 --- a/src/runtime/virtcontainers/kata_agent_test.go +++ b/src/runtime/virtcontainers/kata_agent_test.go @@ -598,7 +598,7 @@ func TestConstrainGRPCSpec(t *testing.T) { assert.NotNil(g.Linux.Resources.Memory) assert.Nil(g.Linux.Resources.Pids) assert.Nil(g.Linux.Resources.BlockIO) - assert.Nil(g.Linux.Resources.HugepageLimits) + assert.Len(g.Linux.Resources.HugepageLimits, 0) assert.Nil(g.Linux.Resources.Network) assert.NotNil(g.Linux.Resources.CPU) assert.Equal(g.Process.SelinuxLabel, "") From ebec6903b803e54032f60ba02609f9fe5b5059e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Fri, 18 Mar 2022 12:50:07 +0100 Subject: [PATCH 09/46] static-build,clh: Add the ability to build from a PR MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Right now it doesn't do much for us, as we're always building from a specific version. However, this opens the possibility for us to add a CI, similar to the one we have for CRI-O, for testing against each cloud-hypervisor PR, on the cloud-hypervisor branch. 
Fixes: #3908 Signed-off-by: Fabiano Fidêncio --- .../cloud-hypervisor/build-static-clh.sh | 26 ++++++++++++++++--- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/tools/packaging/static-build/cloud-hypervisor/build-static-clh.sh b/tools/packaging/static-build/cloud-hypervisor/build-static-clh.sh index cf2aaec38..ff26822cc 100755 --- a/tools/packaging/static-build/cloud-hypervisor/build-static-clh.sh +++ b/tools/packaging/static-build/cloud-hypervisor/build-static-clh.sh @@ -22,6 +22,8 @@ source "${script_dir}/../../scripts/lib.sh" cloud_hypervisor_repo="${cloud_hypervisor_repo:-}" cloud_hypervisor_version="${cloud_hypervisor_version:-}" +cloud_hypervisor_pr="${cloud_hypervisor_pr:-}" +cloud_hypervisor_pull_ref_branch="${cloud_hypervisor_pull_ref_branch:-main}" if [ -z "$cloud_hypervisor_repo" ]; then info "Get cloud_hypervisor information from runtime versions.yaml" @@ -31,8 +33,13 @@ if [ -z "$cloud_hypervisor_repo" ]; then fi [ -n "$cloud_hypervisor_repo" ] || die "failed to get cloud_hypervisor repo" -[ -n "$cloud_hypervisor_version" ] || cloud_hypervisor_version=$(get_from_kata_deps "assets.hypervisor.cloud_hypervisor.version" "${kata_version}") -[ -n "$cloud_hypervisor_version" ] || die "failed to get cloud_hypervisor version" +if [ -n "$cloud_hypervisor_pr" ]; then + force_build_from_source=true + cloud_hypervisor_version="PR $cloud_hypervisor_pr" +else + [ -n "$cloud_hypervisor_version" ] || cloud_hypervisor_version=$(get_from_kata_deps "assets.hypervisor.cloud_hypervisor.version" "${kata_version}") + [ -n "$cloud_hypervisor_version" ] || die "failed to get cloud_hypervisor version" +fi pull_clh_released_binary() { info "Download cloud-hypervisor version: ${cloud_hypervisor_version}" @@ -50,8 +57,19 @@ build_clh_from_source() { repo_dir="${repo_dir//.git}" [ -d "${repo_dir}" ] || git clone "${cloud_hypervisor_repo}" pushd "${repo_dir}" - git fetch || true - git checkout "${cloud_hypervisor_version}" + + if [ -n "${cloud_hypervisor_pr}" ]; then + local pr_branch="PR_${cloud_hypervisor_pr}" + git fetch origin "pull/${cloud_hypervisor_pr}/head:${pr_branch}" || return 1 + git checkout "${pr_branch}" + git rebase "origin/${cloud_hypervisor_pull_ref_branch}" + + git log --oneline main~1..HEAD + else + git fetch || true + git checkout "${cloud_hypervisor_version}" + fi + if [ -n "${features}" ]; then info "Build cloud-hypervisor enabling the following features: ${features}" ./scripts/dev_cli.sh build --release --libc musl --features "${features}" From c776bdf4a855c6fffd596f00341ff6a9b62378af Mon Sep 17 00:00:00 2001 From: David Gibson Date: Tue, 22 Mar 2022 16:39:22 +1100 Subject: [PATCH 10/46] virtcontainers: Remove unused parameter from go-test.sh The check-go-test target passes the path to the mock hook test binary to go-test.sh when it invokes it. But go-test.sh just calls run_go_test from ci/lib.sh, which invokes a script from the tests repo *without* any parameters. That is, this parameter is ignored anyway, so remove it. 
Signed-off-by: David Gibson --- src/runtime/virtcontainers/Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/runtime/virtcontainers/Makefile b/src/runtime/virtcontainers/Makefile index f44c5c081..8a0e2761e 100644 --- a/src/runtime/virtcontainers/Makefile +++ b/src/runtime/virtcontainers/Makefile @@ -45,8 +45,7 @@ check-go-static: bash $(MK_DIR)/../../../ci/static-checks.sh check-go-test: - bash $(MK_DIR)/../../../ci/go-test.sh \ - $(TEST_BIN_DIR)/$(HOOK_BIN) + bash $(MK_DIR)/../../../ci/go-test.sh # # Install From c20ad2836ce61c8db3dfbf2aa287e7fd4939ba5d Mon Sep 17 00:00:00 2001 From: David Gibson Date: Tue, 22 Mar 2022 16:40:57 +1100 Subject: [PATCH 11/46] virtcontainers: Remove unused Makefile defines The INSTALL_EXEC and UNINSTALL_EXEC definitions from the virtcontainers Makefile (unlike those from the runtime Makefile in the parent directory) are entirely unused. Remove them. Signed-off-by: David Gibson --- src/runtime/virtcontainers/Makefile | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/runtime/virtcontainers/Makefile b/src/runtime/virtcontainers/Makefile index 8a0e2761e..86da3a6e9 100644 --- a/src/runtime/virtcontainers/Makefile +++ b/src/runtime/virtcontainers/Makefile @@ -51,10 +51,6 @@ check-go-test: # Install # -define INSTALL_EXEC - install -D $1 $(VC_BIN_DIR)/ || exit 1; -endef - define INSTALL_TEST_EXEC install -D $1 $(TEST_BIN_DIR)/ || exit 1; endef @@ -68,10 +64,6 @@ install: # Uninstall # -define UNINSTALL_EXEC - rm -f $(call FILE_SAFE_TO_REMOVE,$(VC_BIN_DIR)/$1) || exit 1; -endef - define UNINSTALL_TEST_EXEC rm -f $(call FILE_SAFE_TO_REMOVE,$(TEST_BIN_DIR)/$1) || exit 1; endef From e65db838ffcf9d0213beaae75f98076557b6b5cd Mon Sep 17 00:00:00 2001 From: David Gibson Date: Tue, 22 Mar 2022 16:53:59 +1100 Subject: [PATCH 12/46] virtcontainers: Remove VC_BIN_DIR The VC_BIN_DIR variable in the virtcontainers Makefile is almost unused. It's used to generate TEST_BIN_DIR, and it's created in the install target. However, we also create TEST_BIN_DIR, which is a subdirectory of VC_BIN_DIR with mkdir -p, so it will necessarily create VC_BIN_DIR along the way. So we can drop the unnecessary mkdir and expand the definition of VC_BIN_DIR in the definition of TEST_BIN_DIR. 
Signed-off-by: David Gibson --- src/runtime/virtcontainers/Makefile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/runtime/virtcontainers/Makefile b/src/runtime/virtcontainers/Makefile index 86da3a6e9..1665577c8 100644 --- a/src/runtime/virtcontainers/Makefile +++ b/src/runtime/virtcontainers/Makefile @@ -6,8 +6,7 @@ PREFIX := /usr BIN_DIR := $(PREFIX)/bin -VC_BIN_DIR := $(BIN_DIR)/virtcontainers/bin -TEST_BIN_DIR := $(VC_BIN_DIR)/test +TEST_BIN_DIR := $(BIN_DIR)/virtcontainers/bin/test HOOK_DIR := hook/mock HOOK_BIN := hook MK_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) @@ -56,7 +55,6 @@ define INSTALL_TEST_EXEC endef install: - @mkdir -p $(VC_BIN_DIR) @mkdir -p $(TEST_BIN_DIR) $(call INSTALL_TEST_EXEC,$(HOOK_DIR)/$(HOOK_BIN)) From 32131cb8ba3221c86010f972674ae05d780a2282 Mon Sep 17 00:00:00 2001 From: Dan Middleton Date: Tue, 22 Mar 2022 09:45:08 -0500 Subject: [PATCH 13/46] Agent: fix unneeded late initialization lint Clippy v1.58 added needless_late_init Fixes #3933 Signed-off-by: Dan Middleton --- src/agent/rustjail/src/cgroups/notifier.rs | 10 +++++----- src/agent/rustjail/src/lib.rs | 9 ++++----- src/agent/src/main.rs | 10 ++++------ src/agent/src/mount.rs | 4 ++-- src/agent/src/signal.rs | 9 ++++----- src/agent/src/util.rs | 12 +++--------- src/agent/vsock-exporter/src/lib.rs | 10 ++++------ 7 files changed, 26 insertions(+), 38 deletions(-) diff --git a/src/agent/rustjail/src/cgroups/notifier.rs b/src/agent/rustjail/src/cgroups/notifier.rs index b429a234c..9f91b3584 100644 --- a/src/agent/rustjail/src/cgroups/notifier.rs +++ b/src/agent/rustjail/src/cgroups/notifier.rs @@ -151,12 +151,12 @@ async fn register_memory_event( let eventfd = eventfd(0, EfdFlags::EFD_CLOEXEC)?; let event_control_path = Path::new(&cg_dir).join("cgroup.event_control"); - let data; - if arg.is_empty() { - data = format!("{} {}", eventfd, event_file.as_raw_fd()); + + let data = if arg.is_empty() { + format!("{} {}", eventfd, event_file.as_raw_fd()) } else { - data = format!("{} {} {}", eventfd, event_file.as_raw_fd(), arg); - } + format!("{} {} {}", eventfd, event_file.as_raw_fd(), arg) + }; fs::write(&event_control_path, data)?; diff --git a/src/agent/rustjail/src/lib.rs b/src/agent/rustjail/src/lib.rs index 7535bf990..bceb9cd04 100644 --- a/src/agent/rustjail/src/lib.rs +++ b/src/agent/rustjail/src/lib.rs @@ -351,13 +351,12 @@ fn seccomp_grpc_to_oci(sec: &grpc::LinuxSeccomp) -> oci::LinuxSeccomp { for sys in sec.Syscalls.iter() { let mut args = Vec::new(); - let errno_ret: u32; - if sys.has_errnoret() { - errno_ret = sys.get_errnoret(); + let errno_ret: u32 = if sys.has_errnoret() { + sys.get_errnoret() } else { - errno_ret = libc::EPERM as u32; - } + libc::EPERM as u32 + }; for arg in sys.Args.iter() { args.push(oci::LinuxSeccompArg { diff --git a/src/agent/src/main.rs b/src/agent/src/main.rs index d62f1e64c..f75f885b5 100644 --- a/src/agent/src/main.rs +++ b/src/agent/src/main.rs @@ -125,9 +125,7 @@ fn announce(logger: &Logger, config: &AgentConfig) { // output to the vsock port specified, or stdout. 
async fn create_logger_task(rfd: RawFd, vsock_port: u32, shutdown: Receiver) -> Result<()> { let mut reader = PipeStream::from_fd(rfd); - let mut writer: Box; - - if vsock_port > 0 { + let mut writer: Box = if vsock_port > 0 { let listenfd = socket::socket( AddressFamily::Vsock, SockType::Stream, @@ -139,10 +137,10 @@ async fn create_logger_task(rfd: RawFd, vsock_port: u32, shutdown: Receiver>) -> Result } let mut p = process.unwrap(); - let ret: i32; - match wait_status { - WaitStatus::Exited(_, c) => ret = c, - WaitStatus::Signaled(_, sig, _) => ret = sig as i32, + let ret: i32 = match wait_status { + WaitStatus::Exited(_, c) => c, + WaitStatus::Signaled(_, sig, _) => sig as i32, _ => { info!(logger, "got wrong status for process"; "child-status" => format!("{:?}", wait_status)); continue; } - } + }; p.exit_code = ret; let _ = p.exit_tx.take(); diff --git a/src/agent/src/util.rs b/src/agent/src/util.rs index 488e6ca6b..c4645f975 100644 --- a/src/agent/src/util.rs +++ b/src/agent/src/util.rs @@ -237,8 +237,6 @@ mod tests { JoinError, >; - let result: std::result::Result; - select! { res = handle => spawn_result = res, _ = &mut timeout => panic!("timed out"), @@ -246,7 +244,7 @@ mod tests { assert!(spawn_result.is_ok()); - result = spawn_result.unwrap(); + let result: std::result::Result = spawn_result.unwrap(); assert!(result.is_ok()); @@ -278,8 +276,6 @@ mod tests { let spawn_result: std::result::Result, JoinError>; - let result: std::result::Result; - select! { res = handle => spawn_result = res, _ = &mut timeout => panic!("timed out"), @@ -287,7 +283,7 @@ mod tests { assert!(spawn_result.is_ok()); - result = spawn_result.unwrap(); + let result: std::result::Result = spawn_result.unwrap(); assert!(result.is_ok()); @@ -320,8 +316,6 @@ mod tests { let spawn_result: std::result::Result, JoinError>; - let result: std::result::Result; - select! { res = handle => spawn_result = res, _ = &mut timeout => panic!("timed out"), @@ -329,7 +323,7 @@ mod tests { assert!(spawn_result.is_ok()); - result = spawn_result.unwrap(); + let result: std::result::Result = spawn_result.unwrap(); assert!(result.is_ok()); diff --git a/src/agent/vsock-exporter/src/lib.rs b/src/agent/vsock-exporter/src/lib.rs index 27e71e64b..776c3b548 100644 --- a/src/agent/vsock-exporter/src/lib.rs +++ b/src/agent/vsock-exporter/src/lib.rs @@ -178,13 +178,11 @@ impl Builder { pub fn init(self) -> Exporter { let Builder { port, cid, logger } = self; - let cid_str: String; - - if self.cid == libc::VMADDR_CID_ANY { - cid_str = ANY_CID.to_string(); + let cid_str: String = if self.cid == libc::VMADDR_CID_ANY { + ANY_CID.to_string() } else { - cid_str = format!("{}", self.cid); - } + format!("{}", self.cid) + }; Exporter { port, From 7743486413fa1b115dcc024838c23ec92f8464c2 Mon Sep 17 00:00:00 2001 From: bin Date: Tue, 22 Mar 2022 22:10:43 +0800 Subject: [PATCH 14/46] docs: fix markdown issues in how-to-run-docker-with-kata.md Some links in how-to-run-docker-with-kata.md is not correct, and some typos. 
Fixes: #3929 Signed-off-by: bin --- docs/how-to/how-to-run-docker-with-kata.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/how-to/how-to-run-docker-with-kata.md b/docs/how-to/how-to-run-docker-with-kata.md index 3f5e11471..d9c03ad0a 100644 --- a/docs/how-to/how-to-run-docker-with-kata.md +++ b/docs/how-to/how-to-run-docker-with-kata.md @@ -48,9 +48,9 @@ Running Docker containers Kata Containers requires care because `VOLUME`s specif kataShared on / type virtiofs (rw,relatime,dax) ``` -`kataShared` mount types are powered by [`virtio-fs`][virtio-fs], a marked improvement over `virtio-9p`, thanks to [PR #1016](https://github.com/kata-containers/runtime/pull/1016). While `virtio-fs` is normally an excellent choice, in the case of DinD workloads `virtio-fs` causes an issue -- [it *cannot* be used as a "upper layer" of `overlayfs` without a custom patch](http://lists.katacontainers.io/pipermail/kata-dev/2020-January/001216.html). +`kataShared` mount types are powered by [`virtio-fs`](https://virtio-fs.gitlab.io/), a marked improvement over `virtio-9p`, thanks to [PR #1016](https://github.com/kata-containers/runtime/pull/1016). While `virtio-fs` is normally an excellent choice, in the case of DinD workloads `virtio-fs` causes an issue -- [it *cannot* be used as a "upper layer" of `overlayfs` without a custom patch](http://lists.katacontainers.io/pipermail/kata-dev/2020-January/001216.html). -As `/var/lib/docker` is a `VOLUME` specified by DinD (i.e. the `docker` images tagged `*-dind`/`*-dind-rootless`), `docker` fill fail to start (or even worse, silently pick a worse storage driver like `vfs`) when started in a Kata Container. Special measures must be taken when running DinD-powered workloads in Kata Containers. +As `/var/lib/docker` is a `VOLUME` specified by DinD (i.e. the `docker` images tagged `*-dind`/`*-dind-rootless`), `docker` will fail to start (or even worse, silently pick a worse storage driver like `vfs`) when started in a Kata Container. Special measures must be taken when running DinD-powered workloads in Kata Containers. ## Workarounds/Solutions @@ -58,7 +58,7 @@ Thanks to various community contributions (see [issue references below](#referen ### Use a memory backed volume -For small workloads (small container images, without much generated filesystem load), a memory-backed volume is sufficient. Kubernetes supports a variant of [the `EmptyDir` volume][k8s-emptydir], which allows for memdisk-backed storage -- the [the `medium: Memory` ][k8s-memory-volume-type]. An example of a `Pod` using such a setup [was contributed](https://github.com/kata-containers/runtime/issues/1429#issuecomment-477385283), and is reproduced below: +For small workloads (small container images, without much generated filesystem load), a memory-backed volume is sufficient. Kubernetes supports a variant of [the `EmptyDir` volume](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir), which allows for memdisk-backed storage -- the the `medium: Memory`. An example of a `Pod` using such a setup [was contributed](https://github.com/kata-containers/runtime/issues/1429#issuecomment-477385283), and is reproduced below: ```yaml apiVersion: v1 From 0e83c95fac827ca739fd115f7560868c411f5de8 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Tue, 22 Mar 2022 17:08:15 +1100 Subject: [PATCH 15/46] virtcontainers: Run mock hook from build tree rather than system bin dir Running unit tests should generally have minimal dependencies on things outside the build tree. 
It *definitely* shouldn't modify system wide things outside the build tree. Currently the runtime "make test" target does so, though. Several of the tests in src/runtime/pkg/katautils/hook_test.go require a sample hook binary. They expect this hook in /usr/bin/virtcontainers/bin/test/hook, so the makefile, as root, installs the test binary to that location. Go tests automatically run within the package's directory though, so there's no need to use a system wide path. We can use a relative path to the binary build within the tree just as easily. fixes #3941 Signed-off-by: David Gibson --- src/runtime/Makefile | 6 ++---- src/runtime/pkg/katautils/hook_test.go | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/runtime/Makefile b/src/runtime/Makefile index 9f34c3da7..f87e72e58 100644 --- a/src/runtime/Makefile +++ b/src/runtime/Makefile @@ -589,12 +589,10 @@ $(GENERATED_FILES): %: %.in $(MAKEFILE_LIST) VERSION .git-commit generate-config: $(CONFIGS) -test: install-hook go-test +test: hook go-test -install-hook: +hook: make -C virtcontainers hook - echo "installing mock hook" - sudo -E make -C virtcontainers install go-test: $(GENERATED_FILES) go clean -testcache diff --git a/src/runtime/pkg/katautils/hook_test.go b/src/runtime/pkg/katautils/hook_test.go index 6109b5549..50452974b 100644 --- a/src/runtime/pkg/katautils/hook_test.go +++ b/src/runtime/pkg/katautils/hook_test.go @@ -20,7 +20,7 @@ import ( var testKeyHook = "test-key" var testContainerIDHook = "test-container-id" var testControllerIDHook = "test-controller-id" -var testBinHookPath = "/usr/bin/virtcontainers/bin/test/hook" +var testBinHookPath = "../../virtcontainers/hook/mock/hook" var testBundlePath = "/test/bundle" func getMockHookBinPath() string { From 86723b51ae131364e4efaa4d8818a6bac26a9dac Mon Sep 17 00:00:00 2001 From: David Gibson Date: Tue, 22 Mar 2022 17:10:55 +1100 Subject: [PATCH 16/46] virtcontainers: Remove unused install/uninstall targets We've now removed the need to install the mock hook binary for unit tests. However, it turns out that managing that was the *only* thing that the install and uninstall targets in the virtcontainers Makefile handled. So, remove them. 
Signed-off-by: David Gibson --- src/runtime/virtcontainers/Makefile | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/src/runtime/virtcontainers/Makefile b/src/runtime/virtcontainers/Makefile index 1665577c8..a255741b0 100644 --- a/src/runtime/virtcontainers/Makefile +++ b/src/runtime/virtcontainers/Makefile @@ -5,8 +5,6 @@ # PREFIX := /usr -BIN_DIR := $(PREFIX)/bin -TEST_BIN_DIR := $(BIN_DIR)/virtcontainers/bin/test HOOK_DIR := hook/mock HOOK_BIN := hook MK_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) @@ -46,29 +44,6 @@ check-go-static: check-go-test: bash $(MK_DIR)/../../../ci/go-test.sh -# -# Install -# - -define INSTALL_TEST_EXEC - install -D $1 $(TEST_BIN_DIR)/ || exit 1; -endef - -install: - @mkdir -p $(TEST_BIN_DIR) - $(call INSTALL_TEST_EXEC,$(HOOK_DIR)/$(HOOK_BIN)) - -# -# Uninstall -# - -define UNINSTALL_TEST_EXEC - rm -f $(call FILE_SAFE_TO_REMOVE,$(TEST_BIN_DIR)/$1) || exit 1; -endef - -uninstall: - $(call UNINSTALL_TEST_EXEC,$(HOOK_BIN)) - # # Clean # @@ -92,6 +67,4 @@ clean: check \ check-go-static \ check-go-test \ - install \ - uninstall \ clean From c77e34de33b44897a79b50c7ad59f26abccccb5d Mon Sep 17 00:00:00 2001 From: David Gibson Date: Tue, 22 Mar 2022 17:14:40 +1100 Subject: [PATCH 17/46] runtime: Move mock hook source src/runtime/virtcontainers/hook/mock contains a simple example hook in Go. The only thing this is used for is for some tests in src/runtime/pkg/katautils/hook_test.go. It doesn't really have anything to do with the rest of the virtcontainers package. So, move it next to the test code that uses it. Signed-off-by: David Gibson --- src/runtime/Makefile | 2 +- src/runtime/pkg/katautils/hook_test.go | 2 +- src/runtime/pkg/katautils/mockhook/.gitignore | 1 + src/runtime/pkg/katautils/mockhook/Makefile | 21 +++++++++++++++++++ .../mock => pkg/katautils/mockhook}/hook.go | 0 src/runtime/virtcontainers/Makefile | 13 ++---------- 6 files changed, 26 insertions(+), 13 deletions(-) create mode 100644 src/runtime/pkg/katautils/mockhook/.gitignore create mode 100644 src/runtime/pkg/katautils/mockhook/Makefile rename src/runtime/{virtcontainers/hook/mock => pkg/katautils/mockhook}/hook.go (100%) diff --git a/src/runtime/Makefile b/src/runtime/Makefile index f87e72e58..8f1856821 100644 --- a/src/runtime/Makefile +++ b/src/runtime/Makefile @@ -592,7 +592,7 @@ generate-config: $(CONFIGS) test: hook go-test hook: - make -C virtcontainers hook + make -C pkg/katautils/mockhook go-test: $(GENERATED_FILES) go clean -testcache diff --git a/src/runtime/pkg/katautils/hook_test.go b/src/runtime/pkg/katautils/hook_test.go index 50452974b..9ef48499b 100644 --- a/src/runtime/pkg/katautils/hook_test.go +++ b/src/runtime/pkg/katautils/hook_test.go @@ -20,7 +20,7 @@ import ( var testKeyHook = "test-key" var testContainerIDHook = "test-container-id" var testControllerIDHook = "test-controller-id" -var testBinHookPath = "../../virtcontainers/hook/mock/hook" +var testBinHookPath = "mockhook/hook" var testBundlePath = "/test/bundle" func getMockHookBinPath() string { diff --git a/src/runtime/pkg/katautils/mockhook/.gitignore b/src/runtime/pkg/katautils/mockhook/.gitignore new file mode 100644 index 000000000..6d71f66fb --- /dev/null +++ b/src/runtime/pkg/katautils/mockhook/.gitignore @@ -0,0 +1 @@ +hook diff --git a/src/runtime/pkg/katautils/mockhook/Makefile b/src/runtime/pkg/katautils/mockhook/Makefile new file mode 100644 index 000000000..2ca654438 --- /dev/null +++ b/src/runtime/pkg/katautils/mockhook/Makefile @@ -0,0 +1,21 @@ +# Copyright 
Red Hat. +# +# SPDX-License-Identifier: Apache-2.0 +# + +BIN = hook +SRC = hook.go + +V = @ +Q = $(V:1=) +QUIET_BUILD = $(Q:@=@echo ' BUILD '$@;) + +BUILDFLAGS = + +all: $(BIN) + +$(BIN): $(SRC) + $(QUIET_BUILD)go build $(BUILDFLAGS) -o $@ $^ + +clean: + rm -f $(BIN) diff --git a/src/runtime/virtcontainers/hook/mock/hook.go b/src/runtime/pkg/katautils/mockhook/hook.go similarity index 100% rename from src/runtime/virtcontainers/hook/mock/hook.go rename to src/runtime/pkg/katautils/mockhook/hook.go diff --git a/src/runtime/virtcontainers/Makefile b/src/runtime/virtcontainers/Makefile index a255741b0..14e9af1ab 100644 --- a/src/runtime/virtcontainers/Makefile +++ b/src/runtime/virtcontainers/Makefile @@ -5,8 +5,6 @@ # PREFIX := /usr -HOOK_DIR := hook/mock -HOOK_BIN := hook MK_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) GOBUILD_FLAGS := -mod=vendor @@ -22,16 +20,11 @@ QUIET_GOBUILD = $(Q:@=@echo ' GOBUILD '$@;) # Build # -all: build binaries +all: build build: $(QUIET_GOBUILD)go build $(GOBUILD_FLAGS) $(go list ./... | grep -v /vendor/) -hook: - $(QUIET_GOBUILD)go build $(GOBUILD_FLAGS) -o $(HOOK_DIR)/$@ $(HOOK_DIR)/*.go - -binaries: hook - # # Tests # @@ -54,7 +47,7 @@ define FILE_SAFE_TO_REMOVE = $(shell test -e "$(1)" && test "$(1)" != "/" && echo "$(1)") endef -CLEAN_FILES += $(HOOK_DIR)/$(HOOK_BIN) +CLEAN_FILES += clean: rm -f $(foreach f,$(CLEAN_FILES),$(call FILE_SAFE_TO_REMOVE,$(f))) @@ -62,8 +55,6 @@ clean: .PHONY: \ all \ build \ - hook \ - binaries \ check \ check-go-static \ check-go-test \ From ecf71d6dd64d8e9b288eef7e85c1f178f06de9a5 Mon Sep 17 00:00:00 2001 From: Gabriela Cervantes Date: Wed, 23 Mar 2022 15:50:37 +0000 Subject: [PATCH 18/46] docs: Remove VPP documentation This PR is removing VPP documentation as it is not longer valid with kata 2.x, all the instructions were used for kata 1.x Fixes #3946 Signed-off-by: Gabriela Cervantes --- docs/README.md | 1 - docs/use-cases/using-vpp-and-kata.md | 76 ---------------------------- 2 files changed, 77 deletions(-) delete mode 100644 docs/use-cases/using-vpp-and-kata.md diff --git a/docs/README.md b/docs/README.md index 529d772d9..96bf2622b 100644 --- a/docs/README.md +++ b/docs/README.md @@ -30,7 +30,6 @@ See the [how-to documentation](how-to). * [GPU Passthrough with Kata](./use-cases/GPU-passthrough-and-Kata.md) * [SR-IOV with Kata](./use-cases/using-SRIOV-and-kata.md) * [Intel QAT with Kata](./use-cases/using-Intel-QAT-and-kata.md) -* [VPP with Kata](./use-cases/using-vpp-and-kata.md) * [SPDK vhost-user with Kata](./use-cases/using-SPDK-vhostuser-and-kata.md) * [Intel SGX with Kata](./use-cases/using-Intel-SGX-and-kata.md) diff --git a/docs/use-cases/using-vpp-and-kata.md b/docs/use-cases/using-vpp-and-kata.md deleted file mode 100644 index ea09c50f5..000000000 --- a/docs/use-cases/using-vpp-and-kata.md +++ /dev/null @@ -1,76 +0,0 @@ -# Setup to run VPP - -The Data Plane Development Kit (DPDK) is a set of libraries and drivers for -fast packet processing. Vector Packet Processing (VPP) is a platform -extensible framework that provides out-of-the-box production quality -switch and router functionality. VPP is a high performance packet-processing -stack that can run on commodity CPUs. Enabling VPP with DPDK support can -yield significant performance improvements over a Linux\* bridge providing a -switch with DPDK VHOST-USER ports. - -For more information about VPP visit their [wiki](https://wiki.fd.io/view/VPP). 
- -## Install and configure Kata Containers - -Follow the [Kata Containers setup instructions](../Developer-Guide.md). - -In order to make use of VHOST-USER based interfaces, the container needs to be backed -by huge pages. `HugePages` support is required for the large memory pool allocation used for -DPDK packet buffers. This is a feature which must be configured within the Linux Kernel. See -[the DPDK documentation](https://doc.dpdk.org/guides/linux_gsg/sys_reqs.html#use-of-hugepages-in-the-linux-environment) -for details on how to enable for the host. After enabling huge pages support on the host system, -update the Kata configuration to enable huge page support in the guest kernel: - -``` -$ sudo sed -i -e 's/^# *\(enable_hugepages\).*=.*$/\1 = true/g' /usr/share/defaults/kata-containers/configuration.toml -``` - - -## Install VPP - -Follow the [VPP installation instructions](https://wiki.fd.io/view/VPP/Installing_VPP_binaries_from_packages). - -After a successful installation, your host system is ready to start -connecting Kata Containers with VPP bridges. - -### Install the VPP Docker\* plugin - -To create a Docker network and connect Kata Containers easily to that network through -Docker, install a VPP Docker plugin. - -To install the plugin, follow the [plugin installation instructions](https://github.com/clearcontainers/vpp). - -This VPP plugin allows the creation of a VPP network. Every container added -to this network is connected through an L2 bridge-domain provided by VPP. - -## Example: Launch two Kata Containers using VPP - -To use VPP, use Docker to create a network that makes use of VPP. -For example: - -``` -$ sudo docker network create -d=vpp --ipam-driver=vpp --subnet=192.168.1.0/24 --gateway=192.168.1.1 vpp_net -``` - -Test connectivity by launching two containers: -``` -$ sudo docker run --runtime=kata-runtime --net=vpp_net --ip=192.168.1.2 --mac-address=CA:FE:CA:FE:01:02 -it busybox bash -c "ip a; ip route; sleep 300" - -$ sudo docker run --runtime=kata-runtime --net=vpp_net --ip=192.168.1.3 --mac-address=CA:FE:CA:FE:01:03 -it busybox bash -c "ip a; ip route; ping 192.168.1.2" -``` - -These commands setup two Kata Containers connected via a VPP L2 bridge -domain. The first of the two VMs displays the networking details and then -sleeps providing a period of time for it to be pinged. The second -VM displays its networking details and then pings the first VM, verifying -connectivity between them. - -After verifying connectivity, cleanup with the following commands: - -``` -$ sudo docker kill $(sudo docker ps --no-trunc -aq) -$ sudo docker rm $(sudo docker ps --no-trunc -aq) -$ sudo docker network rm vpp_net -$ sudo service vpp stop -``` - From 9a5b4770620ef4e10c7c9d8c6c900a1f74c00e39 Mon Sep 17 00:00:00 2001 From: Gabriela Cervantes Date: Wed, 23 Mar 2022 17:58:49 +0000 Subject: [PATCH 19/46] docs: Update vcpu handling document This PR updates the vcpu handling document by removing docker information which is not longer being used in kata 2.x and leaving only k8s information. 
Fixes #3950 Signed-off-by: Gabriela Cervantes --- docs/design/vcpu-handling.md | 64 ++++++++---------------------------- 1 file changed, 13 insertions(+), 51 deletions(-) diff --git a/docs/design/vcpu-handling.md b/docs/design/vcpu-handling.md index d5f5e3b10..d4059d09c 100644 --- a/docs/design/vcpu-handling.md +++ b/docs/design/vcpu-handling.md @@ -2,24 +2,15 @@ ## Default number of virtual CPUs -Before starting a container, the [runtime][6] reads the `default_vcpus` option -from the [configuration file][7] to determine the number of virtual CPUs +Before starting a container, the [runtime][4] reads the `default_vcpus` option +from the [configuration file][5] to determine the number of virtual CPUs (vCPUs) needed to start the virtual machine. By default, `default_vcpus` is equal to 1 for fast boot time and a small memory footprint per virtual machine. Be aware that increasing this value negatively impacts the virtual machine's boot time and memory footprint. In general, we recommend that you do not edit this variable, unless you know what are you doing. If your container needs more than one vCPU, use -[docker `--cpus`][1], [docker update][4], or [Kubernetes `cpu` limits][2] to -assign more resources. - -*Docker* - -```sh -$ docker run --name foo -ti --cpus 2 debian bash -$ docker update --cpus 4 foo -``` - +[Kubernetes `cpu` limits][1] to assign more resources. *Kubernetes* @@ -49,7 +40,7 @@ $ sudo -E kubectl create -f ~/cpu-demo.yaml ## Virtual CPUs and Kubernetes pods A Kubernetes pod is a group of one or more containers, with shared storage and -network, and a specification for how to run the containers [[specification][3]]. +network, and a specification for how to run the containers [[specification][2]]. In Kata Containers this group of containers, which is called a sandbox, runs inside the same virtual machine. If you do not specify a CPU constraint, the runtime does not add more vCPUs and the container is not placed inside a CPU cgroup. @@ -73,13 +64,7 @@ constraints with each container trying to consume 100% of vCPU, the resources divide in two parts, 50% of vCPU for each container because your virtual machine does not have enough resources to satisfy containers needs. If you want to give access to a greater or lesser portion of vCPUs to a specific container, -use [`docker --cpu-shares`][1] or [Kubernetes `cpu` requests][2]. - -*Docker* - -```sh -$ docker run -ti --cpus-shares=512 debian bash -``` +use [Kubernetes `cpu` requests][1]. *Kubernetes* @@ -109,10 +94,9 @@ $ sudo -E kubectl create -f ~/cpu-demo.yaml Before running containers without CPU constraint, consider that your containers are not running alone. Since your containers run inside a virtual machine other processes use the vCPUs as well (e.g. `systemd` and the Kata Containers -[agent][5]). In general, we recommend setting `default_vcpus` equal to 1 to +[agent][3]). In general, we recommend setting `default_vcpus` equal to 1 to allow non-container processes to run on this vCPU and to specify a CPU -constraint for each container. If your container is already running and needs -more vCPUs, you can add more using [docker update][4]. +constraint for each container. ## Container with CPU constraint @@ -121,7 +105,7 @@ constraints using the following formula: `vCPUs = ceiling( quota / period )`, wh `quota` specifies the number of microseconds per CPU Period that the container is guaranteed CPU access and `period` specifies the CPU CFS scheduler period of time in microseconds. 
The result determines the number of vCPU to hot plug into the -virtual machine. Once the vCPUs have been added, the [agent][5] places the +virtual machine. Once the vCPUs have been added, the [agent][3] places the container inside a CPU cgroup. This placement allows the container to use only its assigned resources. @@ -138,25 +122,6 @@ the virtual machine starts with 8 vCPUs and 1 vCPUs is added and assigned to the container. Non-container processes might be able to use 8 vCPUs but they use a maximum 1 vCPU, hence 7 vCPUs might not be used. - -*Container without CPU constraint* - -```sh -$ docker run -ti debian bash -c "nproc; cat /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_*" -1 # number of vCPUs -100000 # cfs period --1 # cfs quota -``` - -*Container with CPU constraint* - -```sh -docker run --cpus 4 -ti debian bash -c "nproc; cat /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_*" -5 # number of vCPUs -100000 # cfs period -400000 # cfs quota -``` - ## Virtual CPU handling without hotplug In some cases, the hardware and/or software architecture being utilized does not support @@ -183,11 +148,8 @@ the container's `spec` will provide the sizing information directly. If these ar calculate the number of CPUs required for the workload and augment this by `default_vcpus` configuration option, and use this for the virtual machine size. - -[1]: https://docs.docker.com/config/containers/resource_constraints/#cpu -[2]: https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource -[3]: https://kubernetes.io/docs/concepts/workloads/pods/pod/ -[4]: https://docs.docker.com/engine/reference/commandline/update/ -[5]: ../../src/agent -[6]: ../../src/runtime -[7]: ../../src/runtime/README.md#configuration +[1]: https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource +[2]: https://kubernetes.io/docs/concepts/workloads/pods/pod/ +[3]: ../../src/agent +[4]: ../../src/runtime +[5]: ../../src/runtime/README.md#configuration From 459f4bfedb25949d1e977479b2f70fef9ba3308d Mon Sep 17 00:00:00 2001 From: Julio Montes Date: Wed, 23 Mar 2022 12:59:13 -0600 Subject: [PATCH 20/46] osbuilder/qat: use centos as base OS move away from ubuntu, since now it's easier to build using CentOS as base OS fixes #3936 Signed-off-by: Julio Montes --- tools/osbuilder/dockerfiles/QAT/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/osbuilder/dockerfiles/QAT/Dockerfile b/tools/osbuilder/dockerfiles/QAT/Dockerfile index c0113569a..3455e4fa1 100644 --- a/tools/osbuilder/dockerfiles/QAT/Dockerfile +++ b/tools/osbuilder/dockerfiles/QAT/Dockerfile @@ -16,7 +16,7 @@ ENV QAT_DRIVER_URL "https://downloadmirror.intel.com/649693/${QAT_DRIVER_VER}" ENV QAT_CONFIGURE_OPTIONS "--enable-icp-sriov=guest" ENV KATA_REPO_VERSION "main" ENV AGENT_VERSION "" -ENV ROOTFS_OS "ubuntu" +ENV ROOTFS_OS "centos" ENV OUTPUT_DIR "/output" RUN dnf install -y \ From 19f372b5f582e65b5d34faf7153f0d6e5b5ce794 Mon Sep 17 00:00:00 2001 From: Feng Wang Date: Thu, 24 Mar 2022 07:51:07 -0700 Subject: [PATCH 21/46] runtime: Add more debug logs for container io stream copy This can help debugging container lifecycle issues Fixes: #3913 Signed-off-by: Feng Wang --- src/runtime/pkg/containerd-shim-v2/start.go | 14 ++++++++++++-- src/runtime/pkg/containerd-shim-v2/stream.go | 10 +++++++++- src/runtime/pkg/containerd-shim-v2/stream_test.go | 3 ++- src/runtime/pkg/containerd-shim-v2/wait.go | 11 ++++++++++- 4 files changed, 33 insertions(+), 5 deletions(-) diff --git a/src/runtime/pkg/containerd-shim-v2/start.go 
b/src/runtime/pkg/containerd-shim-v2/start.go index 5eede489f..65bfe6d9a 100644 --- a/src/runtime/pkg/containerd-shim-v2/start.go +++ b/src/runtime/pkg/containerd-shim-v2/start.go @@ -8,12 +8,14 @@ package containerdshim import ( "context" "fmt" + "github.com/sirupsen/logrus" "github.com/containerd/containerd/api/types/task" "github.com/kata-containers/kata-containers/src/runtime/pkg/katautils" ) func startContainer(ctx context.Context, s *service, c *container) (retErr error) { + shimLog.WithField("container", c.id).Debug("start container") defer func() { if retErr != nil { // notify the wait goroutine to continue @@ -78,7 +80,8 @@ func startContainer(ctx context.Context, s *service, c *container) (retErr error return err } c.ttyio = tty - go ioCopy(c.exitIOch, c.stdinCloser, tty, stdin, stdout, stderr) + + go ioCopy(shimLog.WithField("container", c.id), c.exitIOch, c.stdinCloser, tty, stdin, stdout, stderr) } else { // close the io exit channel, since there is no io for this container, // otherwise the following wait goroutine will hang on this channel. @@ -94,6 +97,10 @@ func startContainer(ctx context.Context, s *service, c *container) (retErr error } func startExec(ctx context.Context, s *service, containerID, execID string) (e *exec, retErr error) { + shimLog.WithFields(logrus.Fields{ + "container": containerID, + "exec": execID, + }).Debug("start container execution") // start an exec c, err := s.getContainer(containerID) if err != nil { @@ -140,7 +147,10 @@ func startExec(ctx context.Context, s *service, containerID, execID string) (e * } execs.ttyio = tty - go ioCopy(execs.exitIOch, execs.stdinCloser, tty, stdin, stdout, stderr) + go ioCopy(shimLog.WithFields(logrus.Fields{ + "container": c.id, + "exec": execID, + }), execs.exitIOch, execs.stdinCloser, tty, stdin, stdout, stderr) go wait(ctx, s, c, execID) diff --git a/src/runtime/pkg/containerd-shim-v2/stream.go b/src/runtime/pkg/containerd-shim-v2/stream.go index f976c49ef..58045359b 100644 --- a/src/runtime/pkg/containerd-shim-v2/stream.go +++ b/src/runtime/pkg/containerd-shim-v2/stream.go @@ -12,6 +12,7 @@ import ( "syscall" "github.com/containerd/fifo" + "github.com/sirupsen/logrus" ) // The buffer size used to specify the buffer for IO streams copy @@ -86,18 +87,20 @@ func newTtyIO(ctx context.Context, stdin, stdout, stderr string, console bool) ( return ttyIO, nil } -func ioCopy(exitch, stdinCloser chan struct{}, tty *ttyIO, stdinPipe io.WriteCloser, stdoutPipe, stderrPipe io.Reader) { +func ioCopy(shimLog *logrus.Entry, exitch, stdinCloser chan struct{}, tty *ttyIO, stdinPipe io.WriteCloser, stdoutPipe, stderrPipe io.Reader) { var wg sync.WaitGroup if tty.Stdin != nil { wg.Add(1) go func() { + shimLog.Debug("stdin io stream copy started") p := bufPool.Get().(*[]byte) defer bufPool.Put(p) io.CopyBuffer(stdinPipe, tty.Stdin, *p) // notify that we can close process's io safely. 
close(stdinCloser) wg.Done() + shimLog.Debug("stdin io stream copy exited") }() } @@ -105,6 +108,7 @@ func ioCopy(exitch, stdinCloser chan struct{}, tty *ttyIO, stdinPipe io.WriteClo wg.Add(1) go func() { + shimLog.Debug("stdout io stream copy started") p := bufPool.Get().(*[]byte) defer bufPool.Put(p) io.CopyBuffer(tty.Stdout, stdoutPipe, *p) @@ -113,20 +117,24 @@ func ioCopy(exitch, stdinCloser chan struct{}, tty *ttyIO, stdinPipe io.WriteClo // close stdin to make the other routine stop tty.Stdin.Close() } + shimLog.Debug("stdout io stream copy exited") }() } if tty.Stderr != nil && stderrPipe != nil { wg.Add(1) go func() { + shimLog.Debug("stderr io stream copy started") p := bufPool.Get().(*[]byte) defer bufPool.Put(p) io.CopyBuffer(tty.Stderr, stderrPipe, *p) wg.Done() + shimLog.Debug("stderr io stream copy exited") }() } wg.Wait() tty.close() close(exitch) + shimLog.Debug("all io stream copy goroutines exited") } diff --git a/src/runtime/pkg/containerd-shim-v2/stream_test.go b/src/runtime/pkg/containerd-shim-v2/stream_test.go index 01b988b1a..9b848e52b 100644 --- a/src/runtime/pkg/containerd-shim-v2/stream_test.go +++ b/src/runtime/pkg/containerd-shim-v2/stream_test.go @@ -7,6 +7,7 @@ package containerdshim import ( "context" + "github.com/sirupsen/logrus" "io" "os" "path/filepath" @@ -179,7 +180,7 @@ func TestIoCopy(t *testing.T) { defer tty.close() // start the ioCopy threads : copy from src to dst - go ioCopy(exitioch, stdinCloser, tty, dstInW, srcOutR, srcErrR) + go ioCopy(logrus.WithContext(context.Background()), exitioch, stdinCloser, tty, dstInW, srcOutR, srcErrR) var firstW, secondW, thirdW io.WriteCloser var firstR, secondR, thirdR io.Reader diff --git a/src/runtime/pkg/containerd-shim-v2/wait.go b/src/runtime/pkg/containerd-shim-v2/wait.go index c6587c16c..0eeac976e 100644 --- a/src/runtime/pkg/containerd-shim-v2/wait.go +++ b/src/runtime/pkg/containerd-shim-v2/wait.go @@ -31,12 +31,17 @@ func wait(ctx context.Context, s *service, c *container, execID string) (int32, if execID == "" { //wait until the io closed, then wait the container <-c.exitIOch + shimLog.WithField("container", c.id).Debug("The container io streams closed") } else { execs, err = c.getExec(execID) if err != nil { return exitCode255, err } <-execs.exitIOch + shimLog.WithFields(logrus.Fields{ + "container": c.id, + "exec": execID, + }).Debug("The container process io streams closed") //This wait could be triggered before exec start which //will get the exec's id, thus this assignment must after //the exec exit, to make sure it get the exec's id. @@ -82,13 +87,17 @@ func wait(ctx context.Context, s *service, c *container, execID string) (int32, c.exitTime = timeStamp c.exitCh <- uint32(ret) - + shimLog.WithField("container", c.id).Debug("The container status is StatusStopped") } else { execs.status = task.StatusStopped execs.exitCode = ret execs.exitTime = timeStamp execs.exitCh <- uint32(ret) + shimLog.WithFields(logrus.Fields{ + "container": c.id, + "exec": execID, + }).Debug("The container exec status is StatusStopped") } s.mu.Unlock() From bad859d2f8d51aa72e563c30bac638c7415a5329 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Wed, 2 Mar 2022 16:24:42 +1100 Subject: [PATCH 22/46] tools/packaging/kata-deploy/local-build: Add build to gitignore This directory consists entirely of files built during a make kata-tarball, so it should not be committed to the tree. A symbolic link to this directory might be created during 'make tarball', ignore it as well. 
Signed-off-by: David Gibson [greg: - rearranged the subject to make the subsystem checker happy - also ignore the symbolic link created by `kata-deploy-binaries-in-docker.sh`] Signed-off-by: Greg Kurz --- .gitignore | 1 + tools/packaging/kata-deploy/local-build/.gitignore | 1 + 2 files changed, 2 insertions(+) create mode 100644 tools/packaging/kata-deploy/local-build/.gitignore diff --git a/.gitignore b/.gitignore index 529bab04a..ce97c7e98 100644 --- a/.gitignore +++ b/.gitignore @@ -9,4 +9,5 @@ src/agent/src/version.rs src/agent/kata-agent.service src/agent/protocols/src/*.rs !src/agent/protocols/src/lib.rs +build diff --git a/tools/packaging/kata-deploy/local-build/.gitignore b/tools/packaging/kata-deploy/local-build/.gitignore new file mode 100644 index 000000000..567609b12 --- /dev/null +++ b/tools/packaging/kata-deploy/local-build/.gitignore @@ -0,0 +1 @@ +build/ From 1ed7da8fc75426e71cf6d68648ab6ab4576f0a54 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Wed, 2 Mar 2022 16:26:22 +1100 Subject: [PATCH 23/46] packaging: Eliminate TTY_OPT and NO_TTY variables in kata-deploy NO_TTY configured whether to add the -t option to docker run. It makes no sense for the caller to configure this, since whether you need it depends on the commands you're running. Since the point here is to run non-interactive build scripts, we don't need -t, or -i either. Signed-off-by: David Gibson Signed-off-by: Greg Kurz --- tools/packaging/kata-deploy/local-build/Makefile | 2 +- .../local-build/kata-deploy-binaries-in-docker.sh | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/tools/packaging/kata-deploy/local-build/Makefile b/tools/packaging/kata-deploy/local-build/Makefile index 68e45d447..f04d9b658 100644 --- a/tools/packaging/kata-deploy/local-build/Makefile +++ b/tools/packaging/kata-deploy/local-build/Makefile @@ -16,7 +16,7 @@ endef kata-tarball: | all-parallel merge-builds all-parallel: - ${MAKE} -f $(MK_PATH) all -j$$(( $$(nproc) - 1 )) NO_TTY="true" V= + ${MAKE} -f $(MK_PATH) all -j$$(( $$(nproc) - 1 )) V= all: cloud-hypervisor-tarball \ firecracker-tarball \ diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh index 68fba1b5f..c1fc1538a 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh @@ -16,10 +16,6 @@ kata_deploy_create="${script_dir}/kata-deploy-binaries.sh" uid=$(id -u ${USER}) gid=$(id -g ${USER}) -TTY_OPT="-i" -NO_TTY="${NO_TTY:-false}" -[ -t 1 ] && [ "${NO_TTY}" == "false" ] && TTY_OPT="-it" - if [ "${script_dir}" != "${PWD}" ]; then ln -sf "${script_dir}/build" "${PWD}/build" fi @@ -34,7 +30,7 @@ docker build -q -t build-kata-deploy \ --build-arg GID=${gid} \ "${script_dir}/dockerbuild/" -docker run ${TTY_OPT} \ +docker run \ -v /var/run/docker.sock:/var/run/docker.sock \ --user ${uid}:${gid} \ --env USER=${USER} -v "${kata_dir}:${kata_dir}" \ From 154c8b03d3763bfe100155105a7db7e889bf2a38 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Fri, 25 Mar 2022 10:30:52 +0100 Subject: [PATCH 24/46] tools/packaging/kata-deploy: Copy install_yq.sh in a dedicated script 'make kata-tarball' sometimes fails early with: cp: cannot create regular file '[...]/tools/packaging/kata-deploy/local-build/dockerbuild/install_yq.sh': File exists This happens because all assets are built in parallel using the same `kata-deploy-binaries-in-docker.sh` script, and thus all 
try to copy the `install_yq.sh` script to the same location with the `cp` command. This is a well known race condition that cannot be avoided without serialization of `cp` invocations. Move the copying of `install_yq.sh` to a separate script and ensure it is called *before* parallel builds. Make the presence of the copy a prerequisite for each sub-build so that they still can be triggered individually. Update the GH release workflow to also call this script before calling `kata-deploy-binaries-in-docker.sh`. Fixes #3756 Signed-off-by: Greg Kurz --- .github/workflows/release.yaml | 1 + tools/packaging/kata-deploy/local-build/Makefile | 7 +++++-- .../kata-deploy-binaries-in-docker.sh | 4 ---- .../local-build/kata-deploy-copy-yq-installer.sh | 16 ++++++++++++++++ 4 files changed, 22 insertions(+), 6 deletions(-) create mode 100755 tools/packaging/kata-deploy/local-build/kata-deploy-copy-yq-installer.sh diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index ca2db149c..c87d5b3a9 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -26,6 +26,7 @@ jobs: - name: Build ${{ matrix.asset }} run: | + ./tools/packaging/kata-deploy/local-build/kata-deploy-copy-yq-installer.sh ./tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh --build="${KATA_ASSET}" build_dir=$(readlink -f build) # store-artifact does not work with symlink diff --git a/tools/packaging/kata-deploy/local-build/Makefile b/tools/packaging/kata-deploy/local-build/Makefile index f04d9b658..ac3a77714 100644 --- a/tools/packaging/kata-deploy/local-build/Makefile +++ b/tools/packaging/kata-deploy/local-build/Makefile @@ -15,7 +15,10 @@ endef kata-tarball: | all-parallel merge-builds -all-parallel: +$(MK_DIR)/dockerbuild/install_yq.sh: + $(MK_DIR)/kata-deploy-copy-yq-installer.sh + +all-parallel: $(MK_DIR)/dockerbuild/install_yq.sh ${MAKE} -f $(MK_PATH) all -j$$(( $$(nproc) - 1 )) V= all: cloud-hypervisor-tarball \ @@ -26,7 +29,7 @@ all: cloud-hypervisor-tarball \ rootfs-initrd-tarball \ shim-v2-tarball -%-tarball-build: +%-tarball-build: $(MK_DIR)/dockerbuild/install_yq.sh $(call BUILD,$*) cloud-hypervisor-tarball: diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh index c1fc1538a..4035ff9cb 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh @@ -20,10 +20,6 @@ if [ "${script_dir}" != "${PWD}" ]; then ln -sf "${script_dir}/build" "${PWD}/build" fi -install_yq_script_path="${script_dir}/../../../../ci/install_yq.sh" - -cp "${install_yq_script_path}" "${script_dir}/dockerbuild/install_yq.sh" - docker build -q -t build-kata-deploy \ --build-arg IMG_USER="${USER}" \ --build-arg UID=${uid} \ diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-copy-yq-installer.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-copy-yq-installer.sh new file mode 100755 index 000000000..1271fd882 --- /dev/null +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-copy-yq-installer.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2018-2021 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +set -o errexit +set -o nounset +set -o pipefail +set -o errtrace + +script_dir=$(dirname "$(readlink -f "$0")") +install_yq_script_path="${script_dir}/../../../../ci/install_yq.sh" + +cp "${install_yq_script_path}" 
"${script_dir}/dockerbuild/install_yq.sh" From c27963276baa981abfcf182cbb83ad03b41894b6 Mon Sep 17 00:00:00 2001 From: Julio Montes Date: Fri, 25 Mar 2022 07:04:20 -0600 Subject: [PATCH 25/46] osbuilder/qat: don't pull kata sources if exist don't pull kata sources if they already exist under GOPATH fixes #3965 Signed-off-by: Julio Montes --- tools/osbuilder/dockerfiles/QAT/run.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/osbuilder/dockerfiles/QAT/run.sh b/tools/osbuilder/dockerfiles/QAT/run.sh index bf333d1a5..b30c34c8b 100755 --- a/tools/osbuilder/dockerfiles/QAT/run.sh +++ b/tools/osbuilder/dockerfiles/QAT/run.sh @@ -39,9 +39,9 @@ grab_kata_repos() # Check out all the repos we will use now, so we can try and ensure they use the specified branch # Only check out the branch needed, and make it shallow and thus space/bandwidth efficient # Use a green prompt with white text for easy viewing - /bin/echo -e "\n\e[1;42mClone and checkout Kata repos\e[0m" - git clone --single-branch --branch $KATA_REPO_VERSION --depth=1 https://${kata_repo} ${kata_repo_path} - git clone --single-branch --branch $KATA_REPO_VERSION --depth=1 https://${tests_repo} ${tests_repo_path} + /bin/echo -e "\n\e[1;42mClone and checkout Kata repos\e[0m" + [ -d "${kata_repo_path}" ] || git clone --single-branch --branch $KATA_REPO_VERSION --depth=1 https://${kata_repo} ${kata_repo_path} + [ -d "${tests_repo_path}" ] || git clone --single-branch --branch $KATA_REPO_VERSION --depth=1 https://${tests_repo} ${tests_repo_path} } configure_kernel() @@ -164,6 +164,7 @@ main() done shift $((OPTIND-1)) + sudo chown -R qatbuilder:qatbuilder /home/qatbuilder grab_qat_drivers grab_kata_repos configure_kernel From ff17c756d2cb777ce63af2c9aa5a3dd7dc5e2eba Mon Sep 17 00:00:00 2001 From: Jakob Naucke Date: Tue, 11 Jan 2022 19:09:56 +0100 Subject: [PATCH 26/46] runtime: Allow and require no initrd for SE Previously, it was not permitted to have neither an initrd nor an image. However, this is the exact config to use for Secure Execution, where the initrd is part of the image to be specified as `-kernel`. Require the configuration of no initrd for Secure Execution. 
Also - remove redundant code for image/initrd checking -- no need to check in `newQemuHypervisorConfig` (calling) when it is also checked in `getInitrdAndImage` (called) - use `QemuCCWVirtio` constant when possible Fixes: #3922 Signed-off-by: Jakob Naucke --- src/runtime/pkg/katautils/config.go | 20 ++++++------------- src/runtime/virtcontainers/hypervisor.go | 14 +++++++------ src/runtime/virtcontainers/hypervisor_test.go | 12 +++++++++++ src/runtime/virtcontainers/qemu.go | 2 +- 4 files changed, 27 insertions(+), 21 deletions(-) diff --git a/src/runtime/pkg/katautils/config.go b/src/runtime/pkg/katautils/config.go index 741f32631..3525bc236 100644 --- a/src/runtime/pkg/katautils/config.go +++ b/src/runtime/pkg/katautils/config.go @@ -469,11 +469,13 @@ func (h hypervisor) getInitrdAndImage() (initrd string, image string, err error) image, errImage := h.image() - if image != "" && initrd != "" { + if h.ConfidentialGuest && h.MachineType == vc.QemuCCWVirtio { + if image != "" || initrd != "" { + return "", "", errors.New("Neither the image nor initrd path may be set for Secure Execution") + } + } else if image != "" && initrd != "" { return "", "", errors.New("having both an image and an initrd defined in the configuration file is not supported") - } - - if errInitrd != nil && errImage != nil { + } else if errInitrd != nil && errImage != nil { return "", "", fmt.Errorf("Either initrd or image must be set to a valid path (initrd: %v) (image: %v)", errInitrd, errImage) } @@ -605,16 +607,6 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) { return vc.HypervisorConfig{}, err } - if image != "" && initrd != "" { - return vc.HypervisorConfig{}, - errors.New("having both an image and an initrd defined in the configuration file is not supported") - } - - if image == "" && initrd == "" { - return vc.HypervisorConfig{}, - errors.New("either image or initrd must be defined in the configuration file") - } - firmware, err := h.firmware() if err != nil { return vc.HypervisorConfig{}, err diff --git a/src/runtime/virtcontainers/hypervisor.go b/src/runtime/virtcontainers/hypervisor.go index cebc51570..12725160b 100644 --- a/src/runtime/virtcontainers/hypervisor.go +++ b/src/runtime/virtcontainers/hypervisor.go @@ -527,17 +527,19 @@ func (conf *HypervisorConfig) CheckTemplateConfig() error { } func (conf *HypervisorConfig) Valid() error { - // Kata specific checks. 
Should be done outside the hypervisor if conf.KernelPath == "" { return fmt.Errorf("Missing kernel path") } - if conf.ImagePath == "" && conf.InitrdPath == "" { + if conf.ConfidentialGuest && conf.HypervisorMachineType == QemuCCWVirtio { + if conf.ImagePath != "" || conf.InitrdPath != "" { + fmt.Println("yes, failing") + return fmt.Errorf("Neither the image or initrd path may be set for Secure Execution") + } + } else if conf.ImagePath == "" && conf.InitrdPath == "" { return fmt.Errorf("Missing image and initrd path") - } - - if conf.ImagePath != "" && conf.InitrdPath != "" { + } else if conf.ImagePath != "" && conf.InitrdPath != "" { return fmt.Errorf("Image and initrd path cannot be both set") } @@ -559,7 +561,7 @@ func (conf *HypervisorConfig) Valid() error { if conf.BlockDeviceDriver == "" { conf.BlockDeviceDriver = defaultBlockDriver - } else if conf.BlockDeviceDriver == config.VirtioBlock && conf.HypervisorMachineType == "s390-ccw-virtio" { + } else if conf.BlockDeviceDriver == config.VirtioBlock && conf.HypervisorMachineType == QemuCCWVirtio { conf.BlockDeviceDriver = config.VirtioBlockCCW } diff --git a/src/runtime/virtcontainers/hypervisor_test.go b/src/runtime/virtcontainers/hypervisor_test.go index 86aefde69..ab312b098 100644 --- a/src/runtime/virtcontainers/hypervisor_test.go +++ b/src/runtime/virtcontainers/hypervisor_test.go @@ -144,6 +144,18 @@ func TestHypervisorConfigBothInitrdAndImage(t *testing.T) { testHypervisorConfigValid(t, hypervisorConfig, false) } +func TestHypervisorConfigSecureExecution(t *testing.T) { + hypervisorConfig := &HypervisorConfig{ + KernelPath: fmt.Sprintf("%s/%s", testDir, testKernel), + InitrdPath: fmt.Sprintf("%s/%s", testDir, testInitrd), + ConfidentialGuest: true, + HypervisorMachineType: QemuCCWVirtio, + } + + // Secure Execution should only specify a kernel (encrypted image contains all components) + testHypervisorConfigValid(t, hypervisorConfig, false) +} + func TestHypervisorConfigValidTemplateConfig(t *testing.T) { hypervisorConfig := &HypervisorConfig{ KernelPath: fmt.Sprintf("%s/%s", testDir, testKernel), diff --git a/src/runtime/virtcontainers/qemu.go b/src/runtime/virtcontainers/qemu.go index 242b3645d..b76ba3fe5 100644 --- a/src/runtime/virtcontainers/qemu.go +++ b/src/runtime/virtcontainers/qemu.go @@ -1841,7 +1841,7 @@ func (q *qemu) hotplugAddCPUs(amount uint32) (uint32, error) { threadID := fmt.Sprintf("%d", hc.Properties.Thread) // If CPU type is IBM pSeries, Z or arm virt, we do not set socketID and threadID - if machine.Type == "pseries" || machine.Type == "s390-ccw-virtio" || machine.Type == "virt" { + if machine.Type == "pseries" || machine.Type == QemuCCWVirtio || machine.Type == "virt" { socketID = "" threadID = "" dieID = "" From 0928eb9f4e17ba0fd91a2ff2326e0f89985842dc Mon Sep 17 00:00:00 2001 From: Feng Wang Date: Thu, 24 Mar 2022 07:57:06 -0700 Subject: [PATCH 27/46] agent: Kill the all the container processes of the same cgroup Otherwise the container process might leak and cause an unclean exit Fixes: #3913 Signed-off-by: Feng Wang --- src/agent/src/rpc.rs | 95 ++++++++++++++++--- src/runtime/pkg/containerd-shim-v2/service.go | 2 + 2 files changed, 85 insertions(+), 12 deletions(-) diff --git a/src/agent/src/rpc.rs b/src/agent/src/rpc.rs index 232534716..e0c205cbe 100644 --- a/src/agent/src/rpc.rs +++ b/src/agent/src/rpc.rs @@ -19,6 +19,7 @@ use ttrpc::{ }; use anyhow::{anyhow, Context, Result}; +use cgroups::freezer::FreezerState; use oci::{LinuxNamespace, Root, Spec}; use protobuf::{Message, RepeatedField, 
SingularPtrField}; use protocols::agent::{ @@ -40,8 +41,9 @@ use rustjail::specconv::CreateOpts; use nix::errno::Errno; use nix::mount::MsFlags; use nix::sys::signal::Signal; -use nix::sys::stat; +use nix::sys::{signal, stat}; use nix::unistd::{self, Pid}; +use rustjail::cgroups::Manager; use rustjail::process::ProcessOperations; use sysinfo::{DiskExt, System, SystemExt}; @@ -391,7 +393,6 @@ impl AgentService { let cid = req.container_id.clone(); let eid = req.exec_id.clone(); let s = self.sandbox.clone(); - let mut sandbox = s.lock().await; info!( sl!(), @@ -400,27 +401,97 @@ impl AgentService { "exec-id" => eid.clone(), ); - let p = sandbox.find_container_process(cid.as_str(), eid.as_str())?; - - let mut signal = Signal::try_from(req.signal as i32).map_err(|e| { + let mut sig = Signal::try_from(req.signal as i32).map_err(|e| { anyhow!(e).context(format!( "failed to convert {:?} to signal (container-id: {}, exec-id: {})", req.signal, cid, eid )) })?; - - // For container initProcess, if it hasn't installed handler for "SIGTERM" signal, - // it will ignore the "SIGTERM" signal sent to it, thus send it "SIGKILL" signal - // instead of "SIGTERM" to terminate it. - if p.init && signal == Signal::SIGTERM && !is_signal_handled(p.pid, req.signal) { - signal = Signal::SIGKILL; + { + let mut sandbox = s.lock().await; + let p = sandbox.find_container_process(cid.as_str(), eid.as_str())?; + // For container initProcess, if it hasn't installed handler for "SIGTERM" signal, + // it will ignore the "SIGTERM" signal sent to it, thus send it "SIGKILL" signal + // instead of "SIGTERM" to terminate it. + if p.init && sig == Signal::SIGTERM && !is_signal_handled(p.pid, sig as u32) { + sig = Signal::SIGKILL; + } + p.signal(sig)?; } - p.signal(signal)?; + if eid.is_empty() { + // eid is empty, signal all the remaining processes in the container cgroup + info!( + sl!(), + "signal all the remaining processes"; + "container-id" => cid.clone(), + "exec-id" => eid.clone(), + ); + if let Err(err) = self.freeze_cgroup(&cid, FreezerState::Frozen).await { + warn!( + sl!(), + "freeze cgroup failed"; + "container-id" => cid.clone(), + "exec-id" => eid.clone(), + "error" => format!("{:?}", err), + ); + } + + let pids = self.get_pids(&cid).await?; + for pid in pids.iter() { + if let Err(err) = signal::kill(Pid::from_raw(*pid), Some(sig)) { + warn!( + sl!(), + "signal failed"; + "container-id" => cid.clone(), + "exec-id" => eid.clone(), + "pid" => pid, + "error" => format!("{:?}", err), + ); + } + } + if let Err(err) = self.freeze_cgroup(&cid, FreezerState::Thawed).await { + warn!( + sl!(), + "unfreeze cgroup failed"; + "container-id" => cid.clone(), + "exec-id" => eid.clone(), + "error" => format!("{:?}", err), + ); + } + } Ok(()) } + async fn freeze_cgroup(&self, cid: &str, state: FreezerState) -> Result<()> { + let s = self.sandbox.clone(); + let mut sandbox = s.lock().await; + let ctr = sandbox + .get_container(cid) + .ok_or_else(|| anyhow!("Invalid container id {}", cid))?; + let cm = ctr + .cgroup_manager + .as_ref() + .ok_or_else(|| anyhow!("cgroup manager not exist"))?; + cm.freeze(state)?; + Ok(()) + } + + async fn get_pids(&self, cid: &str) -> Result> { + let s = self.sandbox.clone(); + let mut sandbox = s.lock().await; + let ctr = sandbox + .get_container(cid) + .ok_or_else(|| anyhow!("Invalid container id {}", cid))?; + let cm = ctr + .cgroup_manager + .as_ref() + .ok_or_else(|| anyhow!("cgroup manager not exist"))?; + let pids = cm.get_pids()?; + Ok(pids) + } + #[instrument] async fn do_wait_process( &self, 
diff --git a/src/runtime/pkg/containerd-shim-v2/service.go b/src/runtime/pkg/containerd-shim-v2/service.go index 8e20ae82f..72f3f14a0 100644 --- a/src/runtime/pkg/containerd-shim-v2/service.go +++ b/src/runtime/pkg/containerd-shim-v2/service.go @@ -776,6 +776,8 @@ func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *ptypes.E return empty, errors.New("The exec process does not exist") } processStatus = execs.status + } else { + r.All = true } // According to CRI specs, kubelet will call StopPodSandbox() From 800e4a9cfbf9d1cec667db50219eaf66a451bd47 Mon Sep 17 00:00:00 2001 From: Yu Li Date: Sat, 26 Mar 2022 17:41:29 +0800 Subject: [PATCH 28/46] agent: use ms as unit of cputime instead of ticks For the library `procfs`, the unit of values in `CpuTime` is ticks, and we do not know how many ticks per second from metrics because the `tps` in `CpuTime` is private. But there are some implements in `CpuTime` for getting these values, e.g., `user_ms()` for `user`, and `nice_ms()` for `nice`. With these values, accurate time can be obtained. Fixes: #3979 Acked-by: zhaojizhuang <571130360@qq.com> Signed-off-by: Yu Li --- src/agent/src/metrics.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/agent/src/metrics.rs b/src/agent/src/metrics.rs index 1c75fc22d..508d7d2c2 100644 --- a/src/agent/src/metrics.rs +++ b/src/agent/src/metrics.rs @@ -344,25 +344,25 @@ fn set_gauge_vec_meminfo(gv: &prometheus::GaugeVec, meminfo: &procfs::Meminfo) { #[instrument] fn set_gauge_vec_cpu_time(gv: &prometheus::GaugeVec, cpu: &str, cpu_time: &procfs::CpuTime) { gv.with_label_values(&[cpu, "user"]) - .set(cpu_time.user as f64); + .set(cpu_time.user_ms() as f64); gv.with_label_values(&[cpu, "nice"]) - .set(cpu_time.nice as f64); + .set(cpu_time.nice_ms() as f64); gv.with_label_values(&[cpu, "system"]) - .set(cpu_time.system as f64); + .set(cpu_time.system_ms() as f64); gv.with_label_values(&[cpu, "idle"]) - .set(cpu_time.idle as f64); + .set(cpu_time.idle_ms() as f64); gv.with_label_values(&[cpu, "iowait"]) - .set(cpu_time.iowait.unwrap_or(0) as f64); + .set(cpu_time.iowait_ms().unwrap_or(0) as f64); gv.with_label_values(&[cpu, "irq"]) - .set(cpu_time.irq.unwrap_or(0) as f64); + .set(cpu_time.irq_ms().unwrap_or(0) as f64); gv.with_label_values(&[cpu, "softirq"]) - .set(cpu_time.softirq.unwrap_or(0) as f64); + .set(cpu_time.softirq_ms().unwrap_or(0) as f64); gv.with_label_values(&[cpu, "steal"]) - .set(cpu_time.steal.unwrap_or(0) as f64); + .set(cpu_time.steal_ms().unwrap_or(0) as f64); gv.with_label_values(&[cpu, "guest"]) - .set(cpu_time.guest.unwrap_or(0) as f64); + .set(cpu_time.guest_ms().unwrap_or(0) as f64); gv.with_label_values(&[cpu, "guest_nice"]) - .set(cpu_time.guest_nice.unwrap_or(0) as f64); + .set(cpu_time.guest_nice_ms().unwrap_or(0) as f64); } #[instrument] From 66f05c5bcbb6a98a05a6385a143fe0a1bb872214 Mon Sep 17 00:00:00 2001 From: yaoyinnan Date: Mon, 28 Mar 2022 21:21:38 +0800 Subject: [PATCH 29/46] runtime: Remove the explicit VirtioMem set and fix the comment Modify the 2Mib in the comment to 4Mib. VirtioMem is set by configuration file or annotation. And setupVirtioMem is called only when VirtioMem is true. 
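As a side note on the rounding itself (a standalone shell sketch, not part of the patch, using an arbitrary example value): shifting the megabyte count right and then left by two bits clears its two lowest bits, i.e. it rounds the size down to a multiple of 4, which is why the comment now reads 4Mib rather than 2Mib.

    # Round a size in MiB down to a multiple of 4 MiB, mirroring the
    # `>> 2 << 2` expression used in setupVirtioMem.
    size_mb=1030
    echo $(( (size_mb >> 2) << 2 ))   # prints 1028
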
Fixes: #3750 Signed-off-by: yaoyinnan --- src/runtime/virtcontainers/qemu.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/runtime/virtcontainers/qemu.go b/src/runtime/virtcontainers/qemu.go index 242b3645d..94c35c833 100644 --- a/src/runtime/virtcontainers/qemu.go +++ b/src/runtime/virtcontainers/qemu.go @@ -758,7 +758,7 @@ func (q *qemu) setupVirtioMem(ctx context.Context) error { if err != nil { return err } - // backend memory size must be multiple of 2Mib + // backend memory size must be multiple of 4Mib sizeMB := (int(maxMem) - int(q.config.MemorySize)) >> 2 << 2 share, target, memoryBack, err := q.getMemArgs() @@ -783,7 +783,6 @@ func (q *qemu) setupVirtioMem(ctx context.Context) error { err = q.qmpMonitorCh.qmp.ExecMemdevAdd(q.qmpMonitorCh.ctx, memoryBack, "virtiomem", target, sizeMB, share, "virtio-mem-pci", "virtiomem0", addr, bridge.ID) if err == nil { - q.config.VirtioMem = true q.Logger().Infof("Setup %dMB virtio-mem-pci success", sizeMB) } else { help := "" From a931402375918225d6b3d043ba7cd34722e981de Mon Sep 17 00:00:00 2001 From: Gabriela Cervantes Date: Mon, 28 Mar 2022 16:36:22 +0000 Subject: [PATCH 30/46] docs: Remove kata-proxy references in documentation This PR removes the kata-proxy references in VSocks documentation, as this is not a component in kata 2.0 and all the examples that were used belonged to kata 1.x. Fixes #3980 Signed-off-by: Gabriela Cervantes --- docs/design/VSocks.md | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/docs/design/VSocks.md b/docs/design/VSocks.md index 28ef4effb..0271645c2 100644 --- a/docs/design/VSocks.md +++ b/docs/design/VSocks.md @@ -67,22 +67,15 @@ Using a proxy for multiplexing the connections between the VM and the host uses 4.5MB per [POD][2]. In a high density deployment this could add up to GBs of memory that could have been used to host more PODs. When we talk about density each kilobyte matters and it might be the decisive factor between run another -POD or not. For example if you have 500 PODs running in a server, the same -amount of [`kata-proxy`][3] processes will be running and consuming for around -2250MB of RAM. Before making the decision not to use VSOCKs, you should ask +POD or not. Before making the decision not to use VSOCKs, you should ask yourself, how many more containers can run with the memory RAM consumed by the Kata proxies? ### Reliability -[`kata-proxy`][3] is in charge of multiplexing the connections between virtual -machine and host processes, if it dies all connections get broken. For example -if you have a [POD][2] with 10 containers running, if `kata-proxy` dies it would -be impossible to contact your containers, though they would still be running. Since communication via VSOCKs is direct, the only way to lose communication with the containers is if the VM itself or the `containerd-shim-kata-v2` dies, if this happens the containers are removed automatically. [1]: https://wiki.qemu.org/Features/VirtioVsock [2]: ./vcpu-handling.md#virtual-cpus-and-kubernetes-pods -[3]: https://github.com/kata-containers/proxy From 93d03cc0648184f6954a08e93dc9896b850218d1 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Tue, 29 Mar 2022 02:36:27 +0000 Subject: [PATCH 31/46] kata-deploy: fix version bump from -rc to stable In such case, we should bump from "latest" tag rather than from current_version. 
Fixes: #3986 Signed-off-by: Peng Tao --- .../release/update-repository-version.sh | 30 ++++++++----------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/tools/packaging/release/update-repository-version.sh b/tools/packaging/release/update-repository-version.sh index 2fef162e4..2335d4f9e 100755 --- a/tools/packaging/release/update-repository-version.sh +++ b/tools/packaging/release/update-repository-version.sh @@ -182,30 +182,22 @@ bump_repo() { info "Updating kata-deploy / kata-cleanup image tags" local version_to_replace="${current_version}" local replacement="${new_version}" - local removed_files=false - if [ "${target_branch}" == "main" ]; then + local need_commit=false + if [ "${target_branch}" == "main" ];then if [[ "${new_version}" =~ "rc" ]]; then - ## this is the case 2) where we remove te kata-deploy / kata-cleanup stable files + ## We are bumping from alpha to RC, should drop kata-deploy-stable yamls. git rm "${kata_deploy_stable_yaml}" git rm "${kata_cleanup_stable_yaml}" - removed_files=true + need_commit=true fi - - ## these are the cases 1) and 2), where "alpha" and "rc0" are tagged as "latest" - version_to_replace="latest" - replacement="latest" - else - if [[ "${new_version}" =~ "rc" ]]; then - ## we're in a stable branch, coming from "rcX" (tagged as latest) and we're going - ## to "rcX+1", which should still be tagged as latest. + elif [ "${new_version}" != *"rc"* ]; then + ## We are on a stable branch and creating new stable releases. + ## Need to change kata-deploy / kata-cleanup to use the stable tags. + if [[ "${version_to_replace}" =~ "rc" ]]; then + ## Coming from "rcX" so from the latest tag. version_to_replace="latest" - replacement="latest" fi - fi - - if [ "${version_to_replace}" != "${replacement}" ] || [ "${removed_files}" == "true" ]; then - ## this covers case 3), as it has changes on kata-deploy / kata-cleanup files sed -i "s#${registry}:${version_to_replace}#${registry}:${replacement}#g" "${kata_deploy_yaml}" sed -i "s#${registry}:${version_to_replace}#${registry}:${replacement}#g" "${kata_cleanup_yaml}" @@ -214,6 +206,10 @@ bump_repo() { git add "${kata_deploy_yaml}" git add "${kata_cleanup_yaml}" + need_commit=true + fi + + if [ "${need_commit}" == "true" ]; then info "Creating the commit with the kata-deploy changes" local commit_msg="$(generate_kata_deploy_commit $new_version)" git commit -s -m "${commit_msg}" From fb8be96194c62840e173c26c3fce3e5db9539a8d Mon Sep 17 00:00:00 2001 From: bin Date: Tue, 29 Mar 2022 16:39:01 +0800 Subject: [PATCH 32/46] runtime: stop getting OOM events when ttrpc: closed error getOOMEvents is a long-waiting call, it will retry when failed. For cases of agent shutdown, the retry should stop. When the agent hasn't detected agent has died, we can also check whether the error is "ttrpc: closed". 
Fixes: #3815 Signed-off-by: bin --- src/runtime/pkg/containerd-shim-v2/wait.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/runtime/pkg/containerd-shim-v2/wait.go b/src/runtime/pkg/containerd-shim-v2/wait.go index 0eeac976e..8b2ac70fb 100644 --- a/src/runtime/pkg/containerd-shim-v2/wait.go +++ b/src/runtime/pkg/containerd-shim-v2/wait.go @@ -15,7 +15,6 @@ import ( "github.com/containerd/containerd/api/types/task" "github.com/containerd/containerd/mount" "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" "github.com/kata-containers/kata-containers/src/runtime/pkg/oci" ) @@ -156,13 +155,11 @@ func watchOOMEvents(ctx context.Context, s *service) { default: containerID, err := s.sandbox.GetOOMEvent(ctx) if err != nil { - shimLog.WithError(err).Warn("failed to get OOM event from sandbox") - // If the GetOOMEvent call is not implemented, then the agent is most likely an older version, - // stop attempting to get OOM events. - // for rust agent, the response code is not found - if isGRPCErrorCode(codes.NotFound, err) || err.Error() == "Dead agent" { + if err.Error() == "ttrpc: closed" || err.Error() == "Dead agent" { + shimLog.WithError(err).Warn("agent has shutdown, return from watching of OOM events") return } + shimLog.WithError(err).Warn("failed to get OOM event from sandbox") time.Sleep(defaultCheckInterval) continue } From 5e1c30d4846bcc9e2b56fa229ec26d7d75e3ff7b Mon Sep 17 00:00:00 2001 From: bin Date: Tue, 29 Mar 2022 16:59:12 +0800 Subject: [PATCH 33/46] runtime: add logs around sandbox monitor For debugging purposes, add some logs. Fixes: #3815 Signed-off-by: bin --- src/runtime/pkg/containerd-shim-v2/wait.go | 2 ++ src/runtime/virtcontainers/monitor.go | 14 +++++++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/runtime/pkg/containerd-shim-v2/wait.go b/src/runtime/pkg/containerd-shim-v2/wait.go index 8b2ac70fb..0f6c4fe06 100644 --- a/src/runtime/pkg/containerd-shim-v2/wait.go +++ b/src/runtime/pkg/containerd-shim-v2/wait.go @@ -67,6 +67,7 @@ func wait(ctx context.Context, s *service, c *container, execID string) (int32, if c.cType.IsSandbox() { // cancel watcher if s.monitor != nil { + shimLog.WithField("sandbox", s.sandbox.ID()).Info("cancel watcher") s.monitor <- nil } if err = s.sandbox.Stop(ctx, true); err != nil { @@ -110,6 +111,7 @@ func watchSandbox(ctx context.Context, s *service) { return } err := <-s.monitor + shimLog.WithError(err).WithField("sandbox", s.sandbox.ID()).Info("watchSandbox gets an error or stop signal") if err == nil { return } diff --git a/src/runtime/virtcontainers/monitor.go b/src/runtime/virtcontainers/monitor.go index 125636a90..ae7843bea 100644 --- a/src/runtime/virtcontainers/monitor.go +++ b/src/runtime/virtcontainers/monitor.go @@ -18,6 +18,8 @@ const ( watcherChannelSize = 128 ) +var monitorLog = virtLog.WithField("subsystem", "virtcontainers/monitor") + // nolint: govet type monitor struct { watchers []chan error @@ -33,6 +35,9 @@ type monitor struct { } func newMonitor(s *Sandbox) *monitor { + // there should only be one monitor for one sandbox, + // so it's safe to let monitorLog as a global variable. 
+ monitorLog = monitorLog.WithField("sandbox", s.ID()) return &monitor{ sandbox: s, checkInterval: defaultCheckInterval, @@ -72,6 +77,7 @@ func (m *monitor) newWatcher(ctx context.Context) (chan error, error) { } func (m *monitor) notify(ctx context.Context, err error) { + monitorLog.WithError(err).Warn("notify on errors") m.sandbox.agent.markDead(ctx) m.Lock() @@ -85,18 +91,19 @@ func (m *monitor) notify(ctx context.Context, err error) { // but just in case... defer func() { if x := recover(); x != nil { - virtLog.Warnf("watcher closed channel: %v", x) + monitorLog.Warnf("watcher closed channel: %v", x) } }() for _, c := range m.watchers { + monitorLog.WithError(err).Warn("write error to watcher") // throw away message can not write to channel // make it not stuck, the first error is useful. select { case c <- err: default: - virtLog.WithField("channel-size", watcherChannelSize).Warnf("watcher channel is full, throw notify message") + monitorLog.WithField("channel-size", watcherChannelSize).Warnf("watcher channel is full, throw notify message") } } } @@ -104,6 +111,7 @@ func (m *monitor) notify(ctx context.Context, err error) { func (m *monitor) stop() { // wait outside of monitor lock for the watcher channel to exit. defer m.wg.Wait() + monitorLog.Info("stopping monitor") m.Lock() defer m.Unlock() @@ -122,7 +130,7 @@ func (m *monitor) stop() { // but just in case... defer func() { if x := recover(); x != nil { - virtLog.Warnf("watcher closed channel: %v", x) + monitorLog.Warnf("watcher closed channel: %v", x) } }() From 2eb07455d0e7d98b7b64b861dde58fb548d89ced Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Sat, 26 Mar 2022 12:37:27 +0100 Subject: [PATCH 34/46] tools: Add a generate_vendor.sh script MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This script is responsible for generating a tarball with all the rust vendored code that is needed for fully building kata-containers on a disconnected environment. Signed-off-by: Fabiano Fidêncio --- tools/packaging/release/generate_vendor.sh | 53 ++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100755 tools/packaging/release/generate_vendor.sh diff --git a/tools/packaging/release/generate_vendor.sh b/tools/packaging/release/generate_vendor.sh new file mode 100755 index 000000000..bde881108 --- /dev/null +++ b/tools/packaging/release/generate_vendor.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2022 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +set -o errexit +set -o nounset +set -o pipefail + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +script_name="$(basename "${BASH_SOURCE[0]}")" + +# This is very much error prone in case we re-structure our +# repos again, but it's also used in a few other places :-/ +repo_dir="${script_dir}/../../.." 
+ +function usage() { + + cat <> .cargo/config + vendor_dir_list+=" $dir/vendor $dir/.cargo/config" + echo "${vendor_dir_list}" + popd + done + popd + + tar -cvzf ${1} ${vendor_dir_list} +} + +main () { + [ $# -ne 1 ] && usage && exit 0 + create_vendor_tarball ${1} +} + +main "$@" From 3606923ac80aab82f425f158547a0ccc3aa0f9a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Sat, 26 Mar 2022 12:46:16 +0100 Subject: [PATCH 35/46] workflows,release: Ship *all* the rust vendored code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of only vendoring the code needed by the agent, let's ensure we vendor all the needed rust code, and let's do it using the newly introduced enerate_vendor.sh script. Fixes: #3973 Signed-off-by: Fabiano Fidêncio --- .github/workflows/release.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index c87d5b3a9..56cc89538 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -141,13 +141,10 @@ jobs: - uses: actions/checkout@v2 - name: generate-and-upload-tarball run: | - pushd $GITHUB_WORKSPACE/src/agent - cargo vendor >> .cargo/config - popd tag=$(echo $GITHUB_REF | cut -d/ -f3-) tarball="kata-containers-$tag-vendor.tar.gz" pushd $GITHUB_WORKSPACE - tar -cvzf "${tarball}" src/agent/.cargo/config src/agent/vendor + bash -c "tools/packaging/release/generate_vendor.sh ${tarball}" GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}" popd From 0baebd2b374082a1cd6349053445eacdb51fbc73 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Tue, 29 Mar 2022 17:32:29 +0200 Subject: [PATCH 36/46] tools/packaging: Fix usage of kata-deploy-binaries.sh Add missing documentation for -s . Signed-off-by: Greg Kurz --- tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index f335f9ed2..43342b902 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -64,6 +64,7 @@ version: The kata version that will be use to create the tarball options: -h|--help : Show this help +-s : Silent mode (produce output in case of failure only) --build= : all cloud-hypervisor From a779e19beef0fdb50b905b25aead0294d4f0cb19 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Tue, 29 Mar 2022 15:10:38 +0200 Subject: [PATCH 37/46] tools/packaging: Fix error path in 'kata-deploy-binaries.sh -s' `make kata-tarball` relies on `kata-deploy-binaries.sh -s` which silently ignores errors, and you may end up with an incomplete tarball without noticing it because `make`'s exit status is 0. `kata-deploy-binaries.sh` does set the `errexit` option and all the code in the script seems to assume that since it doesn't do error checking. Unfortunately, bash automatically disables `errexit` when calling a function from a conditional pipeline, like done in the `-s` case: if [ "${silent}" == true ]; then if ! handle_build "${t}" &>"$log_file"; then ^^^^^^ this disables `errexit` and `handle_build` ends with a `tar tvf` that always succeeds. Adding error checking all over the place isn't really an option as it would seriously obfuscate the code. 
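To make the pitfall easier to reproduce outside of the build scripts, here is a self-contained sketch (the function name and messages are made up for illustration) of the bash behaviour described above:

    #!/usr/bin/env bash
    set -o errexit

    build_step() {
        false                         # fails, but errexit is suspended here...
        echo "build_step kept going"  # ...so this still runs, and its status
                                      # becomes the function's exit status
    }

    if ! build_step; then
        echo "failure detected"       # never reached, just like the `tar tvf` case
    fi
    echo "script still exits with status 0"
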
Drop the conditional pipeline instead and print the final error message from a `trap` handler on the special ERR signal. This requires the `errtrace` option as `trap`s aren't propagated to functions by default. Since all outputs of `handle_build` are redirected to the build log file, some file descriptor duplication magic is needed for the handler to be able to write to the orignal stdout and stderr. Fixes #3757 Signed-off-by: Greg Kurz --- .../local-build/kata-deploy-binaries.sh | 27 +++++++++++++++---- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh index 43342b902..a73b2d2c2 100755 --- a/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh +++ b/tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh @@ -8,6 +8,7 @@ set -o errexit set -o nounset set -o pipefail +set -o errtrace readonly project="kata-containers" @@ -196,6 +197,18 @@ handle_build() { tar tvf "${tarball_name}" } +silent_mode_error_trap() { + local stdout="$1" + local stderr="$2" + local t="$3" + local log_file="$4" + exec 1>&${stdout} + exec 2>&${stderr} + error "Failed to build: $t, logs:" + cat "${log_file}" + exit 1 +} + main() { local build_targets local silent @@ -248,11 +261,15 @@ main() { ( cd "${builddir}" if [ "${silent}" == true ]; then - if ! handle_build "${t}" &>"$log_file"; then - error "Failed to build: $t, logs:" - cat "${log_file}" - exit 1 - fi + local stdout + local stderr + # Save stdout and stderr, to be restored + # by silent_mode_error_trap() in case of + # build failure. + exec {stdout}>&1 + exec {stderr}>&2 + trap "silent_mode_error_trap $stdout $stderr $t \"$log_file\"" ERR + handle_build "${t}" &>"$log_file" else handle_build "${t}" fi From 9e4ca0c4f8ea64df9988468ca58e650443b6617c Mon Sep 17 00:00:00 2001 From: David Esparza Date: Wed, 23 Mar 2022 02:20:31 -0400 Subject: [PATCH 38/46] doc: Improve kata-deploy README.md by changing sh blocks to bash blocks The idea is to pass this README file to kata-doc-to-script.sh script and then execute the result. Added comments with a file name on top of each YAML snippet. This helps in assigning a file name when we cat the YAML to a file. Fixes: #3943 Signed-off-by: David Esparza --- tools/packaging/kata-deploy/README.md | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/tools/packaging/kata-deploy/README.md b/tools/packaging/kata-deploy/README.md index c298c6145..c900a95b9 100644 --- a/tools/packaging/kata-deploy/README.md +++ b/tools/packaging/kata-deploy/README.md @@ -27,7 +27,7 @@ The stable image refers to the last stable releases content. > **Note:** if you use a tagged version of the repo, the stable image does match that version. > For instance, if you use the 2.2.1 tagged version of the kata-deploy.yaml file, then the version 2.2.1 of the kata runtime will be deployed. 
-```sh +```bash $ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml $ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy-stable.yaml ``` @@ -36,12 +36,15 @@ $ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-contai ```sh $ GO111MODULE=auto go get github.com/kata-containers/kata-containers +``` + +```bash $ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/packaging/kata-deploy $ kubectl apply -k kata-deploy/overlays/k3s ``` #### Ensure kata-deploy is ready -```sh +```bash kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod ``` @@ -52,7 +55,7 @@ the `Pod` specification. The `runtimeClass` examples provided define a node sele which will ensure the workload is only scheduled on a node that has Kata Containers installed `runtimeClass` is a built-in type in Kubernetes. To apply each Kata Containers `runtimeClass`: -```sh +```bash $ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml ``` @@ -85,25 +88,25 @@ spec: To run an example with `kata-clh`: -```sh +```bash $ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/examples/test-deploy-kata-clh.yaml ``` To run an example with `kata-fc`: -```sh +```bash $ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/examples/test-deploy-kata-fc.yaml ``` To run an example with `kata-qemu`: -```sh +```bash $ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/examples/test-deploy-kata-qemu.yaml ``` The following removes the test pods: -```sh +```bash $ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/examples/test-deploy-kata-clh.yaml $ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/examples/test-deploy-kata-fc.yaml $ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/examples/test-deploy-kata-qemu.yaml @@ -136,13 +139,13 @@ $ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-conta #### Removing the stable image -```sh +```bash $ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy-stable.yaml $ kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod ``` After ensuring kata-deploy has been deleted, cleanup the cluster: -```sh +```bash $ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup-stable.yaml ``` @@ -150,7 +153,7 @@ The cleanup daemon-set will run a single time, cleaning up the node-label, which This process should take, at most, 5 minutes. 
After that, let's delete the cleanup daemon-set, the added RBAC and runtime classes: -```sh +```bash $ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup-stable.yaml $ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml $ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml From a63bbf9793f9a2f901cdda82d1e28ae87f4f5359 Mon Sep 17 00:00:00 2001 From: Francesco Giudici Date: Wed, 30 Mar 2022 11:58:53 +0200 Subject: [PATCH 39/46] kata-monitor: fix duplicated output when printing usage (default: "/run/containerd/containerd.sock") is duplicated when printing kata-monitor usage: [root@kubernetes ~]# kata-monitor --help Usage of kata-monitor: -listen-address string The address to listen on for HTTP requests. (default ":8090") -log-level string Log level of logrus(trace/debug/info/warn/error/fatal/panic). (default "info") -runtime-endpoint string Endpoint of CRI container runtime service. (default: "/run/containerd/containerd.sock") (default "/run/containerd/containerd.sock") the golang flag package takes care of adding the defaults when printing usage. Remove the explicit print of the value so that it would not be printed on screen twice. Fixes: #3998 Signed-off-by: Francesco Giudici --- src/runtime/cmd/kata-monitor/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/runtime/cmd/kata-monitor/main.go b/src/runtime/cmd/kata-monitor/main.go index a3fc86cfd..356316dcf 100644 --- a/src/runtime/cmd/kata-monitor/main.go +++ b/src/runtime/cmd/kata-monitor/main.go @@ -21,7 +21,7 @@ import ( const defaultListenAddress = "127.0.0.1:8090" var monitorListenAddr = flag.String("listen-address", defaultListenAddress, "The address to listen on for HTTP requests.") -var runtimeEndpoint = flag.String("runtime-endpoint", "/run/containerd/containerd.sock", `Endpoint of CRI container runtime service. (default: "/run/containerd/containerd.sock")`) +var runtimeEndpoint = flag.String("runtime-endpoint", "/run/containerd/containerd.sock", "Endpoint of CRI container runtime service.") var logLevel = flag.String("log-level", "info", "Log level of logrus(trace/debug/info/warn/error/fatal/panic).") // These values are overridden via ldflags From 59c7165ee1a88be17ccd409beb5b893b0adc5b21 Mon Sep 17 00:00:00 2001 From: Eng Zer Jun Date: Sun, 20 Mar 2022 15:54:25 +0800 Subject: [PATCH 40/46] test: use `T.TempDir` to create temporary test directory The directory created by `T.TempDir` is automatically removed when the test and all its subtests complete. This commit also updates the unit test advice to use `T.TempDir` to create temporary directory in tests. 
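
An illustrative sketch of the pattern this change applies across the test
files (generic example, not code taken from this repository):

    package example

    import (
        "path/filepath"
        "testing"
    )

    func TestSomething(t *testing.T) {
        // Before: tmpdir, err := os.MkdirTemp("", "")
        //         assert.NoError(t, err)
        //         defer os.RemoveAll(tmpdir)
        //
        // After: the testing package creates the directory and removes it
        // automatically once the test and all its subtests complete.
        tmpdir := t.TempDir()

        // Hypothetical file used by the test body.
        cfgPath := filepath.Join(tmpdir, "config.toml")
        _ = cfgPath
    }
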
Fixes: #3924 Reference: https://pkg.go.dev/testing#T.TempDir Signed-off-by: Eng Zer Jun --- docs/Unit-Test-Advice.md | 10 +- src/runtime/cmd/kata-runtime/factory_test.go | 13 +- .../cmd/kata-runtime/kata-check_amd64_test.go | 36 ++--- .../cmd/kata-runtime/kata-check_arm64_test.go | 8 +- .../kata-runtime/kata-check_generic_test.go | 5 +- .../kata-runtime/kata-check_ppc64le_test.go | 9 +- .../cmd/kata-runtime/kata-check_s390x_test.go | 7 +- .../cmd/kata-runtime/kata-check_test.go | 76 +++------- .../cmd/kata-runtime/kata-env_amd64_test.go | 5 +- .../cmd/kata-runtime/kata-env_generic_test.go | 5 +- src/runtime/cmd/kata-runtime/kata-env_test.go | 118 ++++----------- src/runtime/cmd/kata-runtime/main_test.go | 28 ++-- src/runtime/cmd/kata-runtime/utils_test.go | 24 +--- .../pkg/containerd-shim-v2/create_test.go | 4 +- .../pkg/containerd-shim-v2/stream_test.go | 8 +- src/runtime/pkg/direct-volume/utils_test.go | 12 +- .../pkg/katatestutils/constraints_test.go | 6 +- src/runtime/pkg/katautils/config_test.go | 136 ++++++------------ src/runtime/pkg/katautils/create_test.go | 16 +-- src/runtime/pkg/katautils/network_test.go | 10 +- src/runtime/pkg/katautils/utils_test.go | 41 ++---- src/runtime/pkg/oci/utils_test.go | 12 +- src/runtime/pkg/utils/utils_test.go | 9 +- src/runtime/virtcontainers/container_test.go | 25 ++-- .../device/config/config_test.go | 4 +- .../device/manager/manager_linux_test.go | 6 +- .../device/manager/manager_test.go | 14 +- .../virtcontainers/fs_share_linux_test.go | 8 +- src/runtime/virtcontainers/hypervisor_test.go | 6 +- src/runtime/virtcontainers/kata_agent_test.go | 18 +-- src/runtime/virtcontainers/mount_test.go | 6 +- src/runtime/virtcontainers/nydusd_test.go | 22 +-- src/runtime/virtcontainers/qemu_test.go | 4 +- src/runtime/virtcontainers/sandbox_test.go | 26 ++-- .../virtcontainers/utils/utils_test.go | 24 ++-- src/runtime/virtcontainers/virtiofsd_test.go | 22 +-- src/runtime/virtcontainers/vm_test.go | 14 +- 37 files changed, 220 insertions(+), 577 deletions(-) diff --git a/docs/Unit-Test-Advice.md b/docs/Unit-Test-Advice.md index 8b02bcc8f..ac099a400 100644 --- a/docs/Unit-Test-Advice.md +++ b/docs/Unit-Test-Advice.md @@ -277,7 +277,9 @@ mod tests { ## Temporary files -Always delete temporary files on success. +Use `t.TempDir()` to create temporary directory. The directory created by +`t.TempDir()` is automatically removed when the test and all its subtests +complete. ### Golang temporary files @@ -286,11 +288,7 @@ func TestSomething(t *testing.T) { assert := assert.New(t) // Create a temporary directory - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - - // Delete it at the end of the test - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() // Add test logic that will use the tmpdir here... 
} diff --git a/src/runtime/cmd/kata-runtime/factory_test.go b/src/runtime/cmd/kata-runtime/factory_test.go index 10b40976a..f980bc5a5 100644 --- a/src/runtime/cmd/kata-runtime/factory_test.go +++ b/src/runtime/cmd/kata-runtime/factory_test.go @@ -8,7 +8,6 @@ package main import ( "context" "flag" - "os" "testing" "github.com/stretchr/testify/assert" @@ -43,9 +42,7 @@ func TestFactoryCLIFunctionNoRuntimeConfig(t *testing.T) { func TestFactoryCLIFunctionInit(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() runtimeConfig, err := newTestRuntimeConfig(tmpdir, testConsole, true) assert.NoError(err) @@ -92,9 +89,7 @@ func TestFactoryCLIFunctionInit(t *testing.T) { func TestFactoryCLIFunctionDestroy(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() runtimeConfig, err := newTestRuntimeConfig(tmpdir, testConsole, true) assert.NoError(err) @@ -126,9 +121,7 @@ func TestFactoryCLIFunctionDestroy(t *testing.T) { func TestFactoryCLIFunctionStatus(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() runtimeConfig, err := newTestRuntimeConfig(tmpdir, testConsole, true) assert.NoError(err) diff --git a/src/runtime/cmd/kata-runtime/kata-check_amd64_test.go b/src/runtime/cmd/kata-runtime/kata-check_amd64_test.go index 43a7019ff..47a6c2120 100644 --- a/src/runtime/cmd/kata-runtime/kata-check_amd64_test.go +++ b/src/runtime/cmd/kata-runtime/kata-check_amd64_test.go @@ -71,11 +71,7 @@ func TestCCCheckCLIFunction(t *testing.T) { func TestCheckCheckKernelModulesNoNesting(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() savedSysModuleDir := sysModuleDir savedProcCPUInfo := procCPUInfo @@ -91,7 +87,7 @@ func TestCheckCheckKernelModulesNoNesting(t *testing.T) { procCPUInfo = savedProcCPUInfo }() - err = os.MkdirAll(sysModuleDir, testDirMode) + err := os.MkdirAll(sysModuleDir, testDirMode) if err != nil { t.Fatal(err) } @@ -156,11 +152,7 @@ func TestCheckCheckKernelModulesNoNesting(t *testing.T) { func TestCheckCheckKernelModulesNoUnrestrictedGuest(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() savedSysModuleDir := sysModuleDir savedProcCPUInfo := procCPUInfo @@ -176,7 +168,7 @@ func TestCheckCheckKernelModulesNoUnrestrictedGuest(t *testing.T) { procCPUInfo = savedProcCPUInfo }() - err = os.MkdirAll(sysModuleDir, testDirMode) + err := os.MkdirAll(sysModuleDir, testDirMode) if err != nil { t.Fatal(err) } @@ -255,11 +247,7 @@ func TestCheckHostIsVMContainerCapable(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() savedSysModuleDir := sysModuleDir savedProcCPUInfo := procCPUInfo @@ -275,7 +263,7 @@ func TestCheckHostIsVMContainerCapable(t *testing.T) { procCPUInfo = savedProcCPUInfo }() - err = os.MkdirAll(sysModuleDir, testDirMode) + err := os.MkdirAll(sysModuleDir, testDirMode) if err != nil { t.Fatal(err) } @@ -405,11 +393,7 @@ func TestArchKernelParamHandler(t *testing.T) { func TestKvmIsUsable(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp("", "") - if err != nil { - 
t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() savedKvmDevice := kvmDevice fakeKVMDevice := filepath.Join(dir, "kvm") @@ -419,7 +403,7 @@ func TestKvmIsUsable(t *testing.T) { kvmDevice = savedKvmDevice }() - err = kvmIsUsable() + err := kvmIsUsable() assert.Error(err) err = createEmptyFile(fakeKVMDevice) @@ -457,9 +441,7 @@ foo : bar func TestSetCPUtype(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() savedArchRequiredCPUFlags := archRequiredCPUFlags savedArchRequiredCPUAttribs := archRequiredCPUAttribs diff --git a/src/runtime/cmd/kata-runtime/kata-check_arm64_test.go b/src/runtime/cmd/kata-runtime/kata-check_arm64_test.go index 2b56dca6d..a35cd65b0 100644 --- a/src/runtime/cmd/kata-runtime/kata-check_arm64_test.go +++ b/src/runtime/cmd/kata-runtime/kata-check_arm64_test.go @@ -67,11 +67,7 @@ foo : bar {validContents, validNormalizeVendorName, validNormalizeModelName, false}, } - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() savedProcCPUInfo := procCPUInfo @@ -84,7 +80,7 @@ foo : bar procCPUInfo = savedProcCPUInfo }() - _, _, err = getCPUDetails() + _, _, err := getCPUDetails() // ENOENT assert.Error(t, err) assert.True(t, os.IsNotExist(err)) diff --git a/src/runtime/cmd/kata-runtime/kata-check_generic_test.go b/src/runtime/cmd/kata-runtime/kata-check_generic_test.go index 2a2210b9e..6584b66e0 100644 --- a/src/runtime/cmd/kata-runtime/kata-check_generic_test.go +++ b/src/runtime/cmd/kata-runtime/kata-check_generic_test.go @@ -9,7 +9,6 @@ package main import ( - "os" "testing" "github.com/stretchr/testify/assert" @@ -18,9 +17,7 @@ import ( func testSetCPUTypeGeneric(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() savedArchRequiredCPUFlags := archRequiredCPUFlags savedArchRequiredCPUAttribs := archRequiredCPUAttribs diff --git a/src/runtime/cmd/kata-runtime/kata-check_ppc64le_test.go b/src/runtime/cmd/kata-runtime/kata-check_ppc64le_test.go index c27232252..b2101a448 100644 --- a/src/runtime/cmd/kata-runtime/kata-check_ppc64le_test.go +++ b/src/runtime/cmd/kata-runtime/kata-check_ppc64le_test.go @@ -7,7 +7,6 @@ package main import ( "fmt" - "os" "path/filepath" "testing" @@ -118,11 +117,7 @@ func TestArchKernelParamHandler(t *testing.T) { func TestKvmIsUsable(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() savedKvmDevice := kvmDevice fakeKVMDevice := filepath.Join(dir, "kvm") @@ -132,7 +127,7 @@ func TestKvmIsUsable(t *testing.T) { kvmDevice = savedKvmDevice }() - err = kvmIsUsable() + err := kvmIsUsable() assert.Error(err) err = createEmptyFile(fakeKVMDevice) diff --git a/src/runtime/cmd/kata-runtime/kata-check_s390x_test.go b/src/runtime/cmd/kata-runtime/kata-check_s390x_test.go index d521a456d..0898037c6 100644 --- a/src/runtime/cmd/kata-runtime/kata-check_s390x_test.go +++ b/src/runtime/cmd/kata-runtime/kata-check_s390x_test.go @@ -7,7 +7,6 @@ package main import ( "fmt" - "os" "path/filepath" "testing" @@ -117,11 +116,7 @@ func TestArchKernelParamHandler(t *testing.T) { func TestKvmIsUsable(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() savedKvmDevice := kvmDevice 
fakeKVMDevice := filepath.Join(dir, "kvm") diff --git a/src/runtime/cmd/kata-runtime/kata-check_test.go b/src/runtime/cmd/kata-runtime/kata-check_test.go index 95f668907..e6d273692 100644 --- a/src/runtime/cmd/kata-runtime/kata-check_test.go +++ b/src/runtime/cmd/kata-runtime/kata-check_test.go @@ -155,11 +155,7 @@ func makeCPUInfoFile(path, vendorID, flags string) error { // nolint: unused, deadcode func genericTestGetCPUDetails(t *testing.T, validVendor string, validModel string, validContents string, data []testCPUDetail) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() savedProcCPUInfo := procCPUInfo @@ -172,7 +168,7 @@ func genericTestGetCPUDetails(t *testing.T, validVendor string, validModel strin procCPUInfo = savedProcCPUInfo }() - _, _, err = getCPUDetails() + _, _, err := getCPUDetails() // ENOENT assert.Error(t, err) assert.True(t, os.IsNotExist(err)) @@ -197,11 +193,7 @@ func genericTestGetCPUDetails(t *testing.T, validVendor string, validModel strin func genericCheckCLIFunction(t *testing.T, cpuData []testCPUData, moduleData []testModuleData) { assert := assert.New(t) - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() _, config, err := makeRuntimeConfig(dir) assert.NoError(err) @@ -307,15 +299,11 @@ func TestCheckGetCPUInfo(t *testing.T) { {"foo\n\nbar\nbaz\n\n", "foo", false}, } - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() file := filepath.Join(dir, "cpuinfo") // file doesn't exist - _, err = getCPUInfo(file) + _, err := getCPUInfo(file) assert.Error(err) for _, d := range data { @@ -527,11 +515,7 @@ func TestCheckHaveKernelModule(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() savedModProbeCmd := modProbeCmd savedSysModuleDir := sysModuleDir @@ -545,7 +529,7 @@ func TestCheckHaveKernelModule(t *testing.T) { sysModuleDir = savedSysModuleDir }() - err = os.MkdirAll(sysModuleDir, testDirMode) + err := os.MkdirAll(sysModuleDir, testDirMode) if err != nil { t.Fatal(err) } @@ -577,11 +561,7 @@ func TestCheckHaveKernelModule(t *testing.T) { func TestCheckCheckKernelModules(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() savedModProbeCmd := modProbeCmd savedSysModuleDir := sysModuleDir @@ -595,7 +575,7 @@ func TestCheckCheckKernelModules(t *testing.T) { sysModuleDir = savedSysModuleDir }() - err = os.MkdirAll(sysModuleDir, testDirMode) + err := os.MkdirAll(sysModuleDir, testDirMode) if err != nil { t.Fatal(err) } @@ -662,11 +642,7 @@ func TestCheckCheckKernelModulesUnreadableFile(t *testing.T) { t.Skip(ktu.TestDisabledNeedNonRoot) } - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() testData := map[string]kernelModule{ "foo": { @@ -691,7 +667,7 @@ func TestCheckCheckKernelModulesUnreadableFile(t *testing.T) { }() modPath := filepath.Join(sysModuleDir, "foo/parameters") - err = os.MkdirAll(modPath, testDirMode) + err := os.MkdirAll(modPath, testDirMode) assert.NoError(err) modParamFile := filepath.Join(modPath, "param1") @@ -710,11 +686,7 @@ func TestCheckCheckKernelModulesUnreadableFile(t *testing.T) { func TestCheckCheckKernelModulesInvalidFileContents(t *testing.T) { assert := 
assert.New(t) - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() testData := map[string]kernelModule{ "foo": { @@ -739,7 +711,7 @@ func TestCheckCheckKernelModulesInvalidFileContents(t *testing.T) { }() modPath := filepath.Join(sysModuleDir, "foo/parameters") - err = os.MkdirAll(modPath, testDirMode) + err := os.MkdirAll(modPath, testDirMode) assert.NoError(err) modParamFile := filepath.Join(modPath, "param1") @@ -755,11 +727,7 @@ func TestCheckCheckKernelModulesInvalidFileContents(t *testing.T) { func TestCheckCLIFunctionFail(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() _, config, err := makeRuntimeConfig(dir) assert.NoError(err) @@ -788,11 +756,7 @@ func TestCheckCLIFunctionFail(t *testing.T) { func TestCheckKernelParamHandler(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() savedModProbeCmd := modProbeCmd savedSysModuleDir := sysModuleDir @@ -870,9 +834,7 @@ func TestCheckKernelParamHandler(t *testing.T) { func TestArchRequiredKernelModules(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() _, config, err := makeRuntimeConfig(tmpdir) assert.NoError(err) @@ -885,11 +847,7 @@ func TestArchRequiredKernelModules(t *testing.T) { return } - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() savedModProbeCmd := modProbeCmd savedSysModuleDir := sysModuleDir diff --git a/src/runtime/cmd/kata-runtime/kata-env_amd64_test.go b/src/runtime/cmd/kata-runtime/kata-env_amd64_test.go index a3951328a..7c1fe849d 100644 --- a/src/runtime/cmd/kata-runtime/kata-env_amd64_test.go +++ b/src/runtime/cmd/kata-runtime/kata-env_amd64_test.go @@ -6,7 +6,6 @@ package main import ( - "os" "testing" "github.com/stretchr/testify/assert" @@ -22,9 +21,7 @@ func getExpectedHostDetails(tmpdir string) (HostInfo, error) { func TestEnvGetEnvInfoSetsCPUType(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() savedArchRequiredCPUFlags := archRequiredCPUFlags savedArchRequiredCPUAttribs := archRequiredCPUAttribs diff --git a/src/runtime/cmd/kata-runtime/kata-env_generic_test.go b/src/runtime/cmd/kata-runtime/kata-env_generic_test.go index 1991138a4..8e22ba74a 100644 --- a/src/runtime/cmd/kata-runtime/kata-env_generic_test.go +++ b/src/runtime/cmd/kata-runtime/kata-env_generic_test.go @@ -9,7 +9,6 @@ package main import ( - "os" "testing" "github.com/stretchr/testify/assert" @@ -18,9 +17,7 @@ import ( func testEnvGetEnvInfoSetsCPUTypeGeneric(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() savedArchRequiredCPUFlags := archRequiredCPUFlags savedArchRequiredCPUAttribs := archRequiredCPUAttribs diff --git a/src/runtime/cmd/kata-runtime/kata-env_test.go b/src/runtime/cmd/kata-runtime/kata-env_test.go index 11f0478b2..96847dc13 100644 --- a/src/runtime/cmd/kata-runtime/kata-env_test.go +++ b/src/runtime/cmd/kata-runtime/kata-env_test.go @@ -364,11 +364,7 @@ func TestEnvGetMetaInfo(t *testing.T) { } func TestEnvGetHostInfo(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err 
!= nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() expectedHostDetails, err := getExpectedHostDetails(tmpdir) assert.NoError(t, err) @@ -389,13 +385,9 @@ func TestEnvGetHostInfo(t *testing.T) { } func TestEnvGetHostInfoNoProcCPUInfo(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() - _, err = getExpectedHostDetails(tmpdir) + _, err := getExpectedHostDetails(tmpdir) assert.NoError(t, err) err = os.Remove(procCPUInfo) @@ -406,13 +398,9 @@ func TestEnvGetHostInfoNoProcCPUInfo(t *testing.T) { } func TestEnvGetHostInfoNoOSRelease(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() - _, err = getExpectedHostDetails(tmpdir) + _, err := getExpectedHostDetails(tmpdir) assert.NoError(t, err) err = os.Remove(osRelease) @@ -423,13 +411,9 @@ func TestEnvGetHostInfoNoOSRelease(t *testing.T) { } func TestEnvGetHostInfoNoProcVersion(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() - _, err = getExpectedHostDetails(tmpdir) + _, err := getExpectedHostDetails(tmpdir) assert.NoError(t, err) err = os.Remove(procVersion) @@ -440,11 +424,7 @@ func TestEnvGetHostInfoNoProcVersion(t *testing.T) { } func TestEnvGetEnvInfo(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() // Run test twice to ensure the individual component debug+trace // options are tested. @@ -474,9 +454,7 @@ func TestEnvGetEnvInfo(t *testing.T) { func TestEnvGetEnvInfoNoHypervisorVersion(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() configFile, config, err := makeRuntimeConfig(tmpdir) assert.NoError(err) @@ -501,20 +479,14 @@ func TestEnvGetEnvInfoNoHypervisorVersion(t *testing.T) { func TestEnvGetEnvInfoAgentError(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() - _, _, err = makeRuntimeConfig(tmpdir) + _, _, err := makeRuntimeConfig(tmpdir) assert.NoError(err) } func TestEnvGetEnvInfoNoOSRelease(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() configFile, config, err := makeRuntimeConfig(tmpdir) assert.NoError(t, err) @@ -530,11 +502,7 @@ func TestEnvGetEnvInfoNoOSRelease(t *testing.T) { } func TestEnvGetEnvInfoNoProcCPUInfo(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() configFile, config, err := makeRuntimeConfig(tmpdir) assert.NoError(t, err) @@ -550,11 +518,7 @@ func TestEnvGetEnvInfoNoProcCPUInfo(t *testing.T) { } func TestEnvGetEnvInfoNoProcVersion(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() configFile, config, err := makeRuntimeConfig(tmpdir) assert.NoError(t, err) @@ -570,11 +534,7 @@ func TestEnvGetEnvInfoNoProcVersion(t *testing.T) { } func TestEnvGetRuntimeInfo(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() configFile, config, err := makeRuntimeConfig(tmpdir) 
assert.NoError(t, err) @@ -587,11 +547,7 @@ func TestEnvGetRuntimeInfo(t *testing.T) { } func TestEnvGetAgentInfo(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() _, config, err := makeRuntimeConfig(tmpdir) assert.NoError(t, err) @@ -726,11 +682,7 @@ func testEnvShowJSONSettings(t *testing.T, tmpdir string, tmpfile *os.File) erro } func TestEnvShowSettings(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() tmpfile, err := os.CreateTemp("", "envShowSettings-") assert.NoError(t, err) @@ -747,11 +699,7 @@ func TestEnvShowSettings(t *testing.T) { } func TestEnvShowSettingsInvalidFile(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() tmpfile, err := os.CreateTemp("", "envShowSettings-") assert.NoError(t, err) @@ -771,11 +719,7 @@ func TestEnvShowSettingsInvalidFile(t *testing.T) { } func TestEnvHandleSettings(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() configFile, config, err := makeRuntimeConfig(tmpdir) assert.NoError(t, err) @@ -805,9 +749,7 @@ func TestEnvHandleSettings(t *testing.T) { func TestEnvHandleSettingsInvalidParams(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() configFile, _, err := makeRuntimeConfig(tmpdir) assert.NoError(err) @@ -859,11 +801,7 @@ func TestEnvHandleSettingsInvalidRuntimeConfigType(t *testing.T) { } func TestEnvCLIFunction(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() configFile, config, err := makeRuntimeConfig(tmpdir) assert.NoError(t, err) @@ -904,11 +842,7 @@ func TestEnvCLIFunction(t *testing.T) { } func TestEnvCLIFunctionFail(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() configFile, config, err := makeRuntimeConfig(tmpdir) assert.NoError(t, err) @@ -940,9 +874,7 @@ func TestEnvCLIFunctionFail(t *testing.T) { func TestGetHypervisorInfo(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() _, config, err := makeRuntimeConfig(tmpdir) assert.NoError(err) @@ -962,9 +894,7 @@ func TestGetHypervisorInfo(t *testing.T) { func TestGetHypervisorInfoSocket(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() _, config, err := makeRuntimeConfig(tmpdir) assert.NoError(err) diff --git a/src/runtime/cmd/kata-runtime/main_test.go b/src/runtime/cmd/kata-runtime/main_test.go index 1ce21514d..95c7dc59d 100644 --- a/src/runtime/cmd/kata-runtime/main_test.go +++ b/src/runtime/cmd/kata-runtime/main_test.go @@ -258,14 +258,12 @@ func TestMainBeforeSubCommands(t *testing.T) { func TestMainBeforeSubCommandsInvalidLogFile(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "katatest") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() logFile := filepath.Join(tmpdir, "log") // create the file as the wrong type to force a failure - err = os.MkdirAll(logFile, testDirMode) + err := 
os.MkdirAll(logFile, testDirMode) assert.NoError(err) set := flag.NewFlagSet("", 0) @@ -281,9 +279,7 @@ func TestMainBeforeSubCommandsInvalidLogFile(t *testing.T) { func TestMainBeforeSubCommandsInvalidLogFormat(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "katatest") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() logFile := filepath.Join(tmpdir, "log") @@ -302,7 +298,7 @@ func TestMainBeforeSubCommandsInvalidLogFormat(t *testing.T) { ctx := createCLIContext(set) - err = beforeSubcommands(ctx) + err := beforeSubcommands(ctx) assert.Error(err) assert.NotNil(kataLog.Logger.Out) } @@ -310,9 +306,7 @@ func TestMainBeforeSubCommandsInvalidLogFormat(t *testing.T) { func TestMainBeforeSubCommandsLoadConfigurationFail(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "katatest") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() logFile := filepath.Join(tmpdir, "log") configFile := filepath.Join(tmpdir, "config") @@ -345,9 +339,7 @@ func TestMainBeforeSubCommandsLoadConfigurationFail(t *testing.T) { func TestMainBeforeSubCommandsShowCCConfigPaths(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "katatest") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() set := flag.NewFlagSet("", 0) set.Bool("show-default-config-paths", true, "") @@ -409,9 +401,7 @@ func TestMainBeforeSubCommandsShowCCConfigPaths(t *testing.T) { func TestMainFatal(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "katatest") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() var exitStatus int savedExitFunc := exitFunc @@ -633,9 +623,7 @@ func TestMainCreateRuntime(t *testing.T) { func TestMainVersionPrinter(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "katatest") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() savedOutputFile := defaultOutputFile diff --git a/src/runtime/cmd/kata-runtime/utils_test.go b/src/runtime/cmd/kata-runtime/utils_test.go index da1e3d5c4..f83dae6c4 100644 --- a/src/runtime/cmd/kata-runtime/utils_test.go +++ b/src/runtime/cmd/kata-runtime/utils_test.go @@ -17,18 +17,14 @@ import ( ) func TestFileExists(t *testing.T) { - dir, err := os.MkdirTemp("", "katatest") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() file := filepath.Join(dir, "foo") assert.False(t, katautils.FileExists(file), fmt.Sprintf("File %q should not exist", file)) - err = createEmptyFile(file) + err := createEmptyFile(file) if err != nil { t.Fatal(err) } @@ -54,14 +50,10 @@ func TestGetKernelVersion(t *testing.T) { {validContents, validVersion, false}, } - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() subDir := filepath.Join(tmpdir, "subdir") - err = os.MkdirAll(subDir, testDirMode) + err := os.MkdirAll(subDir, testDirMode) assert.NoError(t, err) _, err = getKernelVersion() @@ -103,11 +95,7 @@ func TestGetDistroDetails(t *testing.T) { const unknown = "<>" - tmpdir, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testOSRelease := filepath.Join(tmpdir, "os-release") testOSReleaseClr := filepath.Join(tmpdir, "os-release-clr") @@ -131,7 +119,7 @@ VERSION_ID="%s" `, nonClrExpectedName, nonClrExpectedVersion) subDir := filepath.Join(tmpdir, "subdir") - err = os.MkdirAll(subDir, testDirMode) + err := 
os.MkdirAll(subDir, testDirMode) assert.NoError(t, err) // override diff --git a/src/runtime/pkg/containerd-shim-v2/create_test.go b/src/runtime/pkg/containerd-shim-v2/create_test.go index 6b00991f9..40ffde59f 100644 --- a/src/runtime/pkg/containerd-shim-v2/create_test.go +++ b/src/runtime/pkg/containerd-shim-v2/create_test.go @@ -382,9 +382,7 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config string, err err func TestCreateLoadRuntimeConfig(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() config, err := createAllRuntimeConfigFiles(tmpdir, "qemu") assert.NoError(err) diff --git a/src/runtime/pkg/containerd-shim-v2/stream_test.go b/src/runtime/pkg/containerd-shim-v2/stream_test.go index 9b848e52b..d5317a172 100644 --- a/src/runtime/pkg/containerd-shim-v2/stream_test.go +++ b/src/runtime/pkg/containerd-shim-v2/stream_test.go @@ -26,9 +26,7 @@ func TestNewTtyIOFifoReopen(t *testing.T) { assert := assert.New(t) ctx := context.TODO() - testDir, err := os.MkdirTemp("", "kata-") - assert.NoError(err) - defer os.RemoveAll(testDir) + testDir := t.TempDir() fifoPath, err := os.MkdirTemp(testDir, "fifo-path-") assert.NoError(err) @@ -104,9 +102,7 @@ func TestIoCopy(t *testing.T) { testBytes2 := []byte("Test2") testBytes3 := []byte("Test3") - testDir, err := os.MkdirTemp("", "kata-") - assert.NoError(err) - defer os.RemoveAll(testDir) + testDir := t.TempDir() fifoPath, err := os.MkdirTemp(testDir, "fifo-path-") assert.NoError(err) diff --git a/src/runtime/pkg/direct-volume/utils_test.go b/src/runtime/pkg/direct-volume/utils_test.go index 6ca80dab8..485f8f9ce 100644 --- a/src/runtime/pkg/direct-volume/utils_test.go +++ b/src/runtime/pkg/direct-volume/utils_test.go @@ -18,9 +18,7 @@ import ( func TestAdd(t *testing.T) { var err error - kataDirectVolumeRootPath, err = os.MkdirTemp(os.TempDir(), "add-test") - assert.Nil(t, err) - defer os.RemoveAll(kataDirectVolumeRootPath) + kataDirectVolumeRootPath = t.TempDir() var volumePath = "/a/b/c" var basePath = "a" actual := MountInfo{ @@ -54,9 +52,7 @@ func TestAdd(t *testing.T) { func TestRecordSandboxId(t *testing.T) { var err error - kataDirectVolumeRootPath, err = os.MkdirTemp(os.TempDir(), "recordSanboxId-test") - assert.Nil(t, err) - defer os.RemoveAll(kataDirectVolumeRootPath) + kataDirectVolumeRootPath = t.TempDir() var volumePath = "/a/b/c" mntInfo := MountInfo{ @@ -82,9 +78,7 @@ func TestRecordSandboxId(t *testing.T) { func TestRecordSandboxIdNoMountInfoFile(t *testing.T) { var err error - kataDirectVolumeRootPath, err = os.MkdirTemp(os.TempDir(), "recordSanboxId-test") - assert.Nil(t, err) - defer os.RemoveAll(kataDirectVolumeRootPath) + kataDirectVolumeRootPath = t.TempDir() var volumePath = "/a/b/c" sandboxId := uuid.Generate().String() diff --git a/src/runtime/pkg/katatestutils/constraints_test.go b/src/runtime/pkg/katatestutils/constraints_test.go index ec7d43ecd..61ff8b399 100644 --- a/src/runtime/pkg/katatestutils/constraints_test.go +++ b/src/runtime/pkg/katatestutils/constraints_test.go @@ -271,14 +271,12 @@ func TestGetFileContents(t *testing.T) { {"foo\nbar"}, } - dir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(dir) + dir := t.TempDir() file := filepath.Join(dir, "foo") // file doesn't exist - _, err = getFileContents(file) + _, err := getFileContents(file) assert.Error(err) for _, d := range data { diff --git a/src/runtime/pkg/katautils/config_test.go b/src/runtime/pkg/katautils/config_test.go 
index c6882f074..2f85adcb3 100644 --- a/src/runtime/pkg/katautils/config_test.go +++ b/src/runtime/pkg/katautils/config_test.go @@ -279,17 +279,13 @@ func testLoadConfiguration(t *testing.T, dir string, } func TestConfigLoadConfiguration(t *testing.T) { - tmpdir, err := os.MkdirTemp(testDir, "load-config-") - assert.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testLoadConfiguration(t, tmpdir, nil) } func TestConfigLoadConfigurationFailBrokenSymLink(t *testing.T) { - tmpdir, err := os.MkdirTemp(testDir, "runtime-config-") - assert.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testLoadConfiguration(t, tmpdir, func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) { @@ -297,7 +293,7 @@ func TestConfigLoadConfigurationFailBrokenSymLink(t *testing.T) { if configFile == config.ConfigPathLink { // break the symbolic link - err = os.Remove(config.ConfigPathLink) + err := os.Remove(config.ConfigPathLink) if err != nil { return expectFail, err } @@ -310,9 +306,7 @@ func TestConfigLoadConfigurationFailBrokenSymLink(t *testing.T) { } func TestConfigLoadConfigurationFailSymLinkLoop(t *testing.T) { - tmpdir, err := os.MkdirTemp(testDir, "runtime-config-") - assert.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testLoadConfiguration(t, tmpdir, func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) { @@ -320,13 +314,13 @@ func TestConfigLoadConfigurationFailSymLinkLoop(t *testing.T) { if configFile == config.ConfigPathLink { // remove the config file - err = os.Remove(config.ConfigPath) + err := os.Remove(config.ConfigPath) if err != nil { return expectFail, err } // now, create a sym-link loop - err := os.Symlink(config.ConfigPathLink, config.ConfigPath) + err = os.Symlink(config.ConfigPathLink, config.ConfigPath) if err != nil { return expectFail, err } @@ -339,15 +333,13 @@ func TestConfigLoadConfigurationFailSymLinkLoop(t *testing.T) { } func TestConfigLoadConfigurationFailMissingHypervisor(t *testing.T) { - tmpdir, err := os.MkdirTemp(testDir, "runtime-config-") - assert.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testLoadConfiguration(t, tmpdir, func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) { expectFail := true - err = os.Remove(config.RuntimeConfig.HypervisorConfig.HypervisorPath) + err := os.Remove(config.RuntimeConfig.HypervisorConfig.HypervisorPath) if err != nil { return expectFail, err } @@ -357,15 +349,13 @@ func TestConfigLoadConfigurationFailMissingHypervisor(t *testing.T) { } func TestConfigLoadConfigurationFailMissingImage(t *testing.T) { - tmpdir, err := os.MkdirTemp(testDir, "runtime-config-") - assert.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testLoadConfiguration(t, tmpdir, func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) { expectFail := true - err = os.Remove(config.RuntimeConfig.HypervisorConfig.ImagePath) + err := os.Remove(config.RuntimeConfig.HypervisorConfig.ImagePath) if err != nil { return expectFail, err } @@ -375,15 +365,13 @@ func TestConfigLoadConfigurationFailMissingImage(t *testing.T) { } func TestConfigLoadConfigurationFailMissingKernel(t *testing.T) { - tmpdir, err := os.MkdirTemp(testDir, "runtime-config-") - assert.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testLoadConfiguration(t, tmpdir, func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) { expectFail := true - 
err = os.Remove(config.RuntimeConfig.HypervisorConfig.KernelPath) + err := os.Remove(config.RuntimeConfig.HypervisorConfig.KernelPath) if err != nil { return expectFail, err } @@ -397,16 +385,14 @@ func TestConfigLoadConfigurationFailUnreadableConfig(t *testing.T) { t.Skip(ktu.TestDisabledNeedNonRoot) } - tmpdir, err := os.MkdirTemp(testDir, "runtime-config-") - assert.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testLoadConfiguration(t, tmpdir, func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) { expectFail := true // make file unreadable by non-root user - err = os.Chmod(config.ConfigPath, 0000) + err := os.Chmod(config.ConfigPath, 0000) if err != nil { return expectFail, err } @@ -420,9 +406,7 @@ func TestConfigLoadConfigurationFailTOMLConfigFileInvalidContents(t *testing.T) t.Skip(ktu.TestDisabledNeedNonRoot) } - tmpdir, err := os.MkdirTemp(testDir, "runtime-config-") - assert.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testLoadConfiguration(t, tmpdir, func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) { @@ -446,9 +430,7 @@ func TestConfigLoadConfigurationFailTOMLConfigFileDuplicatedData(t *testing.T) { t.Skip(ktu.TestDisabledNeedNonRoot) } - tmpdir, err := os.MkdirTemp(testDir, "runtime-config-") - assert.NoError(t, err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testLoadConfiguration(t, tmpdir, func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) { @@ -471,11 +453,7 @@ func TestConfigLoadConfigurationFailTOMLConfigFileDuplicatedData(t *testing.T) { } func TestMinimalRuntimeConfig(t *testing.T) { - dir, err := os.MkdirTemp(testDir, "minimal-runtime-config-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() hypervisorPath := path.Join(dir, "hypervisor") defaultHypervisorPath = hypervisorPath @@ -510,7 +488,7 @@ func TestMinimalRuntimeConfig(t *testing.T) { defaultKernelPath = kernelPath for _, file := range []string{defaultImagePath, defaultInitrdPath, defaultHypervisorPath, defaultJailerPath, defaultKernelPath} { - err = WriteFile(file, "foo", testFileMode) + err := WriteFile(file, "foo", testFileMode) if err != nil { t.Fatal(err) } @@ -531,7 +509,7 @@ func TestMinimalRuntimeConfig(t *testing.T) { utils.VHostVSockDevicePath = "/dev/null" configPath := path.Join(dir, "runtime.toml") - err = createConfig(configPath, runtimeMinimalConfig) + err := createConfig(configPath, runtimeMinimalConfig) if err != nil { t.Fatal(err) } @@ -600,11 +578,7 @@ func TestMinimalRuntimeConfig(t *testing.T) { } func TestNewQemuHypervisorConfig(t *testing.T) { - dir, err := os.MkdirTemp(testDir, "hypervisor-config-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() hypervisorPath := path.Join(dir, "hypervisor") kernelPath := path.Join(dir, "kernel") @@ -699,11 +673,7 @@ func TestNewQemuHypervisorConfig(t *testing.T) { } func TestNewFirecrackerHypervisorConfig(t *testing.T) { - dir, err := os.MkdirTemp(testDir, "hypervisor-config-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() hypervisorPath := path.Join(dir, "hypervisor") kernelPath := path.Join(dir, "kernel") @@ -794,9 +764,7 @@ func TestNewFirecrackerHypervisorConfig(t *testing.T) { func TestNewQemuHypervisorConfigImageAndInitrd(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp(testDir, "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() imagePath := 
filepath.Join(tmpdir, "image") initrdPath := filepath.Join(tmpdir, "initrd") @@ -804,7 +772,7 @@ func TestNewQemuHypervisorConfigImageAndInitrd(t *testing.T) { kernelPath := path.Join(tmpdir, "kernel") for _, file := range []string{imagePath, initrdPath, hypervisorPath, kernelPath} { - err = createEmptyFile(file) + err := createEmptyFile(file) assert.NoError(err) } @@ -826,7 +794,7 @@ func TestNewQemuHypervisorConfigImageAndInitrd(t *testing.T) { PCIeRootPort: pcieRootPort, } - _, err = newQemuHypervisorConfig(hypervisor) + _, err := newQemuHypervisorConfig(hypervisor) // specifying both an image+initrd is invalid assert.Error(err) @@ -836,9 +804,7 @@ func TestNewClhHypervisorConfig(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp(testDir, "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() hypervisorPath := path.Join(tmpdir, "hypervisor") kernelPath := path.Join(tmpdir, "kernel") @@ -846,7 +812,7 @@ func TestNewClhHypervisorConfig(t *testing.T) { virtioFsDaemon := path.Join(tmpdir, "virtiofsd") for _, file := range []string{imagePath, hypervisorPath, kernelPath, virtioFsDaemon} { - err = createEmptyFile(file) + err := createEmptyFile(file) assert.NoError(err) } @@ -931,14 +897,12 @@ func TestHypervisorDefaults(t *testing.T) { func TestHypervisorDefaultsHypervisor(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp(testDir, "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testHypervisorPath := filepath.Join(tmpdir, "hypervisor") testHypervisorLinkPath := filepath.Join(tmpdir, "hypervisor-link") - err = createEmptyFile(testHypervisorPath) + err := createEmptyFile(testHypervisorPath) assert.NoError(err) err = syscall.Symlink(testHypervisorPath, testHypervisorLinkPath) @@ -967,14 +931,12 @@ func TestHypervisorDefaultsHypervisor(t *testing.T) { func TestHypervisorDefaultsKernel(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp(testDir, "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testKernelPath := filepath.Join(tmpdir, "kernel") testKernelLinkPath := filepath.Join(tmpdir, "kernel-link") - err = createEmptyFile(testKernelPath) + err := createEmptyFile(testKernelPath) assert.NoError(err) err = syscall.Symlink(testKernelPath, testKernelLinkPath) @@ -1010,14 +972,12 @@ func TestHypervisorDefaultsKernel(t *testing.T) { func TestHypervisorDefaultsInitrd(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp(testDir, "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testInitrdPath := filepath.Join(tmpdir, "initrd") testInitrdLinkPath := filepath.Join(tmpdir, "initrd-link") - err = createEmptyFile(testInitrdPath) + err := createEmptyFile(testInitrdPath) assert.NoError(err) err = syscall.Symlink(testInitrdPath, testInitrdLinkPath) @@ -1047,14 +1007,12 @@ func TestHypervisorDefaultsInitrd(t *testing.T) { func TestHypervisorDefaultsImage(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp(testDir, "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() testImagePath := filepath.Join(tmpdir, "image") testImageLinkPath := filepath.Join(tmpdir, "image-link") - err = createEmptyFile(testImagePath) + err := createEmptyFile(testImagePath) assert.NoError(err) err = syscall.Symlink(testImagePath, testImageLinkPath) @@ -1142,16 +1100,14 @@ func TestGetDefaultConfigFilePaths(t *testing.T) { func TestGetDefaultConfigFile(t *testing.T) { assert := assert.New(t) - tmpdir, err := 
os.MkdirTemp(testDir, "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() hypervisor := "qemu" confDir := filepath.Join(tmpdir, "conf") sysConfDir := filepath.Join(tmpdir, "sysconf") for _, dir := range []string{confDir, sysConfDir} { - err = os.MkdirAll(dir, testDirMode) + err := os.MkdirAll(dir, testDirMode) assert.NoError(err) } @@ -1449,11 +1405,7 @@ func TestUpdateRuntimeConfigurationInvalidKernelParams(t *testing.T) { func TestCheckHypervisorConfig(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp(testDir, "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() // Not created on purpose imageENOENT := filepath.Join(dir, "image-ENOENT.img") @@ -1463,7 +1415,7 @@ func TestCheckHypervisorConfig(t *testing.T) { initrdEmpty := filepath.Join(dir, "initrd-empty.img") for _, file := range []string{imageEmpty, initrdEmpty} { - err = createEmptyFile(file) + err := createEmptyFile(file) assert.NoError(err) } @@ -1478,7 +1430,7 @@ func TestCheckHypervisorConfig(t *testing.T) { fileData := strings.Repeat("X", int(fileSizeBytes)) for _, file := range []string{image, initrd} { - err = WriteFile(file, fileData, testFileMode) + err := WriteFile(file, fileData, testFileMode) assert.NoError(err) } @@ -1612,19 +1564,15 @@ func TestCheckFactoryConfig(t *testing.T) { func TestValidateBindMounts(t *testing.T) { assert := assert.New(t) - tmpdir1, err := os.MkdirTemp(testDir, "tmp1-") - assert.NoError(err) - defer os.RemoveAll(tmpdir1) + tmpdir1 := t.TempDir() - tmpdir2, err := os.MkdirTemp(testDir, "tmp2-") - assert.NoError(err) - defer os.RemoveAll(tmpdir2) + tmpdir2 := t.TempDir() duplicate1 := filepath.Join(tmpdir1, "cat.txt") duplicate2 := filepath.Join(tmpdir2, "cat.txt") unique := filepath.Join(tmpdir1, "foobar.txt") - err = os.WriteFile(duplicate1, []byte("kibble-monster"), 0644) + err := os.WriteFile(duplicate1, []byte("kibble-monster"), 0644) assert.NoError(err) err = os.WriteFile(duplicate2, []byte("furbag"), 0644) diff --git a/src/runtime/pkg/katautils/create_test.go b/src/runtime/pkg/katautils/create_test.go index e2488aaa9..56e7fa27a 100644 --- a/src/runtime/pkg/katautils/create_test.go +++ b/src/runtime/pkg/katautils/create_test.go @@ -124,14 +124,10 @@ func TestSetEphemeralStorageType(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp(testDir, "foo") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() ephePath := filepath.Join(dir, vc.K8sEmptyDir, "tmp-volume") - err = os.MkdirAll(ephePath, testDirMode) + err := os.MkdirAll(ephePath, testDirMode) assert.Nil(err) err = syscall.Mount("tmpfs", ephePath, "tmpfs", 0, "") @@ -308,17 +304,13 @@ func TestCreateSandboxAnnotations(t *testing.T) { func TestCheckForFips(t *testing.T) { assert := assert.New(t) - path, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(path) - val := procFIPS - procFIPS = filepath.Join(path, "fips-enabled") + procFIPS = filepath.Join(t.TempDir(), "fips-enabled") defer func() { procFIPS = val }() - err = os.WriteFile(procFIPS, []byte("1"), 0644) + err := os.WriteFile(procFIPS, []byte("1"), 0644) assert.NoError(err) hconfig := vc.HypervisorConfig{ diff --git a/src/runtime/pkg/katautils/network_test.go b/src/runtime/pkg/katautils/network_test.go index e601fda7f..8d1bf86ee 100644 --- a/src/runtime/pkg/katautils/network_test.go +++ b/src/runtime/pkg/katautils/network_test.go @@ -23,15 +23,13 @@ import ( func TestGetNetNsFromBindMount(t *testing.T) { assert := assert.New(t) - tmpdir, err 
:= os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() mountFile := filepath.Join(tmpdir, "mountInfo") nsPath := filepath.Join(tmpdir, "ns123") // Non-existent namespace path - _, err = getNetNsFromBindMount(nsPath, mountFile) + _, err := getNetNsFromBindMount(nsPath, mountFile) assert.NotNil(err) tmpNSPath := filepath.Join(tmpdir, "testNetNs") @@ -85,9 +83,7 @@ func TestHostNetworkingRequested(t *testing.T) { assert.Error(err) // Bind-mounted Netns - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() // Create a bind mount to the current network namespace. tmpFile := filepath.Join(tmpdir, "testNetNs") diff --git a/src/runtime/pkg/katautils/utils_test.go b/src/runtime/pkg/katautils/utils_test.go index 6a4e51b5c..93d819a8f 100644 --- a/src/runtime/pkg/katautils/utils_test.go +++ b/src/runtime/pkg/katautils/utils_test.go @@ -26,10 +26,6 @@ const ( testContainerID = "1" ) -var ( - testDir = "" -) - func createFile(file, contents string) error { return os.WriteFile(file, []byte(contents), testFileMode) } @@ -44,17 +40,13 @@ func TestUtilsResolvePathEmptyPath(t *testing.T) { } func TestUtilsResolvePathValidPath(t *testing.T) { - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() target := path.Join(dir, "target") linkDir := path.Join(dir, "a/b/c") linkFile := path.Join(linkDir, "link") - err = createEmptyFile(target) + err := createEmptyFile(target) assert.NoError(t, err) absolute, err := filepath.Abs(target) @@ -76,16 +68,13 @@ func TestUtilsResolvePathValidPath(t *testing.T) { } func TestUtilsResolvePathENOENT(t *testing.T) { - dir, err := os.MkdirTemp("", "") - if err != nil { - t.Fatal(err) - } + dir := t.TempDir() target := path.Join(dir, "target") linkDir := path.Join(dir, "a/b/c") linkFile := path.Join(linkDir, "link") - err = createEmptyFile(target) + err := createEmptyFile(target) assert.NoError(t, err) err = os.MkdirAll(linkDir, testDirMode) @@ -111,16 +100,12 @@ func TestUtilsResolvePathENOENT(t *testing.T) { func TestFileSize(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp(testDir, "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() file := filepath.Join(dir, "foo") // ENOENT - _, err = fileSize(file) + _, err := fileSize(file) assert.Error(err) err = createEmptyFile(file) @@ -152,12 +137,10 @@ func TestWriteFileErrWriteFail(t *testing.T) { func TestWriteFileErrNoPath(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp(testDir, "") - assert.NoError(err) - defer os.RemoveAll(dir) + dir := t.TempDir() // attempt to write a file over an existing directory - err = WriteFile(dir, "", 0000) + err := WriteFile(dir, "", 0000) assert.Error(err) } @@ -177,16 +160,12 @@ func TestGetFileContents(t *testing.T) { {"processor : 0\nvendor_id : GenuineIntel\n"}, } - dir, err := os.MkdirTemp(testDir, "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) + dir := t.TempDir() file := filepath.Join(dir, "foo") // file doesn't exist - _, err = GetFileContents(file) + _, err := GetFileContents(file) assert.Error(t, err) for _, d := range data { diff --git a/src/runtime/pkg/oci/utils_test.go b/src/runtime/pkg/oci/utils_test.go index 8f8e19799..bfa71801d 100644 --- a/src/runtime/pkg/oci/utils_test.go +++ b/src/runtime/pkg/oci/utils_test.go @@ -410,12 +410,10 @@ func TestGetShmSizeBindMounted(t *testing.T) { t.Skip("Test disabled as requires root privileges") 
} - dir, err := os.MkdirTemp("", "") - assert.Nil(t, err) - defer os.RemoveAll(dir) + dir := t.TempDir() shmPath := filepath.Join(dir, "shm") - err = os.Mkdir(shmPath, 0700) + err := os.Mkdir(shmPath, 0700) assert.Nil(t, err) size := 8192 @@ -473,15 +471,13 @@ func TestMain(m *testing.M) { func TestAddAssetAnnotations(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() // Create a pretend asset file // (required since the existence of binary asset annotations is verified). fakeAssetFile := filepath.Join(tmpdir, "fake-binary") - err = os.WriteFile(fakeAssetFile, []byte(""), fileMode) + err := os.WriteFile(fakeAssetFile, []byte(""), fileMode) assert.NoError(err) expectedAnnotations := map[string]string{ diff --git a/src/runtime/pkg/utils/utils_test.go b/src/runtime/pkg/utils/utils_test.go index 318b1af44..67edd613d 100644 --- a/src/runtime/pkg/utils/utils_test.go +++ b/src/runtime/pkg/utils/utils_test.go @@ -51,11 +51,8 @@ func TestGzipAccepted(t *testing.T) { func TestEnsureDir(t *testing.T) { const testMode = 0755 - tmpdir, err := os.MkdirTemp("", "TestEnsureDir") assert := assert.New(t) - - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() // nolint: govet testCases := []struct { @@ -120,9 +117,7 @@ func TestEnsureDir(t *testing.T) { func TestFirstValidExecutable(t *testing.T) { assert := assert.New(t) - tmpdir, err := os.MkdirTemp("", "TestFirstValidPath") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() // nolint: govet testCases := []struct { diff --git a/src/runtime/virtcontainers/container_test.go b/src/runtime/virtcontainers/container_test.go index eddf8ed70..f6fce13e6 100644 --- a/src/runtime/virtcontainers/container_test.go +++ b/src/runtime/virtcontainers/container_test.go @@ -259,8 +259,7 @@ func testSetupFakeRootfs(t *testing.T) (testRawFile, loopDev, mntDir string, err t.Skip(testDisabledAsNonRoot) } - tmpDir, err := os.MkdirTemp("", "") - assert.NoError(err) + tmpDir := t.TempDir() testRawFile = filepath.Join(tmpDir, "raw.img") _, err = os.Stat(testRawFile) @@ -570,14 +569,9 @@ func TestMountSharedDirMounts(t *testing.T) { assert := assert.New(t) - testMountPath, err := os.MkdirTemp("", "sandbox-test") - assert.NoError(err) - defer os.RemoveAll(testMountPath) - // create a new shared directory for our test: kataHostSharedDirSaved := kataHostSharedDir - testHostDir, err := os.MkdirTemp("", "kata-Cleanup") - assert.NoError(err) + testHostDir := t.TempDir() kataHostSharedDir = func() string { return testHostDir } @@ -611,12 +605,18 @@ func TestMountSharedDirMounts(t *testing.T) { // // Create the mounts that we'll test with // + testMountPath := t.TempDir() secretpath := filepath.Join(testMountPath, K8sSecret) err = os.MkdirAll(secretpath, 0777) assert.NoError(err) secret := filepath.Join(secretpath, "super-secret-thing") - _, err = os.Create(secret) + f, err := os.Create(secret) assert.NoError(err) + t.Cleanup(func() { + if err := f.Close(); err != nil { + t.Fatalf("failed to close file: %v", err) + } + }) mountDestination := "/fluffhead/token" // @@ -655,6 +655,13 @@ func TestMountSharedDirMounts(t *testing.T) { assert.Equal(len(updatedMounts), 1) assert.Equal(updatedMounts[mountDestination].Source, expectedStorageDest) assert.Equal(updatedMounts[mountDestination].Destination, mountDestination) + + // Perform cleanups + err = container.unmountHostMounts(k.ctx) + assert.NoError(err) + + err = fsShare.Cleanup(k.ctx) + 
assert.NoError(err) } func TestGetContainerId(t *testing.T) { diff --git a/src/runtime/virtcontainers/device/config/config_test.go b/src/runtime/virtcontainers/device/config/config_test.go index b887048e6..68d723030 100644 --- a/src/runtime/virtcontainers/device/config/config_test.go +++ b/src/runtime/virtcontainers/device/config/config_test.go @@ -17,9 +17,7 @@ import ( func TestGetBackingFile(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp("", "backing") - assert.NoError(err) - defer os.RemoveAll(dir) + dir := t.TempDir() orgGetSysDevPath := getSysDevPath getSysDevPath = func(info DeviceInfo) string { diff --git a/src/runtime/virtcontainers/device/manager/manager_linux_test.go b/src/runtime/virtcontainers/device/manager/manager_linux_test.go index b47a38c0a..abb753291 100644 --- a/src/runtime/virtcontainers/device/manager/manager_linux_test.go +++ b/src/runtime/virtcontainers/device/manager/manager_linux_test.go @@ -29,22 +29,20 @@ func TestAttachVhostUserBlkDevice(t *testing.T) { rootEnabled = false } - tmpDir, err := os.MkdirTemp("", "") + tmpDir := t.TempDir() dm := &deviceManager{ blockDriver: config.VirtioBlock, devices: make(map[string]api.Device), vhostUserStoreEnabled: true, vhostUserStorePath: tmpDir, } - assert.Nil(t, err) - defer os.RemoveAll(tmpDir) vhostUserDevNodePath := filepath.Join(tmpDir, "/block/devices/") vhostUserSockPath := filepath.Join(tmpDir, "/block/sockets/") deviceNodePath := filepath.Join(vhostUserDevNodePath, "vhostblk0") deviceSockPath := filepath.Join(vhostUserSockPath, "vhostblk0") - err = os.MkdirAll(vhostUserDevNodePath, dirMode) + err := os.MkdirAll(vhostUserDevNodePath, dirMode) assert.Nil(t, err) err = os.MkdirAll(vhostUserSockPath, dirMode) assert.Nil(t, err) diff --git a/src/runtime/virtcontainers/device/manager/manager_test.go b/src/runtime/virtcontainers/device/manager/manager_test.go index 8e1b8ec4b..ea20012a9 100644 --- a/src/runtime/virtcontainers/device/manager/manager_test.go +++ b/src/runtime/virtcontainers/device/manager/manager_test.go @@ -34,12 +34,8 @@ func TestNewDevice(t *testing.T) { major := int64(252) minor := int64(3) - tmpDir, err := os.MkdirTemp("", "") - assert.Nil(t, err) - - config.SysDevPrefix = tmpDir + config.SysDevPrefix = t.TempDir() defer func() { - os.RemoveAll(tmpDir) config.SysDevPrefix = savedSysDevPrefix }() @@ -53,7 +49,7 @@ func TestNewDevice(t *testing.T) { DevType: "c", } - _, err = dm.NewDevice(deviceInfo) + _, err := dm.NewDevice(deviceInfo) assert.NotNil(t, err) format := strconv.FormatInt(major, 10) + ":" + strconv.FormatInt(minor, 10) @@ -99,15 +95,13 @@ func TestAttachVFIODevice(t *testing.T) { blockDriver: config.VirtioBlock, devices: make(map[string]api.Device), } - tmpDir, err := os.MkdirTemp("", "") - assert.Nil(t, err) - defer os.RemoveAll(tmpDir) + tmpDir := t.TempDir() testFDIOGroup := "2" testDeviceBDFPath := "0000:00:1c.0" devicesDir := filepath.Join(tmpDir, testFDIOGroup, "devices") - err = os.MkdirAll(devicesDir, dirMode) + err := os.MkdirAll(devicesDir, dirMode) assert.Nil(t, err) deviceBDFDir := filepath.Join(devicesDir, testDeviceBDFPath) diff --git a/src/runtime/virtcontainers/fs_share_linux_test.go b/src/runtime/virtcontainers/fs_share_linux_test.go index 704cb4b88..6b740d1e6 100644 --- a/src/runtime/virtcontainers/fs_share_linux_test.go +++ b/src/runtime/virtcontainers/fs_share_linux_test.go @@ -24,14 +24,11 @@ func TestSandboxSharedFilesystem(t *testing.T) { assert := assert.New(t) // create temporary files to mount: - testMountPath, err := os.MkdirTemp("", 
"sandbox-test") - assert.NoError(err) - defer os.RemoveAll(testMountPath) + testMountPath := t.TempDir() // create a new shared directory for our test: kataHostSharedDirSaved := kataHostSharedDir - testHostDir, err := os.MkdirTemp("", "kata-Cleanup") - assert.NoError(err) + testHostDir := t.TempDir() kataHostSharedDir = func() string { return testHostDir } @@ -66,7 +63,6 @@ func TestSandboxSharedFilesystem(t *testing.T) { dir := kataHostSharedDir() err = os.MkdirAll(path.Join(dir, sandbox.id), 0777) assert.Nil(err) - defer os.RemoveAll(dir) // Test the prepare function. We expect it to succeed err = sandbox.fsShare.Prepare(sandbox.ctx) diff --git a/src/runtime/virtcontainers/hypervisor_test.go b/src/runtime/virtcontainers/hypervisor_test.go index 86aefde69..f60428795 100644 --- a/src/runtime/virtcontainers/hypervisor_test.go +++ b/src/runtime/virtcontainers/hypervisor_test.go @@ -390,12 +390,10 @@ func TestGetHostMemorySizeKb(t *testing.T) { }, } - dir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(dir) + dir := t.TempDir() file := filepath.Join(dir, "meminfo") - _, err = GetHostMemorySizeKb(file) + _, err := GetHostMemorySizeKb(file) assert.Error(err) for _, d := range data { diff --git a/src/runtime/virtcontainers/kata_agent_test.go b/src/runtime/virtcontainers/kata_agent_test.go index bcca754ba..cb3923c99 100644 --- a/src/runtime/virtcontainers/kata_agent_test.go +++ b/src/runtime/virtcontainers/kata_agent_test.go @@ -751,9 +751,7 @@ func TestHandlePidNamespace(t *testing.T) { func TestAgentConfigure(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp("", "kata-agent-test") - assert.Nil(err) - defer os.RemoveAll(dir) + dir := t.TempDir() k := &kataAgent{} h := &mockHypervisor{} @@ -761,7 +759,7 @@ func TestAgentConfigure(t *testing.T) { id := "foobar" ctx := context.Background() - err = k.configure(ctx, h, id, dir, c) + err := k.configure(ctx, h, id, dir, c) assert.Nil(err) err = k.configure(ctx, h, id, dir, c) @@ -872,9 +870,7 @@ func TestAgentCreateContainer(t *testing.T) { }, } - dir, err := os.MkdirTemp("", "kata-agent-test") - assert.Nil(err) - defer os.RemoveAll(dir) + dir := t.TempDir() err = k.configure(context.Background(), &mockHypervisor{}, sandbox.id, dir, KataAgentConfig{}) assert.Nil(err) @@ -984,8 +980,7 @@ func TestKataCleanupSandbox(t *testing.T) { kataHostSharedDirSaved := kataHostSharedDir kataHostSharedDir = func() string { - td, _ := os.MkdirTemp("", "kata-Cleanup") - return td + return t.TempDir() } defer func() { kataHostSharedDir = kataHostSharedDirSaved @@ -996,7 +991,6 @@ func TestKataCleanupSandbox(t *testing.T) { } dir := kataHostSharedDir() - defer os.RemoveAll(dir) err := os.MkdirAll(path.Join(dir, s.id), 0777) assert.Nil(err) @@ -1144,9 +1138,7 @@ func TestHandleHugepages(t *testing.T) { assert := assert.New(t) - dir, err := ioutil.TempDir("", "hugepages-test") - assert.Nil(err) - defer os.RemoveAll(dir) + dir := t.TempDir() k := kataAgent{} var formattedSizes []string diff --git a/src/runtime/virtcontainers/mount_test.go b/src/runtime/virtcontainers/mount_test.go index 6564a9648..b658de829 100644 --- a/src/runtime/virtcontainers/mount_test.go +++ b/src/runtime/virtcontainers/mount_test.go @@ -318,14 +318,12 @@ func TestIsWatchable(t *testing.T) { result = isWatchableMount(path) assert.False(result) - testPath, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(testPath) + testPath := t.TempDir() // Verify secret is successful (single file mount): // 
/tmppath/kubernetes.io~secret/super-secret-thing secretpath := filepath.Join(testPath, K8sSecret) - err = os.MkdirAll(secretpath, 0777) + err := os.MkdirAll(secretpath, 0777) assert.NoError(err) secret := filepath.Join(secretpath, "super-secret-thing") _, err = os.Create(secret) diff --git a/src/runtime/virtcontainers/nydusd_test.go b/src/runtime/virtcontainers/nydusd_test.go index 96163f66a..4f9c4bbeb 100644 --- a/src/runtime/virtcontainers/nydusd_test.go +++ b/src/runtime/virtcontainers/nydusd_test.go @@ -8,7 +8,6 @@ package virtcontainers import ( "context" "encoding/base64" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -19,7 +18,6 @@ import ( ) func TestNydusdStart(t *testing.T) { - assert := assert.New(t) // nolint: govet type fields struct { pid int @@ -33,13 +31,8 @@ func TestNydusdStart(t *testing.T) { setupShareDirFn func() error } - sourcePath, err := ioutil.TempDir("", "") - assert.NoError(err) - defer os.RemoveAll(sourcePath) - - socketDir, err := ioutil.TempDir("", "") - assert.NoError(err) - defer os.RemoveAll(socketDir) + sourcePath := t.TempDir() + socketDir := t.TempDir() sockPath := filepath.Join(socketDir, "vhost-user.sock") apiSockPath := filepath.Join(socketDir, "api.sock") @@ -115,13 +108,8 @@ func TestNydusdArgs(t *testing.T) { func TestNydusdValid(t *testing.T) { assert := assert.New(t) - sourcePath, err := ioutil.TempDir("", "") - assert.NoError(err) - defer os.RemoveAll(sourcePath) - - socketDir, err := ioutil.TempDir("", "") - assert.NoError(err) - defer os.RemoveAll(socketDir) + sourcePath := t.TempDir() + socketDir := t.TempDir() sockPath := filepath.Join(socketDir, "vhost-user.sock") apiSockPath := filepath.Join(socketDir, "api.sock") @@ -135,7 +123,7 @@ func TestNydusdValid(t *testing.T) { } } nd := newNydsudFunc() - err = nd.valid() + err := nd.valid() assert.NoError(err) nd = newNydsudFunc() diff --git a/src/runtime/virtcontainers/qemu_test.go b/src/runtime/virtcontainers/qemu_test.go index 9069bb212..0c27240be 100644 --- a/src/runtime/virtcontainers/qemu_test.go +++ b/src/runtime/virtcontainers/qemu_test.go @@ -308,9 +308,7 @@ func TestQemuAddDeviceSerialPortDev(t *testing.T) { func TestQemuAddDeviceKataVSOCK(t *testing.T) { assert := assert.New(t) - dir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(dir) + dir := t.TempDir() vsockFilename := filepath.Join(dir, "vsock") diff --git a/src/runtime/virtcontainers/sandbox_test.go b/src/runtime/virtcontainers/sandbox_test.go index b1556ec15..31bec9381 100644 --- a/src/runtime/virtcontainers/sandbox_test.go +++ b/src/runtime/virtcontainers/sandbox_test.go @@ -257,8 +257,8 @@ func testCheckContainerOnDiskState(c *Container, containerState types.ContainerS // writeContainerConfig write config.json to bundle path // and return bundle path. 
-// NOTE: don't forget to delete the bundle path -func writeContainerConfig() (string, error) { +// NOTE: the bundle path is automatically removed by t.Cleanup +func writeContainerConfig(t *testing.T) (string, error) { basicSpec := ` { @@ -269,12 +269,9 @@ func writeContainerConfig() (string, error) { } }` - configDir, err := os.MkdirTemp("", "vc-tmp-") - if err != nil { - return "", err - } + configDir := t.TempDir() - err = os.MkdirAll(configDir, DirMode) + err := os.MkdirAll(configDir, DirMode) if err != nil { return "", err } @@ -293,10 +290,7 @@ func TestSandboxSetSandboxAndContainerState(t *testing.T) { contConfig := newTestContainerConfigNoop(contID) assert := assert.New(t) - configDir, err := writeContainerConfig() - if err != nil { - os.RemoveAll(configDir) - } + configDir, err := writeContainerConfig(t) assert.NoError(err) // set bundle path annotation, fetchSandbox need this annotation to get containers @@ -526,15 +520,14 @@ func TestContainerStateSetFstype(t *testing.T) { } func TestSandboxAttachDevicesVFIO(t *testing.T) { - tmpDir, err := os.MkdirTemp("", "") - assert.Nil(t, err) + tmpDir := t.TempDir() os.RemoveAll(tmpDir) testFDIOGroup := "2" testDeviceBDFPath := "0000:00:1c.0" devicesDir := filepath.Join(tmpDir, testFDIOGroup, "devices") - err = os.MkdirAll(devicesDir, DirMode) + err := os.MkdirAll(devicesDir, DirMode) assert.Nil(t, err) deviceFile := filepath.Join(devicesDir, testDeviceBDFPath) @@ -596,8 +589,7 @@ func TestSandboxAttachDevicesVhostUserBlk(t *testing.T) { rootEnabled = false } - tmpDir, err := os.MkdirTemp("", "") - assert.Nil(t, err) + tmpDir := t.TempDir() os.RemoveAll(tmpDir) dm := manager.NewDeviceManager(config.VirtioSCSI, true, tmpDir, nil) @@ -606,7 +598,7 @@ func TestSandboxAttachDevicesVhostUserBlk(t *testing.T) { deviceNodePath := filepath.Join(vhostUserDevNodePath, "vhostblk0") deviceSockPath := filepath.Join(vhostUserSockPath, "vhostblk0") - err = os.MkdirAll(vhostUserDevNodePath, dirMode) + err := os.MkdirAll(vhostUserDevNodePath, dirMode) assert.Nil(t, err) err = os.MkdirAll(vhostUserSockPath, dirMode) assert.Nil(t, err) diff --git a/src/runtime/virtcontainers/utils/utils_test.go b/src/runtime/virtcontainers/utils/utils_test.go index 838e72ef4..aac6fef90 100644 --- a/src/runtime/virtcontainers/utils/utils_test.go +++ b/src/runtime/virtcontainers/utils/utils_test.go @@ -458,12 +458,8 @@ func TestMkdirAllWithInheritedOwnerSuccessful(t *testing.T) { t.Skip("Test disabled as requires root user") } assert := assert.New(t) - tmpDir1, err := os.MkdirTemp("", "test") - assert.NoError(err) - defer os.RemoveAll(tmpDir1) - tmpDir2, err := os.MkdirTemp("", "test") - assert.NoError(err) - defer os.RemoveAll(tmpDir2) + tmpDir1 := t.TempDir() + tmpDir2 := t.TempDir() testCases := []struct { before func(rootDir string, uid, gid int) @@ -474,7 +470,7 @@ func TestMkdirAllWithInheritedOwnerSuccessful(t *testing.T) { }{ { before: func(rootDir string, uid, gid int) { - err = syscall.Chown(rootDir, uid, gid) + err := syscall.Chown(rootDir, uid, gid) assert.NoError(err) }, rootDir: tmpDir1, @@ -485,7 +481,7 @@ func TestMkdirAllWithInheritedOwnerSuccessful(t *testing.T) { { before: func(rootDir string, uid, gid int) { // remove the tmpDir2 so the MkdirAllWithInheritedOwner() call creates them from /tmp - err = os.RemoveAll(tmpDir2) + err := os.RemoveAll(tmpDir2) assert.NoError(err) }, rootDir: tmpDir2, @@ -502,8 +498,10 @@ func TestMkdirAllWithInheritedOwnerSuccessful(t *testing.T) { err := MkdirAllWithInheritedOwner(tc.targetDir, 0700) assert.NoError(err) - // 
remove the first parent "/tmp" from the assertion as it's owned by root - for _, p := range getAllParentPaths(tc.targetDir)[1:] { + // tmpDir1: /tmp/TestMkdirAllWithInheritedOwnerSuccessful/001 + // tmpDir2: /tmp/TestMkdirAllWithInheritedOwnerSuccessful/002 + // remove the first two parent "/tmp/TestMkdirAllWithInheritedOwnerSuccessful" from the assertion as it's owned by root + for _, p := range getAllParentPaths(tc.targetDir)[2:] { info, err := os.Stat(p) assert.NoError(err) assert.True(info.IsDir()) @@ -520,12 +518,10 @@ func TestChownToParent(t *testing.T) { t.Skip("Test disabled as requires root user") } assert := assert.New(t) - rootDir, err := os.MkdirTemp("", "root") - assert.NoError(err) - defer os.RemoveAll(rootDir) + rootDir := t.TempDir() uid := 1234 gid := 5678 - err = syscall.Chown(rootDir, uid, gid) + err := syscall.Chown(rootDir, uid, gid) assert.NoError(err) targetDir := path.Join(rootDir, "foo") diff --git a/src/runtime/virtcontainers/virtiofsd_test.go b/src/runtime/virtcontainers/virtiofsd_test.go index 4aa55091c..c2ebc012c 100644 --- a/src/runtime/virtcontainers/virtiofsd_test.go +++ b/src/runtime/virtcontainers/virtiofsd_test.go @@ -7,7 +7,6 @@ package virtcontainers import ( "context" - "os" "path" "strings" "testing" @@ -16,7 +15,6 @@ import ( ) func TestVirtiofsdStart(t *testing.T) { - assert := assert.New(t) // nolint: govet type fields struct { path string @@ -29,13 +27,8 @@ func TestVirtiofsdStart(t *testing.T) { ctx context.Context } - sourcePath, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(sourcePath) - - socketDir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(socketDir) + sourcePath := t.TempDir() + socketDir := t.TempDir() socketPath := path.Join(socketDir, "socket.s") @@ -103,13 +96,8 @@ func TestVirtiofsdArgs(t *testing.T) { func TestValid(t *testing.T) { assert := assert.New(t) - sourcePath, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(sourcePath) - - socketDir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(socketDir) + sourcePath := t.TempDir() + socketDir := t.TempDir() socketPath := socketDir + "socket.s" @@ -123,7 +111,7 @@ func TestValid(t *testing.T) { // valid case v := newVirtiofsdFunc() - err = v.valid() + err := v.valid() assert.NoError(err) v = newVirtiofsdFunc() diff --git a/src/runtime/virtcontainers/vm_test.go b/src/runtime/virtcontainers/vm_test.go index d16e17504..0d50a69de 100644 --- a/src/runtime/virtcontainers/vm_test.go +++ b/src/runtime/virtcontainers/vm_test.go @@ -18,9 +18,7 @@ import ( func TestNewVM(t *testing.T) { assert := assert.New(t) - testDir, err := os.MkdirTemp("", "vmfactory-tmp-") - assert.Nil(err) - defer os.RemoveAll(testDir) + testDir := t.TempDir() config := VMConfig{ HypervisorType: MockHypervisor, @@ -33,7 +31,7 @@ func TestNewVM(t *testing.T) { ctx := WithNewAgentFunc(context.Background(), newMockAgent) var vm *VM - _, err = NewVM(ctx, config) + _, err := NewVM(ctx, config) assert.Error(err) config.HypervisorConfig = hyperConfig @@ -65,9 +63,7 @@ func TestNewVM(t *testing.T) { defer func() { urandomDev = savedUrandomDev }() - tmpdir, err := os.MkdirTemp("", "") - assert.NoError(err) - defer os.RemoveAll(tmpdir) + tmpdir := t.TempDir() urandomDev = filepath.Join(tmpdir, "urandom") data := make([]byte, 512) err = os.WriteFile(urandomDev, data, os.FileMode(0640)) @@ -98,9 +94,7 @@ func TestVMConfigValid(t *testing.T) { err := config.Valid() assert.Error(err) - testDir, err := os.MkdirTemp("", "vmfactory-tmp-") - 
assert.Nil(err) - defer os.RemoveAll(testDir) + testDir := t.TempDir() config.HypervisorConfig = HypervisorConfig{ KernelPath: testDir, From 0d765bd08248887ad3084df0c5483e551295e91a Mon Sep 17 00:00:00 2001 From: Wang Xingxing Date: Wed, 30 Mar 2022 16:01:17 +0800 Subject: [PATCH 41/46] agent: fix container stop error with signal SIGRTMIN+3 The nix::sys::signal::Signal package api cannot deal with SIGRTMIN+3, directly use libc function to send the signal. Fixes: #3990 Signed-off-by: Wang Xingxing --- src/agent/rustjail/src/process.rs | 12 +++++++----- src/agent/src/rpc.rs | 18 ++++++------------ 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/src/agent/rustjail/src/process.rs b/src/agent/rustjail/src/process.rs index 192aefc4e..cced9b98f 100644 --- a/src/agent/rustjail/src/process.rs +++ b/src/agent/rustjail/src/process.rs @@ -8,8 +8,8 @@ use std::fs::File; use std::os::unix::io::RawFd; use tokio::sync::mpsc::Sender; +use nix::errno::Errno; use nix::fcntl::{fcntl, FcntlArg, OFlag}; -use nix::sys::signal::{self, Signal}; use nix::sys::wait::{self, WaitStatus}; use nix::unistd::{self, Pid}; use nix::Result; @@ -80,7 +80,7 @@ pub struct Process { pub trait ProcessOperations { fn pid(&self) -> Pid; fn wait(&self) -> Result; - fn signal(&self, sig: Signal) -> Result<()>; + fn signal(&self, sig: libc::c_int) -> Result<()>; } impl ProcessOperations for Process { @@ -92,8 +92,10 @@ impl ProcessOperations for Process { wait::waitpid(Some(self.pid()), None) } - fn signal(&self, sig: Signal) -> Result<()> { - signal::kill(self.pid(), Some(sig)) + fn signal(&self, sig: libc::c_int) -> Result<()> { + let res = unsafe { libc::kill(self.pid().into(), sig) }; + + Errno::result(res).map(drop) } } @@ -281,6 +283,6 @@ mod tests { // signal to every process in the process // group of the calling process. process.pid = 0; - assert!(process.signal(Signal::SIGCONT).is_ok()); + assert!(process.signal(libc::SIGCONT).is_ok()); } } diff --git a/src/agent/src/rpc.rs b/src/agent/src/rpc.rs index e0c205cbe..77b18a32e 100644 --- a/src/agent/src/rpc.rs +++ b/src/agent/src/rpc.rs @@ -40,8 +40,7 @@ use rustjail::specconv::CreateOpts; use nix::errno::Errno; use nix::mount::MsFlags; -use nix::sys::signal::Signal; -use nix::sys::{signal, stat}; +use nix::sys::stat; use nix::unistd::{self, Pid}; use rustjail::cgroups::Manager; use rustjail::process::ProcessOperations; @@ -71,7 +70,6 @@ use tracing_opentelemetry::OpenTelemetrySpanExt; use tracing::instrument; use libc::{self, c_char, c_ushort, pid_t, winsize, TIOCSWINSZ}; -use std::convert::TryFrom; use std::fs; use std::os::unix::fs::MetadataExt; use std::os::unix::prelude::PermissionsExt; @@ -401,20 +399,15 @@ impl AgentService { "exec-id" => eid.clone(), ); - let mut sig = Signal::try_from(req.signal as i32).map_err(|e| { - anyhow!(e).context(format!( - "failed to convert {:?} to signal (container-id: {}, exec-id: {})", - req.signal, cid, eid - )) - })?; + let mut sig: libc::c_int = req.signal as libc::c_int; { let mut sandbox = s.lock().await; let p = sandbox.find_container_process(cid.as_str(), eid.as_str())?; // For container initProcess, if it hasn't installed handler for "SIGTERM" signal, // it will ignore the "SIGTERM" signal sent to it, thus send it "SIGKILL" signal // instead of "SIGTERM" to terminate it. 
- if p.init && sig == Signal::SIGTERM && !is_signal_handled(p.pid, sig as u32) { - sig = Signal::SIGKILL; + if p.init && sig == libc::SIGTERM && !is_signal_handled(p.pid, sig as u32) { + sig = libc::SIGKILL; } p.signal(sig)?; } @@ -440,7 +433,8 @@ impl AgentService { let pids = self.get_pids(&cid).await?; for pid in pids.iter() { - if let Err(err) = signal::kill(Pid::from_raw(*pid), Some(sig)) { + let res = unsafe { libc::kill(*pid, sig) }; + if let Err(err) = Errno::result(res).map(drop) { warn!( sl!(), "signal failed"; From 0d5f80b803987dab1e5fa12b5ca5fc9954dd9b89 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Thu, 31 Mar 2022 13:19:01 +0200 Subject: [PATCH 42/46] versions: Bump firecracker to v0.23.4 This release changes Docker images repository from DockerHub to Amazon ECR. This resolves the `You have reached your pull rate limit` error when building the firecracker tarball. Fixes #4001 Signed-off-by: Greg Kurz --- versions.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/versions.yaml b/versions.yaml index 1a3350e09..afd8b27f9 100644 --- a/versions.yaml +++ b/versions.yaml @@ -83,7 +83,7 @@ assets: uscan-url: >- https://github.com/firecracker-microvm/firecracker/tags .*/v?(\d\S+)\.tar\.gz - version: "v0.23.1" + version: "v0.23.4" qemu: description: "VMM that uses KVM" From 2b91dcfeef692fc5cf3cee912920ffb9decf245d Mon Sep 17 00:00:00 2001 From: Gabriela Cervantes Date: Thu, 31 Mar 2022 16:25:47 +0000 Subject: [PATCH 43/46] docs: Remove kata-proxy reference This PR removes the kata-proxy reference from this document as it is not longer a component in kata 2.0 Fixes #4013 Signed-off-by: Gabriela Cervantes --- docs/how-to/how-to-use-kata-containers-with-acrn.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/how-to/how-to-use-kata-containers-with-acrn.md b/docs/how-to/how-to-use-kata-containers-with-acrn.md index 3bb038b9f..9679cd0c2 100644 --- a/docs/how-to/how-to-use-kata-containers-with-acrn.md +++ b/docs/how-to/how-to-use-kata-containers-with-acrn.md @@ -101,7 +101,7 @@ Start an ACRN based Kata Container, $ sudo docker run -ti --runtime=kata-runtime busybox sh ``` -You will see ACRN(`acrn-dm`) is now running on your system, as well as a `kata-shim`, `kata-proxy`. You should obtain an interactive shell prompt. Verify that all the Kata processes terminate once you exit the container. +You will see ACRN(`acrn-dm`) is now running on your system, as well as a `kata-shim`. You should obtain an interactive shell prompt. Verify that all the Kata processes terminate once you exit the container. 
```bash $ ps -ef | grep -E "kata|acrn" From c9e24433d845821029395dd98ea09d1cda63b43f Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 1 Apr 2022 06:23:21 +0000 Subject: [PATCH 44/46] release: Kata Containers 2.5.0-alpha0 - agent: fix container stop error with signal SIGRTMIN+3 - doc: Improve kata-deploy README.md by changing sh blocks to bash blocks - docs: Remove kata-proxy reference - kata-monitor: fix duplicated output when printing usage - Stop getting OOM events from agent for "ttrpc closed" error - tools/packaging: Fix error path in `kata-deploy-binaries.sh -s` - kata-deploy: fix version bump from -rc to stable - release: Include all the rust vendored code into the vendored tarball - docs: Remove VPP documentation - runtime: Remove the explicit VirtioMem set and fix the comment - tools/packaging/kata-deploy: Copy install_yq.sh before starting parallel builds - docs: Remove kata-proxy references in documentation - agent: Signal the whole process group - osbuilder/qat: don't pull kata sources if exist - docs: fix markdown issues in how-to-run-docker-with-kata.md - osbuilder/qat: use centos as base OS - docs: Update vcpu handling document - Agent: fix unneeded late initialization lint - static-build,clh: Add the ability to build from a PR - Don't use a globally installed mock hook for hook tests - ci: Weekly check whether the docs url is alive - Multistrap Ubuntu & enable cross-building guest - device: using const strings for block-driver option instead of hard coding - doc: update Intel SGX use cases document - tools: update QEMU to 6.2 - action: Update link for format patch documentation - runtime: properly handle ESRCH error when signaling container - docs: Update k8s documentation - rustjail: optimization, merged several writelns into one - doc: fix kata-deploy README typo - versions: Upgrade to Cloud Hypervisor v22.1 - Add debug and self-test control options to Kata Manager - scripts: Change here document delimiters - agent: add tests for get_memory_info function - CI: Update GHA secret name - tools: release: Do not consider release candidates as stable releases - kernel: fix cve-2022-0847 - docs: Update contact link in runtime README - Improve error checking of hugepage allocation - CI: Create GHA to add PR sizing label - release: Revert kata-deploy changes after 2.4.0-rc0 release 2b91dcfe docs: Remove kata-proxy reference 0d765bd0 agent: fix container stop error with signal SIGRTMIN+3 a63bbf97 kata-monitor: fix duplicated output when printing usage 9e4ca0c4 doc: Improve kata-deploy README.md by changing sh blocks to bash blocks a779e19b tools/packaging: Fix error path in 'kata-deploy-binaries.sh -s' 0baebd2b tools/packaging: Fix usage of kata-deploy-binaries.sh 3606923a workflows,release: Ship *all* the rust vendored code 2eb07455 tools: Add a generate_vendor.sh script 5e1c30d4 runtime: add logs around sandbox monitor fb8be961 runtime: stop getting OOM events when ttrpc: closed error 93d03cc0 kata-deploy: fix version bump from -rc to stable a9314023 docs: Remove kata-proxy references in documentation 66f05c5b runtime: Remove the explicit VirtioMem set and fix the comment 0928eb9f agent: Kill the all the container processes of the same cgroup c2796327 osbuilder/qat: don't pull kata sources if exist 154c8b03 tools/packaging/kata-deploy: Copy install_yq.sh in a dedicated script 1ed7da8f packaging: Eliminate TTY_OPT and NO_TTY variables in kata-deploy bad859d2 tools/packaging/kata-deploy/local-build: Add build to gitignore 19f372b5 runtime: Add more debug logs for container io 
stream copy 459f4bfe osbuilder/qat: use centos as base OS 9a5b4770 docs: Update vcpu handling document ecf71d6d docs: Remove VPP documentation c77e34de runtime: Move mock hook source 86723b51 virtcontainers: Remove unused install/uninstall targets 0e83c95f virtcontainers: Run mock hook from build tree rather than system bin dir 77434864 docs: fix markdown issues in how-to-run-docker-with-kata.md 32131cb8 Agent: fix unneeded late initialization lint e65db838 virtcontainers: Remove VC_BIN_DIR c20ad283 virtcontainers: Remove unused Makefile defines c776bdf4 virtcontainers: Remove unused parameter from go-test.sh ebec6903 static-build,clh: Add the ability to build from a PR 24b29310 doc: update Intel SGX use cases document 18d4d7fb tools: update QEMU to 6.2 62351637 action: Update link for format patch documentation aa5ae6b1 runtime: Properly handle ESRCH error when signaling container efa19c41 device: use const strings for block-driver option instead of hard coding dacf6e39 doc: fix filename typo 92ce5e2d rustjail: optimization, merged several writelns into one 7a18e32f versions: Upgrade to Cloud Hypervisor v22.1 5c434270 docs: Update k8s documentation 5d6d39be scripts: Change here document delimiters be12baf3 manager: Change here documents to use standard delimiter 9576a7da manager: Add options to change self test behaviour d4d65bed manager: Add option to enable component debug 019da91d manager: Whitespace fix d234cb76 manager: Create containerd link c088a3f3 agent: add tests for get_memory_info function 4b1e2f52 CI: Update GHA secret name ffdf961a docs: Update contact link in runtime README 5ec7592d kernel: fix cve-2022-0847 6a850899 CI: Create GHA to add PR sizing label 2b41d275 release: Revert kata-deploy changes after 2.4.0-rc0 release 4adf93ef tools: release: Do not consider release candidates as stable releases 72f7e9e3 osbuilder: Multistrap Ubuntu df511bf1 packaging: Enable cross-building agent 0a313eda osbuilder: Fix use of LIBC in rootfs.sh 2c86b956 osbuilder: Simplify Rust installation 0072cc2b osbuilder: Remove musl installations 5c3e5536 osbuilder: apk add --no-cache 42e35505 agent: Verify that we allocated as many hugepages as we need 608e003a agent: Don't attempt to create directories for hugepage configuration 168fadf1 ci: Weekly check whether the docs url is alive Signed-off-by: Peng Tao --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index cbc70e35b..1f5195967 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.0-rc0 +2.5.0-alpha0 From 98750d792b35a2355772a26d36fbf932fadc7078 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Fri, 1 Apr 2022 08:06:00 +0200 Subject: [PATCH 45/46] clh: Expose service offload configuration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This configuration option is valid for all the hypervisor that are going to be used with the confidential containers effort, thus exposing the configuration option for Cloud Hypervisor as well. 
Fixes: #4022 Signed-off-by: Fabiano Fidêncio --- src/runtime/config/configuration-clh.toml.in | 27 ++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/runtime/config/configuration-clh.toml.in b/src/runtime/config/configuration-clh.toml.in index fcf95b2e2..e8b6b8c8b 100644 --- a/src/runtime/config/configuration-clh.toml.in +++ b/src/runtime/config/configuration-clh.toml.in @@ -324,3 +324,30 @@ experimental=@DEFAULTEXPFEATURES@ # If enabled, user can run pprof tools with shim v2 process through kata-monitor. # (default: false) # enable_pprof = true + +# WARNING: All the options in the following section have not been implemented yet. +# This section was added as a placeholder. DO NOT USE IT! +[image] +# Container image service. +# +# Offload the CRI image management service to the Kata agent. +# (default: false) +#service_offload = true + +# Container image decryption keys provisioning. +# Applies only if service_offload is true. +# Keys can be provisioned locally (e.g. through a special command or +# a local file) or remotely (usually after the guest is remotely attested). +# The provision setting is a complete URL that lets the Kata agent decide +# which method to use in order to fetch the keys. +# +# Keys can be stored in a local file, in a measured and attested initrd: +#provision=data:///local/key/file +# +# Keys could be fetched through a special command or binary from the +# initrd (guest) image, e.g. a firmware call: +#provision=file:///path/to/bin/fetcher/in/guest +# +# Keys can be remotely provisioned. The Kata agent fetches them from e.g. +# a HTTPS URL: +#provision=https://my-key-broker.foo/tenant/ From 4405b188e8d646d36b45084c826cf66805ba21a2 Mon Sep 17 00:00:00 2001 From: George Ntoutsos Date: Wed, 30 Mar 2022 18:57:04 +0300 Subject: [PATCH 46/46] docs: Add a firecracker installation guide Add info on setting up kata with firecracker. Fixes: #3555 Signed-off-by: George Ntoutsos Signed-off-by: Anastassios Nanos --- docs/how-to/README.md | 5 + ...to-use-kata-containers-with-firecracker.md | 254 ++++++++++++++++++ 2 files changed, 259 insertions(+) create mode 100644 docs/how-to/how-to-use-kata-containers-with-firecracker.md diff --git a/docs/how-to/README.md b/docs/how-to/README.md index 87a581080..6dd163ce7 100644 --- a/docs/how-to/README.md +++ b/docs/how-to/README.md @@ -15,6 +15,11 @@ - `qemu` - `cloud-hypervisor` - `firecracker` + + In the case of `firecracker` the use of a block device `snapshotter` is needed + for the VM rootfs. Refer to the following guide for additional configuration + steps: + - [Setup Kata containers with `firecracker`](how-to-use-kata-containers-with-firecracker.md) - `ACRN` While `qemu` , `cloud-hypervisor` and `firecracker` work out of the box with installation of Kata, diff --git a/docs/how-to/how-to-use-kata-containers-with-firecracker.md b/docs/how-to/how-to-use-kata-containers-with-firecracker.md new file mode 100644 index 000000000..03f533ef7 --- /dev/null +++ b/docs/how-to/how-to-use-kata-containers-with-firecracker.md @@ -0,0 +1,254 @@ +# Configure Kata Containers to use Firecracker + +This document provides an overview on how to run Kata Containers with the AWS Firecracker hypervisor. + +## Introduction + +AWS Firecracker is an open source virtualization technology that is purpose-built for creating and managing secure, multi-tenant container and function-based services that provide serverless operational models. 
AWS Firecracker runs workloads in lightweight virtual machines, called `microVMs`, which combine the security and isolation properties provided by hardware virtualization technology with the speed and flexibility of Containers. + +Please refer to AWS Firecracker [documentation](https://github.com/firecracker-microvm/firecracker/blob/main/docs/getting-started.md) for more details. + +## Pre-requisites + +This document requires the presence of Kata Containers on your system. Install using the instructions available through the following links: + +- Kata Containers [automated installation](../install/README.md) + +- Kata Containers manual installation: Automated installation does not seem to be supported for Clear Linux, so please use [manual installation](../Developer-Guide.md) steps. +> **Note:** Create rootfs image and not initrd image. + +## Install AWS Firecracker + +Kata Containers only support AWS Firecracker v0.23.4 ([yet](https://github.com/kata-containers/kata-containers/pull/1519)). +To install Firecracker we need to get the `firecracker` and `jailer` binaries: + +```bash +$ release_url="https://github.com/firecracker-microvm/firecracker/releases" +$ version="v0.23.1" +$ arch=`uname -m` +$ curl ${release_url}/download/${version}/firecracker-${version}-${arch} -o firecracker +$ curl ${release_url}/download/${version}/jailer-${version}-${arch} -o jailer +$ chmod +x jailer firecracker +``` + +To make the binaries available from the default system `PATH` it is recommended to move them to `/usr/local/bin` or add a symbolic link: + +```bash +$ sudo ln -s $(pwd)/firecracker /usr/local/bin +$ sudo ln -s $(pwd)/jailer /usr/local/bin +``` + +More details can be found in [AWS Firecracker docs](https://github.com/firecracker-microvm/firecracker/blob/main/docs/getting-started.md) + +In order to run Kata with AWS Firecracker a block device as the backing store for a VM is required. To interact with `containerd` and Kata we use the `devmapper` `snapshotter`. + +## Configure `devmapper` + +To check support for your `containerd` installation, you can run: + +``` +$ ctr plugins ls |grep devmapper +``` + +if the output of the above command is: + +``` +io.containerd.snapshotter.v1 devmapper linux/amd64 ok +``` +then you can skip this section and move on to `Configure Kata Containers with AWS Firecracker` + +If the output of the above command is: + +``` +io.containerd.snapshotter.v1 devmapper linux/amd64 error +``` + +then we need to setup `devmapper` `snapshotter`. Based on a [very useful +guide](https://docs.docker.com/storage/storagedriver/device-mapper-driver/) +from docker, we can set it up using the following scripts: + +> **Note:** The following scripts assume a 100G sparse file for storing container images, a 10G sparse file for the thin-provisioning pool and 10G base image files for any sandboxed container created. This means that we will need at least 10GB free space. + +``` +#!/bin/bash +set -ex + +DATA_DIR=/var/lib/containerd/devmapper +POOL_NAME=devpool + +mkdir -p ${DATA_DIR} + +# Create data file +sudo touch "${DATA_DIR}/data" +sudo truncate -s 100G "${DATA_DIR}/data" + +# Create metadata file +sudo touch "${DATA_DIR}/meta" +sudo truncate -s 10G "${DATA_DIR}/meta" + +# Allocate loop devices +DATA_DEV=$(sudo losetup --find --show "${DATA_DIR}/data") +META_DEV=$(sudo losetup --find --show "${DATA_DIR}/meta") + +# Define thin-pool parameters. +# See https://www.kernel.org/doc/Documentation/device-mapper/thin-provisioning.txt for details. 
+SECTOR_SIZE=512 +DATA_SIZE="$(sudo blockdev --getsize64 -q ${DATA_DEV})" +LENGTH_IN_SECTORS=$(bc <<< "${DATA_SIZE}/${SECTOR_SIZE}") +DATA_BLOCK_SIZE=128 +LOW_WATER_MARK=32768 + +# Create a thin-pool device +sudo dmsetup create "${POOL_NAME}" \ + --table "0 ${LENGTH_IN_SECTORS} thin-pool ${META_DEV} ${DATA_DEV} ${DATA_BLOCK_SIZE} ${LOW_WATER_MARK}" + +cat << EOF +# +# Add this to your config.toml configuration file and restart `containerd` daemon +# +[plugins] + [plugins.devmapper] + pool_name = "${POOL_NAME}" + root_path = "${DATA_DIR}" + base_image_size = "10GB" + discard_blocks = true +EOF +``` + +Make it executable and run it: + +```bash +$ sudo chmod +x ~/scripts/devmapper/create.sh +$ cd ~/scripts/devmapper/ +$ sudo ./create.sh +``` + +Now, we can add the `devmapper` configuration provided from the script to `/etc/containerd/config.toml`. +> **Note:** If you are using the default `containerd` configuration (`containerd config default >> /etc/containerd/config.toml`), you may need to edit the existing `[plugins."io.containerd.snapshotter.v1.devmapper"]`configuration. +Save and restart `containerd`: + + +```bash +$ sudo systemctl restart containerd +``` + +We can use `dmsetup` to verify that the thin-pool was created successfully. + +```bash +$ sudo dmsetup ls +``` + + We should also check that `devmapper` is registered and running: + +```bash +$ sudo ctr plugins ls | grep devmapper +``` + +This script needs to be run only once, while setting up the `devmapper` `snapshotter` for `containerd`. Afterwards, make sure that on each reboot, the thin-pool is initialized from the same data directory. Otherwise, all the fetched containers (or the ones that you have created) will be re-initialized. A simple script that re-creates the thin-pool from the same data directory is shown below: + +``` +#!/bin/bash +set -ex + +DATA_DIR=/var/lib/containerd/devmapper +POOL_NAME=devpool + +# Allocate loop devices +DATA_DEV=$(sudo losetup --find --show "${DATA_DIR}/data") +META_DEV=$(sudo losetup --find --show "${DATA_DIR}/meta") + +# Define thin-pool parameters. +# See https://www.kernel.org/doc/Documentation/device-mapper/thin-provisioning.txt for details. +SECTOR_SIZE=512 +DATA_SIZE="$(sudo blockdev --getsize64 -q ${DATA_DEV})" +LENGTH_IN_SECTORS=$(bc <<< "${DATA_SIZE}/${SECTOR_SIZE}") +DATA_BLOCK_SIZE=128 +LOW_WATER_MARK=32768 + +# Create a thin-pool device +sudo dmsetup create "${POOL_NAME}" \ + --table "0 ${LENGTH_IN_SECTORS} thin-pool ${META_DEV} ${DATA_DEV} ${DATA_BLOCK_SIZE} ${LOW_WATER_MARK}" +``` + +We can create a systemd service to run the above script on each reboot: + +```bash +$ sudo nano /lib/systemd/system/devmapper_reload.service +``` + +The service file: + +``` +[Unit] +Description=Devmapper reload script + +[Service] +ExecStart=/path/to/script/reload.sh + +[Install] +WantedBy=multi-user.target +``` + +Enable the newly created service: + +```bash +$ sudo systemctl daemon-reload +$ sudo systemctl enable devmapper_reload.service +$ sudo systemctl start devmapper_reload.service +``` + +## Configure Kata Containers with AWS Firecracker + +To configure Kata Containers with AWS Firecracker, copy the generated `configuration-fc.toml` file when building the `kata-runtime` to either `/etc/kata-containers/configuration-fc.toml` or `/usr/share/defaults/kata-containers/configuration-fc.toml`. + +The following command shows full paths to the `configuration.toml` files that the runtime loads. It will use the first path that exists. 
(Please make sure the kernel and image paths are set correctly in the `configuration.toml` file) + +```bash +$ sudo kata-runtime --show-default-config-paths +``` + +## Configure `containerd` +Next, we need to configure containerd. Add a file in your path (e.g. `/usr/local/bin/containerd-shim-kata-fc-v2`) with the following contents: + +``` +#!/bin/bash +KATA_CONF_FILE=/etc/containers/configuration-fc.toml /usr/local/bin/containerd-shim-kata-v2 $@ +``` +> **Note:** You may need to edit the paths of the configuration file and the `containerd-shim-kata-v2` to correspond to your setup. + +Make it executable: + +```bash +$ sudo chmod +x /usr/local/bin/containerd-shim-kata-fc-v2 +``` + +Add the relevant section in `containerd`’s `config.toml` file (`/etc/containerd/config.toml`): + +``` +[plugins.cri.containerd.runtimes] + [plugins.cri.containerd.runtimes.kata-fc] + runtime_type = "io.containerd.kata-fc.v2" +``` + +> **Note:** If you are using the default `containerd` configuration (`containerd config default >> /etc/containerd/config.toml`), +> the configuration should change to : +``` +[plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-fc] + runtime_type = "io.containerd.kata-fc.v2" +``` + +Restart `containerd`: + +```bash +$ sudo systemctl restart containerd +``` + +## Verify the installation + +We are now ready to launch a container using Kata with Firecracker to verify that everything worked: + +```bash +$ sudo ctr images pull --snapshotter devmapper docker.io/library/ubuntu:latest +$ sudo ctr run --snapshotter devmapper --runtime io.containerd.run.kata-fc.v2 -t --rm docker.io/library/ubuntu +```