CCv0: Merge from main -- August 1st

Conflicts:
	src/runtime/pkg/katautils/config.go
	src/runtime/virtcontainers/container.go
	src/runtime/virtcontainers/hypervisor.go
	src/runtime/virtcontainers/qemu_arch_base.go
	src/runtime/virtcontainers/sandbox.go
	tests/integration/kubernetes/gha-run.sh
	tests/integration/kubernetes/setup.sh
	tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml
	tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh
	tools/packaging/kata-deploy/scripts/kata-deploy.sh
	tools/packaging/kernel/kata_config_version
	versions.yaml

Fixes: #7433

Signed-off-by: Fabiano Fidêncio <fabiano.fidencio@intel.com>
This commit is contained in:
Fabiano Fidêncio
2023-08-01 17:14:17 +02:00
426 changed files with 64309 additions and 2456 deletions

View File

@@ -0,0 +1,75 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o nounset
set -o pipefail
kata_tarball_dir="${2:-kata-artifacts}"
cri_containerd_dir="$(dirname "$(readlink -f "$0")")"
source "${cri_containerd_dir}/../../common.bash"
# Install everything needed to run the cri-containerd tests: system
# packages, yq, Go, GitHub-released binaries (containerd, cri-tools) and a
# clone of the containerd sources (required to build/run the test suite).
#
# Globals read: repo_root_dir, CONTAINERD_VERSION (from common.bash / CI env).
function install_dependencies() {
	info "Installing the dependencies needed for running the cri-containerd tests"

	# Dependency list of projects that we can rely on the system packages
	# - build-essential
	#   - Theoretically we only need `make`, but doesn't hurt to install
	#     the whole build-essential group
	# - jq
	# - podman-docker
	#   - one of the tests rely on docker to pull an image.
	#     we've decided to go for podman, instead, as it does *not* bring
	#     containerd as a dependency
	declare -a system_deps=(
		build-essential
		jq
		podman-docker
	)

	sudo apt-get update
	sudo apt-get -y install "${system_deps[@]}"

	ensure_yq
	"${repo_root_dir}/tests/install_go.sh" -p

	# Dependency list of projects that we can install them
	# directly from their releases on GitHub:
	# - containerd
	#   - cri-container-cni release tarball already includes CNI plugins
	# - cri-tools
	declare -a github_deps
	github_deps[0]="cri_containerd:$(get_from_kata_deps "externals.containerd.${CONTAINERD_VERSION}")"
	github_deps[1]="cri_tools:$(get_from_kata_deps "externals.critools.latest")"

	for github_dep in "${github_deps[@]}"; do
		IFS=":" read -r -a dep <<< "${github_dep}"
		install_${dep[0]} "${dep[1]}"
	done

	# Clone containerd as we'll need to build it in order to run the tests
	# base_version: The version to be installed in the ${major}.${minor} format
	clone_cri_containerd "$(get_from_kata_deps "externals.containerd.${CONTAINERD_VERSION}")"
}
function run() {
	# Placeholder: no cri-containerd tests are wired up here yet; just log
	# the hypervisor that would be exercised and report success.
	info "Running cri-containerd tests using ${KATA_HYPERVISOR} hypervisor"

	return 0
}
# Entry point: dispatch on the first CLI argument; unknown actions are fatal.
function main() {
	action="${1:-}"

	case "${action}" in
		install-dependencies)
			install_dependencies
			;;
		install-kata)
			install_kata
			;;
		run)
			run
			;;
		*)
			>&2 die "Invalid argument"
			;;
	esac
}
main "$@"

View File

@@ -0,0 +1,493 @@
#!/bin/bash
#
# Copyright (c) 2017-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
[[ "${DEBUG}" != "" ]] && set -o xtrace
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
source "${SCRIPT_PATH}/../../common.bash"

# runc is installed in /usr/local/sbin/ add that path
export PATH="$PATH:/usr/local/sbin"
# golang is installed in /usr/local/go/bin/ add that path
export PATH="$PATH:/usr/local/go/bin"

# Runtime to be used for testing
RUNTIME=${RUNTIME:-containerd-shim-kata-v2}
# When non-empty, the VM template factory tests are enabled (see ci_config).
FACTORY_TEST=${FACTORY_TEST:-""}
KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
# When "true", a devmapper snapshotter section is added to the containerd
# config (see create_containerd_config).
USE_DEVMAPPER="${USE_DEVMAPPER:-false}"
ARCH=$(uname -m)

containerd_runtime_type="io.containerd.kata-${KATA_HYPERVISOR}.v2"
containerd_shim_path="$(command -v containerd-shim)"

#containerd config file
# Scratch directory for the generated configs and test reports; removed by
# the EXIT trap (cleanup).
readonly tmp_dir=$(mktemp -t -d test-cri-containerd.XXXX)
export REPORT_DIR="${tmp_dir}"
readonly CONTAINERD_CONFIG_FILE="${tmp_dir}/test-containerd-config"
readonly CONTAINERD_CONFIG_FILE_TEMP="${CONTAINERD_CONFIG_FILE}.temp"
readonly default_containerd_config="/etc/containerd/config.toml"
readonly default_containerd_config_backup="$CONTAINERD_CONFIG_FILE.backup"
readonly kata_config="/etc/kata-containers/configuration.toml"
readonly kata_config_backup="$kata_config.backup"
readonly default_kata_config="/opt/kata/share/defaults/kata-containers/configuration.toml"
# Prepare the Kata configuration for the CI run: back up any pre-existing
# config (or seed one from the default), optionally enable the VM template
# factory (Ubuntu + FACTORY_TEST only), and turn on runtime debug.
#
# Globals: kata_config / kata_config_backup (written), default_kata_config,
#          FACTORY_TEST, RUNTIME (read).
function ci_config() {
	sudo mkdir -p $(dirname "${kata_config}")
	# If a config already exists keep a backup (restored by ci_cleanup);
	# otherwise start from the packaged default.
	[ -f "$kata_config" ] && sudo cp "$kata_config" "$kata_config_backup" || \
		sudo cp "$default_kata_config" "$kata_config"

	source /etc/os-release || source /usr/lib/os-release
	ID=${ID:-""}
	if [ "$ID" == ubuntu ]; then
		# https://github.com/kata-containers/tests/issues/352
		if [ -n "${FACTORY_TEST}" ]; then
			sudo sed -i -e 's/^#enable_template.*$/enable_template = true/g' "${kata_config}"
			echo "init vm template"
			sudo -E PATH=$PATH "$RUNTIME" factory init
		fi
	fi

	echo "enable debug for kata-runtime"
	sudo sed -i 's/^#enable_debug =/enable_debug =/g' ${kata_config}
}
# Undo the changes made by ci_config(): destroy the VM template factory (if
# enabled), restore the original containerd config, and restore or remove
# the Kata configuration.
#
# Globals read: FACTORY_TEST, RUNTIME, default_containerd_config_backup,
#               default_containerd_config, kata_config_backup, kata_config.
function ci_cleanup() {
	source /etc/os-release || source /usr/lib/os-release

	if [ -n "${FACTORY_TEST}" ]; then
		echo "destroy vm template"
		sudo -E PATH=$PATH "$RUNTIME" factory destroy
	fi

	if [ -e "$default_containerd_config_backup" ]; then
		echo "restore containerd config"
		sudo systemctl stop containerd
		sudo cp "$default_containerd_config_backup" "$default_containerd_config"
	fi

	# Restore the backed-up Kata config if one exists; otherwise the config
	# was seeded from the default by ci_config() and is simply removed.
	# An explicit if/else (instead of `A && B || C`) guarantees the config
	# is not deleted when the restore itself fails.
	if [ -f "$kata_config_backup" ]; then
		sudo mv "$kata_config_backup" "$kata_config"
	else
		sudo rm "$kata_config"
	fi
}
# Generate the containerd config (${CONTAINERD_CONFIG_FILE}) wired up for a
# given runtime.
#
# $1 - runtime name ("runc" or "kata-<hypervisor>"); used to locate the
#      matching containerd-shim-<runtime>-v2 binary.
# $2 - optional, defaults to 0; when 1, pass the io.katacontainers.*
#      pod/container annotations through to the runtime.
#
# Globals read: containerd_runtime_type, containerd_shim_path, USE_DEVMAPPER.
function create_containerd_config() {
	local runtime="$1"
	# kata_annotations is set to 1 if caller want containerd setup with
	# kata annotations support.
	local kata_annotations=${2-0}
	[ -n "${runtime}" ] || die "need runtime to create config"

	local runtime_type="${containerd_runtime_type}"
	if [ "${runtime}" == "runc" ]; then
		runtime_type="io.containerd.runc.v2"
	fi
	local containerd_runtime=$(command -v "containerd-shim-${runtime}-v2")

	# NOTE: the devmapper sed below anchors on `^\[plugins\]`, so the
	# heredoc keeps `[plugins]` at the start of a line.
	cat << EOF | sudo tee "${CONTAINERD_CONFIG_FILE}"
[debug]
level = "debug"
[plugins]
[plugins.cri]
[plugins.cri.containerd]
default_runtime_name = "$runtime"
[plugins.cri.containerd.runtimes.${runtime}]
runtime_type = "${runtime_type}"
$( [ $kata_annotations -eq 1 ] && \
echo 'pod_annotations = ["io.katacontainers.*"]' && \
echo ' container_annotations = ["io.katacontainers.*"]'
)
[plugins.cri.containerd.runtimes.${runtime}.options]
Runtime = "${containerd_runtime}"
[plugins.linux]
shim = "${containerd_shim_path}"
EOF

	if [ "$USE_DEVMAPPER" == "true" ]; then
		# Inject a devmapper snapshotter section right after [plugins].
		sudo sed -i 's|^\(\[plugins\]\).*|\1\n \[plugins.devmapper\]\n pool_name = \"contd-thin-pool\"\n base_image_size = \"4096MB\"|' ${CONTAINERD_CONFIG_FILE}
		echo "Devicemapper configured"
		cat "${CONTAINERD_CONFIG_FILE}"
	fi
}
# EXIT handler: restore the system configuration and drop the scratch dir.
function cleanup() {
	ci_cleanup
	if [ -d "$tmp_dir" ]; then
		rm -rf "${tmp_dir}"
	fi
}
trap cleanup EXIT
# ERR handler: dump the containerd log (if one was produced under
# ${REPORT_DIR}) to help debug a failed run.
function err_report() {
	local log_file="${REPORT_DIR}/containerd.log"

	# Nothing to report when no log was written.
	[ -f "$log_file" ] || return 0

	echo "ERROR: containerd log :"
	echo "-------------------------------------"
	cat "${log_file}"
	echo "-------------------------------------"
}
# Sanity-check the containerd daemon setup by running a single
# cri-integration test (TestImageLoad) against the plain runc runtime.
# Must be called from within the containerd source tree (see main).
function check_daemon_setup() {
	info "containerd(cri): Check daemon works with runc"
	create_containerd_config "runc"

	# containerd cri-integration will modify the passed in config file. Let's
	# give it a temp one.
	cp "$CONTAINERD_CONFIG_FILE" "$CONTAINERD_CONFIG_FILE_TEMP"

	# in some distros(AlibabaCloud), there is no btrfs-devel package available,
	# so pass GO_BUILDTAGS="no_btrfs" to make to not use btrfs.
	sudo -E PATH="${PATH}:/usr/local/bin" \
		REPORT_DIR="${REPORT_DIR}" \
		FOCUS="TestImageLoad" \
		RUNTIME="" \
		CONTAINERD_CONFIG_FILE="$CONTAINERD_CONFIG_FILE_TEMP" \
		make GO_BUILDTAGS="no_btrfs" -e cri-integration
}
# Start a test pod and container through crictl.
#
# $1 - optional, defaults to 0; when 1, do not generate container.yaml
#      (the caller — TestContainerSwap — provides its own).
#
# Side effects: backs up and replaces the system containerd config (restored
# by testContainerStop), restarts containerd, and sets the globals $podid
# and $cid used by the Test* functions.
function testContainerStart() {
	# no_container_yaml set to 1 will not create container_yaml
	# because caller has created its own container_yaml.
	no_container_yaml=${1:-0}

	local pod_yaml=${REPORT_DIR}/pod.yaml
	local container_yaml=${REPORT_DIR}/container.yaml
	local image="busybox:latest"

	cat << EOF > "${pod_yaml}"
metadata:
  name: busybox-sandbox1
  namespace: default
  uid: busybox-sandbox1-uid
EOF

	#TestContainerSwap has created its own container_yaml.
	if [ $no_container_yaml -ne 1 ]; then
		cat << EOF > "${container_yaml}"
metadata:
  name: busybox-killed-vmm
  namespace: default
  uid: busybox-killed-vmm-uid
image:
  image: "$image"
command:
- top
EOF
	fi

	# Point the system containerd at the generated config, keeping a backup
	# that testContainerStop() restores.
	sudo cp "$default_containerd_config" "$default_containerd_config_backup"
	sudo cp $CONTAINERD_CONFIG_FILE "$default_containerd_config"

	restart_containerd_service

	sudo crictl pull $image
	# podid/cid are intentionally global.
	podid=$(sudo crictl runp $pod_yaml)
	cid=$(sudo crictl create $podid $container_yaml $pod_yaml)
	sudo crictl start $cid
}
# Stop and remove the pod started by testContainerStart() (global $podid),
# then restore the original system containerd configuration.
function testContainerStop() {
	info "stop pod $podid"
	sudo crictl stopp $podid
	info "remove pod $podid"
	sudo crictl rmp $podid

	sudo cp "$default_containerd_config_backup" "$default_containerd_config"
	restart_containerd_service
}
# Verify that SIGKILL-ing the VMM (QEMU) makes the shimv2 process exit,
# i.e. no stale shim processes are left behind. QEMU only.
function TestKilledVmmCleanup() {
	if [[ "${KATA_HYPERVISOR}" != "qemu" ]]; then
		info "TestKilledVmmCleanup is skipped for ${KATA_HYPERVISOR}, only QEMU is currently tested"
		return 0
	fi

	info "test killed vmm cleanup"

	testContainerStart

	# May expand to several PIDs; intentionally unquoted in the kill below.
	qemu_pid=$(ps aux|grep qemu|grep -v grep|awk '{print $2}')
	info "kill qemu $qemu_pid"
	sudo kill -SIGKILL $qemu_pid
	# sleep to let shimv2 exit
	sleep 1

	remained=$(ps aux|grep shimv2|grep -v grep || true)
	# Quote the expansion: a matching `ps` line contains spaces and an
	# unquoted `[ -z $remained ]` would fail with "too many arguments"
	# exactly when a stale shim exists.
	[ -z "$remained" ] || die "found remaining shimv2 process $remained"

	testContainerStop
	info "stop containerd"
}
# Check that `crictl update --memory` grows (and, with virtio-mem, shrinks)
# the guest memory as expected.
#
# $1 - 1 to test with virtio-mem enabled, 0 without.
#
# Only runs for QEMU and skips ppc64le/s390x; the virtio-mem variant is
# x86_64 only.
function TestContainerMemoryUpdate() {
	if [[ "${KATA_HYPERVISOR}" != "qemu" ]] || [[ "${ARCH}" == "ppc64le" ]] || [[ "${ARCH}" == "s390x" ]]; then
		return
	fi

	test_virtio_mem=$1

	if [ $test_virtio_mem -eq 1 ]; then
		if [[ "$ARCH" != "x86_64" ]]; then
			return
		fi
		info "Test container memory update with virtio-mem"

		sudo sed -i -e 's/^#enable_virtio_mem.*$/enable_virtio_mem = true/g' "${kata_config}"
	else
		info "Test container memory update without virtio-mem"

		sudo sed -i -e 's/^enable_virtio_mem.*$/#enable_virtio_mem = true/g' "${kata_config}"
	fi

	testContainerStart

	# MemTotal is in KiB; convert to bytes. The initial VM is expected at
	# ~2 GiB (up to 128 MiB of overhead tolerated).
	vm_size=$(($(sudo crictl exec $cid cat /proc/meminfo | grep "MemTotal:" | awk '{print $2}')*1024))
	if [ $vm_size -gt $((2*1024*1024*1024)) ] || [ $vm_size -lt $((2*1024*1024*1024-128*1024*1024)) ]; then
		testContainerStop
		die "The VM memory size $vm_size before update is not right"
	fi

	# After setting a 2 GiB container limit the VM is expected at ~4 GiB
	# (presumably base memory + limit — the checks below encode this).
	sudo crictl update --memory $((2*1024*1024*1024)) $cid
	sleep 1

	vm_size=$(($(sudo crictl exec $cid cat /proc/meminfo | grep "MemTotal:" | awk '{print $2}')*1024))
	if [ $vm_size -gt $((4*1024*1024*1024)) ] || [ $vm_size -lt $((4*1024*1024*1024-128*1024*1024)) ]; then
		testContainerStop
		die "The VM memory size $vm_size after increase is not right"
	fi

	# Shrinking is only possible with virtio-mem.
	if [ $test_virtio_mem -eq 1 ]; then
		sudo crictl update --memory $((1*1024*1024*1024)) $cid
		sleep 1

		vm_size=$(($(sudo crictl exec $cid cat /proc/meminfo | grep "MemTotal:" | awk '{print $2}')*1024))
		if [ $vm_size -gt $((3*1024*1024*1024)) ] || [ $vm_size -lt $((3*1024*1024*1024-128*1024*1024)) ]; then
			testContainerStop
			die "The VM memory size $vm_size after decrease is not right"
		fi
	fi

	testContainerStop
}
# Collect swap details from inside the container (global $cid) into the
# globals $swap_size (bytes), $swappiness and $swap_in_bytes.
function getContainerSwapInfo() {
	# SwapTotal is reported in KiB; convert to bytes.
	swap_size=$(($(sudo crictl exec $cid cat /proc/meminfo | grep "SwapTotal:" | awk '{print $2}')*1024))
	# NOTE: these below two checks only works on cgroup v1
	swappiness=$(sudo crictl exec $cid cat /sys/fs/cgroup/memory/memory.swappiness)
	swap_in_bytes=$(sudo crictl exec $cid cat /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes)
}
# Exercise guest swap driven by the io.katacontainers.container.resource.*
# annotations (swappiness, swap_in_bytes) in four configurations:
#   1. no swap device at all,
#   2. swappiness + swap_in_bytes + memory limit,
#   3. swappiness + memory limit (no swap_in_bytes),
#   4. swappiness only (no memory limit).
#
# QEMU/x86_64 only; relies on cgroup v1 files (see getContainerSwapInfo).
# Reconfigures containerd with annotation passthrough and restores the
# regular config at the end.
function TestContainerSwap() {
	if [[ "${KATA_HYPERVISOR}" != "qemu" ]] || [[ "${ARCH}" != "x86_64" ]]; then
		return
	fi

	local container_yaml=${REPORT_DIR}/container.yaml
	local image="busybox:latest"

	info "Test container with guest swap"

	# Containerd must pass the io.katacontainers.* annotations through.
	create_containerd_config "kata-${KATA_HYPERVISOR}" 1
	sudo sed -i -e 's/^#enable_guest_swap.*$/enable_guest_swap = true/g' "${kata_config}"

	# Test without swap device
	testContainerStart
	getContainerSwapInfo
	# Current default swappiness is 60
	if [ $swappiness -ne 60 ]; then
		testContainerStop
		die "The VM swappiness $swappiness without swap device is not right"
	fi
	# With no limit set, memsw.limit_in_bytes is effectively unlimited.
	if [ $swap_in_bytes -lt 1125899906842624 ]; then
		testContainerStop
		die "The VM swap_in_bytes $swap_in_bytes without swap device is not right"
	fi
	if [ $swap_size -ne 0 ]; then
		testContainerStop
		die "The VM swap size $swap_size without swap device is not right"
	fi
	testContainerStop

	# Test with swap device
	cat << EOF > "${container_yaml}"
metadata:
  name: busybox-swap
  namespace: default
  uid: busybox-swap-uid
annotations:
  io.katacontainers.container.resource.swappiness: "100"
  io.katacontainers.container.resource.swap_in_bytes: "1610612736"
linux:
  resources:
    memory_limit_in_bytes: 1073741824
image:
  image: "$image"
command:
- top
EOF

	testContainerStart 1
	getContainerSwapInfo
	testContainerStop

	if [ $swappiness -ne 100 ]; then
		die "The VM swappiness $swappiness with swap device is not right"
	fi
	if [ $swap_in_bytes -ne 1610612736 ]; then
		die "The VM swap_in_bytes $swap_in_bytes with swap device is not right"
	fi
	# Expected swap: swap_in_bytes - memory_limit_in_bytes = 512 MiB.
	if [ $swap_size -ne 536870912 ]; then
		die "The VM swap size $swap_size with swap device is not right"
	fi

	# Test without swap_in_bytes
	cat << EOF > "${container_yaml}"
metadata:
  name: busybox-swap
  namespace: default
  uid: busybox-swap-uid
annotations:
  io.katacontainers.container.resource.swappiness: "100"
linux:
  resources:
    memory_limit_in_bytes: 1073741824
image:
  image: "$image"
command:
- top
EOF

	testContainerStart 1
	getContainerSwapInfo
	testContainerStop

	if [ $swappiness -ne 100 ]; then
		die "The VM swappiness $swappiness without swap_in_bytes is not right"
	fi
	# swap_in_bytes is not set, it should be a value that bigger than 1125899906842624
	if [ $swap_in_bytes -lt 1125899906842624 ]; then
		die "The VM swap_in_bytes $swap_in_bytes without swap_in_bytes is not right"
	fi
	# Expected swap equals the memory limit (1 GiB).
	if [ $swap_size -ne 1073741824 ]; then
		die "The VM swap size $swap_size without swap_in_bytes is not right"
	fi

	# Test without memory_limit_in_bytes
	cat << EOF > "${container_yaml}"
metadata:
  name: busybox-swap
  namespace: default
  uid: busybox-swap-uid
annotations:
  io.katacontainers.container.resource.swappiness: "100"
image:
  image: "$image"
command:
- top
EOF

	testContainerStart 1
	getContainerSwapInfo
	testContainerStop

	if [ $swappiness -ne 100 ]; then
		die "The VM swappiness $swappiness without memory_limit_in_bytes is not right"
	fi
	# swap_in_bytes is not set, it should be a value that bigger than 1125899906842624
	if [ $swap_in_bytes -lt 1125899906842624 ]; then
		die "The VM swap_in_bytes $swap_in_bytes without memory_limit_in_bytes is not right"
	fi
	# With no limit, the expected swap is the default VM memory (2 GiB).
	if [ $swap_size -ne 2147483648 ]; then
		die "The VM swap size $swap_size without memory_limit_in_bytes is not right"
	fi

	# Restore the regular config (without annotation passthrough).
	create_containerd_config "kata-${KATA_HYPERVISOR}"
}
# k8s may restart docker which will impact on containerd stop
# Stop the containerd service.
#
# k8s may restart docker which will impact on containerd stop, so tear down
# kubelet first (via `kubeadm reset`) if it is running.
function stop_containerd() {
	local tmp
	# Split declaration and assignment so `local` does not mask pgrep's
	# status; `|| true` keeps errexit quiet when kubelet is not running.
	tmp=$(pgrep kubelet || true)
	# Explicit `if` instead of `[ ... ] && ...` so the function does not
	# return non-zero when kubelet is simply absent.
	if [ -n "$tmp" ]; then
		sudo kubeadm reset -f
	fi

	sudo systemctl stop containerd
}
# Entry point: stop competing CRI services, prepare the environment, build
# containerd and run the cri-integration suite plus the Kata-specific tests.
function main() {
	info "Stop crio service"
	systemctl is-active --quiet crio && sudo systemctl stop crio

	info "Stop containerd service"
	systemctl is-active --quiet containerd && stop_containerd

	# Configure environment if running in CI
	ci_config

	pushd "containerd"

	# Make sure the right artifacts are going to be built
	make clean

	check_daemon_setup

	info "containerd(cri): testing using runtime: ${containerd_runtime_type}"

	create_containerd_config "kata-${KATA_HYPERVISOR}"

	info "containerd(cri): Running cri-integration"

	passing_test="TestContainerStats|TestContainerRestart|TestContainerListStatsWithIdFilter|TestContainerListStatsWithIdSandboxIdFilter|TestDuplicateName|TestImageLoad|TestImageFSInfo|TestSandboxCleanRemove"

	if [[ "${KATA_HYPERVISOR}" == "cloud-hypervisor" || \
		"${KATA_HYPERVISOR}" == "qemu" ]]; then
		issue="https://github.com/kata-containers/tests/issues/2318"
		info "${KATA_HYPERVISOR} fails with TestContainerListStatsWithSandboxIdFilter"
		info "see ${issue}"
	else
		passing_test="${passing_test}|TestContainerListStatsWithSandboxIdFilter"
	fi

	# in some distros(AlibabaCloud), there is no btrfs-devel package available,
	# so pass GO_BUILDTAGS="no_btrfs" to make to not use btrfs.
	# containerd cri-integration will modify the passed in config file. Let's
	# give it a temp one.
	cp "$CONTAINERD_CONFIG_FILE" "$CONTAINERD_CONFIG_FILE_TEMP"
	sudo -E PATH="${PATH}:/usr/local/bin" \
		REPORT_DIR="${REPORT_DIR}" \
		FOCUS="^(${passing_test})$" \
		RUNTIME="" \
		CONTAINERD_CONFIG_FILE="$CONTAINERD_CONFIG_FILE_TEMP" \
		make GO_BUILDTAGS="no_btrfs" -e cri-integration

	# trap error for print containerd log,
	# containerd's `cri-integration` will print the log itself.
	trap err_report ERR

	# TestContainerSwap is currently failing with GHA.
	# Let's re-enable it as soon as we get it to work.
	# Reference: https://github.com/kata-containers/kata-containers/issues/7410
	# TestContainerSwap

	# TODO: runtime-rs doesn't support memory update currently
	if [ "$KATA_HYPERVISOR" != "dragonball" ]; then
		TestContainerMemoryUpdate 1
		TestContainerMemoryUpdate 0
	fi

	TestKilledVmmCleanup

	popd
}
main

View File

@@ -8,8 +8,11 @@ set -o errexit
set -o nounset
set -o pipefail
integration_dir="$(dirname "$(readlink -f "$0")")"
tools_dir="${integration_dir}/../../tools"
kubernetes_dir="$(dirname "$(readlink -f "$0")")"
source "${kubernetes_dir}/../../common.bash"
tools_dir="${repo_root_dir}/tools"
AZ_RG="${AZ_RG:-kataCI}"
function _print_cluster_name() {
short_sha="$(git rev-parse --short=12 HEAD)"
@@ -35,7 +38,7 @@ function create_cluster() {
delete_cluster || true
az aks create \
-g "kataCI" \
-g "${AZ_RG}" \
-n "$(_print_cluster_name)" \
-s "Standard_D4s_v5" \
--node-count 1 \
@@ -54,34 +57,33 @@ function install_kubectl() {
function get_cluster_credentials() {
az aks get-credentials \
-g "kataCI" \
-g "${AZ_RG}" \
-n "$(_print_cluster_name)"
}
# Make sure yq is available, installing it into ${GOPATH}/bin when needed.
function ensure_yq() {
	# Default GOPATH to the GitHub Actions workspace when unset.
	: "${GOPATH:=${GITHUB_WORKSPACE}}"
	export GOPATH
	export PATH="${GOPATH}/bin:${PATH}"
	# Install yq via the repo helper script.
	INSTALL_IN_GOPATH=true "${repo_root_dir}/ci/install_yq.sh"
}
function run_tests() {
function deploy_kata() {
platform="${1}"
ensure_yq
# Emsure we're in the default namespace
kubectl config set-context --current --namespace=default
# Delete any spurious tests namespace that was left behind
kubectl delete namespace kata-containers-k8s-tests &> /dev/null || true
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Enable debug for Kata Containers
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[1].value' --tag '!!str' "true"
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[4].value' --tag '!!str' "true"
if [ "${KATA_HOST_OS}" = "cbl-mariner" ]; then
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[+].name' "HOST_OS"
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[-1].value' "${KATA_HOST_OS}"
fi
echo "::group::Final kata-deploy.yaml that is used in the test"
cat "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
cat "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" | grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" || die "Failed to setup the tests image"
echo "::endgroup::"
kubectl apply -f "${tools_dir}/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
if [ "${platform}" = "tdx" ]; then
@@ -90,7 +92,6 @@ function run_tests() {
kubectl apply -f "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
fi
kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
kubectl apply -f "${tools_dir}/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml"
# This is needed as the kata-deploy pod will be set to "Ready" when it starts running,
# which may cause issues like not having the node properly labeled or the artefacts
@@ -101,11 +102,24 @@ function run_tests() {
sleep 60s
fi
echo "::group::kata-deploy logs"
kubectl -n kube-system logs -l name=kata-deploy
echo "::endgroup::"
echo "::group::Runtime classes"
kubectl get runtimeclass
echo "::endgroup::"
}
function run_tests() {
# Delete any spurious tests namespace that was left behind
kubectl delete namespace kata-containers-k8s-tests &> /dev/null || true
# Create a new namespace for the tests and switch to it
kubectl apply -f ${integration_dir}/kubernetes/runtimeclass_workloads/tests-namespace.yaml
kubectl apply -f ${kubernetes_dir}/runtimeclass_workloads/tests-namespace.yaml
kubectl config set-context --current --namespace=kata-containers-k8s-tests
pushd "${integration_dir}/kubernetes"
pushd "${kubernetes_dir}"
bash setup.sh
bash run_kubernetes_tests.sh
popd
@@ -113,6 +127,15 @@ function run_tests() {
function cleanup() {
platform="${1}"
ensure_yq
echo "Gather information about the nodes and pods before cleaning up the node"
get_nodes_and_pods_info
if [ "${platform}" = "aks" ]; then
delete_cluster
return
fi
# Switch back to the default namespace and delete the tests one
kubectl config set-context --current --namespace=default
@@ -129,6 +152,9 @@ function cleanup() {
kubectl delete ${deploy_spec}
kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq write -i "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" 'spec.template.spec.containers[0].env[4].value' --tag '!!str' "true"
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
cat "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
cat "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" | grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" || die "Failed to setup the tests image"
@@ -137,16 +163,20 @@ function cleanup() {
kubectl delete ${cleanup_spec}
kubectl delete -f "${tools_dir}/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
kubectl delete -f "${tools_dir}/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml"
}
function delete_cluster() {
az aks delete \
-g "kataCI" \
-g "${AZ_RG}" \
-n "$(_print_cluster_name)" \
--yes
}
# Collect debug information about the cluster nodes through an ephemeral
# `kubectl debug` container, then remove the debugger pods it leaves behind.
function get_nodes_and_pods_info() {
	kubectl debug $(kubectl get nodes -o name) -it --image=quay.io/kata-containers/kata-debug:latest
	kubectl get pods -o name | grep node-debugger | xargs kubectl delete
}
function main() {
export KATA_HOST_OS="${KATA_HOST_OS:-}"
@@ -159,14 +189,15 @@ function main() {
install-bats) install_bats ;;
install-kubectl) install_kubectl ;;
get-cluster-credentials) get_cluster_credentials ;;
run-tests-aks) run_tests "aks" ;;
run-tests-sev) run_tests "sev" ;;
run-tests-snp) run_tests "snp" ;;
run-tests-tdx) run_tests "tdx" ;;
deploy-kata-aks) deploy_kata "aks" ;;
deploy-kata-sev) deploy_kata "sev" ;;
deploy-kata-snp) deploy_kata "snp" ;;
deploy-kata-tdx) deploy_kata "tdx" ;;
run-tests) run_tests ;;
cleanup-sev) cleanup "sev" ;;
cleanup-snp) cleanup "snp" ;;
cleanup-tdx) cleanup "tdx" ;;
delete-cluster) delete_cluster ;;
delete-cluster) cleanup "aks" ;;
*) >&2 echo "Invalid argument"; exit 2 ;;
esac
}

View File

@@ -0,0 +1,47 @@
#!/usr/bin/env bats
#
# Copyright (c) 2022 Ant Group
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
TEST_INITRD="${TEST_INITRD:-no}"
# bats setup hook: create a temp file on the *host* node (via exec_host) that
# the test exposes to the pod as a file volume.
setup() {
	[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
	pod_name="test-file-volume"
	container_name="busybox-file-volume-container"
	tmp_file=$(exec_host mktemp /tmp/file-volume-test-foo.XXXXX)
	# Path inside the container where the host file is mounted.
	mount_path="/tmp/foo.txt"
	file_body="test"
	get_pod_config_dir
}
# Verify that a host file exposed to a pod as a volume shows the expected
# content when read from inside the container.
@test "Test readonly volume for pods" {
	[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
	# Write test body to temp file
	exec_host "echo "$file_body" > $tmp_file"

	# Create test yaml
	sed -e "s|HOST_FILE|$tmp_file|" ${pod_config_dir}/pod-file-volume.yaml > ${pod_config_dir}/test-pod-file-volume.yaml
	sed -i "s|MOUNT_PATH|$mount_path|" ${pod_config_dir}/test-pod-file-volume.yaml

	# Create pod
	kubectl create -f "${pod_config_dir}/test-pod-file-volume.yaml"

	# Check pod creation
	kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"

	# Validate file volume body inside the pod
	file_in_container=$(kubectl exec $pod_name -- cat $mount_path)
	[ "$file_body" == "$file_in_container" ]
}
# bats teardown hook: remove the pod, the host-side temp file and the
# generated yaml.
teardown() {
	[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
	kubectl delete pod "$pod_name"
	exec_host rm -f $tmp_file
	# The file generated by the test is "test-pod-file-volume.yaml"; the
	# previous ".yaml.yaml" suffix here meant it was never removed.
	rm -f ${pod_config_dir}/test-pod-file-volume.yaml
}

View File

@@ -0,0 +1,67 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
TEST_INITRD="${TEST_INITRD:-no}"
# bats setup hook: create a host directory holding an index.html and render
# the PV yaml so its hostPath points at that directory.
setup() {
	[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
	get_pod_config_dir
	# NOTE(review): despite the name, tmp_file is a *directory* on the host
	# (mktemp -d) that backs the persistent volume.
	tmp_file=$(exec_host mktemp -d /tmp/data.XXXX)
	pod_yaml=$(mktemp --tmpdir pod_config.XXXXXX.yaml)
	msg="Hello from Kubernetes"
	exec_host "echo $msg > $tmp_file/index.html"
	pod_name="pv-pod"
	# Define temporary file at yaml
	sed -e "s|tmp_data|${tmp_file}|g" ${pod_config_dir}/pv-volume.yaml > "$pod_yaml"
}
# Create a hostPath-backed PV, bind a PVC to it, then verify a pod can read
# the file written on the host through the mounted volume.
@test "Create Persistent Volume" {
	[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
	volume_name="pv-volume"
	volume_claim="pv-claim"

	# Create the persistent volume
	kubectl create -f "$pod_yaml"

	# Check the persistent volume is Available
	cmd="kubectl get pv $volume_name | grep Available"
	waitForProcess "$wait_time" "$sleep_time" "$cmd"

	# Create the persistent volume claim
	kubectl create -f "${pod_config_dir}/volume-claim.yaml"

	# Check the persistent volume claim is Bound.
	cmd="kubectl get pvc $volume_claim | grep Bound"
	waitForProcess "$wait_time" "$sleep_time" "$cmd"

	# Create pod
	kubectl create -f "${pod_config_dir}/pv-pod.yaml"

	# Check pod creation
	kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"

	# The host-written index.html must be readable at the mount point.
	cmd="cat /mnt/index.html"
	kubectl exec $pod_name -- sh -c "$cmd" | grep "$msg"
}
# bats teardown hook: dump debug info, then remove every object the test
# created (pod, PVC, PV, rendered yaml and the host-side directory).
teardown() {
	[ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
	# Debugging information
	kubectl describe "pod/$pod_name"

	kubectl delete pod "$pod_name"
	kubectl delete pvc "$volume_claim"
	kubectl delete pv "$volume_name"

	rm -f "$pod_yaml"
	exec_host rm -rf "$tmp_file"
}

View File

@@ -8,6 +8,7 @@
set -e
kubernetes_dir=$(dirname "$(readlink -f "$0")")
source "${kubernetes_dir}/../../common.bash"
TARGET_ARCH="${TARGET_ARCH:-x86_64}"
KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
@@ -27,6 +28,7 @@ else
"k8s-empty-dirs.bats" \
"k8s-env.bats" \
"k8s-exec.bats" \
"k8s-file-volume.bats" \
"k8s-inotify.bats" \
"k8s-job.bats" \
"k8s-kill-all-process-in-container.bats" \
@@ -50,6 +52,7 @@ else
"k8s-sysctls.bats" \
"k8s-security-context.bats" \
"k8s-shared-volume.bats" \
"k8s-volume.bats" \
"k8s-nginx-connectivity.bats" \
)
fi

View File

@@ -0,0 +1,26 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
kind: Pod
apiVersion: v1
metadata:
  name: pv-pod
spec:
  terminationGracePeriodSeconds: 0
  runtimeClassName: kata
  volumes:
    - name: pv-storage
      persistentVolumeClaim:
        claimName: pv-claim
  containers:
    - name: pv-container
      image: quay.io/prometheus/busybox:latest
      # A stray empty "ports:" key (YAML null) was removed here; the
      # container exposes no ports.
      command:
        - sleep
        - "120"
      volumeMounts:
        - mountPath: "/mnt/"
          name: pv-storage

View File

@@ -0,0 +1,19 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# A manually-provisioned PersistentVolume backed by a host path.
# "tmp_data" is a placeholder: the k8s-volume.bats setup() replaces it
# (via sed) with the actual host directory created for the test.
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "tmp_data"

View File

@@ -0,0 +1,16 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Claim bound to the manually-provisioned "pv-volume" (same "manual"
# storageClassName; requests less than the PV's 10Gi capacity).
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi

View File

@@ -8,26 +8,33 @@ set -o nounset
set -o pipefail
kubernetes_dir=$(dirname "$(readlink -f "$0")")
source "${kubernetes_dir}/../../common.bash"
reset_workloads_work_dir() {
rm -rf ${kubernetes_dir}/runtimeclass_workloads_work
cp -R ${kubernetes_dir}/runtimeclass_workloads ${kubernetes_dir}/runtimeclass_workloads_work
}
set_runtime_class() {
sed -i -e "s|runtimeClassName: kata|runtimeClassName: kata-${KATA_HYPERVISOR}|" ${kubernetes_dir}/runtimeclass_workloads/*.yaml
sed -i -e "s|runtimeClassName: kata|runtimeClassName: kata-${KATA_HYPERVISOR}|" ${kubernetes_dir}/runtimeclass_workloads_work/*.yaml
}
set_kernel_path() {
if [[ "${KATA_HOST_OS}" = "cbl-mariner" ]]; then
mariner_kernel_path="/usr/share/cloud-hypervisor/vmlinux.bin"
find ${kubernetes_dir}/runtimeclass_workloads/*.yaml -exec yq write -i {} 'metadata.annotations[io.katacontainers.config.hypervisor.kernel]' "${mariner_kernel_path}" \;
find ${kubernetes_dir}/runtimeclass_workloads_work/*.yaml -exec yq write -i {} 'metadata.annotations[io.katacontainers.config.hypervisor.kernel]' "${mariner_kernel_path}" \;
fi
}
set_initrd_path() {
if [[ "${KATA_HOST_OS}" = "cbl-mariner" ]]; then
initrd_path="/opt/kata/share/kata-containers/kata-containers-initrd-cbl-mariner.img"
find ${kubernetes_dir}/runtimeclass_workloads/*.yaml -exec yq write -i {} 'metadata.annotations[io.katacontainers.config.hypervisor.initrd]' "${initrd_path}" \;
initrd_path="/opt/kata/share/kata-containers/kata-containers-initrd-mariner.img"
find ${kubernetes_dir}/runtimeclass_workloads_work/*.yaml -exec yq write -i {} 'metadata.annotations[io.katacontainers.config.hypervisor.initrd]' "${initrd_path}" \;
fi
}
main() {
reset_workloads_work_dir
set_runtime_class
set_kernel_path
set_initrd_path

View File

@@ -34,6 +34,19 @@ dragonball_limitations="https://github.com/kata-containers/kata-containers/issue
export KUBECONFIG="${KUBECONFIG:-$HOME/.kube/config}"
get_pod_config_dir() {
pod_config_dir="${BATS_TEST_DIRNAME}/runtimeclass_workloads"
pod_config_dir="${BATS_TEST_DIRNAME}/runtimeclass_workloads_work"
info "k8s configured to use runtimeclass"
}
# Runs a command in the host filesystem.
#
# The node is reached with `kubectl debug` and the command is executed under
# `chroot /host`. Since `kubectl debug` itself always exits 0, the command's
# real exit status is appended to the output and recovered afterwards.
exec_host() {
	node="$(kubectl get node -o name)"
	# `kubectl debug` always returns 0, so we hack it to return the right exit code.
	command="$@"
	# Append the command's status on its own line so it can be split off below.
	command+='; echo -en \\n$?'
	output="$(kubectl debug -qit "${node}" --image=alpine:latest -- chroot /host bash -c "${command}")"
	# Remove the debugger pod `kubectl debug` leaves behind.
	kubectl get pods -o name | grep node-debugger | xargs kubectl delete > /dev/null
	# Last line is the exit code; everything before it is the real output.
	exit_code="$(echo "${output}" | tail -1)"
	echo "$(echo "${output}" | head -n -1)"
	return ${exit_code}
}

View File

@@ -0,0 +1,38 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o nounset
set -o pipefail
kata_tarball_dir="${2:-kata-artifacts}"
nydus_dir="$(dirname "$(readlink -f "$0")")"
source "${nydus_dir}/../../common.bash"
function install_dependencies() {
	# Placeholder: the nydus tests do not need any extra packages yet.
	info "Installing the dependencies needed for running the nydus tests"

	return 0
}
function run() {
	# Placeholder: no nydus tests are wired up here yet; just log the
	# hypervisor that would be used and report success.
	info "Running nydus tests using ${KATA_HYPERVISOR} hypervisor"

	return 0
}
# Entry point: dispatch on the first CLI argument; unknown actions are fatal.
function main() {
	action="${1:-}"

	case "${action}" in
		install-dependencies)
			install_dependencies
			;;
		install-kata)
			# Nothing to do for this action yet.
			return 0
			;;
		run)
			run
			;;
		*)
			>&2 die "Invalid argument"
			;;
	esac
}
main "$@"

View File

@@ -0,0 +1,12 @@
metadata:
name: nydus-container
image:
image: ghcr.io/dragonflyoss/image-service/alpine:nydus-latest
command:
- tail
- -f
- /dev/null
linux:
resources:
memory_limit_in_bytes: 524288000
log_path: nydus.0.log

View File

@@ -0,0 +1,5 @@
metadata:
attempt: 1
name: nydus-sandbox
namespace: default
log_directory: /tmp

View File

@@ -0,0 +1,211 @@
#!/bin/bash
#
# Copyright (c) 2022 Ant Group
#
# SPDX-License-Identifier: Apache-2.0
#
# This will test the nydus feature is working properly
# Fail fast: abort on errors, unset variables and failures inside pipes;
# errtrace makes ERR traps fire inside functions and subshells too.
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
dir_path=$(dirname "$0")
source "${dir_path}/../../lib/common.bash"
source "${dir_path}/../../.ci/lib.sh"
# Some distros only ship /usr/lib/os-release, hence the fallback.
source "/etc/os-release" || source "/usr/lib/os-release"
KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
# Back-up/restore bookkeeping shared by config_kata() and teardown(): if a
# kata config already exists it is saved here and restored on exit.
need_restore_kata_config=false
kata_config_backup="/tmp/kata-configuration.toml"
SYSCONFIG_FILE="/etc/kata-containers/configuration.toml"
DEFAULT_CONFIG_FILE="/opt/kata/share/defaults/kata-containers/configuration-qemu.toml"
CLH_CONFIG_FILE="/opt/kata/share/defaults/kata-containers/configuration-clh.toml"
DB_CONFIG_FILE="/opt/kata/share/defaults/kata-containers/configuration-dragonball.toml"
# Same back-up/restore dance for the containerd configuration, shared by
# config_containerd() and teardown().
need_restore_containerd_config=false
containerd_config="/etc/containerd/config.toml"
containerd_config_backup="/tmp/containerd.config.toml"
# test image for container
IMAGE="${IMAGE:-ghcr.io/dragonflyoss/image-service/alpine:nydus-latest}"
# Gate 1: the nydus flow is only wired up for QEMU, Cloud Hypervisor and
# Dragonball; skip (exit 0, not a failure) for any other hypervisor.
if [ "$KATA_HYPERVISOR" != "qemu" ] && [ "$KATA_HYPERVISOR" != "cloud-hypervisor" ] && [ "$KATA_HYPERVISOR" != "dragonball" ]; then
echo "Skip nydus test for $KATA_HYPERVISOR, it only works for QEMU/CLH/DB now."
exit 0
fi
# Gate 2: x86_64 only for now (see linked issue).
arch="$(uname -m)"
if [ "$arch" != "x86_64" ]; then
echo "Skip nydus test for $arch, it only works for x86_64 now. See https://github.com/kata-containers/tests/issues/4445"
exit 0
fi
# Download a project's release tarball from GitHub and extract its binaries
# into /usr/local/bin.
#
# Arguments:
#   $1 - package name, the key used to look up url/version in versions.yaml
#   $2 - binary (tarball) base name inside the release
# Globals: reads $arch and $dir_path; calls get_version()/die() from the
# sourced libraries.
function install_from_tarball() {
	local package_name="$1"
	local binary_name="$2"
	[ -n "$package_name" ] || die "need package_name"
	[ -n "$binary_name" ] || die "need package release binary_name"

	# Declare and assign separately: `local v=$(cmd)` returns the status of
	# `local`, so a get_version failure would be silently ignored even under
	# `set -o errexit`.
	local url version
	url=$(get_version "externals.${package_name}.url")
	version=$(get_version "externals.${package_name}.version")

	local tarball_url="${url}/releases/download/${version}/${binary_name}-${version}-${arch}.tgz"
	if [ "${package_name}" == "nydus" ]; then
		# nydus names its release tarballs with a golang-style arch
		# (e.g. "linux-amd64" instead of "x86_64").
		local goarch
		goarch="$("${dir_path}/../../.ci/kata-arch.sh" --golang)"
		tarball_url="${url}/releases/download/${version}/${binary_name}-${version}-linux-${goarch}.tgz"
	fi

	echo "Download tarball from ${tarball_url}"
	curl -Ls "$tarball_url" | sudo tar xfz - -C /usr/local/bin --strip-components=1
}
# Install the nydus and nydus-snapshotter release binaries, drop the nydusd
# config into /etc, and start the snapshotter daemon in the background.
function setup_nydus() {
# install nydus
install_from_tarball "nydus" "nydus-static"
# install nydus-snapshotter
install_from_tarball "nydus-snapshotter" "nydus-snapshotter"
# Config nydus snapshotter
sudo -E cp "$dir_path/nydusd-config.json" /etc/
# start nydus-snapshotter
# The daemon runs detached for the lifetime of the test (output discarded);
# teardown() later kills it by name (containerd-nydus-grpc).
nohup /usr/local/bin/containerd-nydus-grpc \
--config-path /etc/nydusd-config.json \
--shared-daemon \
--log-level debug \
--root /var/lib/containerd/io.containerd.snapshotter.v1.nydus \
--cache-dir /var/lib/nydus/cache \
--nydusd-path /usr/local/bin/nydusd \
--nydusimg-path /usr/local/bin/nydus-image \
--disable-cache-manager true \
--enable-nydus-overlayfs true \
--log-to-stdout >/dev/null 2>&1 &
}
# Install a hypervisor-specific kata configuration into /etc/kata-containers
# (backing up any pre-existing one for teardown() to restore), then enable
# debug options and point shared_fs at the nydusd-backed virtio-fs daemon.
function config_kata() {
sudo mkdir -p /etc/kata-containers
if [ -f "$SYSCONFIG_FILE" ]; then
# A config already exists: preserve it and edit it in place below.
need_restore_kata_config=true
sudo cp -a "${SYSCONFIG_FILE}" "${kata_config_backup}"
elif [ "$KATA_HYPERVISOR" == "qemu" ]; then
sudo cp -a "${DEFAULT_CONFIG_FILE}" "${SYSCONFIG_FILE}"
elif [ "$KATA_HYPERVISOR" == "dragonball" ]; then
sudo cp -a "${DB_CONFIG_FILE}" "${SYSCONFIG_FILE}"
else
sudo cp -a "${CLH_CONFIG_FILE}" "${SYSCONFIG_FILE}"
fi
echo "Enabling all debug options in file ${SYSCONFIG_FILE}"
# Uncomment every `#enable_debug` line and append agent.log=debug to the
# guest kernel parameters.
sudo sed -i -e 's/^#\(enable_debug\).*=.*$/\1 = true/g' "${SYSCONFIG_FILE}"
sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 agent.log=debug"/g' "${SYSCONFIG_FILE}"
# NOTE(review): dragonball keeps its default shared_fs — presumably it does
# not use the external nydusd virtio-fs daemon; confirm.
if [ "$KATA_HYPERVISOR" != "dragonball" ]; then
sudo sed -i 's|^shared_fs.*|shared_fs = "virtio-fs-nydus"|g' "${SYSCONFIG_FILE}"
sudo sed -i 's|^virtio_fs_daemon.*|virtio_fs_daemon = "/usr/local/bin/nydusd"|g' "${SYSCONFIG_FILE}"
fi
sudo sed -i 's|^virtio_fs_extra_args.*|virtio_fs_extra_args = []|g' "${SYSCONFIG_FILE}"
}
# Write a containerd config that registers the nydus snapshotter proxy
# plugin and the runc/kata runtimes, backing up any pre-existing config so
# teardown() can restore it.
#
# Fixes:
# - the old `else sudo rm "${containerd_config}"` branch removed a file
#   that was just proven NOT to exist, which fails and aborts the script
#   under `set -o errexit`; when there is nothing to back up, there is
#   simply nothing to do.
# - `readonly runc_path=$(…)` masked a `command -v` failure and, being
#   global-readonly, broke any second invocation of this function.
function config_containerd() {
	local runc_path
	# Tolerate a missing runc the same way the old code did (empty path).
	runc_path=$(command -v runc) || runc_path=""

	sudo mkdir -p /etc/containerd/

	if [ -f "$containerd_config" ]; then
		# Preserve the pre-existing config for teardown() to restore.
		need_restore_containerd_config=true
		sudo cp -a "${containerd_config}" "${containerd_config_backup}"
	fi

	cat <<EOF | sudo tee $containerd_config
[debug]
  level = "debug"
[proxy_plugins]
  [proxy_plugins.nydus]
    type = "snapshot"
    address = "/run/containerd-nydus/containerd-nydus-grpc.sock"
[plugins]
  [plugins.cri]
    disable_hugetlb_controller = false
    [plugins.cri.containerd]
      snapshotter = "nydus"
      disable_snapshot_annotations = false
      [plugins.cri.containerd.runtimes]
        [plugins.cri.containerd.runtimes.runc]
          runtime_type = "io.containerd.runc.v2"
          [plugins.cri.containerd.runtimes.runc.options]
            BinaryName = "${runc_path}"
            Root = ""
        [plugins.cri.containerd.runtimes.kata]
          runtime_type = "io.containerd.kata.v2"
          privileged_without_host_devices = true
EOF
}
# Bring the whole test environment up: install/start nydus, configure kata
# and containerd, then restart containerd so it picks the new config up.
# restart_containerd_service/check_processes/extract_kata_env come from the
# sourced common libraries.
function setup() {
setup_nydus
config_kata
config_containerd
restart_containerd_service
check_processes
extract_kata_env
}
# The actual nydus smoke test: pull the nydus image, run a kata pod +
# container from the crictl specs next to this script, verify the container
# reaches CONTAINER_RUNNING and can execute a command, then clean up.
#
# Fix: `crictl exec` was the only crictl invocation missing `sudo -E`,
# so it would fail for a non-root runner; also quote all expansions.
function run_test() {
	sudo -E crictl pull "${IMAGE}"

	pod=$(sudo -E crictl runp -r kata "$dir_path/nydus-sandbox.yaml")
	echo "Pod $pod created"

	cnt=$(sudo -E crictl create "$pod" "$dir_path/nydus-container.yaml" "$dir_path/nydus-sandbox.yaml")
	echo "Container $cnt created"

	sudo -E crictl start "$cnt"
	echo "Container $cnt started"

	# ensure container is running
	state=$(sudo -E crictl inspect "$cnt" | jq .status.state | tr -d '"')
	[ "$state" == "CONTAINER_RUNNING" ] || die "Container is not running($state)"

	# run a command in container
	sudo -E crictl exec "$cnt" ls

	# cleanup containers
	sudo -E crictl stop "$cnt"
	sudo -E crictl stopp "$pod"
	sudo -E crictl rmp "$pod"
}
# EXIT-trap handler: kill the nydus daemons and restore (or remove) the
# kata and containerd configurations this script installed.
#
# Improvement: the identical kill-then-verify sequence was duplicated for
# each daemon; fold it into one loop.
function teardown() {
	echo "Running teardown"

	# Kill the snapshotter and nydusd. $(pidof …) is intentionally unquoted
	# so multiple PIDs word-split into separate kill arguments; `|| true`
	# tolerates the daemon already being gone.
	local bin
	for bin in containerd-nydus-grpc nydusd; do
		kill -9 $(pidof $bin) || true
		[ "$(pidof $bin)" == "" ] || die "$bin is running"
	done

	# restore kata configuration.toml if needed
	if [ "${need_restore_kata_config}" == "true" ]; then
		sudo mv "$kata_config_backup" "$SYSCONFIG_FILE"
	else
		sudo rm "$SYSCONFIG_FILE"
	fi

	# restore containerd config.toml if needed
	if [ "${need_restore_containerd_config}" == "true" ]; then
		sudo mv "$containerd_config_backup" "$containerd_config"
	else
		sudo rm "$containerd_config"
	fi

	clean_env_ctr
	check_processes
}
# Ensure the host is cleaned up (daemons killed, configs restored) on any
# exit path, including failures under `set -o errexit`.
trap teardown EXIT
echo "Running setup"
setup
echo "Running nydus integration tests"
run_test

View File

@@ -0,0 +1,27 @@
{
"device": {
"backend": {
"type": "registry",
"config": {
"scheme": "https",
"timeout": 5,
"connect_timeout": 5,
"retry_limit": 2
}
},
"cache": {
"type": "blobcache",
"config": {
"work_dir": "/var/lib/nydus/cache"
}
}
},
"mode": "direct",
"digest_validate": false,
"iostats_files": false,
"enable_xattr": true,
"fs_prefetch": {
"enable": true,
"threads_count": 2
}
}