Mirror of https://github.com/aljazceru/kata-containers.git (synced 2026-01-20 23:04:22 +01:00)
CCv0: Merge from main -- August 1st
Conflicts:
	src/runtime/pkg/katautils/config.go
	src/runtime/virtcontainers/container.go
	src/runtime/virtcontainers/hypervisor.go
	src/runtime/virtcontainers/qemu_arch_base.go
	src/runtime/virtcontainers/sandbox.go
	tests/integration/kubernetes/gha-run.sh
	tests/integration/kubernetes/setup.sh
	tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml
	tools/packaging/kata-deploy/local-build/kata-deploy-binaries.sh
	tools/packaging/kata-deploy/scripts/kata-deploy.sh
	tools/packaging/kernel/kata_config_version
	versions.yaml

Fixes: #7433

Signed-off-by: Fabiano Fidêncio <fabiano.fidencio@intel.com>
tests/.gitignore (vendored, new Normal file, 1 line)
@@ -0,0 +1 @@
integration/kubernetes/runtimeclass_workloads_work/

tests/common.bash
@@ -7,6 +7,9 @@
# This file contains common functions that
# are being used by our metrics and integration tests

this_script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
export repo_root_dir="$(cd "${this_script_dir}/../" && pwd)"

# Kata tests directory used for storing various test-related artifacts.
KATA_TESTS_BASEDIR="${KATA_TESTS_BASEDIR:-/var/log/kata-tests}"

@@ -23,23 +26,23 @@ KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"

RUNTIME="${RUNTIME:-containerd-shim-kata-v2}"

-die() {
+function die() {
    local msg="$*"
    echo -e "[$(basename $0):${BASH_LINENO[0]}] ERROR: $msg" >&2
    exit 1
}

-warn() {
+function warn() {
    local msg="$*"
    echo -e "[$(basename $0):${BASH_LINENO[0]}] WARNING: $msg"
}

-info() {
+function info() {
    local msg="$*"
    echo -e "[$(basename $0):${BASH_LINENO[0]}] INFO: $msg"
}

-handle_error() {
+function handle_error() {
    local exit_code="${?}"
    local line_number="${1:-}"
    echo -e "[$(basename $0):$line_number] ERROR: $(eval echo "$BASH_COMMAND")"
@@ -47,7 +50,7 @@ handle_error() {
}
trap 'handle_error $LINENO' ERR

-waitForProcess() {
+function waitForProcess() {
    wait_time="$1"
    sleep_time="$2"
    cmd="$3"
@@ -66,7 +69,7 @@ waitForProcess() {
# Kata runtime. Of course, the end user can choose any name they
# want in reality, but this function knows the names of the default
# and recommended Kata docker runtime install names.
-is_a_kata_runtime() {
+function is_a_kata_runtime() {
    if [ "$1" = "containerd-shim-kata-v2" ] || [ "$1" = "io.containerd.kata.v2" ]; then
        echo "1"
    else
@@ -76,7 +79,7 @@ is_a_kata_runtime() {

# Gets versions and paths of all the components
# list in kata-env
-extract_kata_env() {
+function extract_kata_env() {
    RUNTIME_CONFIG_PATH=$(kata-runtime kata-env --json | jq -r .Runtime.Config.Path)
    RUNTIME_VERSION=$(kata-runtime kata-env --json | jq -r .Runtime.Version | grep Semver | cut -d'"' -f4)
    RUNTIME_COMMIT=$(kata-runtime kata-env --json | jq -r .Runtime.Version | grep Commit | cut -d'"' -f4)
@@ -97,7 +100,7 @@ extract_kata_env() {
}

# Checks that processes are not running
-check_processes() {
+function check_processes() {
    extract_kata_env

    # Only check the kata-env if we have managed to find the kata executable...
@@ -120,7 +123,7 @@ check_processes() {

# Clean environment, this function will try to remove all
# stopped/running containers.
-clean_env()
+function clean_env()
{
    # If the timeout has not been set, default it to 30s
    # Docker has a built in 10s default timeout, so make ours
@@ -139,7 +142,7 @@ clean_env()
    fi
}

-clean_env_ctr()
+function clean_env_ctr()
{
    local count_running="$(sudo ctr c list -q | wc -l)"
    local remaining_attempts=10
@@ -181,7 +184,32 @@ clean_env_ctr()
    count_tasks="$(sudo ctr t list -q | wc -l)"

    if (( count_tasks > 0 )); then
-        die "Can't remove running contaienrs."
+        die "Can't remove running containers."
    fi
+
+    kill_kata_components
}
+
+# Kills running shim and hypervisor components
+function kill_kata_components() {
+    local kata_bin_dir="/opt/kata/bin"
+    local shim_path="${kata_bin_dir}/containerd-shim-kata-v2"
+    local hypervisor_path="${kata_bin_dir}/qemu-system-x86_64"
+    local pid_shim_count="$(pgrep -fc ${shim_path} || exit 0)"
+
+    [ ${pid_shim_count} -gt "0" ] && sudo kill -SIGKILL "$(pgrep -f ${shim_path})" > /dev/null 2>&1
+
+    if [ "${KATA_HYPERVISOR}" = 'clh' ]; then
+        hypervisor_path="${kata_bin_dir}/cloud-hypervisor"
+    elif [ "${KATA_HYPERVISOR}" != 'qemu' ]; then
+        echo "Failed to stop the hypervisor: '${KATA_HYPERVISOR}' as it is not recognized"
+        return
+    fi
+
+    local pid_hypervisor_count="$(pgrep -fc ${hypervisor_path} || exit 0)"
+
+    if [ ${pid_hypervisor_count} -gt "0" ]; then
+        sudo kill -SIGKILL "$(pgrep -f ${hypervisor_path})" > /dev/null 2>&1
+    fi
+}
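Usage sketch (editorial note, not part of the commit): kill_kata_components picks the hypervisor binary from KATA_HYPERVISOR, so a caller only needs the variable set for the duration of the call:

    KATA_HYPERVISOR=clh kill_kata_components  # kills any leftover shim and cloud-hypervisor processes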

@@ -189,7 +217,7 @@ clean_env_ctr()
# Outputs warnings to stdio if something has gone wrong.
#
# Returns 0 on success, 1 otherwise
-restart_systemd_service_with_no_burst_limit() {
+function restart_systemd_service_with_no_burst_limit() {
    local service=$1
    info "restart $service service"

@@ -224,7 +252,7 @@ restart_systemd_service_with_no_burst_limit() {
    return 0
}

-restart_containerd_service() {
+function restart_containerd_service() {
    restart_systemd_service_with_no_burst_limit containerd || return 1

    local retries=5
@@ -241,16 +269,147 @@ restart_containerd_service() {
    return 0
}

-# @path_results: path to the input metric-results folder
-# @tarball_fname: path and filename to the output tarball
-function compress_metrics_results_dir()
-{
-    local path_results="${1:-results}"
-    local tarball_fname="${2:-}"
-
-    [ -z "${tarball_fname}" ] && die "Missing the tarball filename or the path to save the tarball results is incorrect."
-    [ ! -d "${path_results}" ] && die "Missing path to the results folder."
-
-    cd "${path_results}" && tar -czf "${tarball_fname}" *.json && cd -
-    info "tarball generated: ${tarball_fname}"
-}
+# Configures containerd
+function overwrite_containerd_config() {
+    containerd_config="/etc/containerd/config.toml"
+    sudo rm -f "${containerd_config}"
+    sudo tee "${containerd_config}" << EOF
+version = 2
+[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
+  SystemdCgroup = true
+
+[plugins]
+  [plugins."io.containerd.grpc.v1.cri"]
+    [plugins."io.containerd.grpc.v1.cri".containerd]
+      default_runtime_name = "kata"
+      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
+        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata]
+          runtime_type = "io.containerd.kata.v2"
+EOF
+}
+
+function install_kata() {
+    local kata_tarball="kata-static.tar.xz"
+    declare -r katadir="/opt/kata"
+    declare -r destdir="/"
+    declare -r local_bin_dir="/usr/local/bin/"
+
+    # Removing previous kata installation
+    sudo rm -rf "${katadir}"
+
+    pushd "${kata_tarball_dir}"
+    sudo tar -xvf "${kata_tarball}" -C "${destdir}"
+    popd
+
+    # create symbolic links to kata components
+    for b in "${katadir}"/bin/* ; do
+        sudo ln -sf "${b}" "${local_bin_dir}/$(basename $b)"
+    done
+
+    if [[ ${KATA_HYPERVISOR} == "dragonball" ]]; then
+        sudo ln -sf "${katadir}/runtime-rs/bin/containerd-shim-kata-v2" "${local_bin_dir}/containerd-shim-kata-${KATA_HYPERVISOR}-v2"
+    else
+        sudo ln -sf "${katadir}/bin/containerd-shim-kata-v2" "${local_bin_dir}/containerd-shim-kata-${KATA_HYPERVISOR}-v2"
+    fi
+
+    sudo ln -sf ${katadir}/share/defaults/kata-containers/configuration-${KATA_HYPERVISOR}.toml ${katadir}/share/defaults/kata-containers/configuration.toml
+
+    check_containerd_config_for_kata
+    restart_containerd_service
+}
+
+function check_containerd_config_for_kata() {
+    # check containerd config
+    declare -r line1="default_runtime_name = \"kata\""
+    declare -r line2="runtime_type = \"io.containerd.kata.v2\""
+    declare -r num_lines_containerd=2
+    declare -r containerd_path="/etc/containerd/config.toml"
+    local count_matches=$(grep -ic "$line1\|$line2" "${containerd_path}")
+
+    if [ "${count_matches}" = "${num_lines_containerd}" ]; then
+        info "containerd ok"
+    else
+        info "overwriting containerd configuration w/ a valid one"
+        overwrite_containerd_config
+    fi
+}
+
+function ensure_yq() {
+    : "${GOPATH:=${GITHUB_WORKSPACE:-$HOME/go}}"
+    export GOPATH
+    export PATH="${GOPATH}/bin:${PATH}"
+    INSTALL_IN_GOPATH=true "${repo_root_dir}/ci/install_yq.sh"
+}
+
+# dependency: the versions.yaml key to read the version from
+function get_from_kata_deps() {
+    local dependency="$1"
+    versions_file="${repo_root_dir}/versions.yaml"
+
+    command -v yq &>/dev/null || die 'yq command is not in your $PATH'
+    result=$("yq" read -X "$versions_file" "$dependency")
+    [ "$result" = "null" ] && result=""
+    echo "$result"
+}
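Usage sketch (editorial note, not part of the commit; both keys appear in calls elsewhere in this diff): the function prints the value stored under a dotted YAML path, or an empty string when yq reports null:

    go_version=$(get_from_kata_deps "languages.golang.meta.newest-version")
    critools_version=$(get_from_kata_deps "externals.critools.latest")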
+
+# project: org/repo format
+# base_version: ${major}.${minor}
+function get_latest_patch_release_from_a_github_project() {
+    project="${1}"
+    base_version="${2}"
+
+    curl --silent https://api.github.com/repos/${project}/releases | jq -r .[].tag_name | grep "^${base_version}.[0-9]*$" -m1
+}
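Usage sketch (editorial; the base version value is illustrative): grep -m1 keeps the first tag matching ^${base_version}.[0-9]*$, so the result relies on the GitHub API listing the newest releases first:

    # Prints the newest matching patch tag, e.g. for containerd (version argument hypothetical).
    version=$(get_latest_patch_release_from_a_github_project "containerd/containerd" "v1.7")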
+
+# base_version: The version to be installed, in the ${major}.${minor} format
+function clone_cri_containerd() {
+    base_version="${1}"
+
+    project="containerd/containerd"
+    version=$(get_latest_patch_release_from_a_github_project "${project}" "${base_version}")
+
+    rm -rf containerd
+    git clone -b ${version} https://github.com/${project}
+}
+
+# project: org/repo format
+# version: the version of the tarball that will be downloaded
+# tarball-name: the name of the tarball that will be downloaded
+function download_github_project_tarball() {
+    project="${1}"
+    version="${2}"
+    tarball_name="${3}"
+
+    wget https://github.com/${project}/releases/download/${version}/${tarball_name}
+}
+
+# base_version: The version to be installed, in the ${major}.${minor} format
+function install_cri_containerd() {
+    base_version="${1}"
+
+    project="containerd/containerd"
+    version=$(get_latest_patch_release_from_a_github_project "${project}" "${base_version}")
+
+    tarball_name="cri-containerd-cni-${version//v}-linux-$(${repo_root_dir}/tests/kata-arch.sh -g).tar.gz"
+
+    download_github_project_tarball "${project}" "${version}" "${tarball_name}"
+    sudo tar -xvf "${tarball_name}" -C /
+    rm -f "${tarball_name}"
+
+    sudo mkdir -p /etc/containerd
+    containerd config default | sudo tee /etc/containerd/config.toml
+}
+
+# base_version: The version to be installed, in the ${major}.${minor} format
+function install_cri_tools() {
+    base_version="${1}"
+
+    project="kubernetes-sigs/cri-tools"
+    version=$(get_latest_patch_release_from_a_github_project "${project}" "${base_version}")
+
+    tarball_name="crictl-${version}-linux-$(${repo_root_dir}/tests/kata-arch.sh -g).tar.gz"
+
+    download_github_project_tarball "${project}" "${version}" "${tarball_name}"
+    sudo tar -xvf "${tarball_name}" -C /usr/local/bin
+    rm -f "${tarball_name}"
+}
tests/functional/vfio/gha-run.sh (new Executable file, 33 lines)
@@ -0,0 +1,33 @@
#!/bin/bash
#
# Copyright (c) Microsoft Corporation.
#
# SPDX-License-Identifier: Apache-2.0
#

set -o errexit
set -o nounset
set -o pipefail

kata_tarball_dir="${2:-kata-artifacts}"
vfio_dir="$(dirname "$(readlink -f "$0")")"
source "${vfio_dir}/../../common.bash"

function install_dependencies() {
    info "Installing the dependencies needed for running the vfio tests"
}

function run() {
    info "Running vfio tests using ${KATA_HYPERVISOR} hypervisor"
}

function main() {
    action="${1:-}"
    case "${action}" in
        install-dependencies) install_dependencies ;;
        run) run ;;
        *) >&2 die "Invalid argument" ;;
    esac
}

main "$@"
tests/install_go.sh (new Executable file, 98 lines)
@@ -0,0 +1,98 @@
#!/bin/bash
#
# Copyright (c) 2018-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

set -o errexit
set -o nounset
set -o pipefail

tmp_dir=$(mktemp -d -t install-go-tmp.XXXXXXXXXX)
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
script_name="$(basename "${BASH_SOURCE[0]}")"
force=""
USE_VERSIONS_FILE=""
PROJECT="Kata Containers"

source "${script_dir}/common.bash"

install_dest="/usr/local/"

function finish() {
    rm -rf "$tmp_dir"
}

function usage(){
    exit_code="$1"
    cat <<EOF
Usage:

${script_name} [options] <args>

Args:
<go-version> : Install a specific go version.

Example:
${script_name} 1.10

Options
-d <path> : destination path, path where go will be installed.
-f        : Force remove old go version and install the specified one.
-h        : Show this help
-p        : Install go defined in ${PROJECT} versions file.

EOF

    exit "$exit_code"
}

trap finish EXIT

pushd "${tmp_dir}"

while getopts "d:fhp" opt
do
    case $opt in
        d) install_dest="${OPTARG}" ;;
        f) force="true" ;;
        h) usage 0 ;;
        p) USE_VERSIONS_FILE="true" ;;
    esac
done

shift $(( $OPTIND - 1 ))

go_version="${1:-""}"

if [ -z "$go_version" ] && [ "${USE_VERSIONS_FILE}" = "true" ]; then
    go_version=$(get_from_kata_deps "languages.golang.meta.newest-version")
fi

if [ -z "$go_version" ]; then
    echo "Missing go version or -p option"
    usage 0
fi

if command -v go; then
    [[ "$(go version)" == *"go${go_version}"* ]] && \
        info "Go ${go_version} already installed" && \
        exit
    if [ "${force}" = "true" ]; then
        info "removing $(go version)"
        sudo rm -rf "${install_dest}/go"
    else
        die "$(go version) is installed, use -f or remove it before installing go ${go_version}"
    fi
fi

goarch=$("${repo_root_dir}/tests/kata-arch.sh" --golang)

info "Download go version ${go_version}"
kernel_name=$(uname -s)
curl -OL "https://storage.googleapis.com/golang/go${go_version}.${kernel_name,,}-${goarch}.tar.gz"
info "Install go"
mkdir -p "${install_dest}"
sudo tar -C "${install_dest}" -xzf "go${go_version}.${kernel_name,,}-${goarch}.tar.gz"
popd
tests/integration/cri-containerd/gha-run.sh (new Executable file, 75 lines)
@@ -0,0 +1,75 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

set -o errexit
set -o nounset
set -o pipefail

kata_tarball_dir="${2:-kata-artifacts}"
cri_containerd_dir="$(dirname "$(readlink -f "$0")")"
source "${cri_containerd_dir}/../../common.bash"

function install_dependencies() {
    info "Installing the dependencies needed for running the cri-containerd tests"

    # Dependencies we can rely on the system packages for:
    # - build-essential
    #   - Theoretically we only need `make`, but it doesn't hurt to install
    #     the whole build-essential group
    # - jq
    # - podman-docker
    #   - one of the tests relies on docker to pull an image.
    #     we've decided to go for podman, instead, as it does *not* bring
    #     containerd as a dependency
    declare -a system_deps=(
        build-essential
        jq
        podman-docker
    )

    sudo apt-get update
    sudo apt-get -y install "${system_deps[@]}"

    ensure_yq
    ${repo_root_dir}/tests/install_go.sh -p

    # Dependencies we can install directly from their releases on GitHub:
    # - containerd
    #   - the cri-containerd-cni release tarball already includes CNI plugins
    # - cri-tools
    declare -a github_deps
    github_deps[0]="cri_containerd:$(get_from_kata_deps "externals.containerd.${CONTAINERD_VERSION}")"
    github_deps[1]="cri_tools:$(get_from_kata_deps "externals.critools.latest")"

    for github_dep in "${github_deps[@]}"; do
        IFS=":" read -r -a dep <<< "${github_dep}"
        install_${dep[0]} "${dep[1]}"
    done

    # Clone containerd, as we'll need to build it in order to run the tests
    clone_cri_containerd $(get_from_kata_deps "externals.containerd.${CONTAINERD_VERSION}")
}

function run() {
    info "Running cri-containerd tests using ${KATA_HYPERVISOR} hypervisor"

    return 0
}

function main() {
    action="${1:-}"
    case "${action}" in
        install-dependencies) install_dependencies ;;
        install-kata) install_kata ;;
        run) run ;;
        *) >&2 die "Invalid argument" ;;
    esac
}

main "$@"
tests/integration/cri-containerd/integration-tests.sh (new Executable file, 493 lines)
@@ -0,0 +1,493 @@
#!/bin/bash
#
# Copyright (c) 2017-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

[[ "${DEBUG}" != "" ]] && set -o xtrace
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace

SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
source "${SCRIPT_PATH}/../../common.bash"

# runc is installed in /usr/local/sbin/, add that path
export PATH="$PATH:/usr/local/sbin"

# golang is installed in /usr/local/go/bin/, add that path
export PATH="$PATH:/usr/local/go/bin"

# Runtime to be used for testing
RUNTIME=${RUNTIME:-containerd-shim-kata-v2}
FACTORY_TEST=${FACTORY_TEST:-""}
KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
USE_DEVMAPPER="${USE_DEVMAPPER:-false}"
ARCH=$(uname -m)

containerd_runtime_type="io.containerd.kata-${KATA_HYPERVISOR}.v2"

containerd_shim_path="$(command -v containerd-shim)"

# containerd config file
readonly tmp_dir=$(mktemp -t -d test-cri-containerd.XXXX)
export REPORT_DIR="${tmp_dir}"
readonly CONTAINERD_CONFIG_FILE="${tmp_dir}/test-containerd-config"
readonly CONTAINERD_CONFIG_FILE_TEMP="${CONTAINERD_CONFIG_FILE}.temp"
readonly default_containerd_config="/etc/containerd/config.toml"
readonly default_containerd_config_backup="$CONTAINERD_CONFIG_FILE.backup"
readonly kata_config="/etc/kata-containers/configuration.toml"
readonly kata_config_backup="$kata_config.backup"
readonly default_kata_config="/opt/kata/share/defaults/kata-containers/configuration.toml"

function ci_config() {
    sudo mkdir -p $(dirname "${kata_config}")
    [ -f "$kata_config" ] && sudo cp "$kata_config" "$kata_config_backup" || \
        sudo cp "$default_kata_config" "$kata_config"

    source /etc/os-release || source /usr/lib/os-release
    ID=${ID:-""}
    if [ "$ID" == ubuntu ]; then
        # https://github.com/kata-containers/tests/issues/352
        if [ -n "${FACTORY_TEST}" ]; then
            sudo sed -i -e 's/^#enable_template.*$/enable_template = true/g' "${kata_config}"
            echo "init vm template"
            sudo -E PATH=$PATH "$RUNTIME" factory init
        fi
    fi

    echo "enable debug for kata-runtime"
    sudo sed -i 's/^#enable_debug =/enable_debug =/g' ${kata_config}
}

function ci_cleanup() {
    source /etc/os-release || source /usr/lib/os-release

    if [ -n "${FACTORY_TEST}" ]; then
        echo "destroy vm template"
        sudo -E PATH=$PATH "$RUNTIME" factory destroy
    fi

    if [ -e "$default_containerd_config_backup" ]; then
        echo "restore containerd config"
        sudo systemctl stop containerd
        sudo cp "$default_containerd_config_backup" "$default_containerd_config"
    fi

    [ -f "$kata_config_backup" ] && sudo mv "$kata_config_backup" "$kata_config" || \
        sudo rm "$kata_config"
}

function create_containerd_config() {
    local runtime="$1"
    # kata_annotations is set to 1 if the caller wants containerd set up with
    # kata annotations support.
    local kata_annotations=${2-0}
    [ -n "${runtime}" ] || die "need runtime to create config"

    local runtime_type="${containerd_runtime_type}"
    if [ "${runtime}" == "runc" ]; then
        runtime_type="io.containerd.runc.v2"
    fi
    local containerd_runtime=$(command -v "containerd-shim-${runtime}-v2")

    cat << EOF | sudo tee "${CONTAINERD_CONFIG_FILE}"
[debug]
  level = "debug"
[plugins]
  [plugins.cri]
    [plugins.cri.containerd]
      default_runtime_name = "$runtime"
      [plugins.cri.containerd.runtimes.${runtime}]
        runtime_type = "${runtime_type}"
        $( [ $kata_annotations -eq 1 ] && \
        echo 'pod_annotations = ["io.katacontainers.*"]' && \
        echo '        container_annotations = ["io.katacontainers.*"]'
        )
        [plugins.cri.containerd.runtimes.${runtime}.options]
          Runtime = "${containerd_runtime}"
[plugins.linux]
  shim = "${containerd_shim_path}"
EOF

    if [ "$USE_DEVMAPPER" == "true" ]; then
        sudo sed -i 's|^\(\[plugins\]\).*|\1\n  \[plugins.devmapper\]\n    pool_name = \"contd-thin-pool\"\n    base_image_size = \"4096MB\"|' ${CONTAINERD_CONFIG_FILE}
        echo "Devicemapper configured"
        cat "${CONTAINERD_CONFIG_FILE}"
    fi

}
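Usage sketch (editorial; both call forms appear later in this script): the optional second argument toggles kata annotation pass-through in the generated config:

    create_containerd_config "runc"                       # runc baseline, no annotations
    create_containerd_config "kata-${KATA_HYPERVISOR}" 1  # kata runtime with io.katacontainers.* annotations allowed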

function cleanup() {
    ci_cleanup
    [ -d "$tmp_dir" ] && rm -rf "${tmp_dir}"
}

trap cleanup EXIT

function err_report() {
    local log_file="${REPORT_DIR}/containerd.log"
    if [ -f "$log_file" ]; then
        echo "ERROR: containerd log :"
        echo "-------------------------------------"
        cat "${log_file}"
        echo "-------------------------------------"
    fi
}

function check_daemon_setup() {
    info "containerd(cri): Check daemon works with runc"
    create_containerd_config "runc"

    # containerd cri-integration will modify the passed-in config file. Let's
    # give it a temp one.
    cp $CONTAINERD_CONFIG_FILE $CONTAINERD_CONFIG_FILE_TEMP
    # In some distros (AlibabaCloud), there is no btrfs-devel package available,
    # so pass GO_BUILDTAGS="no_btrfs" to make to not use btrfs.
    sudo -E PATH="${PATH}:/usr/local/bin" \
        REPORT_DIR="${REPORT_DIR}" \
        FOCUS="TestImageLoad" \
        RUNTIME="" \
        CONTAINERD_CONFIG_FILE="$CONTAINERD_CONFIG_FILE_TEMP" \
        make GO_BUILDTAGS="no_btrfs" -e cri-integration
}

function testContainerStart() {
    # no_container_yaml set to 1 will not create container_yaml,
    # because the caller has created its own container_yaml.
    no_container_yaml=${1:-0}

    local pod_yaml=${REPORT_DIR}/pod.yaml
    local container_yaml=${REPORT_DIR}/container.yaml
    local image="busybox:latest"

    cat << EOF > "${pod_yaml}"
metadata:
  name: busybox-sandbox1
  namespace: default
  uid: busybox-sandbox1-uid
EOF

    # TestContainerSwap has created its own container_yaml.
    if [ $no_container_yaml -ne 1 ]; then
        cat << EOF > "${container_yaml}"
metadata:
  name: busybox-killed-vmm
  namespace: default
  uid: busybox-killed-vmm-uid
image:
  image: "$image"
command:
- top
EOF
    fi

    sudo cp "$default_containerd_config" "$default_containerd_config_backup"
    sudo cp $CONTAINERD_CONFIG_FILE "$default_containerd_config"

    restart_containerd_service

    sudo crictl pull $image
    podid=$(sudo crictl runp $pod_yaml)
    cid=$(sudo crictl create $podid $container_yaml $pod_yaml)
    sudo crictl start $cid
}

function testContainerStop() {
    info "stop pod $podid"
    sudo crictl stopp $podid
    info "remove pod $podid"
    sudo crictl rmp $podid

    sudo cp "$default_containerd_config_backup" "$default_containerd_config"
    restart_containerd_service
}

function TestKilledVmmCleanup() {
    if [[ "${KATA_HYPERVISOR}" != "qemu" ]]; then
        info "TestKilledVmmCleanup is skipped for ${KATA_HYPERVISOR}, only QEMU is currently tested"
        return 0
    fi

    info "test killed vmm cleanup"

    testContainerStart

    qemu_pid=$(ps aux | grep qemu | grep -v grep | awk '{print $2}')
    info "kill qemu $qemu_pid"
    sudo kill -SIGKILL $qemu_pid
    # sleep to let shimv2 exit
    sleep 1
    remained=$(ps aux | grep shimv2 | grep -v grep || true)
    [ -z "$remained" ] || die "found remaining shimv2 process $remained"

    testContainerStop

    info "stop containerd"
}

function TestContainerMemoryUpdate() {
    if [[ "${KATA_HYPERVISOR}" != "qemu" ]] || [[ "${ARCH}" == "ppc64le" ]] || [[ "${ARCH}" == "s390x" ]]; then
        return
    fi

    test_virtio_mem=$1

    if [ $test_virtio_mem -eq 1 ]; then
        if [[ "$ARCH" != "x86_64" ]]; then
            return
        fi
        info "Test container memory update with virtio-mem"

        sudo sed -i -e 's/^#enable_virtio_mem.*$/enable_virtio_mem = true/g' "${kata_config}"
    else
        info "Test container memory update without virtio-mem"

        sudo sed -i -e 's/^enable_virtio_mem.*$/#enable_virtio_mem = true/g' "${kata_config}"
    fi

    testContainerStart

    vm_size=$(($(sudo crictl exec $cid cat /proc/meminfo | grep "MemTotal:" | awk '{print $2}')*1024))
    if [ $vm_size -gt $((2*1024*1024*1024)) ] || [ $vm_size -lt $((2*1024*1024*1024-128*1024*1024)) ]; then
        testContainerStop
        die "The VM memory size $vm_size before the update is not right"
    fi

    sudo crictl update --memory $((2*1024*1024*1024)) $cid
    sleep 1

    vm_size=$(($(sudo crictl exec $cid cat /proc/meminfo | grep "MemTotal:" | awk '{print $2}')*1024))
    if [ $vm_size -gt $((4*1024*1024*1024)) ] || [ $vm_size -lt $((4*1024*1024*1024-128*1024*1024)) ]; then
        testContainerStop
        die "The VM memory size $vm_size after the increase is not right"
    fi

    if [ $test_virtio_mem -eq 1 ]; then
        sudo crictl update --memory $((1*1024*1024*1024)) $cid
        sleep 1

        vm_size=$(($(sudo crictl exec $cid cat /proc/meminfo | grep "MemTotal:" | awk '{print $2}')*1024))
        if [ $vm_size -gt $((3*1024*1024*1024)) ] || [ $vm_size -lt $((3*1024*1024*1024-128*1024*1024)) ]; then
            testContainerStop
            die "The VM memory size $vm_size after the decrease is not right"
        fi
    fi

    testContainerStop
}

function getContainerSwapInfo() {
    swap_size=$(($(sudo crictl exec $cid cat /proc/meminfo | grep "SwapTotal:" | awk '{print $2}')*1024))
    # NOTE: the two checks below only work on cgroup v1
    swappiness=$(sudo crictl exec $cid cat /sys/fs/cgroup/memory/memory.swappiness)
    swap_in_bytes=$(sudo crictl exec $cid cat /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes)
}
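A note on the helper above (editorial): it reports through globals rather than stdout, so after a call swap_size, swappiness, and swap_in_bytes are available in the caller's scope:

    getContainerSwapInfo
    info "swap size ${swap_size}, swappiness ${swappiness}, memsw limit ${swap_in_bytes}"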

function TestContainerSwap() {
    if [[ "${KATA_HYPERVISOR}" != "qemu" ]] || [[ "${ARCH}" != "x86_64" ]]; then
        return
    fi

    local container_yaml=${REPORT_DIR}/container.yaml
    local image="busybox:latest"

    info "Test container with guest swap"

    create_containerd_config "kata-${KATA_HYPERVISOR}" 1
    sudo sed -i -e 's/^#enable_guest_swap.*$/enable_guest_swap = true/g' "${kata_config}"

    # Test without swap device
    testContainerStart
    getContainerSwapInfo
    # The current default swappiness is 60
    if [ $swappiness -ne 60 ]; then
        testContainerStop
        die "The VM swappiness $swappiness without a swap device is not right"
    fi
    if [ $swap_in_bytes -lt 1125899906842624 ]; then
        testContainerStop
        die "The VM swap_in_bytes $swap_in_bytes without a swap device is not right"
    fi
    if [ $swap_size -ne 0 ]; then
        testContainerStop
        die "The VM swap size $swap_size without a swap device is not right"
    fi
    testContainerStop

    # Test with swap device
    cat << EOF > "${container_yaml}"
metadata:
  name: busybox-swap
  namespace: default
  uid: busybox-swap-uid
annotations:
  io.katacontainers.container.resource.swappiness: "100"
  io.katacontainers.container.resource.swap_in_bytes: "1610612736"
linux:
  resources:
    memory_limit_in_bytes: 1073741824
image:
  image: "$image"
command:
- top
EOF

    testContainerStart 1
    getContainerSwapInfo
    testContainerStop

    if [ $swappiness -ne 100 ]; then
        die "The VM swappiness $swappiness with a swap device is not right"
    fi
    if [ $swap_in_bytes -ne 1610612736 ]; then
        die "The VM swap_in_bytes $swap_in_bytes with a swap device is not right"
    fi
    if [ $swap_size -ne 536870912 ]; then
        die "The VM swap size $swap_size with a swap device is not right"
    fi

    # Test without swap_in_bytes
    cat << EOF > "${container_yaml}"
metadata:
  name: busybox-swap
  namespace: default
  uid: busybox-swap-uid
annotations:
  io.katacontainers.container.resource.swappiness: "100"
linux:
  resources:
    memory_limit_in_bytes: 1073741824
image:
  image: "$image"
command:
- top
EOF

    testContainerStart 1
    getContainerSwapInfo
    testContainerStop

    if [ $swappiness -ne 100 ]; then
        die "The VM swappiness $swappiness without swap_in_bytes is not right"
    fi
    # swap_in_bytes is not set, so it should be a value bigger than 1125899906842624
    if [ $swap_in_bytes -lt 1125899906842624 ]; then
        die "The VM swap_in_bytes $swap_in_bytes without swap_in_bytes is not right"
    fi
    if [ $swap_size -ne 1073741824 ]; then
        die "The VM swap size $swap_size without swap_in_bytes is not right"
    fi

    # Test without memory_limit_in_bytes
    cat << EOF > "${container_yaml}"
metadata:
  name: busybox-swap
  namespace: default
  uid: busybox-swap-uid
annotations:
  io.katacontainers.container.resource.swappiness: "100"
image:
  image: "$image"
command:
- top
EOF

    testContainerStart 1
    getContainerSwapInfo
    testContainerStop

    if [ $swappiness -ne 100 ]; then
        die "The VM swappiness $swappiness without memory_limit_in_bytes is not right"
    fi
    # swap_in_bytes is not set, so it should be a value bigger than 1125899906842624
    if [ $swap_in_bytes -lt 1125899906842624 ]; then
        die "The VM swap_in_bytes $swap_in_bytes without memory_limit_in_bytes is not right"
    fi
    if [ $swap_size -ne 2147483648 ]; then
        die "The VM swap size $swap_size without memory_limit_in_bytes is not right"
    fi

    create_containerd_config "kata-${KATA_HYPERVISOR}"
}

# k8s may restart docker, which will impact stopping containerd
function stop_containerd() {
    local tmp=$(pgrep kubelet || true)
    [ -n "$tmp" ] && sudo kubeadm reset -f

    sudo systemctl stop containerd
}

function main() {
    info "Stop crio service"
    systemctl is-active --quiet crio && sudo systemctl stop crio

    info "Stop containerd service"
    systemctl is-active --quiet containerd && stop_containerd

    # Configure environment if running in CI
    ci_config

    pushd "containerd"

    # Make sure the right artifacts are going to be built
    make clean

    check_daemon_setup

    info "containerd(cri): testing using runtime: ${containerd_runtime_type}"

    create_containerd_config "kata-${KATA_HYPERVISOR}"

    info "containerd(cri): Running cri-integration"

    passing_test="TestContainerStats|TestContainerRestart|TestContainerListStatsWithIdFilter|TestContainerListStatsWithIdSandboxIdFilter|TestDuplicateName|TestImageLoad|TestImageFSInfo|TestSandboxCleanRemove"

    if [[ "${KATA_HYPERVISOR}" == "cloud-hypervisor" || \
        "${KATA_HYPERVISOR}" == "qemu" ]]; then
        issue="https://github.com/kata-containers/tests/issues/2318"
        info "${KATA_HYPERVISOR} fails with TestContainerListStatsWithSandboxIdFilter"
        info "see ${issue}"
    else
        passing_test="${passing_test}|TestContainerListStatsWithSandboxIdFilter"
    fi

    # In some distros (AlibabaCloud), there is no btrfs-devel package available,
    # so pass GO_BUILDTAGS="no_btrfs" to make to not use btrfs.
    # containerd cri-integration will modify the passed-in config file. Let's
    # give it a temp one.
    cp $CONTAINERD_CONFIG_FILE $CONTAINERD_CONFIG_FILE_TEMP
    sudo -E PATH="${PATH}:/usr/local/bin" \
        REPORT_DIR="${REPORT_DIR}" \
        FOCUS="^(${passing_test})$" \
        RUNTIME="" \
        CONTAINERD_CONFIG_FILE="$CONTAINERD_CONFIG_FILE_TEMP" \
        make GO_BUILDTAGS="no_btrfs" -e cri-integration

    # Trap errors to print the containerd log;
    # containerd's `cri-integration` will print the log itself.
    trap err_report ERR

    # TestContainerSwap is currently failing with GHA.
    # Let's re-enable it as soon as we get it to work.
    # Reference: https://github.com/kata-containers/kata-containers/issues/7410
    # TestContainerSwap

    # TODO: runtime-rs doesn't support memory update currently
    if [ "$KATA_HYPERVISOR" != "dragonball" ]; then
        TestContainerMemoryUpdate 1
        TestContainerMemoryUpdate 0
    fi

    TestKilledVmmCleanup

    popd
}

main

tests/integration/kubernetes/gha-run.sh
@@ -8,8 +8,11 @@ set -o errexit
set -o nounset
set -o pipefail

-integration_dir="$(dirname "$(readlink -f "$0")")"
-tools_dir="${integration_dir}/../../tools"
+kubernetes_dir="$(dirname "$(readlink -f "$0")")"
+source "${kubernetes_dir}/../../common.bash"
+tools_dir="${repo_root_dir}/tools"
+
+AZ_RG="${AZ_RG:-kataCI}"

function _print_cluster_name() {
    short_sha="$(git rev-parse --short=12 HEAD)"
@@ -35,7 +38,7 @@ function create_cluster() {
    delete_cluster || true

    az aks create \
-        -g "kataCI" \
+        -g "${AZ_RG}" \
        -n "$(_print_cluster_name)" \
        -s "Standard_D4s_v5" \
        --node-count 1 \
@@ -54,34 +57,33 @@ function install_kubectl() {

function get_cluster_credentials() {
    az aks get-credentials \
-        -g "kataCI" \
+        -g "${AZ_RG}" \
        -n "$(_print_cluster_name)"
}

-function ensure_yq() {
-    : "${GOPATH:=${GITHUB_WORKSPACE}}"
-    export GOPATH
-    export PATH="${GOPATH}/bin:${PATH}"
-    INSTALL_IN_GOPATH=true "${repo_root_dir}/ci/install_yq.sh"
-}
-
-function run_tests() {
+function deploy_kata() {
+    platform="${1}"
+    ensure_yq

    # Ensure we're in the default namespace
    kubectl config set-context --current --namespace=default

    # Delete any spurious tests namespace that was left behind
    kubectl delete namespace kata-containers-k8s-tests &> /dev/null || true

    sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"

    # Enable debug for Kata Containers
    yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[1].value' --tag '!!str' "true"
    # Let the `kata-deploy` script take care of the runtime class creation / removal
    yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[4].value' --tag '!!str' "true"

    if [ "${KATA_HOST_OS}" = "cbl-mariner" ]; then
        yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[+].name' "HOST_OS"
        yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[-1].value' "${KATA_HOST_OS}"
    fi

    echo "::group::Final kata-deploy.yaml that is used in the test"
    cat "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
    cat "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" | grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" || die "Failed to setup the tests image"
    echo "::endgroup::"

    kubectl apply -f "${tools_dir}/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
    if [ "${platform}" = "tdx" ]; then
@@ -90,7 +92,6 @@ function run_tests() {
        kubectl apply -f "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
    fi
    kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
-    kubectl apply -f "${tools_dir}/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml"

    # This is needed as the kata-deploy pod will be set to "Ready" when it starts running,
    # which may cause issues like not having the node properly labeled or the artefacts
@@ -101,11 +102,24 @@ function run_tests() {
        sleep 60s
    fi

+    echo "::group::kata-deploy logs"
+    kubectl -n kube-system logs -l name=kata-deploy
+    echo "::endgroup::"
+
+    echo "::group::Runtime classes"
+    kubectl get runtimeclass
+    echo "::endgroup::"
+}
+
+function run_tests() {
+    # Delete any spurious tests namespace that was left behind
+    kubectl delete namespace kata-containers-k8s-tests &> /dev/null || true
+
    # Create a new namespace for the tests and switch to it
-    kubectl apply -f ${integration_dir}/kubernetes/runtimeclass_workloads/tests-namespace.yaml
+    kubectl apply -f ${kubernetes_dir}/runtimeclass_workloads/tests-namespace.yaml
    kubectl config set-context --current --namespace=kata-containers-k8s-tests

-    pushd "${integration_dir}/kubernetes"
+    pushd "${kubernetes_dir}"
    bash setup.sh
    bash run_kubernetes_tests.sh
    popd
@@ -113,6 +127,15 @@ function run_tests() {

function cleanup() {
+    platform="${1}"
+    ensure_yq
+
+    echo "Gather information about the nodes and pods before cleaning up the node"
+    get_nodes_and_pods_info
+
+    if [ "${platform}" = "aks" ]; then
+        delete_cluster
+        return
+    fi
+
    # Switch back to the default namespace and delete the tests one
    kubectl config set-context --current --namespace=default
@@ -129,6 +152,9 @@ function cleanup() {
    kubectl delete ${deploy_spec}
    kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod

+    # Let the `kata-deploy` script take care of the runtime class creation / removal
+    yq write -i "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" 'spec.template.spec.containers[0].env[4].value' --tag '!!str' "true"
+
    sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
-    cat "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
+    cat "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" | grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" || die "Failed to setup the tests image"
@@ -137,16 +163,20 @@ function cleanup() {

    kubectl delete ${cleanup_spec}
    kubectl delete -f "${tools_dir}/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
    kubectl delete -f "${tools_dir}/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml"
}

function delete_cluster() {
    az aks delete \
-        -g "kataCI" \
+        -g "${AZ_RG}" \
        -n "$(_print_cluster_name)" \
        --yes
}

+function get_nodes_and_pods_info() {
+    kubectl debug $(kubectl get nodes -o name) -it --image=quay.io/kata-containers/kata-debug:latest
+    kubectl get pods -o name | grep node-debugger | xargs kubectl delete
+}
+
function main() {
    export KATA_HOST_OS="${KATA_HOST_OS:-}"

@@ -159,14 +189,15 @@ function main() {
        install-bats) install_bats ;;
        install-kubectl) install_kubectl ;;
        get-cluster-credentials) get_cluster_credentials ;;
-        run-tests-aks) run_tests "aks" ;;
-        run-tests-sev) run_tests "sev" ;;
-        run-tests-snp) run_tests "snp" ;;
-        run-tests-tdx) run_tests "tdx" ;;
+        deploy-kata-aks) deploy_kata "aks" ;;
+        deploy-kata-sev) deploy_kata "sev" ;;
+        deploy-kata-snp) deploy_kata "snp" ;;
+        deploy-kata-tdx) deploy_kata "tdx" ;;
+        run-tests) run_tests ;;
+        cleanup-sev) cleanup "sev" ;;
+        cleanup-snp) cleanup "snp" ;;
+        cleanup-tdx) cleanup "tdx" ;;
-        delete-cluster) delete_cluster ;;
+        delete-cluster) cleanup "aks" ;;
        *) >&2 echo "Invalid argument"; exit 2 ;;
    esac
}
tests/integration/kubernetes/k8s-file-volume.bats (new Normal file, 47 lines)
@@ -0,0 +1,47 @@
#!/usr/bin/env bats
#
# Copyright (c) 2022 Ant Group
#
# SPDX-License-Identifier: Apache-2.0
#

load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
TEST_INITRD="${TEST_INITRD:-no}"

setup() {
    [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
    pod_name="test-file-volume"
    container_name="busybox-file-volume-container"
    tmp_file=$(exec_host mktemp /tmp/file-volume-test-foo.XXXXX)
    mount_path="/tmp/foo.txt"
    file_body="test"
    get_pod_config_dir
}

@test "Test readonly volume for pods" {
    [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
    # Write test body to temp file
    exec_host "echo "$file_body" > $tmp_file"

    # Create test yaml
    sed -e "s|HOST_FILE|$tmp_file|" ${pod_config_dir}/pod-file-volume.yaml > ${pod_config_dir}/test-pod-file-volume.yaml
    sed -i "s|MOUNT_PATH|$mount_path|" ${pod_config_dir}/test-pod-file-volume.yaml

    # Create pod
    kubectl create -f "${pod_config_dir}/test-pod-file-volume.yaml"

    # Check pod creation
    kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"

    # Validate file volume body inside the pod
    file_in_container=$(kubectl exec $pod_name -- cat $mount_path)
    [ "$file_body" == "$file_in_container" ]
}

teardown() {
    [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
    kubectl delete pod "$pod_name"
    exec_host rm -f $tmp_file
    rm -f ${pod_config_dir}/test-pod-file-volume.yaml
}
tests/integration/kubernetes/k8s-volume.bats (new Normal file, 67 lines)
@@ -0,0 +1,67 @@
#!/usr/bin/env bats
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
TEST_INITRD="${TEST_INITRD:-no}"

setup() {
    [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"

    get_pod_config_dir

    tmp_file=$(exec_host mktemp -d /tmp/data.XXXX)
    pod_yaml=$(mktemp --tmpdir pod_config.XXXXXX.yaml)
    msg="Hello from Kubernetes"
    exec_host "echo $msg > $tmp_file/index.html"
    pod_name="pv-pod"
    # Point the yaml at the temporary directory
    sed -e "s|tmp_data|${tmp_file}|g" ${pod_config_dir}/pv-volume.yaml > "$pod_yaml"
}

@test "Create Persistent Volume" {
    [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"

    volume_name="pv-volume"
    volume_claim="pv-claim"

    # Create the persistent volume
    kubectl create -f "$pod_yaml"

    # Check the persistent volume is Available
    cmd="kubectl get pv $volume_name | grep Available"
    waitForProcess "$wait_time" "$sleep_time" "$cmd"

    # Create the persistent volume claim
    kubectl create -f "${pod_config_dir}/volume-claim.yaml"

    # Check the persistent volume claim is Bound
    cmd="kubectl get pvc $volume_claim | grep Bound"
    waitForProcess "$wait_time" "$sleep_time" "$cmd"

    # Create pod
    kubectl create -f "${pod_config_dir}/pv-pod.yaml"

    # Check pod creation
    kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"

    cmd="cat /mnt/index.html"
    kubectl exec $pod_name -- sh -c "$cmd" | grep "$msg"
}

teardown() {
    [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"

    # Debugging information
    kubectl describe "pod/$pod_name"

    kubectl delete pod "$pod_name"
    kubectl delete pvc "$volume_claim"
    kubectl delete pv "$volume_name"
    rm -f "$pod_yaml"
    exec_host rm -rf "$tmp_file"
}
tests/integration/kubernetes/run_kubernetes_tests.sh
@@ -8,6 +8,7 @@
set -e

kubernetes_dir=$(dirname "$(readlink -f "$0")")
+source "${kubernetes_dir}/../../common.bash"

TARGET_ARCH="${TARGET_ARCH:-x86_64}"
KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
@@ -27,6 +28,7 @@ else
        "k8s-empty-dirs.bats" \
        "k8s-env.bats" \
        "k8s-exec.bats" \
+        "k8s-file-volume.bats" \
        "k8s-inotify.bats" \
        "k8s-job.bats" \
        "k8s-kill-all-process-in-container.bats" \
@@ -50,6 +52,7 @@ else
        "k8s-sysctls.bats" \
        "k8s-security-context.bats" \
        "k8s-shared-volume.bats" \
+        "k8s-volume.bats" \
        "k8s-nginx-connectivity.bats" \
    )
fi

tests/integration/kubernetes/runtimeclass_workloads/pv-pod.yaml
@@ -0,0 +1,26 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
kind: Pod
apiVersion: v1
metadata:
  name: pv-pod
spec:
  terminationGracePeriodSeconds: 0
  runtimeClassName: kata
  volumes:
    - name: pv-storage
      persistentVolumeClaim:
        claimName: pv-claim
  containers:
    - name: pv-container
      image: quay.io/prometheus/busybox:latest
      ports:
      command:
        - sleep
        - "120"
      volumeMounts:
        - mountPath: "/mnt/"
          name: pv-storage

tests/integration/kubernetes/runtimeclass_workloads/pv-volume.yaml
@@ -0,0 +1,19 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "tmp_data"

tests/integration/kubernetes/runtimeclass_workloads/volume-claim.yaml
@@ -0,0 +1,16 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi

tests/integration/kubernetes/setup.sh
@@ -8,26 +8,33 @@ set -o nounset
set -o pipefail

kubernetes_dir=$(dirname "$(readlink -f "$0")")
source "${kubernetes_dir}/../../common.bash"

+reset_workloads_work_dir() {
+    rm -rf ${kubernetes_dir}/runtimeclass_workloads_work
+    cp -R ${kubernetes_dir}/runtimeclass_workloads ${kubernetes_dir}/runtimeclass_workloads_work
+}
+
set_runtime_class() {
-    sed -i -e "s|runtimeClassName: kata|runtimeClassName: kata-${KATA_HYPERVISOR}|" ${kubernetes_dir}/runtimeclass_workloads/*.yaml
+    sed -i -e "s|runtimeClassName: kata|runtimeClassName: kata-${KATA_HYPERVISOR}|" ${kubernetes_dir}/runtimeclass_workloads_work/*.yaml
}

set_kernel_path() {
    if [[ "${KATA_HOST_OS}" = "cbl-mariner" ]]; then
        mariner_kernel_path="/usr/share/cloud-hypervisor/vmlinux.bin"
-        find ${kubernetes_dir}/runtimeclass_workloads/*.yaml -exec yq write -i {} 'metadata.annotations[io.katacontainers.config.hypervisor.kernel]' "${mariner_kernel_path}" \;
+        find ${kubernetes_dir}/runtimeclass_workloads_work/*.yaml -exec yq write -i {} 'metadata.annotations[io.katacontainers.config.hypervisor.kernel]' "${mariner_kernel_path}" \;
    fi
}

set_initrd_path() {
    if [[ "${KATA_HOST_OS}" = "cbl-mariner" ]]; then
-        initrd_path="/opt/kata/share/kata-containers/kata-containers-initrd-cbl-mariner.img"
-        find ${kubernetes_dir}/runtimeclass_workloads/*.yaml -exec yq write -i {} 'metadata.annotations[io.katacontainers.config.hypervisor.initrd]' "${initrd_path}" \;
+        initrd_path="/opt/kata/share/kata-containers/kata-containers-initrd-mariner.img"
+        find ${kubernetes_dir}/runtimeclass_workloads_work/*.yaml -exec yq write -i {} 'metadata.annotations[io.katacontainers.config.hypervisor.initrd]' "${initrd_path}" \;
    fi
}

main() {
+    reset_workloads_work_dir
    set_runtime_class
    set_kernel_path
    set_initrd_path

tests/integration/kubernetes/tests_common.sh
@@ -34,6 +34,19 @@ dragonball_limitations="https://github.com/kata-containers/kata-containers/issue
export KUBECONFIG="${KUBECONFIG:-$HOME/.kube/config}"

get_pod_config_dir() {
-    pod_config_dir="${BATS_TEST_DIRNAME}/runtimeclass_workloads"
+    pod_config_dir="${BATS_TEST_DIRNAME}/runtimeclass_workloads_work"
    info "k8s configured to use runtimeclass"
}

+# Runs a command in the host filesystem.
+exec_host() {
+    node="$(kubectl get node -o name)"
+    # `kubectl debug` always returns 0, so we hack it to return the right exit code.
+    command="$@"
+    command+='; echo -en \\n$?'
+    output="$(kubectl debug -qit "${node}" --image=alpine:latest -- chroot /host bash -c "${command}")"
+    kubectl get pods -o name | grep node-debugger | xargs kubectl delete > /dev/null
+    exit_code="$(echo "${output}" | tail -1)"
+    echo "$(echo "${output}" | head -n -1)"
+    return ${exit_code}
+}
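Usage sketch (editorial; the mktemp path comes from the bats tests above): exec_host runs a command on the node through kubectl debug and recovers the real exit status from the last line of the output:

    tmp_file=$(exec_host mktemp /tmp/file-volume-test-foo.XXXXX)
    exec_host "echo test > $tmp_file"
    exec_host rm -f "$tmp_file"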
tests/integration/nydus/gha-run.sh (new Executable file, 38 lines)
@@ -0,0 +1,38 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

set -o errexit
set -o nounset
set -o pipefail

kata_tarball_dir="${2:-kata-artifacts}"
nydus_dir="$(dirname "$(readlink -f "$0")")"
source "${nydus_dir}/../../common.bash"

function install_dependencies() {
    info "Installing the dependencies needed for running the nydus tests"

    return 0
}

function run() {
    info "Running nydus tests using ${KATA_HYPERVISOR} hypervisor"

    return 0
}

function main() {
    action="${1:-}"
    case "${action}" in
        install-dependencies) install_dependencies ;;
        install-kata) return 0 ;;
        run) run ;;
        *) >&2 die "Invalid argument" ;;
    esac
}

main "$@"
tests/integration/nydus/nydus-container.yaml (new Normal file, 12 lines)
@@ -0,0 +1,12 @@
metadata:
  name: nydus-container
image:
  image: ghcr.io/dragonflyoss/image-service/alpine:nydus-latest
command:
- tail
- -f
- /dev/null
linux:
  resources:
    memory_limit_in_bytes: 524288000
log_path: nydus.0.log
tests/integration/nydus/nydus-sandbox.yaml (new Normal file, 5 lines)
@@ -0,0 +1,5 @@
metadata:
  attempt: 1
  name: nydus-sandbox
  namespace: default
log_directory: /tmp
tests/integration/nydus/nydus_tests.sh (new Executable file, 211 lines)
@@ -0,0 +1,211 @@
|
||||
#!/bin/bash
#
# Copyright (c) 2022 Ant Group
#
# SPDX-License-Identifier: Apache-2.0
#
# This will test that the nydus feature is working properly

set -o errexit
set -o nounset
set -o pipefail
set -o errtrace

dir_path=$(dirname "$0")
source "${dir_path}/../../lib/common.bash"
source "${dir_path}/../../.ci/lib.sh"
source "/etc/os-release" || source "/usr/lib/os-release"
KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"

need_restore_kata_config=false
kata_config_backup="/tmp/kata-configuration.toml"
SYSCONFIG_FILE="/etc/kata-containers/configuration.toml"
DEFAULT_CONFIG_FILE="/opt/kata/share/defaults/kata-containers/configuration-qemu.toml"
CLH_CONFIG_FILE="/opt/kata/share/defaults/kata-containers/configuration-clh.toml"
DB_CONFIG_FILE="/opt/kata/share/defaults/kata-containers/configuration-dragonball.toml"
need_restore_containerd_config=false
containerd_config="/etc/containerd/config.toml"
containerd_config_backup="/tmp/containerd.config.toml"

# test image for container
IMAGE="${IMAGE:-ghcr.io/dragonflyoss/image-service/alpine:nydus-latest}"

if [ "$KATA_HYPERVISOR" != "qemu" ] && [ "$KATA_HYPERVISOR" != "cloud-hypervisor" ] && [ "$KATA_HYPERVISOR" != "dragonball" ]; then
	echo "Skip nydus test for $KATA_HYPERVISOR, it only works for QEMU/CLH/DB now."
	exit 0
fi

arch="$(uname -m)"
if [ "$arch" != "x86_64" ]; then
	echo "Skip nydus test for $arch, it only works for x86_64 now. See https://github.com/kata-containers/tests/issues/4445"
	exit 0
fi

function install_from_tarball() {
	local package_name="$1"
	local binary_name="$2"
	[ -n "$package_name" ] || die "need package_name"
	[ -n "$binary_name" ] || die "need package release binary_name"

	local url=$(get_version "externals.${package_name}.url")
	local version=$(get_version "externals.${package_name}.version")
	local tarball_url="${url}/releases/download/${version}/${binary_name}-${version}-$arch.tgz"
	if [ "${package_name}" == "nydus" ]; then
		local goarch="$(${dir_path}/../../.ci/kata-arch.sh --golang)"
		tarball_url="${url}/releases/download/${version}/${binary_name}-${version}-linux-$goarch.tgz"
	fi
	echo "Download tarball from ${tarball_url}"
	curl -Ls "$tarball_url" | sudo tar xfz - -C /usr/local/bin --strip-components=1
}

function setup_nydus() {
	# install nydus
	install_from_tarball "nydus" "nydus-static"

	# install nydus-snapshotter
	install_from_tarball "nydus-snapshotter" "nydus-snapshotter"

	# Config nydus snapshotter
	sudo -E cp "$dir_path/nydusd-config.json" /etc/

	# start nydus-snapshotter
	nohup /usr/local/bin/containerd-nydus-grpc \
		--config-path /etc/nydusd-config.json \
		--shared-daemon \
		--log-level debug \
		--root /var/lib/containerd/io.containerd.snapshotter.v1.nydus \
		--cache-dir /var/lib/nydus/cache \
		--nydusd-path /usr/local/bin/nydusd \
		--nydusimg-path /usr/local/bin/nydus-image \
		--disable-cache-manager true \
		--enable-nydus-overlayfs true \
		--log-to-stdout >/dev/null 2>&1 &
}

function config_kata() {
	sudo mkdir -p /etc/kata-containers
	if [ -f "$SYSCONFIG_FILE" ]; then
		need_restore_kata_config=true
		sudo cp -a "${SYSCONFIG_FILE}" "${kata_config_backup}"
	elif [ "$KATA_HYPERVISOR" == "qemu" ]; then
		sudo cp -a "${DEFAULT_CONFIG_FILE}" "${SYSCONFIG_FILE}"
	elif [ "$KATA_HYPERVISOR" == "dragonball" ]; then
		sudo cp -a "${DB_CONFIG_FILE}" "${SYSCONFIG_FILE}"
	else
		sudo cp -a "${CLH_CONFIG_FILE}" "${SYSCONFIG_FILE}"
	fi

	echo "Enabling all debug options in file ${SYSCONFIG_FILE}"
	sudo sed -i -e 's/^#\(enable_debug\).*=.*$/\1 = true/g' "${SYSCONFIG_FILE}"
	sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 agent.log=debug"/g' "${SYSCONFIG_FILE}"

	if [ "$KATA_HYPERVISOR" != "dragonball" ]; then
		sudo sed -i 's|^shared_fs.*|shared_fs = "virtio-fs-nydus"|g' "${SYSCONFIG_FILE}"
		sudo sed -i 's|^virtio_fs_daemon.*|virtio_fs_daemon = "/usr/local/bin/nydusd"|g' "${SYSCONFIG_FILE}"
	fi

	sudo sed -i 's|^virtio_fs_extra_args.*|virtio_fs_extra_args = []|g' "${SYSCONFIG_FILE}"
}

function config_containerd() {
	readonly runc_path=$(command -v runc)
	sudo mkdir -p /etc/containerd/
	if [ -f "$containerd_config" ]; then
		need_restore_containerd_config=true
		sudo cp -a "${containerd_config}" "${containerd_config_backup}"
	else
		# -f so a missing file does not trip errexit
		sudo rm -f "${containerd_config}"
	fi

	cat <<EOF | sudo tee $containerd_config
[debug]
  level = "debug"
[proxy_plugins]
  [proxy_plugins.nydus]
    type = "snapshot"
    address = "/run/containerd-nydus/containerd-nydus-grpc.sock"
[plugins]
  [plugins.cri]
    disable_hugetlb_controller = false
    [plugins.cri.containerd]
      snapshotter = "nydus"
      disable_snapshot_annotations = false
      [plugins.cri.containerd.runtimes]
        [plugins.cri.containerd.runtimes.runc]
          runtime_type = "io.containerd.runc.v2"
          [plugins.cri.containerd.runtimes.runc.options]
            BinaryName = "${runc_path}"
            Root = ""
        [plugins.cri.containerd.runtimes.kata]
          runtime_type = "io.containerd.kata.v2"
          privileged_without_host_devices = true
EOF
}

function setup() {
	setup_nydus
	config_kata
	config_containerd
	restart_containerd_service
	check_processes
	extract_kata_env
}

function run_test() {
	sudo -E crictl pull "${IMAGE}"
	pod=$(sudo -E crictl runp -r kata $dir_path/nydus-sandbox.yaml)
	echo "Pod $pod created"
	cnt=$(sudo -E crictl create $pod $dir_path/nydus-container.yaml $dir_path/nydus-sandbox.yaml)
	echo "Container $cnt created"
	sudo -E crictl start $cnt
	echo "Container $cnt started"

	# ensure container is running
	state=$(sudo -E crictl inspect $cnt | jq .status.state | tr -d '"')
	[ "$state" == "CONTAINER_RUNNING" ] || die "Container is not running ($state)"
	# run a command in container
	sudo -E crictl exec $cnt ls

	# cleanup containers
	sudo -E crictl stop $cnt
	sudo -E crictl stopp $pod
	sudo -E crictl rmp $pod
}

function teardown() {
	echo "Running teardown"

	# kill nydus-snapshotter
	bin=containerd-nydus-grpc
	kill -9 $(pidof $bin) || true
	[ "$(pidof $bin)" == "" ] || die "$bin is running"

	bin=nydusd
	kill -9 $(pidof $bin) || true
	[ "$(pidof $bin)" == "" ] || die "$bin is running"

	# restore kata configuration.toml if needed
	if [ "${need_restore_kata_config}" == "true" ]; then
		sudo mv "$kata_config_backup" "$SYSCONFIG_FILE"
	else
		sudo rm "$SYSCONFIG_FILE"
	fi

	# restore containerd config.toml if needed
	if [ "${need_restore_containerd_config}" == "true" ]; then
		sudo mv "$containerd_config_backup" "$containerd_config"
	else
		sudo rm "$containerd_config"
	fi

	clean_env_ctr
	check_processes
}

trap teardown EXIT

echo "Running setup"
setup

echo "Running nydus integration tests"
run_test
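
If the test fails early, one hedged sanity check (assuming a containerd 1.x `ctr` client on the host) is to confirm the proxy snapshotter registered in `config.toml` is actually reachable before debugging Kata itself:

```
$ sudo ctr plugins ls | grep nydus
$ sudo ctr snapshots --snapshotter nydus ls
```
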
27
tests/integration/nydus/nydusd-config.json
Normal file
@@ -0,0 +1,27 @@
{
  "device": {
    "backend": {
      "type": "registry",
      "config": {
        "scheme": "https",
        "timeout": 5,
        "connect_timeout": 5,
        "retry_limit": 2
      }
    },
    "cache": {
      "type": "blobcache",
      "config": {
        "work_dir": "/var/lib/nydus/cache"
      }
    }
  },
  "mode": "direct",
  "digest_validate": false,
  "iostats_files": false,
  "enable_xattr": true,
  "fs_prefetch": {
    "enable": true,
    "threads_count": 2
  }
}
120
tests/kata-arch.sh
Executable file
@@ -0,0 +1,120 @@
#!/usr/bin/env bash
#
# Copyright (c) 2018-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

set -e

typeset -r script_name=${0##*/}

typeset -r cidir=$(dirname "$0")

# Minimal fallback so the unsupported-architecture paths below
# work even when no common library has been sourced.
die() { echo >&2 "${script_name}: ERROR: $*"; exit 1; }

function usage() {
	cat <<EOF
Description: Display host architecture name in various formats.

Usage: $script_name [options]

Options:

 -d, --default : Show arch(1) architecture (this is the default).
 -g, --golang  : Show architecture name using golang naming.
 -r, --rust    : Show architecture name using rust naming.
 -h, --help    : Show this help.
 -k, --kernel  : Show architecture name compatible with Linux* build system.

EOF
}

# Convert architecture to the name used by golang
function arch_to_golang() {
	local -r arch="$1"

	case "$arch" in
		aarch64) echo "arm64";;
		ppc64le) echo "$arch";;
		x86_64) echo "amd64";;
		s390x) echo "s390x";;
		*) die "unsupported architecture: $arch";;
	esac
}

# Convert architecture to the name used by rust
function arch_to_rust() {
	local arch="$1"

	if [ "${arch}" == "ppc64le" ]; then
		arch="powerpc64le"
	fi

	echo "${arch}"
}

# Convert architecture to the name used by the Linux kernel build system
function arch_to_kernel() {
	local -r arch="$1"

	case "$arch" in
		aarch64) echo "arm64";;
		ppc64le) echo "powerpc";;
		x86_64) echo "$arch";;
		s390x) echo "s390x";;
		*) die "unsupported architecture: $arch";;
	esac
}

function main() {
	local type="default"

	local getopt_cmd="getopt"

	# macOS default getopt does not recognize GNU options
	[ "$(uname -s)" == "Darwin" ] && getopt_cmd="/usr/local/opt/gnu-getopt/bin/${getopt_cmd}"

	# Capture getopt's exit status directly; checking $? after
	# "eval set" would only ever test the eval itself.
	local args
	args=$("$getopt_cmd" \
		-n "$script_name" \
		-a \
		--options="dgrhk" \
		--longoptions="default golang rust help kernel" \
		-- "$@") || { usage >&2; exit 1; }

	eval set -- "$args"

	while [ $# -gt 1 ]
	do
		case "$1" in
			-d|--default) ;;

			-g|--golang) type="golang";;

			-r|--rust) type="rust";;

			-h|--help)
				usage
				exit 0
				;;

			-k|--kernel) type="kernel";;

			--)
				shift
				break
				;;
		esac
		shift
	done

	local -r arch=$(uname -m)

	case "$type" in
		default) echo "$arch";;
		golang) arch_to_golang "$arch";;
		rust) arch_to_rust "${arch}";;
		kernel) arch_to_kernel "$arch";;
	esac
}

main "$@"
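
For example, on an x86_64 host the mappings above give:

```
$ ./kata-arch.sh
x86_64
$ ./kata-arch.sh --golang
amd64
$ ./kata-arch.sh --kernel
x86_64
```
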
@@ -70,6 +70,8 @@ Tests relating to networking. General items could include:

Tests relating to the storage (graph, volume) drivers.

For further details see the [storage tests documentation](storage).

### Disk

Tests relating to measuring read and write performance against clusters.
@@ -79,6 +81,14 @@ Tests relating to measuring read and write performance against clusters.

Tests relating to TensorFlow and Pytorch implementations of several popular
convolutional models.

For further details see the [machine learning tests documentation](machine_learning).

### `CPU`

Tests related to `CPU` performance.

For further details see the [`cpu` tests documentation](cpu).

## Saving Results

In order to ensure continuity, and thus testing and historical tracking of results,
@@ -16,9 +16,9 @@ description = "measure container lifecycle timings"
# within (inclusive)
checkvar = ".\"boot-times\".Results | .[] | .\"to-workload\".Result"
checktype = "mean"
midval = 0.42
minpercent = 20.0
maxpercent = 20.0
midval = 0.69
minpercent = 30.0
maxpercent = 30.0

[[metric]]
name = "memory-footprint"
@@ -32,3 +32,68 @@ checktype = "mean"
midval = 2518364.00
minpercent = 20.0
maxpercent = 20.0

[[metric]]
name = "memory-footprint-inside-container"
type = "json"
description = "measure memory inside the container"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"memory-footprint-inside-container\".Results | .[] | .memtotal.Result"
checktype = "mean"
midval = 4135244.0
minpercent = 20.0
maxpercent = 20.0

[[metric]]
name = "blogbench"
type = "json"
description = "measure container average of blogbench write"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"blogbench\".Results | .[] | .write.Result"
checktype = "mean"
midval = 1623.0
minpercent = 20.0
maxpercent = 20.0

[[metric]]
name = "blogbench"
type = "json"
description = "measure container average of blogbench read"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"blogbench\".Results | .[] | .read.Result"
checktype = "mean"
midval = 96939.0
minpercent = 20.0
maxpercent = 20.0

[[metric]]
name = "tensorflow"
type = "json"
description = "tensorflow resnet model"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"tensorflow\".Results | .[] | .resnet.Result"
checktype = "mean"
midval = 3566.0
minpercent = 20.0
maxpercent = 20.0

[[metric]]
name = "tensorflow"
type = "json"
description = "tensorflow alexnet model"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"tensorflow\".Results | .[] | .alexnet.Result"
checktype = "mean"
midval = 98.0
minpercent = 20.0
maxpercent = 20.0
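
As a reading aid for the baselines above: `midval`, `minpercent` and `maxpercent` together define the inclusive pass range that `checkmetrics` applies to the measured mean. A minimal sketch of the arithmetic, assuming the percentages are taken as deviations below and above `midval`:

```
midval=0.69
minpercent=30.0
maxpercent=30.0
min=$(echo "${midval} * (1 - ${minpercent} / 100)" | bc -l)  # 0.483
max=$(echo "${midval} * (1 + ${maxpercent} / 100)" | bc -l)  # 0.897
echo "mean must fall within [${min}, ${max}]"
```
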
@@ -16,9 +16,9 @@ description = "measure container lifecycle timings"
# within (inclusive)
checkvar = ".\"boot-times\".Results | .[] | .\"to-workload\".Result"
checktype = "mean"
midval = 0.61
minpercent = 20.0
maxpercent = 20.0
midval = 0.71
minpercent = 30.0
maxpercent = 30.0

[[metric]]
name = "memory-footprint"
@@ -32,3 +32,68 @@ checktype = "mean"
midval = 2435844.00
minpercent = 20.0
maxpercent = 20.0

[[metric]]
name = "memory-footprint-inside-container"
type = "json"
description = "measure memory inside the container"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"memory-footprint-inside-container\".Results | .[] | .memtotal.Result"
checktype = "mean"
midval = 3677280.0
minpercent = 25.0
maxpercent = 25.0

[[metric]]
name = "blogbench"
type = "json"
description = "measure container average of blogbench write"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"blogbench\".Results | .[] | .write.Result"
checktype = "mean"
midval = 1639.0
minpercent = 20.0
maxpercent = 20.0

[[metric]]
name = "blogbench"
type = "json"
description = "measure container average of blogbench read"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"blogbench\".Results | .[] | .read.Result"
checktype = "mean"
midval = 98687.0
minpercent = 20.0
maxpercent = 20.0

[[metric]]
name = "tensorflow"
type = "json"
description = "tensorflow resnet model"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"tensorflow\".Results | .[] | .resnet.Result"
checktype = "mean"
midval = 3546.0
minpercent = 20.0
maxpercent = 20.0

[[metric]]
name = "tensorflow"
type = "json"
description = "tensorflow alexnet model"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"tensorflow\".Results | .[] | .alexnet.Result"
checktype = "mean"
midval = 98.0
minpercent = 20.0
maxpercent = 20.0
9
tests/metrics/cpu/README.md
Normal file
@@ -0,0 +1,9 @@
# Kata Containers C-Ray Metrics

This is a test of C-Ray, a simple raytracer designed to test floating-point CPU performance.

## Running the C-Ray test

The test can be run by hand, for example:

```
$ cd metrics/cpu/c-ray
$ ./cray.sh
```
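
`cray.sh` builds the image from the Dockerfile below when it is missing (via `check_ctr_images`). To build it by hand, something along these lines should work; the tag matches the `IMAGE` variable in the script:

```
$ docker build -t cray:latest tests/metrics/cpu/c-ray/
```
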
26
tests/metrics/cpu/c-ray/Dockerfile
Normal file
@@ -0,0 +1,26 @@
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

# Usage: FROM [image name]
FROM ubuntu:20.04

# Version of the Dockerfile
LABEL DOCKERFILE_VERSION="1.0"

ENV DEBIAN_FRONTEND=noninteractive

# URL for c-ray benchmark
ENV CRAY_URL "http://www.phoronix-test-suite.com/benchmark-files/c-ray-1.1.tar.gz"

RUN apt-get update && \
	apt-get install -y --no-install-recommends build-essential gcc curl && \
	apt-get remove -y unattended-upgrades && \
	curl -OkL "${CRAY_URL}" && \
	tar -zxvf c-ray-1.1.tar.gz && \
	cd c-ray-1.1 && \
	cc -o c-ray-mt c-ray-mt.c -lm -lpthread && \
	make && \
	make install

CMD ["/bin/bash"]
53
tests/metrics/cpu/c-ray/cray.sh
Executable file
@@ -0,0 +1,53 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

set -e

# General env
SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
source "${SCRIPT_PATH}/../../lib/common.bash"

TEST_NAME="cray"
IMAGE="docker.io/library/cray:latest"
DOCKERFILE="${SCRIPT_PATH}/Dockerfile"
CMD="cd c-ray-1.1 && ./c-ray-mt -t 32 -s 1024x768 -r 8 -i sphfract -o output.ppm 2>&1 | tee -a output.txt && cat output.txt"
cray_file=$(mktemp crayresults.XXXXXXXXXX)

function remove_tmp_file() {
	rm -rf "${cray_file}"
}

trap remove_tmp_file EXIT

function main() {
	# Check tools/commands dependencies
	cmds=("awk" "docker")
	init_env
	check_cmds "${cmds[@]}"
	check_ctr_images "$IMAGE" "$DOCKERFILE"

	sudo -E "${CTR_EXE}" run --rm --runtime="${CTR_RUNTIME}" "${IMAGE}" test sh -c "${CMD}" > "${cray_file}"
	metrics_json_init
	results=$(cat "${cray_file}" | grep seconds | awk '{print $3}' | head -n 1)
	metrics_json_start_array

	local json="$(cat << EOF
{
	"rendering": {
		"Result": ${results},
		"Units": "s"
	}
}
EOF
)"
	metrics_json_add_array_element "$json"
	metrics_json_end_array "Results"
	metrics_json_save

	clean_env_ctr
}

main "$@"
@@ -320,8 +320,6 @@ EOF

	metrics_json_add_array_element "$json"
	metrics_json_end_array "Results"

	clean_env_ctr
}

function save_config(){
@@ -344,6 +342,9 @@ EOF
}

function main(){
	# Collect kata-env data
	common_init

	# Verify enough arguments
	if [ $# != 2 ] && [ $# != 3 ];then
		echo >&2 "error: Not enough arguments [$@]"
@@ -378,6 +379,7 @@ function main(){
	fi

	metrics_json_save
	clean_env_ctr
}

main "$@"
@@ -20,7 +20,7 @@ IMAGE='quay.io/prometheus/busybox:latest'
CMD="sleep 10; cat /proc/meminfo"
# We specify here in 'k', as that then matches the results we get from the meminfo,
# which makes later direct comparison easier.
MEMSIZE=${MEMSIZE:-$((2048*1024))}
MEMSIZE="${MEMSIZE:-$((2048*1024))}"

# this variable determines the number of attempts when a test
# result is considered not valid (a zero value or a negative value)
@@ -38,7 +38,7 @@ count_iters=0
# valid_result: if value stored is '1' the result is valid, '0' otherwise
valid_result=0

parse_results() {
function parse_results() {
	local raw_results="${1}"

	# Variables used to sum cumulative values in the case of two or more reps.
@@ -47,20 +47,20 @@ parse_results() {
	local memfree_acu="${3:-0}"
	local memavailable_acu="${4:-0}"

	local memtotal=$(echo "$raw_results" | awk '/MemTotal/ {print $2}')
	units_memtotal=$(echo "$raw_results" | awk '/MemTotal/ {print $3}')
	local memtotal=$(echo "${raw_results}" | awk '/MemTotal/ {print $2}')
	units_memtotal=$(echo "${raw_results}" | awk '/MemTotal/ {print $3}')

	local memfree=$(echo "$raw_results" | awk '/MemFree/ {print $2}')
	units_memfree=$(echo "$raw_results" | awk '/MemFree/ {print $3}')
	local memfree=$(echo "${raw_results}" | awk '/MemFree/ {print $2}')
	units_memfree=$(echo "${raw_results}" | awk '/MemFree/ {print $3}')

	local memavailable=$(echo "$raw_results" | awk '/MemAvailable/ {print $2}')
	units_memavailable=$(echo "$raw_results" | awk '/MemAvailable/ {print $3}')
	local memavailable=$(echo "${raw_results}" | awk '/MemAvailable/ {print $2}')
	units_memavailable=$(echo "${raw_results}" | awk '/MemAvailable/ {print $3}')

	# check results: if any result is zero or negative, it is considered as invalid, and the test will be repeated.
	if (( $(echo "$memtotal <= 0" | bc -l) )) || (( $(echo "$memfree <= 0" | bc -l) )) || (( $(echo "$memavailable <= 0" | bc -l) )); then
	if (( $(echo "${memtotal} <= 0" | bc -l) )) || (( $(echo "${memfree} <= 0" | bc -l) )) || (( $(echo "${memavailable} <= 0" | bc -l) )); then
		MAX_FAILED_ATTEMPTS=$((MAX_FAILED_ATTEMPTS-1))
		valid_result=0
		info "Skipping invalid result: memtotal: $memtotal memfree: $memfree memavailable: $memavailable"
		info "Skipping invalid result: memtotal: ${memtotal} memfree: ${memfree} memavailable: ${memavailable}"
		return 0
	fi

@@ -68,14 +68,14 @@ parse_results() {
	memfreeAvg=$((memfree+memfree_acu))
	memavailableAvg=$((memavailable+memavailable_acu))
	valid_result=1
	info "Iteration# $count_iters memtotal: $memtotal memfree: $memfree memavailable: $memavailable"
	info "Iteration# ${count_iters} memtotal: ${memtotal} memfree: ${memfree} memavailable: ${memavailable}"
}

store_results_json() {
function store_results_json() {
	metrics_json_start_array
	memtotalAvg=$(echo "scale=2; $memtotalAvg / $count_iters" | bc)
	memfreeAvg=$(echo "scale=2; $memfreeAvg / $count_iters" | bc)
	memavailableAvg=$(echo "scale=2; $memavailableAvg / $count_iters" | bc)
	memtotalAvg=$(echo "scale=2; ${memtotalAvg} / ${count_iters}" | bc)
	memfreeAvg=$(echo "scale=2; ${memfreeAvg} / ${count_iters}" | bc)
	memavailableAvg=$(echo "scale=2; ${memavailableAvg} / ${count_iters}" | bc)

	local json="$(cat << EOF
{
@@ -109,7 +109,7 @@ EOF
function main() {
	# switch to select output format
	local num_iterations=${1:-1}
	info "Iterations: $num_iterations"
	info "Iterations: ${num_iterations}"

	# Check tools/commands dependencies
	cmds=("awk" "ctr")
@@ -117,13 +117,13 @@ function main() {
	check_cmds "${cmds[@]}"
	check_images "${IMAGE}"
	metrics_json_init
	while [ $count_iters -lt $num_iterations ]; do
		local output=$(sudo -E "${CTR_EXE}" run --memory-limit $((MEMSIZE*1024)) --rm --runtime=$CTR_RUNTIME $IMAGE busybox sh -c "$CMD" 2>&1)
	while [ "${count_iters}" -lt "${num_iterations}" ]; do
		local output=$(sudo -E "${CTR_EXE}" run --memory-limit $((MEMSIZE*1024)) --rm --runtime="${CTR_RUNTIME}" "${IMAGE}" busybox sh -c "${CMD}" 2>&1)
		parse_results "${output}" "${memtotalAvg}" "${memfreeAvg}" "${memavailableAvg}"

		# quit if number of attempts exceeds the allowed value.
		[ ${MAX_FAILED_ATTEMPTS} -eq 0 ] && die "Max number of attempts exceeded."
		[ ${valid_result} -eq 1 ] && count_iters=$((count_iters+1))
		[ "${MAX_FAILED_ATTEMPTS}" -eq 0 ] && die "Max number of attempts exceeded."
		[ "${valid_result}" -eq 1 ] && count_iters=$((count_iters+1))
	done
	store_results_json
	clean_env_ctr
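
To make the unit handling above concrete: `MEMSIZE` is kept in KiB so it compares directly against `/proc/meminfo`, while `ctr --memory-limit` takes bytes, hence the extra `*1024` at the call site. A quick sketch of the default values:

```
$ echo $((2048 * 1024))        # MEMSIZE: 2097152 KiB (2 GiB)
$ echo $((2048 * 1024 * 1024)) # --memory-limit: 2147483648 bytes
```
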
@@ -18,59 +18,6 @@ declare -r results_dir="${metrics_dir}/results"
declare -r checkmetrics_dir="${metrics_dir}/cmd/checkmetrics"
declare -r checkmetrics_config_dir="${checkmetrics_dir}/ci_worker"

function create_symbolic_links() {
	local link_configuration_file="/opt/kata/share/defaults/kata-containers/configuration.toml"
	local source_configuration_file="/opt/kata/share/defaults/kata-containers/configuration-${KATA_HYPERVISOR}.toml"

	if [ "${KATA_HYPERVISOR}" != 'qemu' ] && [ "${KATA_HYPERVISOR}" != 'clh' ]; then
		die "Failed to set the configuration.toml: '${KATA_HYPERVISOR}' is not recognized as a valid hypervisor name."
	fi

	sudo ln -sf "${source_configuration_file}" "${link_configuration_file}"
}

# Configures containerd
function overwrite_containerd_config() {
	containerd_config="/etc/containerd/config.toml"
	# -f so a missing file does not abort the overwrite
	sudo rm -f "${containerd_config}"
	sudo tee "${containerd_config}" << EOF
version = 2
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = true

[plugins]
  [plugins."io.containerd.grpc.v1.cri"]
    [plugins."io.containerd.grpc.v1.cri".containerd]
      default_runtime_name = "kata"
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata]
          runtime_type = "io.containerd.kata.v2"
EOF
}

function install_kata() {
	local kata_tarball="kata-static.tar.xz"
	declare -r katadir="/opt/kata"
	declare -r destdir="/"
	declare -r local_bin_dir="/usr/local/bin/"

	# Removing previous kata installation
	sudo rm -rf "${katadir}"

	pushd "${kata_tarball_dir}"
	sudo tar -xvf "${kata_tarball}" -C "${destdir}"
	popd

	# create symbolic links to kata components
	# (glob left unquoted so it expands)
	for b in "${katadir}"/bin/* ; do
		sudo ln -sf "${b}" "${local_bin_dir}/$(basename $b)"
	done

	check_containerd_config_for_kata
	restart_containerd_service
	install_checkmetrics
}

function install_checkmetrics() {
	# Ensure we have the latest checkmetrics
	pushd "${checkmetrics_dir}"
@@ -79,20 +26,18 @@ function install_checkmetrics() {
	popd
}

function check_containerd_config_for_kata() {
	# check containerd config
	declare -r line1="default_runtime_name = \"kata\""
	declare -r line2="runtime_type = \"io.containerd.kata.v2\""
	declare -r num_lines_containerd=2
	declare -r containerd_path="/etc/containerd/config.toml"
	local count_matches=$(grep -ic "$line1\|$line2" "${containerd_path}")
# @path_results: path to the input metric-results folder
# @tarball_fname: path and filename to the output tarball
function compress_metrics_results_dir()
{
	local path_results="${1:-results}"
	local tarball_fname="${2:-}"

	if [ "${count_matches}" = "${num_lines_containerd}" ]; then
		info "containerd ok"
	else
		info "overwriting containerd configuration w/ a valid one"
		overwrite_containerd_config
	fi
	[ -z "${tarball_fname}" ] && die "Missing the tarball filename or the path to save the tarball results is incorrect."
	[ ! -d "${path_results}" ] && die "Missing path to the results folder."

	cd "${path_results}" && tar -czf "${tarball_fname}" *.json && cd -
	info "tarball generated: ${tarball_fname}"
}

function check_metrics() {
@@ -111,46 +56,45 @@ function make_tarball_results() {
function run_test_launchtimes() {
	info "Running Launch Time test using ${KATA_HYPERVISOR} hypervisor"

	create_symbolic_links
	bash tests/metrics/time/launch_times.sh -i public.ecr.aws/ubuntu/ubuntu:latest -n 20
}

function run_test_memory_usage() {
	info "Running memory-usage test using ${KATA_HYPERVISOR} hypervisor"

	create_symbolic_links
	bash tests/metrics/density/memory_usage.sh 20 5

	check_metrics
}

function run_test_memory_usage_inside_container() {
	info "Running memory-usage inside the container test using ${KATA_HYPERVISOR} hypervisor"

	# ToDo: remove the exit once the metrics workflow is stable
	exit 0
	create_symbolic_links
	bash tests/metrics/density/memory_usage_inside_container.sh 5
}

function run_test_blogbench() {
	info "Running Blogbench test using ${KATA_HYPERVISOR} hypervisor"

	# ToDo: remove the exit once the metrics workflow is stable
	exit 0
	create_symbolic_links
	bash tests/metrics/storage/blogbench.sh
}

function run_test_tensorflow() {
	info "Running TensorFlow test using ${KATA_HYPERVISOR} hypervisor"

	bash tests/metrics/machine_learning/tensorflow.sh 1 20

	check_metrics
}

function main() {
	action="${1:-}"
	case "${action}" in
		install-kata) install_kata ;;
		install-kata) install_kata && install_checkmetrics ;;
		make-tarball-results) make_tarball_results ;;
		run-test-launchtimes) run_test_launchtimes ;;
		run-test-memory-usage) run_test_memory_usage ;;
		run-test-memory-usage-inside-container) run_test_memory_usage_inside_container ;;
		run-test-blogbench) run_test_blogbench ;;
		run-test-tensorflow) run_test_tensorflow ;;
		*) >&2 die "Invalid argument" ;;
	esac
}
@@ -18,7 +18,6 @@ DOCKER_EXE="${DOCKER_EXE:-docker}"
CTR_RUNTIME="${CTR_RUNTIME:-io.containerd.kata.v2}"
RUNTIME="${RUNTIME:-containerd-shim-kata-v2}"
KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
TEST_REPO="${TEST_REPO:-github.com/kata-containers/tests}"
JSON_HOST="${JSON_HOST:-}"

KSM_BASE="/sys/kernel/mm/ksm"
@@ -179,6 +178,7 @@ function init_env()
	# This clean up is more aggressive, this is in order to
	# decrease the factors that could affect the metrics results.
	kill_processes_before_start
	info "init environment complete"
}

# This function checks if there are containers or
@@ -220,11 +220,11 @@ function show_system_ctr_state()

function common_init()
{
	if [ "$CTR_RUNTIME" == "io.containerd.kata.v2" ] || [ "$RUNTIME" == "containerd-shim-kata-v2" ]; then
	if [ "${CTR_RUNTIME}" = "io.containerd.kata.v2" ] || [ "${RUNTIME}" = "containerd-shim-kata-v2" ]; then
		extract_kata_env
	else
		# We know we have nothing to do for runc or shimv2
		if [ "$CTR_RUNTIME" != "io.containerd.runc.v2" ] || [ "$RUNTIME" != "runc" ]; then
		if [ "${CTR_RUNTIME}" != "io.containerd.runc.v2" ] && [ "${RUNTIME}" != "runc" ]; then
			warn "Unrecognised runtime"
		fi
	fi
@@ -256,7 +256,7 @@ function set_ksm_aggressive()
	fi
}

restore_virtio_fs(){
function restore_virtio_fs(){
	# Re-enable virtio-fs if it was enabled previously
	[ -n "${was_virtio_fs}" ] && sudo -E PATH="$PATH" "${LIB_DIR}/../../.ci/set_kata_config.sh" shared_fs virtio-fs || \
		info "Not restoring virtio-fs since it wasn't enabled previously"
@@ -359,5 +359,3 @@ function wait_ksm_settle()
	done
	info "Timed out after ${1}s waiting for KSM to settle"
}

common_init
@@ -13,7 +13,7 @@ JSON_TX_ONELINE="${JSON_TX_ONELINE:-}"
JSON_URL="${JSON_URL:-}"

# Generate a timestamp in nanoseconds since 1st Jan 1970
timestamp_ns() {
function timestamp_ns() {
	local t
	local s
	local n
@@ -22,18 +22,21 @@ timestamp_ns() {
	t="$(date +%-s:%-N)"
	s=$(echo $t | awk -F ':' '{print $1}')
	n=$(echo $t | awk -F ':' '{print $2}')
	ns=$(( (s * 1000000000) + n ))
	ns=$(echo "$s * 1000000000 + $n" | bc)

	echo $ns
}

# Generate a timestamp in milliseconds since 1st Jan 1970
timestamp_ms() {
function timestamp_ms() {
	echo $(($(date +%s%N)/1000000))
}

# Intialise the json subsystem
metrics_json_init() {
# Initialise the json subsystem
function metrics_json_init() {
	# collect kata-env data
	common_init

	# Clear out any previous results
	json_result_array=()

@@ -45,18 +48,18 @@ metrics_json_init() {
EOF
)"

	if [ "$CTR_RUNTIME" == "io.containerd.kata.v2" ]; then
	if [ "${CTR_RUNTIME}" == "io.containerd.kata.v2" ]; then
		metrics_json_add_fragment "$json"

		local json="$(cat << EOF
	"env" : {
		"RuntimeVersion": "$RUNTIME_VERSION",
		"RuntimeCommit": "$RUNTIME_COMMIT",
		"RuntimeConfig": "$RUNTIME_CONFIG_PATH",
		"Hypervisor": "$HYPERVISOR_PATH",
		"HypervisorVersion": "$HYPERVISOR_VERSION",
		"Shim": "$SHIM_PATH",
		"ShimVersion": "$SHIM_VERSION",
		"RuntimeVersion": "${RUNTIME_VERSION}",
		"RuntimeCommit": "${RUNTIME_COMMIT}",
		"RuntimeConfig": "${RUNTIME_CONFIG_PATH}",
		"Hypervisor": "${HYPERVISOR_PATH}",
		"HypervisorVersion": "${HYPERVISOR_VERSION}",
		"Shim": "${SHIM_PATH}",
		"ShimVersion": "${SHIM_VERSION}",
		"machinename": "$(uname -n)"
	}
EOF
@@ -86,7 +89,7 @@ EOF
	metrics_json_add_fragment "$json"

	# Now add a runtime specific environment section if we can
	local iskata=$(is_a_kata_runtime "$RUNTIME")
	local iskata=$(is_a_kata_runtime "${RUNTIME}")
	if [ "$iskata" == "1" ]; then
		local rpath="$(command -v kata-runtime)"
		local json="$(cat << EOF
@@ -97,7 +100,7 @@ EOF
		fi
	fi

	if [ "$CTR_RUNTIME" == "io.containerd.runc.v2" ]; then
	if [ "${CTR_RUNTIME}" == "io.containerd.runc.v2" ]; then
		metrics_json_add_fragment "$json"
		local output=$(runc -v)
		local runcversion=$(grep version <<< "$output" | sed 's/runc version //')
@@ -106,8 +109,8 @@ EOF
	"runc-env" :
	{
		"Version": {
			"Semver": "$runcversion",
			"Commit": "$runccommit"
			"Semver": "${runcversion}",
			"Commit": "${runccommit}"
		}
	}
EOF
@@ -118,10 +121,10 @@ EOF
}

# Save out the final JSON file
metrics_json_save() {
function metrics_json_save() {

	if [ ! -d ${RESULT_DIR} ];then
		mkdir -p ${RESULT_DIR}
	if [ ! -d "${RESULT_DIR}" ];then
		mkdir -p "${RESULT_DIR}"
	fi

	local maxelem=$(( ${#json_result_array[@]} - 1 ))
@@ -163,12 +166,12 @@ EOF
	fi
}

metrics_json_end_of_system() {
function metrics_json_end_of_system() {
	system_index=$(( ${#json_result_array[@]}))
}

# Add a top level (complete) JSON fragment to the data
metrics_json_add_fragment() {
function metrics_json_add_fragment() {
	local data=$1

	# Place on end of array
@@ -176,12 +179,12 @@ metrics_json_add_fragment() {
}

# Prepare to collect up array elements
metrics_json_start_array() {
function metrics_json_start_array() {
	json_array_array=()
}

# Add a (complete) element to the current array
metrics_json_add_array_element() {
function metrics_json_add_array_element() {
	local data=$1

	# Place on end of array
@@ -189,7 +192,7 @@ metrics_json_add_array_element() {
}

# Add a fragment to the current array element
metrics_json_add_array_fragment() {
function metrics_json_add_array_fragment() {
	local data=$1

	# Place on end of array
@@ -197,7 +200,7 @@ metrics_json_add_array_fragment() {
}

# Turn the currently registered array fragments into an array element
metrics_json_close_array_element() {
function metrics_json_close_array_element() {

	local maxelem=$(( ${#json_array_fragments[@]} - 1 ))
	local json="$(cat << EOF
@@ -221,7 +224,7 @@ EOF
}

# Close the current array
metrics_json_end_array() {
function metrics_json_end_array() {
	local name=$1

	local maxelem=$(( ${#json_array_array[@]} - 1 ))
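
The helpers being renamed above form a small API that every metrics test in this patch drives in the same order. A minimal caller looks like this (a sketch, with `$json` standing in for any complete JSON fragment):

```
metrics_json_init
metrics_json_start_array
metrics_json_add_array_element "$json"
metrics_json_end_array "Results"
metrics_json_save
```
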
49
tests/metrics/machine_learning/README.md
Normal file
@@ -0,0 +1,49 @@
# Kata Containers Tensorflow Metrics

Kata Containers provides a series of performance tests using the
TensorFlow reference benchmarks (tf_cnn_benchmarks).
The tf_cnn_benchmarks suite contains TensorFlow implementations of several
popular convolutional models: https://github.com/tensorflow/benchmarks/tree/master/scripts/tf_cnn_benchmarks.

Currently the TensorFlow benchmark on Kata Containers includes tests for
the `AlexNet` and `ResNet50` models.

## Running the test

Individual tests can be run by hand, for example:

```
$ cd metrics/machine_learning
$ ./tensorflow.sh 25 60
```

# Kata Containers Pytorch Metrics

The Pytorch test is based on a suite of Python high performance computing
benchmarks that exercises several popular Python HPC libraries:
https://github.com/dionhaefner/pyhpc-benchmarks.

## Running the Pytorch test

Individual tests can be run by hand, for example:

```
$ cd metrics/machine_learning
$ ./pytorch.sh 40 100
```

# Kata Containers Tensorflow `MobileNet` Metrics

`MobileNets` are small, low-latency, low-power models parameterized to meet the resource
constraints of a variety of use cases. They can be built upon for classification, detection,
embeddings and segmentation, similar to how other popular large scale models, such as Inception, are used.
`MobileNets` can be run efficiently on mobile devices with `Tensorflow` Lite.

Kata Containers provides a test for running `MobileNet V1` inference using Intel-Optimized `Tensorflow`.

## Running the `Tensorflow` `MobileNet` test

The test can be run by hand, for example:

```
$ cd metrics/machine_learning
$ ./tensorflow_mobilenet_benchmark.sh 25 60
```
160
tests/metrics/machine_learning/pytorch.sh
Executable file
@@ -0,0 +1,160 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

set -e

# General env
SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
source "${SCRIPT_PATH}/../lib/common.bash"

IMAGE="docker.io/library/pytorch:latest"
DOCKERFILE="${SCRIPT_PATH}/pytorch_dockerfile/Dockerfile"
equation_pytorch_file=$(mktemp pytorchresults.XXXXXXXXXX)
isoneural_pytorch_file=$(mktemp pytorchresults.XXXXXXXXXX)
NUM_CONTAINERS="$1"
TIMEOUT="$2"
TEST_NAME="pytorch"
CMD_RUN="cd pyhpc-benchmarks-3.0 && python run.py benchmarks/equation_of_state --burnin 20 --device cpu -b pytorch -s 524288 > LOG"
CMD_RUN_ISONEURAL="cd pyhpc-benchmarks-3.0 && python run.py benchmarks/isoneutral_mixing --burnin 20 --device cpu -b pytorch -s 524288 > LOG"
CMD_RESULT="cd pyhpc-benchmarks-3.0 && cat LOG"
CMD_FILE="cat pyhpc-benchmarks-3.0/LOG | grep 'seconds' | wc -l"
PAYLOAD_ARGS="tail -f /dev/null"

function remove_tmp_file() {
	rm -rf "${equation_pytorch_file}" "${isoneural_pytorch_file}"
}

trap remove_tmp_file EXIT

# help() was referenced from main() but undefined; defined here,
# mirroring the help text of the sibling tensorflow.sh script.
function help() {
cat << EOF
Usage: $0 <count> <timeout>
	Description:
		This script launches n number of containers
		to run the pyhpc benchmarks using a Pytorch
		container.
	Options:
		<count> : Number of containers to run.
		<timeout> : Timeout to launch the containers.
EOF
}

function check_containers_are_up() {
	local containers_launched=0
	for i in $(seq "${TIMEOUT}") ; do
		info "Verify that the containers are running"
		containers_launched="$(sudo ${CTR_EXE} t list | grep -c "RUNNING")"
		[ "${containers_launched}" -eq "${NUM_CONTAINERS}" ] && break
		sleep 1
		[ "${i}" == "${TIMEOUT}" ] && return 1
	done
}

function equation_of_state_pytorch_test() {
	info "Running Equation of State Pytorch test"
	for i in "${containers[@]}"; do
		sudo -E "${CTR_EXE}" t exec -d --exec-id "$(random_name)" "${i}" sh -c "${CMD_RUN}"
	done

	for i in "${containers[@]}"; do
		local retries="200"
		for j in $(seq 1 "${retries}"); do
			# Re-check the log on every retry until the run has finished.
			check_file=$(sudo -E "${CTR_EXE}" t exec --exec-id "$(random_name)" "${i}" sh -c "${CMD_FILE}")
			[ "${check_file}" -eq 1 ] && break
			sleep 1
		done
	done

	for i in "${containers[@]}"; do
		sudo -E "${CTR_EXE}" t exec --exec-id "$(random_name)" "${i}" sh -c "${CMD_RESULT}" >> "${equation_pytorch_file}"
	done

	local equation_pytorch_results=$(cat "${equation_pytorch_file}" | grep pytorch | sed '/Using pytorch version/d' | awk '{print $4}' | tr '\n' ',' | sed 's/.$//')
	local equation_average_pytorch=$(echo "${equation_pytorch_results}" | sed "s/,/+/g;s/.*/(&)\/$NUM_CONTAINERS/g" | bc -l)

	local json="$(cat << EOF
{
	"Pytorch Equation of State": {
		"Result": "${equation_pytorch_results}",
		"Average": "${equation_average_pytorch}",
		"Units": "s"
	}
}
EOF
)"
	metrics_json_add_array_element "$json"
}

function isoneural_pytorch_test() {
	info "Running Isoneural Pytorch test"
	for i in "${containers[@]}"; do
		sudo -E "${CTR_EXE}" t exec -d --exec-id "$(random_name)" "${i}" sh -c "${CMD_RUN_ISONEURAL}"
	done

	for i in "${containers[@]}"; do
		local retries="200"
		for j in $(seq 1 "${retries}"); do
			# Re-check the log on every retry until the run has finished.
			check_file=$(sudo -E "${CTR_EXE}" t exec --exec-id "$(random_name)" "${i}" sh -c "${CMD_FILE}")
			[ "${check_file}" -eq 1 ] && break
			sleep 1
		done
	done

	for i in "${containers[@]}"; do
		sudo -E "${CTR_EXE}" t exec --exec-id "$(random_name)" "${i}" sh -c "${CMD_RESULT}" >> "${isoneural_pytorch_file}"
	done

	local isoneural_pytorch_results=$(cat "${isoneural_pytorch_file}" | grep pytorch | sed '/Using pytorch version/d' | awk '{print $4}' | tr '\n' ',' | sed 's/.$//')
	local isoneural_average_pytorch=$(echo "${isoneural_pytorch_results}" | sed "s/,/+/g;s/.*/(&)\/$NUM_CONTAINERS/g" | bc -l)

	local json="$(cat << EOF
{
	"Pytorch Isoneural": {
		"Result": "${isoneural_pytorch_results}",
		"Average": "${isoneural_average_pytorch}",
		"Units": "s"
	}
}
EOF
)"
	metrics_json_add_array_element "$json"
	metrics_json_end_array "Results"
}

function main() {
	# Verify enough arguments
	if [ $# != 2 ]; then
		echo >&2 "error: Not enough arguments [$@]"
		help
		exit 1
	fi

	local i=0
	local containers=()
	local not_started_count="${NUM_CONTAINERS}"

	# Check tools/commands dependencies
	cmds=("awk" "docker" "bc")
	check_cmds "${cmds[@]}"
	check_ctr_images "${IMAGE}" "${DOCKERFILE}"

	init_env
	info "Creating ${NUM_CONTAINERS} containers"

	for ((i=1; i<= "${NUM_CONTAINERS}"; i++)); do
		containers+=($(random_name))
		sudo -E "${CTR_EXE}" run -d --runtime "${CTR_RUNTIME}" "${IMAGE}" "${containers[-1]}" sh -c "${PAYLOAD_ARGS}"
		((not_started_count--))
		info "$not_started_count remaining containers"
	done

	metrics_json_init
	metrics_json_start_array

	# Check that the requested number of containers are running
	check_containers_are_up

	equation_of_state_pytorch_test

	isoneural_pytorch_test

	metrics_json_save

	clean_env_ctr
}

main "$@"
19
tests/metrics/machine_learning/pytorch_dockerfile/Dockerfile
Normal file
@@ -0,0 +1,19 @@
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

# Usage: FROM [image name]
FROM intel/intel-optimized-pytorch:1.12.100

# Version of the Dockerfile
LABEL DOCKERFILE_VERSION="1.0"

RUN apt-get update && \
	apt-get install -y --no-install-recommends build-essential curl git && \
	apt-get remove -y unattended-upgrades && \
	curl -OkL https://github.com/dionhaefner/pyhpc-benchmarks/archive/refs/tags/v3.0.tar.gz && \
	tar -xf v3.0.tar.gz && \
	pip install --no-cache-dir click==8.1.3 && \
	cd pyhpc-benchmarks-3.0 && pip3 install --no-cache-dir --user torch==1.10.0

CMD ["/bin/bash"]
236
tests/metrics/machine_learning/tensorflow.sh
Executable file
@@ -0,0 +1,236 @@
#!/bin/bash
#
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

set -o pipefail

# General env
SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
source "${SCRIPT_PATH}/../lib/common.bash"

IMAGE="docker.io/library/tensorflow:latest"
DOCKERFILE="${SCRIPT_PATH}/tensorflow_dockerfile/Dockerfile"
BATCH_SIZE="100"
NUM_BATCHES="100"
resnet_tensorflow_file=$(mktemp resnettensorflowresults.XXXXXXXXXX)
alexnet_tensorflow_file=$(mktemp alexnettensorflowresults.XXXXXXXXXX)
NUM_CONTAINERS="$1"
TIMEOUT="$2"
TEST_NAME="tensorflow"
PAYLOAD_ARGS="tail -f /dev/null"
# Options to control the start of the workload using a trigger-file
dst_dir="/host"
src_dir=$(mktemp --tmpdir -d tensorflow.XXXXXXXXXX)
MOUNT_OPTIONS="type=bind,src=$src_dir,dst=$dst_dir,options=rbind:ro"
# CMD points to the script that starts the workload
alexnet_start_script="alexnet_start.sh"
resnet_start_script="resnet_start.sh"
CMD_RESNET="$dst_dir/$resnet_start_script"
CMD_ALEXNET="$dst_dir/$alexnet_start_script"
timeout=600
INITIAL_NUM_PIDS=1
CMD_FILE="cat alexnet_results | grep 'total images' | wc -l"
RESNET_CMD_FILE="cat resnet_results | grep 'total images' | wc -l"

function remove_tmp_file() {
	rm -rf "${resnet_tensorflow_file}" "${alexnet_tensorflow_file}"
}

trap remove_tmp_file EXIT

function help() {
cat << EOF
Usage: $0 <count> <timeout>
	Description:
		This script launches n number of containers
		to run the tf cnn benchmarks using a Tensorflow
		container.
	Options:
		<count> : Number of containers to run.
		<timeout> : Timeout to launch the containers.
EOF
}

function create_resnet_start_script() {
	local script="${src_dir}/${resnet_start_script}"
	rm -rf "${script}"

cat <<EOF >>"${script}"
#!/bin/bash
python benchmarks/scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py -data_format=NHWC --device cpu --batch_size=${BATCH_SIZE} --num_batches=${NUM_BATCHES} > resnet_results
EOF
	chmod +x "${script}"
}

function create_alexnet_start_script() {
	local script="${src_dir}/${alexnet_start_script}"
	rm -rf "${script}"

cat <<EOF >>"${script}"
#!/bin/bash
python benchmarks/scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py --num_batches=${NUM_BATCHES} --device=cpu --batch_size=${BATCH_SIZE} --forward_only=true --model=alexnet --data_format=NHWC > alexnet_results
EOF
	chmod +x "${script}"
}

function tensorflow_test() {
	info "Copy Resnet Tensorflow test"
	local pids=()
	local j=0
	for i in "${containers[@]}"; do
		sudo -E "${CTR_EXE}" t exec -d --exec-id "$(random_name)" "${i}" sh -c "${CMD_RESNET}" &
		pids["${j}"]=$!
		((j++))
	done

	# wait for all pids
	for pid in ${pids[*]}; do
		wait "${pid}"
	done

	info "All containers are running the workload..."

	for i in "${containers[@]}"; do
		retries="300"
		for j in $(seq 1 "${retries}"); do
			# Re-check the results file on every retry until the run has finished.
			check_file=$(sudo -E "${CTR_EXE}" t exec --exec-id "$(random_name)" "${i}" sh -c "${RESNET_CMD_FILE}")
			[ "${check_file}" -eq "1" ] && break
			sleep 1
		done
	done

	info "Copy Alexnet Tensorflow test"
	local pids=()
	local j=0
	for i in "${containers[@]}"; do
		sudo -E "${CTR_EXE}" t exec -d --exec-id "$(random_name)" "${i}" sh -c "${CMD_ALEXNET}" &
		pids["${j}"]=$!
		((j++))
	done

	# wait for all pids
	for pid in ${pids[*]}; do
		wait "${pid}"
	done

	for i in "${containers[@]}"; do
		retries="300"
		for j in $(seq 1 "${retries}"); do
			# Re-check the results file on every retry until the run has finished.
			check_file=$(sudo -E "${CTR_EXE}" t exec --exec-id "$(random_name)" "${i}" sh -c "${CMD_FILE}")
			[ "${check_file}" -eq "1" ] && break
			sleep 1
		done
	done

	for i in "${containers[@]}"; do
		sudo -E "${CTR_EXE}" t exec --exec-id "$(random_name)" "${i}" sh -c "cat resnet_results" >> "${resnet_tensorflow_file}"
	done

	local res_results=$(cat "${resnet_tensorflow_file}" | grep "total images/sec" | cut -d ":" -f2 | sed -e 's/^[ \t]*//' | tr '\n' ',' | sed 's/.$//')
	local resnet_results=$(printf "%.0f\n" "${res_results}")
	local res_average=$(echo "${resnet_results}" | sed "s/,/+/g;s/.*/(&)\/${NUM_CONTAINERS}/g" | bc -l)
	local average_resnet=$(printf "%.0f\n" "${res_average}")

	for i in "${containers[@]}"; do
		sudo -E "${CTR_EXE}" t exec --exec-id "$(random_name)" "${i}" sh -c "cat alexnet_results" >> "${alexnet_tensorflow_file}"
	done

	local alex_results=$(cat "${alexnet_tensorflow_file}" | grep "total images/sec" | cut -d ":" -f2 | sed -e 's/^[ \t]*//' | tr '\n' ',' | sed 's/.$//')
	local alexnet_results=$(printf "%.0f\n" "${alex_results}")
	local alex_average=$(echo "${alexnet_results}" | sed "s/,/+/g;s/.*/(&)\/${NUM_CONTAINERS}/g" | bc -l)
	local average_alexnet=$(printf "%.0f\n" "${alex_average}")

	local json="$(cat << EOF
{
	"resnet": {
		"Result": ${resnet_results},
		"Average": ${average_resnet},
		"Units": "images/s"
	},
	"alexnet": {
		"Result": ${alexnet_results},
		"Average": ${average_alexnet},
		"Units": "images/s"
	}
}
EOF
)"
	metrics_json_add_array_element "$json"
	metrics_json_end_array "Results"
}

function check_containers_are_up() {
	local containers_launched=0
	for i in $(seq "${TIMEOUT}") ; do
		info "Verify that the containers are running"
		containers_launched="$(sudo ${CTR_EXE} t list | grep -c "RUNNING")"
		[ "${containers_launched}" -eq "${NUM_CONTAINERS}" ] && break
		sleep 1
		[ "${i}" == "${TIMEOUT}" ] && return 1
	done
}

function main() {
	# Verify enough arguments
	if [ $# != 2 ]; then
		echo >&2 "error: Not enough arguments [$@]"
		help
		exit 1
	fi

	local i=0
	local containers=()
	local not_started_count="${NUM_CONTAINERS}"

	# Check tools/commands dependencies
	cmds=("awk" "docker" "bc")
	check_cmds "${cmds[@]}"
	check_ctr_images "${IMAGE}" "${DOCKERFILE}"

	init_env
	create_resnet_start_script
	create_alexnet_start_script

	info "Creating ${NUM_CONTAINERS} containers"

	for ((i=1; i<= "${NUM_CONTAINERS}"; i++)); do
		containers+=($(random_name))
		sudo -E "${CTR_EXE}" run -d --runtime "${CTR_RUNTIME}" --mount="${MOUNT_OPTIONS}" "${IMAGE}" "${containers[-1]}" sh -c "${PAYLOAD_ARGS}"
		((not_started_count--))
		info "$not_started_count remaining containers"
	done

	metrics_json_init
	metrics_json_start_array

	# Check that the requested number of containers are running
	check_containers_are_up

	# Re-check, this time with a hard launch timeout as a backstop
	local timeout_launch="10"
	check_containers_are_up & pid=$!
	(sleep "${timeout_launch}" && kill -HUP "${pid}") 2>/dev/null & pid_tout=$!

	if wait "${pid}" 2>/dev/null; then
		pkill -HUP -P "${pid_tout}"
		wait "${pid_tout}"
	else
		warn "Time out exceeded"
		return 1
	fi

	# Get the initial number of pids in a single container before the workload starts
	INITIAL_NUM_PIDS=$(sudo -E "${CTR_EXE}" t metrics "${containers[-1]}" | grep pids.current | xargs | cut -d ' ' -f 2)
	((INITIAL_NUM_PIDS++))

	tensorflow_test

	metrics_json_save

	rm -rf "${src_dir}"

	clean_env_ctr
}
main "$@"
18
tests/metrics/machine_learning/tensorflow_dockerfile/Dockerfile
Normal file
@@ -0,0 +1,18 @@
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

# Usage: FROM [image name]
FROM intel/intel-optimized-tensorflow:2.9.1

# Version of the Dockerfile
LABEL DOCKERFILE_VERSION="1.0"

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && \
	apt-get install -y --no-install-recommends build-essential git && \
	apt-get remove -y unattended-upgrades && \
	git clone https://github.com/tensorflow/benchmarks

CMD ["/bin/bash"]
190
tests/metrics/machine_learning/tensorflow_mobilenet_benchmark.sh
Executable file
@@ -0,0 +1,190 @@
#!/bin/bash
|
||||
#
|
||||
# Copyright (c) 2023 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -o pipefail
|
||||
|
||||
# General env
|
||||
SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
|
||||
source "${SCRIPT_PATH}/../lib/common.bash"
|
||||
|
||||
IMAGE="docker.io/library/tensorflowmobilenet:latest"
|
||||
DOCKERFILE="${SCRIPT_PATH}/tensorflow_mobilenet_dockerfile/Dockerfile"
|
||||
tensorflow_file=$(mktemp tensorflowresults.XXXXXXXXXX)
|
||||
NUM_CONTAINERS="$1"
|
||||
TIMEOUT="$2"
|
||||
TEST_NAME="tensorflow-intelai"
|
||||
PAYLOAD_ARGS="tail -f /dev/null"
|
||||
TESTDIR="${TESTDIR:-/testdir}"
|
||||
# Options to control the start of the workload using a trigger-file
|
||||
dst_dir="/host"
|
||||
src_dir=$(mktemp --tmpdir -d tensorflowai.XXXXXXXXXX)
|
||||
MOUNT_OPTIONS="type=bind,src=$src_dir,dst=$dst_dir,options=rbind:ro"
|
||||
start_script="mobilenet_start.sh"
|
||||
# CMD points to the script that starts the workload
|
||||
CMD="$dst_dir/$start_script"
|
||||
guest_trigger_file="$dst_dir/$trigger_file"
|
||||
host_trigger_file="$src_dir/$trigger_file"
|
||||
timeout=600
|
||||
INITIAL_NUM_PIDS=1
|
||||
CMD_FILE="cat results | grep 'Average Throughput' | wc -l"
|
||||
CMD_RESULTS="cat results | grep 'Average Throughput' | cut -d':' -f2 | cut -d' ' -f2 | tr '\n' ','"
|
||||
|
||||
function remove_tmp_file() {
	rm -rf "${tensorflow_file}"
}

trap remove_tmp_file EXIT

function help() {
cat << EOF
Usage: $0 <count> <timeout>
	Description:
		This script launches n number of containers
		to run the tf cnn benchmarks using a Tensorflow
		container.
	Options:
		<count> : Number of containers to run.
		<timeout> : Timeout to launch the containers.
EOF
}

function create_start_script() {
	local script="${src_dir}/${start_script}"
	rm -rf "${script}"

cat <<EOF >>"${script}"
#!/bin/bash
python3.8 models/benchmarks/launch_benchmark.py --benchmark-only --framework tensorflow --model-name mobilenet_v1 --mode inference --precision bfloat16 --batch-size 100 --in-graph /mobilenet_v1_1.0_224_frozen.pb --num-intra-threads 16 --num-inter-threads 1 --verbose -- input_height=224 input_width=224 warmup_steps=20 steps=20 input_layer=input output_layer=MobilenetV1/Predictions/Reshape_1 > results
EOF
	chmod +x "${script}"
}
function mobilenet_test() {
	local CMD_EXPORT_VAR="export KMP_AFFINITY=granularity=fine,verbose,compact && export OMP_NUM_THREADS=16"

	info "Export environment variables"
	for i in "${containers[@]}"; do
		sudo -E "${CTR_EXE}" t exec -d --exec-id "$(random_name)" "${i}" sh -c "${CMD_EXPORT_VAR}"
	done

	info "Running Mobilenet Tensorflow test"
	local pids=()
	local j=0
	for i in "${containers[@]}"; do
		sudo -E "${CTR_EXE}" t exec --exec-id "$(random_name)" "${i}" sh -c "${CMD}" &
		pids["${j}"]=$!
		((j++))
	done

	# wait for all pids
	for pid in ${pids[*]}; do
		wait "${pid}"
	done

	touch "${host_trigger_file}"
	info "All containers are running the workload..."

	for i in "${containers[@]}"; do
		retries="30"
		for j in $(seq 1 "${retries}"); do
			# Re-evaluate on every retry so the loop can actually observe
			# the results file appearing inside the container
			check_file=$(sudo -E "${CTR_EXE}" t exec --exec-id "$(random_name)" "${i}" sh -c "${CMD_FILE}")
			[ "${check_file}" -eq "1" ] && break
			sleep 1
		done
	done

	for i in "${containers[@]}"; do
		sudo -E "${CTR_EXE}" t exec --exec-id "$(random_name)" "${i}" sh -c "${CMD_RESULTS}" >> "${tensorflow_file}"
	done

	local mobilenet_results=$(cat "${tensorflow_file}" | sed 's/.$//')
	local average_mobilenet=$(echo "${mobilenet_results}" | sed 's/.$//' | sed "s/,/+/g;s/.*/(&)\/$NUM_CONTAINERS/g" | bc -l)
	local json="$(cat << EOF
	{
		"Mobilenet": {
			"Result": "${mobilenet_results}",
			"Average": "${average_mobilenet}",
			"Units": "images/s"
		}
	}
EOF
)"
	metrics_json_add_array_element "$json"
	metrics_json_end_array "Results"
}

function check_containers_are_up() {
	local containers_launched=0
	for i in $(seq "${TIMEOUT}") ; do
		info "Verify that the containers are running"
		containers_launched="$(sudo ${CTR_EXE} t list | grep -c "RUNNING")"
		[ "${containers_launched}" -eq "${NUM_CONTAINERS}" ] && break
		sleep 1
		[ "${i}" == "${TIMEOUT}" ] && return 1
	done
}
function main() {
	# Verify enough arguments
	if [ $# != 2 ]; then
		echo >&2 "error: Not enough arguments [$@]"
		help
		exit 1
	fi

	local i=0
	local containers=()
	local not_started_count="${NUM_CONTAINERS}"

	# Check tools/commands dependencies
	cmds=("awk" "docker" "bc")
	check_cmds "${cmds[@]}"
	check_ctr_images "${IMAGE}" "${DOCKERFILE}"

	init_env
	create_start_script

	info "Creating ${NUM_CONTAINERS} containers"

	for ((i=1; i<= "${NUM_CONTAINERS}"; i++)); do
		containers+=($(random_name))
		sudo -E "${CTR_EXE}" run -d --runtime "${CTR_RUNTIME}" --mount="${MOUNT_OPTIONS}" "${IMAGE}" "${containers[-1]}" sh -c "${PAYLOAD_ARGS}"
		((not_started_count--))
		info "${not_started_count} remaining containers"
	done

	metrics_json_init
	metrics_json_start_array

	# Check that the requested number of containers are running,
	# aborting if the check takes longer than ${timeout_launch} seconds
	local timeout_launch="10"
	check_containers_are_up & pid=$!
	(sleep "${timeout_launch}" && kill -HUP "${pid}") 2>/dev/null & pid_tout=$!

	if wait "${pid}" 2>/dev/null; then
		pkill -HUP -P "${pid_tout}"
		wait "${pid_tout}"
	else
		warn "Time out exceeded"
		return 1
	fi

	# Get the initial number of pids in a single container before the workload starts
	INITIAL_NUM_PIDS=$(sudo -E "${CTR_EXE}" t metrics "${containers[-1]}" | grep pids.current | xargs | cut -d ' ' -f 2)
	((INITIAL_NUM_PIDS++))

	mobilenet_test

	metrics_json_save

	sudo rm -rf "${src_dir}"

	clean_env_ctr
}
main "$@"
@@ -0,0 +1,21 @@
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

# Usage: FROM [image name]
FROM ubuntu:20.04

ENV DEBIAN_FRONTEND=noninteractive

# Version of the Dockerfile
LABEL DOCKERFILE_VERSION="1.0"

RUN apt-get update && \
	apt-get install -y wget nano curl build-essential git && \
	apt-get install -y python3.8 python3-pip && \
	pip install --no-cache-dir intel-tensorflow-avx512==2.8.0 && \
	pip install --no-cache-dir protobuf==3.20.* && \
	wget -q https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_8/mobilenet_v1_1.0_224_frozen.pb && \
	git clone https://github.com/IntelAI/models.git

CMD ["/bin/bash"]
tests/metrics/storage/README.md (Normal file, 11 lines)
@@ -0,0 +1,11 @@
# Kata Containers storage I/O tests

The metrics tests in this directory are designed to be used to assess storage I/O.

## `Blogbench` test

The `blogbench` script is based on the `blogbench` program, which is designed to emulate a busy blog server with a number of concurrent
threads performing a mixture of reads, writes and rewrites.

### Running the `blogbench` test

The `blogbench` test can be run by hand, for example:

```
$ cd metrics
$ bash storage/blogbench.sh
```
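
As with the other metrics tests, `blogbench.sh` collects its results through the common `metrics_json_*` helpers, so the write/read scores end up in a JSON report.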
@@ -36,8 +36,10 @@ function main() {
	init_env
	check_cmds "${cmds[@]}"
	check_ctr_images "${IMAGE}" "${DOCKERFILE}"
	sudo systemctl restart containerd
	metrics_json_init

	info "Running Blogbench test"
	local output=$(sudo -E ${CTR_EXE} run --rm --runtime=${CTR_RUNTIME} ${IMAGE} test ${CMD})

	# Save configuration
@@ -65,6 +67,7 @@ EOF
	metrics_json_end_array "Config"

	# Save results
	info "Saving Blogbench results"
	metrics_json_start_array

	local writes=$(tail -2 <<< "${output}" | head -1 | awk '{print $5}')
@@ -83,11 +86,11 @@ EOF
	local json="$(cat << EOF
	{
		"write": {
			"Result" : "${writes}",
			"Result" : ${writes},
			"Units" : "items"
		},
		"read": {
			"Result" : "${reads}",
			"Result" : ${reads},
			"Units" : "items"
		},
		"Nb blogs": {
@@ -11,6 +11,8 @@ FROM docker.io/library/ubuntu:latest
# Version of the Dockerfile
LABEL DOCKERFILE_VERSION="1.0"

ENV DEBIAN_FRONTEND=noninteractive

# URL for blogbench test and blogbench version
ENV BLOGBENCH_URL "https://download.pureftpd.org/pub/blogbench"
ENV BLOGBENCH_VERSION 1.1
tests/metrics/storage/fio-k8s/.gitignore (vendored, Normal file, 1 line)
@@ -0,0 +1 @@
./cmd/fiotest/fio-k8s
tests/metrics/storage/fio-k8s/Makefile (Normal file, 28 lines)
@@ -0,0 +1,28 @@
#
# Copyright (c) 2021-2022 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

MKFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
MKFILE_DIR := $(dir $(MKFILE_PATH))

build:
	make -C $(MKFILE_DIR)/cmd/fiotest/ gomod
	make -C $(MKFILE_DIR)/cmd/fiotest/ build

test-report:
	$(MKFILE_DIR)/scripts/dax-compare-test/report/gen-html-fio-report.sh $(MKFILE_DIR)/cmd/fiotest/test-results/

test-report-interactive:
	$(MKFILE_DIR)/scripts/dax-compare-test/report/run-docker-jupyter-server.sh $(MKFILE_DIR)/cmd/fiotest/test-results/

test: build
	make -C $(MKFILE_DIR)/cmd/fiotest/ run
	make test-report

run: build
	make -C $(MKFILE_DIR)/scripts/dax-compare-test/ run

test-ci: build
	make -C $(MKFILE_DIR)/cmd/fiotest/ runci
tests/metrics/storage/fio-k8s/cmd/fiotest/Makefile (Normal file, 24 lines)
@@ -0,0 +1,24 @@
#
# Copyright (c) 2021-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

MKFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
MKFILE_DIR := $(dir $(MKFILE_PATH))

build:
	GO111MODULE=on go build

run: build
	$(MKFILE_DIR)/fio-k8s --debug --fio.size 10M --output-dir test-results --test-name kata $(MKFILE_DIR)/../../configs/example-config/
	$(MKFILE_DIR)/fio-k8s --debug --fio.size 10M --output-dir test-results --test-name runc --container-runtime runc $(MKFILE_DIR)/../../configs/example-config/

gomod:
	go mod edit -replace=github.com/kata-containers/kata-containers/tests/metrics/k8s=../../pkg/k8s
	go mod edit -replace=github.com/kata-containers/kata-containers/tests/metrics/exec=../../pkg/exec
	go mod edit -replace=github.com/kata-containers/kata-containers/tests/metrics/env=../../pkg/env
	go mod tidy

runci: build
	$(MKFILE_DIR)/fio-k8s --debug --fio.size 10M --output-dir test-results --test-name kata $(MKFILE_DIR)/../../configs/example-config/
tests/metrics/storage/fio-k8s/cmd/fiotest/go.mod (Normal file, 24 lines)
@@ -0,0 +1,24 @@
module github.com/kata-containers/kata-containers/tests/metrics/storage/fio-k8s

go 1.19

replace github.com/kata-containers/kata-containers/tests/metrics/exec => ../../pkg/exec

replace github.com/kata-containers/kata-containers/tests/metrics/k8s => ../../pkg/k8s

replace github.com/kata-containers/kata-containers/tests/metrics/env => ../../pkg/env

require (
	github.com/kata-containers/kata-containers/tests/metrics/env v0.0.0-00010101000000-000000000000
	github.com/kata-containers/kata-containers/tests/metrics/exec v0.0.0-00010101000000-000000000000
	github.com/kata-containers/kata-containers/tests/metrics/k8s v0.0.0-00010101000000-000000000000
	github.com/pkg/errors v0.9.1
	github.com/sirupsen/logrus v1.9.3
	github.com/urfave/cli v1.22.14
)

require (
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
)
tests/metrics/storage/fio-k8s/cmd/fiotest/go.sum (Normal file, 31 lines)
@@ -0,0 +1,31 @@
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk=
github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
tests/metrics/storage/fio-k8s/cmd/fiotest/main.go (Normal file, 373 lines)
@@ -0,0 +1,373 @@
// Copyright (c) 2021-2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package main

import (
	"encoding/csv"
	"encoding/json"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	env "github.com/kata-containers/kata-containers/tests/metrics/env"
	exec "github.com/kata-containers/kata-containers/tests/metrics/exec"
	"github.com/kata-containers/kata-containers/tests/metrics/k8s"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/urfave/cli"
)

var log = logrus.New()

var (
	optContainerRuntime = "container-runtime"
	optDebug            = "debug"
	optOutputDir        = "output-dir"
	optTestName         = "test-name"
	// fio options
	optFioBlockSize = "fio.block-size"
	optFioDirect    = "fio.direct"
	optFioIoDepth   = "fio.iodepth"
	optFioSize      = "fio.size"
	optFioNumJobs   = "fio.numjobs"
)

type RwFioOp struct {
	BandwidthKb int     `json:"bw"`
	IOPS        float64 `json:"iops"`
}

type fioResult struct {
	GlobalOptions struct {
		IOEngine string `json:"ioengine"`
		RW       string `json:"rw"`
	} `json:"global options"`
	Jobs []struct {
		JobName string  `json:"jobname"`
		Read    RwFioOp `json:"read"`
		Write   RwFioOp `json:"write"`
	} `json:"jobs"`
}
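
// For reference, these structs decode fio's --output-format=json output,
// which (abridged, values illustrative) has this shape:
//
//	{
//	  "global options": { "ioengine": "libaio", "rw": "randrw" },
//	  "jobs": [
//	    { "jobname": "randrw-libaio",
//	      "read":  { "bw": 102400, "iops": 25600.0 },
//	      "write": { "bw": 34133,  "iops": 8533.3 } }
//	  ]
//	}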

// Run the fio metrics test in K8s
func (c fioTestConfig) run() (result fioResult, err error) {
	log.Infof("Running fio config: %s", c.jobFile)

	pod := k8s.Pod{YamlPath: c.k8sYaml}

	log.Infof("Delete pod if already created")
	err = pod.Delete()
	if err != nil {
		return result, err
	}

	log.Infof("Create pod: %s", pod.YamlPath)
	err = pod.Run()
	if err != nil {
		return result, err
	}

	defer func() {
		log.Info("Deleting pod")
		delErr := pod.Delete()
		if delErr != nil {
			log.Error(delErr)
			if err != nil {
				err = errors.Wrapf(err, "Could not delete pod after: %s", delErr)
			}
		}
	}()

	destDir := "/home/fio-jobs"
	_, err = pod.Exec("mkdir " + destDir)
	if err != nil {
		return result, err
	}

	dstJobFile := path.Join(destDir, "jobFile")
	err = pod.CopyFromHost(c.jobFile, dstJobFile)
	if err != nil {
		return result, err
	}

	_, err = pod.Exec("apt update")
	if err != nil {
		return result, err
	}
	_, err = pod.Exec("apt install -y fio")
	if err != nil {
		return result, err
	}

	err = env.DropCaches()
	if err != nil {
		return result, err
	}

	var directStr string
	if c.direct {
		directStr = "1"
	} else {
		directStr = "0"
	}

	cmdFio := "fio"
	cmdFio += " --append-terse "
	cmdFio += " --blocksize=" + c.blocksize
	cmdFio += " --direct=" + directStr
	cmdFio += " --directory=" + c.directory
	cmdFio += " --iodepth=" + c.iodepth
	cmdFio += " --numjobs=" + c.numjobs
	cmdFio += " --runtime=" + c.runtime
	cmdFio += " --size=" + c.size
	cmdFio += " --output-format=json"
	cmdFio += " " + dstJobFile

	log.Infof("Exec fio")
	output, err := pod.Exec(cmdFio, k8s.ExecOptShowStdOut())
	if err != nil {
		return result, err
	}
	err = json.Unmarshal([]byte(output), &result)
	if err != nil {
		return result, errors.Wrapf(err, "failed to unmarshal output: %s", output)
	}

	log.Infof("ioengine:%s", result.GlobalOptions.IOEngine)
	log.Infof("rw:%s", result.GlobalOptions.RW)
	if len(result.Jobs) == 0 {
		return result, errors.New("No jobs found after parsing fio results")
	}

	testDir := path.Join(c.outputDir, filepath.Base(c.jobFile))
	err = os.MkdirAll(testDir, 0775)
	if err != nil {
		return result, errors.Wrapf(err, "failed to create test directory for: %s", c.jobFile)
	}
	outputFile := path.Join(testDir, "output.json")
	log.Infof("Store results output in: %s", outputFile)

	err = os.WriteFile(outputFile, []byte(output), 0644)
	if err != nil {
		return result, err
	}

	return result, nil
}

type fioTestConfig struct {
	// test options
	k8sYaml          string
	containerRuntime string
	outputDir        string

	// fio options
	blocksize string
	directory string
	iodepth   string
	numjobs   string
	jobFile   string
	loops     string
	runtime   string
	size      string

	direct bool
}

func runFioJobs(testDirPath string, cfg fioTestConfig) (results []fioResult, err error) {
	fioJobsDir, err := filepath.Abs(path.Join(testDirPath, "fio-jobs"))
	if err != nil {
		return results, err
	}

	files, err := os.ReadDir(fioJobsDir)
	if err != nil {
		log.Fatal(err)
		return results, err
	}

	if cfg.containerRuntime == "" {
		return results, errors.New("containerRuntime is empty")
	}

	podYAMLName := cfg.containerRuntime + ".yaml"
	cfg.k8sYaml = path.Join(testDirPath, podYAMLName)

	if len(files) == 0 {
		return results, errors.New("No fio configs found")
	}

	for _, file := range files {
		cfg.jobFile = path.Join(fioJobsDir, file.Name())
		r, err := cfg.run()
		if err != nil {
			return results, err
		}
		results = append(results, r)

		log.Infof("workload:%s", r.Jobs[0].JobName)
		log.Infof("bw_r:%d", r.Jobs[0].Read.BandwidthKb)
		log.Infof("IOPS_r:%f", r.Jobs[0].Read.IOPS)
		log.Infof("bw_w:%d", r.Jobs[0].Write.BandwidthKb)
		log.Infof("IOPS_w:%f", r.Jobs[0].Write.IOPS)

		waitTime := 5
		log.Debugf("Sleep %d seconds (creating the next pod too soon sometimes times out)", waitTime)
		time.Sleep(time.Duration(waitTime) * time.Second)
	}
	return results, err
}

func generateResultsView(testName string, results []fioResult, outputDir string) error {
	outputFile := path.Join(outputDir, "results.csv")
	f, err := os.Create(outputFile)
	if err != nil {
		return err
	}
	defer f.Close()

	log.Infof("Creating results output in %s", outputFile)

	w := csv.NewWriter(f)

	headers := []string{"NAME", "WORKLOAD", "bw_r", "bw_w", "IOPS_r", "IOPS_w"}
	err = w.Write(headers)
	if err != nil {
		return err
	}

	for _, r := range results {
		if len(r.Jobs) == 0 {
			return errors.Errorf("fio result has no jobs: %v", r)
		}
		row := []string{testName}
		row = append(row, r.Jobs[0].JobName)
		row = append(row, fmt.Sprintf("%d", r.Jobs[0].Read.BandwidthKb))
		row = append(row, fmt.Sprintf("%d", r.Jobs[0].Write.BandwidthKb))
		row = append(row, fmt.Sprintf("%f", r.Jobs[0].Read.IOPS))
		row = append(row, fmt.Sprintf("%f", r.Jobs[0].Write.IOPS))
		if err := w.Write(row); err != nil {
			return err
		}
	}

	w.Flush()

	return w.Error()
}

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.BoolFlag{
				Name:  optDebug,
				Usage: "Logs in debug level",
			},
			&cli.StringFlag{
				Name:  optTestName,
				Value: "kata-fio-test",
				Usage: "Change the fio test name for reports",
			},
			&cli.StringFlag{
				Name:  optOutputDir,
				Value: ".",
				Usage: "Use a file to store results",
			},
			&cli.StringFlag{
				Name:  optContainerRuntime,
				Value: "kata",
				Usage: "Choose the runtime to use",
			},
			// fio options
			&cli.StringFlag{
				Name:  optFioSize,
				Value: "200M",
				Usage: "File size to use for tests",
			},
			&cli.StringFlag{
				Name:  optFioBlockSize,
				Value: "4K",
				Usage: "Block size for fio tests",
			},
			&cli.BoolFlag{
				Name:  optFioDirect,
				Usage: "Use direct io",
			},
			&cli.StringFlag{
				Name:  optFioIoDepth,
				Value: "16",
				Usage: "Number of I/O units to keep in flight against the file",
			},
			&cli.StringFlag{
				Name:  optFioNumJobs,
				Value: "1",
				Usage: "Number of clones (processes/threads performing the same workload) of this job",
			},
		},
		Action: func(c *cli.Context) error {
			jobsDir := c.Args().First()

			if jobsDir == "" {
				cli.SubcommandHelpTemplate = strings.Replace(cli.SubcommandHelpTemplate, "[arguments...]", "<test-config-dir>", -1)
				cli.ShowCommandHelp(c, "")
				return errors.New("Missing <test-config-dir>")
			}

			if c.Bool(optDebug) {
				log.SetLevel(logrus.DebugLevel)
				k8s.Debug = true
				env.Debug = true
			}

			exec.SetLogger(log)
			k8s.SetLogger(log)
			env.SetLogger(log)

			testName := c.String(optTestName)

			outputDir, err := filepath.Abs(path.Join(c.String(optOutputDir), testName))
			if err != nil {
				return err
			}

			cfg := fioTestConfig{
				blocksize:        c.String(optFioBlockSize),
				direct:           c.Bool(optFioDirect),
				directory:        ".",
				iodepth:          c.String(optFioIoDepth),
				loops:            "3",
				numjobs:          c.String(optFioNumJobs),
				runtime:          "20",
				size:             c.String(optFioSize),
				containerRuntime: c.String(optContainerRuntime),
				outputDir:        outputDir,
			}

			log.Infof("Results will be created in %s", cfg.outputDir)

			err = os.MkdirAll(cfg.outputDir, 0775)
			if err != nil {
				return err
			}

			results, err := runFioJobs(jobsDir, cfg)
			if err != nil {
				return err
			}

			return generateResultsView(c.String(optTestName), results, outputDir)
		},
	}

	err := app.Run(os.Args)
	if err != nil {
		log.Fatal(err)
	}
}
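
// Illustrative invocation, matching the run target of the cmd/fiotest Makefile:
//
//	fio-k8s --debug --fio.size 10M --output-dir test-results --test-name kata ../../configs/example-config/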
@@ -0,0 +1,10 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2022 Intel Corporation
[global]
name=io_uring
filename=fio-file
rw=randrw
rwmixread=75
ioengine=io_uring

[randrw-io_uring]
@@ -0,0 +1,10 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=randrw-libaio
filename=fio-file
rw=randrw
rwmixread=75
ioengine=libaio

[randrw-libaio]
@@ -0,0 +1,10 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2022 Intel Corporation
[global]
name=sync
filename=fio-file
rw=randrw
rwmixread=75
ioengine=sync

[randrw-sync]
@@ -0,0 +1,16 @@
## Copyright (c) 2021 Intel Corporation
#
## SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
  name: iometrics
spec:
  runtimeClassName: kata
  containers:
  - name: iometrics
    image: ubuntu:latest
    # Just spin & wait forever
    command: [ "/bin/bash", "-c", "--" ]
    args: [ "sleep infinity" ]
@@ -0,0 +1,15 @@
## Copyright (c) 2021 Intel Corporation
#
## SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
  name: iometrics
spec:
  containers:
  - name: iometrics
    image: ubuntu:latest
    # Just spin & wait forever
    command: [ "/bin/bash", "-c", "--" ]
    args: [ "sleep infinity" ]
@@ -0,0 +1,9 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=randread-libaio
filename=fio-file
rw=randread
ioengine=libaio

[randread-libaio]
@@ -0,0 +1,9 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=randread-mmap
rw=randread
ioengine=mmap

[randread-mmap]
filename=fio-file
@@ -0,0 +1,10 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=randrw-libaio
filename=fio-file
rw=randrw
rwmixread=75
ioengine=libaio

[randrw-libaio]
@@ -0,0 +1,10 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=randrw-mmap
rw=randrw
rwmixread=75
ioengine=mmap

[randrw-mmap]
filename=fio-file
@@ -0,0 +1,9 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=randwrite-libaio
filename=fio-file
rw=randwrite
ioengine=libaio

[randwrite-libaio]
@@ -0,0 +1,9 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=randwrite-mmap
rw=randwrite
ioengine=mmap

[randwrite-mmap]
filename=fio-file
@@ -0,0 +1,9 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=seqread-libaio
filename=fio-file
rw=read
ioengine=libaio

[seqread-libaio]
@@ -0,0 +1,9 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=seqread-mmap
rw=read
ioengine=mmap

[seqread-mmap]
filename=fio-file
@@ -0,0 +1,8 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=seqread-psync
filename=fio-file
rw=read

[seqread-psync]
@@ -0,0 +1,9 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=seqwrite-libaio
filename=fio-file
rw=write
ioengine=libaio

[seqwrite-libaio]
@@ -0,0 +1,10 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation
[global]
name=seqwrite-mmap
filename=fio-file
rw=write
ioengine=mmap

[seqwrite-mmap]
filename=fio-file
tests/metrics/storage/fio-k8s/configs/test-config/kata.yaml (Normal file, 16 lines)
@@ -0,0 +1,16 @@
## Copyright (c) 2021 Intel Corporation
#
## SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
  name: iometrics
spec:
  runtimeClassName: kata
  containers:
  - name: iometrics
    image: ubuntu:latest
    # Just spin & wait forever
    command: [ "/bin/bash", "-c", "--" ]
    args: [ "sleep infinity" ]
tests/metrics/storage/fio-k8s/configs/test-config/runc.yaml (Normal file, 15 lines)
@@ -0,0 +1,15 @@
## Copyright (c) 2021 Intel Corporation
#
## SPDX-License-Identifier: Apache-2.0
#
apiVersion: v1
kind: Pod
metadata:
  name: iometrics
spec:
  containers:
  - name: iometrics
    image: ubuntu:latest
    # Just spin & wait forever
    command: [ "/bin/bash", "-c", "--" ]
    args: [ "sleep infinity" ]
tests/metrics/storage/fio-k8s/pkg/env/Makefile (vendored, Normal file, 9 lines)
@@ -0,0 +1,9 @@
#
# Copyright (c) 2021-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

gomod:
	GO111MODULE=on go mod edit -replace=github.com/kata-containers/kata-containers/tests/metrics/exec=../exec
	GO111MODULE=on go mod tidy
tests/metrics/storage/fio-k8s/pkg/env/env.go (vendored, Normal file, 38 lines)
@@ -0,0 +1,38 @@
// Copyright (c) 2021-2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package env

import (
	exec "github.com/kata-containers/kata-containers/tests/metrics/exec"
)

// logger interface for pkg
var log logger
var Debug bool = false

type logger interface {
	Infof(string, ...interface{})
	Debugf(string, ...interface{})
	Errorf(string, ...interface{})
}

func SetLogger(l logger) {
	log = l
}

var sysDropCachesPath = "/proc/sys/vm/drop_caches"
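
// Writing "3" to /proc/sys/vm/drop_caches frees the page cache plus dentries
// and inodes, so each fio run starts from a cold cache.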
func DropCaches() (err error) {
	log.Infof("drop caches")
	_, err = exec.ExecCmd("sync", Debug)
	if err != nil {
		return err
	}

	_, err = exec.ExecCmd("echo 3 | sudo tee "+sysDropCachesPath, Debug)
	if err != nil {
		return err
	}
	return nil
}
tests/metrics/storage/fio-k8s/pkg/env/go.mod (vendored, Normal file, 10 lines)
@@ -0,0 +1,10 @@
module github.com/kata-containers/kata-containers/tests/metrics/storage/fio-k8s/exec

go 1.19

require (
	github.com/kata-containers/kata-containers/tests/metrics/exec v0.0.0-00010101000000-000000000000 // indirect
	github.com/pkg/errors v0.9.1 // indirect
)

replace github.com/kata-containers/kata-containers/tests/metrics/exec => ../exec
tests/metrics/storage/fio-k8s/pkg/env/go.sum (vendored, Normal file, 2 lines)
@@ -0,0 +1,2 @@
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
tests/metrics/storage/fio-k8s/pkg/exec/Exec.go (Normal file, 67 lines)
@@ -0,0 +1,67 @@
// Copyright (c) 2021-2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package exec

import (
	"bytes"
	"io"
	"os"
	"os/exec"

	"github.com/pkg/errors"
)

// logger interface for pkg
var log logger

type logger interface {
	Infof(string, ...interface{})
	Debugf(string, ...interface{})
	Errorf(string, ...interface{})
}

func SetLogger(l logger) {
	log = l
}

// Exec a command.
// err != nil if the command fails to execute.
// The returned string is the combined stdout and stderr of the command.
func ExecCmd(c string, showInStdout bool) (stdout string, err error) {
	if c == "" {
		return "", errors.New("command is empty")
	}

	log.Debugf("Exec: %s", c)
	cmd := exec.Command("bash", "-o", "pipefail", "-c", c)
	var stdBuffer bytes.Buffer
	var writers []io.Writer
	writers = append(writers, &stdBuffer)
	if showInStdout {
		writers = append(writers, os.Stdout)
	}
	mw := io.MultiWriter(writers...)

	cmd.Stdout = mw
	cmd.Stderr = mw

	err = cmd.Run()
	output := stdBuffer.String()

	return output, errors.Wrap(err, output)
}
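
// Usage sketch (errors.Wrap returns nil when err is nil, so a successful
// run yields a nil error):
//
//	out, err := ExecCmd("uname -r", true) // also echo combined output to stdout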

// Exec a command.
// Send output to Stdout and Stderr.
func ExecStdout(c string) error {
	if c == "" {
		return errors.New("command is empty")
	}

	log.Debugf("Exec: %s", c)
	cmd := exec.Command("bash", "-o", "pipefail", "-c", c)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
tests/metrics/storage/fio-k8s/pkg/exec/go.mod (Normal file, 5 lines)
@@ -0,0 +1,5 @@
module github.com/kata-containers/kata-containers/tests/metrics/storage/fio-k8s/exec

go 1.19

require github.com/pkg/errors v0.9.1
tests/metrics/storage/fio-k8s/pkg/exec/go.sum (Normal file, 2 lines)
@@ -0,0 +1,2 @@
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
tests/metrics/storage/fio-k8s/pkg/k8s/Makefile (Normal file, 8 lines)
@@ -0,0 +1,8 @@
#
# Copyright (c) 2021-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
gomod:
	GO111MODULE=on go mod edit -replace=github.com/kata-containers/kata-containers/tests/metrics/exec=../exec
	GO111MODULE=on go mod tidy
tests/metrics/storage/fio-k8s/pkg/k8s/exec.go (Normal file, 34 lines)
@@ -0,0 +1,34 @@
// Copyright (c) 2021-2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package k8s

import (
	"fmt"

	exec "github.com/kata-containers/kata-containers/tests/metrics/exec"
)

type execOpt struct {
	showInStdOut bool
}

type ExecOption func(e *execOpt)

func ExecOptShowStdOut() ExecOption {
	return func(e *execOpt) {
		e.showInStdOut = true
	}
}

func (p *Pod) Exec(cmd string, opts ...ExecOption) (output string, err error) {
	log.Debugf("Exec %q in %s", cmd, p.YamlPath)
	o := &execOpt{showInStdOut: false}
	for _, opt := range opts {
		opt(o)
	}
	execCmd := fmt.Sprintf("kubectl exec -f %s -- /bin/bash -c %q", p.YamlPath, cmd)
	return exec.ExecCmd(execCmd, Debug || o.showInStdOut)
}
tests/metrics/storage/fio-k8s/pkg/k8s/go.mod (Normal file, 10 lines)
@@ -0,0 +1,10 @@
module github.com/kata-containers/kata-containers/tests/metrics/k8s

go 1.19

replace github.com/kata-containers/kata-containers/tests/metrics/exec => ../exec

require (
	github.com/kata-containers/kata-containers/tests/metrics/exec v0.0.0-00010101000000-000000000000 // indirect
	github.com/pkg/errors v0.9.1 // indirect
)
tests/metrics/storage/fio-k8s/pkg/k8s/go.sum (Normal file, 2 lines)
@@ -0,0 +1,2 @@
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
tests/metrics/storage/fio-k8s/pkg/k8s/k8s.go (Normal file, 68 lines)
@@ -0,0 +1,68 @@
// Copyright (c) 2021-2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package k8s

import (
	"fmt"

	exec "github.com/kata-containers/kata-containers/tests/metrics/exec"
	"github.com/pkg/errors"
)

// logger interface for pkg
var log logger
var Debug bool = false

type logger interface {
	Infof(string, ...interface{})
	Debugf(string, ...interface{})
	Errorf(string, ...interface{})
}

func SetLogger(l logger) {
	log = l
}

type Pod struct {
	YamlPath string
}

func (p *Pod) waitForReady() (err error) {
	log.Debugf("Wait for pod %s", p.YamlPath)
	_, err = exec.ExecCmd("kubectl wait --for=condition=ready -f "+p.YamlPath, Debug)
	return err
}

func (p *Pod) Run() (err error) {
	log.Debugf("Creating K8s Pod %s", p.YamlPath)
	_, err = exec.ExecCmd("kubectl apply -f "+p.YamlPath, Debug)
	if err != nil {
		return errors.Wrapf(err, "Failed to run pod %s", p.YamlPath)
	}

	err = p.waitForReady()
	if err != nil {
		return errors.Wrapf(err, "Failed to wait for pod %s", p.YamlPath)
	}
	return err
}

func (p *Pod) Delete() (err error) {
	log.Debugf("Delete pod %s", p.YamlPath)
	_, err = exec.ExecCmd("kubectl delete --ignore-not-found -f "+p.YamlPath, Debug)
	return errors.Wrapf(err, "Failed to delete pod %s", p.YamlPath)
}

func (p *Pod) CopyFromHost(src, dst string) (err error) {
	podName, err := exec.ExecCmd("kubectl get -f "+p.YamlPath+" -o jsonpath={.metadata.name}", Debug)
	if err != nil {
		return err
	}

	log.Debugf("Copy from host %q->%q in pod %s", src, dst, p.YamlPath)
	execCmd := fmt.Sprintf("kubectl cp %s %s:%s", src, podName, dst)
	_, err = exec.ExecCmd(execCmd, Debug)
	return err
}
tests/metrics/storage/fio-k8s/scripts/Makefile (Normal file, 10 lines)
@@ -0,0 +1,10 @@
#
# Copyright (c) 2021-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
MKFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
MKFILE_DIR := $(dir $(MKFILE_PATH))
run:
	$(MKFILE_DIR)/compare-virtiofsd-dax.sh
	"$(MKFILE_DIR)/report/gen-html-fio-report.sh" "./results"
tests/metrics/storage/fio-k8s/scripts/compare-virtiofsd-dax.sh (Executable file, 151 lines)
@@ -0,0 +1,151 @@
#!/bin/bash
# Copyright (c) 2021-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

set -o errexit
set -o nounset
set -o pipefail
set -o errtrace

script_dir=$(dirname "$(readlink -f "$0")")

runtime_path="/opt/kata/bin/kata-runtime"
kata_config_path="/opt/kata/share/defaults/kata-containers/configuration.toml"

results_dir="$(realpath ./)/results"

KATA_RUNTIME="${KATA_RUNTIME_CLASS:-kata}"
BAREMETAL_RUNTIME="runc"
RUNTIME_CLASS=""

FIO_SIZE="${FIO_SIZE:-500M}"
FIO_BLOCKSIZE="${FIO_BLOCKSIZE:-4K}"
VIRTIOFS_DAX_SIZE=${VIRTIOFS_DAX_SIZE:-600M}

# Set the base case for virtiofsd
set_base_virtiofs_config() {
	# Running kata-qemu-virtiofs
	# Defaults for virtiofs
	sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_cache '"auto"'
	sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_cache_size ${VIRTIOFS_DAX_SIZE}
}

## Helper function: get the name of the current bash function
fn_name() {
	echo "${FUNCNAME[1]}"
}

# Directory where results are stored
get_results_dir() {
	local test_name
	local test_result_dir
	test_name="${1}"
	test_result_dir="${results_dir}/${test_name}"
	mkdir -p "${test_result_dir}"
	echo "${test_result_dir}"
}

# Collect the kata environment:
# save the kata config toml
# save the output from kata-env
kata_env() {
	local suffix=${1}
	local config_path
	local kata_env_bk
	local kata_config_bk
	kata_env_bk="$(get_results_dir "${suffix}")/kata-env.toml"
	kata_config_bk="$(get_results_dir "${suffix}")/kata-config.toml"

	${runtime_path} kata-env >"${kata_env_bk}"
	config_path="$(${runtime_path} kata-env --json | jq .Runtime.Config.Path -r)"
	cp "${config_path}" "${kata_config_bk}"
}

# Collect the command used by virtiofsd
collect_qemu_virtiofs_cmd() {
	local rdir
	local test_name
	test_name="${1}"

	rdir=$(get_results_dir "${test_name}")
	# TODO
}

# Run metrics runner
run_workload() {
	local test_name
	local test_result_file
	local test_result_dir

	test_name="${1}"

	test_result_dir="$(get_results_dir "${test_name}")"
	test_result_file="${test_result_dir}/test-out.txt"

	echo "Running for kata config: ${test_name}"
	collect_qemu_virtiofs_cmd "$test_name"

	fio_runner_dir="${script_dir}/../../cmd/fiotest/"
	fio_jobs="${script_dir}/../../configs/test-config/"
	make -C "${fio_runner_dir}" build
	pwd
	set -x
	"${fio_runner_dir}fio-k8s" \
		--debug \
		--fio.size "${FIO_SIZE}" \
		--fio.block-size "${FIO_BLOCKSIZE}" \
		--container-runtime "${RUNTIME_CLASS}" \
		--test-name "${test_name}" \
		--output-dir "$(dirname ${test_result_dir})" \
		"${fio_jobs}" |
		tee \
			"${test_result_file}"
	set +x
}

pool_0_cache_auto_dax() {
	local suffix="$(fn_name)"

	set_base_virtiofs_config
	sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_extra_args '["--thread-pool-size=0","-o","no_posix_lock","-o","xattr"]'
	sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_cache '"auto"'
	sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_cache_size 1024
	kata_env "${suffix}"
	RUNTIME_CLASS="${KATA_RUNTIME}"
	run_workload "${suffix}"
}

pool_0_cache_auto_no_dax() {
	local suffix="$(fn_name)"

	set_base_virtiofs_config
	sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_extra_args '["--thread-pool-size=0","-o","no_posix_lock","-o","xattr"]'
	sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_cache '"auto"'
	sudo crudini --set --existing "$kata_config_path" hypervisor.qemu virtio_fs_cache_size 0

	kata_env "${suffix}"

	RUNTIME_CLASS="${KATA_RUNTIME}"
	run_workload "${suffix}"
	echo "done"
}

k8s_baremetal() {
	local suffix="$(fn_name)"

	RUNTIME_CLASS="${BAREMETAL_RUNTIME}"
	run_workload "${suffix}"
}

main() {
	mkdir -p "${results_dir}"

	k8s_baremetal
	pool_0_cache_auto_dax
	pool_0_cache_auto_no_dax
}

main "$@"
@@ -0,0 +1,51 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "tWacOPbMYPtc"
   },
   "source": [
    "# FIO comparison"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "jXtTs6yldl_y"
   },
   "outputs": [],
   "source": [
    "import fio\n",
    "fio.generate_report()"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "collapsed_sections": [],
   "name": "fio.ipynb",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
@@ -0,0 +1,102 @@
# Copyright (c) 2021-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
import pandas as pd
import os
import re
import io
import glob
from IPython.display import display, Markdown
import matplotlib.pyplot as plt

# Compare the test results grouped by fio job.
# Input:
#   df: dataset from `import_data()`
#   metric: name of a metric column provided in `df`
def compare_tests_group_by_fio_job(df, metric):
    test_names, metric_df = group_metrics_group_by_testname(df, metric)
    show_df(metric_df)
    plot_df(metric_df, test_names)

# Given a metric, return the results per test grouped by fio job.
# Input:
#   df: dataset from `import_data()`
#   metric: string with the name of the metric to filter.
# Output:
#   dataset with the format:
#   'workload' , 'name[0]' , ... , 'name[n]'
def group_metrics_group_by_testname(df, metric):
    # name of each test from the results
    names = set()
    # rows of the new dataset
    rows = []
    # map:
    #   keys: name of a fio job
    #   value: dict[k]:v where k: name of a test, v: value of the test for `metric`
    workload = {}

    for k, row in df.iterrows():
        # name of a fio job
        w = row['WORKLOAD']
        # name of the test
        tname = row['NAME']
        names.add(tname)
        # given a fio job name, get its dict of values
        # (init an empty dict if there are no previous values)
        dict_values = workload.get(w, {})
        # for the given metric, store it as dict_values[testname] = val
        # e.g. dict_values["test-name"] = row["IOPS"]
        dict_values[tname] = row[metric]
        workload[w] = dict_values

    names = list(names)
    cols = ['WORKLOAD'] + list(names)
    rdf = pd.DataFrame(workload, columns=cols)

    for k in workload:
        d = workload[k]

        if not d[names[0]] == 0:
            d["WORKLOAD"] = k
            rdf = rdf.append(d, ignore_index=True)
    rdf = rdf.dropna()
    return names, rdf

def plot_df(df, names, sort_key=""):
    if sort_key != "":
        df.sort_values(sort_key, ascending=False)
    df.plot(kind='bar', x="WORKLOAD", y=names, figsize=(30, 10))
    plt.show()

def import_data():
    frames = []
    for f in glob.glob('./results/*/results.csv'):
        print("reading:" + f)
        df = pd.read_csv(f)
        frames.append(df)
    return pd.concat(frames)

def show_df(df):
    pd.set_option('display.max_rows', df.shape[0]+1)
    print(df)

def print_md(s):
    display(Markdown(s))

# notebook entrypoint
def generate_report():
    # Load all the test results into a single dataset
    df_results = import_data()
    print_md("Show all data from results")
    show_df(df_results)
    print_md("### Compare the test results grouped by fio job. The metric used to compare is write bandwidth")
    compare_tests_group_by_fio_job(df_results, 'bw_w')
    print_md("### Compare the test results grouped by fio job. The metric used to compare is read bandwidth")
    compare_tests_group_by_fio_job(df_results, 'bw_r')
    print_md("### Compare the test results grouped by fio job. The metric used to compare is write IOPS (Input/Output Operations Per Second)")
    compare_tests_group_by_fio_job(df_results, 'IOPS_w')
    print_md("### Compare the test results grouped by fio job. The metric used to compare is read IOPS (Input/Output Operations Per Second)")
    compare_tests_group_by_fio_job(df_results, 'IOPS_r')
@@ -0,0 +1,48 @@
#!/bin/bash
# Copyright (c) 2021-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

set -o errexit
set -o nounset
set -o pipefail
set -o errtrace

script_dir=$(dirname "$(readlink -f "$0")")

results_dir=${1:-}

usage(){
	echo "$0 <results_dir>"
}

if [ "${results_dir}" == "" ];then
	echo "missing results directory"
	usage
	exit 1
fi

if [ ! -d "${results_dir}" ];then
	echo "${results_dir} is not a directory"
	usage
	exit 1
fi

results_dir=$(realpath "${results_dir}")

generate_report(){
	sudo chown "${USER}:${USER}" -R ${results_dir}
	sudo docker run --rm -e JUPYTER_ENABLE_LAB=yes \
		-v "${script_dir}:/home/jovyan" \
		-v "${results_dir}:/home/jovyan/results" \
		--user $(id -u):$(id -g) \
		jupyter/scipy-notebook:399cbb986c6b \
		bash -e -c '
		cd results;
		jupyter nbconvert --execute /home/jovyan/fio.ipynb --to html;
		cp /home/jovyan/fio.html /home/jovyan/results;
		'
}

generate_report
@@ -0,0 +1,39 @@
#!/bin/bash
# Copyright (c) 2021-2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

set -o errexit
set -o nounset
set -o pipefail
set -o errtrace

script_dir=$(dirname "$(readlink -f "$0")")
NOTEBOOK_PORT="8888"

results_dir=${1:-}

usage(){
	echo "$0 <results_dir>"
}

if [ "${results_dir}" == "" ];then
	echo "missing results directory"
	usage
	exit 1
fi

if [ ! -d "${results_dir}" ];then
	echo "${results_dir} is not a directory"
	usage
	exit 1
fi

results_dir=$(realpath "${results_dir}")

sudo -E docker run --rm -p "${NOTEBOOK_PORT}:${NOTEBOOK_PORT}" -e JUPYTER_ENABLE_LAB=yes \
	-v "${script_dir}:/home/jovyan" \
	-v "${results_dir}:/home/jovyan/results" \
	jupyter/scipy-notebook:399cbb986c6b \
	start.sh jupyter lab --LabApp.token=''