Merge pull request #4049 from GeorginaKin/CCv0-main-merge

CCv0: Merge main into CCv0 branch
Steve Horsman
2022-04-07 17:54:25 +01:00
committed by GitHub
90 changed files with 963 additions and 943 deletions

View File

@@ -0,0 +1,44 @@
on:
schedule:
- cron: '0 23 * * 0'
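# i.e. weekly, at 23:00 UTC on Sundays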
name: Docs URL Alive Check
jobs:
test:
strategy:
matrix:
go-version: [1.17.x]
os: [ubuntu-20.04]
runs-on: ${{ matrix.os }}
env:
target_branch: ${{ github.base_ref }}
steps:
- name: Install Go
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
env:
GOPATH: ${{ runner.workspace }}/kata-containers
- name: Set env
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
- name: Checkout code
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/checkout@v2
with:
fetch-depth: 0
path: ./src/github.com/${{ github.repository }}
- name: Setup
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
env:
GOPATH: ${{ runner.workspace }}/kata-containers
# docs url alive check
- name: Docs URL Alive Check
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
cd ${GOPATH}/src/github.com/${{ github.repository }} && make docs-url-alive-check

View File

@@ -26,6 +26,7 @@ jobs:
- name: Build ${{ matrix.asset }}
run: |
./tools/packaging/kata-deploy/local-build/kata-deploy-copy-yq-installer.sh
./tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh --build="${KATA_ASSET}"
build_dir=$(readlink -f build)
# store-artifact does not work with symlink
@@ -140,13 +141,10 @@ jobs:
- uses: actions/checkout@v2
- name: generate-and-upload-tarball
run: |
pushd $GITHUB_WORKSPACE/src/agent
cargo vendor >> .cargo/config
popd
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
tarball="kata-containers-$tag-vendor.tar.gz"
pushd $GITHUB_WORKSPACE
tar -cvzf "${tarball}" src/agent/.cargo/config src/agent/vendor
bash -c "tools/packaging/release/generate_vendor.sh ${tarball}"
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}"
popd

.gitignore vendored
View File

@@ -9,4 +9,5 @@ src/agent/src/version.rs
src/agent/kata-agent.service
src/agent/protocols/src/*.rs
!src/agent/protocols/src/lib.rs
build

View File

@@ -39,10 +39,16 @@ generate-protocols:
static-checks: build
bash ci/static-checks.sh
docs-url-alive-check:
bash ci/docs-url-alive-check.sh
.PHONY: \
all \
binary-tarball \
default \
install-binary-tarball \
logging-crate-tests \
static-checks
static-checks \
docs-url-alive-check

View File

@@ -1 +1 @@
2.4.0-rc0
2.5.0-alpha0

ci/docs-url-alive-check.sh Executable file
View File

@@ -0,0 +1,12 @@
#!/bin/bash
#
# Copyright (c) 2021 Easystack Inc.
#
# SPDX-License-Identifier: Apache-2.0
set -e
cidir=$(dirname "$0")
source "${cidir}/lib.sh"
run_docs_url_alive_check

View File

@@ -44,3 +44,12 @@ run_go_test()
clone_tests_repo
bash "$tests_repo_dir/.ci/go-test.sh"
}
run_docs_url_alive_check()
{
clone_tests_repo
# Make sure we have the target branch
git remote set-branches --add origin "${branch}"
git fetch -a
bash "$tests_repo_dir/.ci/static-checks.sh" --docs --all "github.com/kata-containers/kata-containers"
}

View File

@@ -30,7 +30,6 @@ See the [how-to documentation](how-to).
* [GPU Passthrough with Kata](./use-cases/GPU-passthrough-and-Kata.md)
* [SR-IOV with Kata](./use-cases/using-SRIOV-and-kata.md)
* [Intel QAT with Kata](./use-cases/using-Intel-QAT-and-kata.md)
* [VPP with Kata](./use-cases/using-vpp-and-kata.md)
* [SPDK vhost-user with Kata](./use-cases/using-SPDK-vhostuser-and-kata.md)
* [Intel SGX with Kata](./use-cases/using-Intel-SGX-and-kata.md)

View File

@@ -277,7 +277,9 @@ mod tests {
## Temporary files
Always delete temporary files on success.
Use `t.TempDir()` to create a temporary directory. The directory created by
`t.TempDir()` is automatically removed when the test and all its subtests
complete.
### Golang temporary files
@@ -286,11 +288,7 @@ func TestSomething(t *testing.T) {
assert := assert.New(t)
// Create a temporary directory
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
// Delete it at the end of the test
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
// Add test logic that will use the tmpdir here...
}

View File

@@ -67,22 +67,15 @@ Using a proxy for multiplexing the connections between the VM and the host uses
4.5MB per [POD][2]. In a high density deployment this could add up to GBs of
memory that could have been used to host more PODs. When we talk about density,
each kilobyte matters and it might be the decisive factor between running another
POD or not. For example if you have 500 PODs running in a server, the same
amount of [`kata-proxy`][3] processes will be running and consuming for around
2250MB of RAM. Before making the decision not to use VSOCKs, you should ask
POD or not. Before making the decision not to use VSOCKs, you should ask
yourself, how many more containers can run with the RAM consumed by the
Kata proxies?
### Reliability
[`kata-proxy`][3] is in charge of multiplexing the connections between virtual
machine and host processes; if it dies, all connections get broken. For example,
if you have a [POD][2] with 10 containers running and `kata-proxy` dies, it would
be impossible to contact your containers, though they would still be running.
Since communication via VSOCKs is direct, the only way to lose communication
with the containers is if the VM itself or the `containerd-shim-kata-v2` dies; if this happens,
the containers are removed automatically.
[1]: https://wiki.qemu.org/Features/VirtioVsock
[2]: ./vcpu-handling.md#virtual-cpus-and-kubernetes-pods
[3]: https://github.com/kata-containers/proxy

View File

@@ -2,24 +2,15 @@
## Default number of virtual CPUs
Before starting a container, the [runtime][6] reads the `default_vcpus` option
from the [configuration file][7] to determine the number of virtual CPUs
Before starting a container, the [runtime][4] reads the `default_vcpus` option
from the [configuration file][5] to determine the number of virtual CPUs
(vCPUs) needed to start the virtual machine. By default, `default_vcpus` is
equal to 1 for fast boot time and a small memory footprint per virtual machine.
Be aware that increasing this value negatively impacts the virtual machine's
boot time and memory footprint.
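For reference, a minimal sketch of where this option lives (the section name and path below assume the QEMU configuration shipped with Kata):
```toml
# /usr/share/defaults/kata-containers/configuration.toml
[hypervisor.qemu]
# Number of vCPUs each virtual machine boots with; raising this slows
# boot and grows the per-VM memory footprint, as noted above.
default_vcpus = 1
```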
In general, we recommend that you do not edit this variable, unless you know
what you are doing. If your container needs more than one vCPU, use
[docker `--cpus`][1], [docker update][4], or [Kubernetes `cpu` limits][2] to
assign more resources.
*Docker*
```sh
$ docker run --name foo -ti --cpus 2 debian bash
$ docker update --cpus 4 foo
```
[Kubernetes `cpu` limits][1] to assign more resources.
*Kubernetes*
@@ -49,7 +40,7 @@ $ sudo -E kubectl create -f ~/cpu-demo.yaml
## Virtual CPUs and Kubernetes pods
A Kubernetes pod is a group of one or more containers, with shared storage and
network, and a specification for how to run the containers [[specification][3]].
network, and a specification for how to run the containers [[specification][2]].
In Kata Containers this group of containers, which is called a sandbox, runs inside
the same virtual machine. If you do not specify a CPU constraint, the runtime does
not add more vCPUs and the container is not placed inside a CPU cgroup.
@@ -73,13 +64,7 @@ constraints with each container trying to consume 100% of vCPU, the resources
divide into two parts, 50% of vCPU for each container, because your virtual
machine does not have enough resources to satisfy the containers' needs. If you want
to give access to a greater or lesser portion of vCPUs to a specific container,
use [`docker --cpu-shares`][1] or [Kubernetes `cpu` requests][2].
*Docker*
```sh
$ docker run -ti --cpus-shares=512 debian bash
```
use [Kubernetes `cpu` requests][1].
*Kubernetes*
@@ -109,10 +94,9 @@ $ sudo -E kubectl create -f ~/cpu-demo.yaml
Before running containers without CPU constraint, consider that your containers
are not running alone. Since your containers run inside a virtual machine, other
processes use the vCPUs as well (e.g. `systemd` and the Kata Containers
[agent][5]). In general, we recommend setting `default_vcpus` equal to 1 to
[agent][3]). In general, we recommend setting `default_vcpus` equal to 1 to
allow non-container processes to run on this vCPU and to specify a CPU
constraint for each container. If your container is already running and needs
more vCPUs, you can add more using [docker update][4].
constraint for each container.
## Container with CPU constraint
@@ -121,7 +105,7 @@ constraints using the following formula: `vCPUs = ceiling( quota / period )`, wh
`quota` specifies the number of microseconds per CPU Period that the container is
guaranteed CPU access and `period` specifies the CPU CFS scheduler period of time
in microseconds. The result determines the number of vCPU to hot plug into the
virtual machine. Once the vCPUs have been added, the [agent][5] places the
virtual machine. Once the vCPUs have been added, the [agent][3] places the
container inside a CPU cgroup. This placement allows the container to use only
its assigned resources.
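As a worked example of the formula: a container granted `quota = 250000` with `period = 100000` yields `ceiling(250000 / 100000) = 3`, so three vCPUs are hot plugged into the virtual machine for that container.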
@@ -138,25 +122,6 @@ the virtual machine starts with 8 vCPUs and 1 vCPU is added and assigned
to the container. Non-container processes might be able to use 8 vCPUs but they
use a maximum of 1 vCPU, hence 7 vCPUs might not be used.
*Container without CPU constraint*
```sh
$ docker run -ti debian bash -c "nproc; cat /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_*"
1 # number of vCPUs
100000 # cfs period
-1 # cfs quota
```
*Container with CPU constraint*
```sh
docker run --cpus 4 -ti debian bash -c "nproc; cat /sys/fs/cgroup/cpu,cpuacct/cpu.cfs_*"
5 # number of vCPUs
100000 # cfs period
400000 # cfs quota
```
## Virtual CPU handling without hotplug
In some cases, the hardware and/or software architecture being utilized does not support
@@ -183,11 +148,8 @@ the container's `spec` will provide the sizing information directly. If these ar
calculate the number of CPUs required for the workload and augment this by `default_vcpus`
configuration option, and use this for the virtual machine size.
[1]: https://docs.docker.com/config/containers/resource_constraints/#cpu
[2]: https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource
[3]: https://kubernetes.io/docs/concepts/workloads/pods/pod/
[4]: https://docs.docker.com/engine/reference/commandline/update/
[5]: ../../src/agent
[6]: ../../src/runtime
[7]: ../../src/runtime/README.md#configuration
[1]: https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource
[2]: https://kubernetes.io/docs/concepts/workloads/pods/pod/
[3]: ../../src/agent
[4]: ../../src/runtime
[5]: ../../src/runtime/README.md#configuration

View File

@@ -15,6 +15,11 @@
- `qemu`
- `cloud-hypervisor`
- `firecracker`
In the case of `firecracker`, a block device `snapshotter` is needed
for the VM rootfs. Refer to the following guide for additional configuration
steps:
- [Setup Kata containers with `firecracker`](how-to-use-kata-containers-with-firecracker.md)
- `ACRN`
While `qemu`, `cloud-hypervisor` and `firecracker` work out of the box with an installation of Kata,

View File

@@ -48,9 +48,9 @@ Running Docker containers in Kata Containers requires care because `VOLUME`s specified
kataShared on / type virtiofs (rw,relatime,dax)
```
`kataShared` mount types are powered by [`virtio-fs`][virtio-fs], a marked improvement over `virtio-9p`, thanks to [PR #1016](https://github.com/kata-containers/runtime/pull/1016). While `virtio-fs` is normally an excellent choice, in the case of DinD workloads `virtio-fs` causes an issue -- [it *cannot* be used as an "upper layer" of `overlayfs` without a custom patch](http://lists.katacontainers.io/pipermail/kata-dev/2020-January/001216.html).
`kataShared` mount types are powered by [`virtio-fs`](https://virtio-fs.gitlab.io/), a marked improvement over `virtio-9p`, thanks to [PR #1016](https://github.com/kata-containers/runtime/pull/1016). While `virtio-fs` is normally an excellent choice, in the case of DinD workloads `virtio-fs` causes an issue -- [it *cannot* be used as an "upper layer" of `overlayfs` without a custom patch](http://lists.katacontainers.io/pipermail/kata-dev/2020-January/001216.html).
As `/var/lib/docker` is a `VOLUME` specified by DinD (i.e. the `docker` images tagged `*-dind`/`*-dind-rootless`), `docker` fill fail to start (or even worse, silently pick a worse storage driver like `vfs`) when started in a Kata Container. Special measures must be taken when running DinD-powered workloads in Kata Containers.
As `/var/lib/docker` is a `VOLUME` specified by DinD (i.e. the `docker` images tagged `*-dind`/`*-dind-rootless`), `docker` will fail to start (or even worse, silently pick a worse storage driver like `vfs`) when started in a Kata Container. Special measures must be taken when running DinD-powered workloads in Kata Containers.
## Workarounds/Solutions
@@ -58,7 +58,7 @@ Thanks to various community contributions (see [issue references below](#referen
### Use a memory backed volume
For small workloads (small container images, without much generated filesystem load), a memory-backed volume is sufficient. Kubernetes supports a variant of [the `EmptyDir` volume][k8s-emptydir], which allows for memdisk-backed storage -- the [the `medium: Memory` ][k8s-memory-volume-type]. An example of a `Pod` using such a setup [was contributed](https://github.com/kata-containers/runtime/issues/1429#issuecomment-477385283), and is reproduced below:
For small workloads (small container images, without much generated filesystem load), a memory-backed volume is sufficient. Kubernetes supports a variant of [the `EmptyDir` volume](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir), which allows for memdisk-backed storage -- the `medium: Memory` type. An example of a `Pod` using such a setup [was contributed](https://github.com/kata-containers/runtime/issues/1429#issuecomment-477385283), and is reproduced below:
```yaml
apiVersion: v1

View File

@@ -101,7 +101,7 @@ Start an ACRN based Kata Container,
$ sudo docker run -ti --runtime=kata-runtime busybox sh
```
You will see ACRN (`acrn-dm`) is now running on your system, as well as a `kata-shim` and `kata-proxy`. You should obtain an interactive shell prompt. Verify that all the Kata processes terminate once you exit the container.
You will see ACRN (`acrn-dm`) is now running on your system, as well as a `kata-shim`. You should obtain an interactive shell prompt. Verify that all the Kata processes terminate once you exit the container.
```bash
$ ps -ef | grep -E "kata|acrn"

View File

@@ -0,0 +1,254 @@
# Configure Kata Containers to use Firecracker
This document provides an overview of how to run Kata Containers with the AWS Firecracker hypervisor.
## Introduction
AWS Firecracker is an open source virtualization technology that is purpose-built for creating and managing secure, multi-tenant container and function-based services that provide serverless operational models. AWS Firecracker runs workloads in lightweight virtual machines, called `microVMs`, which combine the security and isolation properties provided by hardware virtualization technology with the speed and flexibility of containers.
Please refer to AWS Firecracker [documentation](https://github.com/firecracker-microvm/firecracker/blob/main/docs/getting-started.md) for more details.
## Pre-requisites
This document requires the presence of Kata Containers on your system. Install using the instructions available through the following links:
- Kata Containers [automated installation](../install/README.md)
- Kata Containers manual installation: Automated installation does not seem to be supported for Clear Linux, so please use [manual installation](../Developer-Guide.md) steps.
> **Note:** Create a rootfs image, not an initrd image.
## Install AWS Firecracker
Kata Containers currently supports only AWS Firecracker v0.23.4 (see [this PR](https://github.com/kata-containers/kata-containers/pull/1519)).
To install Firecracker we need to get the `firecracker` and `jailer` binaries:
```bash
$ release_url="https://github.com/firecracker-microvm/firecracker/releases"
$ version="v0.23.1"
$ arch=`uname -m`
$ curl ${release_url}/download/${version}/firecracker-${version}-${arch} -o firecracker
$ curl ${release_url}/download/${version}/jailer-${version}-${arch} -o jailer
$ chmod +x jailer firecracker
```
To make the binaries available from the default system `PATH`, it is recommended to move them to `/usr/local/bin` or to add a symbolic link:
```bash
$ sudo ln -s $(pwd)/firecracker /usr/local/bin
$ sudo ln -s $(pwd)/jailer /usr/local/bin
```
More details can be found in [AWS Firecracker docs](https://github.com/firecracker-microvm/firecracker/blob/main/docs/getting-started.md)
In order to run Kata with AWS Firecracker, a block device is required as the backing store for the VM. To interact with `containerd` and Kata we use the `devmapper` `snapshotter`.
## Configure `devmapper`
To check support for your `containerd` installation, you can run:
```
$ ctr plugins ls |grep devmapper
```
If the output of the above command is:
```
io.containerd.snapshotter.v1 devmapper linux/amd64 ok
```
then you can skip this section and move on to `Configure Kata Containers with AWS Firecracker`.
If the output of the above command is:
```
io.containerd.snapshotter.v1 devmapper linux/amd64 error
```
then we need to set up the `devmapper` `snapshotter`. Based on a [very useful
guide](https://docs.docker.com/storage/storagedriver/device-mapper-driver/)
from Docker, we can set it up using the following scripts:
> **Note:** The following scripts assume a 100G sparse file for storing container images, a 10G sparse file for the thin-provisioning pool and 10G base image files for any sandboxed container created. This means that we will need at least 10GB of free space.
```
#!/bin/bash
set -ex
DATA_DIR=/var/lib/containerd/devmapper
POOL_NAME=devpool
mkdir -p ${DATA_DIR}
# Create data file
sudo touch "${DATA_DIR}/data"
sudo truncate -s 100G "${DATA_DIR}/data"
# Create metadata file
sudo touch "${DATA_DIR}/meta"
sudo truncate -s 10G "${DATA_DIR}/meta"
# Allocate loop devices
DATA_DEV=$(sudo losetup --find --show "${DATA_DIR}/data")
META_DEV=$(sudo losetup --find --show "${DATA_DIR}/meta")
# Define thin-pool parameters.
# See https://www.kernel.org/doc/Documentation/device-mapper/thin-provisioning.txt for details.
SECTOR_SIZE=512
DATA_SIZE="$(sudo blockdev --getsize64 -q ${DATA_DEV})"
LENGTH_IN_SECTORS=$(bc <<< "${DATA_SIZE}/${SECTOR_SIZE}")
DATA_BLOCK_SIZE=128
LOW_WATER_MARK=32768
# Create a thin-pool device
sudo dmsetup create "${POOL_NAME}" \
--table "0 ${LENGTH_IN_SECTORS} thin-pool ${META_DEV} ${DATA_DEV} ${DATA_BLOCK_SIZE} ${LOW_WATER_MARK}"
cat << EOF
#
# Add this to your config.toml configuration file and restart `containerd` daemon
#
[plugins]
[plugins.devmapper]
pool_name = "${POOL_NAME}"
root_path = "${DATA_DIR}"
base_image_size = "10GB"
discard_blocks = true
EOF
```
Make it executable and run it:
```bash
$ sudo chmod +x ~/scripts/devmapper/create.sh
$ cd ~/scripts/devmapper/
$ sudo ./create.sh
```
Now, we can add the `devmapper` configuration provided by the script to `/etc/containerd/config.toml`.
> **Note:** If you are using the default `containerd` configuration (`containerd config default >> /etc/containerd/config.toml`), you may need to edit the existing `[plugins."io.containerd.snapshotter.v1.devmapper"]` configuration.
Save and restart `containerd`:
```bash
$ sudo systemctl restart containerd
```
We can use `dmsetup` to verify that the thin-pool was created successfully.
```bash
$ sudo dmsetup ls
```
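If the pool was created successfully, the output lists it under the name set in the script (the `major:minor` device numbers will differ from system to system), for example:
```
devpool (253:0)
```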
We should also check that `devmapper` is registered and running:
```bash
$ sudo ctr plugins ls | grep devmapper
```
This script needs to be run only once, while setting up the `devmapper` `snapshotter` for `containerd`. Afterwards, make sure that on each reboot, the thin-pool is initialized from the same data directory. Otherwise, all the fetched containers (or the ones that you have created) will be re-initialized. A simple script that re-creates the thin-pool from the same data directory is shown below:
```
#!/bin/bash
set -ex
DATA_DIR=/var/lib/containerd/devmapper
POOL_NAME=devpool
# Allocate loop devices
DATA_DEV=$(sudo losetup --find --show "${DATA_DIR}/data")
META_DEV=$(sudo losetup --find --show "${DATA_DIR}/meta")
# Define thin-pool parameters.
# See https://www.kernel.org/doc/Documentation/device-mapper/thin-provisioning.txt for details.
SECTOR_SIZE=512
DATA_SIZE="$(sudo blockdev --getsize64 -q ${DATA_DEV})"
LENGTH_IN_SECTORS=$(bc <<< "${DATA_SIZE}/${SECTOR_SIZE}")
DATA_BLOCK_SIZE=128
LOW_WATER_MARK=32768
# Create a thin-pool device
sudo dmsetup create "${POOL_NAME}" \
--table "0 ${LENGTH_IN_SECTORS} thin-pool ${META_DEV} ${DATA_DEV} ${DATA_BLOCK_SIZE} ${LOW_WATER_MARK}"
```
We can create a systemd service to run the above script on each reboot:
```bash
$ sudo nano /lib/systemd/system/devmapper_reload.service
```
The service file:
```
[Unit]
Description=Devmapper reload script
[Service]
ExecStart=/path/to/script/reload.sh
[Install]
WantedBy=multi-user.target
```
Enable the newly created service:
```bash
$ sudo systemctl daemon-reload
$ sudo systemctl enable devmapper_reload.service
$ sudo systemctl start devmapper_reload.service
```
## Configure Kata Containers with AWS Firecracker
To configure Kata Containers with AWS Firecracker, copy the generated `configuration-fc.toml` file when building the `kata-runtime` to either `/etc/kata-containers/configuration-fc.toml` or `/usr/share/defaults/kata-containers/configuration-fc.toml`.
The following command shows the full paths to the `configuration.toml` files that the runtime loads. It will use the first path that exists. (Please make sure the kernel and image paths are set correctly in the `configuration.toml` file.)
```bash
$ sudo kata-runtime --show-default-config-paths
```
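On a typical installation this prints the two default locations, in priority order (the exact output may vary with the installation method):
```
/etc/kata-containers/configuration.toml
/usr/share/defaults/kata-containers/configuration.toml
```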
## Configure `containerd`
Next, we need to configure `containerd`. Add a file in your `PATH` (e.g. `/usr/local/bin/containerd-shim-kata-fc-v2`) with the following contents:
```
#!/bin/bash
KATA_CONF_FILE=/etc/kata-containers/configuration-fc.toml /usr/local/bin/containerd-shim-kata-v2 "$@"
```
> **Note:** You may need to edit the paths of the configuration file and the `containerd-shim-kata-v2` binary to correspond to your setup.
Make it executable:
```bash
$ sudo chmod +x /usr/local/bin/containerd-shim-kata-fc-v2
```
Add the relevant section in `containerd`'s `config.toml` file (`/etc/containerd/config.toml`):
```
[plugins.cri.containerd.runtimes]
[plugins.cri.containerd.runtimes.kata-fc]
runtime_type = "io.containerd.kata-fc.v2"
```
> **Note:** If you are using the default `containerd` configuration (`containerd config default >> /etc/containerd/config.toml`),
> the configuration should change to:
```
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata-fc]
runtime_type = "io.containerd.kata-fc.v2"
```
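The wrapper's filename matters: containerd resolves a `runtime_type` of the form `io.containerd.<name>.<version>` to a binary named `containerd-shim-<name>-<version>` on its `PATH`, which is why the script above must be named `containerd-shim-kata-fc-v2`.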
Restart `containerd`:
```bash
$ sudo systemctl restart containerd
```
## Verify the installation
We are now ready to launch a container using Kata with Firecracker to verify that everything worked:
```bash
$ sudo ctr images pull --snapshotter devmapper docker.io/library/ubuntu:latest
$ sudo ctr run --snapshotter devmapper --runtime io.containerd.run.kata-fc.v2 -t --rm docker.io/library/ubuntu:latest kata-fc-test
```
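As an additional sanity check, the kernel version reported inside the container should be the guest VM's, not the host's, since the workload now runs inside a Firecracker `microVM`:
```bash
$ uname -r   # run inside the Kata container: reports the guest kernel version
```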

View File

@@ -1,76 +0,0 @@
# Setup to run VPP
The Data Plane Development Kit (DPDK) is a set of libraries and drivers for
fast packet processing. Vector Packet Processing (VPP) is an extensible
framework that provides out-of-the-box production-quality
switch and router functionality. VPP is a high-performance packet-processing
stack that can run on commodity CPUs. Enabling VPP with DPDK support can
yield significant performance improvements over a Linux\* bridge, providing a
switch with DPDK VHOST-USER ports.
For more information about VPP, visit the [wiki](https://wiki.fd.io/view/VPP).
## Install and configure Kata Containers
Follow the [Kata Containers setup instructions](../Developer-Guide.md).
In order to make use of VHOST-USER based interfaces, the container needs to be backed
by huge pages. `HugePages` support is required for the large memory pool allocation used for
DPDK packet buffers. This is a feature which must be configured within the Linux Kernel. See
[the DPDK documentation](https://doc.dpdk.org/guides/linux_gsg/sys_reqs.html#use-of-hugepages-in-the-linux-environment)
for details on how to enable for the host. After enabling huge pages support on the host system,
update the Kata configuration to enable huge page support in the guest kernel:
```
$ sudo sed -i -e 's/^# *\(enable_hugepages\).*=.*$/\1 = true/g' /usr/share/defaults/kata-containers/configuration.toml
```
## Install VPP
Follow the [VPP installation instructions](https://wiki.fd.io/view/VPP/Installing_VPP_binaries_from_packages).
After a successful installation, your host system is ready to start
connecting Kata Containers with VPP bridges.
### Install the VPP Docker\* plugin
To create a Docker network and connect Kata Containers easily to that network through
Docker, install a VPP Docker plugin.
To install the plugin, follow the [plugin installation instructions](https://github.com/clearcontainers/vpp).
This VPP plugin allows the creation of a VPP network. Every container added
to this network is connected through an L2 bridge-domain provided by VPP.
## Example: Launch two Kata Containers using VPP
To use VPP, use Docker to create a network that makes use of VPP.
For example:
```
$ sudo docker network create -d=vpp --ipam-driver=vpp --subnet=192.168.1.0/24 --gateway=192.168.1.1 vpp_net
```
Test connectivity by launching two containers:
```
$ sudo docker run --runtime=kata-runtime --net=vpp_net --ip=192.168.1.2 --mac-address=CA:FE:CA:FE:01:02 -it busybox bash -c "ip a; ip route; sleep 300"
$ sudo docker run --runtime=kata-runtime --net=vpp_net --ip=192.168.1.3 --mac-address=CA:FE:CA:FE:01:03 -it busybox bash -c "ip a; ip route; ping 192.168.1.2"
```
These commands set up two Kata Containers connected via a VPP L2 bridge
domain. The first of the two VMs displays the networking details and then
sleeps, providing a period of time for it to be pinged. The second
VM displays its networking details and then pings the first VM, verifying
connectivity between them.
After verifying connectivity, clean up with the following commands:
```
$ sudo docker kill $(sudo docker ps --no-trunc -aq)
$ sudo docker rm $(sudo docker ps --no-trunc -aq)
$ sudo docker network rm vpp_net
$ sudo service vpp stop
```

View File

@@ -151,12 +151,12 @@ async fn register_memory_event(
let eventfd = eventfd(0, EfdFlags::EFD_CLOEXEC)?;
let event_control_path = Path::new(&cg_dir).join("cgroup.event_control");
let data;
if arg.is_empty() {
data = format!("{} {}", eventfd, event_file.as_raw_fd());
let data = if arg.is_empty() {
format!("{} {}", eventfd, event_file.as_raw_fd())
} else {
data = format!("{} {} {}", eventfd, event_file.as_raw_fd(), arg);
}
format!("{} {} {}", eventfd, event_file.as_raw_fd(), arg)
};
fs::write(&event_control_path, data)?;

View File

@@ -357,13 +357,12 @@ fn seccomp_grpc_to_oci(sec: &grpc::LinuxSeccomp) -> oci::LinuxSeccomp {
for sys in sec.Syscalls.iter() {
let mut args = Vec::new();
let errno_ret: u32;
if sys.has_errnoret() {
errno_ret = sys.get_errnoret();
let errno_ret: u32 = if sys.has_errnoret() {
sys.get_errnoret()
} else {
errno_ret = libc::EPERM as u32;
}
libc::EPERM as u32
};
for arg in sys.Args.iter() {
args.push(oci::LinuxSeccompArg {

View File

@@ -8,8 +8,8 @@ use std::fs::File;
use std::os::unix::io::RawFd;
use tokio::sync::mpsc::Sender;
use nix::errno::Errno;
use nix::fcntl::{fcntl, FcntlArg, OFlag};
use nix::sys::signal::{self, Signal};
use nix::sys::wait::{self, WaitStatus};
use nix::unistd::{self, Pid};
use nix::Result;
@@ -80,7 +80,7 @@ pub struct Process {
pub trait ProcessOperations {
fn pid(&self) -> Pid;
fn wait(&self) -> Result<WaitStatus>;
fn signal(&self, sig: Signal) -> Result<()>;
fn signal(&self, sig: libc::c_int) -> Result<()>;
}
impl ProcessOperations for Process {
@@ -92,8 +92,10 @@ impl ProcessOperations for Process {
wait::waitpid(Some(self.pid()), None)
}
fn signal(&self, sig: Signal) -> Result<()> {
signal::kill(self.pid(), Some(sig))
fn signal(&self, sig: libc::c_int) -> Result<()> {
let res = unsafe { libc::kill(self.pid().into(), sig) };
Errno::result(res).map(drop)
}
}
@@ -281,6 +283,6 @@ mod tests {
// signal to every process in the process
// group of the calling process.
process.pid = 0;
assert!(process.signal(Signal::SIGCONT).is_ok());
assert!(process.signal(libc::SIGCONT).is_ok());
}
}

View File

@@ -126,9 +126,7 @@ fn announce(logger: &Logger, config: &AgentConfig) {
// output to the vsock port specified, or stdout.
async fn create_logger_task(rfd: RawFd, vsock_port: u32, shutdown: Receiver<bool>) -> Result<()> {
let mut reader = PipeStream::from_fd(rfd);
let mut writer: Box<dyn AsyncWrite + Unpin + Send>;
if vsock_port > 0 {
let mut writer: Box<dyn AsyncWrite + Unpin + Send> = if vsock_port > 0 {
let listenfd = socket::socket(
AddressFamily::Vsock,
SockType::Stream,
@@ -140,10 +138,10 @@ async fn create_logger_task(rfd: RawFd, vsock_port: u32, shutdown: Receiver<bool
socket::bind(listenfd, &addr)?;
socket::listen(listenfd, 1)?;
writer = Box::new(util::get_vsock_stream(listenfd).await?);
Box::new(util::get_vsock_stream(listenfd).await?)
} else {
writer = Box::new(tokio::io::stdout());
}
Box::new(tokio::io::stdout())
};
let _ = util::interruptable_io_copier(&mut reader, &mut writer, shutdown).await;

View File

@@ -344,25 +344,25 @@ fn set_gauge_vec_meminfo(gv: &prometheus::GaugeVec, meminfo: &procfs::Meminfo) {
#[instrument]
fn set_gauge_vec_cpu_time(gv: &prometheus::GaugeVec, cpu: &str, cpu_time: &procfs::CpuTime) {
gv.with_label_values(&[cpu, "user"])
.set(cpu_time.user as f64);
.set(cpu_time.user_ms() as f64);
gv.with_label_values(&[cpu, "nice"])
.set(cpu_time.nice as f64);
.set(cpu_time.nice_ms() as f64);
gv.with_label_values(&[cpu, "system"])
.set(cpu_time.system as f64);
.set(cpu_time.system_ms() as f64);
gv.with_label_values(&[cpu, "idle"])
.set(cpu_time.idle as f64);
.set(cpu_time.idle_ms() as f64);
gv.with_label_values(&[cpu, "iowait"])
.set(cpu_time.iowait.unwrap_or(0) as f64);
.set(cpu_time.iowait_ms().unwrap_or(0) as f64);
gv.with_label_values(&[cpu, "irq"])
.set(cpu_time.irq.unwrap_or(0) as f64);
.set(cpu_time.irq_ms().unwrap_or(0) as f64);
gv.with_label_values(&[cpu, "softirq"])
.set(cpu_time.softirq.unwrap_or(0) as f64);
.set(cpu_time.softirq_ms().unwrap_or(0) as f64);
gv.with_label_values(&[cpu, "steal"])
.set(cpu_time.steal.unwrap_or(0) as f64);
.set(cpu_time.steal_ms().unwrap_or(0) as f64);
gv.with_label_values(&[cpu, "guest"])
.set(cpu_time.guest.unwrap_or(0) as f64);
.set(cpu_time.guest_ms().unwrap_or(0) as f64);
gv.with_label_values(&[cpu, "guest_nice"])
.set(cpu_time.guest_nice.unwrap_or(0) as f64);
.set(cpu_time.guest_nice_ms().unwrap_or(0) as f64);
}
#[instrument]

View File

@@ -1036,7 +1036,7 @@ mod tests {
let dest_filename: String;
if !d.src.is_empty() {
src = dir.path().join(d.src.to_string());
src = dir.path().join(d.src);
src_filename = src
.to_str()
.expect("failed to convert src to filename")
@@ -1046,7 +1046,7 @@ mod tests {
}
if !d.dest.is_empty() {
dest = dir.path().join(d.dest.to_string());
dest = dir.path().join(d.dest);
dest_filename = dest
.to_str()
.expect("failed to convert dest to filename")

View File

@@ -19,6 +19,7 @@ use ttrpc::{
};
use anyhow::{anyhow, Context, Result};
use cgroups::freezer::FreezerState;
use oci::{LinuxNamespace, Root, Spec};
use protobuf::{Message, RepeatedField, SingularPtrField};
use protocols::agent::{
@@ -39,9 +40,9 @@ use rustjail::specconv::CreateOpts;
use nix::errno::Errno;
use nix::mount::MsFlags;
use nix::sys::signal::Signal;
use nix::sys::stat;
use nix::unistd::{self, Pid};
use rustjail::cgroups::Manager;
use rustjail::process::ProcessOperations;
use sysinfo::{DiskExt, System, SystemExt};
@@ -70,7 +71,6 @@ use tracing_opentelemetry::OpenTelemetrySpanExt;
use tracing::instrument;
use libc::{self, c_char, c_ushort, pid_t, winsize, TIOCSWINSZ};
use std::convert::TryFrom;
use std::fs;
use std::os::unix::fs::MetadataExt;
use std::os::unix::prelude::PermissionsExt;
@@ -410,7 +410,6 @@ impl AgentService {
let cid = req.container_id.clone();
let eid = req.exec_id.clone();
let s = self.sandbox.clone();
let mut sandbox = s.lock().await;
info!(
sl!(),
@@ -419,27 +418,93 @@ impl AgentService {
"exec-id" => eid.clone(),
);
let p = sandbox.find_container_process(cid.as_str(), eid.as_str())?;
let mut signal = Signal::try_from(req.signal as i32).map_err(|e| {
anyhow!(e).context(format!(
"failed to convert {:?} to signal (container-id: {}, exec-id: {})",
req.signal, cid, eid
))
})?;
// For container initProcess, if it hasn't installed handler for "SIGTERM" signal,
// it will ignore the "SIGTERM" signal sent to it, thus send it "SIGKILL" signal
// instead of "SIGTERM" to terminate it.
if p.init && signal == Signal::SIGTERM && !is_signal_handled(p.pid, req.signal) {
signal = Signal::SIGKILL;
let mut sig: libc::c_int = req.signal as libc::c_int;
{
let mut sandbox = s.lock().await;
let p = sandbox.find_container_process(cid.as_str(), eid.as_str())?;
// For the container's init process, if it hasn't installed a handler for the "SIGTERM" signal,
// it will ignore the "SIGTERM" signal sent to it, so send it the "SIGKILL" signal
// instead of "SIGTERM" to terminate it.
if p.init && sig == libc::SIGTERM && !is_signal_handled(p.pid, sig as u32) {
sig = libc::SIGKILL;
}
p.signal(sig)?;
}
p.signal(signal)?;
if eid.is_empty() {
// eid is empty, signal all the remaining processes in the container cgroup
info!(
sl!(),
"signal all the remaining processes";
"container-id" => cid.clone(),
"exec-id" => eid.clone(),
);
if let Err(err) = self.freeze_cgroup(&cid, FreezerState::Frozen).await {
warn!(
sl!(),
"freeze cgroup failed";
"container-id" => cid.clone(),
"exec-id" => eid.clone(),
"error" => format!("{:?}", err),
);
}
let pids = self.get_pids(&cid).await?;
for pid in pids.iter() {
let res = unsafe { libc::kill(*pid, sig) };
if let Err(err) = Errno::result(res).map(drop) {
warn!(
sl!(),
"signal failed";
"container-id" => cid.clone(),
"exec-id" => eid.clone(),
"pid" => pid,
"error" => format!("{:?}", err),
);
}
}
if let Err(err) = self.freeze_cgroup(&cid, FreezerState::Thawed).await {
warn!(
sl!(),
"unfreeze cgroup failed";
"container-id" => cid.clone(),
"exec-id" => eid.clone(),
"error" => format!("{:?}", err),
);
}
}
Ok(())
}
async fn freeze_cgroup(&self, cid: &str, state: FreezerState) -> Result<()> {
let s = self.sandbox.clone();
let mut sandbox = s.lock().await;
let ctr = sandbox
.get_container(cid)
.ok_or_else(|| anyhow!("Invalid container id {}", cid))?;
let cm = ctr
.cgroup_manager
.as_ref()
.ok_or_else(|| anyhow!("cgroup manager not exist"))?;
cm.freeze(state)?;
Ok(())
}
async fn get_pids(&self, cid: &str) -> Result<Vec<i32>> {
let s = self.sandbox.clone();
let mut sandbox = s.lock().await;
let ctr = sandbox
.get_container(cid)
.ok_or_else(|| anyhow!("Invalid container id {}", cid))?;
let cm = ctr
.cgroup_manager
.as_ref()
.ok_or_else(|| anyhow!("cgroup manager not exist"))?;
let pids = cm.get_pids()?;
Ok(pids)
}
#[instrument]
async fn do_wait_process(
&self,

View File

@@ -58,17 +58,16 @@ async fn handle_sigchild(logger: Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
}
let mut p = process.unwrap();
let ret: i32;
match wait_status {
WaitStatus::Exited(_, c) => ret = c,
WaitStatus::Signaled(_, sig, _) => ret = sig as i32,
let ret: i32 = match wait_status {
WaitStatus::Exited(_, c) => c,
WaitStatus::Signaled(_, sig, _) => sig as i32,
_ => {
info!(logger, "got wrong status for process";
"child-status" => format!("{:?}", wait_status));
continue;
}
}
};
p.exit_code = ret;
let _ = p.exit_tx.take();

View File

@@ -237,8 +237,6 @@ mod tests {
JoinError,
>;
let result: std::result::Result<u64, std::io::Error>;
select! {
res = handle => spawn_result = res,
_ = &mut timeout => panic!("timed out"),
@@ -246,7 +244,7 @@ mod tests {
assert!(spawn_result.is_ok());
result = spawn_result.unwrap();
let result: std::result::Result<u64, std::io::Error> = spawn_result.unwrap();
assert!(result.is_ok());
@@ -278,8 +276,6 @@ mod tests {
let spawn_result: std::result::Result<std::result::Result<u64, std::io::Error>, JoinError>;
let result: std::result::Result<u64, std::io::Error>;
select! {
res = handle => spawn_result = res,
_ = &mut timeout => panic!("timed out"),
@@ -287,7 +283,7 @@ mod tests {
assert!(spawn_result.is_ok());
result = spawn_result.unwrap();
let result: std::result::Result<u64, std::io::Error> = spawn_result.unwrap();
assert!(result.is_ok());
@@ -320,8 +316,6 @@ mod tests {
let spawn_result: std::result::Result<std::result::Result<u64, std::io::Error>, JoinError>;
let result: std::result::Result<u64, std::io::Error>;
select! {
res = handle => spawn_result = res,
_ = &mut timeout => panic!("timed out"),
@@ -329,7 +323,7 @@ mod tests {
assert!(spawn_result.is_ok());
result = spawn_result.unwrap();
let result: std::result::Result<u64, std::io::Error> = spawn_result.unwrap();
assert!(result.is_ok());

View File

@@ -178,13 +178,11 @@ impl Builder {
pub fn init(self) -> Exporter {
let Builder { port, cid, logger } = self;
let cid_str: String;
if self.cid == libc::VMADDR_CID_ANY {
cid_str = ANY_CID.to_string();
let cid_str: String = if self.cid == libc::VMADDR_CID_ANY {
ANY_CID.to_string()
} else {
cid_str = format!("{}", self.cid);
}
format!("{}", self.cid)
};
Exporter {
port,

View File

@@ -589,12 +589,10 @@ $(GENERATED_FILES): %: %.in $(MAKEFILE_LIST) VERSION .git-commit
generate-config: $(CONFIGS)
test: install-hook go-test
test: hook go-test
install-hook:
make -C virtcontainers hook
echo "installing mock hook"
sudo -E make -C virtcontainers install
hook:
make -C pkg/katautils/mockhook
go-test: $(GENERATED_FILES)
go clean -testcache

View File

@@ -21,7 +21,7 @@ import (
const defaultListenAddress = "127.0.0.1:8090"
var monitorListenAddr = flag.String("listen-address", defaultListenAddress, "The address to listen on for HTTP requests.")
var runtimeEndpoint = flag.String("runtime-endpoint", "/run/containerd/containerd.sock", `Endpoint of CRI container runtime service. (default: "/run/containerd/containerd.sock")`)
var runtimeEndpoint = flag.String("runtime-endpoint", "/run/containerd/containerd.sock", "Endpoint of CRI container runtime service.")
var logLevel = flag.String("log-level", "info", "Log level of logrus(trace/debug/info/warn/error/fatal/panic).")
// These values are overridden via ldflags

View File

@@ -8,7 +8,6 @@ package main
import (
"context"
"flag"
"os"
"testing"
"github.com/stretchr/testify/assert"
@@ -43,9 +42,7 @@ func TestFactoryCLIFunctionNoRuntimeConfig(t *testing.T) {
func TestFactoryCLIFunctionInit(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
runtimeConfig, err := newTestRuntimeConfig(tmpdir, testConsole, true)
assert.NoError(err)
@@ -92,9 +89,7 @@ func TestFactoryCLIFunctionInit(t *testing.T) {
func TestFactoryCLIFunctionDestroy(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
runtimeConfig, err := newTestRuntimeConfig(tmpdir, testConsole, true)
assert.NoError(err)
@@ -126,9 +121,7 @@ func TestFactoryCLIFunctionDestroy(t *testing.T) {
func TestFactoryCLIFunctionStatus(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
runtimeConfig, err := newTestRuntimeConfig(tmpdir, testConsole, true)
assert.NoError(err)

View File

@@ -71,11 +71,7 @@ func TestCCCheckCLIFunction(t *testing.T) {
func TestCheckCheckKernelModulesNoNesting(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
savedSysModuleDir := sysModuleDir
savedProcCPUInfo := procCPUInfo
@@ -91,7 +87,7 @@ func TestCheckCheckKernelModulesNoNesting(t *testing.T) {
procCPUInfo = savedProcCPUInfo
}()
err = os.MkdirAll(sysModuleDir, testDirMode)
err := os.MkdirAll(sysModuleDir, testDirMode)
if err != nil {
t.Fatal(err)
}
@@ -156,11 +152,7 @@ func TestCheckCheckKernelModulesNoNesting(t *testing.T) {
func TestCheckCheckKernelModulesNoUnrestrictedGuest(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
savedSysModuleDir := sysModuleDir
savedProcCPUInfo := procCPUInfo
@@ -176,7 +168,7 @@ func TestCheckCheckKernelModulesNoUnrestrictedGuest(t *testing.T) {
procCPUInfo = savedProcCPUInfo
}()
err = os.MkdirAll(sysModuleDir, testDirMode)
err := os.MkdirAll(sysModuleDir, testDirMode)
if err != nil {
t.Fatal(err)
}
@@ -255,11 +247,7 @@ func TestCheckHostIsVMContainerCapable(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
savedSysModuleDir := sysModuleDir
savedProcCPUInfo := procCPUInfo
@@ -275,7 +263,7 @@ func TestCheckHostIsVMContainerCapable(t *testing.T) {
procCPUInfo = savedProcCPUInfo
}()
err = os.MkdirAll(sysModuleDir, testDirMode)
err := os.MkdirAll(sysModuleDir, testDirMode)
if err != nil {
t.Fatal(err)
}
@@ -405,11 +393,7 @@ func TestArchKernelParamHandler(t *testing.T) {
func TestKvmIsUsable(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
savedKvmDevice := kvmDevice
fakeKVMDevice := filepath.Join(dir, "kvm")
@@ -419,7 +403,7 @@ func TestKvmIsUsable(t *testing.T) {
kvmDevice = savedKvmDevice
}()
err = kvmIsUsable()
err := kvmIsUsable()
assert.Error(err)
err = createEmptyFile(fakeKVMDevice)
@@ -457,9 +441,7 @@ foo : bar
func TestSetCPUtype(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
savedArchRequiredCPUFlags := archRequiredCPUFlags
savedArchRequiredCPUAttribs := archRequiredCPUAttribs

View File

@@ -67,11 +67,7 @@ foo : bar
{validContents, validNormalizeVendorName, validNormalizeModelName, false},
}
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
savedProcCPUInfo := procCPUInfo
@@ -84,7 +80,7 @@ foo : bar
procCPUInfo = savedProcCPUInfo
}()
_, _, err = getCPUDetails()
_, _, err := getCPUDetails()
// ENOENT
assert.Error(t, err)
assert.True(t, os.IsNotExist(err))

View File

@@ -9,7 +9,6 @@
package main
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
@@ -18,9 +17,7 @@ import (
func testSetCPUTypeGeneric(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
savedArchRequiredCPUFlags := archRequiredCPUFlags
savedArchRequiredCPUAttribs := archRequiredCPUAttribs

View File

@@ -7,7 +7,6 @@ package main
import (
"fmt"
"os"
"path/filepath"
"testing"
@@ -118,11 +117,7 @@ func TestArchKernelParamHandler(t *testing.T) {
func TestKvmIsUsable(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
savedKvmDevice := kvmDevice
fakeKVMDevice := filepath.Join(dir, "kvm")
@@ -132,7 +127,7 @@ func TestKvmIsUsable(t *testing.T) {
kvmDevice = savedKvmDevice
}()
err = kvmIsUsable()
err := kvmIsUsable()
assert.Error(err)
err = createEmptyFile(fakeKVMDevice)

View File

@@ -7,7 +7,6 @@ package main
import (
"fmt"
"os"
"path/filepath"
"testing"
@@ -117,11 +116,7 @@ func TestArchKernelParamHandler(t *testing.T) {
func TestKvmIsUsable(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
savedKvmDevice := kvmDevice
fakeKVMDevice := filepath.Join(dir, "kvm")

View File

@@ -155,11 +155,7 @@ func makeCPUInfoFile(path, vendorID, flags string) error {
// nolint: unused, deadcode
func genericTestGetCPUDetails(t *testing.T, validVendor string, validModel string, validContents string, data []testCPUDetail) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
savedProcCPUInfo := procCPUInfo
@@ -172,7 +168,7 @@ func genericTestGetCPUDetails(t *testing.T, validVendor string, validModel strin
procCPUInfo = savedProcCPUInfo
}()
_, _, err = getCPUDetails()
_, _, err := getCPUDetails()
// ENOENT
assert.Error(t, err)
assert.True(t, os.IsNotExist(err))
@@ -197,11 +193,7 @@ func genericTestGetCPUDetails(t *testing.T, validVendor string, validModel strin
func genericCheckCLIFunction(t *testing.T, cpuData []testCPUData, moduleData []testModuleData) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
_, config, err := makeRuntimeConfig(dir)
assert.NoError(err)
@@ -307,15 +299,11 @@ func TestCheckGetCPUInfo(t *testing.T) {
{"foo\n\nbar\nbaz\n\n", "foo", false},
}
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
file := filepath.Join(dir, "cpuinfo")
// file doesn't exist
_, err = getCPUInfo(file)
_, err := getCPUInfo(file)
assert.Error(err)
for _, d := range data {
@@ -527,11 +515,7 @@ func TestCheckHaveKernelModule(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
savedModProbeCmd := modProbeCmd
savedSysModuleDir := sysModuleDir
@@ -545,7 +529,7 @@ func TestCheckHaveKernelModule(t *testing.T) {
sysModuleDir = savedSysModuleDir
}()
err = os.MkdirAll(sysModuleDir, testDirMode)
err := os.MkdirAll(sysModuleDir, testDirMode)
if err != nil {
t.Fatal(err)
}
@@ -577,11 +561,7 @@ func TestCheckHaveKernelModule(t *testing.T) {
func TestCheckCheckKernelModules(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
savedModProbeCmd := modProbeCmd
savedSysModuleDir := sysModuleDir
@@ -595,7 +575,7 @@ func TestCheckCheckKernelModules(t *testing.T) {
sysModuleDir = savedSysModuleDir
}()
err = os.MkdirAll(sysModuleDir, testDirMode)
err := os.MkdirAll(sysModuleDir, testDirMode)
if err != nil {
t.Fatal(err)
}
@@ -662,11 +642,7 @@ func TestCheckCheckKernelModulesUnreadableFile(t *testing.T) {
t.Skip(ktu.TestDisabledNeedNonRoot)
}
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
testData := map[string]kernelModule{
"foo": {
@@ -691,7 +667,7 @@ func TestCheckCheckKernelModulesUnreadableFile(t *testing.T) {
}()
modPath := filepath.Join(sysModuleDir, "foo/parameters")
err = os.MkdirAll(modPath, testDirMode)
err := os.MkdirAll(modPath, testDirMode)
assert.NoError(err)
modParamFile := filepath.Join(modPath, "param1")
@@ -710,11 +686,7 @@ func TestCheckCheckKernelModulesUnreadableFile(t *testing.T) {
func TestCheckCheckKernelModulesInvalidFileContents(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
testData := map[string]kernelModule{
"foo": {
@@ -739,7 +711,7 @@ func TestCheckCheckKernelModulesInvalidFileContents(t *testing.T) {
}()
modPath := filepath.Join(sysModuleDir, "foo/parameters")
err = os.MkdirAll(modPath, testDirMode)
err := os.MkdirAll(modPath, testDirMode)
assert.NoError(err)
modParamFile := filepath.Join(modPath, "param1")
@@ -755,11 +727,7 @@ func TestCheckCheckKernelModulesInvalidFileContents(t *testing.T) {
func TestCheckCLIFunctionFail(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
_, config, err := makeRuntimeConfig(dir)
assert.NoError(err)
@@ -788,11 +756,7 @@ func TestCheckCLIFunctionFail(t *testing.T) {
func TestCheckKernelParamHandler(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
savedModProbeCmd := modProbeCmd
savedSysModuleDir := sysModuleDir
@@ -870,9 +834,7 @@ func TestCheckKernelParamHandler(t *testing.T) {
func TestArchRequiredKernelModules(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
_, config, err := makeRuntimeConfig(tmpdir)
assert.NoError(err)
@@ -885,11 +847,7 @@ func TestArchRequiredKernelModules(t *testing.T) {
return
}
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
savedModProbeCmd := modProbeCmd
savedSysModuleDir := sysModuleDir

View File

@@ -6,7 +6,6 @@
package main
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
@@ -22,9 +21,7 @@ func getExpectedHostDetails(tmpdir string) (HostInfo, error) {
func TestEnvGetEnvInfoSetsCPUType(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
savedArchRequiredCPUFlags := archRequiredCPUFlags
savedArchRequiredCPUAttribs := archRequiredCPUAttribs

View File

@@ -9,7 +9,6 @@
package main
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
@@ -18,9 +17,7 @@ import (
func testEnvGetEnvInfoSetsCPUTypeGeneric(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
savedArchRequiredCPUFlags := archRequiredCPUFlags
savedArchRequiredCPUAttribs := archRequiredCPUAttribs

View File

@@ -364,11 +364,7 @@ func TestEnvGetMetaInfo(t *testing.T) {
}
func TestEnvGetHostInfo(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
expectedHostDetails, err := getExpectedHostDetails(tmpdir)
assert.NoError(t, err)
@@ -389,13 +385,9 @@ func TestEnvGetHostInfo(t *testing.T) {
}
func TestEnvGetHostInfoNoProcCPUInfo(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
_, err = getExpectedHostDetails(tmpdir)
_, err := getExpectedHostDetails(tmpdir)
assert.NoError(t, err)
err = os.Remove(procCPUInfo)
@@ -406,13 +398,9 @@ func TestEnvGetHostInfoNoProcCPUInfo(t *testing.T) {
}
func TestEnvGetHostInfoNoOSRelease(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
_, err = getExpectedHostDetails(tmpdir)
_, err := getExpectedHostDetails(tmpdir)
assert.NoError(t, err)
err = os.Remove(osRelease)
@@ -423,13 +411,9 @@ func TestEnvGetHostInfoNoOSRelease(t *testing.T) {
}
func TestEnvGetHostInfoNoProcVersion(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
_, err = getExpectedHostDetails(tmpdir)
_, err := getExpectedHostDetails(tmpdir)
assert.NoError(t, err)
err = os.Remove(procVersion)
@@ -440,11 +424,7 @@ func TestEnvGetHostInfoNoProcVersion(t *testing.T) {
}
func TestEnvGetEnvInfo(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
// Run test twice to ensure the individual component debug+trace
// options are tested.
@@ -474,9 +454,7 @@ func TestEnvGetEnvInfo(t *testing.T) {
func TestEnvGetEnvInfoNoHypervisorVersion(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
configFile, config, err := makeRuntimeConfig(tmpdir)
assert.NoError(err)
@@ -501,20 +479,14 @@ func TestEnvGetEnvInfoNoHypervisorVersion(t *testing.T) {
func TestEnvGetEnvInfoAgentError(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
_, _, err = makeRuntimeConfig(tmpdir)
_, _, err := makeRuntimeConfig(tmpdir)
assert.NoError(err)
}
func TestEnvGetEnvInfoNoOSRelease(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
configFile, config, err := makeRuntimeConfig(tmpdir)
assert.NoError(t, err)
@@ -530,11 +502,7 @@ func TestEnvGetEnvInfoNoOSRelease(t *testing.T) {
}
func TestEnvGetEnvInfoNoProcCPUInfo(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
configFile, config, err := makeRuntimeConfig(tmpdir)
assert.NoError(t, err)
@@ -550,11 +518,7 @@ func TestEnvGetEnvInfoNoProcCPUInfo(t *testing.T) {
}
func TestEnvGetEnvInfoNoProcVersion(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
configFile, config, err := makeRuntimeConfig(tmpdir)
assert.NoError(t, err)
@@ -570,11 +534,7 @@ func TestEnvGetEnvInfoNoProcVersion(t *testing.T) {
}
func TestEnvGetRuntimeInfo(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
configFile, config, err := makeRuntimeConfig(tmpdir)
assert.NoError(t, err)
@@ -587,11 +547,7 @@ func TestEnvGetRuntimeInfo(t *testing.T) {
}
func TestEnvGetAgentInfo(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
_, config, err := makeRuntimeConfig(tmpdir)
assert.NoError(t, err)
@@ -726,11 +682,7 @@ func testEnvShowJSONSettings(t *testing.T, tmpdir string, tmpfile *os.File) erro
}
func TestEnvShowSettings(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
tmpfile, err := os.CreateTemp("", "envShowSettings-")
assert.NoError(t, err)
@@ -747,11 +699,7 @@ func TestEnvShowSettings(t *testing.T) {
}
func TestEnvShowSettingsInvalidFile(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
tmpfile, err := os.CreateTemp("", "envShowSettings-")
assert.NoError(t, err)
@@ -771,11 +719,7 @@ func TestEnvShowSettingsInvalidFile(t *testing.T) {
}
func TestEnvHandleSettings(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
configFile, config, err := makeRuntimeConfig(tmpdir)
assert.NoError(t, err)
@@ -805,9 +749,7 @@ func TestEnvHandleSettings(t *testing.T) {
func TestEnvHandleSettingsInvalidParams(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
configFile, _, err := makeRuntimeConfig(tmpdir)
assert.NoError(err)
@@ -859,11 +801,7 @@ func TestEnvHandleSettingsInvalidRuntimeConfigType(t *testing.T) {
}
func TestEnvCLIFunction(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
configFile, config, err := makeRuntimeConfig(tmpdir)
assert.NoError(t, err)
@@ -904,11 +842,7 @@ func TestEnvCLIFunction(t *testing.T) {
}
func TestEnvCLIFunctionFail(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
configFile, config, err := makeRuntimeConfig(tmpdir)
assert.NoError(t, err)
@@ -940,9 +874,7 @@ func TestEnvCLIFunctionFail(t *testing.T) {
func TestGetHypervisorInfo(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
_, config, err := makeRuntimeConfig(tmpdir)
assert.NoError(err)
@@ -962,9 +894,7 @@ func TestGetHypervisorInfo(t *testing.T) {
func TestGetHypervisorInfoSocket(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
_, config, err := makeRuntimeConfig(tmpdir)
assert.NoError(err)

View File

@@ -258,14 +258,12 @@ func TestMainBeforeSubCommands(t *testing.T) {
func TestMainBeforeSubCommandsInvalidLogFile(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "katatest")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
logFile := filepath.Join(tmpdir, "log")
// create the file as the wrong type to force a failure
err = os.MkdirAll(logFile, testDirMode)
err := os.MkdirAll(logFile, testDirMode)
assert.NoError(err)
set := flag.NewFlagSet("", 0)
@@ -281,9 +279,7 @@ func TestMainBeforeSubCommandsInvalidLogFile(t *testing.T) {
func TestMainBeforeSubCommandsInvalidLogFormat(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "katatest")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
logFile := filepath.Join(tmpdir, "log")
@@ -302,7 +298,7 @@ func TestMainBeforeSubCommandsInvalidLogFormat(t *testing.T) {
ctx := createCLIContext(set)
err = beforeSubcommands(ctx)
err := beforeSubcommands(ctx)
assert.Error(err)
assert.NotNil(kataLog.Logger.Out)
}
@@ -310,9 +306,7 @@ func TestMainBeforeSubCommandsInvalidLogFormat(t *testing.T) {
func TestMainBeforeSubCommandsLoadConfigurationFail(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "katatest")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
logFile := filepath.Join(tmpdir, "log")
configFile := filepath.Join(tmpdir, "config")
@@ -345,9 +339,7 @@ func TestMainBeforeSubCommandsLoadConfigurationFail(t *testing.T) {
func TestMainBeforeSubCommandsShowCCConfigPaths(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "katatest")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
set := flag.NewFlagSet("", 0)
set.Bool("show-default-config-paths", true, "")
@@ -409,9 +401,7 @@ func TestMainBeforeSubCommandsShowCCConfigPaths(t *testing.T) {
func TestMainFatal(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "katatest")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
var exitStatus int
savedExitFunc := exitFunc
@@ -633,9 +623,7 @@ func TestMainCreateRuntime(t *testing.T) {
func TestMainVersionPrinter(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "katatest")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
savedOutputFile := defaultOutputFile

View File

@@ -17,18 +17,14 @@ import (
)
func TestFileExists(t *testing.T) {
dir, err := os.MkdirTemp("", "katatest")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
file := filepath.Join(dir, "foo")
assert.False(t, katautils.FileExists(file),
fmt.Sprintf("File %q should not exist", file))
err = createEmptyFile(file)
err := createEmptyFile(file)
if err != nil {
t.Fatal(err)
}
@@ -54,14 +50,10 @@ func TestGetKernelVersion(t *testing.T) {
{validContents, validVersion, false},
}
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
subDir := filepath.Join(tmpdir, "subdir")
err = os.MkdirAll(subDir, testDirMode)
err := os.MkdirAll(subDir, testDirMode)
assert.NoError(t, err)
_, err = getKernelVersion()
@@ -103,11 +95,7 @@ func TestGetDistroDetails(t *testing.T) {
const unknown = "<<unknown>>"
tmpdir, err := os.MkdirTemp("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
testOSRelease := filepath.Join(tmpdir, "os-release")
testOSReleaseClr := filepath.Join(tmpdir, "os-release-clr")
@@ -131,7 +119,7 @@ VERSION_ID="%s"
`, nonClrExpectedName, nonClrExpectedVersion)
subDir := filepath.Join(tmpdir, "subdir")
err = os.MkdirAll(subDir, testDirMode)
err := os.MkdirAll(subDir, testDirMode)
assert.NoError(t, err)
// override

View File

@@ -382,9 +382,7 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config string, err err
func TestCreateLoadRuntimeConfig(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
config, err := createAllRuntimeConfigFiles(tmpdir, "qemu")
assert.NoError(err)

View File

@@ -776,6 +776,8 @@ func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *ptypes.E
return empty, errors.New("The exec process does not exist")
}
processStatus = execs.status
} else {
r.All = true
}
// According to CRI specs, kubelet will call StopPodSandbox()

View File

@@ -8,12 +8,14 @@ package containerdshim
import (
"context"
"fmt"
"github.com/sirupsen/logrus"
"github.com/containerd/containerd/api/types/task"
"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils"
)
func startContainer(ctx context.Context, s *service, c *container) (retErr error) {
shimLog.WithField("container", c.id).Debug("start container")
defer func() {
if retErr != nil {
// notify the wait goroutine to continue
@@ -78,7 +80,8 @@ func startContainer(ctx context.Context, s *service, c *container) (retErr error
return err
}
c.ttyio = tty
go ioCopy(c.exitIOch, c.stdinCloser, tty, stdin, stdout, stderr)
go ioCopy(shimLog.WithField("container", c.id), c.exitIOch, c.stdinCloser, tty, stdin, stdout, stderr)
} else {
// close the io exit channel, since there is no io for this container,
// otherwise the following wait goroutine will hang on this channel.
@@ -94,6 +97,10 @@ func startContainer(ctx context.Context, s *service, c *container) (retErr error
}
func startExec(ctx context.Context, s *service, containerID, execID string) (e *exec, retErr error) {
shimLog.WithFields(logrus.Fields{
"container": containerID,
"exec": execID,
}).Debug("start container execution")
// start an exec
c, err := s.getContainer(containerID)
if err != nil {
@@ -140,7 +147,10 @@ func startExec(ctx context.Context, s *service, containerID, execID string) (e *
}
execs.ttyio = tty
go ioCopy(execs.exitIOch, execs.stdinCloser, tty, stdin, stdout, stderr)
go ioCopy(shimLog.WithFields(logrus.Fields{
"container": c.id,
"exec": execID,
}), execs.exitIOch, execs.stdinCloser, tty, stdin, stdout, stderr)
go wait(ctx, s, c, execID)
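
> Both start paths now pre-build a `*logrus.Entry` carrying the container (and exec) IDs and hand it to `ioCopy`, so every stream-copy log line is attributable without repeating fields. A sketch of the pattern, with illustrative IDs:

```go
package example

import "github.com/sirupsen/logrus"

func main() {
	shimLog := logrus.WithField("source", "containerd-kata-shim-v2")

	// Build the entry once; every log call through it inherits the fields.
	log := shimLog.WithFields(logrus.Fields{
		"container": "abc123", // illustrative IDs, not from this patch
		"exec":      "exec-0",
	})
	log.Debug("start container execution")
}
```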

View File

@@ -12,6 +12,7 @@ import (
"syscall"
"github.com/containerd/fifo"
"github.com/sirupsen/logrus"
)
// The buffer size used to specify the buffer for IO streams copy
@@ -86,18 +87,20 @@ func newTtyIO(ctx context.Context, stdin, stdout, stderr string, console bool) (
return ttyIO, nil
}
func ioCopy(exitch, stdinCloser chan struct{}, tty *ttyIO, stdinPipe io.WriteCloser, stdoutPipe, stderrPipe io.Reader) {
func ioCopy(shimLog *logrus.Entry, exitch, stdinCloser chan struct{}, tty *ttyIO, stdinPipe io.WriteCloser, stdoutPipe, stderrPipe io.Reader) {
var wg sync.WaitGroup
if tty.Stdin != nil {
wg.Add(1)
go func() {
shimLog.Debug("stdin io stream copy started")
p := bufPool.Get().(*[]byte)
defer bufPool.Put(p)
io.CopyBuffer(stdinPipe, tty.Stdin, *p)
// notify that we can close process's io safely.
close(stdinCloser)
wg.Done()
shimLog.Debug("stdin io stream copy exited")
}()
}
@@ -105,6 +108,7 @@ func ioCopy(exitch, stdinCloser chan struct{}, tty *ttyIO, stdinPipe io.WriteClo
wg.Add(1)
go func() {
shimLog.Debug("stdout io stream copy started")
p := bufPool.Get().(*[]byte)
defer bufPool.Put(p)
io.CopyBuffer(tty.Stdout, stdoutPipe, *p)
@@ -113,20 +117,24 @@ func ioCopy(exitch, stdinCloser chan struct{}, tty *ttyIO, stdinPipe io.WriteClo
// close stdin to make the other routine stop
tty.Stdin.Close()
}
shimLog.Debug("stdout io stream copy exited")
}()
}
if tty.Stderr != nil && stderrPipe != nil {
wg.Add(1)
go func() {
shimLog.Debug("stderr io stream copy started")
p := bufPool.Get().(*[]byte)
defer bufPool.Put(p)
io.CopyBuffer(tty.Stderr, stderrPipe, *p)
wg.Done()
shimLog.Debug("stderr io stream copy exited")
}()
}
wg.Wait()
tty.close()
close(exitch)
shimLog.Debug("all io stream copy goroutines exited")
}
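
> For context, the shape of `ioCopy` after this change: one goroutine per stream, each now logging start/exit against the per-container logger, with the `WaitGroup` gating the final `close(exitch)`. A trimmed sketch under those assumptions (hypothetical names, stdin/close handling omitted):

```go
package example

import (
	"io"
	"sync"

	"github.com/sirupsen/logrus"
)

func copyStreams(log *logrus.Entry, exitCh chan struct{}, dst io.Writer, srcs ...io.Reader) {
	var wg sync.WaitGroup
	for i, src := range srcs {
		wg.Add(1)
		go func(i int, src io.Reader) {
			defer wg.Done()
			log.WithField("stream", i).Debug("io stream copy started")
			_, _ = io.Copy(dst, src) // copy errors intentionally ignored, as in the shim
			log.WithField("stream", i).Debug("io stream copy exited")
		}(i, src)
	}
	wg.Wait()
	close(exitCh) // signal the waiter that all io has drained
}
```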

View File

@@ -7,6 +7,7 @@ package containerdshim
import (
"context"
"github.com/sirupsen/logrus"
"io"
"os"
"path/filepath"
@@ -25,9 +26,7 @@ func TestNewTtyIOFifoReopen(t *testing.T) {
assert := assert.New(t)
ctx := context.TODO()
testDir, err := os.MkdirTemp("", "kata-")
assert.NoError(err)
defer os.RemoveAll(testDir)
testDir := t.TempDir()
fifoPath, err := os.MkdirTemp(testDir, "fifo-path-")
assert.NoError(err)
@@ -103,9 +102,7 @@ func TestIoCopy(t *testing.T) {
testBytes2 := []byte("Test2")
testBytes3 := []byte("Test3")
testDir, err := os.MkdirTemp("", "kata-")
assert.NoError(err)
defer os.RemoveAll(testDir)
testDir := t.TempDir()
fifoPath, err := os.MkdirTemp(testDir, "fifo-path-")
assert.NoError(err)
@@ -179,7 +176,7 @@ func TestIoCopy(t *testing.T) {
defer tty.close()
// start the ioCopy threads : copy from src to dst
go ioCopy(exitioch, stdinCloser, tty, dstInW, srcOutR, srcErrR)
go ioCopy(logrus.WithContext(context.Background()), exitioch, stdinCloser, tty, dstInW, srcOutR, srcErrR)
var firstW, secondW, thirdW io.WriteCloser
var firstR, secondR, thirdR io.Reader

View File

@@ -15,7 +15,6 @@ import (
"github.com/containerd/containerd/api/types/task"
"github.com/containerd/containerd/mount"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"github.com/kata-containers/kata-containers/src/runtime/pkg/oci"
)
@@ -31,12 +30,17 @@ func wait(ctx context.Context, s *service, c *container, execID string) (int32,
if execID == "" {
//wait until the io streams are closed, then wait for the container
<-c.exitIOch
shimLog.WithField("container", c.id).Debug("The container io streams closed")
} else {
execs, err = c.getExec(execID)
if err != nil {
return exitCode255, err
}
<-execs.exitIOch
shimLog.WithFields(logrus.Fields{
"container": c.id,
"exec": execID,
}).Debug("The container process io streams closed")
//This wait could be triggered before the exec has started, which
//is what fetches the exec's id; thus this assignment must happen
//after the exec exits, to make sure it gets the exec's id.
@@ -63,6 +67,7 @@ func wait(ctx context.Context, s *service, c *container, execID string) (int32,
if c.cType.IsSandbox() {
// cancel watcher
if s.monitor != nil {
shimLog.WithField("sandbox", s.sandbox.ID()).Info("cancel watcher")
s.monitor <- nil
}
if err = s.sandbox.Stop(ctx, true); err != nil {
@@ -82,13 +87,17 @@ func wait(ctx context.Context, s *service, c *container, execID string) (int32,
c.exitTime = timeStamp
c.exitCh <- uint32(ret)
shimLog.WithField("container", c.id).Debug("The container status is StatusStopped")
} else {
execs.status = task.StatusStopped
execs.exitCode = ret
execs.exitTime = timeStamp
execs.exitCh <- uint32(ret)
shimLog.WithFields(logrus.Fields{
"container": c.id,
"exec": execID,
}).Debug("The container exec status is StatusStopped")
}
s.mu.Unlock()
@@ -102,6 +111,7 @@ func watchSandbox(ctx context.Context, s *service) {
return
}
err := <-s.monitor
shimLog.WithError(err).WithField("sandbox", s.sandbox.ID()).Info("watchSandbox gets an error or stop signal")
if err == nil {
return
}
@@ -147,13 +157,11 @@ func watchOOMEvents(ctx context.Context, s *service) {
default:
containerID, err := s.sandbox.GetOOMEvent(ctx)
if err != nil {
shimLog.WithError(err).Warn("failed to get OOM event from sandbox")
// If the GetOOMEvent call is not implemented, then the agent is most likely an older version,
// stop attempting to get OOM events.
// for rust agent, the response code is not found
if isGRPCErrorCode(codes.NotFound, err) || err.Error() == "Dead agent" {
if err.Error() == "ttrpc: closed" || err.Error() == "Dead agent" {
shimLog.WithError(err).Warn("agent has shutdown, return from watching of OOM events")
return
}
shimLog.WithError(err).Warn("failed to get OOM event from sandbox")
time.Sleep(defaultCheckInterval)
continue
}
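
> The shape of that retry loop, sketched with hypothetical helper names: the only behavioural changes here are which errors count as "agent gone" (the ttrpc close message instead of a gRPC `NotFound` code) and moving the generic warning after the terminal-error check so shutdown is not double-logged.

```go
package example

import "time"

// watchOOM is a hedged sketch: getEvent and handle stand in for the
// sandbox calls. A closed ttrpc connection or a dead agent is terminal;
// anything else is logged by the caller and retried after a pause.
func watchOOM(getEvent func() (string, error), handle func(string)) {
	const checkInterval = time.Second
	for {
		id, err := getEvent()
		if err != nil {
			if err.Error() == "ttrpc: closed" || err.Error() == "Dead agent" {
				return // agent has shut down, stop watching
			}
			time.Sleep(checkInterval)
			continue
		}
		handle(id)
	}
}
```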

View File

@@ -18,9 +18,7 @@ import (
func TestAdd(t *testing.T) {
var err error
kataDirectVolumeRootPath, err = os.MkdirTemp(os.TempDir(), "add-test")
assert.Nil(t, err)
defer os.RemoveAll(kataDirectVolumeRootPath)
kataDirectVolumeRootPath = t.TempDir()
var volumePath = "/a/b/c"
var basePath = "a"
actual := MountInfo{
@@ -54,9 +52,7 @@ func TestAdd(t *testing.T) {
func TestRecordSandboxId(t *testing.T) {
var err error
kataDirectVolumeRootPath, err = os.MkdirTemp(os.TempDir(), "recordSanboxId-test")
assert.Nil(t, err)
defer os.RemoveAll(kataDirectVolumeRootPath)
kataDirectVolumeRootPath = t.TempDir()
var volumePath = "/a/b/c"
mntInfo := MountInfo{
@@ -82,9 +78,7 @@ func TestRecordSandboxId(t *testing.T) {
func TestRecordSandboxIdNoMountInfoFile(t *testing.T) {
var err error
kataDirectVolumeRootPath, err = os.MkdirTemp(os.TempDir(), "recordSanboxId-test")
assert.Nil(t, err)
defer os.RemoveAll(kataDirectVolumeRootPath)
kataDirectVolumeRootPath = t.TempDir()
var volumePath = "/a/b/c"
sandboxId := uuid.Generate().String()

View File

@@ -271,14 +271,12 @@ func TestGetFileContents(t *testing.T) {
{"foo\nbar"},
}
dir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(dir)
dir := t.TempDir()
file := filepath.Join(dir, "foo")
// file doesn't exist
_, err = getFileContents(file)
_, err := getFileContents(file)
assert.Error(err)
for _, d := range data {

View File

@@ -469,11 +469,13 @@ func (h hypervisor) getInitrdAndImage() (initrd string, image string, err error)
image, errImage := h.image()
if image != "" && initrd != "" {
if h.ConfidentialGuest && h.MachineType == vc.QemuCCWVirtio {
if image != "" || initrd != "" {
return "", "", errors.New("Neither the image nor initrd path may be set for Secure Execution")
}
} else if image != "" && initrd != "" {
return "", "", errors.New("having both an image and an initrd defined in the configuration file is not supported")
}
if errInitrd != nil && errImage != nil {
} else if errInitrd != nil && errImage != nil {
return "", "", fmt.Errorf("Either initrd or image must be set to a valid path (initrd: %v) (image: %v)", errInitrd, errImage)
}
@@ -605,16 +607,6 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
return vc.HypervisorConfig{}, err
}
if image != "" && initrd != "" {
return vc.HypervisorConfig{},
errors.New("having both an image and an initrd defined in the configuration file is not supported")
}
if image == "" && initrd == "" {
return vc.HypervisorConfig{},
errors.New("either image or initrd must be defined in the configuration file")
}
firmware, err := h.firmware()
if err != nil {
return vc.HypervisorConfig{}, err
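
> Taken together, this hunk and the `Valid()` change later in the patch centralize the boot-asset rules in one place. A hedged condensation of the resulting decision table (standalone function, not the patch's actual API):

```go
package example

import "errors"

// validateBootAssets condenses the image/initrd rules: Secure Execution
// guests (confidential + s390-ccw-virtio) boot from an encrypted kernel
// image that already embeds everything, so neither asset may be set;
// every other configuration needs exactly one of the two.
func validateBootAssets(confidentialGuest bool, machineType, image, initrd string) error {
	secureExecution := confidentialGuest && machineType == "s390-ccw-virtio"
	switch {
	case secureExecution && (image != "" || initrd != ""):
		return errors.New("neither the image nor the initrd path may be set for Secure Execution")
	case !secureExecution && image != "" && initrd != "":
		return errors.New("image and initrd cannot both be set")
	case !secureExecution && image == "" && initrd == "":
		return errors.New("either image or initrd must be set")
	}
	return nil
}
```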

View File

@@ -279,17 +279,13 @@ func testLoadConfiguration(t *testing.T, dir string,
}
func TestConfigLoadConfiguration(t *testing.T) {
tmpdir, err := os.MkdirTemp(testDir, "load-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
testLoadConfiguration(t, tmpdir, nil)
}
func TestConfigLoadConfigurationFailBrokenSymLink(t *testing.T) {
tmpdir, err := os.MkdirTemp(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
@@ -297,7 +293,7 @@ func TestConfigLoadConfigurationFailBrokenSymLink(t *testing.T) {
if configFile == config.ConfigPathLink {
// break the symbolic link
err = os.Remove(config.ConfigPathLink)
err := os.Remove(config.ConfigPathLink)
if err != nil {
return expectFail, err
}
@@ -310,9 +306,7 @@ func TestConfigLoadConfigurationFailBrokenSymLink(t *testing.T) {
}
func TestConfigLoadConfigurationFailSymLinkLoop(t *testing.T) {
tmpdir, err := os.MkdirTemp(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
@@ -320,13 +314,13 @@ func TestConfigLoadConfigurationFailSymLinkLoop(t *testing.T) {
if configFile == config.ConfigPathLink {
// remove the config file
err = os.Remove(config.ConfigPath)
err := os.Remove(config.ConfigPath)
if err != nil {
return expectFail, err
}
// now, create a sym-link loop
err := os.Symlink(config.ConfigPathLink, config.ConfigPath)
err = os.Symlink(config.ConfigPathLink, config.ConfigPath)
if err != nil {
return expectFail, err
}
@@ -339,15 +333,13 @@ func TestConfigLoadConfigurationFailSymLinkLoop(t *testing.T) {
}
func TestConfigLoadConfigurationFailMissingHypervisor(t *testing.T) {
tmpdir, err := os.MkdirTemp(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
expectFail := true
err = os.Remove(config.RuntimeConfig.HypervisorConfig.HypervisorPath)
err := os.Remove(config.RuntimeConfig.HypervisorConfig.HypervisorPath)
if err != nil {
return expectFail, err
}
@@ -357,15 +349,13 @@ func TestConfigLoadConfigurationFailMissingHypervisor(t *testing.T) {
}
func TestConfigLoadConfigurationFailMissingImage(t *testing.T) {
tmpdir, err := os.MkdirTemp(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
expectFail := true
err = os.Remove(config.RuntimeConfig.HypervisorConfig.ImagePath)
err := os.Remove(config.RuntimeConfig.HypervisorConfig.ImagePath)
if err != nil {
return expectFail, err
}
@@ -375,15 +365,13 @@ func TestConfigLoadConfigurationFailMissingImage(t *testing.T) {
}
func TestConfigLoadConfigurationFailMissingKernel(t *testing.T) {
tmpdir, err := os.MkdirTemp(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
expectFail := true
err = os.Remove(config.RuntimeConfig.HypervisorConfig.KernelPath)
err := os.Remove(config.RuntimeConfig.HypervisorConfig.KernelPath)
if err != nil {
return expectFail, err
}
@@ -397,16 +385,14 @@ func TestConfigLoadConfigurationFailUnreadableConfig(t *testing.T) {
t.Skip(ktu.TestDisabledNeedNonRoot)
}
tmpdir, err := os.MkdirTemp(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
expectFail := true
// make file unreadable by non-root user
err = os.Chmod(config.ConfigPath, 0000)
err := os.Chmod(config.ConfigPath, 0000)
if err != nil {
return expectFail, err
}
@@ -420,9 +406,7 @@ func TestConfigLoadConfigurationFailTOMLConfigFileInvalidContents(t *testing.T)
t.Skip(ktu.TestDisabledNeedNonRoot)
}
tmpdir, err := os.MkdirTemp(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
@@ -446,9 +430,7 @@ func TestConfigLoadConfigurationFailTOMLConfigFileDuplicatedData(t *testing.T) {
t.Skip(ktu.TestDisabledNeedNonRoot)
}
tmpdir, err := os.MkdirTemp(testDir, "runtime-config-")
assert.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
testLoadConfiguration(t, tmpdir,
func(config testRuntimeConfig, configFile string, ignoreLogging bool) (bool, error) {
@@ -471,11 +453,7 @@ func TestConfigLoadConfigurationFailTOMLConfigFileDuplicatedData(t *testing.T) {
}
func TestMinimalRuntimeConfig(t *testing.T) {
dir, err := os.MkdirTemp(testDir, "minimal-runtime-config-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
hypervisorPath := path.Join(dir, "hypervisor")
defaultHypervisorPath = hypervisorPath
@@ -510,7 +488,7 @@ func TestMinimalRuntimeConfig(t *testing.T) {
defaultKernelPath = kernelPath
for _, file := range []string{defaultImagePath, defaultInitrdPath, defaultHypervisorPath, defaultJailerPath, defaultKernelPath} {
err = WriteFile(file, "foo", testFileMode)
err := WriteFile(file, "foo", testFileMode)
if err != nil {
t.Fatal(err)
}
@@ -531,7 +509,7 @@ func TestMinimalRuntimeConfig(t *testing.T) {
utils.VHostVSockDevicePath = "/dev/null"
configPath := path.Join(dir, "runtime.toml")
err = createConfig(configPath, runtimeMinimalConfig)
err := createConfig(configPath, runtimeMinimalConfig)
if err != nil {
t.Fatal(err)
}
@@ -600,11 +578,7 @@ func TestMinimalRuntimeConfig(t *testing.T) {
}
func TestNewQemuHypervisorConfig(t *testing.T) {
dir, err := os.MkdirTemp(testDir, "hypervisor-config-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
hypervisorPath := path.Join(dir, "hypervisor")
kernelPath := path.Join(dir, "kernel")
@@ -699,11 +673,7 @@ func TestNewQemuHypervisorConfig(t *testing.T) {
}
func TestNewFirecrackerHypervisorConfig(t *testing.T) {
dir, err := os.MkdirTemp(testDir, "hypervisor-config-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
hypervisorPath := path.Join(dir, "hypervisor")
kernelPath := path.Join(dir, "kernel")
@@ -794,9 +764,7 @@ func TestNewFirecrackerHypervisorConfig(t *testing.T) {
func TestNewQemuHypervisorConfigImageAndInitrd(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp(testDir, "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
imagePath := filepath.Join(tmpdir, "image")
initrdPath := filepath.Join(tmpdir, "initrd")
@@ -804,7 +772,7 @@ func TestNewQemuHypervisorConfigImageAndInitrd(t *testing.T) {
kernelPath := path.Join(tmpdir, "kernel")
for _, file := range []string{imagePath, initrdPath, hypervisorPath, kernelPath} {
err = createEmptyFile(file)
err := createEmptyFile(file)
assert.NoError(err)
}
@@ -826,7 +794,7 @@ func TestNewQemuHypervisorConfigImageAndInitrd(t *testing.T) {
PCIeRootPort: pcieRootPort,
}
_, err = newQemuHypervisorConfig(hypervisor)
_, err := newQemuHypervisorConfig(hypervisor)
// specifying both an image+initrd is invalid
assert.Error(err)
@@ -836,9 +804,7 @@ func TestNewClhHypervisorConfig(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp(testDir, "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
hypervisorPath := path.Join(tmpdir, "hypervisor")
kernelPath := path.Join(tmpdir, "kernel")
@@ -846,7 +812,7 @@ func TestNewClhHypervisorConfig(t *testing.T) {
virtioFsDaemon := path.Join(tmpdir, "virtiofsd")
for _, file := range []string{imagePath, hypervisorPath, kernelPath, virtioFsDaemon} {
err = createEmptyFile(file)
err := createEmptyFile(file)
assert.NoError(err)
}
@@ -931,14 +897,12 @@ func TestHypervisorDefaults(t *testing.T) {
func TestHypervisorDefaultsHypervisor(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp(testDir, "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
testHypervisorPath := filepath.Join(tmpdir, "hypervisor")
testHypervisorLinkPath := filepath.Join(tmpdir, "hypervisor-link")
err = createEmptyFile(testHypervisorPath)
err := createEmptyFile(testHypervisorPath)
assert.NoError(err)
err = syscall.Symlink(testHypervisorPath, testHypervisorLinkPath)
@@ -967,14 +931,12 @@ func TestHypervisorDefaultsHypervisor(t *testing.T) {
func TestHypervisorDefaultsKernel(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp(testDir, "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
testKernelPath := filepath.Join(tmpdir, "kernel")
testKernelLinkPath := filepath.Join(tmpdir, "kernel-link")
err = createEmptyFile(testKernelPath)
err := createEmptyFile(testKernelPath)
assert.NoError(err)
err = syscall.Symlink(testKernelPath, testKernelLinkPath)
@@ -1010,14 +972,12 @@ func TestHypervisorDefaultsKernel(t *testing.T) {
func TestHypervisorDefaultsInitrd(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp(testDir, "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
testInitrdPath := filepath.Join(tmpdir, "initrd")
testInitrdLinkPath := filepath.Join(tmpdir, "initrd-link")
err = createEmptyFile(testInitrdPath)
err := createEmptyFile(testInitrdPath)
assert.NoError(err)
err = syscall.Symlink(testInitrdPath, testInitrdLinkPath)
@@ -1047,14 +1007,12 @@ func TestHypervisorDefaultsInitrd(t *testing.T) {
func TestHypervisorDefaultsImage(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp(testDir, "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
testImagePath := filepath.Join(tmpdir, "image")
testImageLinkPath := filepath.Join(tmpdir, "image-link")
err = createEmptyFile(testImagePath)
err := createEmptyFile(testImagePath)
assert.NoError(err)
err = syscall.Symlink(testImagePath, testImageLinkPath)
@@ -1142,16 +1100,14 @@ func TestGetDefaultConfigFilePaths(t *testing.T) {
func TestGetDefaultConfigFile(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp(testDir, "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
hypervisor := "qemu"
confDir := filepath.Join(tmpdir, "conf")
sysConfDir := filepath.Join(tmpdir, "sysconf")
for _, dir := range []string{confDir, sysConfDir} {
err = os.MkdirAll(dir, testDirMode)
err := os.MkdirAll(dir, testDirMode)
assert.NoError(err)
}
@@ -1449,11 +1405,7 @@ func TestUpdateRuntimeConfigurationInvalidKernelParams(t *testing.T) {
func TestCheckHypervisorConfig(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp(testDir, "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
// Not created on purpose
imageENOENT := filepath.Join(dir, "image-ENOENT.img")
@@ -1463,7 +1415,7 @@ func TestCheckHypervisorConfig(t *testing.T) {
initrdEmpty := filepath.Join(dir, "initrd-empty.img")
for _, file := range []string{imageEmpty, initrdEmpty} {
err = createEmptyFile(file)
err := createEmptyFile(file)
assert.NoError(err)
}
@@ -1478,7 +1430,7 @@ func TestCheckHypervisorConfig(t *testing.T) {
fileData := strings.Repeat("X", int(fileSizeBytes))
for _, file := range []string{image, initrd} {
err = WriteFile(file, fileData, testFileMode)
err := WriteFile(file, fileData, testFileMode)
assert.NoError(err)
}
@@ -1612,19 +1564,15 @@ func TestCheckFactoryConfig(t *testing.T) {
func TestValidateBindMounts(t *testing.T) {
assert := assert.New(t)
tmpdir1, err := os.MkdirTemp(testDir, "tmp1-")
assert.NoError(err)
defer os.RemoveAll(tmpdir1)
tmpdir1 := t.TempDir()
tmpdir2, err := os.MkdirTemp(testDir, "tmp2-")
assert.NoError(err)
defer os.RemoveAll(tmpdir2)
tmpdir2 := t.TempDir()
duplicate1 := filepath.Join(tmpdir1, "cat.txt")
duplicate2 := filepath.Join(tmpdir2, "cat.txt")
unique := filepath.Join(tmpdir1, "foobar.txt")
err = os.WriteFile(duplicate1, []byte("kibble-monster"), 0644)
err := os.WriteFile(duplicate1, []byte("kibble-monster"), 0644)
assert.NoError(err)
err = os.WriteFile(duplicate2, []byte("furbag"), 0644)

View File

@@ -124,14 +124,10 @@ func TestSetEphemeralStorageType(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp(testDir, "foo")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
ephePath := filepath.Join(dir, vc.K8sEmptyDir, "tmp-volume")
err = os.MkdirAll(ephePath, testDirMode)
err := os.MkdirAll(ephePath, testDirMode)
assert.Nil(err)
err = syscall.Mount("tmpfs", ephePath, "tmpfs", 0, "")
@@ -308,17 +304,13 @@ func TestCreateSandboxAnnotations(t *testing.T) {
func TestCheckForFips(t *testing.T) {
assert := assert.New(t)
path, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(path)
val := procFIPS
procFIPS = filepath.Join(path, "fips-enabled")
procFIPS = filepath.Join(t.TempDir(), "fips-enabled")
defer func() {
procFIPS = val
}()
err = os.WriteFile(procFIPS, []byte("1"), 0644)
err := os.WriteFile(procFIPS, []byte("1"), 0644)
assert.NoError(err)
hconfig := vc.HypervisorConfig{

View File

@@ -20,7 +20,7 @@ import (
var testKeyHook = "test-key"
var testContainerIDHook = "test-container-id"
var testControllerIDHook = "test-controller-id"
var testBinHookPath = "/usr/bin/virtcontainers/bin/test/hook"
var testBinHookPath = "mockhook/hook"
var testBundlePath = "/test/bundle"
func getMockHookBinPath() string {

View File

@@ -0,0 +1 @@
hook

View File

@@ -0,0 +1,21 @@
# Copyright Red Hat.
#
# SPDX-License-Identifier: Apache-2.0
#
BIN = hook
SRC = hook.go
V = @
Q = $(V:1=)
QUIET_BUILD = $(Q:@=@echo ' BUILD '$@;)
BUILDFLAGS =
all: $(BIN)
$(BIN): $(SRC)
$(QUIET_BUILD)go build $(BUILDFLAGS) -o $@ $^
clean:
rm -f $(BIN)

View File

@@ -23,15 +23,13 @@ import (
func TestGetNetNsFromBindMount(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
mountFile := filepath.Join(tmpdir, "mountInfo")
nsPath := filepath.Join(tmpdir, "ns123")
// Non-existent namespace path
_, err = getNetNsFromBindMount(nsPath, mountFile)
_, err := getNetNsFromBindMount(nsPath, mountFile)
assert.NotNil(err)
tmpNSPath := filepath.Join(tmpdir, "testNetNs")
@@ -85,9 +83,7 @@ func TestHostNetworkingRequested(t *testing.T) {
assert.Error(err)
// Bind-mounted Netns
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
// Create a bind mount to the current network namespace.
tmpFile := filepath.Join(tmpdir, "testNetNs")

View File

@@ -26,10 +26,6 @@ const (
testContainerID = "1"
)
var (
testDir = ""
)
func createFile(file, contents string) error {
return os.WriteFile(file, []byte(contents), testFileMode)
}
@@ -44,17 +40,13 @@ func TestUtilsResolvePathEmptyPath(t *testing.T) {
}
func TestUtilsResolvePathValidPath(t *testing.T) {
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
target := path.Join(dir, "target")
linkDir := path.Join(dir, "a/b/c")
linkFile := path.Join(linkDir, "link")
err = createEmptyFile(target)
err := createEmptyFile(target)
assert.NoError(t, err)
absolute, err := filepath.Abs(target)
@@ -76,16 +68,13 @@ func TestUtilsResolvePathValidPath(t *testing.T) {
}
func TestUtilsResolvePathENOENT(t *testing.T) {
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
dir := t.TempDir()
target := path.Join(dir, "target")
linkDir := path.Join(dir, "a/b/c")
linkFile := path.Join(linkDir, "link")
err = createEmptyFile(target)
err := createEmptyFile(target)
assert.NoError(t, err)
err = os.MkdirAll(linkDir, testDirMode)
@@ -111,16 +100,12 @@ func TestUtilsResolvePathENOENT(t *testing.T) {
func TestFileSize(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp(testDir, "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
file := filepath.Join(dir, "foo")
// ENOENT
_, err = fileSize(file)
_, err := fileSize(file)
assert.Error(err)
err = createEmptyFile(file)
@@ -152,12 +137,10 @@ func TestWriteFileErrWriteFail(t *testing.T) {
func TestWriteFileErrNoPath(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp(testDir, "")
assert.NoError(err)
defer os.RemoveAll(dir)
dir := t.TempDir()
// attempt to write a file over an existing directory
err = WriteFile(dir, "", 0000)
err := WriteFile(dir, "", 0000)
assert.Error(err)
}
@@ -177,16 +160,12 @@ func TestGetFileContents(t *testing.T) {
{"processor : 0\nvendor_id : GenuineIntel\n"},
}
dir, err := os.MkdirTemp(testDir, "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
file := filepath.Join(dir, "foo")
// file doesn't exist
_, err = GetFileContents(file)
_, err := GetFileContents(file)
assert.Error(t, err)
for _, d := range data {

View File

@@ -410,12 +410,10 @@ func TestGetShmSizeBindMounted(t *testing.T) {
t.Skip("Test disabled as requires root privileges")
}
dir, err := os.MkdirTemp("", "")
assert.Nil(t, err)
defer os.RemoveAll(dir)
dir := t.TempDir()
shmPath := filepath.Join(dir, "shm")
err = os.Mkdir(shmPath, 0700)
err := os.Mkdir(shmPath, 0700)
assert.Nil(t, err)
size := 8192
@@ -473,15 +471,13 @@ func TestMain(m *testing.M) {
func TestAddAssetAnnotations(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
// Create a pretend asset file
// (required since the existence of binary asset annotations is verified).
fakeAssetFile := filepath.Join(tmpdir, "fake-binary")
err = os.WriteFile(fakeAssetFile, []byte(""), fileMode)
err := os.WriteFile(fakeAssetFile, []byte(""), fileMode)
assert.NoError(err)
expectedAnnotations := map[string]string{

View File

@@ -51,11 +51,8 @@ func TestGzipAccepted(t *testing.T) {
func TestEnsureDir(t *testing.T) {
const testMode = 0755
tmpdir, err := os.MkdirTemp("", "TestEnsureDir")
assert := assert.New(t)
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
// nolint: govet
testCases := []struct {
@@ -120,9 +117,7 @@ func TestEnsureDir(t *testing.T) {
func TestFirstValidExecutable(t *testing.T) {
assert := assert.New(t)
tmpdir, err := os.MkdirTemp("", "TestFirstValidPath")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
// nolint: govet
testCases := []struct {

View File

@@ -5,11 +5,6 @@
#
PREFIX := /usr
BIN_DIR := $(PREFIX)/bin
VC_BIN_DIR := $(BIN_DIR)/virtcontainers/bin
TEST_BIN_DIR := $(VC_BIN_DIR)/test
HOOK_DIR := hook/mock
HOOK_BIN := hook
MK_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
GOBUILD_FLAGS := -mod=vendor
@@ -25,16 +20,11 @@ QUIET_GOBUILD = $(Q:@=@echo ' GOBUILD '$@;)
# Build
#
all: build binaries
all: build
build:
$(QUIET_GOBUILD)go build $(GOBUILD_FLAGS) $(go list ./... | grep -v /vendor/)
hook:
$(QUIET_GOBUILD)go build $(GOBUILD_FLAGS) -o $(HOOK_DIR)/$@ $(HOOK_DIR)/*.go
binaries: hook
#
# Tests
#
@@ -45,40 +35,7 @@ check-go-static:
bash $(MK_DIR)/../../../ci/static-checks.sh
check-go-test:
bash $(MK_DIR)/../../../ci/go-test.sh \
$(TEST_BIN_DIR)/$(HOOK_BIN)
#
# Install
#
define INSTALL_EXEC
install -D $1 $(VC_BIN_DIR)/ || exit 1;
endef
define INSTALL_TEST_EXEC
install -D $1 $(TEST_BIN_DIR)/ || exit 1;
endef
install:
@mkdir -p $(VC_BIN_DIR)
@mkdir -p $(TEST_BIN_DIR)
$(call INSTALL_TEST_EXEC,$(HOOK_DIR)/$(HOOK_BIN))
#
# Uninstall
#
define UNINSTALL_EXEC
rm -f $(call FILE_SAFE_TO_REMOVE,$(VC_BIN_DIR)/$1) || exit 1;
endef
define UNINSTALL_TEST_EXEC
rm -f $(call FILE_SAFE_TO_REMOVE,$(TEST_BIN_DIR)/$1) || exit 1;
endef
uninstall:
$(call UNINSTALL_TEST_EXEC,$(HOOK_BIN))
bash $(MK_DIR)/../../../ci/go-test.sh
#
# Clean
@@ -90,7 +47,7 @@ define FILE_SAFE_TO_REMOVE =
$(shell test -e "$(1)" && test "$(1)" != "/" && echo "$(1)")
endef
CLEAN_FILES += $(HOOK_DIR)/$(HOOK_BIN)
CLEAN_FILES +=
clean:
rm -f $(foreach f,$(CLEAN_FILES),$(call FILE_SAFE_TO_REMOVE,$(f)))
@@ -98,11 +55,7 @@ clean:
.PHONY: \
all \
build \
hook \
binaries \
check \
check-go-static \
check-go-test \
install \
uninstall \
clean

View File

@@ -259,8 +259,7 @@ func testSetupFakeRootfs(t *testing.T) (testRawFile, loopDev, mntDir string, err
t.Skip(testDisabledAsNonRoot)
}
tmpDir, err := os.MkdirTemp("", "")
assert.NoError(err)
tmpDir := t.TempDir()
testRawFile = filepath.Join(tmpDir, "raw.img")
_, err = os.Stat(testRawFile)
@@ -570,14 +569,9 @@ func TestMountSharedDirMounts(t *testing.T) {
assert := assert.New(t)
testMountPath, err := os.MkdirTemp("", "sandbox-test")
assert.NoError(err)
defer os.RemoveAll(testMountPath)
// create a new shared directory for our test:
kataHostSharedDirSaved := kataHostSharedDir
testHostDir, err := os.MkdirTemp("", "kata-Cleanup")
assert.NoError(err)
testHostDir := t.TempDir()
kataHostSharedDir = func() string {
return testHostDir
}
@@ -611,12 +605,18 @@ func TestMountSharedDirMounts(t *testing.T) {
//
// Create the mounts that we'll test with
//
testMountPath := t.TempDir()
secretpath := filepath.Join(testMountPath, K8sSecret)
err = os.MkdirAll(secretpath, 0777)
assert.NoError(err)
secret := filepath.Join(secretpath, "super-secret-thing")
_, err = os.Create(secret)
f, err := os.Create(secret)
assert.NoError(err)
t.Cleanup(func() {
if err := f.Close(); err != nil {
t.Fatalf("failed to close file: %v", err)
}
})
mountDestination := "/fluffhead/token"
//
@@ -655,6 +655,13 @@ func TestMountSharedDirMounts(t *testing.T) {
assert.Equal(len(updatedMounts), 1)
assert.Equal(updatedMounts[mountDestination].Source, expectedStorageDest)
assert.Equal(updatedMounts[mountDestination].Destination, mountDestination)
// Perform cleanups
err = container.unmountHostMounts(k.ctx)
assert.NoError(err)
err = fsShare.Cleanup(k.ctx)
assert.NoError(err)
}
func TestGetContainerId(t *testing.T) {
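
> Two smaller changes ride along in this test: the `os.Create` result is now captured so the file handle can be closed via `t.Cleanup`, and the test explicitly unmounts and cleans up what it mounted. The close-on-cleanup idiom, sketched in isolation:

```go
package example

import (
	"os"
	"path/filepath"
	"testing"
)

func TestCreateWithCleanup(t *testing.T) {
	f, err := os.Create(filepath.Join(t.TempDir(), "super-secret-thing"))
	if err != nil {
		t.Fatal(err)
	}
	// t.Cleanup callbacks run after the test finishes, in LIFO order,
	// so the handle is closed before the TempDir itself is removed.
	t.Cleanup(func() {
		if err := f.Close(); err != nil {
			t.Fatalf("failed to close file: %v", err)
		}
	})
}
```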

View File

@@ -17,9 +17,7 @@ import (
func TestGetBackingFile(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "backing")
assert.NoError(err)
defer os.RemoveAll(dir)
dir := t.TempDir()
orgGetSysDevPath := getSysDevPath
getSysDevPath = func(info DeviceInfo) string {

View File

@@ -29,22 +29,20 @@ func TestAttachVhostUserBlkDevice(t *testing.T) {
rootEnabled = false
}
tmpDir, err := os.MkdirTemp("", "")
tmpDir := t.TempDir()
dm := &deviceManager{
blockDriver: config.VirtioBlock,
devices: make(map[string]api.Device),
vhostUserStoreEnabled: true,
vhostUserStorePath: tmpDir,
}
assert.Nil(t, err)
defer os.RemoveAll(tmpDir)
vhostUserDevNodePath := filepath.Join(tmpDir, "/block/devices/")
vhostUserSockPath := filepath.Join(tmpDir, "/block/sockets/")
deviceNodePath := filepath.Join(vhostUserDevNodePath, "vhostblk0")
deviceSockPath := filepath.Join(vhostUserSockPath, "vhostblk0")
err = os.MkdirAll(vhostUserDevNodePath, dirMode)
err := os.MkdirAll(vhostUserDevNodePath, dirMode)
assert.Nil(t, err)
err = os.MkdirAll(vhostUserSockPath, dirMode)
assert.Nil(t, err)

View File

@@ -34,12 +34,8 @@ func TestNewDevice(t *testing.T) {
major := int64(252)
minor := int64(3)
tmpDir, err := os.MkdirTemp("", "")
assert.Nil(t, err)
config.SysDevPrefix = tmpDir
config.SysDevPrefix = t.TempDir()
defer func() {
os.RemoveAll(tmpDir)
config.SysDevPrefix = savedSysDevPrefix
}()
@@ -53,7 +49,7 @@ func TestNewDevice(t *testing.T) {
DevType: "c",
}
_, err = dm.NewDevice(deviceInfo)
_, err := dm.NewDevice(deviceInfo)
assert.NotNil(t, err)
format := strconv.FormatInt(major, 10) + ":" + strconv.FormatInt(minor, 10)
@@ -99,15 +95,13 @@ func TestAttachVFIODevice(t *testing.T) {
blockDriver: config.VirtioBlock,
devices: make(map[string]api.Device),
}
tmpDir, err := os.MkdirTemp("", "")
assert.Nil(t, err)
defer os.RemoveAll(tmpDir)
tmpDir := t.TempDir()
testFDIOGroup := "2"
testDeviceBDFPath := "0000:00:1c.0"
devicesDir := filepath.Join(tmpDir, testFDIOGroup, "devices")
err = os.MkdirAll(devicesDir, dirMode)
err := os.MkdirAll(devicesDir, dirMode)
assert.Nil(t, err)
deviceBDFDir := filepath.Join(devicesDir, testDeviceBDFPath)

View File

@@ -24,14 +24,11 @@ func TestSandboxSharedFilesystem(t *testing.T) {
assert := assert.New(t)
// create temporary files to mount:
testMountPath, err := os.MkdirTemp("", "sandbox-test")
assert.NoError(err)
defer os.RemoveAll(testMountPath)
testMountPath := t.TempDir()
// create a new shared directory for our test:
kataHostSharedDirSaved := kataHostSharedDir
testHostDir, err := os.MkdirTemp("", "kata-Cleanup")
assert.NoError(err)
testHostDir := t.TempDir()
kataHostSharedDir = func() string {
return testHostDir
}
@@ -66,7 +63,6 @@ func TestSandboxSharedFilesystem(t *testing.T) {
dir := kataHostSharedDir()
err = os.MkdirAll(path.Join(dir, sandbox.id), 0777)
assert.Nil(err)
defer os.RemoveAll(dir)
// Test the prepare function. We expect it to succeed
err = sandbox.fsShare.Prepare(sandbox.ctx)

View File

@@ -527,17 +527,19 @@ func (conf *HypervisorConfig) CheckTemplateConfig() error {
}
func (conf *HypervisorConfig) Valid() error {
// Kata specific checks. Should be done outside the hypervisor
if conf.KernelPath == "" {
return fmt.Errorf("Missing kernel path")
}
if conf.ImagePath == "" && conf.InitrdPath == "" {
if conf.ConfidentialGuest && conf.HypervisorMachineType == QemuCCWVirtio {
if conf.ImagePath != "" || conf.InitrdPath != "" {
return fmt.Errorf("Neither the image nor the initrd path may be set for Secure Execution")
}
} else if conf.ImagePath == "" && conf.InitrdPath == "" {
return fmt.Errorf("Missing image and initrd path")
}
if conf.ImagePath != "" && conf.InitrdPath != "" {
} else if conf.ImagePath != "" && conf.InitrdPath != "" {
return fmt.Errorf("Image and initrd path cannot be both set")
}
@@ -559,7 +561,7 @@ func (conf *HypervisorConfig) Valid() error {
if conf.BlockDeviceDriver == "" {
conf.BlockDeviceDriver = defaultBlockDriver
} else if conf.BlockDeviceDriver == config.VirtioBlock && conf.HypervisorMachineType == "s390-ccw-virtio" {
} else if conf.BlockDeviceDriver == config.VirtioBlock && conf.HypervisorMachineType == QemuCCWVirtio {
conf.BlockDeviceDriver = config.VirtioBlockCCW
}

View File

@@ -144,6 +144,18 @@ func TestHypervisorConfigBothInitrdAndImage(t *testing.T) {
testHypervisorConfigValid(t, hypervisorConfig, false)
}
func TestHypervisorConfigSecureExecution(t *testing.T) {
hypervisorConfig := &HypervisorConfig{
KernelPath: fmt.Sprintf("%s/%s", testDir, testKernel),
InitrdPath: fmt.Sprintf("%s/%s", testDir, testInitrd),
ConfidentialGuest: true,
HypervisorMachineType: QemuCCWVirtio,
}
// Secure Execution should only specify a kernel (encrypted image contains all components)
testHypervisorConfigValid(t, hypervisorConfig, false)
}
func TestHypervisorConfigValidTemplateConfig(t *testing.T) {
hypervisorConfig := &HypervisorConfig{
KernelPath: fmt.Sprintf("%s/%s", testDir, testKernel),
@@ -390,12 +402,10 @@ func TestGetHostMemorySizeKb(t *testing.T) {
},
}
dir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(dir)
dir := t.TempDir()
file := filepath.Join(dir, "meminfo")
_, err = GetHostMemorySizeKb(file)
_, err := GetHostMemorySizeKb(file)
assert.Error(err)
for _, d := range data {

View File

@@ -912,7 +912,6 @@ func (k *kataAgent) constrainGRPCSpec(grpcSpec *grpc.Spec, passSeccomp bool, str
grpcSpec.Linux.Resources.Devices = nil
grpcSpec.Linux.Resources.Pids = nil
grpcSpec.Linux.Resources.BlockIO = nil
grpcSpec.Linux.Resources.HugepageLimits = nil
grpcSpec.Linux.Resources.Network = nil
if grpcSpec.Linux.Resources.CPU != nil {
grpcSpec.Linux.Resources.CPU.Cpus = ""

View File

@@ -609,7 +609,7 @@ func TestConstrainGRPCSpec(t *testing.T) {
assert.NotNil(g.Linux.Resources.Memory)
assert.Nil(g.Linux.Resources.Pids)
assert.Nil(g.Linux.Resources.BlockIO)
assert.Nil(g.Linux.Resources.HugepageLimits)
assert.Len(g.Linux.Resources.HugepageLimits, 0)
assert.Nil(g.Linux.Resources.Network)
assert.NotNil(g.Linux.Resources.CPU)
assert.Equal(g.Process.SelinuxLabel, "")
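
> Since the agent no longer nils out `HugepageLimits`, the assertion flips from `assert.Nil` to `assert.Len(..., 0)`. The distinction matters because testify's `Nil` rejects an allocated-but-empty slice, while `Len(x, 0)` accepts nil and empty alike; a hedged illustration:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNilVsEmpty(t *testing.T) {
	var nilSlice []int
	emptySlice := []int{}

	assert.Nil(t, nilSlice)      // a nil slice passes Nil
	assert.NotNil(t, emptySlice) // allocated but empty: Nil would fail
	assert.Len(t, nilSlice, 0)   // Len(0) accepts nil...
	assert.Len(t, emptySlice, 0) // ...and empty alike
}
```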
@@ -751,9 +751,7 @@ func TestHandlePidNamespace(t *testing.T) {
func TestAgentConfigure(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "kata-agent-test")
assert.Nil(err)
defer os.RemoveAll(dir)
dir := t.TempDir()
k := &kataAgent{}
h := &mockHypervisor{}
@@ -761,7 +759,7 @@ func TestAgentConfigure(t *testing.T) {
id := "foobar"
ctx := context.Background()
err = k.configure(ctx, h, id, dir, c)
err := k.configure(ctx, h, id, dir, c)
assert.Nil(err)
err = k.configure(ctx, h, id, dir, c)
@@ -872,9 +870,7 @@ func TestAgentCreateContainer(t *testing.T) {
},
}
dir, err := os.MkdirTemp("", "kata-agent-test")
assert.Nil(err)
defer os.RemoveAll(dir)
dir := t.TempDir()
err = k.configure(context.Background(), &mockHypervisor{}, sandbox.id, dir, KataAgentConfig{})
assert.Nil(err)
@@ -984,8 +980,7 @@ func TestKataCleanupSandbox(t *testing.T) {
kataHostSharedDirSaved := kataHostSharedDir
kataHostSharedDir = func() string {
td, _ := os.MkdirTemp("", "kata-Cleanup")
return td
return t.TempDir()
}
defer func() {
kataHostSharedDir = kataHostSharedDirSaved
@@ -996,7 +991,6 @@ func TestKataCleanupSandbox(t *testing.T) {
}
dir := kataHostSharedDir()
defer os.RemoveAll(dir)
err := os.MkdirAll(path.Join(dir, s.id), 0777)
assert.Nil(err)
@@ -1144,9 +1138,7 @@ func TestHandleHugepages(t *testing.T) {
assert := assert.New(t)
dir, err := ioutil.TempDir("", "hugepages-test")
assert.Nil(err)
defer os.RemoveAll(dir)
dir := t.TempDir()
k := kataAgent{}
var formattedSizes []string

View File

@@ -18,6 +18,8 @@ const (
watcherChannelSize = 128
)
var monitorLog = virtLog.WithField("subsystem", "virtcontainers/monitor")
// nolint: govet
type monitor struct {
watchers []chan error
@@ -33,6 +35,9 @@ type monitor struct {
}
func newMonitor(s *Sandbox) *monitor {
// there should only be one monitor for one sandbox,
// so it's safe to use monitorLog as a global variable.
monitorLog = monitorLog.WithField("sandbox", s.ID())
return &monitor{
sandbox: s,
checkInterval: defaultCheckInterval,
@@ -72,6 +77,7 @@ func (m *monitor) newWatcher(ctx context.Context) (chan error, error) {
}
func (m *monitor) notify(ctx context.Context, err error) {
monitorLog.WithError(err).Warn("notify on errors")
m.sandbox.agent.markDead(ctx)
m.Lock()
@@ -85,18 +91,19 @@ func (m *monitor) notify(ctx context.Context, err error) {
// but just in case...
defer func() {
if x := recover(); x != nil {
virtLog.Warnf("watcher closed channel: %v", x)
monitorLog.Warnf("watcher closed channel: %v", x)
}
}()
for _, c := range m.watchers {
monitorLog.WithError(err).Warn("write error to watcher")
// throw away the message if it can not be written to the channel
// so the monitor never gets stuck; the first error is the useful one.
select {
case c <- err:
default:
virtLog.WithField("channel-size", watcherChannelSize).Warnf("watcher channel is full, throw notify message")
monitorLog.WithField("channel-size", watcherChannelSize).Warnf("watcher channel is full, throw notify message")
}
}
}
@@ -104,6 +111,7 @@ func (m *monitor) notify(ctx context.Context, err error) {
func (m *monitor) stop() {
// wait outside of monitor lock for the watcher channel to exit.
defer m.wg.Wait()
monitorLog.Info("stopping monitor")
m.Lock()
defer m.Unlock()
@@ -122,7 +130,7 @@ func (m *monitor) stop() {
// but just in case...
defer func() {
if x := recover(); x != nil {
virtLog.Warnf("watcher closed channel: %v", x)
monitorLog.Warnf("watcher closed channel: %v", x)
}
}()
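
> The monitor's notify path relies on buffered watcher channels plus a non-blocking send, so a slow or dead watcher can never wedge the monitor; this hunk only swaps the logger. The idiom, isolated (hypothetical function name):

```go
package example

import "github.com/sirupsen/logrus"

// notifyWatcher drops the message rather than block when the watcher's
// buffer is full: by then the watcher has already seen an earlier error,
// and the first one is the useful one.
func notifyWatcher(log *logrus.Entry, c chan error, err error) {
	select {
	case c <- err:
	default:
		log.WithField("channel-size", cap(c)).Warn("watcher channel is full, dropping notify message")
	}
}
```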

View File

@@ -318,14 +318,12 @@ func TestIsWatchable(t *testing.T) {
result = isWatchableMount(path)
assert.False(result)
testPath, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(testPath)
testPath := t.TempDir()
// Verify secret is successful (single file mount):
// /tmppath/kubernetes.io~secret/super-secret-thing
secretpath := filepath.Join(testPath, K8sSecret)
err = os.MkdirAll(secretpath, 0777)
err := os.MkdirAll(secretpath, 0777)
assert.NoError(err)
secret := filepath.Join(secretpath, "super-secret-thing")
_, err = os.Create(secret)

View File

@@ -8,7 +8,6 @@ package virtcontainers
import (
"context"
"encoding/base64"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -19,7 +18,6 @@ import (
)
func TestNydusdStart(t *testing.T) {
assert := assert.New(t)
// nolint: govet
type fields struct {
pid int
@@ -33,13 +31,8 @@ func TestNydusdStart(t *testing.T) {
setupShareDirFn func() error
}
sourcePath, err := ioutil.TempDir("", "")
assert.NoError(err)
defer os.RemoveAll(sourcePath)
socketDir, err := ioutil.TempDir("", "")
assert.NoError(err)
defer os.RemoveAll(socketDir)
sourcePath := t.TempDir()
socketDir := t.TempDir()
sockPath := filepath.Join(socketDir, "vhost-user.sock")
apiSockPath := filepath.Join(socketDir, "api.sock")
@@ -115,13 +108,8 @@ func TestNydusdArgs(t *testing.T) {
func TestNydusdValid(t *testing.T) {
assert := assert.New(t)
sourcePath, err := ioutil.TempDir("", "")
assert.NoError(err)
defer os.RemoveAll(sourcePath)
socketDir, err := ioutil.TempDir("", "")
assert.NoError(err)
defer os.RemoveAll(socketDir)
sourcePath := t.TempDir()
socketDir := t.TempDir()
sockPath := filepath.Join(socketDir, "vhost-user.sock")
apiSockPath := filepath.Join(socketDir, "api.sock")
@@ -135,7 +123,7 @@ func TestNydusdValid(t *testing.T) {
}
}
nd := newNydsudFunc()
err = nd.valid()
err := nd.valid()
assert.NoError(err)
nd = newNydsudFunc()

View File

@@ -758,7 +758,7 @@ func (q *qemu) setupVirtioMem(ctx context.Context) error {
if err != nil {
return err
}
// backend memory size must be multiple of 2Mib
// backend memory size must be a multiple of 4MiB
sizeMB := (int(maxMem) - int(q.config.MemorySize)) >> 2 << 2
share, target, memoryBack, err := q.getMemArgs()
@@ -783,7 +783,6 @@ func (q *qemu) setupVirtioMem(ctx context.Context) error {
err = q.qmpMonitorCh.qmp.ExecMemdevAdd(q.qmpMonitorCh.ctx, memoryBack, "virtiomem", target, sizeMB, share, "virtio-mem-pci", "virtiomem0", addr, bridge.ID)
if err == nil {
q.config.VirtioMem = true
q.Logger().Infof("Setup %dMB virtio-mem-pci success", sizeMB)
} else {
help := ""
@@ -1841,7 +1840,7 @@ func (q *qemu) hotplugAddCPUs(amount uint32) (uint32, error) {
threadID := fmt.Sprintf("%d", hc.Properties.Thread)
// If CPU type is IBM pSeries, Z or arm virt, we do not set socketID and threadID
if machine.Type == "pseries" || machine.Type == "s390-ccw-virtio" || machine.Type == "virt" {
if machine.Type == "pseries" || machine.Type == QemuCCWVirtio || machine.Type == "virt" {
socketID = ""
threadID = ""
dieID = ""
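
> The `>> 2 << 2` in `setupVirtioMem` (whose comment is corrected above) clears the two low bits, rounding the hotpluggable size down to the multiple of 4 MiB the virtio-mem backend requires. Worked arithmetic, as a standalone sketch:

```go
package example

import "fmt"

// roundDown4 mirrors the ">> 2 << 2" trick: shifting right then left by
// two clears the low bits, rounding down to the nearest multiple of 4.
func roundDown4(n int) int { return n >> 2 << 2 }

func main() {
	for _, n := range []int{4094, 4096, 4099} {
		fmt.Printf("%d -> %d\n", n, roundDown4(n)) // 4092, 4096, 4096
	}
}
```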

View File

@@ -308,9 +308,7 @@ func TestQemuAddDeviceSerialPortDev(t *testing.T) {
func TestQemuAddDeviceKataVSOCK(t *testing.T) {
assert := assert.New(t)
dir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(dir)
dir := t.TempDir()
vsockFilename := filepath.Join(dir, "vsock")

View File

@@ -257,8 +257,8 @@ func testCheckContainerOnDiskState(c *Container, containerState types.ContainerS
// writeContainerConfig write config.json to bundle path
// and return bundle path.
// NOTE: don't forget to delete the bundle path
func writeContainerConfig() (string, error) {
// NOTE: the bundle path is automatically removed by t.Cleanup
func writeContainerConfig(t *testing.T) (string, error) {
basicSpec := `
{
@@ -269,12 +269,9 @@ func writeContainerConfig() (string, error) {
}
}`
configDir, err := os.MkdirTemp("", "vc-tmp-")
if err != nil {
return "", err
}
configDir := t.TempDir()
err = os.MkdirAll(configDir, DirMode)
err := os.MkdirAll(configDir, DirMode)
if err != nil {
return "", err
}
@@ -293,10 +290,7 @@ func TestSandboxSetSandboxAndContainerState(t *testing.T) {
contConfig := newTestContainerConfigNoop(contID)
assert := assert.New(t)
configDir, err := writeContainerConfig()
if err != nil {
os.RemoveAll(configDir)
}
configDir, err := writeContainerConfig(t)
assert.NoError(err)
// set bundle path annotation; fetchSandbox needs this annotation to get containers
@@ -526,15 +520,14 @@ func TestContainerStateSetFstype(t *testing.T) {
}
func TestSandboxAttachDevicesVFIO(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "")
assert.Nil(t, err)
tmpDir := t.TempDir()
os.RemoveAll(tmpDir)
testFDIOGroup := "2"
testDeviceBDFPath := "0000:00:1c.0"
devicesDir := filepath.Join(tmpDir, testFDIOGroup, "devices")
err = os.MkdirAll(devicesDir, DirMode)
err := os.MkdirAll(devicesDir, DirMode)
assert.Nil(t, err)
deviceFile := filepath.Join(devicesDir, testDeviceBDFPath)
@@ -596,8 +589,7 @@ func TestSandboxAttachDevicesVhostUserBlk(t *testing.T) {
rootEnabled = false
}
tmpDir, err := os.MkdirTemp("", "")
assert.Nil(t, err)
tmpDir := t.TempDir()
os.RemoveAll(tmpDir)
dm := manager.NewDeviceManager(config.VirtioSCSI, true, tmpDir, nil)
@@ -606,7 +598,7 @@ func TestSandboxAttachDevicesVhostUserBlk(t *testing.T) {
deviceNodePath := filepath.Join(vhostUserDevNodePath, "vhostblk0")
deviceSockPath := filepath.Join(vhostUserSockPath, "vhostblk0")
err = os.MkdirAll(vhostUserDevNodePath, dirMode)
err := os.MkdirAll(vhostUserDevNodePath, dirMode)
assert.Nil(t, err)
err = os.MkdirAll(vhostUserSockPath, dirMode)
assert.Nil(t, err)

View File

@@ -458,12 +458,8 @@ func TestMkdirAllWithInheritedOwnerSuccessful(t *testing.T) {
t.Skip("Test disabled as requires root user")
}
assert := assert.New(t)
tmpDir1, err := os.MkdirTemp("", "test")
assert.NoError(err)
defer os.RemoveAll(tmpDir1)
tmpDir2, err := os.MkdirTemp("", "test")
assert.NoError(err)
defer os.RemoveAll(tmpDir2)
tmpDir1 := t.TempDir()
tmpDir2 := t.TempDir()
testCases := []struct {
before func(rootDir string, uid, gid int)
@@ -474,7 +470,7 @@ func TestMkdirAllWithInheritedOwnerSuccessful(t *testing.T) {
}{
{
before: func(rootDir string, uid, gid int) {
err = syscall.Chown(rootDir, uid, gid)
err := syscall.Chown(rootDir, uid, gid)
assert.NoError(err)
},
rootDir: tmpDir1,
@@ -485,7 +481,7 @@ func TestMkdirAllWithInheritedOwnerSuccessful(t *testing.T) {
{
before: func(rootDir string, uid, gid int) {
// remove tmpDir2 so the MkdirAllWithInheritedOwner() call recreates the path from /tmp
err = os.RemoveAll(tmpDir2)
err := os.RemoveAll(tmpDir2)
assert.NoError(err)
},
rootDir: tmpDir2,
@@ -502,8 +498,10 @@ func TestMkdirAllWithInheritedOwnerSuccessful(t *testing.T) {
err := MkdirAllWithInheritedOwner(tc.targetDir, 0700)
assert.NoError(err)
// remove the first parent "/tmp" from the assertion as it's owned by root
for _, p := range getAllParentPaths(tc.targetDir)[1:] {
// tmpDir1: /tmp/TestMkdirAllWithInheritedOwnerSuccessful/001
// tmpDir2: /tmp/TestMkdirAllWithInheritedOwnerSuccessful/002
// remove the first two parents ("/tmp" and "/tmp/TestMkdirAllWithInheritedOwnerSuccessful") from the assertion as they are owned by root
for _, p := range getAllParentPaths(tc.targetDir)[2:] {
info, err := os.Stat(p)
assert.NoError(err)
assert.True(info.IsDir())
@@ -520,12 +518,10 @@ func TestChownToParent(t *testing.T) {
t.Skip("Test disabled as requires root user")
}
assert := assert.New(t)
rootDir, err := os.MkdirTemp("", "root")
assert.NoError(err)
defer os.RemoveAll(rootDir)
rootDir := t.TempDir()
uid := 1234
gid := 5678
err = syscall.Chown(rootDir, uid, gid)
err := syscall.Chown(rootDir, uid, gid)
assert.NoError(err)
targetDir := path.Join(rootDir, "foo")

View File

@@ -7,7 +7,6 @@ package virtcontainers
import (
"context"
"os"
"path"
"strings"
"testing"
@@ -16,7 +15,6 @@ import (
)
func TestVirtiofsdStart(t *testing.T) {
assert := assert.New(t)
// nolint: govet
type fields struct {
path string
@@ -29,13 +27,8 @@ func TestVirtiofsdStart(t *testing.T) {
ctx context.Context
}
sourcePath, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(sourcePath)
socketDir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(socketDir)
sourcePath := t.TempDir()
socketDir := t.TempDir()
socketPath := path.Join(socketDir, "socket.s")
@@ -103,13 +96,8 @@ func TestVirtiofsdArgs(t *testing.T) {
func TestValid(t *testing.T) {
assert := assert.New(t)
sourcePath, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(sourcePath)
socketDir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(socketDir)
sourcePath := t.TempDir()
socketDir := t.TempDir()
socketPath := socketDir + "socket.s"
@@ -123,7 +111,7 @@ func TestValid(t *testing.T) {
// valid case
v := newVirtiofsdFunc()
err = v.valid()
err := v.valid()
assert.NoError(err)
v = newVirtiofsdFunc()

View File

@@ -18,9 +18,7 @@ import (
func TestNewVM(t *testing.T) {
assert := assert.New(t)
testDir, err := os.MkdirTemp("", "vmfactory-tmp-")
assert.Nil(err)
defer os.RemoveAll(testDir)
testDir := t.TempDir()
config := VMConfig{
HypervisorType: MockHypervisor,
@@ -33,7 +31,7 @@ func TestNewVM(t *testing.T) {
ctx := WithNewAgentFunc(context.Background(), newMockAgent)
var vm *VM
_, err = NewVM(ctx, config)
_, err := NewVM(ctx, config)
assert.Error(err)
config.HypervisorConfig = hyperConfig
@@ -65,9 +63,7 @@ func TestNewVM(t *testing.T) {
defer func() {
urandomDev = savedUrandomDev
}()
tmpdir, err := os.MkdirTemp("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
urandomDev = filepath.Join(tmpdir, "urandom")
data := make([]byte, 512)
err = os.WriteFile(urandomDev, data, os.FileMode(0640))
@@ -98,9 +94,7 @@ func TestVMConfigValid(t *testing.T) {
err := config.Valid()
assert.Error(err)
testDir, err := os.MkdirTemp("", "vmfactory-tmp-")
assert.Nil(err)
defer os.RemoveAll(testDir)
testDir := t.TempDir()
config.HypervisorConfig = HypervisorConfig{
KernelPath: testDir,

View File

@@ -16,7 +16,7 @@ ENV QAT_DRIVER_URL "https://downloadmirror.intel.com/649693/${QAT_DRIVER_VER}"
ENV QAT_CONFIGURE_OPTIONS "--enable-icp-sriov=guest"
ENV KATA_REPO_VERSION "main"
ENV AGENT_VERSION ""
ENV ROOTFS_OS "ubuntu"
ENV ROOTFS_OS "centos"
ENV OUTPUT_DIR "/output"
RUN dnf install -y \

View File

@@ -39,9 +39,9 @@ grab_kata_repos()
# Check out all the repos we will use now, so we can try and ensure they use the specified branch
# Only check out the branch needed, and make it shallow and thus space/bandwidth efficient
# Use a green prompt with white text for easy viewing
/bin/echo -e "\n\e[1;42mClone and checkout Kata repos\e[0m"
git clone --single-branch --branch $KATA_REPO_VERSION --depth=1 https://${kata_repo} ${kata_repo_path}
git clone --single-branch --branch $KATA_REPO_VERSION --depth=1 https://${tests_repo} ${tests_repo_path}
/bin/echo -e "\n\e[1;42mClone and checkout Kata repos\e[0m"
[ -d "${kata_repo_path}" ] || git clone --single-branch --branch $KATA_REPO_VERSION --depth=1 https://${kata_repo} ${kata_repo_path}
[ -d "${tests_repo_path}" ] || git clone --single-branch --branch $KATA_REPO_VERSION --depth=1 https://${tests_repo} ${tests_repo_path}
}
configure_kernel()
@@ -164,6 +164,7 @@ main()
done
shift $((OPTIND-1))
sudo chown -R qatbuilder:qatbuilder /home/qatbuilder
grab_qat_drivers
grab_kata_repos
configure_kernel

View File

The stable image refers to the last stable release's content.
> **Note:** if you use a tagged version of the repo, the stable image does match that version.
> For instance, if you use the 2.2.1 tagged version of the kata-deploy.yaml file, then version 2.2.1 of the Kata runtime will be deployed.
```sh
```bash
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy-stable.yaml
```
@@ -36,12 +36,15 @@ $ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-contai
```sh
$ GO111MODULE=auto go get github.com/kata-containers/kata-containers
```
```bash
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/packaging/kata-deploy
$ kubectl apply -k kata-deploy/overlays/k3s
```
#### Ensure kata-deploy is ready
```sh
```bash
kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
```
@@ -52,7 +55,7 @@ the `Pod` specification. The `runtimeClass` examples provided define a node sele
which will ensure the workload is only scheduled on a node that has Kata Containers installed.
`runtimeClass` is a built-in type in Kubernetes. To apply each Kata Containers `runtimeClass`:
```sh
```bash
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml
```
@@ -85,25 +88,25 @@ spec:
To run an example with `kata-clh`:
```sh
```bash
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/examples/test-deploy-kata-clh.yaml
```
To run an example with `kata-fc`:
```sh
```bash
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/examples/test-deploy-kata-fc.yaml
```
To run an example with `kata-qemu`:
```sh
```bash
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/examples/test-deploy-kata-qemu.yaml
```
The following removes the test pods:
```sh
```bash
$ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/examples/test-deploy-kata-clh.yaml
$ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/examples/test-deploy-kata-fc.yaml
$ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/examples/test-deploy-kata-qemu.yaml
@@ -136,13 +139,13 @@ $ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-conta
#### Removing the stable image
```sh
```bash
$ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy-stable.yaml
$ kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
```
After ensuring kata-deploy has been deleted, clean up the cluster:
```sh
```bash
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup-stable.yaml
```
@@ -150,7 +153,7 @@ The cleanup daemon-set will run a single time, cleaning up the node-label, which
This process should take, at most, 5 minutes.
After that, let's delete the cleanup daemon-set, the added RBAC and runtime classes:
```sh
```bash
$ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup-stable.yaml
$ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml
$ kubectl delete -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml

View File

@@ -0,0 +1 @@
build/

View File

@@ -15,8 +15,11 @@ endef
kata-tarball: | all-parallel merge-builds
all-parallel:
${MAKE} -f $(MK_PATH) all -j$$(( $$(nproc) - 1 )) NO_TTY="true" V=
$(MK_DIR)/dockerbuild/install_yq.sh:
$(MK_DIR)/kata-deploy-copy-yq-installer.sh
all-parallel: $(MK_DIR)/dockerbuild/install_yq.sh
${MAKE} -f $(MK_PATH) all -j$$(( $$(nproc) - 1 )) V=
all: cloud-hypervisor-tarball \
firecracker-tarball \
@@ -26,7 +29,7 @@ all: cloud-hypervisor-tarball \
rootfs-initrd-tarball \
shim-v2-tarball
%-tarball-build:
%-tarball-build: $(MK_DIR)/dockerbuild/install_yq.sh
$(call BUILD,$*)
cloud-hypervisor-tarball:

View File

@@ -16,25 +16,17 @@ kata_deploy_create="${script_dir}/kata-deploy-binaries.sh"
uid=$(id -u ${USER})
gid=$(id -g ${USER})
TTY_OPT="-i"
NO_TTY="${NO_TTY:-false}"
[ -t 1 ] && [ "${NO_TTY}" == "false" ] && TTY_OPT="-it"
if [ "${script_dir}" != "${PWD}" ]; then
ln -sf "${script_dir}/build" "${PWD}/build"
fi
install_yq_script_path="${script_dir}/../../../../ci/install_yq.sh"
cp "${install_yq_script_path}" "${script_dir}/dockerbuild/install_yq.sh"
docker build -q -t build-kata-deploy \
--build-arg IMG_USER="${USER}" \
--build-arg UID=${uid} \
--build-arg GID=${gid} \
"${script_dir}/dockerbuild/"
docker run ${TTY_OPT} \
docker run \
-v /var/run/docker.sock:/var/run/docker.sock \
--user ${uid}:${gid} \
--env USER=${USER} \

View File

@@ -8,6 +8,7 @@
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
readonly project="kata-containers"
@@ -64,6 +65,7 @@ version: The kata version that will be used to create the tarball
options:
-h|--help : Show this help
-s : Silent mode (produce output in case of failure only)
--build=<asset> :
all
cloud-hypervisor
@@ -195,6 +197,18 @@ handle_build() {
tar tvf "${tarball_name}"
}
silent_mode_error_trap() {
local stdout="$1"
local stderr="$2"
local t="$3"
local log_file="$4"
exec 1>&${stdout}
exec 2>&${stderr}
error "Failed to build: $t, logs:"
cat "${log_file}"
exit 1
}
main() {
local build_targets
local silent
@@ -247,11 +261,15 @@ main() {
(
cd "${builddir}"
if [ "${silent}" == true ]; then
if ! handle_build "${t}" &>"$log_file"; then
error "Failed to build: $t, logs:"
cat "${log_file}"
exit 1
fi
local stdout
local stderr
# Save stdout and stderr, to be restored
# by silent_mode_error_trap() in case of
# build failure.
exec {stdout}>&1
exec {stderr}>&2
trap "silent_mode_error_trap $stdout $stderr $t \"$log_file\"" ERR
handle_build "${t}" &>"$log_file"
else
handle_build "${t}"
fi
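
The silent-mode rework leans on two bash features: `exec {var}>&1` duplicates a file descriptor onto an automatically allocated number stored in `var`, and an `ERR` trap (inherited by functions thanks to `set -o errtrace` above) fires when a command fails, letting the handler restore the saved descriptors and dump the captured log. A self-contained sketch of the same pattern, using a deliberately failing command as a stand-in for the real build step:

```bash
#!/usr/bin/env bash
set -o errexit
set -o errtrace

log_file=$(mktemp)

on_error() {
    local stdout="$1" stderr="$2"
    # Point fds 1 and 2 back at the saved descriptors so the
    # captured log becomes visible again.
    exec 1>&"${stdout}" 2>&"${stderr}"
    echo "build failed, captured output:"
    cat "${log_file}"
    exit 1
}

# Duplicate the current stdout/stderr; bash picks free fd numbers
# and stores them in the named variables.
exec {saved_out}>&1
exec {saved_err}>&2
trap "on_error ${saved_out} ${saved_err}" ERR

# Stand-in for the real build step; all output goes to the log.
{ echo "building..."; false; } &>"${log_file}"
```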

View File

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
#
# Copyright (c) 2018-2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
script_dir=$(dirname "$(readlink -f "$0")")
install_yq_script_path="${script_dir}/../../../../ci/install_yq.sh"
cp "${install_yq_script_path}" "${script_dir}/dockerbuild/install_yq.sh"

View File

@@ -0,0 +1,53 @@
#!/usr/bin/env bash
#
# Copyright (c) 2022 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o nounset
set -o pipefail
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
script_name="$(basename "${BASH_SOURCE[0]}")"
# This is very error-prone if we re-structure our
# repos again, but it's also used in a few other places :-/
repo_dir="${script_dir}/../../.."
function usage() {
cat <<EOF
Usage: ${script_name} tarball-name
This script creates a tarball with all the cargo vendored code
that a distro would need to do a full build of the project in
a disconnected environment, generating a "tarball-name" file.
EOF
}
create_vendor_tarball() {
vendor_dir_list=""
pushd ${repo_dir}
for i in $(find . -name 'Cargo.lock'); do
dir="$(dirname $i)"
pushd "${dir}"
[ -d .cargo ] || mkdir .cargo
cargo vendor >> .cargo/config
vendor_dir_list+=" $dir/vendor $dir/.cargo/config"
echo "${vendor_dir_list}"
popd
done
popd
tar -cvzf ${1} ${vendor_dir_list}
}
main () {
[ $# -ne 1 ] && usage && exit 0
create_vendor_tarball ${1}
}
main "$@"

View File

@@ -182,30 +182,22 @@ bump_repo() {
info "Updating kata-deploy / kata-cleanup image tags"
local version_to_replace="${current_version}"
local replacement="${new_version}"
local removed_files=false
if [ "${target_branch}" == "main" ]; then
local need_commit=false
if [ "${target_branch}" == "main" ];then
if [[ "${new_version}" =~ "rc" ]]; then
## this is case 2), where we remove the kata-deploy / kata-cleanup stable files
## We are bumping from alpha to RC, so we should drop the kata-deploy-stable YAMLs.
git rm "${kata_deploy_stable_yaml}"
git rm "${kata_cleanup_stable_yaml}"
removed_files=true
need_commit=true
fi
## these are cases 1) and 2), where "alpha" and "rc0" are tagged as "latest"
version_to_replace="latest"
replacement="latest"
else
if [[ "${new_version}" =~ "rc" ]]; then
## we're on a stable branch, coming from "rcX" (tagged as latest) and going
## to "rcX+1", which should still be tagged as latest.
elif [ "${new_version}" != *"rc"* ]; then
## We are on a stable branch and creating new stable releases.
## Need to change kata-deploy / kata-cleanup to use the stable tags.
if [[ "${version_to_replace}" =~ "rc" ]]; then
## Coming from "rcX" so from the latest tag.
version_to_replace="latest"
replacement="latest"
fi
fi
if [ "${version_to_replace}" != "${replacement}" ] || [ "${removed_files}" == "true" ]; then
## this covers case 3), as it has changes to the kata-deploy / kata-cleanup files
sed -i "s#${registry}:${version_to_replace}#${registry}:${replacement}#g" "${kata_deploy_yaml}"
sed -i "s#${registry}:${version_to_replace}#${registry}:${replacement}#g" "${kata_cleanup_yaml}"
@@ -214,6 +206,10 @@ bump_repo() {
git add "${kata_deploy_yaml}"
git add "${kata_cleanup_yaml}"
need_commit=true
fi
if [ "${need_commit}" == "true" ]; then
info "Creating the commit with the kata-deploy changes"
local commit_msg="$(generate_kata_deploy_commit $new_version)"
git commit -s -m "${commit_msg}"
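
The branching above hinges on bash's regex test: with a quoted pattern, `[[ "$v" =~ "rc" ]]` is a literal substring match, true for any version string containing "rc". A small sketch of that classification, with illustrative version strings:

```bash
#!/usr/bin/env bash
# Classify version strings the way the bump logic above does.
# The sample versions are illustrative placeholders.
for v in 2.5.0-alpha0 2.5.0-rc0 2.5.0; do
    if [[ "${v}" =~ "rc" ]]; then
        echo "${v}: release candidate"
    else
        echo "${v}: final release"
    fi
done
```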

View File

@@ -22,6 +22,8 @@ source "${script_dir}/../../scripts/lib.sh"
cloud_hypervisor_repo="${cloud_hypervisor_repo:-}"
cloud_hypervisor_version="${cloud_hypervisor_version:-}"
cloud_hypervisor_pr="${cloud_hypervisor_pr:-}"
cloud_hypervisor_pull_ref_branch="${cloud_hypervisor_pull_ref_branch:-main}"
if [ -z "$cloud_hypervisor_repo" ]; then
info "Get cloud_hypervisor information from runtime versions.yaml"
@@ -31,8 +33,13 @@ if [ -z "$cloud_hypervisor_repo" ]; then
fi
[ -n "$cloud_hypervisor_repo" ] || die "failed to get cloud_hypervisor repo"
[ -n "$cloud_hypervisor_version" ] || cloud_hypervisor_version=$(get_from_kata_deps "assets.hypervisor.cloud_hypervisor.version" "${kata_version}")
[ -n "$cloud_hypervisor_version" ] || die "failed to get cloud_hypervisor version"
if [ -n "$cloud_hypervisor_pr" ]; then
force_build_from_source=true
cloud_hypervisor_version="PR $cloud_hypervisor_pr"
else
[ -n "$cloud_hypervisor_version" ] || cloud_hypervisor_version=$(get_from_kata_deps "assets.hypervisor.cloud_hypervisor.version" "${kata_version}")
[ -n "$cloud_hypervisor_version" ] || die "failed to get cloud_hypervisor version"
fi
pull_clh_released_binary() {
info "Download cloud-hypervisor version: ${cloud_hypervisor_version}"
@@ -50,8 +57,19 @@ build_clh_from_source() {
repo_dir="${repo_dir//.git}"
[ -d "${repo_dir}" ] || git clone "${cloud_hypervisor_repo}"
pushd "${repo_dir}"
git fetch || true
git checkout "${cloud_hypervisor_version}"
if [ -n "${cloud_hypervisor_pr}" ]; then
local pr_branch="PR_${cloud_hypervisor_pr}"
git fetch origin "pull/${cloud_hypervisor_pr}/head:${pr_branch}" || return 1
git checkout "${pr_branch}"
git rebase "origin/${cloud_hypervisor_pull_ref_branch}"
git log --oneline main~1..HEAD
else
git fetch || true
git checkout "${cloud_hypervisor_version}"
fi
if [ -n "${features}" ]; then
info "Build cloud-hypervisor enabling the following features: ${features}"
./scripts/dev_cli.sh build --release --libc musl --features "${features}"
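
The PR-build path works because GitHub exposes a read-only ref `pull/<ID>/head` for every pull request; fetching it into a local branch and rebasing onto the target branch roughly reproduces what CI would test. A sketch of that flow with a hypothetical PR number and base branch:

```bash
# Fetch a GitHub pull request head into a local branch and rebase it
# onto the branch it targets. The PR number and base branch are
# illustrative placeholders.
pr="1234"
base="main"
pr_branch="PR_${pr}"

git fetch origin "pull/${pr}/head:${pr_branch}"
git checkout "${pr_branch}"
git rebase "origin/${base}"
```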

View File

@@ -83,7 +83,7 @@ assets:
uscan-url: >-
https://github.com/firecracker-microvm/firecracker/tags
.*/v?(\d\S+)\.tar\.gz
version: "v0.23.1"
version: "v0.23.4"
qemu:
description: "VMM that uses KVM"