Merge pull request #3665 from stevenhorsman/CCv0-merge-main-14-feb
CCv0: Merge main into CCv0 branch
.github/workflows/PR-wip-checks.yaml

@@ -15,6 +15,7 @@ jobs:
name: WIP Check
steps:
- name: WIP Check
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: tim-actions/wip-check@1c2a1ca6c110026b3e2297bb2ef39e1747b5a755
with:
labels: '["do-not-merge", "wip", "rfc"]'
.github/workflows/commit-message-check.yaml

@@ -18,24 +18,32 @@ jobs:
name: Commit Message Check
steps:
- name: Get PR Commits
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
id: 'get-pr-commits'
uses: tim-actions/get-pr-commits@v1.0.0
uses: tim-actions/get-pr-commits@v1.2.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
# Filter out revert commits
# The format of a revert commit is as follows:
#
# Revert "<original-subject-line>"
#
filter_out_pattern: '^Revert "'

- name: DCO Check
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: tim-actions/dco@2fd0504dc0d27b33f542867c300c60840c6dcb20
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

- name: Commit Body Missing Check
if: ${{ success() || failure() }}
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-body-check@v1.0.2
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

- name: Check Subject Line Length
if: ${{ success() || failure() }}
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}
@@ -44,7 +52,7 @@ jobs:
post_error: ${{ env.error_msg }}

- name: Check Body Line Length
if: ${{ success() || failure() }}
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}
@@ -71,7 +79,7 @@ jobs:
post_error: ${{ env.error_msg }}

- name: Check Fixes
if: ${{ success() || failure() }}
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}
@@ -82,7 +90,7 @@ jobs:
one_pass_all_pass: 'true'

- name: Check Subsystem
if: ${{ success() || failure() }}
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}
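Every step above is guarded by the same `force-skip-ci` label expression. As a rough local equivalent (a sketch, not part of the workflow), the same check can be reproduced with `jq` against the event payload GitHub provides in `$GITHUB_EVENT_PATH`:

```bash
# Exits 0 when the PR does NOT carry the force-skip-ci label (i.e. the checks should run).
jq -e '.pull_request.labels | map(.name) | index("force-skip-ci") | not' "$GITHUB_EVENT_PATH"
```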
.github/workflows/kata-deploy-push.yaml

@@ -1,6 +1,15 @@
name: kata deploy build

on: [push, pull_request]
on:
pull_request:
types:
- opened
- edited
- reopened
- synchronize
paths:
- tools/**
- versions.yaml

jobs:
build-asset:
@@ -19,11 +28,13 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Install docker
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
curl -fsSL https://test.docker.com -o test-docker.sh
sh test-docker.sh

- name: Build ${{ matrix.asset }}
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
make "${KATA_ASSET}-tarball"
build_dir=$(readlink -f build)
@@ -33,6 +44,7 @@ jobs:
KATA_ASSET: ${{ matrix.asset }}

- name: store-artifact ${{ matrix.asset }}
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/upload-artifact@v2
with:
name: kata-artifacts
@@ -45,14 +57,17 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: get-artifacts
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/download-artifact@v2
with:
name: kata-artifacts
path: build
- name: merge-artifacts
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
make merge-builds
- name: store-artifacts
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/upload-artifact@v2
with:
name: kata-static-tarball
@@ -63,6 +78,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: make kata-tarball
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
make kata-tarball
sudo make install-tarball
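The per-asset builds above can be approximated locally; a minimal sketch, assuming a checkout of the kata-containers repository root with Docker already installed (the asset name `kernel` is only an example):

```bash
# Build a single asset tarball the way the build-asset job does.
export KATA_ASSET=kernel
make "${KATA_ASSET}-tarball"
build_dir=$(readlink -f build)
ls "${build_dir}"   # the per-asset tarball lands here; a later job combines them with "make merge-builds"
```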
.github/workflows/kata-deploy-test.yaml

@@ -48,7 +48,16 @@ jobs:
- rootfs-initrd
- shim-v2
steps:
- name: get-PR-ref
id: get-PR-ref
run: |
ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
echo "reference for PR: " ${ref}
echo "##[set-output name=pr-ref;]${ref}"
- uses: actions/checkout@v2
with:
ref: ${{ steps.get-PR-ref.outputs.pr-ref }}

- name: Install docker
run: |
curl -fsSL https://test.docker.com -o test-docker.sh
@@ -75,7 +84,15 @@ jobs:
runs-on: ubuntu-latest
needs: build-asset
steps:
- name: get-PR-ref
id: get-PR-ref
run: |
ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
echo "reference for PR: " ${ref}
echo "##[set-output name=pr-ref;]${ref}"
- uses: actions/checkout@v2
with:
ref: ${{ steps.get-PR-ref.outputs.pr-ref }}
- name: get-artifacts
uses: actions/download-artifact@v2
with:
@@ -94,7 +111,15 @@ jobs:
needs: create-kata-tarball
runs-on: ubuntu-latest
steps:
- name: get-PR-ref
id: get-PR-ref
run: |
ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
echo "reference for PR: " ${ref}
echo "##[set-output name=pr-ref;]${ref}"
- uses: actions/checkout@v2
with:
ref: ${{ steps.get-PR-ref.outputs.pr-ref }}
- name: get-kata-tarball
uses: actions/download-artifact@v2
with:
@@ -102,18 +127,14 @@ jobs:
- name: build-and-push-kata-deploy-ci
id: build-and-push-kata-deploy-ci
run: |
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
pushd $GITHUB_WORKSPACE
git checkout $tag
pkg_sha=$(git rev-parse HEAD)
popd
PR_SHA=$(git log --format=format:%H -n1)
mv kata-static.tar.xz $GITHUB_WORKSPACE/tools/packaging/kata-deploy/kata-static.tar.xz
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t quay.io/kata-containers/kata-deploy-ci:$pkg_sha $GITHUB_WORKSPACE/tools/packaging/kata-deploy
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t quay.io/kata-containers/kata-deploy-ci:$PR_SHA $GITHUB_WORKSPACE/tools/packaging/kata-deploy
docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
docker push quay.io/kata-containers/kata-deploy-ci:$pkg_sha
docker push quay.io/kata-containers/kata-deploy-ci:$PR_SHA
mkdir -p packaging/kata-deploy
ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action packaging/kata-deploy/action
echo "::set-output name=PKG_SHA::${pkg_sha}"
echo "::set-output name=PKG_SHA::${PR_SHA}"
- name: test-kata-deploy-ci-in-aks
uses: ./packaging/kata-deploy/action
with:
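The repeated `get-PR-ref` steps rewrite the pull request API URL from the `issue_comment` payload into a fetchable merge ref. A small illustration of that `sed` pipeline, using a made-up PR URL:

```bash
url="https://api.github.com/repos/kata-containers/kata-containers/pulls/3665"
echo "${url}" | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#'
# prints: refs/pull/3665/merge
```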
@@ -16,6 +16,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Install hub
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
HUB_ARCH="amd64"
HUB_VER=$(curl -sL "https://api.github.com/repos/github/hub/releases/latest" |\
@@ -26,6 +27,7 @@ jobs:
sudo install hub /usr/local/bin

- name: Install hub extension script
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
# Clone into a temporary directory to avoid overwriting
# any existing github directory.
@@ -35,9 +37,11 @@ jobs:
popd &>/dev/null

- name: Checkout code to allow hub to communicate with the project
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/checkout@v2

- name: Move issue to "In progress"
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
env:
GITHUB_TOKEN: ${{ secrets.KATA_GITHUB_ACTIONS_TOKEN }}
run: |

@@ -20,6 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Install hub
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
HUB_ARCH="amd64"
HUB_VER=$(curl -sL "https://api.github.com/repos/github/hub/releases/latest" |\
@@ -30,6 +31,7 @@ jobs:
sudo install hub /usr/local/bin

- name: Checkout code to allow hub to communicate with the project
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/checkout@v2

- name: Install porting checker script
@@ -42,6 +44,7 @@ jobs:
popd &>/dev/null

- name: Stop PR being merged unless it has a correct set of porting labels
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
env:
GITHUB_TOKEN: ${{ secrets.KATA_GITHUB_ACTIONS_TOKEN }}
run: |
.github/workflows/snap.yaml

@@ -1,17 +1,27 @@
name: snap CI
on: ["pull_request"]
on:
pull_request:
types:
- opened
- synchronize
- reopened
- edited

jobs:
test:
runs-on: ubuntu-20.04
steps:
- name: Check out
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/checkout@v2
with:
fetch-depth: 0

- name: Install Snapcraft
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: samuelmeuli/action-snapcraft@v1

- name: Build snap
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
snapcraft -d snap --destructive-mode
.github/workflows/static-checks.yaml

@@ -5,8 +5,6 @@ on:
- edited
- reopened
- synchronize
- labeled
- unlabeled

name: Static checks
jobs:
93
Glossary.md
@@ -1,94 +1,3 @@
|
||||
# Glossary
|
||||
|
||||
[A](#a), [B](#b), [C](#c), [D](#d), [E](#e), [F](#f), [G](#g), [H](#h), [I](#i), [J](#j), [K](#k), [L](#l), [M](#m), [N](#n), [O](#o), [P](#p), [Q](#q), [R](#r), [S](#s), [T](#t), [U](#u), [V](#v), [W](#w), [X](#x), [Y](#y), [Z](#z)
|
||||
|
||||
## A
|
||||
|
||||
### Auto Scaling
|
||||
a method used in cloud computing, whereby the amount of computational resources in a server farm, typically measured in terms of the number of active servers, which vary automatically based on the load on the farm.
|
||||
|
||||
## B
|
||||
|
||||
## C
|
||||
|
||||
### Container Security Solutions
|
||||
The process of implementing security tools and policies that will give you the assurance that everything in your container is running as intended, and only as intended.
|
||||
|
||||
### Container Software
|
||||
A standard unit of software that packages up code and all its dependencies so the application runs quickly and reliably from one computing environment to another.
|
||||
|
||||
### Container Runtime Interface
|
||||
A plugin interface which enables Kubelet to use a wide variety of container runtimes, without the need to recompile.
|
||||
|
||||
### Container Virtualization
|
||||
A container is a virtual runtime environment that runs on top of a single operating system (OS) kernel and emulates an operating system rather than the underlying hardware.
|
||||
|
||||
## D
|
||||
|
||||
## E
|
||||
|
||||
## F
|
||||
|
||||
## G
|
||||
|
||||
## H
|
||||
|
||||
## I
|
||||
|
||||
### Infrastructure Architecture
|
||||
A structured and modern approach for supporting an organization and facilitating innovation within an enterprise.
|
||||
|
||||
## J
|
||||
|
||||
## K
|
||||
|
||||
### Kata Containers
|
||||
Kata containers is an open source project delivering increased container security and Workload isolation through an implementation of lightweight virtual machines.
|
||||
|
||||
## L
|
||||
|
||||
## M
|
||||
|
||||
## N
|
||||
|
||||
## O
|
||||
|
||||
## P
|
||||
|
||||
### Pod Containers
|
||||
A Group of one or more containers , with shared storage/network, and a specification for how to run the containers.
|
||||
|
||||
### Private Cloud
|
||||
A computing model that offers a proprietary environment dedicated to a single business entity.
|
||||
|
||||
### Public Cloud
|
||||
Computing services offered by third-party providers over the public Internet, making them available to anyone who wants to use or purchase them.
|
||||
|
||||
## Q
|
||||
|
||||
## R
|
||||
|
||||
## S
|
||||
|
||||
### Serverless Containers
|
||||
An architecture in which code is executed on-demand. Serverless workloads are typically in the cloud, but on-premises serverless platforms exist, too.
|
||||
|
||||
## T
|
||||
|
||||
## U
|
||||
|
||||
## V
|
||||
|
||||
### Virtual Machine Monitor
|
||||
Computer software, firmware or hardware that creates and runs virtual machines.
|
||||
|
||||
### Virtual Machine Software
|
||||
A software program or operating system that not only exhibits the behavior of a separate computer, but is also capable of performing tasks such as running applications and programs like a separate computer.
|
||||
|
||||
## W
|
||||
|
||||
## X
|
||||
|
||||
## Y
|
||||
|
||||
## Z
|
||||
See the [project glossary hosted in the wiki](https://github.com/kata-containers/kata-containers/wiki/Glossary).
|
||||
|
||||
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Copyright (c) 2020 Intel Corporation
#

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Copyright (c) 2019 Intel Corporation
#

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Copyright 2021 Sony Group Corporation
#
@@ -41,8 +41,7 @@ cflags="-O2"
# gperf_version=$(get_version "externals.gperf.version")
# gperf_url=$(get_version "externals.gperf.url")
gperf_version="3.1"
# XXX: gnu.org currently unavailable - see https://github.com/kata-containers/kata-containers/issues/3314
gperf_url="https://www.mirrorservice.org/sites/ftp.gnu.org/gnu/gperf"
gperf_url="https://ftp.gnu.org/gnu/gperf"
gperf_tarball="gperf-${gperf_version}.tar.gz"
gperf_tarball_url="${gperf_url}/${gperf_tarball}"
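The hunk above switches the gperf download back to ftp.gnu.org now that the outage tracked in issue #3314 is over. A hedged sketch of a fetch that keeps the previous mirror as a fallback (an option, not what the script actually does):

```bash
gperf_version="3.1"
gperf_tarball="gperf-${gperf_version}.tar.gz"
primary="https://ftp.gnu.org/gnu/gperf/${gperf_tarball}"
mirror="https://www.mirrorservice.org/sites/ftp.gnu.org/gnu/gperf/${gperf_tarball}"
# Try the GNU server first, fall back to the mirror if it is unreachable.
curl -fsSL -o "${gperf_tarball}" "${primary}" || curl -fsSL -o "${gperf_tarball}" "${mirror}"
```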
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Copyright (c) 2020 Ant Group
#
# SPDX-License-Identifier: Apache-2.0

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Copyright (c) 2019 Ant Financial
#
# SPDX-License-Identifier: Apache-2.0

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Copyright (c) 2018 Intel Corporation
#

@@ -36,7 +36,7 @@ run_static_checks()
# Make sure we have the targeting branch
git remote set-branches --add origin "${branch}"
git fetch -a
bash "$tests_repo_dir/.ci/static-checks.sh" "github.com/kata-containers/kata-containers"
bash "$tests_repo_dir/.ci/static-checks.sh" "$@"
}

run_go_test()
@@ -4,7 +4,7 @@
#
# This is the build root image for Kata Containers on OpenShift CI.
#
FROM registry.centos.org/centos:8
FROM quay.io/centos/centos:stream8

RUN yum -y update && \
yum -y install \

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Copyright (c) 2019 Ant Financial
#

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Copyright (c) 2018 Intel Corporation
#

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Copyright (c) 2017-2018 Intel Corporation
#
@@ -9,4 +9,4 @@ set -e
cidir=$(dirname "$0")
source "${cidir}/lib.sh"

run_static_checks
run_static_checks "${@:-github.com/kata-containers/kata-containers}"
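With the two hunks above, the static-check helper now forwards its arguments and only defaults to the kata-containers repository when none are given. A short sketch of how callers can use it after sourcing the CI library (the override value is illustrative):

```bash
run_static_checks                                      # checks github.com/kata-containers/kata-containers (the default)
run_static_checks "github.com/kata-containers/tests"   # illustrative override for another repository
```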
@@ -104,31 +104,6 @@ set the size of the `/dev/shm tmpfs` within the container. It is possible to pas

See issue https://github.com/kata-containers/kata-containers/issues/21 for more information.

### docker run and sysctl

The `docker run --sysctl` feature is not implemented. At the runtime
level, this equates to the `linux.sysctl` OCI configuration. Docker
allows configuring the sysctl settings that support namespacing. From a security and isolation point of view, it might make sense to set them in the VM, which isolates sysctl settings. Also, given that each Kata Container has its own kernel, we can support setting of sysctl settings that are not namespaced. In some cases, we might need to support configuring some of the settings on both the host side Kata Container namespace and the Kata Containers kernel.

See issue https://github.com/kata-containers/runtime/issues/185 for more information.

## Docker daemon features

Some features enabled or implemented via the
[`dockerd` daemon](https://docs.docker.com/config/daemon/) configuration are not yet
implemented.

### SELinux support

The `dockerd` configuration option `"selinux-enabled": true` is not presently implemented
in Kata Containers. Enabling this option causes an OCI runtime error.

See issue https://github.com/kata-containers/runtime/issues/784 for more information.

The consequence of this is that the [Docker --security-opt is only partially supported](#docker---security-opt-option-partially-supported).

Kubernetes [SELinux labels](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#assign-selinux-labels-to-a-container) will also not be applied.

# Architectural limitations

This section lists items that might not be fixed due to fundamental

@@ -28,7 +28,6 @@ See the [howto documentation](how-to).
## Kata Use-Cases

* [GPU Passthrough with Kata](./use-cases/GPU-passthrough-and-Kata.md)
* [OpenStack Zun with Kata Containers](./use-cases/zun_kata.md)
* [SR-IOV with Kata](./use-cases/using-SRIOV-and-kata.md)
* [Intel QAT with Kata](./use-cases/using-Intel-QAT-and-kata.md)
* [VPP with Kata](./use-cases/using-vpp-and-kata.md)
@@ -48,6 +48,7 @@
### Merge all bump version Pull requests

- The above step will create a GitHub pull request in the Kata projects. Trigger the CI using `/test` command on each bump Pull request.
- Trigger the test-kata-deploy workflow on the kata-containers repository bump Pull request using `/test_kata_deploy` (monitor under the "action" tab).
- Check any failures and fix if needed.
- Work with the Kata approvers to verify that the CI works and the pull requests are merged.

@@ -64,7 +65,7 @@

### Check Git-hub Actions

We make use of [GitHub actions](https://github.com/features/actions) in this [file](https://github.com/kata-containers/kata-containers/blob/main/.github/workflows/release.yaml) in the `kata-containers/kata-containers` repository to build and upload release artifacts. This action is auto triggered with the above step when a new tag is pushed to the `kata-containers/kata-containers` repository.
We make use of [GitHub actions](https://github.com/features/actions) in this [file](../.github/workflows/release.yaml) in the `kata-containers/kata-containers` repository to build and upload release artifacts. This action is auto triggered with the above step when a new tag is pushed to the `kata-containers/kata-containers` repository.

Check the [actions status page](https://github.com/kata-containers/kata-containers/actions) to verify all steps in the actions workflow have completed successfully. On success, a static tarball containing Kata release artifacts will be uploaded to the [Release page](https://github.com/kata-containers/kata-containers/releases).
@@ -337,7 +337,7 @@ will run if the correct type of user is detected and skipped if not.

The main repository has the most comprehensive set of skip abilities. See:

- https://github.com/kata-containers/kata-containers/tree/main/src/runtime/pkg/katatestutils
- [`katatestutils`](../src/runtime/pkg/katatestutils)

### Run Rust tests as a different user

@@ -102,7 +102,7 @@ first
[install the latest release](#determine-latest-version).

See the
[manual installation installation documentation](install/README.md#manual-installation)
[manual installation documentation](install/README.md#manual-installation)
for details on how to automatically install and configuration a static release
with containerd.

@@ -154,7 +154,7 @@ func testFoo() error {
### Tracing

Consider if the code needs to create a new
[trace span](https://github.com/kata-containers/kata-containers/blob/main/docs/tracing.md).
[trace span](./tracing.md).

Ensure any new trace spans added to the code are completed.
@@ -10,6 +10,7 @@ Kata Containers design documents:
- [Host cgroups](host-cgroups.md)
- [`Inotify` support](inotify.md)
- [Metrics(Kata 2.0)](kata-2-0-metrics.md)
- [Design for Kata Containers `Lazyload` ability with `nydus`](kata-nydus-design.md)

---

docs/design/arch-images/kata-nydus.drawio (new file)
@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2022-01-18T14:06:01.890Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36" etag="nId-8OV6FDjWTDgzqDu-" version="15.8.9" type="device"><diagram id="bkF_ZONM9sPFCpIYoGFl" name="Page-1">5Vtbj6M2GP01eUyEbW55nGSmM2q70mqnUnf6UrnBCdYSnIIzSfbX14AJYDsLSSDZUWdHGvxhDD7n8F1sdoTm6/1zgjfhJxaQaAStYD9CjyMIgQ1d8SezHAqL504LwyqhgexUGV7pdyKNlrRuaUDSRkfOWMTppmlcsDgmC96w4SRhu2a3JYuad93gFdEMrwsc6dY/acDDwupDr7K/ELoKyzuDcn5rXHaWM0lDHLBdzYSeRmieMMaLo/V+TqIMvBKX4rpfTpw9PlhCYt7lgpd4/J1SSP9++bR8Cb6A1de/XsZ2Mco7jrZywiFLuXxgfihR4GQv7jEL+ToSBiAOU56wb2TOIpYIS8xi0XO2pFGkmHBEV7FoLsRTEmGfvZOEU4HvgzyxpkGQ3Wa2Cyknrxu8yO65E2oStoRt44BkE7BES5+xBCEbk+xrJonAM2FrwpOD6CLP+o5kQ8rRsZ2ivavItWWXsMZrSSKWclodR64QFwcSdDMB8ycYj+f+Hw/+b3jxvN57e/vXsa8RoIHfBMEEU42XJYu5fIugfeSplC5QSBpBtHSyf/LKmr340ZgWZ9z858iHBr6BopN8INDkAwGdj6llIMSxh2JkamDEjbhEqEGN+++WlSfGaY76g+gA3c2+OimOVtnf+BBs03Ea400aMp69DHJY8ZTFyEW/H/AP+uC/D9aQNbFAkzjDiwQ8A3H+ULyVSrqCOARNxInQwjGNSRIMzth0OMacCYJN14csnTFnOkG+Tpo3GGnAQJqCJomDhyySZ1EkwmlKFzlKOOG6uYZr023WUBYTRDOBW3L4mp2cOGXzTV6ZNx738sqidWjEIBJoWYMWlFK2TRakg2DFTFaEt3kkndoab47JQ0pbQiLM6XvzeU1Eyjt8ZjR/W0rluErELD10OUQxT3lVPf9QBrIVV2+7ykAFDtpAua6O075Cauh6x97iH8ZpSNfjb5jj8TscxFn04Aocx2n3A65BUMM5AT0L7c+lwqFcqg8UHKEeAVGJdSOXdAYD0rle4tOTucvw4W8wrhyvyZU7NWQr0KB5dzCq3OupMqaZufcRVWnOzwfNVnxbiTlTg4tCP4h5/dPlXZin1KA7phxjkT3DRtZhTbxj+0Tikbc+k4SKCWWFdGHcU/61HF4cv1UJjWhVI2WNITIYdM/MxIOKStSEomtmosrNVVOcoTOTDosAncWl5LNWm6ykgirVvNX0dCMFdciBC0ruJjWkKAReKjWnZaCBpQZNRfLFUmu6sFYPdmdn1bXcuq9Xc1WFqClIV6mpA3nWjaV2aWlfl9oFkql5QgvYTYkC95Ioexd/Z/9MoVWLiJ39HWiJ0UOLEBpEeF6aDXxTmr3akrRzhv0zbZ9cl5grcdBxJL732j6BpqWDM/k1llHFNthHordZifn9EA6A4gmQYZXjtozraxxzoFFyaU2bB4hBalpggROpX1tRO9gaBNTXILLt6GX6IeH0O8KJBoNTXyOg6+zzAhGOPw6sSi3sGTZkgWlDdjhYTdXxmS7eMbn4NBSwBDQZZJ2s9OwRWfJ+qJmq+bxxq/yGxKAOteStNzc0t2BC6aZeodx1/d/LV0kdfeve8jXtB95ZvtNpO0i3VW+Hrbm2Iv70RjysL0DWS/xbrQkVL+e9qmzfP8H2uVW2Fhrs21bZyLTv2K9KykWd4wJkvx9rtK7HFFnIvZQCLNiXVFxVKt7kxmLRq47yo7g8mpmL63Mrahm4TtbTqXDjNF79nnd7tCvLF0leZmLi8mWUazYUFxIxwmyT4ZIj5czEr0Bznq1IOuJZ56INqrb4zbonfM5i8fiY5pojOOW7bO0okzzHHP+Tz1Sv4HvLiFzHLJ2adD3DZwrDxZRet7vO24MIcBoe43mP7qEQ9f3cg6VwrC6/dHUP6kYXALA//yCa1efuRffqPw2gp/8A</diagram></mxfile>
docs/design/arch-images/kata-nydus.png (new binary file, 51 KiB)
docs/design/arch-images/nydus-performance.png (new binary file, 390 KiB)
docs/design/arch-images/nydusd-arch.png (new binary file, 942 KiB)
docs/design/arch-images/rafs-format.png (new binary file, 182 KiB)
@@ -250,7 +250,7 @@ runtime cleans up the environment (which includes terminating the

If the container manager requests the container be deleted, the
[runtime](#runtime) will signal the agent by sending it a
`DestroySandbox` [ttRPC API](../../../src/agent/protocols/protos/agent.proto) request.
`DestroySandbox` [ttRPC API](../../../src/libs/protocols/protos/agent.proto) request.

## Guest assets

@@ -291,7 +291,7 @@ for each VM created.
The agent communicates with the other Kata components (primarily the
[runtime](#runtime)) using a
[`ttRPC`](https://github.com/containerd/ttrpc-rust) based
[protocol](../../../src/agent/protocols/protos).
[protocol](../../../src/libs/protocols/protos).

> **Note:**
>
@@ -1,36 +1,37 @@
# Networking

See the [networking document](networking.md).

Containers will typically live in their own, possibly shared, networking namespace.
Containers typically live in their own, possibly shared, networking namespace.
At some point in a container lifecycle, container engines will set up that namespace
to add the container to a network which is isolated from the host network, but
which is shared between containers
to add the container to a network which is isolated from the host network.

In order to do so, container engines will usually add one end of a virtual
ethernet (`veth`) pair into the container networking namespace. The other end of
the `veth` pair is added to the host networking namespace.
In order to setup the network for a container, container engines call into a
networking plugin. The network plugin will usually create a virtual
ethernet (`veth`) pair adding one end of the `veth` pair into the container
networking namespace, while the other end of the `veth` pair is added to the
host networking namespace.

This is a very namespace-centric approach as many hypervisors or VM
Managers (VMMs) such as `virt-manager` cannot handle `veth`
interfaces. Typically, `TAP` interfaces are created for VM
connectivity.
interfaces. Typically, [`TAP`](https://www.kernel.org/doc/Documentation/networking/tuntap.txt)
interfaces are created for VM connectivity.

To overcome incompatibility between typical container engines expectations
and virtual machines, Kata Containers networking transparently connects `veth`
interfaces with `TAP` ones using Traffic Control:
interfaces with `TAP` ones using [Traffic Control](https://man7.org/linux/man-pages/man8/tc.8.html):



With a TC filter in place, a redirection is created between the container network and the
virtual machine. As an example, the CNI may create a device, `eth0`, in the container's network
namespace, which is a VETH device. Kata Containers will create a tap device for the VM, `tap0_kata`,
and setup a TC redirection filter to mirror traffic from `eth0`'s ingress to `tap0_kata`'s egress,
and a second to mirror traffic from `tap0_kata`'s ingress to `eth0`'s egress.
With a TC filter rules in place, a redirection is created between the container network
and the virtual machine. As an example, the network plugin may place a device,
`eth0`, in the container's network namespace, which is one end of a VETH device.
Kata Containers will create a tap device for the VM, `tap0_kata`,
and setup a TC redirection filter to redirect traffic from `eth0`'s ingress to `tap0_kata`'s egress,
and a second TC filter to redirect traffic from `tap0_kata`'s ingress to `eth0`'s egress.

Kata Containers maintains support for MACVTAP, which was an earlier implementation used in Kata. TC-filter
is the default because it allows for simpler configuration, better CNI plugin compatibility, and performance
on par with MACVTAP.
Kata Containers maintains support for MACVTAP, which was an earlier implementation used in Kata.
With this method, Kata created a MACVTAP device to connect directly to the `eth0` device.
TC-filter is the default because it allows for simpler configuration, better CNI plugin
compatibility, and performance on par with MACVTAP.

Kata Containers has deprecated support for bridge due to lacking performance relative to TC-filter and MACVTAP.
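For readers who want to see what the TC-filter cross-connect described above looks like in practice, here is a rough sketch using plain `tc` commands; the device names `eth0` and `tap0_kata` are the examples from the text, and this is illustrative rather than the exact commands the runtime issues:

```bash
# Mirror eth0 ingress to tap0_kata egress, and the reverse direction as well.
tc qdisc add dev eth0 ingress
tc filter add dev eth0 parent ffff: protocol all u32 match u8 0 0 \
   action mirred egress redirect dev tap0_kata
tc qdisc add dev tap0_kata ingress
tc filter add dev tap0_kata parent ffff: protocol all u32 match u8 0 0 \
   action mirred egress redirect dev eth0
```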
@@ -19,7 +19,7 @@ Cgroups are hierarchical, and this can be seen with the following pod example:
- Container 2: `cgroupsPath=/kubepods/pod1/container2`

- Pod 2: `cgroupsPath=/kubepods/pod2`
- Container 1: `cgroupsPath=/kubepods/pod2/container2`
- Container 1: `cgroupsPath=/kubepods/pod2/container1`
- Container 2: `cgroupsPath=/kubepods/pod2/container2`

Depending on the upper-level orchestration layers, the cgroup under which the pod is placed is
93
docs/design/kata-nydus-design.md
Normal file
@@ -0,0 +1,93 @@
|
||||
# Background
|
||||
|
||||
[Research](https://www.usenix.org/conference/fast16/technical-sessions/presentation/harter) shows that time to take for pull operation accounts for 76% of container startup time but only 6.4% of that data is read. So if we can get data on demand (lazy load), it will speed up the container start. [`Nydus`](https://github.com/dragonflyoss/image-service) is a project which build image with new format and can get data on demand when container start.
|
||||
|
||||
The following benchmarking result shows the performance improvement compared with the OCI image for the container cold startup elapsed time on containerd. As the OCI image size increases, the container startup time of using `nydus` image remains very short. [Click here](https://github.com/dragonflyoss/image-service/blob/master/docs/nydus-design.md) to see `nydus` design.
|
||||
|
||||

|
||||
|
||||
## Proposal - Bring `lazyload` ability to Kata Containers
|
||||
|
||||
`Nydusd` is a fuse/`virtiofs` daemon which is provided by `nydus` project and it supports `PassthroughFS` and [RAFS](https://github.com/dragonflyoss/image-service/blob/master/docs/nydus-design.md) (Registry Acceleration File System) natively, so in Kata Containers, we can use `nydusd` in place of `virtiofsd` and mount `nydus` image to guest in the meanwhile.
|
||||
|
||||
The process of creating/starting Kata Containers with `virtiofsd`,
|
||||
|
||||
1. When creating sandbox, the Kata Containers Containerd v2 [shim](https://github.com/kata-containers/kata-containers/blob/main/docs/design/architecture/README.md#runtime) will launch `virtiofsd` before VM starts and share directories with VM.
|
||||
2. When creating container, the Kata Containers Containerd v2 shim will mount rootfs to `kataShared`(/run/kata-containers/shared/sandboxes/\<SANDBOX\>/mounts/\<CONTAINER\>/rootfs), so it can be seen at the path `/run/kata-containers/shared/containers/shared/\<CONTAINER\>/rootfs` in the guest and used as container's rootfs.
|
||||
|
||||
The process of creating/starting Kata Containers with `nydusd`,
|
||||
|
||||

|
||||
|
||||
1. When creating sandbox, the Kata Containers Containerd v2 shim will launch `nydusd` daemon before VM starts.
|
||||
After VM starts, `kata-agent` will mount `virtiofs` at the path `/run/kata-containers/shared` and Kata Containers Containerd v2 shim mount `passthroughfs` filesystem to path `/run/kata-containers/shared/containers` when the VM starts.
|
||||
|
||||
```bash
|
||||
# start nydusd
|
||||
$ sandbox_id=my-test-sandbox
|
||||
$ sudo /usr/local/bin/nydusd --log-level info --sock /run/vc/vm/${sandbox_id}/vhost-user-fs.sock --apisock /run/vc/vm/${sandbox_id}/api.sock
|
||||
```
|
||||
|
||||
```bash
|
||||
# source: the host sharedir which will pass through to guest
|
||||
$ sudo curl -v --unix-socket /run/vc/vm/${sandbox_id}/api.sock \
|
||||
-X POST "http://localhost/api/v1/mount?mountpoint=/containers" -H "accept: */*" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"source":"/path/to/sharedir",
|
||||
"fs_type":"passthrough_fs",
|
||||
"config":""
|
||||
}'
|
||||
```
|
||||
|
||||
2. When creating normal container, the Kata Containers Containerd v2 shim send request to `nydusd` to mount `rafs` at the path `/run/kata-containers/shared/rafs/<container_id>/lowerdir` in guest.
|
||||
|
||||
```bash
|
||||
# source: the metafile of nydus image
|
||||
# config: the config of this image
|
||||
$ sudo curl --unix-socket /run/vc/vm/${sandbox_id}/api.sock \
|
||||
-X POST "http://localhost/api/v1/mount?mountpoint=/rafs/<container_id>/lowerdir" -H "accept: */*" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"source":"/path/to/bootstrap",
|
||||
"fs_type":"rafs",
|
||||
"config":"config":"{\"device\":{\"backend\":{\"type\":\"localfs\",\"config\":{\"dir\":\"blobs\"}},\"cache\":{\"type\":\"blobcache\",\"config\":{\"work_dir\":\"cache\"}}},\"mode\":\"direct\",\"digest_validate\":true}",
|
||||
}'
|
||||
```
|
||||
|
||||
The Kata Containers Containerd v2 shim will also bind mount `snapshotdir` which `nydus-snapshotter` assigns to `sharedir`。
|
||||
So in guest, container rootfs=overlay(`lowerdir=rafs`, `upperdir=snapshotdir/fs`, `workdir=snapshotdir/work`)
|
||||
|
||||
> how to transfer the `rafs` info from `nydus-snapshotter` to the Kata Containers Containerd v2 shim?
|
||||
|
||||
By default, when creating `OCI` image container, `nydus-snapshotter` will return [`struct` Mount slice](https://github.com/containerd/containerd/blob/main/mount/mount.go#L21) below to containerd and containerd use them to mount rootfs
|
||||
|
||||
```
|
||||
[
|
||||
{
|
||||
Type: "overlay",
|
||||
Source: "overlay",
|
||||
Options: [lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.nydus/snapshots/<snapshot_A>/mnt,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.nydus/snapshots/<snapshot_B>/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.nydus/snapshots/<snapshot_B>/work],
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Then, we can append `rafs` info into `Options`, but if do this, containerd will mount failed, as containerd can not identify `rafs` info. Here, we can refer to [containerd mount helper](https://github.com/containerd/containerd/blob/main/mount/mount_linux.go#L42) and provide a binary called `nydus-overlayfs`. The `Mount` slice which `nydus-snapshotter` returned becomes
|
||||
|
||||
```
|
||||
[
|
||||
{
|
||||
Type: "fuse.nydus-overlayfs",
|
||||
Source: "overlay",
|
||||
Options: [lowerdir=/var/lib/containerd/io.containerd.snapshotter.v1.nydus/snapshots/<snapshot_A>/mnt,upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.nydus/snapshots/<snapshot_B>/fs,workdir=/var/lib/containerd/io.containerd.snapshotter.v1.nydus/snapshots/<snapshot_B>/work,extraoption=base64({source:xxx,config:xxx,snapshotdir:xxx})],
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
When containerd find `Type` is `fuse.nydus-overlayfs`,
|
||||
|
||||
1. containerd will call `mount.fuse` command;
|
||||
2. in `mount.fuse`, it will call `nydus-overlayfs`.
|
||||
3. in `nydus-overlayfs`, it will ignore the `extraoption` and do the overlay mount.
|
||||
|
||||
Finally, in the Kata Containers Containerd v2 shim, it parse `extraoption` and get the `rafs` info to mount the image in guest.
|
||||
@@ -157,6 +157,32 @@ docker run --cpus 4 -ti debian bash -c "nproc; cat /sys/fs/cgroup/cpu,cpuacct/cp
400000 # cfs quota
```

## Virtual CPU handling without hotplug

In some cases, the hardware and/or software architecture being utilized does not support
hotplug. For example, Firecracker VMM does not support CPU or memory hotplug. Similarly,
the current Linux Kernel for aarch64 does not support CPU or memory hotplug. To appropriately
size the virtual machine for the workload within the container or pod, we provide a `static_sandbox_resource_mgmt`
flag within the Kata Containers configuration. When this is set, the runtime will:
- Size the VM based on the workload requirements as well as the `default_vcpus` option specified in the configuration.
- Not resize the virtual machine after it has been launched.

VM size determination varies depending on the type of container being run, and may not always
be available. If workload sizing information is not available, the virtual machine will be started with the
`default_vcpus`.

In the case of a pod, the initial sandbox container (pause container) typically doesn't contain any resource
information in its runtime `spec`. It is possible that the upper layer runtime
(i.e. containerd or CRI-O) may pass sandbox sizing annotations within the pause container's
`spec`. If these are provided, we will use this to appropriately size the VM. In particular,
we'll calculate the number of CPUs required for the workload and augment this by `default_vcpus`
configuration option, and use this for the virtual machine size.

In the case of a single container (i.e., not a pod), if the container specifies resource requirements,
the container's `spec` will provide the sizing information directly. If these are set, we will
calculate the number of CPUs required for the workload and augment this by `default_vcpus`
configuration option, and use this for the virtual machine size.

[1]: https://docs.docker.com/config/containers/resource_constraints/#cpu
[2]: https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource
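The sizing rule in the new section boils down to simple arithmetic: round the workload's CPU request up to whole vCPUs and add `default_vcpus`. A hedged sketch with made-up numbers:

```bash
default_vcpus=1
cpu_request_millis=2500                                   # e.g. the sum of the pod's CPU requests (2.5 CPUs)
workload_vcpus=$(( (cpu_request_millis + 999) / 1000 ))   # round up to whole vCPUs -> 3
echo "VM sized with $(( workload_vcpus + default_vcpus )) vCPUs"   # -> 4
```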
@@ -40,7 +40,7 @@ Kata Containers with QEMU has complete compatibility with Kubernetes.

Depending on the host architecture, Kata Containers supports various machine types,
for example `pc` and `q35` on x86 systems, `virt` on ARM systems and `pseries` on IBM Power systems. The default Kata Containers
machine type is `pc`. The machine type and its [`Machine accelerators`](#machine-accelerators) can
machine type is `q35`. The machine type and its [`Machine accelerators`](#machine-accelerators) can
be changed by editing the runtime [`configuration`](architecture/README.md#configuration) file.

Devices and features used:
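To confirm which machine type a given installation uses, the setting can be read straight from the runtime configuration; a quick check assuming the usual kata-deploy paths:

```bash
grep machine_type /opt/kata/share/defaults/kata-containers/configuration-qemu.toml
# machine_type = "q35"
```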
@@ -37,6 +37,7 @@
- [How to setup swap devices in guest kernel](how-to-setup-swap-devices-in-guest-kernel.md)
- [How to run rootless vmm](how-to-run-rootless-vmm.md)
- [How to run Docker with Kata Containers](how-to-run-docker-with-kata.md)
- [How to run Kata Containers with `nydus`](how-to-use-virtio-fs-nydus-with-kata.md)

## Confidential Containers
- [How to use build and test the Confidential Containers `CCv0` proof of concept](how-to-build-and-test-ccv0.md)
@@ -188,7 +188,7 @@ If you use Containerd older than v1.2.4 or a version of Kata older than v1.6.0
shell script with the following:

```bash
#!/bin/bash
#!/usr/bin/env bash
KATA_CONF_FILE=/etc/kata-containers/firecracker.toml containerd-shim-kata-v2 $@
```
@@ -4,7 +4,7 @@
|
||||
|
||||
This document describes how to import Kata Containers logs into [Fluentd](https://www.fluentd.org/),
|
||||
typically for importing into an
|
||||
Elastic/Fluentd/Kibana([EFK](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/fluentd-elasticsearch#running-efk-stack-in-production))
|
||||
Elastic/Fluentd/Kibana([EFK](https://github.com/kubernetes-sigs/instrumentation-addons/tree/master/fluentd-elasticsearch#running-efk-stack-in-production))
|
||||
or Elastic/Logstash/Kibana([ELK](https://www.elastic.co/elastic-stack)) stack.
|
||||
|
||||
The majority of this document focusses on CRI-O based (classic) Kata runtime. Much of that information
|
||||
@@ -257,14 +257,14 @@ go directly to a full Kata specific JSON format logfile test.
|
||||
|
||||
Kata runtime has the ability to generate JSON logs directly, rather than its default `logfmt` format. Passing
|
||||
the `--log-format=json` argument to the Kata runtime enables this. The easiest way to pass in this extra
|
||||
parameter from a [Kata deploy](https://github.com/kata-containers/kata-containers/tree/main/tools/packaging/kata-deploy) installation
|
||||
parameter from a [Kata deploy](../../tools/packaging/kata-deploy) installation
|
||||
is to edit the `/opt/kata/bin/kata-qemu` shell script.
|
||||
|
||||
At the same time, we will add the `--log=/var/log/kata-runtime.log` argument to store the Kata logs in their
|
||||
own file (rather than into the system journal).
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
/opt/kata/bin/kata-runtime --config "/opt/kata/share/defaults/kata-containers/configuration-qemu.toml" --log-format=json --log=/var/log/kata-runtime.log $@
|
||||
```
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ You can learn more about about Docker-in-Docker at the following links:
- [`docker` image Docker Hub page](https://hub.docker.com/_/docker/) (this page lists the `-dind` releases)

While normally DinD refers to running `docker` from inside a Docker container,
Kata Containers 2.x allows only supported runtimes (such as [`containerd`](../install/container-manager/containerd/containerd-install.md)).
Kata Containers 2.x allows only [supported runtimes][kata-2.x-supported-runtimes] (such as [`containerd`](../install/container-manager/containerd/containerd-install.md)).

Running `docker` in a Kata Container implies creating Docker containers from inside a container managed by `containerd` (or another supported container manager), as illustrated below:

@@ -37,7 +37,7 @@ container manager -> Kata Containers shim -> Docker Daemon -> Docker contain

[OverlayFS]: https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html
[v2.0.0]: https://github.com/kata-containers/kata-containers/releases/tag/2.0.0
[kata-2.x-supported-runtimes]: https://github.com/kata-containers/kata-containers/blob/5737b36a3513f4da11a9dc7301b0c97ea22a51cf/docs/install/container-manager/containerd/containerd-install.md
[kata-2.x-supported-runtimes]: ../install/container-manager/containerd/containerd-install.md

## Why Docker in Kata Containers 2.x requires special measures
@@ -56,13 +56,14 @@ There are several kinds of Kata configurations and they are listed below.
| `io.katacontainers.config.hypervisor.enable_iommu` | `boolean` | enable `iommu` on Q35 (QEMU x86_64) |
| `io.katacontainers.config.hypervisor.enable_iothreads` | `boolean`| enable IO to be processed in a separate thread. Supported currently for virtio-`scsi` driver |
| `io.katacontainers.config.hypervisor.enable_mem_prealloc` | `boolean` | the memory space used for `nvdimm` device by the hypervisor |
| `io.katacontainers.config.hypervisor.enable_swap` | `boolean` | enable swap of VM memory |
| `io.katacontainers.config.hypervisor.enable_vhost_user_store` | `boolean` | enable vhost-user storage device (QEMU) |
| `io.katacontainers.config.hypervisor.enable_virtio_mem` | `boolean` | enable virtio-mem (QEMU) |
| `io.katacontainers.config.hypervisor.entropy_source` (R) | string| the path to a host source of entropy (`/dev/random`, `/dev/urandom` or real hardware RNG device) |
| `io.katacontainers.config.hypervisor.file_mem_backend` (R) | string | file based memory backend root directory |
| `io.katacontainers.config.hypervisor.firmware_hash` | string | container firmware SHA-512 hash value |
| `io.katacontainers.config.hypervisor.firmware` | string | the guest firmware that will run the container VM |
| `io.katacontainers.config.hypervisor.firmware_volume_hash` | string | container firmware volume SHA-512 hash value |
| `io.katacontainers.config.hypervisor.firmware_volume` | string | the guest firmware volume that will be passed to the container VM |
| `io.katacontainers.config.hypervisor.guest_hook_path` | string | the path within the VM that will be used for drop in hooks |
| `io.katacontainers.config.hypervisor.hotplug_vfio_on_root_bus` | `boolean` | indicate if devices need to be hotplugged on the root bus instead of a bridge|
| `io.katacontainers.config.hypervisor.hypervisor_hash` | string | container hypervisor binary SHA-512 hash value |
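As a reminder of how these keys are consumed, here is a hypothetical pod that sets one of the annotations listed above; the runtime class name `kata` is an assumption, not taken from this diff:

```bash
cat > annotation-demo.yaml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: annotation-demo
  annotations:
    io.katacontainers.config.hypervisor.enable_iothreads: "true"
spec:
  runtimeClassName: kata
  containers:
  - name: app
    image: busybox
    command: ["sleep", "3600"]
EOF
kubectl apply -f annotation-demo.yaml
```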
@@ -154,7 +154,7 @@ From Kubernetes v1.12, users can use [`RuntimeClass`](https://kubernetes.io/docs

```bash
$ cat > runtime.yaml <<EOF
apiVersion: node.k8s.io/v1beta1
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
name: kata
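The hunk above only shows the changed `apiVersion` line; for context, a complete `RuntimeClass` of the non-beta form might look like this, assuming the containerd runtime handler is named `kata`:

```bash
cat > runtime.yaml <<EOF
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: kata
handler: kata
EOF
kubectl apply -f runtime.yaml
```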
57
docs/how-to/how-to-use-virtio-fs-nydus-with-kata.md
Normal file
@@ -0,0 +1,57 @@
|
||||
# Kata Containers with virtio-fs-nydus
|
||||
|
||||
## Introduction
|
||||
|
||||
Refer to [kata-`nydus`-design](../design/kata-nydus-design.md)
|
||||
|
||||
## How to
|
||||
|
||||
You can use Kata Containers with `nydus` as follows,
|
||||
|
||||
1. Use [`nydus` latest branch](https://github.com/dragonflyoss/image-service);
|
||||
|
||||
2. Deploy `nydus` environment as [`Nydus` Setup for Containerd Environment](https://github.com/dragonflyoss/image-service/blob/master/docs/containerd-env-setup.md);
|
||||
|
||||
3. Start `nydus-snapshotter` with `enable_nydus_overlayfs` enabled;
|
||||
|
||||
4. Use [kata-containers](https://github.com/kata-containers/kata-containers) `latest` branch to compile and build `kata-containers.img`;
|
||||
|
||||
5. Update `configuration-qemu.toml` to include:
|
||||
|
||||
```toml
|
||||
shared_fs = "virtio-fs-nydus"
|
||||
virtio_fs_daemon = "<nydusd binary path>"
|
||||
virtio_fs_extra_args = []
|
||||
```
|
||||
|
||||
6. run `crictl run -r kata-qemu nydus-container.yaml nydus-sandbox.yaml`;
|
||||
|
||||
The `nydus-sandbox.yaml` looks like below:
|
||||
|
||||
```yaml
|
||||
metadata:
|
||||
attempt: 1
|
||||
name: nydus-sandbox
|
||||
namespace: default
|
||||
log_directory: /tmp
|
||||
linux:
|
||||
security_context:
|
||||
namespace_options:
|
||||
network: 2
|
||||
annotations:
|
||||
"io.containerd.osfeature": "nydus.remoteimage.v1"
|
||||
```
|
||||
|
||||
The `nydus-container.yaml` looks like below:
|
||||
|
||||
```yaml
|
||||
metadata:
|
||||
name: nydus-container
|
||||
image:
|
||||
image: localhost:5000/ubuntu-nydus:latest
|
||||
command:
|
||||
- /bin/sleep
|
||||
args:
|
||||
- 600
|
||||
log_path: container.1.log
|
||||
```
|
||||
@@ -6,4 +6,4 @@ Container deployments utilize explicit or implicit file sharing between host fil

As of the 2.0 release of Kata Containers, [virtio-fs](https://virtio-fs.gitlab.io/) is the default filesystem sharing mechanism.

virtio-fs support works out of the box for `cloud-hypervisor` and `qemu`, when Kata Containers is deployed using `kata-deploy`. Learn more about `kata-deploy` and how to use `kata-deploy` in Kubernetes [here](https://github.com/kata-containers/kata-containers/tree/main/tools/packaging/kata-deploy#kubernetes-quick-start).
virtio-fs support works out of the box for `cloud-hypervisor` and `qemu`, when Kata Containers is deployed using `kata-deploy`. Learn more about `kata-deploy` and how to use `kata-deploy` in Kubernetes [here](../../tools/packaging/kata-deploy/README.md#kubernetes-quick-start).
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

@@ -6,7 +6,7 @@
cluster locally. It creates a single node Kubernetes stack in a local VM.

[Kata Containers](https://github.com/kata-containers) can be installed into a Minikube cluster using
[`kata-deploy`](https://github.com/kata-containers/kata-containers/tree/main/tools/packaging/kata-deploy).
[`kata-deploy`](../../tools/packaging/kata-deploy).

This document details the pre-requisites, installation steps, and how to check
the installation has been successful.
@@ -123,7 +123,7 @@ $ kubectl apply -f kata-deploy/base/kata-deploy.yaml
This installs the Kata Containers components into `/opt/kata` inside the Minikube node. It can take
a few minutes for the operation to complete. You can check the installation has worked by checking
the status of the `kata-deploy` pod, which will be executing
[this script](https://github.com/kata-containers/kata-containers/tree/main/tools/packaging/kata-deploy/scripts/kata-deploy.sh),
[this script](../../tools/packaging/kata-deploy/scripts/kata-deploy.sh),
and will be executing a `sleep infinity` once it has successfully completed its work.
You can accomplish this by running the following:
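One way to do that check (a sketch; the `name=kata-deploy` label and the `kube-system` namespace are assumptions based on the stock kata-deploy manifests and may differ in your cluster):

```bash
kubectl -n kube-system get pods -l name=kata-deploy
kubectl -n kube-system logs -l name=kata-deploy --tail=20
```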
@@ -39,8 +39,8 @@ can be used as runtime.

Read the following documents to know how to run Kata Containers 2.x with `containerd`.

* [How to use Kata Containers and Containerd](https://github.com/kata-containers/kata-containers/blob/main/docs/how-to/containerd-kata.md)
* [Install Kata Containers with containerd](https://github.com/kata-containers/kata-containers/blob/main/docs/install/container-manager/containerd/containerd-install.md)
* [How to use Kata Containers and Containerd](../how-to/containerd-kata.md)
* [Install Kata Containers with containerd](./container-manager/containerd/containerd-install.md)

## Remove Kata Containers snap package
||||
@@ -203,11 +203,11 @@ is highly recommended. For working with the agent, you may also wish to
|
||||
[enable a debug console][setup-debug-console]
|
||||
to allow you to access the VM environment.
|
||||
|
||||
[enable-full-debug]: https://github.com/kata-containers/kata-containers/blob/main/docs/Developer-Guide.md#enable-full-debug
|
||||
[enable-full-debug]: ./Developer-Guide.md#enable-full-debug
|
||||
[jaeger-all-in-one]: https://www.jaegertracing.io/docs/getting-started/
|
||||
[jaeger-tracing]: https://www.jaegertracing.io
|
||||
[opentelemetry]: https://opentelemetry.io
|
||||
[osbuilder]: https://github.com/kata-containers/kata-containers/blob/main/tools/osbuilder
|
||||
[setup-debug-console]: https://github.com/kata-containers/kata-containers/blob/main/docs/Developer-Guide.md#set-up-a-debug-console
|
||||
[osbuilder]: ../tools/osbuilder
|
||||
[setup-debug-console]: ./Developer-Guide.md#set-up-a-debug-console
|
||||
[trace-forwarder]: /src/tools/trace-forwarder
|
||||
[vsock]: https://wiki.qemu.org/Features/VirtioVsock
|
||||
|
||||
|
Before Width: | Height: | Size: 113 KiB |
|
Before Width: | Height: | Size: 114 KiB |
|
Before Width: | Height: | Size: 100 KiB |
|
Before Width: | Height: | Size: 250 KiB |
|
Before Width: | Height: | Size: 77 KiB |
@@ -231,7 +231,7 @@ $ cp ${GOPATH}/${LINUX_VER}/vmlinux ${KATA_KERNEL_LOCATION}/${KATA_KERNEL_NAME}
|
||||
These instructions build upon the OS builder instructions located in the
|
||||
[Developer Guide](../Developer-Guide.md). At this point it is recommended that
|
||||
[Docker](https://docs.docker.com/engine/install/ubuntu/) is installed first, and
|
||||
then [Kata-deploy](https://github.com/kata-containers/kata-containers/tree/main/tools/packaging/kata-deploy)
|
||||
then [Kata-deploy](../../tools/packaging/kata-deploy)
|
||||
is use to install Kata. This will make sure that the correct `agent` version
|
||||
is installed into the rootfs in the steps below.
|
||||
|
||||
@@ -355,10 +355,10 @@ this small script so that it redirects to be able to use either QEMU or
|
||||
Cloud Hypervisor with Kata.
|
||||
|
||||
```bash
|
||||
$ echo '#!/bin/bash' | sudo tee /usr/local/bin/containerd-shim-kata-qemu-v2
|
||||
$ echo '#!/usr/bin/env bash' | sudo tee /usr/local/bin/containerd-shim-kata-qemu-v2
|
||||
$ echo 'KATA_CONF_FILE=/opt/kata/share/defaults/kata-containers/configuration-qemu.toml /opt/kata/bin/containerd-shim-kata-v2 $@' | sudo tee -a /usr/local/bin/containerd-shim-kata-qemu-v2
|
||||
$ sudo chmod +x /usr/local/bin/containerd-shim-kata-qemu-v2
|
||||
$ echo '#!/bin/bash' | sudo tee /usr/local/bin/containerd-shim-kata-clh-v2
|
||||
$ echo '#!/usr/bin/env bash' | sudo tee /usr/local/bin/containerd-shim-kata-clh-v2
|
||||
$ echo 'KATA_CONF_FILE=/opt/kata/share/defaults/kata-containers/configuration-clh.toml /opt/kata/bin/containerd-shim-kata-v2 $@' | sudo tee -a /usr/local/bin/containerd-shim-kata-clh-v2
|
||||
$ sudo chmod +x /usr/local/bin/containerd-shim-kata-clh-v2
|
||||
```
|
||||
@@ -419,11 +419,11 @@ You might need to disable Docker before initializing Kubernetes. Be aware
|
||||
that the OpenSSL container image built above will need to be exported from
|
||||
Docker and imported into containerd.
|
||||
|
||||
If Kata is installed through [`kata-deploy`](https://github.com/kata-containers/kata-containers/blob/stable-2.0/tools/packaging/kata-deploy/README.md)
|
||||
If Kata is installed through [`kata-deploy`](../../tools/packaging/kata-deploy/README.md)
|
||||
there will be multiple `configuration.toml` files associated with different
|
||||
hypervisors. Rather than add in the custom Kata kernel, Kata rootfs, and
|
||||
kernel modules to each `configuration.toml` as the default, instead use
|
||||
[annotations](https://github.com/kata-containers/kata-containers/blob/stable-2.0/docs/how-to/how-to-load-kernel-modules-with-kata.md)
|
||||
[annotations](../how-to/how-to-load-kernel-modules-with-kata.md)
|
||||
in the Kubernetes YAML file to tell Kata which kernel and rootfs to use. The
|
||||
easy way to do this is to use `kata-deploy` which will install the Kata binaries
|
||||
to `/opt` and properly configure the `/etc/containerd/config.toml` with annotation
|
||||
|
||||
@@ -17,7 +17,7 @@ CONFIG_X86_SGX_KVM=y
```

* Kubernetes cluster configured with:
* [`kata-deploy`](https://github.com/kata-containers/kata-containers/tree/main/tools/packaging/kata-deploy) based Kata Containers installation
* [`kata-deploy`](../../tools/packaging/kata-deploy) based Kata Containers installation
* [Intel SGX Kubernetes device plugin](https://github.com/intel/intel-device-plugins-for-kubernetes/tree/main/cmd/sgx_plugin#deploying-with-pre-built-images)

> Note: Kata Containers supports creating VM sandboxes with Intel® SGX enabled
|
||||
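To show how the two prerequisites above come together, here is a hypothetical pod that runs under a Kata runtime class and requests SGX EPC from the device plugin; the resource name `sgx.intel.com/epc` and the class name `kata-qemu` are assumptions, not taken from this diff:

```bash
cat > sgx-demo.yaml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: sgx-demo
spec:
  runtimeClassName: kata-qemu
  containers:
  - name: app
    image: busybox
    command: ["sleep", "3600"]
    resources:
      limits:
        sgx.intel.com/epc: "512Ki"
EOF
kubectl apply -f sgx-demo.yaml
```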
@@ -1,121 +0,0 @@
# OpenStack Zun DevStack working with Kata Containers

## Introduction

This guide describes how to get Kata Containers to work with OpenStack Zun
using DevStack on Ubuntu 16.04. Running DevStack with this guide will setup
Docker and Clear Containers 2.0, which you replace with Kata Containers.
Currently, the instructions are based on the following links:

- https://docs.openstack.org/zun/latest/contributor/quickstart.html

- https://docs.openstack.org/zun/latest/admin/clear-containers.html

## Install Git to use with DevStack

```sh
$ sudo apt install git
```

## Setup OpenStack DevStack
The following commands will sync DevStack from GitHub, create your
`local.conf` file, assign your host IP to this file, enable Clear
Containers, start DevStack, and set the environment variables to use
`zun` on the command line.

```sh
$ sudo mkdir -p /opt/stack
$ sudo chown $USER /opt/stack
$ git clone https://github.com/openstack-dev/devstack /opt/stack/devstack
$ HOST_IP="$(ip addr | grep 'state UP' -A2 | tail -n1 | awk '{print $2}' | cut -f1 -d'/')"
$ git clone https://github.com/openstack/zun /opt/stack/zun
$ cat /opt/stack/zun/devstack/local.conf.sample \
$ | sed "s/HOST_IP=.*/HOST_IP=$HOST_IP/" \
$ > /opt/stack/devstack/local.conf
$ sed -i "s/KURYR_CAPABILITY_SCOPE=.*/KURYR_CAPABILITY_SCOPE=local/" /opt/stack/devstack/local.conf
$ echo "ENABLE_CLEAR_CONTAINER=true" >> /opt/stack/devstack/local.conf
$ echo "enable_plugin zun-ui https://git.openstack.org/openstack/zun-ui" >> /opt/stack/devstack/local.conf
$ /opt/stack/devstack/stack.sh
$ source /opt/stack/devstack/openrc admin admin
```

The previous commands start OpenStack DevStack with Zun support. You can test
it using `runc` as shown by the following commands to make sure everything
installed correctly and is working.

```sh
$ zun run --name test cirros ping -c 4 8.8.8.8
$ zun list
$ zun logs test
$ zun delete test
```

## Install Kata Containers

Follow [these instructions](../install/README.md)
to install the Kata Containers components.

## Update Docker with new Kata Containers runtime

The following commands replace the Clear Containers 2.x runtime setup with
DevStack, with Kata Containers:

```sh
$ sudo sed -i 's/"cor"/"kata-runtime"/' /etc/docker/daemon.json
$ sudo sed -i 's/"\/usr\/bin\/cc-oci-runtime"/"\/usr\/bin\/kata-runtime"/' /etc/docker/daemon.json
$ sudo systemctl daemon-reload
$ sudo systemctl restart docker
```

## Test that everything works in both Docker and OpenStack Zun

```sh
$ sudo docker run -ti --runtime kata-runtime busybox sh
$ zun run --name kata --runtime kata-runtime cirros ping -c 4 8.8.8.8
$ zun list
$ zun logs kata
$ zun delete kata
```

## Stop DevStack and clean up system (Optional)

```sh
$ /opt/stack/devstack/unstack.sh
$ /opt/stack/devstack/clean.sh
```

## Restart DevStack and reset CC 2.x runtime to `kata-runtime`

Run the following commands if you already setup Kata Containers and want to
restart DevStack:

```sh
$ /opt/stack/devstack/unstack.sh
$ /opt/stack/devstack/clean.sh
$ /opt/stack/devstack/stack.sh
$ source /opt/stack/devstack/openrc admin admin
$ sudo sed -i 's/"cor"/"kata-runtime"/' /etc/docker/daemon.json
$ sudo sed -i 's/"\/usr\/bin\/cc-oci-runtime"/"\/usr\/bin\/kata-runtime"/' /etc/docker/daemon.json
$ sudo systemctl daemon-reload
$ sudo systemctl restart docker
```



Figure 1: Create a BusyBox container image



Figure 2: Select `kata-runtime` to use



Figure 3: Two BusyBox containers successfully launched



Figure 4: Test connectivity between Kata Containers



Figure 5: CLI for Zun

@@ -262,24 +262,11 @@ parts:
|
||||
kata_dir=${GOPATH}/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
|
||||
|
||||
versions_file="${kata_dir}/versions.yaml"
|
||||
# arch-specific definition
|
||||
case "$(uname -m)" in
|
||||
"aarch64")
|
||||
branch="$(${yq} r ${versions_file} assets.hypervisor.qemu.architecture.aarch64.version)"
|
||||
url="$(${yq} r ${versions_file} assets.hypervisor.qemu.url)"
|
||||
commit="$(${yq} r ${versions_file} assets.hypervisor.qemu.architecture.aarch64.commit)"
|
||||
patches_dir="${kata_dir}/tools/packaging/qemu/patches/$(echo ${branch} | sed -e 's/.[[:digit:]]*$//' -e 's/^v//').x"
|
||||
patches_version_dir="${kata_dir}/tools/packaging/qemu/patches/tag_patches/${branch}"
|
||||
;;
|
||||
|
||||
*)
|
||||
branch="$(${yq} r ${versions_file} assets.hypervisor.qemu.version)"
|
||||
url="$(${yq} r ${versions_file} assets.hypervisor.qemu.url)"
|
||||
commit=""
|
||||
patches_dir="${kata_dir}/tools/packaging/qemu/patches/$(echo ${branch} | sed -e 's/.[[:digit:]]*$//' -e 's/^v//').x"
|
||||
patches_version_dir="${kata_dir}/tools/packaging/qemu/patches/tag_patches/${branch}"
|
||||
;;
|
||||
esac
|
||||
branch="$(${yq} r ${versions_file} assets.hypervisor.qemu.version)"
|
||||
url="$(${yq} r ${versions_file} assets.hypervisor.qemu.url)"
|
||||
commit=""
|
||||
patches_dir="${kata_dir}/tools/packaging/qemu/patches/$(echo ${branch} | sed -e 's/.[[:digit:]]*$//' -e 's/^v//').x"
|
||||
patches_version_dir="${kata_dir}/tools/packaging/qemu/patches/tag_patches/${branch}"
|
||||
|
||||
# download source
|
||||
qemu_dir=${SNAPCRAFT_STAGE}/qemu
|
||||
|
||||
117
src/agent/Cargo.lock
generated
@@ -117,6 +117,17 @@ dependencies = [
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "atty"
|
||||
version = "0.2.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
|
||||
dependencies = [
|
||||
"hermit-abi",
|
||||
"libc",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.0.1"
|
||||
@@ -282,6 +293,36 @@ dependencies = [
|
||||
"generic-array",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "3.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b1121e32687f7f90b905d4775273305baa4f32cd418923e9b0fa726533221857"
|
||||
dependencies = [
|
||||
"atty",
|
||||
"bitflags",
|
||||
"clap_derive",
|
||||
"indexmap",
|
||||
"lazy_static",
|
||||
"os_str_bytes",
|
||||
"strsim",
|
||||
"termcolor",
|
||||
"textwrap",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap_derive"
|
||||
version = "3.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7cbcf660a32ad0eda4b11996d8761432f499034f6e685bc6072337db662c85f8"
|
||||
dependencies = [
|
||||
"heck 0.4.0",
|
||||
"proc-macro-error",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "core-foundation"
|
||||
version = "0.9.2"
|
||||
@@ -636,6 +677,12 @@ dependencies = [
|
||||
"unicode-segmentation",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "heck"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
|
||||
|
||||
[[package]]
|
||||
name = "hermit-abi"
|
||||
version = "0.1.19"
|
||||
@@ -915,6 +962,7 @@ dependencies = [
|
||||
"async-trait",
|
||||
"capctl",
|
||||
"cgroups-rs",
|
||||
"clap",
|
||||
"futures",
|
||||
"ipnetwork",
|
||||
"lazy_static",
|
||||
@@ -1401,6 +1449,15 @@ dependencies = [
|
||||
"tokio-stream",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "os_str_bytes"
|
||||
version = "6.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot"
|
||||
version = "0.11.2"
|
||||
@@ -1539,6 +1596,30 @@ version = "0.2.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-error"
|
||||
version = "1.0.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
|
||||
dependencies = [
|
||||
"proc-macro-error-attr",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"version_check 0.9.4",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-error-attr"
|
||||
version = "1.0.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"version_check 0.9.4",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-hack"
|
||||
version = "0.5.19"
|
||||
@@ -1633,7 +1714,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603"
|
||||
dependencies = [
|
||||
"bytes 1.1.0",
|
||||
"heck",
|
||||
"heck 0.3.3",
|
||||
"itertools",
|
||||
"log",
|
||||
"multimap",
|
||||
@@ -1651,7 +1732,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5"
|
||||
dependencies = [
|
||||
"bytes 1.1.0",
|
||||
"heck",
|
||||
"heck 0.3.3",
|
||||
"itertools",
|
||||
"lazy_static",
|
||||
"log",
|
||||
@@ -1745,8 +1826,6 @@ version = "0.1.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"protobuf",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"ttrpc",
|
||||
"ttrpc-codegen",
|
||||
]
|
||||
@@ -2179,6 +2258,12 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
|
||||
|
||||
[[package]]
|
||||
name = "subtle"
|
||||
version = "2.4.1"
|
||||
@@ -2216,6 +2301,21 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "termcolor"
|
||||
version = "1.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
|
||||
dependencies = [
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "textwrap"
|
||||
version = "0.14.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0066c8d12af8b5acd21e00547c3797fde4e8677254a7ee429176ccebbe93dd80"
|
||||
|
||||
[[package]]
|
||||
name = "thiserror"
|
||||
version = "1.0.30"
|
||||
@@ -2857,6 +2957,15 @@ version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-util"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
|
||||
@@ -5,9 +5,9 @@ authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
oci = { path = "oci" }
|
||||
oci = { path = "../libs/oci" }
|
||||
rustjail = { path = "rustjail" }
|
||||
protocols = { path = "protocols" }
|
||||
protocols = { path = "../libs/protocols" }
|
||||
lazy_static = "1.3.0"
|
||||
ttrpc = { version = "0.5.0", features = ["async", "protobuf-codec"], default-features = false }
|
||||
protobuf = "=2.14.0"
|
||||
@@ -60,6 +60,7 @@ vsock-exporter = { path = "vsock-exporter" }
|
||||
# Configuration
|
||||
serde = { version = "1.0.129", features = ["derive"] }
|
||||
toml = "0.5.8"
|
||||
clap = { version = "3.0.1", features = ["derive"] }
|
||||
|
||||
# Image pull/decrypt
|
||||
oci-distribution = "0.7.0"
|
||||
@@ -73,8 +74,6 @@ tempfile = "3.1.0"
|
||||
|
||||
[workspace]
|
||||
members = [
|
||||
"oci",
|
||||
"protocols",
|
||||
"rustjail",
|
||||
]
|
||||
|
||||
|
||||
@@ -216,4 +216,4 @@ codecov-html: check_tarpaulin
|
||||
|
||||
##TARGET generate-protocols: generate/update grpc agent protocols
|
||||
generate-protocols:
|
||||
protocols/hack/update-generated-proto.sh all
|
||||
../libs/protocols/hack/update-generated-proto.sh all
|
||||
|
||||
@@ -60,7 +60,7 @@ $ make -C ${dir}/kata-containers/src/agent

The Kata runtime communicates with the Kata agent using a ttRPC based API protocol.

This ttRPC API is defined by a set of [protocol buffers files](protocols/protos).
This ttRPC API is defined by a set of [protocol buffers files](../libs/protocols/protos).
The protocol files are used to generate the bindings for the following components:

| Component | Language | Generation method `[*]` | Tooling required |
@@ -74,7 +74,7 @@ The protocol files are used to generate the bindings for the following component

If you wish to change the API, these files must be regenerated. Although the
rust code will be automatically generated by the
[build script](protocols/build.rs),
[build script](../libs/protocols/build.rs),
the Golang code generation requires the external `protoc` command to be
available in `$PATH`.

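Since the Golang bindings need an external `protoc` on `$PATH`, a small pre-flight check can save a confusing failure later. A hedged sketch of such a check in Rust follows; it is not part of the Kata build and only illustrates probing for the tool.

```rust
use std::process::Command;

/// Returns the protoc version string if the binary is reachable via $PATH.
fn protoc_version() -> Option<String> {
    let output = Command::new("protoc").arg("--version").output().ok()?;
    if output.status.success() {
        Some(String::from_utf8_lossy(&output.stdout).trim().to_string())
    } else {
        None
    }
}

fn main() {
    match protoc_version() {
        Some(v) => println!("found {}", v),
        None => eprintln!("protoc not found in $PATH; Go code generation will fail"),
    }
}
```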
@@ -8,8 +8,8 @@ edition = "2018"
|
||||
serde = "1.0.91"
|
||||
serde_json = "1.0.39"
|
||||
serde_derive = "1.0.91"
|
||||
oci = { path = "../oci" }
|
||||
protocols = { path ="../protocols" }
|
||||
oci = { path = "../../libs/oci" }
|
||||
protocols = { path ="../../libs/protocols" }
|
||||
caps = "0.5.0"
|
||||
nix = "0.23.0"
|
||||
scopeguard = "1.0.0"
|
||||
|
||||
@@ -685,8 +685,8 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
Mode::from_bits_truncate(0),
|
||||
)?;
|
||||
unistd::close(fifofd)?;
|
||||
let mut buf: &mut [u8] = &mut [0];
|
||||
unistd::read(fd, &mut buf)?;
|
||||
let buf: &mut [u8] = &mut [0];
|
||||
unistd::read(fd, buf)?;
|
||||
}
|
||||
|
||||
// With NoNewPrivileges, we should set seccomp as close to
|
||||
@@ -1482,7 +1482,12 @@ async fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
|
||||
return Err(anyhow!(nix::Error::EINVAL));
|
||||
}
|
||||
|
||||
let args = h.args.clone();
|
||||
let mut args = h.args.clone();
|
||||
// the hook.args[0] is the hook binary name which shouldn't be included
|
||||
// in the Command.args
|
||||
if args.len() > 1 {
|
||||
args.remove(0);
|
||||
}
|
||||
let env: HashMap<String, String> = h
|
||||
.env
|
||||
.iter()
|
||||
@@ -1529,7 +1534,7 @@ async fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
|
||||
// Close stdin so that hook program could receive EOF
|
||||
child.stdin.take();
|
||||
|
||||
// read something from stdout for debug
|
||||
// read something from stdout and stderr for debug
|
||||
let mut out = String::new();
|
||||
child
|
||||
.stdout
|
||||
@@ -1540,6 +1545,16 @@ async fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
|
||||
.unwrap();
|
||||
info!(logger, "child stdout: {}", out.as_str());
|
||||
|
||||
let mut err = String::new();
|
||||
child
|
||||
.stderr
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.read_to_string(&mut err)
|
||||
.await
|
||||
.unwrap();
|
||||
info!(logger, "child stderr: {}", err.as_str());
|
||||
|
||||
match child.wait().await {
|
||||
Ok(exit) => {
|
||||
let code = exit
|
||||
@@ -1547,7 +1562,10 @@ async fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
|
||||
.ok_or_else(|| anyhow!("hook exit status has no status code"))?;
|
||||
|
||||
if code != 0 {
|
||||
error!(logger, "hook {} exit status is {}", &path, code);
|
||||
error!(
|
||||
logger,
|
||||
"hook {} exit status is {}, error message is {}", &path, code, err
|
||||
);
|
||||
return Err(anyhow!(nix::Error::UnknownErrno));
|
||||
}
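The `execute_hook` changes above drop the hook binary name from `args`, capture the child's stderr alongside stdout, and include that stderr text in the error when the hook exits non-zero. A minimal, self-contained sketch of the same pattern using `std::process` (the real agent uses `tokio::process` and slog, and also enforces a timeout, which this sketch omits):

```rust
use std::process::Command;

/// Run a hook-style command: skip args[0] (the binary name), capture output,
/// and surface stderr in the error message on a non-zero exit status.
fn run_hook(path: &str, args: &[String]) -> Result<(), String> {
    let output = Command::new(path)
        // args[0] is conventionally the hook binary name, so it is not passed again.
        .args(args.iter().skip(1))
        .output()
        .map_err(|e| format!("failed to run {}: {}", path, e))?;

    let stderr = String::from_utf8_lossy(&output.stderr);
    match output.status.code() {
        Some(0) => Ok(()),
        Some(code) => Err(format!(
            "hook {} exit status is {}, error message is {}",
            path, code, stderr
        )),
        None => Err(format!("hook {} terminated by a signal", path)),
    }
}

fn main() {
    // `ls /tmp/not-exist` fails and its stderr message surfaces in the error.
    let args = vec!["ls".to_string(), "/tmp/not-exist".to_string()];
    println!("{:?}", run_hook("ls", &args));
}
```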
|
||||
|
||||
@@ -1624,13 +1642,44 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_execute_hook() {
|
||||
let xargs = which("xargs").await;
|
||||
let temp_file = "/tmp/test_execute_hook";
|
||||
|
||||
let touch = which("touch").await;
|
||||
|
||||
defer!(fs::remove_file(temp_file).unwrap(););
|
||||
|
||||
execute_hook(
|
||||
&slog_scope::logger(),
|
||||
&Hook {
|
||||
path: xargs,
|
||||
args: vec![],
|
||||
path: touch,
|
||||
args: vec!["touch".to_string(), temp_file.to_string()],
|
||||
env: vec![],
|
||||
timeout: Some(10),
|
||||
},
|
||||
&OCIState {
|
||||
version: "1.2.3".to_string(),
|
||||
id: "321".to_string(),
|
||||
status: ContainerState::Running,
|
||||
pid: 2,
|
||||
bundle: "".to_string(),
|
||||
annotations: Default::default(),
|
||||
},
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(Path::new(&temp_file).exists(), true);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_execute_hook_with_error() {
|
||||
let ls = which("ls").await;
|
||||
|
||||
let res = execute_hook(
|
||||
&slog_scope::logger(),
|
||||
&Hook {
|
||||
path: ls,
|
||||
args: vec!["ls".to_string(), "/tmp/not-exist".to_string()],
|
||||
env: vec![],
|
||||
timeout: None,
|
||||
},
|
||||
@@ -1643,8 +1692,13 @@ mod tests {
|
||||
annotations: Default::default(),
|
||||
},
|
||||
)
|
||||
.await
|
||||
.unwrap()
|
||||
.await;
|
||||
|
||||
let expected_err = nix::Error::UnknownErrno;
|
||||
assert_eq!(
|
||||
res.unwrap_err().downcast::<nix::Error>().unwrap(),
|
||||
expected_err
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -1655,7 +1709,7 @@ mod tests {
|
||||
&slog_scope::logger(),
|
||||
&Hook {
|
||||
path: sleep,
|
||||
args: vec!["2".to_string()],
|
||||
args: vec!["sleep".to_string(), "2".to_string()],
|
||||
env: vec![],
|
||||
timeout: Some(1),
|
||||
},
|
||||
|
||||
@@ -728,7 +728,7 @@ fn secure_join(rootfs: &str, unsafe_path: &str) -> String {
|
||||
path.push(it);
|
||||
if let Ok(v) = path.read_link() {
|
||||
if v.is_absolute() {
|
||||
path = PathBuf::from(format!("{}{}", rootfs, v.to_str().unwrap().to_string()));
|
||||
path = PathBuf::from(format!("{}{}", rootfs, v.to_str().unwrap()));
|
||||
} else {
|
||||
path.pop();
|
||||
for it in v.iter() {
|
||||
@@ -1385,7 +1385,7 @@ mod tests {
|
||||
|
||||
for (i, t) in tests.iter().enumerate() {
|
||||
// Create a string containing details of the test
|
||||
let msg = format!("test[{}]: {:?}", i, t);
|
||||
let msg = format!("test[{}]: {:?}", i, t.name);
|
||||
|
||||
// if symlink_path is set, the symlink environment should be prepared first
|
||||
if t.symlink_path != "" {
|
||||
|
||||
@@ -973,6 +973,11 @@ mod tests {
|
||||
assert_eq!(d.container_pipe_size, config.container_pipe_size, "{}", msg);
|
||||
assert_eq!(d.server_addr, config.server_addr, "{}", msg);
|
||||
assert_eq!(d.tracing, config.tracing, "{}", msg);
|
||||
assert_eq!(
|
||||
d.container_policy_path, config.container_policy_path,
|
||||
"{}",
|
||||
msg
|
||||
);
|
||||
|
||||
for v in vars_to_unset {
|
||||
env::remove_var(v);
|
||||
|
||||
@@ -51,6 +51,7 @@ pub const DRIVER_VFIO_GK_TYPE: &str = "vfio-gk";
|
||||
// VFIO device to be bound to vfio-pci and made available inside the
|
||||
// container as a VFIO device node
|
||||
pub const DRIVER_VFIO_TYPE: &str = "vfio";
|
||||
pub const DRIVER_OVERLAYFS_TYPE: &str = "overlayfs";
|
||||
|
||||
#[instrument]
|
||||
pub fn online_device(path: &str) -> Result<()> {
|
||||
@@ -592,38 +593,38 @@ fn update_spec_devices(spec: &mut Spec, mut updates: HashMap<&str, DevUpdate>) -
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// update_spec_pci PCI addresses in the OCI spec to be guest addresses
|
||||
// instead of host addresses. It is given a map of (host address =>
|
||||
// guest address)
|
||||
// update_env_pci alters PCI addresses in a set of environment
|
||||
// variables to be correct for the VM instead of the host. It is
|
||||
// given a map of (host address => guest address)
|
||||
#[instrument]
|
||||
fn update_spec_pci(spec: &mut Spec, updates: HashMap<pci::Address, pci::Address>) -> Result<()> {
|
||||
// Correct PCI addresses in the environment
|
||||
if let Some(process) = spec.process.as_mut() {
|
||||
for envvar in process.env.iter_mut() {
|
||||
let eqpos = envvar
|
||||
.find('=')
|
||||
.ok_or_else(|| anyhow!("Malformed OCI env entry {:?}", envvar))?;
|
||||
pub fn update_env_pci(
|
||||
env: &mut [String],
|
||||
pcimap: &HashMap<pci::Address, pci::Address>,
|
||||
) -> Result<()> {
|
||||
for envvar in env {
|
||||
let eqpos = envvar
|
||||
.find('=')
|
||||
.ok_or_else(|| anyhow!("Malformed OCI env entry {:?}", envvar))?;
|
||||
|
||||
let (name, eqval) = envvar.split_at(eqpos);
|
||||
let val = &eqval[1..];
|
||||
let (name, eqval) = envvar.split_at(eqpos);
|
||||
let val = &eqval[1..];
|
||||
|
||||
if !name.starts_with("PCIDEVICE_") {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut guest_addrs = Vec::<String>::new();
|
||||
|
||||
for host_addr in val.split(',') {
|
||||
let host_addr = pci::Address::from_str(host_addr)
|
||||
.with_context(|| format!("Can't parse {} environment variable", name))?;
|
||||
let guest_addr = updates
|
||||
.get(&host_addr)
|
||||
.ok_or_else(|| anyhow!("Unable to translate host PCI address {}", host_addr))?;
|
||||
guest_addrs.push(format!("{}", guest_addr));
|
||||
}
|
||||
|
||||
envvar.replace_range(eqpos + 1.., guest_addrs.join(",").as_str());
|
||||
if !name.starts_with("PCIDEVICE_") {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut guest_addrs = Vec::<String>::new();
|
||||
|
||||
for host_addr in val.split(',') {
|
||||
let host_addr = pci::Address::from_str(host_addr)
|
||||
.with_context(|| format!("Can't parse {} environment variable", name))?;
|
||||
let guest_addr = pcimap
|
||||
.get(&host_addr)
|
||||
.ok_or_else(|| anyhow!("Unable to translate host PCI address {}", host_addr))?;
|
||||
guest_addrs.push(format!("{}", guest_addr));
|
||||
}
|
||||
|
||||
envvar.replace_range(eqpos + 1.., guest_addrs.join(",").as_str());
|
||||
}
|
||||
|
||||
Ok(())
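For context, the renamed `update_env_pci` rewrites `PCIDEVICE_*` entries of the form `NAME=addr1,addr2` so that host PCI addresses become guest addresses. Below is a simplified, standalone sketch of that string rewriting; it uses plain string keys instead of the crate's `pci::Address` type, purely to keep the example self-contained.

```rust
use std::collections::HashMap;

/// Simplified sketch: rewrite host PCI addresses in PCIDEVICE_* env entries
/// to guest addresses, using a host -> guest map of plain address strings.
fn update_env_pci(env: &mut [String], pcimap: &HashMap<String, String>) -> Result<(), String> {
    for envvar in env {
        let eqpos = envvar
            .find('=')
            .ok_or_else(|| format!("Malformed OCI env entry {:?}", envvar))?;
        let (name, eqval) = envvar.split_at(eqpos);
        let val = &eqval[1..];

        // Only device-plugin style variables are rewritten.
        if !name.starts_with("PCIDEVICE_") {
            continue;
        }

        let mut guest_addrs = Vec::new();
        for host_addr in val.split(',') {
            let guest_addr = pcimap
                .get(host_addr)
                .ok_or_else(|| format!("Unable to translate host PCI address {}", host_addr))?;
            guest_addrs.push(guest_addr.clone());
        }

        envvar.replace_range(eqpos + 1.., &guest_addrs.join(","));
    }
    Ok(())
}

fn main() {
    let mut env = vec![
        "PCIDEVICE_x=0000:1a:01.0".to_string(),
        "NOTAPCIDEVICE_blah=abcd:ef:01.0".to_string(),
    ];
    let mut map = HashMap::new();
    map.insert("0000:1a:01.0".to_string(), "0000:01:01.0".to_string());
    update_env_pci(&mut env, &map).unwrap();
    assert_eq!(env[0], "PCIDEVICE_x=0000:01:01.0");
    println!("{:?}", env);
}
```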
|
||||
@@ -768,7 +769,6 @@ pub async fn add_devices(
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
) -> Result<()> {
|
||||
let mut dev_updates = HashMap::<&str, DevUpdate>::with_capacity(devices.len());
|
||||
let mut pci_updates = HashMap::<pci::Address, pci::Address>::new();
|
||||
|
||||
for device in devices.iter() {
|
||||
let update = add_device(device, sandbox).await?;
|
||||
@@ -783,8 +783,9 @@ pub async fn add_devices(
|
||||
));
|
||||
}
|
||||
|
||||
let mut sb = sandbox.lock().await;
|
||||
for (host, guest) in update.pci {
|
||||
if let Some(other_guest) = pci_updates.insert(host, guest) {
|
||||
if let Some(other_guest) = sb.pcimap.insert(host, guest) {
|
||||
return Err(anyhow!(
|
||||
"Conflicting guest address for host device {} ({} versus {})",
|
||||
host,
|
||||
@@ -796,6 +797,9 @@ pub async fn add_devices(
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(process) = spec.process.as_mut() {
|
||||
update_env_pci(&mut process.env, &sandbox.lock().await.pcimap)?
|
||||
}
|
||||
update_spec_devices(spec, dev_updates)
|
||||
}
|
||||
|
||||
@@ -860,7 +864,7 @@ pub fn update_device_cgroup(spec: &mut Spec) -> Result<()> {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::uevent::spawn_test_watcher;
|
||||
use oci::{Linux, Process};
|
||||
use oci::Linux;
|
||||
use std::iter::FromIterator;
|
||||
use tempfile::tempdir;
|
||||
|
||||
@@ -1199,7 +1203,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_spec_pci() {
|
||||
fn test_update_env_pci() {
|
||||
let example_map = [
|
||||
// Each is a host,guest pair of pci addresses
|
||||
("0000:1a:01.0", "0000:01:01.0"),
|
||||
@@ -1209,17 +1213,11 @@ mod tests {
|
||||
("0000:01:01.0", "ffff:02:1f.7"),
|
||||
];
|
||||
|
||||
let mut spec = Spec {
|
||||
process: Some(Process {
|
||||
env: vec![
|
||||
"PCIDEVICE_x=0000:1a:01.0,0000:1b:02.0".to_string(),
|
||||
"PCIDEVICE_y=0000:01:01.0".to_string(),
|
||||
"NOTAPCIDEVICE_blah=abcd:ef:01.0".to_string(),
|
||||
],
|
||||
..Process::default()
|
||||
}),
|
||||
..Spec::default()
|
||||
};
|
||||
let mut env = vec![
|
||||
"PCIDEVICE_x=0000:1a:01.0,0000:1b:02.0".to_string(),
|
||||
"PCIDEVICE_y=0000:01:01.0".to_string(),
|
||||
"NOTAPCIDEVICE_blah=abcd:ef:01.0".to_string(),
|
||||
];
|
||||
|
||||
let pci_fixups = example_map
|
||||
.iter()
|
||||
@@ -1231,10 +1229,9 @@ mod tests {
|
||||
})
|
||||
.collect();
|
||||
|
||||
let res = update_spec_pci(&mut spec, pci_fixups);
|
||||
let res = update_env_pci(&mut env, &pci_fixups);
|
||||
assert!(res.is_ok());
|
||||
|
||||
let env = &spec.process.as_ref().unwrap().env;
|
||||
assert_eq!(env[0], "PCIDEVICE_x=0000:01:01.0,0000:01:02.0");
|
||||
assert_eq!(env[1], "PCIDEVICE_y=ffff:02:1f.7");
|
||||
assert_eq!(env[2], "NOTAPCIDEVICE_blah=abcd:ef:01.0");
|
||||
|
||||
@@ -20,6 +20,7 @@ extern crate scopeguard;
|
||||
extern crate slog;
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use clap::{AppSettings, Parser};
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::socket::{self, AddressFamily, SockAddr, SockFlag, SockType};
|
||||
use nix::unistd::{self, dup, Pid};
|
||||
@@ -81,10 +82,32 @@ const NAME: &str = "kata-agent";
|
||||
|
||||
lazy_static! {
|
||||
static ref AGENT_CONFIG: Arc<RwLock<AgentConfig>> = Arc::new(RwLock::new(
|
||||
// Note: We can't do AgentOpts.parse() here to send through the processed arguments to AgentConfig
|
||||
// clap::Parser::parse() greedily process all command line input including cargo test parameters,
|
||||
// so should only be used inside main.
|
||||
AgentConfig::from_cmdline("/proc/cmdline", env::args().collect()).unwrap()
|
||||
));
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
// The default clap version info doesn't match our form, so we need to override it
|
||||
#[clap(global_setting(AppSettings::DisableVersionFlag))]
|
||||
struct AgentOpts {
|
||||
/// Print the version information
|
||||
#[clap(short, long)]
|
||||
version: bool,
|
||||
#[clap(subcommand)]
|
||||
subcmd: Option<SubCommand>,
|
||||
/// Specify a custom agent config file
|
||||
#[clap(short, long)]
|
||||
config: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
enum SubCommand {
|
||||
Init {},
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
fn announce(logger: &Logger, config: &AgentConfig) {
|
||||
info!(logger, "announce";
|
||||
@@ -256,9 +279,9 @@ async fn real_main() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
}
|
||||
|
||||
fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let args = AgentOpts::parse();
|
||||
|
||||
if args.len() == 2 && args[1] == "--version" {
|
||||
if args.version {
|
||||
println!(
|
||||
"{} version {} (api version: {}, commit version: {}, type: rust)",
|
||||
NAME,
|
||||
@@ -266,11 +289,10 @@ fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
version::API_VERSION,
|
||||
version::VERSION_COMMIT,
|
||||
);
|
||||
|
||||
exit(0);
|
||||
}
|
||||
|
||||
if args.len() == 2 && args[1] == "init" {
|
||||
if let Some(SubCommand::Init {}) = args.subcmd {
|
||||
reset_sigpipe();
|
||||
rustjail::container::init_child();
|
||||
exit(0);
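The manual `env::args()` checks above are replaced by a clap 3 derive parser, so `--version` and the `init` subcommand come from typed fields rather than positional tests. A minimal sketch of that pattern is shown here; it assumes a `clap = { version = "3", features = ["derive"] }` dependency, and the struct and behaviour are illustrative rather than the agent's exact definitions.

```rust
use clap::Parser;

/// Illustrative options struct in the style of the agent's AgentOpts.
#[derive(Parser)]
struct Opts {
    /// Print the version information
    #[clap(short, long)]
    version: bool,
    /// Optional subcommand, e.g. `init`
    #[clap(subcommand)]
    subcmd: Option<SubCommand>,
    /// Specify a custom config file
    #[clap(short, long)]
    config: Option<String>,
}

#[derive(Parser)]
enum SubCommand {
    Init {},
}

fn main() {
    // Parser::parse() consumes std::env::args(), which is why the agent only
    // calls it from main() and not from lazy_static initializers.
    let opts = Opts::parse();

    if opts.version {
        println!("example version 0.0.0");
        return;
    }

    if let Some(SubCommand::Init {}) = opts.subcmd {
        println!("running init path");
        return;
    }

    println!("config file: {:?}", opts.config);
}
```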
|
||||
|
||||
@@ -23,8 +23,8 @@ use regex::Regex;
|
||||
use crate::device::{
|
||||
get_scsi_device_name, get_virtio_blk_pci_device_name, online_device, wait_for_pmem_device,
|
||||
DRIVER_9P_TYPE, DRIVER_BLK_CCW_TYPE, DRIVER_BLK_TYPE, DRIVER_EPHEMERAL_TYPE, DRIVER_LOCAL_TYPE,
|
||||
DRIVER_MMIO_BLK_TYPE, DRIVER_NVDIMM_TYPE, DRIVER_SCSI_TYPE, DRIVER_VIRTIOFS_TYPE,
|
||||
DRIVER_WATCHABLE_BIND_TYPE,
|
||||
DRIVER_MMIO_BLK_TYPE, DRIVER_NVDIMM_TYPE, DRIVER_OVERLAYFS_TYPE, DRIVER_SCSI_TYPE,
|
||||
DRIVER_VIRTIOFS_TYPE, DRIVER_WATCHABLE_BIND_TYPE,
|
||||
};
|
||||
use crate::linux_abi::*;
|
||||
use crate::pci;
|
||||
@@ -130,6 +130,7 @@ pub const STORAGE_HANDLER_LIST: &[&str] = &[
|
||||
DRIVER_9P_TYPE,
|
||||
DRIVER_VIRTIOFS_TYPE,
|
||||
DRIVER_EPHEMERAL_TYPE,
|
||||
DRIVER_OVERLAYFS_TYPE,
|
||||
DRIVER_MMIO_BLK_TYPE,
|
||||
DRIVER_LOCAL_TYPE,
|
||||
DRIVER_SCSI_TYPE,
|
||||
@@ -233,6 +234,15 @@ async fn ephemeral_storage_handler(
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn overlayfs_storage_handler(
|
||||
logger: &Logger,
|
||||
storage: &Storage,
|
||||
_sandbox: Arc<Mutex<Sandbox>>,
|
||||
) -> Result<String> {
|
||||
common_storage_handler(logger, storage)
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn local_storage_handler(
|
||||
_logger: &Logger,
|
||||
@@ -546,6 +556,9 @@ pub async fn add_storages(
|
||||
DRIVER_EPHEMERAL_TYPE => {
|
||||
ephemeral_storage_handler(&logger, &storage, sandbox.clone()).await
|
||||
}
|
||||
DRIVER_OVERLAYFS_TYPE => {
|
||||
overlayfs_storage_handler(&logger, &storage, sandbox.clone()).await
|
||||
}
|
||||
DRIVER_MMIO_BLK_TYPE => {
|
||||
virtiommio_blk_storage_handler(&logger, &storage, sandbox.clone()).await
|
||||
}
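The overlayfs support added above follows the agent's storage-handler pattern: a driver-type constant, a handler function, and a match arm in `add_storages`. A stripped-down sketch of that dispatch shape, synchronous and with stub handlers purely to illustrate the structure:

```rust
// Driver type constants, mirroring the style of DRIVER_OVERLAYFS_TYPE etc.
const DRIVER_EPHEMERAL_TYPE: &str = "ephemeral";
const DRIVER_OVERLAYFS_TYPE: &str = "overlayfs";

#[derive(Debug)]
struct Storage {
    driver: String,
    mount_point: String,
}

// Stub handlers; the real ones mount the storage and return the mount point.
fn ephemeral_storage_handler(storage: &Storage) -> Result<String, String> {
    Ok(storage.mount_point.clone())
}

fn overlayfs_storage_handler(storage: &Storage) -> Result<String, String> {
    Ok(storage.mount_point.clone())
}

// Dispatch on the driver string, as add_storages does for each storage entry.
fn add_storage(storage: &Storage) -> Result<String, String> {
    match storage.driver.as_str() {
        DRIVER_EPHEMERAL_TYPE => ephemeral_storage_handler(storage),
        DRIVER_OVERLAYFS_TYPE => overlayfs_storage_handler(storage),
        other => Err(format!("Unknown storage driver {:?}", other)),
    }
}

fn main() {
    let s = Storage {
        driver: "overlayfs".to_string(),
        mount_point: "/run/kata-containers/sandbox/overlay".to_string(),
    };
    println!("{:?}", add_storage(&s));
}
```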
|
||||
|
||||
@@ -23,12 +23,7 @@ pub const NSTYPEPID: &str = "pid";
|
||||
|
||||
#[instrument]
|
||||
pub fn get_current_thread_ns_path(ns_type: &str) -> String {
|
||||
format!(
|
||||
"/proc/{}/task/{}/ns/{}",
|
||||
getpid().to_string(),
|
||||
gettid().to_string(),
|
||||
ns_type
|
||||
)
|
||||
format!("/proc/{}/task/{}/ns/{}", getpid(), gettid(), ns_type)
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -100,7 +95,7 @@ impl Namespace {
|
||||
self.path = new_ns_path.clone().into_os_string().into_string().unwrap();
|
||||
let hostname = self.hostname.clone();
|
||||
|
||||
let new_thread = tokio::spawn(async move {
|
||||
let new_thread = std::thread::spawn(move || {
|
||||
if let Err(err) = || -> Result<()> {
|
||||
let origin_ns_path = get_current_thread_ns_path(ns_type.get());
|
||||
|
||||
@@ -148,7 +143,7 @@ impl Namespace {
|
||||
});
|
||||
|
||||
new_thread
|
||||
.await
|
||||
.join()
|
||||
.map_err(|e| anyhow!("Failed to join thread {:?}!", e))??;
|
||||
|
||||
Ok(self)
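The switch above from `tokio::spawn`/`.await` to `std::thread::spawn`/`.join()` means the namespace work runs on a dedicated OS thread instead of an async runtime worker, which matters because `unshare`/`setns` affect the calling OS thread. A small sketch of that spawn-and-join shape; the namespace syscalls themselves are omitted and only hinted at in comments.

```rust
use std::thread;

fn setup_persistent_ns(hostname: Option<String>) -> Result<(), String> {
    // Namespace manipulation (unshare/setns, sethostname, bind-mounting the
    // ns file) affects the calling OS thread, so it is done on a dedicated
    // thread rather than on an async runtime worker.
    let new_thread = thread::spawn(move || -> Result<(), String> {
        if let Some(name) = hostname {
            // placeholder for nix::unistd::sethostname(...)
            println!("would set hostname to {}", name);
        }
        // placeholder for unshare(CloneFlags::...) and the bind mount
        Ok(())
    });

    // join() surfaces both panics and the closure's own Result, mirroring the
    // double `??` in the diff above.
    new_thread
        .join()
        .map_err(|e| format!("Failed to join thread {:?}!", e))??;

    Ok(())
}

fn main() {
    setup_persistent_ns(Some("sandbox".to_string())).unwrap();
}
```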
|
||||
|
||||
@@ -14,7 +14,7 @@ use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use ttrpc::{
|
||||
self,
|
||||
error::get_rpc_status as ttrpc_error,
|
||||
error::get_rpc_status,
|
||||
r#async::{Server as TtrpcServer, TtrpcContext},
|
||||
};
|
||||
|
||||
@@ -43,7 +43,9 @@ use nix::sys::stat;
|
||||
use nix::unistd::{self, Pid};
|
||||
use rustjail::process::ProcessOperations;
|
||||
|
||||
use crate::device::{add_devices, get_virtio_blk_pci_device_name, update_device_cgroup};
|
||||
use crate::device::{
|
||||
add_devices, get_virtio_blk_pci_device_name, update_device_cgroup, update_env_pci,
|
||||
};
|
||||
use crate::image_rpc;
|
||||
use crate::linux_abi::*;
|
||||
use crate::metrics::get_metrics;
|
||||
@@ -89,6 +91,13 @@ macro_rules! sl {
|
||||
};
|
||||
}
|
||||
|
||||
// Convenience macro to wrap an error and response to ttrpc client
|
||||
macro_rules! ttrpc_error {
|
||||
($code:path, $err:expr $(,)?) => {
|
||||
get_rpc_status($code, format!("{:?}", $err))
|
||||
};
|
||||
}
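This `ttrpc_error!` macro formats the error with `{:?}`, so call sites can pass any `Debug`-able error directly, which is why the many `e.to_string()` calls below collapse to just `e`. A self-contained sketch of the same macro shape, with stand-in types instead of the real `ttrpc` crate:

```rust
// Stand-ins for ttrpc::Code and ttrpc::error::get_rpc_status, so the sketch
// compiles without the real crate.
#[derive(Debug)]
enum Code {
    Internal,
    InvalidArgument,
}

#[derive(Debug)]
struct RpcStatus {
    code: Code,
    message: String,
}

fn get_rpc_status(code: Code, message: String) -> RpcStatus {
    RpcStatus { code, message }
}

// Same shape as the agent's macro: format the error with {:?} so any
// Debug-able error type can be passed directly.
macro_rules! ttrpc_error {
    ($code:path, $err:expr $(,)?) => {
        get_rpc_status($code, format!("{:?}", $err))
    };
}

fn main() {
    let io_err = std::io::Error::from(std::io::ErrorKind::NotFound);
    let status = ttrpc_error!(Code::Internal, io_err);
    println!("{:?}", status);

    // Plain strings still work too.
    let status = ttrpc_error!(Code::InvalidArgument, "invalid container id");
    println!("{:?}", status);
}
```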
|
||||
|
||||
macro_rules! is_allowed {
|
||||
($req:ident) => {
|
||||
if !AGENT_CONFIG
|
||||
@@ -96,7 +105,7 @@ macro_rules! is_allowed {
|
||||
.await
|
||||
.is_allowed_endpoint($req.descriptor().name())
|
||||
{
|
||||
return Err(ttrpc_error(
|
||||
return Err(ttrpc_error!(
|
||||
ttrpc::Code::UNIMPLEMENTED,
|
||||
format!("{} is blocked", $req.descriptor().name()),
|
||||
));
|
||||
@@ -371,11 +380,14 @@ impl AgentService {
|
||||
let s = self.sandbox.clone();
|
||||
let mut sandbox = s.lock().await;
|
||||
|
||||
let process = req
|
||||
let mut process = req
|
||||
.process
|
||||
.into_option()
|
||||
.ok_or_else(|| anyhow!(nix::Error::EINVAL))?;
|
||||
|
||||
// Apply any necessary corrections for PCI addresses
|
||||
update_env_pci(&mut process.Env, &sandbox.pcimap)?;
|
||||
|
||||
let pipe_size = AGENT_CONFIG.read().await.container_pipe_size;
|
||||
let ocip = rustjail::process_grpc_to_oci(&process);
|
||||
let p = Process::new(&sl!(), &ocip, exec_id.as_str(), false, pipe_size)?;
|
||||
@@ -632,7 +644,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
trace_rpc_call!(ctx, "create_container", req);
|
||||
is_allowed!(req);
|
||||
match self.do_create_container(req).await {
|
||||
Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
|
||||
Err(e) => Err(ttrpc_error!(ttrpc::Code::INTERNAL, e)),
|
||||
Ok(_) => Ok(Empty::new()),
|
||||
}
|
||||
}
|
||||
@@ -645,7 +657,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
trace_rpc_call!(ctx, "start_container", req);
|
||||
is_allowed!(req);
|
||||
match self.do_start_container(req).await {
|
||||
Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
|
||||
Err(e) => Err(ttrpc_error!(ttrpc::Code::INTERNAL, e)),
|
||||
Ok(_) => Ok(Empty::new()),
|
||||
}
|
||||
}
|
||||
@@ -659,7 +671,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
is_allowed!(req);
|
||||
|
||||
match self.do_remove_container(req).await {
|
||||
Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
|
||||
Err(e) => Err(ttrpc_error!(ttrpc::Code::INTERNAL, e)),
|
||||
Ok(_) => Ok(Empty::new()),
|
||||
}
|
||||
}
|
||||
@@ -672,7 +684,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
trace_rpc_call!(ctx, "exec_process", req);
|
||||
is_allowed!(req);
|
||||
match self.do_exec_process(req).await {
|
||||
Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
|
||||
Err(e) => Err(ttrpc_error!(ttrpc::Code::INTERNAL, e)),
|
||||
Ok(_) => Ok(Empty::new()),
|
||||
}
|
||||
}
|
||||
@@ -685,7 +697,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
trace_rpc_call!(ctx, "signal_process", req);
|
||||
is_allowed!(req);
|
||||
match self.do_signal_process(req).await {
|
||||
Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
|
||||
Err(e) => Err(ttrpc_error!(ttrpc::Code::INTERNAL, e)),
|
||||
Ok(_) => Ok(Empty::new()),
|
||||
}
|
||||
}
|
||||
@@ -699,7 +711,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
is_allowed!(req);
|
||||
self.do_wait_process(req)
|
||||
.await
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))
|
||||
}
|
||||
|
||||
async fn update_container(
|
||||
@@ -716,7 +728,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
let mut sandbox = s.lock().await;
|
||||
|
||||
let ctr = sandbox.get_container(&cid).ok_or_else(|| {
|
||||
ttrpc_error(
|
||||
ttrpc_error!(
|
||||
ttrpc::Code::INVALID_ARGUMENT,
|
||||
"invalid container id".to_string(),
|
||||
)
|
||||
@@ -728,7 +740,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
let oci_res = rustjail::resources_grpc_to_oci(res);
|
||||
match ctr.set(oci_res) {
|
||||
Err(e) => {
|
||||
return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()));
|
||||
return Err(ttrpc_error!(ttrpc::Code::INTERNAL, e));
|
||||
}
|
||||
|
||||
Ok(_) => return Ok(resp),
|
||||
@@ -750,14 +762,14 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
let mut sandbox = s.lock().await;
|
||||
|
||||
let ctr = sandbox.get_container(&cid).ok_or_else(|| {
|
||||
ttrpc_error(
|
||||
ttrpc_error!(
|
||||
ttrpc::Code::INVALID_ARGUMENT,
|
||||
"invalid container id".to_string(),
|
||||
)
|
||||
})?;
|
||||
|
||||
ctr.stats()
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))
|
||||
}
|
||||
|
||||
async fn pause_container(
|
||||
@@ -772,14 +784,14 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
let mut sandbox = s.lock().await;
|
||||
|
||||
let ctr = sandbox.get_container(cid).ok_or_else(|| {
|
||||
ttrpc_error(
|
||||
ttrpc_error!(
|
||||
ttrpc::Code::INVALID_ARGUMENT,
|
||||
"invalid container id".to_string(),
|
||||
)
|
||||
})?;
|
||||
|
||||
ctr.pause()
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))?;
|
||||
|
||||
Ok(Empty::new())
|
||||
}
|
||||
@@ -796,14 +808,14 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
let mut sandbox = s.lock().await;
|
||||
|
||||
let ctr = sandbox.get_container(cid).ok_or_else(|| {
|
||||
ttrpc_error(
|
||||
ttrpc_error!(
|
||||
ttrpc::Code::INVALID_ARGUMENT,
|
||||
"invalid container id".to_string(),
|
||||
)
|
||||
})?;
|
||||
|
||||
ctr.resume()
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))?;
|
||||
|
||||
Ok(Empty::new())
|
||||
}
|
||||
@@ -816,7 +828,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
is_allowed!(req);
|
||||
self.do_write_stream(req)
|
||||
.await
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))
|
||||
}
|
||||
|
||||
async fn read_stdout(
|
||||
@@ -827,7 +839,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
is_allowed!(req);
|
||||
self.do_read_stream(req, true)
|
||||
.await
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))
|
||||
}
|
||||
|
||||
async fn read_stderr(
|
||||
@@ -838,7 +850,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
is_allowed!(req);
|
||||
self.do_read_stream(req, false)
|
||||
.await
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))
|
||||
}
|
||||
|
||||
async fn close_stdin(
|
||||
@@ -857,7 +869,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
let p = sandbox
|
||||
.find_container_process(cid.as_str(), eid.as_str())
|
||||
.map_err(|e| {
|
||||
ttrpc_error(
|
||||
ttrpc_error!(
|
||||
ttrpc::Code::INVALID_ARGUMENT,
|
||||
format!("invalid argument: {:?}", e),
|
||||
)
|
||||
@@ -883,7 +895,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
let p = sandbox
|
||||
.find_container_process(cid.as_str(), eid.as_str())
|
||||
.map_err(|e| {
|
||||
ttrpc_error(
|
||||
ttrpc_error!(
|
||||
ttrpc::Code::UNAVAILABLE,
|
||||
format!("invalid argument: {:?}", e),
|
||||
)
|
||||
@@ -900,11 +912,11 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
|
||||
let err = libc::ioctl(fd, TIOCSWINSZ, &win);
|
||||
Errno::result(err).map(drop).map_err(|e| {
|
||||
ttrpc_error(ttrpc::Code::INTERNAL, format!("ioctl error: {:?}", e))
|
||||
ttrpc_error!(ttrpc::Code::INTERNAL, format!("ioctl error: {:?}", e))
|
||||
})?;
|
||||
}
|
||||
} else {
|
||||
return Err(ttrpc_error(ttrpc::Code::UNAVAILABLE, "no tty".to_string()));
|
||||
return Err(ttrpc_error!(ttrpc::Code::UNAVAILABLE, "no tty".to_string()));
|
||||
}
|
||||
|
||||
Ok(Empty::new())
|
||||
@@ -919,7 +931,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
is_allowed!(req);
|
||||
|
||||
let interface = req.interface.into_option().ok_or_else(|| {
|
||||
ttrpc_error(
|
||||
ttrpc_error!(
|
||||
ttrpc::Code::INVALID_ARGUMENT,
|
||||
"empty update interface request".to_string(),
|
||||
)
|
||||
@@ -932,7 +944,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
.update_interface(&interface)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
ttrpc_error(ttrpc::Code::INTERNAL, format!("update interface: {:?}", e))
|
||||
ttrpc_error!(ttrpc::Code::INTERNAL, format!("update interface: {:?}", e))
|
||||
})?;
|
||||
|
||||
Ok(interface)
|
||||
@@ -951,7 +963,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
.into_option()
|
||||
.map(|r| r.Routes.into_vec())
|
||||
.ok_or_else(|| {
|
||||
ttrpc_error(
|
||||
ttrpc_error!(
|
||||
ttrpc::Code::INVALID_ARGUMENT,
|
||||
"empty update routes request".to_string(),
|
||||
)
|
||||
@@ -960,14 +972,14 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
let mut sandbox = self.sandbox.lock().await;
|
||||
|
||||
sandbox.rtnl.update_routes(new_routes).await.map_err(|e| {
|
||||
ttrpc_error(
|
||||
ttrpc_error!(
|
||||
ttrpc::Code::INTERNAL,
|
||||
format!("Failed to update routes: {:?}", e),
|
||||
)
|
||||
})?;
|
||||
|
||||
let list = sandbox.rtnl.list_routes().await.map_err(|e| {
|
||||
ttrpc_error(
|
||||
ttrpc_error!(
|
||||
ttrpc::Code::INTERNAL,
|
||||
format!("Failed to list routes after update: {:?}", e),
|
||||
)
|
||||
@@ -995,7 +1007,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
.list_interfaces()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
ttrpc_error(
|
||||
ttrpc_error!(
|
||||
ttrpc::Code::INTERNAL,
|
||||
format!("Failed to list interfaces: {:?}", e),
|
||||
)
|
||||
@@ -1022,7 +1034,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
.rtnl
|
||||
.list_routes()
|
||||
.await
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, format!("list routes: {:?}", e)))?;
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, format!("list routes: {:?}", e)))?;
|
||||
|
||||
Ok(protocols::agent::Routes {
|
||||
Routes: RepeatedField::from_vec(list),
|
||||
@@ -1062,13 +1074,12 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
}
|
||||
|
||||
for m in req.kernel_modules.iter() {
|
||||
load_kernel_module(m)
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
|
||||
load_kernel_module(m).map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))?;
|
||||
}
|
||||
|
||||
s.setup_shared_namespaces()
|
||||
.await
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))?;
|
||||
}
|
||||
|
||||
match add_storages(sl!(), req.storages.to_vec(), self.sandbox.clone(), None).await {
|
||||
@@ -1077,7 +1088,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
let mut s = sandbox.lock().await;
|
||||
s.mounts = m
|
||||
}
|
||||
Err(e) => return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
|
||||
Err(e) => return Err(ttrpc_error!(ttrpc::Code::INTERNAL, e)),
|
||||
};
|
||||
|
||||
match setup_guest_dns(sl!(), req.dns.to_vec()) {
|
||||
@@ -1090,7 +1101,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
.iter()
|
||||
.map(|dns| s.network.set_dns(dns.to_string()));
|
||||
}
|
||||
Err(e) => return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
|
||||
Err(e) => return Err(ttrpc_error!(ttrpc::Code::INTERNAL, e)),
|
||||
};
|
||||
|
||||
Ok(Empty::new())
|
||||
@@ -1111,7 +1122,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
sandbox
|
||||
.destroy()
|
||||
.await
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))?;
|
||||
// Close get_oom_event connection,
|
||||
// otherwise it will block the shutdown of ttrpc.
|
||||
sandbox.event_tx.take();
|
||||
@@ -1120,13 +1131,13 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
.sender
|
||||
.take()
|
||||
.ok_or_else(|| {
|
||||
ttrpc_error(
|
||||
ttrpc_error!(
|
||||
ttrpc::Code::INTERNAL,
|
||||
"failed to get sandbox sender channel".to_string(),
|
||||
)
|
||||
})?
|
||||
.send(1)
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))?;
|
||||
|
||||
Ok(Empty::new())
|
||||
}
|
||||
@@ -1144,7 +1155,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
.into_option()
|
||||
.map(|n| n.ARPNeighbors.into_vec())
|
||||
.ok_or_else(|| {
|
||||
ttrpc_error(
|
||||
ttrpc_error!(
|
||||
ttrpc::Code::INVALID_ARGUMENT,
|
||||
"empty add arp neighbours request".to_string(),
|
||||
)
|
||||
@@ -1157,7 +1168,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
.add_arp_neighbors(neighs)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
ttrpc_error(
|
||||
ttrpc_error!(
|
||||
ttrpc::Code::INTERNAL,
|
||||
format!("Failed to add ARP neighbours: {:?}", e),
|
||||
)
|
||||
@@ -1178,7 +1189,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
|
||||
sandbox
|
||||
.online_cpu_memory(&req)
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))?;
|
||||
|
||||
Ok(Empty::new())
|
||||
}
|
||||
@@ -1192,7 +1203,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
is_allowed!(req);
|
||||
|
||||
random::reseed_rng(req.data.as_slice())
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))?;
|
||||
|
||||
Ok(Empty::new())
|
||||
}
|
||||
@@ -1215,7 +1226,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
}
|
||||
Err(e) => {
|
||||
info!(sl!(), "fail to get memory info!");
|
||||
return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()));
|
||||
return Err(ttrpc_error!(ttrpc::Code::INTERNAL, e));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1235,7 +1246,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
is_allowed!(req);
|
||||
|
||||
do_mem_hotplug_by_probe(&req.memHotplugProbeAddr)
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))?;
|
||||
|
||||
Ok(Empty::new())
|
||||
}
|
||||
@@ -1249,7 +1260,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
is_allowed!(req);
|
||||
|
||||
do_set_guest_date_time(req.Sec, req.Usec)
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))?;
|
||||
|
||||
Ok(Empty::new())
|
||||
}
|
||||
@@ -1262,7 +1273,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
trace_rpc_call!(ctx, "copy_file", req);
|
||||
is_allowed!(req);
|
||||
|
||||
do_copy_file(&req).map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
|
||||
do_copy_file(&req).map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))?;
|
||||
|
||||
Ok(Empty::new())
|
||||
}
|
||||
@@ -1276,7 +1287,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
is_allowed!(req);
|
||||
|
||||
match get_metrics(&req) {
|
||||
Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
|
||||
Err(e) => Err(ttrpc_error!(ttrpc::Code::INTERNAL, e)),
|
||||
Ok(s) => {
|
||||
let mut metrics = Metrics::new();
|
||||
metrics.set_metrics(s);
|
||||
@@ -1307,7 +1318,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
return Ok(resp);
|
||||
}
|
||||
|
||||
Err(ttrpc_error(ttrpc::Code::INTERNAL, ""))
|
||||
Err(ttrpc_error!(ttrpc::Code::INTERNAL, ""))
|
||||
}
|
||||
|
||||
async fn add_swap(
|
||||
@@ -1320,7 +1331,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
|
||||
do_add_swap(&self.sandbox, &req)
|
||||
.await
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
|
||||
.map_err(|e| ttrpc_error!(ttrpc::Code::INTERNAL, e))?;
|
||||
|
||||
Ok(Empty::new())
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ use crate::mount::{get_mount_fs_type, remove_mounts, TYPE_ROOTFS};
|
||||
use crate::namespace::Namespace;
|
||||
use crate::netlink::Handle;
|
||||
use crate::network::Network;
|
||||
use crate::pci;
|
||||
use crate::uevent::{Uevent, UeventMatcher};
|
||||
use crate::watcher::BindWatcher;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
@@ -56,6 +57,7 @@ pub struct Sandbox {
|
||||
pub event_rx: Arc<Mutex<Receiver<String>>>,
|
||||
pub event_tx: Option<Sender<String>>,
|
||||
pub bind_watcher: BindWatcher,
|
||||
pub pcimap: HashMap<pci::Address, pci::Address>,
|
||||
pub images: HashMap<String, String>,
|
||||
}
|
||||
|
||||
@@ -89,6 +91,7 @@ impl Sandbox {
|
||||
event_rx,
|
||||
event_tx: Some(tx),
|
||||
bind_watcher: BindWatcher::new(),
|
||||
pcimap: HashMap::new(),
|
||||
images: HashMap::new(),
|
||||
})
|
||||
}
|
||||
@@ -438,11 +441,8 @@ fn online_cpus(logger: &Logger, num: i32) -> Result<i32> {
|
||||
r"cpu[0-9]+",
|
||||
num - onlined_count,
|
||||
);
|
||||
if r.is_err() {
|
||||
return r;
|
||||
}
|
||||
|
||||
onlined_count += r.unwrap();
|
||||
onlined_count += r?;
|
||||
if onlined_count == num {
|
||||
info!(logger, "online {} CPU(s) after {} retries", num, i);
|
||||
return Ok(num);
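The hunk above replaces the explicit `if r.is_err() { return r; }` plus `r.unwrap()` pattern with the `?` operator, which propagates the error and yields the success value in one step. A tiny illustration of the equivalence, using a hypothetical `online_cpus_if_possible` helper rather than the agent's real function:

```rust
fn online_cpus_if_possible(requested: i32) -> Result<i32, String> {
    if requested < 0 {
        return Err("requested CPU count must be non-negative".to_string());
    }
    Ok(requested)
}

// Before: check is_err() and return early, then unwrap.
fn total_onlined_verbose(requested: i32) -> Result<i32, String> {
    let mut onlined_count = 0;
    let r = online_cpus_if_possible(requested);
    if r.is_err() {
        return r;
    }
    onlined_count += r.unwrap();
    Ok(onlined_count)
}

// After: `?` propagates the error and hands back the Ok value directly.
fn total_onlined_concise(requested: i32) -> Result<i32, String> {
    let mut onlined_count = 0;
    onlined_count += online_cpus_if_possible(requested)?;
    Ok(onlined_count)
}

fn main() {
    assert_eq!(total_onlined_verbose(2), total_onlined_concise(2));
    assert_eq!(total_onlined_verbose(-1), total_onlined_concise(-1));
    println!("both styles agree");
}
```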
|
||||
|
||||
@@ -90,7 +90,6 @@ mod tests {
|
||||
#[derive(Debug, Default, Clone)]
|
||||
struct BufWriter {
|
||||
data: Arc<Mutex<Vec<u8>>>,
|
||||
slow_write: bool,
|
||||
write_delay: Duration,
|
||||
}
|
||||
|
||||
@@ -98,7 +97,6 @@ mod tests {
|
||||
fn new() -> Self {
|
||||
BufWriter {
|
||||
data: Arc::new(Mutex::new(Vec::<u8>::new())),
|
||||
slow_write: false,
|
||||
write_delay: Duration::new(0, 0),
|
||||
}
|
||||
}
|
||||
@@ -179,45 +177,35 @@ mod tests {
|
||||
#[derive(Debug)]
|
||||
struct TestData {
|
||||
reader_value: String,
|
||||
result: io::Result<u64>,
|
||||
}
|
||||
|
||||
let tests = &[
|
||||
TestData {
|
||||
reader_value: "".into(),
|
||||
result: Ok(0),
|
||||
},
|
||||
TestData {
|
||||
reader_value: "a".into(),
|
||||
result: Ok(1),
|
||||
},
|
||||
TestData {
|
||||
reader_value: "foo".into(),
|
||||
result: Ok(3),
|
||||
},
|
||||
TestData {
|
||||
reader_value: "b".repeat(BUF_SIZE - 1),
|
||||
result: Ok((BUF_SIZE - 1) as u64),
|
||||
},
|
||||
TestData {
|
||||
reader_value: "c".repeat(BUF_SIZE),
|
||||
result: Ok((BUF_SIZE) as u64),
|
||||
},
|
||||
TestData {
|
||||
reader_value: "d".repeat(BUF_SIZE + 1),
|
||||
result: Ok((BUF_SIZE + 1) as u64),
|
||||
},
|
||||
TestData {
|
||||
reader_value: "e".repeat((2 * BUF_SIZE) - 1),
|
||||
result: Ok(((2 * BUF_SIZE) - 1) as u64),
|
||||
},
|
||||
TestData {
|
||||
reader_value: "f".repeat(2 * BUF_SIZE),
|
||||
result: Ok((2 * BUF_SIZE) as u64),
|
||||
},
|
||||
TestData {
|
||||
reader_value: "g".repeat((2 * BUF_SIZE) + 1),
|
||||
result: Ok(((2 * BUF_SIZE) + 1) as u64),
|
||||
},
|
||||
];
|
||||
|
||||
|
||||
@@ -284,7 +284,7 @@ impl Storage {
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to strip prefix: {} - {}",
|
||||
source_file_path.as_ref().display().to_string(),
|
||||
source_file_path.as_ref().display(),
|
||||
&self.source_mount_point.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
1
src/libs/oci/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
Cargo.lock
|
||||
8
src/libs/protocols/.gitignore
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
Cargo.lock
|
||||
src/agent.rs
|
||||
src/agent_ttrpc.rs
|
||||
src/empty.rs
|
||||
src/health.rs
|
||||
src/health_ttrpc.rs
|
||||
src/oci.rs
|
||||
src/types.rs
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# //
|
||||
# // Copyright (c) 2020 Ant Group
|
||||
@@ -47,17 +47,17 @@ show_usage() {
|
||||
}
|
||||
|
||||
generate_go_sources() {
|
||||
local cmd="protoc -I$GOPATH/src:$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos \
|
||||
local cmd="protoc -I$GOPATH/src:$GOPATH/src/github.com/kata-containers/kata-containers/src/libs/protocols/protos \
|
||||
--gogottrpc_out=plugins=ttrpc+fieldpath,\
|
||||
import_path=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc,\
|
||||
\
|
||||
Mgithub.com/kata-containers/kata-containers/src/agent/protocols/protos/types.proto=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols,\
|
||||
Mgithub.com/kata-containers/kata-containers/src/libs/protocols/protos/types.proto=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols,\
|
||||
\
|
||||
Mgithub.com/kata-containers/kata-containers/src/agent/protocols/protos/oci.proto=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc,\
|
||||
Mgithub.com/kata-containers/kata-containers/src/libs/protocols/protos/oci.proto=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc,\
|
||||
\
|
||||
Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor,Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/empty.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/field_mask.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types,Mgoogle/rpc/status.proto=github.com/gogo/googleapis/google/rpc\
|
||||
:$GOPATH/src \
|
||||
$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos/$1"
|
||||
$GOPATH/src/github.com/kata-containers/kata-containers/src/libs/protocols/protos/$1"
|
||||
|
||||
echo $cmd
|
||||
$cmd
|
||||
@@ -104,6 +104,7 @@ KERNELDIR := $(PKGDATADIR)
|
||||
|
||||
IMAGEPATH := $(PKGDATADIR)/$(IMAGENAME)
|
||||
FIRMWAREPATH :=
|
||||
FIRMWAREVOLUMEPATH :=
|
||||
|
||||
# Name of default configuration file the runtime will use.
|
||||
CONFIG_FILE = configuration.toml
|
||||
@@ -187,6 +188,8 @@ DEFVFIOMODE := guest-kernel
|
||||
# Default cgroup model
|
||||
DEFSANDBOXCGROUPONLY ?= false
|
||||
|
||||
DEFSTATICRESOURCEMGMT ?= false
|
||||
|
||||
DEFBINDMOUNTS := []
|
||||
|
||||
# Features
|
||||
@@ -279,6 +282,7 @@ ifneq (,$(FCCMD))
|
||||
# firecracker-specific options (all should be suffixed by "_FC")
|
||||
DEFBLOCKSTORAGEDRIVER_FC := virtio-mmio
|
||||
DEFNETWORKMODEL_FC := tcfilter
|
||||
DEFSTATICRESOURCEMGMT_FC = true
|
||||
KERNELTYPE_FC = uncompressed
|
||||
KERNEL_NAME_FC = $(call MAKE_KERNEL_NAME,$(KERNELTYPE_FC))
|
||||
KERNELPATH_FC = $(KERNELDIR)/$(KERNEL_NAME_FC)
|
||||
@@ -361,7 +365,6 @@ USER_VARS += CONFIG_PATH
|
||||
USER_VARS += CONFIG_QEMU_IN
|
||||
USER_VARS += DESTDIR
|
||||
USER_VARS += DEFAULT_HYPERVISOR
|
||||
USER_VARS += DEFENABLEMSWAP
|
||||
USER_VARS += ACRNCMD
|
||||
USER_VARS += ACRNCTLCMD
|
||||
USER_VARS += ACRNPATH
|
||||
@@ -391,6 +394,7 @@ USER_VARS += KERNELPATH_CLH
|
||||
USER_VARS += KERNELPATH_FC
|
||||
USER_VARS += KERNELVIRTIOFSPATH
|
||||
USER_VARS += FIRMWAREPATH
|
||||
USER_VARS += FIRMWAREVOLUMEPATH
|
||||
USER_VARS += MACHINEACCELERATORS
|
||||
USER_VARS += CPUFEATURES
|
||||
USER_VARS += DEFMACHINETYPE_CLH
|
||||
@@ -450,6 +454,8 @@ USER_VARS += DEFMSIZE9P
|
||||
USER_VARS += DEFENTROPYSOURCE
|
||||
USER_VARS += DEFVALIDENTROPYSOURCES
|
||||
USER_VARS += DEFSANDBOXCGROUPONLY
|
||||
USER_VARS += DEFSTATICRESOURCEMGMT
|
||||
USER_VARS += DEFSTATICRESOURCEMGMT_FC
|
||||
USER_VARS += DEFBINDMOUNTS
|
||||
USER_VARS += DEFVFIOMODE
|
||||
USER_VARS += FEATURE_SELINUX
|
||||
@@ -487,7 +493,11 @@ BUILDFLAGS := -buildmode=pie -mod=vendor ${BUILDTAGS}
|
||||
|
||||
# whether to strip the binary
|
||||
ifeq ($(STRIP),yes)
|
||||
KATA_LDFLAGS := -ldflags "-w -s"
|
||||
KATA_LDFLAGS = -w -s
|
||||
endif
|
||||
|
||||
ifeq ($(ARCH),s390x)
|
||||
KATA_LDFLAGS += -extldflags=-Wl,--s390-pgste
|
||||
endif
|
||||
|
||||
# Return non-empty string if specified directory exists
|
||||
@@ -550,10 +560,10 @@ endef
|
||||
GENERATED_FILES += pkg/katautils/config-settings.go
|
||||
|
||||
$(RUNTIME_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST) | show-summary
|
||||
$(QUIET_BUILD)(cd $(RUNTIME_DIR) && go build $(KATA_LDFLAGS) $(BUILDFLAGS) -o $@ .)
|
||||
$(QUIET_BUILD)(cd $(RUNTIME_DIR) && go build -ldflags "$(KATA_LDFLAGS)" $(BUILDFLAGS) -o $@ .)
|
||||
|
||||
$(SHIMV2_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST)
|
||||
$(QUIET_BUILD)(cd $(SHIMV2_DIR)/ && go build $(KATA_LDFLAGS) $(BUILDFLAGS) -o $@ .)
|
||||
$(QUIET_BUILD)(cd $(SHIMV2_DIR)/ && go build -ldflags "$(KATA_LDFLAGS)" $(BUILDFLAGS) -o $@ .)
|
||||
|
||||
$(MONITOR_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST) .git-commit
|
||||
$(QUIET_BUILD)(cd $(MONITOR_DIR)/ && CGO_ENABLED=0 go build \
|
||||
|
||||
@@ -26,7 +26,7 @@ to work seamlessly with both Docker and Kubernetes respectively.
## License

The code is licensed under an Apache 2.0 license.
See [the license file](https://github.com/kata-containers/kata-containers/blob/main/LICENSE) for further details.
See [the license file](../../LICENSE) for further details.

## Platform support

@@ -60,7 +60,7 @@ $ kata-runtime check

[](https://snapcraft.io/kata-containers)

See the [installation guides](https://github.com/kata-containers/kata-containers/blob/main/docs/install/README.md)
See the [installation guides](../../docs/install/README.md)
available for various operating systems.

## Quick start for developers

@@ -205,7 +205,6 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# `disable_new_netns` conflicts with `internetworking_model=bridged` and `internetworking_model=macvtap`. It works only
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# If you are using docker, `disable_new_netns` only works with `docker run --net=none`
# (default: false)
#disable_new_netns = true
@@ -180,13 +180,6 @@ block_device_driver = "virtio-blk"
# the container network interface
# Options:
#
# - bridged (Deprecated)
# Uses a linux bridge to interconnect the container interface to
# the VM. Works for most cases except macvlan and ipvlan.
# ***NOTE: This feature has been deprecated with plans to remove this
# feature in the future. Please use other network models listed below.
#
#
# - macvtap
# Used when the Container network interface can be bridged using
# macvtap.
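
As an illustrative aside (not part of the diff): with the deprecated `bridged` model removed, the remaining models are selected via `internetworking_model` in the generated `configuration.toml`. A minimal sketch, assuming the `[runtime]` table used by the generated config and an illustrative value:

```toml
# Sketch only: pick one of the supported network models; "bridged" is deprecated.
[runtime]
internetworking_model = "tcfilter"   # alternatives mentioned in the template: "macvtap", "none"
```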
@@ -224,10 +217,9 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
# `disable_new_netns` conflicts with `internetworking_model=bridged` and `internetworking_model=macvtap`. It works only
# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# If you are using docker, `disable_new_netns` only works with `docker run --net=none`
# (default: false)
#disable_new_netns = true
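
For clarity, a hedged example of how the two settings described above interact; the comments in the template remain the authoritative description, and the section name is assumed to be `[runtime]`:

```toml
# Sketch: disable_new_netns only makes sense together with the "none" model,
# since the tap device then lives in the host network namespace.
[runtime]
internetworking_model = "none"
disable_new_netns = true
```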
@@ -239,6 +231,15 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@

# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
# when a hardware architecture or hypervisor solution is utilized which does not support CPU and/or memory hotplug.
# Compatibility for determining appropriate sandbox (VM) size:
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
# does not yet support sandbox sizing annotations.
# - When running single containers using a tool like ctr, container sizing information will be available.
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT@
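
As a hedged illustration of the option introduced above, with a concrete value substituted for the `@DEFSTATICRESOURCEMGMT@` template placeholder (section name assumed to be `[runtime]`):

```toml
# Sketch: size the VM once from the pod/container spec instead of hotplugging
# CPU and memory later; useful where hotplug is unsupported.
[runtime]
static_sandbox_resource_mgmt = true
```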
# If specified, sandbox_bind_mounts identifies host paths to be mounted (ro) into the sandbox's shared path.
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
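
A short sketch of the option described above; the path is hypothetical and only illustrates the list format implied by the `DEFBINDMOUNTS := []` default in the Makefile:

```toml
# Sketch: host paths bind-mounted read-only into the sandbox's shared directory.
# "/opt/kata-extra" is a made-up example path.
[runtime]
sandbox_bind_mounts = ["/opt/kata-extra"]
```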
@@ -153,10 +153,6 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
# command line: intel_iommu=on,iommu=pt
#enable_iommu = true

# Enable swap of vm memory. Default false.
# The behaviour is undefined if mem_prealloc is also set to true
#enable_swap = true

# This option changes the default hypervisor and kernel parameters
# to enable debug output where available.
#
@@ -333,11 +329,10 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# If you are using docker, `disable_new_netns` only works with `docker run --net=none`
# (default: false)
#disable_new_netns = true

# if enable, the runtime will add all the kata processes inside one dedicated cgroup.
# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
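
An illustrative snippet for the option documented above, with a value shown in place of the `@DEFSANDBOXCGROUPONLY@` placeholder (section name assumed to be `[runtime]`):

```toml
# Sketch: keep all Kata processes in a single per-sandbox cgroup and let the
# runtime caller manage or inspect the overall limits.
[runtime]
sandbox_cgroup_only = true
```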
@@ -345,6 +340,15 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@

# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
# when a hardware architecture or hypervisor solution is utilized which does not support CPU and/or memory hotplug.
# Compatibility for determining appropriate sandbox (VM) size:
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
# does not yet support sandbox sizing annotations.
# - When running single containers using a tool like ctr, container sizing information will be available.
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_FC@

# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# they may break compatibility, and are prepared for a big version bump.
@@ -56,6 +56,12 @@ kernel_params = "@KERNELPARAMS@"
# If you want qemu to use the default firmware, leave this option empty
firmware = "@FIRMWAREPATH@"

# Path to the firmware volume.
# firmware TDVF or OVMF can be split into FIRMWARE_VARS.fd (UEFI variables
# as configuration) and FIRMWARE_CODE.fd (UEFI program image). UEFI variables
# can be customized per user while the UEFI code is kept the same.
firmware_volume = "@FIRMWAREVOLUMEPATH@"
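
A hedged example of the split-firmware layout described above; the file names, paths, and the `[hypervisor.qemu]` section name are assumptions for illustration, not values from this PR:

```toml
# Sketch: split UEFI firmware into the program image (code) and per-user variables.
# Paths are hypothetical.
[hypervisor.qemu]
firmware = "/usr/share/ovmf/FIRMWARE_CODE.fd"
firmware_volume = "/usr/share/ovmf/FIRMWARE_VARS.fd"
```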
# Machine accelerators
# comma-separated list of machine accelerators to pass to the hypervisor.
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
@@ -135,6 +141,7 @@ disable_block_device_use = @DEFDISABLEBLOCK@
# Shared file system type:
# - virtio-fs (default)
# - virtio-9p
# - virtio-fs-nydus
shared_fs = "@DEFSHAREDFS_QEMU_VIRTIOFS@"

# Path to vhost-user-fs daemon.
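
For illustration, a sketch showing the newly listed `virtio-fs-nydus` value in place of the `@DEFSHAREDFS_QEMU_VIRTIOFS@` placeholder; the `[hypervisor.qemu]` section name is assumed from the surrounding template:

```toml
# Sketch: select the shared filesystem type; virtio-fs-nydus is the option added here.
[hypervisor.qemu]
shared_fs = "virtio-fs-nydus"
```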
@@ -250,10 +257,6 @@ valid_vhost_user_store_paths = @DEFVALIDVHOSTUSERSTOREPATHS@
# Your distribution recommends: @DEFVALIDFILEMEMBACKENDS@
valid_file_mem_backends = @DEFVALIDFILEMEMBACKENDS@

# Enable swap of vm memory. Default false.
# The behaviour is undefined if mem_prealloc is also set to true
#enable_swap = true

# -pflash can add an image file to the VM. Its arguments should be in the format
# of ["/path/to/flash0.img", "/path/to/flash1.img"]
pflashes = []
@@ -509,7 +512,6 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# If you are using docker, `disable_new_netns` only works with `docker run --net=none`
# (default: false)
#disable_new_netns = true
@@ -521,6 +523,15 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@

# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
# when a hardware architecture or hypervisor solution is utilized which does not support CPU and/or memory hotplug.
# Compatibility for determining appropriate sandbox (VM) size:
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
# does not yet support sandbox sizing annotations.
# - When running single containers using a tool like ctr, container sizing information will be available.
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT@

# If specified, sandbox_bind_mounts identifies host paths to be mounted (ro) into the sandbox's shared path.
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`