CCv0: Merge main into CCv0 branch

Merge remote-tracking branch 'upstream/main' into CCv0

Fixes: #4651
Signed-off-by: Megan Wright <megan.wright@ibm.com>
Author: Megan Wright
Date: 2022-07-13 14:32:08 +01:00
41 changed files with 714 additions and 331 deletions

View File

@@ -10,35 +10,32 @@ jobs:
go-version: [1.17.x] go-version: [1.17.x]
os: [ubuntu-20.04] os: [ubuntu-20.04]
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
# don't run this action on forks
if: github.repository_owner == 'kata-containers'
env: env:
target_branch: ${{ github.base_ref }} target_branch: ${{ github.base_ref }}
steps: steps:
- name: Install Go - name: Install Go
if: github.repository_owner == 'kata-containers'
uses: actions/setup-go@v2 uses: actions/setup-go@v2
with: with:
go-version: ${{ matrix.go-version }} go-version: ${{ matrix.go-version }}
env: env:
GOPATH: ${{ runner.workspace }}/kata-containers GOPATH: ${{ runner.workspace }}/kata-containers
- name: Set env - name: Set env
if: github.repository_owner == 'kata-containers'
run: | run: |
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
- name: Checkout code - name: Checkout code
if: github.repository_owner == 'kata-containers'
uses: actions/checkout@v2 uses: actions/checkout@v2
with: with:
fetch-depth: 0 fetch-depth: 0
path: ./src/github.com/${{ github.repository }} path: ./src/github.com/${{ github.repository }}
- name: Setup - name: Setup
if: github.repository_owner == 'kata-containers'
run: | run: |
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
env: env:
GOPATH: ${{ runner.workspace }}/kata-containers GOPATH: ${{ runner.workspace }}/kata-containers
# docs url alive check # docs url alive check
- name: Docs URL Alive Check - name: Docs URL Alive Check
if: github.repository_owner == 'kata-containers'
run: | run: |
cd ${GOPATH}/src/github.com/${{ github.repository }} && make docs-url-alive-check cd ${GOPATH}/src/github.com/${{ github.repository }} && make docs-url-alive-check

View File

@@ -425,7 +425,7 @@ To build utilizing the same options as Kata, you should make use of the `configu
$ cd $your_qemu_directory $ cd $your_qemu_directory
$ $packaging_dir/scripts/configure-hypervisor.sh kata-qemu > kata.cfg $ $packaging_dir/scripts/configure-hypervisor.sh kata-qemu > kata.cfg
$ eval ./configure "$(cat kata.cfg)" $ eval ./configure "$(cat kata.cfg)"
$ make -j $(nproc) $ make -j $(nproc --ignore=1)
$ sudo -E make install $ sudo -E make install
``` ```

View File

@@ -5,7 +5,7 @@
- [Run Kata containers with `crictl`](run-kata-with-crictl.md) - [Run Kata containers with `crictl`](run-kata-with-crictl.md)
- [Run Kata Containers with Kubernetes](run-kata-with-k8s.md) - [Run Kata Containers with Kubernetes](run-kata-with-k8s.md)
- [How to use Kata Containers and Containerd](containerd-kata.md) - [How to use Kata Containers and Containerd](containerd-kata.md)
- [How to use Kata Containers and CRI (containerd) with Kubernetes](how-to-use-k8s-with-cri-containerd-and-kata.md) - [How to use Kata Containers and containerd with Kubernetes](how-to-use-k8s-with-containerd-and-kata.md)
- [Kata Containers and service mesh for Kubernetes](service-mesh.md) - [Kata Containers and service mesh for Kubernetes](service-mesh.md)
- [How to import Kata Containers logs into Fluentd](how-to-import-kata-logs-with-fluentd.md) - [How to import Kata Containers logs into Fluentd](how-to-import-kata-logs-with-fluentd.md)

View File

@@ -132,9 +132,9 @@ The `RuntimeClass` is suggested.
The following configuration includes two runtime classes: The following configuration includes two runtime classes:
- `plugins.cri.containerd.runtimes.runc`: the `runc` runtime, which is the default. - `plugins.cri.containerd.runtimes.runc`: the `runc` runtime, which is the default.
- `plugins.cri.containerd.runtimes.kata`: The function in containerd (reference [the document here](https://github.com/containerd/containerd/tree/master/runtime/v2#binary-naming)) - `plugins.cri.containerd.runtimes.kata`: The function in containerd (reference [the document here](https://github.com/containerd/containerd/tree/main/runtime/v2#binary-naming))
where the dot-connected string `io.containerd.kata.v2` is translated to `containerd-shim-kata-v2` (i.e. the where the dot-connected string `io.containerd.kata.v2` is translated to `containerd-shim-kata-v2` (i.e. the
binary name of the Kata implementation of [Containerd Runtime V2 (Shim API)](https://github.com/containerd/containerd/tree/master/runtime/v2)). binary name of the Kata implementation of [Containerd Runtime V2 (Shim API)](https://github.com/containerd/containerd/tree/main/runtime/v2)).
```toml ```toml
[plugins.cri.containerd] [plugins.cri.containerd]

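An editorial aside on the naming rule referenced above: containerd derives the shim binary name from the dot-separated runtime type string. A minimal Go sketch of that mapping follows; it illustrates the documented rule, not containerd's actual implementation.

```go
package main

import (
	"fmt"
	"strings"
)

// shimBinaryName sketches containerd's runtime v2 binary-naming rule:
// "io.containerd.<name>.<version>" maps to "containerd-shim-<name>-<version>".
// Illustration only; this is not containerd's real code.
func shimBinaryName(runtimeType string) string {
	parts := strings.Split(runtimeType, ".")
	// e.g. ["io", "containerd", "kata", "v2"] -> "containerd-shim-kata-v2"
	return "containerd-shim-" + parts[len(parts)-2] + "-" + parts[len(parts)-1]
}

func main() {
	fmt.Println(shimBinaryName("io.containerd.kata.v2")) // containerd-shim-kata-v2
}
```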
View File

@@ -19,7 +19,7 @@ Also you should ensure that `kubectl` working correctly.
> **Note**: More information about Kubernetes integrations: > **Note**: More information about Kubernetes integrations:
> - [Run Kata Containers with Kubernetes](run-kata-with-k8s.md) > - [Run Kata Containers with Kubernetes](run-kata-with-k8s.md)
> - [How to use Kata Containers and Containerd](containerd-kata.md) > - [How to use Kata Containers and Containerd](containerd-kata.md)
> - [How to use Kata Containers and CRI (containerd plugin) with Kubernetes](how-to-use-k8s-with-cri-containerd-and-kata.md) > - [How to use Kata Containers and containerd with Kubernetes](how-to-use-k8s-with-containerd-and-kata.md)
## Configure Prometheus ## Configure Prometheus

View File

@@ -1,15 +1,15 @@
# How to use Kata Containers and CRI (containerd plugin) with Kubernetes # How to use Kata Containers and containerd with Kubernetes
This document describes how to set up a single-machine Kubernetes (k8s) cluster. This document describes how to set up a single-machine Kubernetes (k8s) cluster.
The Kubernetes cluster will use the The Kubernetes cluster will use the
[CRI containerd](https://github.com/containerd/containerd/) and [containerd](https://github.com/containerd/containerd/) and
[Kata Containers](https://katacontainers.io) to launch untrusted workloads. [Kata Containers](https://katacontainers.io) to launch workloads.
## Requirements ## Requirements
- Kubernetes, Kubelet, `kubeadm` - Kubernetes, Kubelet, `kubeadm`
- containerd with `cri` plug-in - containerd
- Kata Containers - Kata Containers
> **Note:** For information about the supported versions of these components, > **Note:** For information about the supported versions of these components,
@@ -149,7 +149,7 @@ $ sudo -E kubectl taint nodes --all node-role.kubernetes.io/master-
## Create runtime class for Kata Containers ## Create runtime class for Kata Containers
By default, all pods are created with the default runtime configured in CRI containerd plugin. By default, all pods are created with the default runtime configured in containerd.
From Kubernetes v1.12, users can use [`RuntimeClass`](https://kubernetes.io/docs/concepts/containers/runtime-class/#runtime-class) to specify a different runtime for Pods. From Kubernetes v1.12, users can use [`RuntimeClass`](https://kubernetes.io/docs/concepts/containers/runtime-class/#runtime-class) to specify a different runtime for Pods.
```bash ```bash
@@ -166,7 +166,7 @@ $ sudo -E kubectl apply -f runtime.yaml
## Run pod in Kata Containers ## Run pod in Kata Containers
If a pod has the `runtimeClassName` set to `kata`, the CRI plugin runs the pod with the If a pod has the `runtimeClassName` set to `kata`, the CRI runs the pod with the
[Kata Containers runtime](../../src/runtime/README.md). [Kata Containers runtime](../../src/runtime/README.md).
- Create a pod configuration that uses the Kata Containers runtime - Create a pod configuration that uses the Kata Containers runtime
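The `RuntimeClass` described above can also be created programmatically instead of via `kubectl apply -f runtime.yaml`. A minimal client-go sketch, assuming a working kubeconfig; the handler name `kata` mirrors the containerd runtime configured earlier, and the kubeconfig path is an assumption, not part of this patch.

```go
package main

import (
	"context"
	"fmt"

	nodev1 "k8s.io/api/node/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load the default kubeconfig (~/.kube/config); assumes kubectl already works.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(config)

	rc := &nodev1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "kata"},
		// Handler must match the runtime name registered with containerd,
		// i.e. plugins.cri.containerd.runtimes.kata / io.containerd.kata.v2.
		Handler: "kata",
	}
	if _, err := clientset.NodeV1().RuntimeClasses().Create(context.TODO(), rc, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("RuntimeClass kata created")
}
```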

View File

@@ -40,7 +40,7 @@ See below example config:
ConfigPath = "/opt/kata/share/defaults/kata-containers/configuration.toml" ConfigPath = "/opt/kata/share/defaults/kata-containers/configuration.toml"
``` ```
- [Kata Containers with Containerd and CRI documentation](how-to-use-k8s-with-cri-containerd-and-kata.md) - [How to use Kata Containers and containerd with Kubernetes](how-to-use-k8s-with-containerd-and-kata.md)
- [Containerd CRI config documentation](https://github.com/containerd/containerd/blob/main/docs/cri/config.md) - [Containerd CRI config documentation](https://github.com/containerd/containerd/blob/main/docs/cri/config.md)
#### CRI-O #### CRI-O

View File

@@ -15,7 +15,7 @@ After choosing one CRI implementation, you must make the appropriate configurati
to ensure it integrates with Kata Containers. to ensure it integrates with Kata Containers.
Kata Containers 1.5 introduced the `shimv2` for containerd 1.2.0, reducing the components Kata Containers 1.5 introduced the `shimv2` for containerd 1.2.0, reducing the components
required to spawn pods and containers, and this is the preferred way to run Kata Containers with Kubernetes ([as documented here](../how-to/how-to-use-k8s-with-cri-containerd-and-kata.md#configure-containerd-to-use-kata-containers)). required to spawn pods and containers, and this is the preferred way to run Kata Containers with Kubernetes ([as documented here](../how-to/how-to-use-k8s-with-containerd-and-kata.md#configure-containerd-to-use-kata-containers)).
An equivalent shim implementation for CRI-O is planned. An equivalent shim implementation for CRI-O is planned.
@@ -57,7 +57,7 @@ content shown below:
To customize containerd to select Kata Containers runtime, follow our To customize containerd to select Kata Containers runtime, follow our
"Configure containerd to use Kata Containers" internal documentation "Configure containerd to use Kata Containers" internal documentation
[here](../how-to/how-to-use-k8s-with-cri-containerd-and-kata.md#configure-containerd-to-use-kata-containers). [here](../how-to/how-to-use-k8s-with-containerd-and-kata.md#configure-containerd-to-use-kata-containers).
## Install Kubernetes ## Install Kubernetes
@@ -85,7 +85,7 @@ Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --runtime-request-tim
Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock" Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
``` ```
For more information about containerd see the "Configure Kubelet to use containerd" For more information about containerd see the "Configure Kubelet to use containerd"
documentation [here](../how-to/how-to-use-k8s-with-cri-containerd-and-kata.md#configure-kubelet-to-use-containerd). documentation [here](../how-to/how-to-use-k8s-with-containerd-and-kata.md#configure-kubelet-to-use-containerd).
## Run a Kubernetes pod with Kata Containers ## Run a Kubernetes pod with Kata Containers

View File

@@ -279,8 +279,8 @@ $ export KERNEL_EXTRAVERSION=$(awk '/^EXTRAVERSION =/{print $NF}' $GOPATH/$LINUX
$ export KERNEL_ROOTFS_DIR=${KERNEL_MAJOR_VERSION}.${KERNEL_PATHLEVEL}.${KERNEL_SUBLEVEL}${KERNEL_EXTRAVERSION} $ export KERNEL_ROOTFS_DIR=${KERNEL_MAJOR_VERSION}.${KERNEL_PATHLEVEL}.${KERNEL_SUBLEVEL}${KERNEL_EXTRAVERSION}
$ cd $QAT_SRC $ cd $QAT_SRC
$ KERNEL_SOURCE_ROOT=$GOPATH/$LINUX_VER ./configure --enable-icp-sriov=guest $ KERNEL_SOURCE_ROOT=$GOPATH/$LINUX_VER ./configure --enable-icp-sriov=guest
$ sudo -E make all -j$(nproc) $ sudo -E make all -j $(nproc ${CI:+--ignore 1})
$ sudo -E make INSTALL_MOD_PATH=$ROOTFS_DIR qat-driver-install -j$(nproc) $ sudo -E make INSTALL_MOD_PATH=$ROOTFS_DIR qat-driver-install -j $(nproc ${CI:+--ignore 1})
``` ```
The `usdm_drv` module also needs to be copied into the rootfs modules path and The `usdm_drv` module also needs to be copied into the rootfs modules path and

View File

@@ -193,7 +193,7 @@ parts:
# Setup and build kernel # Setup and build kernel
./build-kernel.sh -v "${kernel_version}" -d setup ./build-kernel.sh -v "${kernel_version}" -d setup
cd ${kernel_dir_prefix}* cd ${kernel_dir_prefix}*
make -j $(($(nproc)-1)) EXTRAVERSION=".container" make -j $(nproc ${CI:+--ignore 1}) EXTRAVERSION=".container"
kernel_suffix="${kernel_version}.container" kernel_suffix="${kernel_version}.container"
kata_kernel_dir="${SNAPCRAFT_PART_INSTALL}/usr/share/kata-containers" kata_kernel_dir="${SNAPCRAFT_PART_INSTALL}/usr/share/kata-containers"
@@ -282,7 +282,7 @@ parts:
esac esac
# build and install # build and install
make -j $(($(nproc)-1)) make -j $(nproc ${CI:+--ignore 1})
make install DESTDIR="${SNAPCRAFT_PART_INSTALL}" make install DESTDIR="${SNAPCRAFT_PART_INSTALL}"
prime: prime:
- -snap/ - -snap/

View File

@@ -118,6 +118,9 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_ACRN@"
# but it will not abort container execution. # but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks" #guest_hook_path = "/usr/share/oci/hooks"
# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
[agent.@PROJECT_TYPE@] [agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages. # If enabled, make the agent display debug-level messages.
# (default: disabled) # (default: disabled)
@@ -186,9 +189,6 @@ internetworking_model="@DEFNETWORKMODEL_ACRN@"
# (default: true) # (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
# If enabled, the runtime will create opentracing.io traces and spans. # If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started). # (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled) # (default: disabled)

View File

@@ -39,6 +39,9 @@ image = "@IMAGEPATH@"
# Default false # Default false
# confidential_guest = true # confidential_guest = true
# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
# Path to the firmware. # Path to the firmware.
# If you want Cloud Hypervisor to use a specific firmware, set its path below. # If you want Cloud Hypervisor to use a specific firmware, set its path below.
# This is option is only used when confidential_guest is enabled. # This is option is only used when confidential_guest is enabled.
@@ -319,9 +322,6 @@ internetworking_model="@DEFNETWORKMODEL_CLH@"
# (default: true) # (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
# If enabled, the runtime will create opentracing.io traces and spans. # If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started). # (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled) # (default: disabled)

View File

@@ -221,6 +221,9 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# Default 0-sized value means unlimited rate. # Default 0-sized value means unlimited rate.
#tx_rate_limiter_max_rate = 0 #tx_rate_limiter_max_rate = 0
# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
[factory] [factory]
# VM templating support. Once enabled, new VMs are created from template # VM templating support. Once enabled, new VMs are created from template
# using vm cloning. They will share the same initial kernel, initramfs and # using vm cloning. They will share the same initial kernel, initramfs and
@@ -309,9 +312,6 @@ internetworking_model="@DEFNETWORKMODEL_FC@"
# (default: true) # (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
# If enabled, the runtime will create opentracing.io traces and spans. # If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started). # (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled) # (default: disabled)

View File

@@ -406,6 +406,9 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# use legacy serial for guest console if available and implemented for architecture. Default false # use legacy serial for guest console if available and implemented for architecture. Default false
#use_legacy_serial = true #use_legacy_serial = true
# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
[factory] [factory]
# VM templating support. Once enabled, new VMs are created from template # VM templating support. Once enabled, new VMs are created from template
# using vm cloning. They will share the same initial kernel, initramfs and # using vm cloning. They will share the same initial kernel, initramfs and
@@ -523,9 +526,6 @@ internetworking_model="@DEFNETWORKMODEL_QEMU@"
# (default: true) # (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
# If enabled, the runtime will create opentracing.io traces and spans. # If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started). # (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled) # (default: disabled)

View File

@@ -669,6 +669,7 @@ func newFirecrackerHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
RxRateLimiterMaxRate: rxRateLimiterMaxRate, RxRateLimiterMaxRate: rxRateLimiterMaxRate,
TxRateLimiterMaxRate: txRateLimiterMaxRate, TxRateLimiterMaxRate: txRateLimiterMaxRate,
EnableAnnotations: h.EnableAnnotations, EnableAnnotations: h.EnableAnnotations,
DisableSeLinux: h.DisableSeLinux,
}, nil }, nil
} }
@@ -805,6 +806,7 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
GuestSwap: h.GuestSwap, GuestSwap: h.GuestSwap,
Rootless: h.Rootless, Rootless: h.Rootless,
LegacySerial: h.LegacySerial, LegacySerial: h.LegacySerial,
DisableSeLinux: h.DisableSeLinux,
}, nil }, nil
} }
@@ -869,6 +871,7 @@ func newAcrnHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
BlockDeviceDriver: blockDriver, BlockDeviceDriver: blockDriver,
DisableVhostNet: h.DisableVhostNet, DisableVhostNet: h.DisableVhostNet,
GuestHookPath: h.guestHookPath(), GuestHookPath: h.guestHookPath(),
DisableSeLinux: h.DisableSeLinux,
EnableAnnotations: h.EnableAnnotations, EnableAnnotations: h.EnableAnnotations,
}, nil }, nil
} }
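Tying the Go change above to the configuration-template moves earlier in this commit: `disable_selinux` now lives under the `[hypervisor.*]` section and is plumbed into the hypervisor configuration. Below is a self-contained sketch of that mapping with trimmed stand-in types; the struct names, TOML tags, and the use of `BurntSushi/toml` here are illustrative, not the runtime's actual definitions.

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// hypervisor mirrors a trimmed slice of the runtime's TOML hypervisor section.
type hypervisor struct {
	Path           string `toml:"path"`
	DisableSeLinux bool   `toml:"disable_selinux"`
}

// hypervisorConfig stands in for virtcontainers' HypervisorConfig,
// reduced to the field relevant to this change.
type hypervisorConfig struct {
	HypervisorPath string
	DisableSeLinux bool
}

func main() {
	// disable_selinux now sits under [hypervisor.*], not [runtime].
	const cfg = `
[hypervisor.qemu]
path = "/usr/bin/qemu-system-x86_64"
disable_selinux = true
`
	var decoded struct {
		Hypervisor map[string]hypervisor `toml:"hypervisor"`
	}
	if _, err := toml.Decode(cfg, &decoded); err != nil {
		panic(err)
	}
	h := decoded.Hypervisor["qemu"]
	hc := hypervisorConfig{HypervisorPath: h.Path, DisableSeLinux: h.DisableSeLinux}
	fmt.Printf("%+v\n", hc)
}
```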

View File

@@ -1499,15 +1499,12 @@ func (clh *cloudHypervisor) addVolume(volume types.Volume) error {
return err return err
} }
// disable DAX if VirtioFSCacheSize is 0
dax := clh.config.VirtioFSCacheSize != 0
// numQueues and queueSize are required, let's use the // numQueues and queueSize are required, let's use the
// default values defined by cloud-hypervisor // default values defined by cloud-hypervisor
numQueues := int32(1) numQueues := int32(1)
queueSize := int32(1024) queueSize := int32(1024)
fs := chclient.NewFsConfig(volume.MountTag, vfsdSockPath, numQueues, queueSize, dax, int64(clh.config.VirtioFSCacheSize<<20)) fs := chclient.NewFsConfig(volume.MountTag, vfsdSockPath, numQueues, queueSize)
clh.vmconfig.Fs = &[]chclient.FsConfig{*fs} clh.vmconfig.Fs = &[]chclient.FsConfig{*fs}
clh.Logger().Debug("Adding share volume to hypervisor: ", volume.MountTag) clh.Logger().Debug("Adding share volume to hypervisor: ", volume.MountTag)
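For reference, the reduced `FsConfig` shape that `addVolume` now builds, as a self-contained sketch; the struct is a local stand-in for the generated client model, and the mount tag and socket path are made-up values.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// FsConfig mirrors the generated cloud-hypervisor model after this patch:
// the dax and cache_size fields (and constructor arguments) are gone.
type FsConfig struct {
	Tag       string `json:"tag"`
	Socket    string `json:"socket"`
	NumQueues int32  `json:"num_queues"`
	QueueSize int32  `json:"queue_size"`
}

// NewFsConfig mirrors the new four-argument constructor used by addVolume above.
func NewFsConfig(tag, socket string, numQueues, queueSize int32) *FsConfig {
	return &FsConfig{Tag: tag, Socket: socket, NumQueues: numQueues, QueueSize: queueSize}
}

func main() {
	// Same defaults as addVolume: one request queue with a depth of 1024.
	fs := NewFsConfig("kataShared", "/run/vc/virtiofsd.sock", 1, 1024)
	out, _ := json.Marshal(fs)
	fmt.Println(string(out))
}
```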

View File

@@ -39,6 +39,7 @@ docs/TokenBucket.md
docs/VdpaConfig.md docs/VdpaConfig.md
docs/VmAddDevice.md docs/VmAddDevice.md
docs/VmConfig.md docs/VmConfig.md
docs/VmCoredumpData.md
docs/VmInfo.md docs/VmInfo.md
docs/VmRemoveDevice.md docs/VmRemoveDevice.md
docs/VmResize.md docs/VmResize.md
@@ -81,6 +82,7 @@ model_token_bucket.go
model_vdpa_config.go model_vdpa_config.go
model_vm_add_device.go model_vm_add_device.go
model_vm_config.go model_vm_config.go
model_vm_coredump_data.go
model_vm_info.go model_vm_info.go
model_vm_remove_device.go model_vm_remove_device.go
model_vm_resize.go model_vm_resize.go

View File

@@ -94,6 +94,7 @@ Class | Method | HTTP request | Description
*DefaultApi* | [**VmAddPmemPut**](docs/DefaultApi.md#vmaddpmemput) | **Put** /vm.add-pmem | Add a new pmem device to the VM *DefaultApi* | [**VmAddPmemPut**](docs/DefaultApi.md#vmaddpmemput) | **Put** /vm.add-pmem | Add a new pmem device to the VM
*DefaultApi* | [**VmAddVdpaPut**](docs/DefaultApi.md#vmaddvdpaput) | **Put** /vm.add-vdpa | Add a new vDPA device to the VM *DefaultApi* | [**VmAddVdpaPut**](docs/DefaultApi.md#vmaddvdpaput) | **Put** /vm.add-vdpa | Add a new vDPA device to the VM
*DefaultApi* | [**VmAddVsockPut**](docs/DefaultApi.md#vmaddvsockput) | **Put** /vm.add-vsock | Add a new vsock device to the VM *DefaultApi* | [**VmAddVsockPut**](docs/DefaultApi.md#vmaddvsockput) | **Put** /vm.add-vsock | Add a new vsock device to the VM
*DefaultApi* | [**VmCoredumpPut**](docs/DefaultApi.md#vmcoredumpput) | **Put** /vm.coredump | Takes a VM coredump.
*DefaultApi* | [**VmCountersGet**](docs/DefaultApi.md#vmcountersget) | **Get** /vm.counters | Get counters from the VM *DefaultApi* | [**VmCountersGet**](docs/DefaultApi.md#vmcountersget) | **Get** /vm.counters | Get counters from the VM
*DefaultApi* | [**VmInfoGet**](docs/DefaultApi.md#vminfoget) | **Get** /vm.info | Returns general information about the cloud-hypervisor Virtual Machine (VM) instance. *DefaultApi* | [**VmInfoGet**](docs/DefaultApi.md#vminfoget) | **Get** /vm.info | Returns general information about the cloud-hypervisor Virtual Machine (VM) instance.
*DefaultApi* | [**VmReceiveMigrationPut**](docs/DefaultApi.md#vmreceivemigrationput) | **Put** /vm.receive-migration | Receive a VM migration from URL *DefaultApi* | [**VmReceiveMigrationPut**](docs/DefaultApi.md#vmreceivemigrationput) | **Put** /vm.receive-migration | Receive a VM migration from URL
@@ -140,6 +141,7 @@ Class | Method | HTTP request | Description
- [VdpaConfig](docs/VdpaConfig.md) - [VdpaConfig](docs/VdpaConfig.md)
- [VmAddDevice](docs/VmAddDevice.md) - [VmAddDevice](docs/VmAddDevice.md)
- [VmConfig](docs/VmConfig.md) - [VmConfig](docs/VmConfig.md)
- [VmCoredumpData](docs/VmCoredumpData.md)
- [VmInfo](docs/VmInfo.md) - [VmInfo](docs/VmInfo.md)
- [VmRemoveDevice](docs/VmRemoveDevice.md) - [VmRemoveDevice](docs/VmRemoveDevice.md)
- [VmResize](docs/VmResize.md) - [VmResize](docs/VmResize.md)

View File

@@ -347,6 +347,23 @@ paths:
description: The VM instance could not be snapshotted because it is not description: The VM instance could not be snapshotted because it is not
booted. booted.
summary: Returns a VM snapshot. summary: Returns a VM snapshot.
/vm.coredump:
put:
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/VmCoredumpData'
description: The coredump configuration
required: true
responses:
"204":
description: The VM instance was successfully coredumped.
"404":
description: The VM instance could not be coredumped because it is not created.
"405":
description: The VM instance could not be coredumped because it is not booted.
summary: Takes a VM coredump.
/vm.restore: /vm.restore:
put: put:
requestBody: requestBody:
@@ -408,7 +425,7 @@ components:
VmInfo: VmInfo:
description: Virtual Machine information description: Virtual Machine information
example: example:
memory_actual_size: 7 memory_actual_size: 3
state: Created state: Created
config: config:
console: console:
@@ -496,6 +513,7 @@ components:
cpus: cpus:
features: features:
amx: true amx: true
kvm_hyperv: false
topology: topology:
dies_per_package: 5 dies_per_package: 5
threads_per_core: 1 threads_per_core: 1
@@ -514,58 +532,58 @@ components:
- 3 - 3
- 3 - 3
devices: devices:
- pci_segment: 3 - pci_segment: 6
path: path path: path
iommu: false iommu: false
id: id id: id
- pci_segment: 3 - pci_segment: 6
path: path path: path
iommu: false iommu: false
id: id id: id
kernel: kernel:
path: path path: path
vdpa: vdpa:
- pci_segment: 7 - pci_segment: 3
path: path path: path
num_queues: 3 num_queues: 3
iommu: false iommu: false
id: id id: id
- pci_segment: 7 - pci_segment: 3
path: path path: path
num_queues: 3 num_queues: 3
iommu: false iommu: false
id: id id: id
numa: numa:
- distances: - distances:
- distance: 7 - distance: 8
destination: 8 destination: 4
- distance: 7 - distance: 8
destination: 8 destination: 4
cpus: cpus:
- 4 - 0
- 4 - 0
sgx_epc_sections: sgx_epc_sections:
- sgx_epc_sections - sgx_epc_sections
- sgx_epc_sections - sgx_epc_sections
memory_zones: memory_zones:
- memory_zones - memory_zones
- memory_zones - memory_zones
guest_numa_id: 0 guest_numa_id: 6
- distances: - distances:
- distance: 7 - distance: 8
destination: 8 destination: 4
- distance: 7 - distance: 8
destination: 8 destination: 4
cpus: cpus:
- 4 - 0
- 4 - 0
sgx_epc_sections: sgx_epc_sections:
- sgx_epc_sections - sgx_epc_sections
- sgx_epc_sections - sgx_epc_sections
memory_zones: memory_zones:
- memory_zones - memory_zones
- memory_zones - memory_zones
guest_numa_id: 0 guest_numa_id: 6
tdx: tdx:
firmware: firmware firmware: firmware
rng: rng:
@@ -573,30 +591,26 @@ components:
src: /dev/urandom src: /dev/urandom
sgx_epc: sgx_epc:
- prefault: false - prefault: false
size: 6 size: 7
id: id id: id
- prefault: false - prefault: false
size: 6 size: 7
id: id id: id
fs: fs:
- pci_segment: 6 - pci_segment: 6
num_queues: 1 num_queues: 1
queue_size: 2 queue_size: 2
cache_size: 6
dax: true
tag: tag tag: tag
socket: socket socket: socket
id: id id: id
- pci_segment: 6 - pci_segment: 6
num_queues: 1 num_queues: 1
queue_size: 2 queue_size: 2
cache_size: 6
dax: true
tag: tag tag: tag
socket: socket socket: socket
id: id id: id
vsock: vsock:
pci_segment: 7 pci_segment: 0
iommu: false iommu: false
socket: socket socket: socket
id: id id: id
@@ -605,20 +619,18 @@ components:
iommu_segments: iommu_segments:
- 3 - 3
- 3 - 3
num_pci_segments: 3 num_pci_segments: 7
serial_number: serial_number serial_number: serial_number
pmem: pmem:
- pci_segment: 6 - pci_segment: 5
mergeable: false
file: file file: file
size: 5 size: 6
iommu: false iommu: false
id: id id: id
discard_writes: false discard_writes: false
- pci_segment: 6 - pci_segment: 5
mergeable: false
file: file file: file
size: 5 size: 6
iommu: false iommu: false
id: id id: id
discard_writes: false discard_writes: false
@@ -839,6 +851,7 @@ components:
cpus: cpus:
features: features:
amx: true amx: true
kvm_hyperv: false
topology: topology:
dies_per_package: 5 dies_per_package: 5
threads_per_core: 1 threads_per_core: 1
@@ -857,58 +870,58 @@ components:
- 3 - 3
- 3 - 3
devices: devices:
- pci_segment: 3 - pci_segment: 6
path: path path: path
iommu: false iommu: false
id: id id: id
- pci_segment: 3 - pci_segment: 6
path: path path: path
iommu: false iommu: false
id: id id: id
kernel: kernel:
path: path path: path
vdpa: vdpa:
- pci_segment: 7 - pci_segment: 3
path: path path: path
num_queues: 3 num_queues: 3
iommu: false iommu: false
id: id id: id
- pci_segment: 7 - pci_segment: 3
path: path path: path
num_queues: 3 num_queues: 3
iommu: false iommu: false
id: id id: id
numa: numa:
- distances: - distances:
- distance: 7 - distance: 8
destination: 8 destination: 4
- distance: 7 - distance: 8
destination: 8 destination: 4
cpus: cpus:
- 4 - 0
- 4 - 0
sgx_epc_sections: sgx_epc_sections:
- sgx_epc_sections - sgx_epc_sections
- sgx_epc_sections - sgx_epc_sections
memory_zones: memory_zones:
- memory_zones - memory_zones
- memory_zones - memory_zones
guest_numa_id: 0 guest_numa_id: 6
- distances: - distances:
- distance: 7 - distance: 8
destination: 8 destination: 4
- distance: 7 - distance: 8
destination: 8 destination: 4
cpus: cpus:
- 4 - 0
- 4 - 0
sgx_epc_sections: sgx_epc_sections:
- sgx_epc_sections - sgx_epc_sections
- sgx_epc_sections - sgx_epc_sections
memory_zones: memory_zones:
- memory_zones - memory_zones
- memory_zones - memory_zones
guest_numa_id: 0 guest_numa_id: 6
tdx: tdx:
firmware: firmware firmware: firmware
rng: rng:
@@ -916,30 +929,26 @@ components:
src: /dev/urandom src: /dev/urandom
sgx_epc: sgx_epc:
- prefault: false - prefault: false
size: 6 size: 7
id: id id: id
- prefault: false - prefault: false
size: 6 size: 7
id: id id: id
fs: fs:
- pci_segment: 6 - pci_segment: 6
num_queues: 1 num_queues: 1
queue_size: 2 queue_size: 2
cache_size: 6
dax: true
tag: tag tag: tag
socket: socket socket: socket
id: id id: id
- pci_segment: 6 - pci_segment: 6
num_queues: 1 num_queues: 1
queue_size: 2 queue_size: 2
cache_size: 6
dax: true
tag: tag tag: tag
socket: socket socket: socket
id: id id: id
vsock: vsock:
pci_segment: 7 pci_segment: 0
iommu: false iommu: false
socket: socket socket: socket
id: id id: id
@@ -948,20 +957,18 @@ components:
iommu_segments: iommu_segments:
- 3 - 3
- 3 - 3
num_pci_segments: 3 num_pci_segments: 7
serial_number: serial_number serial_number: serial_number
pmem: pmem:
- pci_segment: 6 - pci_segment: 5
mergeable: false
file: file file: file
size: 5 size: 6
iommu: false iommu: false
id: id id: id
discard_writes: false discard_writes: false
- pci_segment: 6 - pci_segment: 5
mergeable: false
file: file file: file
size: 5 size: 6
iommu: false iommu: false
id: id id: id
discard_writes: false discard_writes: false
@@ -1125,6 +1132,7 @@ components:
example: example:
features: features:
amx: true amx: true
kvm_hyperv: false
topology: topology:
dies_per_package: 5 dies_per_package: 5
threads_per_core: 1 threads_per_core: 1
@@ -1153,6 +1161,9 @@ components:
type: integer type: integer
topology: topology:
$ref: '#/components/schemas/CpuTopology' $ref: '#/components/schemas/CpuTopology'
kvm_hyperv:
default: false
type: boolean
max_phys_bits: max_phys_bits:
type: integer type: integer
affinity: affinity:
@@ -1170,7 +1181,7 @@ components:
iommu_segments: iommu_segments:
- 3 - 3
- 3 - 3
num_pci_segments: 3 num_pci_segments: 7
serial_number: serial_number serial_number: serial_number
properties: properties:
num_pci_segments: num_pci_segments:
@@ -1538,8 +1549,6 @@ components:
pci_segment: 6 pci_segment: 6
num_queues: 1 num_queues: 1
queue_size: 2 queue_size: 2
cache_size: 6
dax: true
tag: tag tag: tag
socket: socket socket: socket
id: id id: id
@@ -1554,20 +1563,12 @@ components:
queue_size: queue_size:
default: 1024 default: 1024
type: integer type: integer
dax:
default: true
type: boolean
cache_size:
format: int64
type: integer
pci_segment: pci_segment:
format: int16 format: int16
type: integer type: integer
id: id:
type: string type: string
required: required:
- cache_size
- dax
- num_queues - num_queues
- queue_size - queue_size
- socket - socket
@@ -1575,10 +1576,9 @@ components:
type: object type: object
PmemConfig: PmemConfig:
example: example:
pci_segment: 6 pci_segment: 5
mergeable: false
file: file file: file
size: 5 size: 6
iommu: false iommu: false
id: id id: id
discard_writes: false discard_writes: false
@@ -1591,9 +1591,6 @@ components:
iommu: iommu:
default: false default: false
type: boolean type: boolean
mergeable:
default: false
type: boolean
discard_writes: discard_writes:
default: false default: false
type: boolean type: boolean
@@ -1629,7 +1626,7 @@ components:
type: object type: object
DeviceConfig: DeviceConfig:
example: example:
pci_segment: 3 pci_segment: 6
path: path path: path
iommu: false iommu: false
id: id id: id
@@ -1649,7 +1646,7 @@ components:
type: object type: object
VdpaConfig: VdpaConfig:
example: example:
pci_segment: 7 pci_segment: 3
path: path path: path
num_queues: 3 num_queues: 3
iommu: false iommu: false
@@ -1674,7 +1671,7 @@ components:
type: object type: object
VsockConfig: VsockConfig:
example: example:
pci_segment: 7 pci_segment: 0
iommu: false iommu: false
socket: socket socket: socket
id: id id: id
@@ -1703,7 +1700,7 @@ components:
SgxEpcConfig: SgxEpcConfig:
example: example:
prefault: false prefault: false
size: 6 size: 7
id: id id: id
properties: properties:
id: id:
@@ -1731,8 +1728,8 @@ components:
type: object type: object
NumaDistance: NumaDistance:
example: example:
distance: 7 distance: 8
destination: 8 destination: 4
properties: properties:
destination: destination:
format: int32 format: int32
@@ -1747,20 +1744,20 @@ components:
NumaConfig: NumaConfig:
example: example:
distances: distances:
- distance: 7 - distance: 8
destination: 8 destination: 4
- distance: 7 - distance: 8
destination: 8 destination: 4
cpus: cpus:
- 4 - 0
- 4 - 0
sgx_epc_sections: sgx_epc_sections:
- sgx_epc_sections - sgx_epc_sections
- sgx_epc_sections - sgx_epc_sections
memory_zones: memory_zones:
- memory_zones - memory_zones
- memory_zones - memory_zones
guest_numa_id: 0 guest_numa_id: 6
properties: properties:
guest_numa_id: guest_numa_id:
format: int32 format: int32
@@ -1843,6 +1840,13 @@ components:
destination_url: destination_url:
type: string type: string
type: object type: object
VmCoredumpData:
example:
destination_url: destination_url
properties:
destination_url:
type: string
type: object
RestoreConfig: RestoreConfig:
example: example:
prefault: true prefault: true

View File

@@ -1607,6 +1607,106 @@ func (a *DefaultApiService) VmAddVsockPutExecute(r ApiVmAddVsockPutRequest) (Pci
return localVarReturnValue, localVarHTTPResponse, nil return localVarReturnValue, localVarHTTPResponse, nil
} }
type ApiVmCoredumpPutRequest struct {
ctx _context.Context
ApiService *DefaultApiService
vmCoredumpData *VmCoredumpData
}
// The coredump configuration
func (r ApiVmCoredumpPutRequest) VmCoredumpData(vmCoredumpData VmCoredumpData) ApiVmCoredumpPutRequest {
r.vmCoredumpData = &vmCoredumpData
return r
}
func (r ApiVmCoredumpPutRequest) Execute() (*_nethttp.Response, error) {
return r.ApiService.VmCoredumpPutExecute(r)
}
/*
VmCoredumpPut Takes a VM coredump.
@param ctx _context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
@return ApiVmCoredumpPutRequest
*/
func (a *DefaultApiService) VmCoredumpPut(ctx _context.Context) ApiVmCoredumpPutRequest {
return ApiVmCoredumpPutRequest{
ApiService: a,
ctx: ctx,
}
}
// Execute executes the request
func (a *DefaultApiService) VmCoredumpPutExecute(r ApiVmCoredumpPutRequest) (*_nethttp.Response, error) {
var (
localVarHTTPMethod = _nethttp.MethodPut
localVarPostBody interface{}
localVarFormFileName string
localVarFileName string
localVarFileBytes []byte
)
localBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, "DefaultApiService.VmCoredumpPut")
if err != nil {
return nil, GenericOpenAPIError{error: err.Error()}
}
localVarPath := localBasePath + "/vm.coredump"
localVarHeaderParams := make(map[string]string)
localVarQueryParams := _neturl.Values{}
localVarFormParams := _neturl.Values{}
if r.vmCoredumpData == nil {
return nil, reportError("vmCoredumpData is required and must be specified")
}
// to determine the Content-Type header
localVarHTTPContentTypes := []string{"application/json"}
// set Content-Type header
localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)
if localVarHTTPContentType != "" {
localVarHeaderParams["Content-Type"] = localVarHTTPContentType
}
// to determine the Accept header
localVarHTTPHeaderAccepts := []string{}
// set Accept header
localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)
if localVarHTTPHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept
}
// body params
localVarPostBody = r.vmCoredumpData
req, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
if err != nil {
return nil, err
}
localVarHTTPResponse, err := a.client.callAPI(req)
if err != nil || localVarHTTPResponse == nil {
return localVarHTTPResponse, err
}
localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)
localVarHTTPResponse.Body.Close()
localVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))
if err != nil {
return localVarHTTPResponse, err
}
if localVarHTTPResponse.StatusCode >= 300 {
newErr := GenericOpenAPIError{
body: localVarBody,
error: localVarHTTPResponse.Status,
}
return localVarHTTPResponse, newErr
}
return localVarHTTPResponse, nil
}
type ApiVmCountersGetRequest struct { type ApiVmCountersGetRequest struct {
ctx _context.Context ctx _context.Context
ApiService *DefaultApiService ApiService *DefaultApiService

View File

@@ -7,6 +7,7 @@ Name | Type | Description | Notes
**BootVcpus** | **int32** | | [default to 1] **BootVcpus** | **int32** | | [default to 1]
**MaxVcpus** | **int32** | | [default to 1] **MaxVcpus** | **int32** | | [default to 1]
**Topology** | Pointer to [**CpuTopology**](CpuTopology.md) | | [optional] **Topology** | Pointer to [**CpuTopology**](CpuTopology.md) | | [optional]
**KvmHyperv** | Pointer to **bool** | | [optional] [default to false]
**MaxPhysBits** | Pointer to **int32** | | [optional] **MaxPhysBits** | Pointer to **int32** | | [optional]
**Affinity** | Pointer to [**[]CpuAffinity**](CpuAffinity.md) | | [optional] **Affinity** | Pointer to [**[]CpuAffinity**](CpuAffinity.md) | | [optional]
**Features** | Pointer to [**CpuFeatures**](CpuFeatures.md) | | [optional] **Features** | Pointer to [**CpuFeatures**](CpuFeatures.md) | | [optional]
@@ -95,6 +96,31 @@ SetTopology sets Topology field to given value.
HasTopology returns a boolean if a field has been set. HasTopology returns a boolean if a field has been set.
### GetKvmHyperv
`func (o *CpusConfig) GetKvmHyperv() bool`
GetKvmHyperv returns the KvmHyperv field if non-nil, zero value otherwise.
### GetKvmHypervOk
`func (o *CpusConfig) GetKvmHypervOk() (*bool, bool)`
GetKvmHypervOk returns a tuple with the KvmHyperv field if it's non-nil, zero value otherwise
and a boolean to check if the value has been set.
### SetKvmHyperv
`func (o *CpusConfig) SetKvmHyperv(v bool)`
SetKvmHyperv sets KvmHyperv field to given value.
### HasKvmHyperv
`func (o *CpusConfig) HasKvmHyperv() bool`
HasKvmHyperv returns a boolean if a field has been set.
### GetMaxPhysBits ### GetMaxPhysBits
`func (o *CpusConfig) GetMaxPhysBits() int32` `func (o *CpusConfig) GetMaxPhysBits() int32`

View File

@@ -20,6 +20,7 @@ Method | HTTP request | Description
[**VmAddPmemPut**](DefaultApi.md#VmAddPmemPut) | **Put** /vm.add-pmem | Add a new pmem device to the VM [**VmAddPmemPut**](DefaultApi.md#VmAddPmemPut) | **Put** /vm.add-pmem | Add a new pmem device to the VM
[**VmAddVdpaPut**](DefaultApi.md#VmAddVdpaPut) | **Put** /vm.add-vdpa | Add a new vDPA device to the VM [**VmAddVdpaPut**](DefaultApi.md#VmAddVdpaPut) | **Put** /vm.add-vdpa | Add a new vDPA device to the VM
[**VmAddVsockPut**](DefaultApi.md#VmAddVsockPut) | **Put** /vm.add-vsock | Add a new vsock device to the VM [**VmAddVsockPut**](DefaultApi.md#VmAddVsockPut) | **Put** /vm.add-vsock | Add a new vsock device to the VM
[**VmCoredumpPut**](DefaultApi.md#VmCoredumpPut) | **Put** /vm.coredump | Takes a VM coredump.
[**VmCountersGet**](DefaultApi.md#VmCountersGet) | **Get** /vm.counters | Get counters from the VM [**VmCountersGet**](DefaultApi.md#VmCountersGet) | **Get** /vm.counters | Get counters from the VM
[**VmInfoGet**](DefaultApi.md#VmInfoGet) | **Get** /vm.info | Returns general information about the cloud-hypervisor Virtual Machine (VM) instance. [**VmInfoGet**](DefaultApi.md#VmInfoGet) | **Get** /vm.info | Returns general information about the cloud-hypervisor Virtual Machine (VM) instance.
[**VmReceiveMigrationPut**](DefaultApi.md#VmReceiveMigrationPut) | **Put** /vm.receive-migration | Receive a VM migration from URL [**VmReceiveMigrationPut**](DefaultApi.md#VmReceiveMigrationPut) | **Put** /vm.receive-migration | Receive a VM migration from URL
@@ -698,7 +699,7 @@ import (
) )
func main() { func main() {
fsConfig := *openapiclient.NewFsConfig("Tag_example", "Socket_example", int32(123), int32(123), false, int64(123)) // FsConfig | The details of the new virtio-fs fsConfig := *openapiclient.NewFsConfig("Tag_example", "Socket_example", int32(123), int32(123)) // FsConfig | The details of the new virtio-fs
configuration := openapiclient.NewConfiguration() configuration := openapiclient.NewConfiguration()
api_client := openapiclient.NewAPIClient(configuration) api_client := openapiclient.NewAPIClient(configuration)
@@ -999,6 +1000,68 @@ No authorization required
[[Back to README]](../README.md) [[Back to README]](../README.md)
## VmCoredumpPut
> VmCoredumpPut(ctx).VmCoredumpData(vmCoredumpData).Execute()
Takes a VM coredump.
### Example
```go
package main
import (
"context"
"fmt"
"os"
openapiclient "./openapi"
)
func main() {
vmCoredumpData := *openapiclient.NewVmCoredumpData() // VmCoredumpData | The coredump configuration
configuration := openapiclient.NewConfiguration()
api_client := openapiclient.NewAPIClient(configuration)
resp, r, err := api_client.DefaultApi.VmCoredumpPut(context.Background()).VmCoredumpData(vmCoredumpData).Execute()
if err != nil {
fmt.Fprintf(os.Stderr, "Error when calling `DefaultApi.VmCoredumpPut``: %v\n", err)
fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
}
}
```
### Path Parameters
### Other Parameters
Other parameters are passed through a pointer to a apiVmCoredumpPutRequest struct via the builder pattern
Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**vmCoredumpData** | [**VmCoredumpData**](VmCoredumpData.md) | The coredump configuration |
### Return type
(empty response body)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: application/json
- **Accept**: Not defined
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints)
[[Back to Model list]](../README.md#documentation-for-models)
[[Back to README]](../README.md)
## VmCountersGet ## VmCountersGet
> map[string]map[string]int64 VmCountersGet(ctx).Execute() > map[string]map[string]int64 VmCountersGet(ctx).Execute()

View File

@@ -8,8 +8,6 @@ Name | Type | Description | Notes
**Socket** | **string** | | **Socket** | **string** | |
**NumQueues** | **int32** | | [default to 1] **NumQueues** | **int32** | | [default to 1]
**QueueSize** | **int32** | | [default to 1024] **QueueSize** | **int32** | | [default to 1024]
**Dax** | **bool** | | [default to true]
**CacheSize** | **int64** | |
**PciSegment** | Pointer to **int32** | | [optional] **PciSegment** | Pointer to **int32** | | [optional]
**Id** | Pointer to **string** | | [optional] **Id** | Pointer to **string** | | [optional]
@@ -17,7 +15,7 @@ Name | Type | Description | Notes
### NewFsConfig ### NewFsConfig
`func NewFsConfig(tag string, socket string, numQueues int32, queueSize int32, dax bool, cacheSize int64, ) *FsConfig` `func NewFsConfig(tag string, socket string, numQueues int32, queueSize int32, ) *FsConfig`
NewFsConfig instantiates a new FsConfig object NewFsConfig instantiates a new FsConfig object
This constructor will assign default values to properties that have it defined, This constructor will assign default values to properties that have it defined,
@@ -112,46 +110,6 @@ and a boolean to check if the value has been set.
SetQueueSize sets QueueSize field to given value. SetQueueSize sets QueueSize field to given value.
### GetDax
`func (o *FsConfig) GetDax() bool`
GetDax returns the Dax field if non-nil, zero value otherwise.
### GetDaxOk
`func (o *FsConfig) GetDaxOk() (*bool, bool)`
GetDaxOk returns a tuple with the Dax field if it's non-nil, zero value otherwise
and a boolean to check if the value has been set.
### SetDax
`func (o *FsConfig) SetDax(v bool)`
SetDax sets Dax field to given value.
### GetCacheSize
`func (o *FsConfig) GetCacheSize() int64`
GetCacheSize returns the CacheSize field if non-nil, zero value otherwise.
### GetCacheSizeOk
`func (o *FsConfig) GetCacheSizeOk() (*int64, bool)`
GetCacheSizeOk returns a tuple with the CacheSize field if it's non-nil, zero value otherwise
and a boolean to check if the value has been set.
### SetCacheSize
`func (o *FsConfig) SetCacheSize(v int64)`
SetCacheSize sets CacheSize field to given value.
### GetPciSegment ### GetPciSegment
`func (o *FsConfig) GetPciSegment() int32` `func (o *FsConfig) GetPciSegment() int32`

View File

@@ -7,7 +7,6 @@ Name | Type | Description | Notes
**File** | **string** | | **File** | **string** | |
**Size** | Pointer to **int64** | | [optional] **Size** | Pointer to **int64** | | [optional]
**Iommu** | Pointer to **bool** | | [optional] [default to false] **Iommu** | Pointer to **bool** | | [optional] [default to false]
**Mergeable** | Pointer to **bool** | | [optional] [default to false]
**DiscardWrites** | Pointer to **bool** | | [optional] [default to false] **DiscardWrites** | Pointer to **bool** | | [optional] [default to false]
**PciSegment** | Pointer to **int32** | | [optional] **PciSegment** | Pointer to **int32** | | [optional]
**Id** | Pointer to **string** | | [optional] **Id** | Pointer to **string** | | [optional]
@@ -101,31 +100,6 @@ SetIommu sets Iommu field to given value.
HasIommu returns a boolean if a field has been set. HasIommu returns a boolean if a field has been set.
### GetMergeable
`func (o *PmemConfig) GetMergeable() bool`
GetMergeable returns the Mergeable field if non-nil, zero value otherwise.
### GetMergeableOk
`func (o *PmemConfig) GetMergeableOk() (*bool, bool)`
GetMergeableOk returns a tuple with the Mergeable field if it's non-nil, zero value otherwise
and a boolean to check if the value has been set.
### SetMergeable
`func (o *PmemConfig) SetMergeable(v bool)`
SetMergeable sets Mergeable field to given value.
### HasMergeable
`func (o *PmemConfig) HasMergeable() bool`
HasMergeable returns a boolean if a field has been set.
### GetDiscardWrites ### GetDiscardWrites
`func (o *PmemConfig) GetDiscardWrites() bool` `func (o *PmemConfig) GetDiscardWrites() bool`

View File

@@ -0,0 +1,56 @@
# VmCoredumpData
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**DestinationUrl** | Pointer to **string** | | [optional]
## Methods
### NewVmCoredumpData
`func NewVmCoredumpData() *VmCoredumpData`
NewVmCoredumpData instantiates a new VmCoredumpData object
This constructor will assign default values to properties that have it defined,
and makes sure properties required by API are set, but the set of arguments
will change when the set of required properties is changed
### NewVmCoredumpDataWithDefaults
`func NewVmCoredumpDataWithDefaults() *VmCoredumpData`
NewVmCoredumpDataWithDefaults instantiates a new VmCoredumpData object
This constructor will only assign default values to properties that have it defined,
but it doesn't guarantee that properties required by API are set
### GetDestinationUrl
`func (o *VmCoredumpData) GetDestinationUrl() string`
GetDestinationUrl returns the DestinationUrl field if non-nil, zero value otherwise.
### GetDestinationUrlOk
`func (o *VmCoredumpData) GetDestinationUrlOk() (*string, bool)`
GetDestinationUrlOk returns a tuple with the DestinationUrl field if it's non-nil, zero value otherwise
and a boolean to check if the value has been set.
### SetDestinationUrl
`func (o *VmCoredumpData) SetDestinationUrl(v string)`
SetDestinationUrl sets DestinationUrl field to given value.
### HasDestinationUrl
`func (o *VmCoredumpData) HasDestinationUrl() bool`
HasDestinationUrl returns a boolean if a field has been set.
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)

View File

@@ -19,6 +19,7 @@ type CpusConfig struct {
BootVcpus int32 `json:"boot_vcpus"` BootVcpus int32 `json:"boot_vcpus"`
MaxVcpus int32 `json:"max_vcpus"` MaxVcpus int32 `json:"max_vcpus"`
Topology *CpuTopology `json:"topology,omitempty"` Topology *CpuTopology `json:"topology,omitempty"`
KvmHyperv *bool `json:"kvm_hyperv,omitempty"`
MaxPhysBits *int32 `json:"max_phys_bits,omitempty"` MaxPhysBits *int32 `json:"max_phys_bits,omitempty"`
Affinity *[]CpuAffinity `json:"affinity,omitempty"` Affinity *[]CpuAffinity `json:"affinity,omitempty"`
Features *CpuFeatures `json:"features,omitempty"` Features *CpuFeatures `json:"features,omitempty"`
@@ -32,6 +33,8 @@ func NewCpusConfig(bootVcpus int32, maxVcpus int32) *CpusConfig {
this := CpusConfig{} this := CpusConfig{}
this.BootVcpus = bootVcpus this.BootVcpus = bootVcpus
this.MaxVcpus = maxVcpus this.MaxVcpus = maxVcpus
var kvmHyperv bool = false
this.KvmHyperv = &kvmHyperv
return &this return &this
} }
@@ -44,6 +47,8 @@ func NewCpusConfigWithDefaults() *CpusConfig {
this.BootVcpus = bootVcpus this.BootVcpus = bootVcpus
var maxVcpus int32 = 1 var maxVcpus int32 = 1
this.MaxVcpus = maxVcpus this.MaxVcpus = maxVcpus
var kvmHyperv bool = false
this.KvmHyperv = &kvmHyperv
return &this return &this
} }
@@ -127,6 +132,38 @@ func (o *CpusConfig) SetTopology(v CpuTopology) {
o.Topology = &v o.Topology = &v
} }
// GetKvmHyperv returns the KvmHyperv field value if set, zero value otherwise.
func (o *CpusConfig) GetKvmHyperv() bool {
if o == nil || o.KvmHyperv == nil {
var ret bool
return ret
}
return *o.KvmHyperv
}
// GetKvmHypervOk returns a tuple with the KvmHyperv field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *CpusConfig) GetKvmHypervOk() (*bool, bool) {
if o == nil || o.KvmHyperv == nil {
return nil, false
}
return o.KvmHyperv, true
}
// HasKvmHyperv returns a boolean if a field has been set.
func (o *CpusConfig) HasKvmHyperv() bool {
if o != nil && o.KvmHyperv != nil {
return true
}
return false
}
// SetKvmHyperv gets a reference to the given bool and assigns it to the KvmHyperv field.
func (o *CpusConfig) SetKvmHyperv(v bool) {
o.KvmHyperv = &v
}
// GetMaxPhysBits returns the MaxPhysBits field value if set, zero value otherwise. // GetMaxPhysBits returns the MaxPhysBits field value if set, zero value otherwise.
func (o *CpusConfig) GetMaxPhysBits() int32 { func (o *CpusConfig) GetMaxPhysBits() int32 {
if o == nil || o.MaxPhysBits == nil { if o == nil || o.MaxPhysBits == nil {
@@ -234,6 +271,9 @@ func (o CpusConfig) MarshalJSON() ([]byte, error) {
if o.Topology != nil { if o.Topology != nil {
toSerialize["topology"] = o.Topology toSerialize["topology"] = o.Topology
} }
if o.KvmHyperv != nil {
toSerialize["kvm_hyperv"] = o.KvmHyperv
}
if o.MaxPhysBits != nil { if o.MaxPhysBits != nil {
toSerialize["max_phys_bits"] = o.MaxPhysBits toSerialize["max_phys_bits"] = o.MaxPhysBits
} }
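A short end-to-end illustration of the new knob, using a trimmed stand-in for `CpusConfig` (only the fields relevant here) to show how the optional `kvm_hyperv` flag serializes:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// CpusConfig is trimmed to the fields needed to show the new optional
// kvm_hyperv flag: a pointer with omitempty, defaulting to false.
type CpusConfig struct {
	BootVcpus int32 `json:"boot_vcpus"`
	MaxVcpus  int32 `json:"max_vcpus"`
	KvmHyperv *bool `json:"kvm_hyperv,omitempty"`
}

func main() {
	kvmHyperv := true
	cfg := CpusConfig{BootVcpus: 1, MaxVcpus: 1, KvmHyperv: &kvmHyperv}
	out, _ := json.Marshal(cfg)
	fmt.Println(string(out)) // {"boot_vcpus":1,"max_vcpus":1,"kvm_hyperv":true}
}
```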

View File

@@ -20,8 +20,6 @@ type FsConfig struct {
Socket string `json:"socket"` Socket string `json:"socket"`
NumQueues int32 `json:"num_queues"` NumQueues int32 `json:"num_queues"`
QueueSize int32 `json:"queue_size"` QueueSize int32 `json:"queue_size"`
Dax bool `json:"dax"`
CacheSize int64 `json:"cache_size"`
PciSegment *int32 `json:"pci_segment,omitempty"` PciSegment *int32 `json:"pci_segment,omitempty"`
Id *string `json:"id,omitempty"` Id *string `json:"id,omitempty"`
} }
@@ -30,14 +28,12 @@ type FsConfig struct {
// This constructor will assign default values to properties that have it defined, // This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments // and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed // will change when the set of required properties is changed
func NewFsConfig(tag string, socket string, numQueues int32, queueSize int32, dax bool, cacheSize int64) *FsConfig { func NewFsConfig(tag string, socket string, numQueues int32, queueSize int32) *FsConfig {
this := FsConfig{} this := FsConfig{}
this.Tag = tag this.Tag = tag
this.Socket = socket this.Socket = socket
this.NumQueues = numQueues this.NumQueues = numQueues
this.QueueSize = queueSize this.QueueSize = queueSize
this.Dax = dax
this.CacheSize = cacheSize
return &this return &this
} }
@@ -50,8 +46,6 @@ func NewFsConfigWithDefaults() *FsConfig {
this.NumQueues = numQueues this.NumQueues = numQueues
var queueSize int32 = 1024 var queueSize int32 = 1024
this.QueueSize = queueSize this.QueueSize = queueSize
var dax bool = true
this.Dax = dax
return &this return &this
} }
@@ -151,54 +145,6 @@ func (o *FsConfig) SetQueueSize(v int32) {
o.QueueSize = v o.QueueSize = v
} }
// GetDax returns the Dax field value
func (o *FsConfig) GetDax() bool {
if o == nil {
var ret bool
return ret
}
return o.Dax
}
// GetDaxOk returns a tuple with the Dax field value
// and a boolean to check if the value has been set.
func (o *FsConfig) GetDaxOk() (*bool, bool) {
if o == nil {
return nil, false
}
return &o.Dax, true
}
// SetDax sets field value
func (o *FsConfig) SetDax(v bool) {
o.Dax = v
}
// GetCacheSize returns the CacheSize field value
func (o *FsConfig) GetCacheSize() int64 {
if o == nil {
var ret int64
return ret
}
return o.CacheSize
}
// GetCacheSizeOk returns a tuple with the CacheSize field value
// and a boolean to check if the value has been set.
func (o *FsConfig) GetCacheSizeOk() (*int64, bool) {
if o == nil {
return nil, false
}
return &o.CacheSize, true
}
// SetCacheSize sets field value
func (o *FsConfig) SetCacheSize(v int64) {
o.CacheSize = v
}
// GetPciSegment returns the PciSegment field value if set, zero value otherwise. // GetPciSegment returns the PciSegment field value if set, zero value otherwise.
func (o *FsConfig) GetPciSegment() int32 { func (o *FsConfig) GetPciSegment() int32 {
if o == nil || o.PciSegment == nil { if o == nil || o.PciSegment == nil {
@@ -277,12 +223,6 @@ func (o FsConfig) MarshalJSON() ([]byte, error) {
if true { if true {
toSerialize["queue_size"] = o.QueueSize toSerialize["queue_size"] = o.QueueSize
} }
if true {
toSerialize["dax"] = o.Dax
}
if true {
toSerialize["cache_size"] = o.CacheSize
}
if o.PciSegment != nil { if o.PciSegment != nil {
toSerialize["pci_segment"] = o.PciSegment toSerialize["pci_segment"] = o.PciSegment
} }

View File

@@ -19,7 +19,6 @@ type PmemConfig struct {
File string `json:"file"` File string `json:"file"`
Size *int64 `json:"size,omitempty"` Size *int64 `json:"size,omitempty"`
Iommu *bool `json:"iommu,omitempty"` Iommu *bool `json:"iommu,omitempty"`
Mergeable *bool `json:"mergeable,omitempty"`
DiscardWrites *bool `json:"discard_writes,omitempty"` DiscardWrites *bool `json:"discard_writes,omitempty"`
PciSegment *int32 `json:"pci_segment,omitempty"` PciSegment *int32 `json:"pci_segment,omitempty"`
Id *string `json:"id,omitempty"` Id *string `json:"id,omitempty"`
@@ -34,8 +33,6 @@ func NewPmemConfig(file string) *PmemConfig {
this.File = file this.File = file
var iommu bool = false var iommu bool = false
this.Iommu = &iommu this.Iommu = &iommu
var mergeable bool = false
this.Mergeable = &mergeable
var discardWrites bool = false var discardWrites bool = false
this.DiscardWrites = &discardWrites this.DiscardWrites = &discardWrites
return &this return &this
@@ -48,8 +45,6 @@ func NewPmemConfigWithDefaults() *PmemConfig {
this := PmemConfig{} this := PmemConfig{}
var iommu bool = false var iommu bool = false
this.Iommu = &iommu this.Iommu = &iommu
var mergeable bool = false
this.Mergeable = &mergeable
var discardWrites bool = false var discardWrites bool = false
this.DiscardWrites = &discardWrites this.DiscardWrites = &discardWrites
return &this return &this
@@ -143,38 +138,6 @@ func (o *PmemConfig) SetIommu(v bool) {
o.Iommu = &v o.Iommu = &v
} }
// GetMergeable returns the Mergeable field value if set, zero value otherwise.
func (o *PmemConfig) GetMergeable() bool {
if o == nil || o.Mergeable == nil {
var ret bool
return ret
}
return *o.Mergeable
}
// GetMergeableOk returns a tuple with the Mergeable field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *PmemConfig) GetMergeableOk() (*bool, bool) {
if o == nil || o.Mergeable == nil {
return nil, false
}
return o.Mergeable, true
}
// HasMergeable returns a boolean if a field has been set.
func (o *PmemConfig) HasMergeable() bool {
if o != nil && o.Mergeable != nil {
return true
}
return false
}
// SetMergeable gets a reference to the given bool and assigns it to the Mergeable field.
func (o *PmemConfig) SetMergeable(v bool) {
o.Mergeable = &v
}
// GetDiscardWrites returns the DiscardWrites field value if set, zero value otherwise. // GetDiscardWrites returns the DiscardWrites field value if set, zero value otherwise.
func (o *PmemConfig) GetDiscardWrites() bool { func (o *PmemConfig) GetDiscardWrites() bool {
if o == nil || o.DiscardWrites == nil { if o == nil || o.DiscardWrites == nil {
@@ -282,9 +245,6 @@ func (o PmemConfig) MarshalJSON() ([]byte, error) {
if o.Iommu != nil {
toSerialize["iommu"] = o.Iommu
}
if o.Mergeable != nil {
toSerialize["mergeable"] = o.Mergeable
}
if o.DiscardWrites != nil {
toSerialize["discard_writes"] = o.DiscardWrites
}
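With `mergeable` gone from `PmemConfig`, a pmem hot-add request now carries only the remaining fields. A minimal sketch of such a request, assuming the VMM API socket sits at `/tmp/cloud-hypervisor.sock` and the backing file path is hypothetical:

```
# Hypothetical example: hot-add a pmem device without the removed "mergeable" flag.
# The socket and file paths are assumptions, not values from this change.
curl --unix-socket /tmp/cloud-hypervisor.sock -i \
     -X PUT 'http://localhost/api/v1/vm.add-pmem' \
     -H 'Content-Type: application/json' \
     -d '{"file": "/tmp/pmem.img", "discard_writes": true}'
```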

View File

@@ -0,0 +1,113 @@
/*
Cloud Hypervisor API
Local HTTP based API for managing and inspecting a cloud-hypervisor virtual machine.
API version: 0.3.0
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package openapi
import (
"encoding/json"
)
// VmCoredumpData struct for VmCoredumpData
type VmCoredumpData struct {
DestinationUrl *string `json:"destination_url,omitempty"`
}
// NewVmCoredumpData instantiates a new VmCoredumpData object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewVmCoredumpData() *VmCoredumpData {
this := VmCoredumpData{}
return &this
}
// NewVmCoredumpDataWithDefaults instantiates a new VmCoredumpData object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewVmCoredumpDataWithDefaults() *VmCoredumpData {
this := VmCoredumpData{}
return &this
}
// GetDestinationUrl returns the DestinationUrl field value if set, zero value otherwise.
func (o *VmCoredumpData) GetDestinationUrl() string {
if o == nil || o.DestinationUrl == nil {
var ret string
return ret
}
return *o.DestinationUrl
}
// GetDestinationUrlOk returns a tuple with the DestinationUrl field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VmCoredumpData) GetDestinationUrlOk() (*string, bool) {
if o == nil || o.DestinationUrl == nil {
return nil, false
}
return o.DestinationUrl, true
}
// HasDestinationUrl returns a boolean if a field has been set.
func (o *VmCoredumpData) HasDestinationUrl() bool {
if o != nil && o.DestinationUrl != nil {
return true
}
return false
}
// SetDestinationUrl gets a reference to the given string and assigns it to the DestinationUrl field.
func (o *VmCoredumpData) SetDestinationUrl(v string) {
o.DestinationUrl = &v
}
func (o VmCoredumpData) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.DestinationUrl != nil {
toSerialize["destination_url"] = o.DestinationUrl
}
return json.Marshal(toSerialize)
}
type NullableVmCoredumpData struct {
value *VmCoredumpData
isSet bool
}
func (v NullableVmCoredumpData) Get() *VmCoredumpData {
return v.value
}
func (v *NullableVmCoredumpData) Set(val *VmCoredumpData) {
v.value = val
v.isSet = true
}
func (v NullableVmCoredumpData) IsSet() bool {
return v.isSet
}
func (v *NullableVmCoredumpData) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableVmCoredumpData(val *VmCoredumpData) *NullableVmCoredumpData {
return &NullableVmCoredumpData{value: val, isSet: true}
}
func (v NullableVmCoredumpData) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableVmCoredumpData) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
}
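The generated `VmCoredumpData` model backs the new `/vm.coredump` endpoint added to the API spec below; its only field is the optional `destination_url`. A hedged sketch of exercising the endpoint directly over the API socket (the socket path and destination file are assumptions, not values from this change):

```
# Hypothetical example: request a coredump of a booted VM over the HTTP API socket.
# Assumes the socket lives at /tmp/cloud-hypervisor.sock; the dump is written to
# the location named by destination_url.
curl --unix-socket /tmp/cloud-hypervisor.sock -i \
     -X PUT 'http://localhost/api/v1/vm.coredump' \
     -H 'Content-Type: application/json' \
     -d '{"destination_url": "file:///tmp/vm.core"}'
```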

View File

@@ -366,6 +366,24 @@ paths:
405:
description: The VM instance could not be snapshotted because it is not booted.
/vm.coredump:
put:
summary: Takes a VM coredump.
requestBody:
description: The coredump configuration
content:
application/json:
schema:
$ref: '#/components/schemas/VmCoredumpData'
required: true
responses:
204:
description: The VM instance was successfully coredumped.
404:
description: The VM instance could not be coredumped because it is not created.
405:
description: The VM instance could not be coredumped because it is not booted.
/vm.restore:
put:
summary: Restore a VM from a snapshot.
@@ -596,6 +614,9 @@ components:
type: integer
topology:
$ref: '#/components/schemas/CpuTopology'
kvm_hyperv:
type: boolean
default: false
max_phys_bits:
type: integer
affinity:
@@ -870,8 +891,6 @@ components:
FsConfig:
required:
- cache_size
- dax
- num_queues
- queue_size
- socket
@@ -888,13 +907,6 @@ components:
queue_size:
type: integer
default: 1024
dax:
type: boolean
default: true
cache_size:
type: integer
format: int64
default: 8589934592
pci_segment:
type: integer
format: int16
@@ -914,9 +926,6 @@ components:
iommu:
type: boolean
default: false
mergeable:
type: boolean
default: false
discard_writes:
type: boolean
default: false
@@ -1110,6 +1119,12 @@ components:
destination_url:
type: string
VmCoredumpData:
type: object
properties:
destination_url:
type: string
RestoreConfig:
required:
- source_url
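With `dax` and `cache_size` removed from `FsConfig`, a virtio-fs attach request reduces to the socket, tag and queue settings. A minimal sketch, assuming a `virtiofsd` socket at `/tmp/virtiofs.sock` and the same hypothetical API socket path as above:

```
# Hypothetical example: add a virtio-fs share using the trimmed FsConfig schema
# (note: no "dax" or "cache_size" keys). All paths are assumptions.
curl --unix-socket /tmp/cloud-hypervisor.sock -i \
     -X PUT 'http://localhost/api/v1/vm.add-fs' \
     -H 'Content-Type: application/json' \
     -d '{"socket": "/tmp/virtiofs.sock", "tag": "shared", "num_queues": 1, "queue_size": 1024}'
```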

View File

@@ -90,14 +90,14 @@ build_qat_drivers()
KERNEL_ROOTFS_DIR=${KERNEL_MAJOR_VERSION}.${KERNEL_PATHLEVEL}.${KERNEL_SUBLEVEL}${KERNEL_EXTRAVERSION}
cd $QAT_SRC
KERNEL_SOURCE_ROOT=${linux_kernel_path} ./configure ${QAT_CONFIGURE_OPTIONS}
make all -j$(nproc)
make all -j $(nproc ${CI:+--ignore 1})
}
add_qat_to_rootfs()
{
/bin/echo -e "\n\e[1;42mCopy driver modules to rootfs\e[0m"
cd $QAT_SRC
sudo -E make INSTALL_MOD_PATH=${ROOTFS_DIR} qat-driver-install -j$(nproc)
sudo -E make INSTALL_MOD_PATH=${ROOTFS_DIR} qat-driver-install -j$(nproc --ignore=1)
sudo cp $QAT_SRC/build/usdm_drv.ko ${ROOTFS_DIR}/lib/modules/${KERNEL_ROOTFS_DIR}/updates/drivers
sudo depmod -a -b ${ROOTFS_DIR} ${KERNEL_ROOTFS_DIR}
cd ${kata_repo_path}/tools/osbuilder/image-builder
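The `${CI:+--ignore 1}` idiom used above is plain parameter expansion: it expands to `--ignore 1` only when `CI` is set to a non-empty value, so CI builds leave one CPU free while local builds keep using every core. A quick illustration:

```
# How the ${CI:+--ignore 1} expansion behaves:
unset CI
echo "-j$(nproc ${CI:+--ignore 1})"   # CI unset/empty -> runs plain "nproc" (all CPUs)
CI=true
echo "-j$(nproc ${CI:+--ignore 1})"   # CI set -> runs "nproc --ignore 1" (one CPU held back)
```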

View File

@@ -0,0 +1,46 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kubelet-kata-cleanup
namespace: kube-system
spec:
selector:
matchLabels:
name: kubelet-kata-cleanup
template:
metadata:
labels:
name: kubelet-kata-cleanup
spec:
serviceAccountName: kata-label-node
nodeSelector:
katacontainers.io/kata-runtime: cleanup
containers:
- name: kube-kata-cleanup
image: quay.io/kata-containers/kata-deploy:stable
imagePullPolicy: Always
command: [ "bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh reset" ]
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
privileged: false
volumeMounts:
- name: dbus
mountPath: /var/run/dbus
- name: systemd
mountPath: /run/systemd
volumes:
- name: dbus
hostPath:
path: /var/run/dbus
- name: systemd
hostPath:
path: /run/systemd
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
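This cleanup DaemonSet only lands on nodes carrying the `katacontainers.io/kata-runtime=cleanup` label, which the `kata-deploy.sh` cleanup path is expected to apply as the install DaemonSet is removed. A hedged sketch of the teardown sequence (the manifest paths are assumptions about the repository layout):

```
# Hypothetical teardown sequence:
kubectl delete -f kata-deploy/base/kata-deploy.yaml     # preStop hook runs "kata-deploy.sh cleanup"
kubectl apply  -f kata-cleanup/base/kata-cleanup.yaml   # runs "kata-deploy.sh reset" on labelled nodes
kubectl delete -f kata-cleanup/base/kata-cleanup.yaml
kubectl delete -f kata-rbac/base/kata-rbac.yaml
```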

View File

@@ -0,0 +1,69 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kata-deploy
namespace: kube-system
spec:
selector:
matchLabels:
name: kata-deploy
template:
metadata:
labels:
name: kata-deploy
spec:
serviceAccountName: kata-label-node
containers:
- name: kube-kata
image: quay.io/kata-containers/kata-deploy:stable
imagePullPolicy: Always
lifecycle:
preStop:
exec:
command: ["bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh cleanup"]
command: [ "bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh install" ]
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
privileged: false
volumeMounts:
- name: crio-conf
mountPath: /etc/crio/
- name: containerd-conf
mountPath: /etc/containerd/
- name: kata-artifacts
mountPath: /opt/kata/
- name: dbus
mountPath: /var/run/dbus
- name: systemd
mountPath: /run/systemd
- name: local-bin
mountPath: /usr/local/bin/
volumes:
- name: crio-conf
hostPath:
path: /etc/crio/
- name: containerd-conf
hostPath:
path: /etc/containerd/
- name: kata-artifacts
hostPath:
path: /opt/kata/
type: DirectoryOrCreate
- name: dbus
hostPath:
path: /var/run/dbus
- name: systemd
hostPath:
path: /run/systemd
- name: local-bin
hostPath:
path: /usr/local/bin/
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
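The install DaemonSet relies on the `kata-label-node` service account having RBAC permission to label nodes, so the RBAC manifest has to go in first. A minimal sketch of rolling it out and checking the result (manifest paths are assumptions about the repository layout):

```
# Hypothetical install sequence:
kubectl apply -f kata-rbac/base/kata-rbac.yaml
kubectl apply -f kata-deploy/base/kata-deploy.yaml
kubectl -n kube-system rollout status daemonset/kata-deploy
# Nodes that finished installing are labelled by kata-deploy.sh:
kubectl get nodes -l katacontainers.io/kata-runtime=true
```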

View File

@@ -19,7 +19,7 @@ $(MK_DIR)/dockerbuild/install_yq.sh:
$(MK_DIR)/kata-deploy-copy-yq-installer.sh
all-parallel: $(MK_DIR)/dockerbuild/install_yq.sh
${MAKE} -f $(MK_PATH) all -j$$(( $$(nproc) - 1 )) V=
${MAKE} -f $(MK_PATH) all -j $(shell nproc ${CI:+--ignore 1}) V=
all: cloud-hypervisor-tarball \
firecracker-tarball \

View File

@@ -23,8 +23,13 @@ RUN apt-get update && \
ARG IMG_USER=kata-builder
ARG UID=1000
ARG GID=1000
# gid of the docker group on the host, required for running docker in docker builds.
ARG HOST_DOCKER_GID
RUN if [ ${IMG_USER} != "root" ]; then groupadd --gid=${GID} ${IMG_USER};fi
RUN if [ ${IMG_USER} != "root" ]; then adduser ${IMG_USER} --uid=${UID} --gid=${GID};fi
RUN if [ ${IMG_USER} != "root" ] && [ ! -z ${HOST_DOCKER_GID} ]; then groupadd --gid=${HOST_DOCKER_GID} docker_on_host;fi
RUN if [ ${IMG_USER} != "root" ] && [ ! -z ${HOST_DOCKER_GID} ]; then usermod -a -G docker_on_host ${IMG_USER};fi
RUN sh -c "echo '${IMG_USER} ALL=NOPASSWD: ALL' >> /etc/sudoers"
#FIXME: gcc is required as agent is build out of a container build.
@@ -40,4 +45,4 @@ RUN apt-get update && \
apt-get clean && rm -rf /var/lib/apt/lists
ENV USER ${IMG_USER}
USER ${UID}:${GID}
USER ${IMG_USER}
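The new `HOST_DOCKER_GID` argument only matters for docker-in-docker builds (for example building Cloud Hypervisor from source inside this image): the in-container user has to belong to the group that owns the mounted Docker socket. The wrapper script below derives the value automatically; done by hand it would look roughly like this (the image tag is a placeholder):

```
# Hypothetical manual build of the kata-deploy build image, mirroring the wrapper script:
# pass the host docker group's gid so the unprivileged build user can reach the docker socket.
docker build \
  --build-arg IMG_USER="$(id -un)" \
  --build-arg UID="$(id -u)" \
  --build-arg GID="$(id -g)" \
  --build-arg HOST_DOCKER_GID="$(getent group docker | cut -d: -f3)" \
  -t build-kata-deploy ./dockerbuild/
```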

View File

@@ -20,10 +20,22 @@ if [ "${script_dir}" != "${PWD}" ]; then
ln -sf "${script_dir}/build" "${PWD}/build"
fi
# This is the gid of the "docker" group on host. In case of docker in docker builds
# for some of the targets (clh builds from source), the nested container user needs to
# be part of this group.
docker_gid=$(getent group docker | cut -d: -f3 || { echo >&2 "Missing docker group, docker needs to be installed" && false; })
# If docker gid is the effective group id of the user, do not pass it as
# an additional group.
if [ ${docker_gid} == ${gid} ]; then
docker_gid=""
fi
docker build -q -t build-kata-deploy \
--build-arg IMG_USER="${USER}" \
--build-arg UID=${uid} \
--build-arg GID=${gid} \
--build-arg HOST_DOCKER_GID=${docker_gid} \
"${script_dir}/dockerbuild/"
docker run \
@@ -38,4 +50,3 @@ docker run \
--rm \
-w ${script_dir} \
build-kata-deploy "${kata_deploy_create}" $@

View File

@@ -98,10 +98,7 @@ function configure_different_shims_base() {
fi
fi
cat << EOF | tee "$shim_file"
#!/usr/bin/env bash
KATA_CONF_FILE=/opt/kata/share/defaults/kata-containers/configuration-${shim}.toml /opt/kata/bin/containerd-shim-kata-v2 "\$@"
EOF
ln -sf /opt/kata/bin/containerd-shim-kata-v2 "${shim_file}"
chmod +x "$shim_file"
if [ "${shim}" == "${default_shim}" ]; then
@@ -129,12 +126,15 @@ function cleanup_different_shims_base() {
function configure_crio_runtime() {
local runtime="kata"
local configuration="configuration"
if [ -n "${1-}" ]; then
runtime+="-$1"
configuration+="-$1"
fi
local kata_path="/usr/local/bin/containerd-shim-${runtime}-v2"
local kata_conf="crio.runtime.runtimes.${runtime}"
local kata_config_path="/opt/kata/share/defaults/kata-containers/$configuration.toml"
cat <<EOF | tee -a "$crio_drop_in_conf_file"
@@ -143,6 +143,7 @@ function configure_crio_runtime() {
runtime_path = "${kata_path}"
runtime_type = "vm"
runtime_root = "/run/vc"
runtime_config_path = "${kata_config_path}"
privileged_without_host_devices = true
EOF
}
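With `runtime_config_path` added, each CRI-O handler is pinned to its own Kata configuration file instead of the shim's built-in default. For instance, `configure_crio_runtime qemu` should append a stanza along these lines (illustrative only; the drop-in path is whatever `$crio_drop_in_conf_file` points at):

```
# Illustrative check of the generated CRI-O drop-in for the "qemu" variant:
cat "$crio_drop_in_conf_file"
# [crio.runtime.runtimes.kata-qemu]
# runtime_path = "/usr/local/bin/containerd-shim-kata-qemu-v2"
# runtime_type = "vm"
# runtime_root = "/run/vc"
# runtime_config_path = "/opt/kata/share/defaults/kata-containers/configuration-qemu.toml"
# privileged_without_host_devices = true
```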

View File

@@ -403,9 +403,9 @@ build_kernel() {
[ -n "${arch_target}" ] || arch_target="$(uname -m)"
arch_target=$(arch_to_kernel "${arch_target}")
pushd "${kernel_path}" >>/dev/null
make -j $(nproc) ARCH="${arch_target}"
make -j $(nproc ${CI:+--ignore 1}) ARCH="${arch_target}"
if [ "${conf_guest}" == "sev" ]; then
make -j $(nproc --ignore=1) INSTALL_MOD_STRIP=1 INSTALL_MOD_PATH=${kernel_path} modules_install
make -j $(nproc ${CI:+--ignore 1}) INSTALL_MOD_STRIP=1 INSTALL_MOD_PATH=${kernel_path} modules_install
fi
[ "$arch_target" != "powerpc" ] && ([ -e "arch/${arch_target}/boot/bzImage" ] || [ -e "arch/${arch_target}/boot/Image.gz" ])
[ -e "vmlinux" ]

View File

@@ -56,6 +56,7 @@ build_clh_from_source() {
repo_dir="${repo_dir//.git}"
rm -rf "${repo_dir}"
git clone "${cloud_hypervisor_repo}"
git config --global --add safe.directory "$PWD/${repo_dir}"
pushd "${repo_dir}"
if [ -n "${cloud_hypervisor_pr}" ]; then

View File

@@ -76,6 +76,6 @@ RUN git clone --depth=1 "${QEMU_REPO}" qemu && \
[ -n "${BUILD_SUFFIX}" ] && PKGVERSION="kata-static-${BUILD_SUFFIX}" || PKGVERSION="kata-static" && \
(PREFIX="${PREFIX}" /root/configure-hypervisor.sh -s "${HYPERVISOR_NAME}" | xargs ./configure \
--with-pkgversion="${PKGVERSION}") && \
make -j"$(nproc)" && \
make -j"$(nproc ${CI:+--ignore 1})" && \
make install DESTDIR="${QEMU_DESTDIR}" && \
/root/static-build/scripts/qemu-build-post.sh

View File

@@ -75,7 +75,7 @@ assets:
url: "https://github.com/cloud-hypervisor/cloud-hypervisor"
uscan-url: >-
https://github.com/cloud-hypervisor/cloud-hypervisor/tags.*/v?(\d\S+)\.tar\.gz https://github.com/cloud-hypervisor/cloud-hypervisor/tags.*/v?(\d\S+)\.tar\.gz
version: "v24.0"
version: "v25.0"
firecracker:
description: "Firecracker micro-VMM"