CCv0: Merge main into CCv0 branch

Merge remote-tracking branch 'upstream/main' into CCv0

Fixes: #6241
Signed-off-by: Megan Wright <megan.wright@ibm.com>
Megan Wright
2023-02-21 11:53:30 +00:00
132 changed files with 4060 additions and 516 deletions

View File

@@ -320,7 +320,7 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config string, err err
kernelPath := path.Join(dir, "kernel")
kernelParams := "foo=bar xyz"
imagePath := path.Join(dir, "image")
shimPath := path.Join(dir, "shim")
rootfsType := "ext4"
logDir := path.Join(dir, "logs")
logPath := path.Join(logDir, "runtime.log")
machineType := "machineType"
@@ -338,9 +338,9 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config string, err err
HypervisorPath: hypervisorPath,
KernelPath: kernelPath,
ImagePath: imagePath,
RootfsType: rootfsType,
KernelParams: kernelParams,
MachineType: machineType,
ShimPath: shimPath,
LogPath: logPath,
DisableBlock: disableBlockDevice,
BlockDeviceDriver: blockDeviceDriver,
@@ -360,7 +360,7 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config string, err err
return "", err
}
files := []string{hypervisorPath, kernelPath, imagePath, shimPath}
files := []string{hypervisorPath, kernelPath, imagePath}
for _, file := range files {
// create the resource (which must be >0 bytes)

View File

@@ -9,9 +9,11 @@ import (
"context"
cgroupsv1 "github.com/containerd/cgroups/stats/v1"
cgroupsv2 "github.com/containerd/cgroups/v2/stats"
"github.com/containerd/typeurl"
google_protobuf "github.com/gogo/protobuf/types"
resCtrl "github.com/kata-containers/kata-containers/src/runtime/pkg/resourcecontrol"
vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
)
@@ -21,7 +23,18 @@ func marshalMetrics(ctx context.Context, s *service, containerID string) (*googl
return nil, err
}
metrics := statsToMetrics(&stats)
isCgroupV1, err := resCtrl.IsCgroupV1()
if err != nil {
return nil, err
}
var metrics interface{}
if isCgroupV1 {
metrics = statsToMetricsV1(&stats)
} else {
metrics = statsToMetricsV2(&stats)
}
data, err := typeurl.MarshalAny(metrics)
if err != nil {
@@ -31,25 +44,40 @@ func marshalMetrics(ctx context.Context, s *service, containerID string) (*googl
return data, nil
}
func statsToMetrics(stats *vc.ContainerStats) *cgroupsv1.Metrics {
func statsToMetricsV1(stats *vc.ContainerStats) *cgroupsv1.Metrics {
metrics := &cgroupsv1.Metrics{}
if stats.CgroupStats != nil {
metrics = &cgroupsv1.Metrics{
Hugetlb: setHugetlbStats(stats.CgroupStats.HugetlbStats),
Pids: setPidsStats(stats.CgroupStats.PidsStats),
CPU: setCPUStats(stats.CgroupStats.CPUStats),
Memory: setMemoryStats(stats.CgroupStats.MemoryStats),
Blkio: setBlkioStats(stats.CgroupStats.BlkioStats),
Hugetlb: setHugetlbStatsV1(stats.CgroupStats.HugetlbStats),
Pids: setPidsStatsV1(stats.CgroupStats.PidsStats),
CPU: setCPUStatsV1(stats.CgroupStats.CPUStats),
Memory: setMemoryStatsV1(stats.CgroupStats.MemoryStats),
Blkio: setBlkioStatsV1(stats.CgroupStats.BlkioStats),
}
}
metrics.Network = setNetworkStats(stats.NetworkStats)
return metrics
}
func setHugetlbStats(vcHugetlb map[string]vc.HugetlbStats) []*cgroupsv1.HugetlbStat {
func statsToMetricsV2(stats *vc.ContainerStats) *cgroupsv2.Metrics {
metrics := &cgroupsv2.Metrics{}
if stats.CgroupStats != nil {
metrics = &cgroupsv2.Metrics{
Hugetlb: setHugetlbStatsV2(stats.CgroupStats.HugetlbStats),
Pids: setPidsStatsV2(stats.CgroupStats.PidsStats),
CPU: setCPUStatsV2(stats.CgroupStats.CPUStats),
Memory: setMemoryStatsV2(stats.CgroupStats.MemoryStats),
Io: setBlkioStatsV2(stats.CgroupStats.BlkioStats),
}
}
return metrics
}
func setHugetlbStatsV1(vcHugetlb map[string]vc.HugetlbStats) []*cgroupsv1.HugetlbStat {
var hugetlbStats []*cgroupsv1.HugetlbStat
for k, v := range vcHugetlb {
hugetlbStats = append(
@@ -65,7 +93,22 @@ func setHugetlbStats(vcHugetlb map[string]vc.HugetlbStats) []*cgroupsv1.HugetlbS
return hugetlbStats
}
func setPidsStats(vcPids vc.PidsStats) *cgroupsv1.PidsStat {
func setHugetlbStatsV2(vcHugetlb map[string]vc.HugetlbStats) []*cgroupsv2.HugeTlbStat {
var hugetlbStats []*cgroupsv2.HugeTlbStat
for k, v := range vcHugetlb {
hugetlbStats = append(
hugetlbStats,
&cgroupsv2.HugeTlbStat{
Current: v.Usage,
Max: v.MaxUsage,
Pagesize: k,
})
}
return hugetlbStats
}
func setPidsStatsV1(vcPids vc.PidsStats) *cgroupsv1.PidsStat {
pidsStats := &cgroupsv1.PidsStat{
Current: vcPids.Current,
Limit: vcPids.Limit,
@@ -74,8 +117,16 @@ func setPidsStats(vcPids vc.PidsStats) *cgroupsv1.PidsStat {
return pidsStats
}
func setCPUStats(vcCPU vc.CPUStats) *cgroupsv1.CPUStat {
func setPidsStatsV2(vcPids vc.PidsStats) *cgroupsv2.PidsStat {
pidsStats := &cgroupsv2.PidsStat{
Current: vcPids.Current,
Limit: vcPids.Limit,
}
return pidsStats
}
func setCPUStatsV1(vcCPU vc.CPUStats) *cgroupsv1.CPUStat {
var perCPU []uint64
perCPU = append(perCPU, vcCPU.CPUUsage.PercpuUsage...)
@@ -96,7 +147,20 @@ func setCPUStats(vcCPU vc.CPUStats) *cgroupsv1.CPUStat {
return cpuStats
}
func setMemoryStats(vcMemory vc.MemoryStats) *cgroupsv1.MemoryStat {
func setCPUStatsV2(vcCPU vc.CPUStats) *cgroupsv2.CPUStat {
cpuStats := &cgroupsv2.CPUStat{
UsageUsec: vcCPU.CPUUsage.TotalUsage / 1000,
UserUsec: vcCPU.CPUUsage.UsageInKernelmode / 1000,
SystemUsec: vcCPU.CPUUsage.UsageInUsermode / 1000,
NrPeriods: vcCPU.ThrottlingData.Periods,
NrThrottled: vcCPU.ThrottlingData.ThrottledPeriods,
ThrottledUsec: vcCPU.ThrottlingData.ThrottledTime / 1000,
}
return cpuStats
}
func setMemoryStatsV1(vcMemory vc.MemoryStats) *cgroupsv1.MemoryStat {
memoryStats := &cgroupsv1.MemoryStat{
Usage: &cgroupsv1.MemoryEntry{
Limit: vcMemory.Usage.Limit,
@@ -146,22 +210,41 @@ func setMemoryStats(vcMemory vc.MemoryStats) *cgroupsv1.MemoryStat {
return memoryStats
}
func setBlkioStats(vcBlkio vc.BlkioStats) *cgroupsv1.BlkIOStat {
func setMemoryStatsV2(vcMemory vc.MemoryStats) *cgroupsv2.MemoryStat {
memoryStats := &cgroupsv2.MemoryStat{
Usage: vcMemory.Usage.Usage,
UsageLimit: vcMemory.Usage.Limit,
SwapUsage: vcMemory.SwapUsage.Usage,
SwapLimit: vcMemory.SwapUsage.Limit,
}
return memoryStats
}
func setBlkioStatsV1(vcBlkio vc.BlkioStats) *cgroupsv1.BlkIOStat {
blkioStats := &cgroupsv1.BlkIOStat{
IoServiceBytesRecursive: copyBlkio(vcBlkio.IoServiceBytesRecursive),
IoServicedRecursive: copyBlkio(vcBlkio.IoServicedRecursive),
IoQueuedRecursive: copyBlkio(vcBlkio.IoQueuedRecursive),
SectorsRecursive: copyBlkio(vcBlkio.SectorsRecursive),
IoServiceTimeRecursive: copyBlkio(vcBlkio.IoServiceTimeRecursive),
IoWaitTimeRecursive: copyBlkio(vcBlkio.IoWaitTimeRecursive),
IoMergedRecursive: copyBlkio(vcBlkio.IoMergedRecursive),
IoTimeRecursive: copyBlkio(vcBlkio.IoTimeRecursive),
IoServiceBytesRecursive: copyBlkioV1(vcBlkio.IoServiceBytesRecursive),
IoServicedRecursive: copyBlkioV1(vcBlkio.IoServicedRecursive),
IoQueuedRecursive: copyBlkioV1(vcBlkio.IoQueuedRecursive),
SectorsRecursive: copyBlkioV1(vcBlkio.SectorsRecursive),
IoServiceTimeRecursive: copyBlkioV1(vcBlkio.IoServiceTimeRecursive),
IoWaitTimeRecursive: copyBlkioV1(vcBlkio.IoWaitTimeRecursive),
IoMergedRecursive: copyBlkioV1(vcBlkio.IoMergedRecursive),
IoTimeRecursive: copyBlkioV1(vcBlkio.IoTimeRecursive),
}
return blkioStats
}
func copyBlkio(s []vc.BlkioStatEntry) []*cgroupsv1.BlkIOEntry {
func setBlkioStatsV2(vcBlkio vc.BlkioStats) *cgroupsv2.IOStat {
ioStats := &cgroupsv2.IOStat{
Usage: copyBlkioV2(vcBlkio.IoServiceBytesRecursive),
}
return ioStats
}
func copyBlkioV1(s []vc.BlkioStatEntry) []*cgroupsv1.BlkIOEntry {
ret := make([]*cgroupsv1.BlkIOEntry, len(s))
for i, v := range s {
ret[i] = &cgroupsv1.BlkIOEntry{
@@ -175,6 +258,28 @@ func copyBlkio(s []vc.BlkioStatEntry) []*cgroupsv1.BlkIOEntry {
return ret
}
func copyBlkioV2(s []vc.BlkioStatEntry) []*cgroupsv2.IOEntry {
var ret []*cgroupsv2.IOEntry
item := cgroupsv2.IOEntry{}
for _, v := range s {
switch v.Op {
case "read":
item.Rbytes = v.Value
case "write":
item.Wbytes = v.Value
case "rios":
item.Rios = v.Value
case "wios":
item.Wios = v.Value
}
item.Major = v.Major
item.Minor = v.Minor
}
ret = append(ret, &item)
return ret
}
func setNetworkStats(vcNetwork []*vc.NetworkStats) []*cgroupsv1.NetworkStat {
networkStats := make([]*cgroupsv1.NetworkStat, len(vcNetwork))
for i, v := range vcNetwork {

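The split above means marshalMetrics now returns an Any that can wrap either a cgroup v1 or a cgroup v2 metrics message. As a point of reference only (not part of this commit; the helper name and printed fields are illustrative), a consumer could unpack such a payload with typeurl and type-switch on the result:

package example

import (
	"fmt"

	cgroupsv1 "github.com/containerd/cgroups/stats/v1"
	cgroupsv2 "github.com/containerd/cgroups/v2/stats"
	"github.com/containerd/typeurl"
	google_protobuf "github.com/gogo/protobuf/types"
)

// decodeMetrics is a hypothetical helper: it resolves the Any produced by
// marshalMetrics back into the concrete metrics type registered by the
// containerd cgroups packages and reports which hierarchy it came from.
func decodeMetrics(data *google_protobuf.Any) error {
	v, err := typeurl.UnmarshalAny(data)
	if err != nil {
		return err
	}
	switch m := v.(type) {
	case *cgroupsv1.Metrics:
		if m.Memory != nil && m.Memory.Usage != nil {
			fmt.Println("cgroup v1 memory usage:", m.Memory.Usage.Usage)
		}
	case *cgroupsv2.Metrics:
		if m.Memory != nil {
			fmt.Println("cgroup v2 memory usage:", m.Memory.Usage)
		}
	default:
		return fmt.Errorf("unexpected metrics type %T", v)
	}
	return nil
}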
View File

@@ -17,8 +17,7 @@ import (
)
func TestStatNetworkMetric(t *testing.T) {
assert := assert.New(t)
assertions := assert.New(t)
var err error
mockNetwork := []*vc.NetworkStats{
@@ -52,8 +51,8 @@ func TestStatNetworkMetric(t *testing.T) {
}()
resp, err := sandbox.StatsContainer(context.Background(), testContainerID)
assert.NoError(err)
assertions.NoError(err)
metrics := statsToMetrics(&resp)
assert.Equal(expectedNetwork, metrics.Network)
metrics := statsToMetricsV1(&resp)
assertions.Equal(expectedNetwork, metrics.Network)
}

View File

@@ -315,11 +315,11 @@ func GetSandboxesStoragePathRust() string {
// SocketAddress returns the address of the unix domain socket for communicating with the
// shim management endpoint
func SocketAddress(id string) string {
socketAddress := fmt.Sprintf("unix://%s", filepath.Join(string(filepath.Separator), GetSandboxesStoragePath(), id, "shim-monitor.sock"))
_, err := os.Stat(socketAddress)
// if the path not exist, check the rust runtime path
if err != nil {
// get the go runtime uds path
socketPath := filepath.Join(string(filepath.Separator), GetSandboxesStoragePath(), id, "shim-monitor.sock")
// if the path does not exist, use the rust runtime uds path instead
if _, err := os.Stat(socketPath); err != nil {
return fmt.Sprintf("unix://%s", filepath.Join(string(filepath.Separator), GetSandboxesStoragePathRust(), id, "shim-monitor.sock"))
}
return socketAddress
return fmt.Sprintf("unix://%s", socketPath)
}
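For reference, a client-side sketch (not from this commit; the helper name is made up) of how the unix:// address returned by SocketAddress is typically consumed: strip the scheme and dial the socket path directly, regardless of whether the go or rust runtime created it.

package example

import (
	"context"
	"net"
	"net/http"
	"strings"
)

// shimMonitorClient is a hypothetical helper: given the "unix://<path>" string
// returned by SocketAddress, it builds an HTTP client that dials the
// shim-monitor socket over a unix connection.
func shimMonitorClient(address string) *http.Client {
	socketPath := strings.TrimPrefix(address, "unix://")
	return &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				var d net.Dialer
				return d.DialContext(ctx, "unix", socketPath)
			},
		},
	}
}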

View File

@@ -10,6 +10,7 @@ import (
"io"
"os"
"path/filepath"
"runtime"
"syscall"
"testing"
"time"
@@ -96,6 +97,10 @@ func TestNewTtyIOFifoReopen(t *testing.T) {
}
func TestIoCopy(t *testing.T) {
// This test fails regularly on aarch64, so skip it temporarily
if runtime.GOARCH == "arm64" {
t.Skip("Skip TestIoCopy for aarch64")
}
assert := assert.New(t)
ctx := context.TODO()

View File

@@ -87,6 +87,8 @@ const (
// Define the string key for DriverOptions in DeviceInfo struct
FsTypeOpt = "fstype"
BlockDriverOpt = "block-driver"
VhostUserReconnectTimeOutOpt = "vhost-user-reconnect-timeout"
)
const (
@@ -97,6 +99,15 @@ const (
VhostUserSCSIMajor = 242
)
const (
// The timeout for reconnecting on non-server sockets when the remote end
// goes away.
// qemu will delay this many seconds and then attempt to reconnect. Zero
// disables reconnecting, and is the default.
DefaultVhostUserReconnectTimeOut = 0
)
// Defining these as a variable instead of a const, to allow
// overriding this in the tests.
@@ -320,6 +331,9 @@ type VhostUserDeviceAttrs struct {
CacheSize uint32
QueueSize uint32
// Reconnect timeout for socket of vhost user block device
ReconnectTime uint32
}
// GetHostPathFunc is function pointer used to mock GetHostPath in tests.

View File

@@ -8,6 +8,7 @@ package drivers
import (
"context"
"strconv"
"github.com/kata-containers/kata-containers/src/runtime/pkg/device/api"
"github.com/kata-containers/kata-containers/src/runtime/pkg/device/config"
@@ -72,17 +73,19 @@ func (device *VhostUserBlkDevice) Attach(ctx context.Context, devReceiver api.De
}
vAttrs := &config.VhostUserDeviceAttrs{
DevID: utils.MakeNameID("blk", device.DeviceInfo.ID, maxDevIDSize),
SocketPath: device.DeviceInfo.HostPath,
Type: config.VhostUserBlk,
Index: index,
DevID: utils.MakeNameID("blk", device.DeviceInfo.ID, maxDevIDSize),
SocketPath: device.DeviceInfo.HostPath,
Type: config.VhostUserBlk,
Index: index,
ReconnectTime: vhostUserReconnect(device.DeviceInfo.DriverOptions),
}
deviceLogger().WithFields(logrus.Fields{
"device": device.DeviceInfo.HostPath,
"SocketPath": vAttrs.SocketPath,
"Type": config.VhostUserBlk,
"Index": index,
"device": device.DeviceInfo.HostPath,
"SocketPath": vAttrs.SocketPath,
"Type": config.VhostUserBlk,
"Index": index,
"ReconnectTime": vAttrs.ReconnectTime,
}).Info("Attaching device")
device.VhostUserDeviceAttrs = vAttrs
@@ -93,6 +96,24 @@ func (device *VhostUserBlkDevice) Attach(ctx context.Context, devReceiver api.De
return nil
}
func vhostUserReconnect(customOptions map[string]string) uint32 {
var vhostUserReconnectTimeout uint32
if customOptions == nil {
vhostUserReconnectTimeout = config.DefaultVhostUserReconnectTimeOut
} else {
reconnectTimeoutStr := customOptions[config.VhostUserReconnectTimeOutOpt]
if reconnectTimeout, err := strconv.Atoi(reconnectTimeoutStr); err != nil {
vhostUserReconnectTimeout = config.DefaultVhostUserReconnectTimeOut
deviceLogger().WithField("reconnect", reconnectTimeoutStr).WithError(err).Warn("Failed to get reconnect timeout for vhost-user-blk device")
} else {
vhostUserReconnectTimeout = uint32(reconnectTimeout)
}
}
return vhostUserReconnectTimeout
}
func isVirtioBlkBlockDriver(customOptions map[string]string) bool {
var blockDriverOption string

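A hypothetical table-style test sketch (not part of this commit, assuming it sits in the same drivers package as the code above) for the new vhostUserReconnect helper: a parseable option wins, while nil or malformed options fall back to the default.

package drivers

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/kata-containers/kata-containers/src/runtime/pkg/device/config"
)

// TestVhostUserReconnectSketch is illustrative only: it exercises the three
// branches of vhostUserReconnect (valid value, nil options, unparseable value).
func TestVhostUserReconnectSketch(t *testing.T) {
	assert := assert.New(t)

	opts := map[string]string{config.VhostUserReconnectTimeOutOpt: "5"}
	assert.Equal(uint32(5), vhostUserReconnect(opts))

	assert.Equal(uint32(config.DefaultVhostUserReconnectTimeOut), vhostUserReconnect(nil))

	bad := map[string]string{config.VhostUserReconnectTimeOutOpt: "not-a-number"}
	assert.Equal(uint32(config.DefaultVhostUserReconnectTimeOut), vhostUserReconnect(bad))
}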
View File

@@ -10,6 +10,7 @@ import (
"context"
"encoding/hex"
"errors"
"fmt"
"sync"
"github.com/sirupsen/logrus"
@@ -42,6 +43,8 @@ type deviceManager struct {
sync.RWMutex
vhostUserStoreEnabled bool
vhostUserReconnectTimeout uint32
}
func deviceLogger() *logrus.Entry {
@@ -49,11 +52,12 @@ func deviceLogger() *logrus.Entry {
}
// NewDeviceManager creates a deviceManager object behaved as api.DeviceManager
func NewDeviceManager(blockDriver string, vhostUserStoreEnabled bool, vhostUserStorePath string, devices []api.Device) api.DeviceManager {
func NewDeviceManager(blockDriver string, vhostUserStoreEnabled bool, vhostUserStorePath string, vhostUserReconnect uint32, devices []api.Device) api.DeviceManager {
dm := &deviceManager{
vhostUserStoreEnabled: vhostUserStoreEnabled,
vhostUserStorePath: vhostUserStorePath,
devices: make(map[string]api.Device),
vhostUserStoreEnabled: vhostUserStoreEnabled,
vhostUserStorePath: vhostUserStorePath,
vhostUserReconnectTimeout: vhostUserReconnect,
devices: make(map[string]api.Device),
}
if blockDriver == config.VirtioMmio {
dm.blockDriver = config.VirtioMmio
@@ -119,6 +123,7 @@ func (dm *deviceManager) createDevice(devInfo config.DeviceInfo) (dev api.Device
devInfo.DriverOptions = make(map[string]string)
}
devInfo.DriverOptions[config.BlockDriverOpt] = dm.blockDriver
devInfo.DriverOptions[config.VhostUserReconnectTimeOutOpt] = fmt.Sprintf("%d", dm.vhostUserReconnectTimeout)
return drivers.NewVhostUserBlkDevice(&devInfo), nil
} else if isBlock(devInfo) {
if devInfo.DriverOptions == nil {

View File

@@ -208,7 +208,7 @@ func TestAttachBlockDevice(t *testing.T) {
}
func TestAttachDetachDevice(t *testing.T) {
dm := NewDeviceManager(config.VirtioSCSI, false, "", nil)
dm := NewDeviceManager(config.VirtioSCSI, false, "", 0, nil)
path := "/dev/hda"
deviceInfo := config.DeviceInfo{

View File

@@ -2668,8 +2668,9 @@ type Config struct {
qemuParams []string
}
// appendFDs append a list of file descriptors to the qemu configuration and
// returns a slice of offset file descriptors that will be seen by the qemu process.
// appendFDs appends a list of arbitrary file descriptors to the qemu configuration and
// returns a slice of consecutive file descriptors that will be seen by the qemu process.
// Please see the comment below for details.
func (config *Config) appendFDs(fds []*os.File) []int {
var fdInts []int
@@ -2681,6 +2682,10 @@ func (config *Config) appendFDs(fds []*os.File) []int {
// ExtraFiles specifies additional open files to be inherited by the
// new process. It does not include standard input, standard output, or
// standard error. If non-nil, entry i becomes file descriptor 3+i.
// This means that arbitrary file descriptors fd0, fd1... fdN passed in
// the array will be presented to the guest as consecutive descriptors
// 3, 4... N+3. The golang library internally relies on dup2() to do
// the renumbering.
for i := range fds {
fdInts = append(fdInts, oldLen+3+i)
}

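Since the reworded comment leans on Go's ExtraFiles renumbering, here is a standalone sketch (not part of the commit) that demonstrates it: whatever descriptor number a file has in the parent, entry i of ExtraFiles appears as descriptor 3+i in the child.

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// The write end of this pipe has some arbitrary descriptor number in the
	// parent process.
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	defer r.Close()

	// As entry 0 of ExtraFiles it becomes descriptor 3 in the child, which is
	// exactly the 3+i numbering appendFDs relies on.
	cmd := exec.Command("sh", "-c", "echo hello >&3")
	cmd.ExtraFiles = []*os.File{w}
	if err := cmd.Run(); err != nil {
		panic(err)
	}
	w.Close()

	buf := make([]byte, 16)
	n, _ := r.Read(buf)
	fmt.Printf("child wrote through fd 3: %q", buf[:n])
}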
View File

@@ -1540,7 +1540,7 @@ func (q *QMP) ExecuteGetFD(ctx context.Context, fdname string, fd *os.File) erro
// ExecuteCharDevUnixSocketAdd adds a character device using a unix socket as its backend:
// id is an identifier for the device, path specifies the local path of the unix socket,
// wait is to block waiting for a client to connect, server specifies that the socket is a listening socket.
func (q *QMP) ExecuteCharDevUnixSocketAdd(ctx context.Context, id, path string, wait, server bool) error {
func (q *QMP) ExecuteCharDevUnixSocketAdd(ctx context.Context, id, path string, wait, server bool, reconnect uint32) error {
data := map[string]interface{}{
"server": server,
"addr": map[string]interface{}{
@@ -1556,6 +1556,10 @@ func (q *QMP) ExecuteCharDevUnixSocketAdd(ctx context.Context, id, path string,
data["wait"] = wait
}
if reconnect > 0 {
data["reconnect"] = reconnect
}
args := map[string]interface{}{
"id": id,
"backend": map[string]interface{}{

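A caller-side sketch of the extended signature (a hypothetical wrapper, assuming the govmm qemu package path used by the runtime): a non-zero reconnect value adds a "reconnect" key so QEMU retries the connection that many seconds after the remote end of the socket goes away, while zero preserves the previous behaviour.

package example

import (
	"context"

	govmmQemu "github.com/kata-containers/kata-containers/src/runtime/pkg/govmm/qemu"
)

// addVhostUserChardev is a hypothetical helper: it registers a unix-socket
// chardev backend and, for a non-zero reconnect value, asks QEMU to retry the
// connection every `reconnect` seconds if the backend disappears.
func addVhostUserChardev(ctx context.Context, q *govmmQemu.QMP, id, socket string, reconnect uint32) error {
	// wait=false: do not block for a client; server=false: QEMU connects to an
	// existing socket created by the vhost-user backend.
	return q.ExecuteCharDevUnixSocketAdd(ctx, id, socket, false, false, reconnect)
}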
View File

@@ -1445,7 +1445,7 @@ func TestExecuteCharDevUnixSocketAdd(t *testing.T) {
cfg := QMPConfig{Logger: qmpTestLogger{}}
q := startQMPLoop(buf, cfg, connectedCh, disconnectedCh)
checkVersion(t, connectedCh)
err := q.ExecuteCharDevUnixSocketAdd(context.Background(), "foo", "foo.sock", false, true)
err := q.ExecuteCharDevUnixSocketAdd(context.Background(), "foo", "foo.sock", false, true, 1)
if err != nil {
t.Fatalf("Unexpected error %v", err)
}

View File

@@ -211,9 +211,9 @@ type RuntimeConfigOptions struct {
DefaultGuestHookPath string
KernelPath string
ImagePath string
RootfsType string
KernelParams string
MachineType string
ShimPath string
LogPath string
BlockDeviceDriver string
BlockDeviceAIO string
@@ -236,7 +236,6 @@ type RuntimeConfigOptions struct {
HypervisorDebug bool
RuntimeDebug bool
RuntimeTrace bool
ShimDebug bool
AgentDebug bool
AgentTrace bool
EnablePprof bool
@@ -309,6 +308,7 @@ func MakeRuntimeConfigFileData(config RuntimeConfigOptions) string {
block_device_aio = "` + config.BlockDeviceAIO + `"
kernel_params = "` + config.KernelParams + `"
image = "` + config.ImagePath + `"
rootfs_type = "` + config.RootfsType + `"
machine_type = "` + config.MachineType + `"
default_vcpus = ` + strconv.FormatUint(uint64(config.DefaultVCPUCount), 10) + `
default_maxvcpus = ` + strconv.FormatUint(uint64(config.DefaultMaxVCPUCount), 10) + `
@@ -323,10 +323,6 @@ func MakeRuntimeConfigFileData(config RuntimeConfigOptions) string {
shared_fs = "` + config.SharedFS + `"
virtio_fs_daemon = "` + config.VirtioFSDaemon + `"
[shim.kata]
path = "` + config.ShimPath + `"
enable_debug = ` + strconv.FormatBool(config.ShimDebug) + `
[agent.kata]
enable_debug = ` + strconv.FormatBool(config.AgentDebug) + `
enable_tracing = ` + strconv.FormatBool(config.AgentTrace) + `

View File

@@ -44,6 +44,7 @@ var defaultJailerPath = "/usr/bin/jailer"
var defaultImagePath = "/usr/share/kata-containers/kata-containers.img"
var defaultKernelPath = "/usr/share/kata-containers/vmlinuz.container"
var defaultInitrdPath = "/usr/share/kata-containers/kata-containers-initrd.img"
var defaultRootfsType = "ext4"
var defaultFirmwarePath = ""
var defaultFirmwareVolumePath = ""
var defaultMachineAccelerators = ""
@@ -80,9 +81,10 @@ const defaultHotplugVFIOOnRootBus bool = false
const defaultPCIeRootPort = 0
const defaultEntropySource = "/dev/urandom"
const defaultGuestHookPath string = ""
const defaultVirtioFSCacheMode = "none"
const defaultVirtioFSCacheMode = "never"
const defaultDisableImageNvdimm = false
const defaultVhostUserStorePath string = "/var/run/kata-containers/vhost-user/"
const defaultVhostUserDeviceReconnect = 0
const defaultRxRateLimiterMaxRate = uint64(0)
const defaultTxRateLimiterMaxRate = uint64(0)
const defaultConfidentialGuest = false

View File

@@ -84,6 +84,7 @@ type hypervisor struct {
CtlPath string `toml:"ctlpath"`
Initrd string `toml:"initrd"`
Image string `toml:"image"`
RootfsType string `toml:"rootfs_type"`
Firmware string `toml:"firmware"`
FirmwareVolume string `toml:"firmware_volume"`
MachineAccelerators string `toml:"machine_accelerators"`
@@ -146,6 +147,7 @@ type hypervisor struct {
BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"`
BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"`
EnableVhostUserStore bool `toml:"enable_vhost_user_store"`
VhostUserDeviceReconnect uint32 `toml:"vhost_user_reconnect_timeout_sec"`
DisableBlockDeviceUse bool `toml:"disable_block_device_use"`
MemPrealloc bool `toml:"enable_mem_prealloc"`
HugePages bool `toml:"enable_hugepages"`
@@ -272,6 +274,16 @@ func (h hypervisor) image() (string, error) {
return ResolvePath(p)
}
func (h hypervisor) rootfsType() (string, error) {
p := h.RootfsType
if p == "" {
p = "ext4"
}
return p, nil
}
func (h hypervisor) firmware() (string, error) {
p := h.Firmware
@@ -666,6 +678,11 @@ func newFirecrackerHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
return vc.HypervisorConfig{}, err
}
rootfsType, err := h.rootfsType()
if err != nil {
return vc.HypervisorConfig{}, err
}
firmware, err := h.firmware()
if err != nil {
return vc.HypervisorConfig{}, err
@@ -689,6 +706,7 @@ func newFirecrackerHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
KernelPath: kernel,
InitrdPath: initrd,
ImagePath: image,
RootfsType: rootfsType,
FirmwarePath: firmware,
KernelParams: vc.DeserializeParams(strings.Fields(kernelParams)),
NumVCPUs: h.defaultVCPUs(),
@@ -736,6 +754,11 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
return vc.HypervisorConfig{}, err
}
rootfsType, err := h.rootfsType()
if err != nil {
return vc.HypervisorConfig{}, err
}
pflashes, err := h.PFlash()
if err != nil {
return vc.HypervisorConfig{}, err
@@ -866,6 +889,7 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
SNPGuestPolicy: h.getSnpGuestPolicy(),
SEVCertChainPath: h.SEVCertChainPath,
DisableGuestSeLinux: h.DisableGuestSeLinux,
RootfsType: rootfsType,
}, nil
}
@@ -895,6 +919,11 @@ func newAcrnHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
errors.New("image must be defined in the configuration file")
}
rootfsType, err := h.rootfsType()
if err != nil {
return vc.HypervisorConfig{}, err
}
firmware, err := h.firmware()
if err != nil {
return vc.HypervisorConfig{}, err
@@ -912,6 +941,7 @@ func newAcrnHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
HypervisorPathList: h.HypervisorPathList,
KernelPath: kernel,
ImagePath: image,
RootfsType: rootfsType,
HypervisorCtlPath: hypervisorctl,
HypervisorCtlPathList: h.CtlPathList,
FirmwarePath: firmware,
@@ -962,6 +992,11 @@ func newClhHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
errors.New("image or initrd must be defined in the configuration file")
}
rootfsType, err := h.rootfsType()
if err != nil {
return vc.HypervisorConfig{}, err
}
firmware, err := h.firmware()
if err != nil {
return vc.HypervisorConfig{}, err
@@ -996,6 +1031,7 @@ func newClhHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
KernelPath: kernel,
InitrdPath: initrd,
ImagePath: image,
RootfsType: rootfsType,
FirmwarePath: firmware,
MachineAccelerators: machineAccelerators,
KernelParams: vc.DeserializeParams(strings.Fields(kernelParams)),
@@ -1055,15 +1091,23 @@ func newDragonballHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
if err != nil {
return vc.HypervisorConfig{}, err
}
image, err := h.image()
if err != nil {
return vc.HypervisorConfig{}, err
}
rootfsType, err := h.rootfsType()
if err != nil {
return vc.HypervisorConfig{}, err
}
kernelParams := h.kernelParams()
return vc.HypervisorConfig{
KernelPath: kernel,
ImagePath: image,
RootfsType: rootfsType,
KernelParams: vc.DeserializeParams(strings.Fields(kernelParams)),
NumVCPUs: h.defaultVCPUs(),
DefaultMaxVCPUs: h.defaultMaxVCPUs(),
@@ -1287,6 +1331,8 @@ func GetDefaultHypervisorConfig() vc.HypervisorConfig {
SEVGuestPolicy: defaultSEVGuestPolicy,
SNPGuestPolicy: defaultSNPGuestPolicy,
SEVCertChainPath: defaultSEVCertChainPath,
VhostUserDeviceReconnect: defaultVhostUserDeviceReconnect,
RootfsType: defaultRootfsType,
}
}

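Taken together with the device changes above, the user-visible surface of this merge is two new TOML keys. A hedged Go constant (mirroring how this repository embeds configuration text in its test helpers; every path and value below is illustrative) showing where they sit:

package example

// sampleHypervisorSection is illustrative only; it shows the two keys this
// merge adds to the [hypervisor.*] section of configuration.toml.
const sampleHypervisorSection = `
[hypervisor.qemu]
path = "/usr/bin/qemu-system-x86_64"
kernel = "/usr/share/kata-containers/vmlinuz.container"
image = "/usr/share/kata-containers/kata-containers.img"
# Filesystem type of the rootfs image; falls back to "ext4" when unset.
rootfs_type = "ext4"
# Seconds QEMU waits before reconnecting a vanished vhost-user backend socket;
# 0 (the default) disables reconnecting.
vhost_user_reconnect_timeout_sec = 0
`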
View File

@@ -74,6 +74,7 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf
kernelPath := path.Join(dir, "kernel")
kernelParams := "foo=bar xyz"
imagePath := path.Join(dir, "image")
rootfsType := "ext4"
logDir := path.Join(dir, "logs")
logPath := path.Join(logDir, "runtime.log")
machineType := "machineType"
@@ -94,6 +95,7 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf
HypervisorPath: hypervisorPath,
KernelPath: kernelPath,
ImagePath: imagePath,
RootfsType: rootfsType,
KernelParams: kernelParams,
MachineType: machineType,
LogPath: logPath,
@@ -153,6 +155,7 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf
HypervisorPath: hypervisorPath,
KernelPath: kernelPath,
ImagePath: imagePath,
RootfsType: rootfsType,
KernelParams: vc.DeserializeParams(strings.Fields(kernelParams)),
HypervisorMachineType: machineType,
NumVCPUs: defaultVCPUCount,
@@ -543,6 +546,7 @@ func TestMinimalRuntimeConfig(t *testing.T) {
KernelPath: defaultKernelPath,
ImagePath: defaultImagePath,
InitrdPath: defaultInitrdPath,
RootfsType: defaultRootfsType,
HypervisorMachineType: defaultMachineType,
NumVCPUs: defaultVCPUCount,
DefaultMaxVCPUs: defaultMaxVCPUCount,

View File

@@ -481,6 +481,12 @@ func addHypervisorConfigOverrides(ocispec specs.Spec, config *vc.SandboxConfig,
return err
}
if err := newAnnotationConfiguration(ocispec, vcAnnotations.VhostUserDeviceReconnect).setUint(func(reconnect uint64) {
config.HypervisorConfig.VhostUserDeviceReconnect = uint32(reconnect)
}); err != nil {
return err
}
if value, ok := ocispec.Annotations[vcAnnotations.GuestHookPath]; ok {
if value != "" {
config.HypervisorConfig.GuestHookPath = value

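The same timeout can now be overridden per pod through an annotation. A hypothetical sketch (the helper name and value are made up; the annotation constant comes from the runtime's annotations package) of setting it on an OCI spec before the overrides above run; the value is read with setUint, so it must parse as an unsigned integer number of seconds.

package example

import (
	specs "github.com/opencontainers/runtime-spec/specs-go"

	vcAnnotations "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
)

// setReconnectAnnotation is a hypothetical caller: it asks for a 10-second
// vhost-user reconnect timeout for this sandbox only.
func setReconnectAnnotation(spec *specs.Spec) {
	if spec.Annotations == nil {
		spec.Annotations = map[string]string{}
	}
	spec.Annotations[vcAnnotations.VhostUserDeviceReconnect] = "10"
}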
View File

@@ -19,9 +19,6 @@ var (
ErrCgroupMode = errors.New("cgroup controller type error")
)
// DefaultResourceControllerID runtime-determined location in the cgroups hierarchy.
const DefaultResourceControllerID = "/vc"
func DeviceToCgroupDeviceRule(device string) (*devices.Rule, error) {
var st unix.Stat_t
deviceRule := devices.Rule{

View File

@@ -18,6 +18,9 @@ import (
"golang.org/x/sys/unix"
)
// DefaultResourceControllerID runtime-determined location in the cgroups hierarchy.
const DefaultResourceControllerID = "/vc"
// ValidCgroupPathV1 returns a valid cgroup path for cgroup v1.
// see https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#cgroups-path
func ValidCgroupPathV1(path string, systemdCgroup bool) (string, error) {
@@ -140,6 +143,16 @@ func getSliceAndUnit(cgroupPath string) (string, string, error) {
return "", "", fmt.Errorf("Path: %s is not valid systemd's cgroups path", cgroupPath)
}
func IsCgroupV1() (bool, error) {
if cgroups.Mode() == cgroups.Legacy || cgroups.Mode() == cgroups.Hybrid {
return true, nil
} else if cgroups.Mode() == cgroups.Unified {
return false, nil
} else {
return false, ErrCgroupMode
}
}
func SetThreadAffinity(threadID int, cpuSetSlice []int) error {
unixCPUSet := unix.CPUSet{}
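Finally, a small usage sketch (hypothetical wrapper) of the new IsCgroupV1 helper, which maps the containerd cgroups modes onto the v1/v2 split used by the metrics code earlier in this diff: Legacy and Hybrid count as v1, Unified as v2, and anything else surfaces ErrCgroupMode.

package example

import (
	"fmt"

	resCtrl "github.com/kata-containers/kata-containers/src/runtime/pkg/resourcecontrol"
)

// cgroupFlavour is a hypothetical wrapper: legacy and hybrid hosts report as
// "v1", a pure unified hierarchy as "v2", and an unrecognised mode returns an
// error wrapping ErrCgroupMode.
func cgroupFlavour() (string, error) {
	isV1, err := resCtrl.IsCgroupV1()
	if err != nil {
		return "", fmt.Errorf("detecting cgroup hierarchy: %w", err)
	}
	if isV1 {
		return "v1", nil
	}
	return "v2", nil
}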