Merge pull request #2881 from mcastelino/topic/hypervisor-rename

Expose top level hypervisor methods -
This commit is contained in:
Peng Tao
2021-10-25 10:25:49 +08:00
committed by GitHub
43 changed files with 644 additions and 637 deletions

View File

@@ -155,14 +155,14 @@ func (a *Acrn) kernelParameters() string {
}
// Adds all capabilities supported by Acrn implementation of hypervisor interface
func (a *Acrn) capabilities(ctx context.Context) types.Capabilities {
span, _ := katatrace.Trace(ctx, a.Logger(), "capabilities", acrnTracingTags, map[string]string{"sandbox_id": a.id})
func (a *Acrn) Capabilities(ctx context.Context) types.Capabilities {
span, _ := katatrace.Trace(ctx, a.Logger(), "Capabilities", acrnTracingTags, map[string]string{"sandbox_id": a.id})
defer span.End()
return a.arch.capabilities()
}
func (a *Acrn) hypervisorConfig() HypervisorConfig {
func (a *Acrn) HypervisorConfig() HypervisorConfig {
return a.config
}
@@ -248,7 +248,7 @@ func (a *Acrn) buildDevices(ctx context.Context, imagePath string) ([]Device, er
return nil, fmt.Errorf("Image Path should not be empty: %s", imagePath)
}
_, console, err := a.getSandboxConsole(ctx, a.id)
_, console, err := a.GetVMConsole(ctx, a.id)
if err != nil {
return nil, err
}
@@ -344,7 +344,7 @@ func (a *Acrn) createDummyVirtioBlkDev(ctx context.Context, devices []Device) ([
}
func (a *Acrn) setConfig(config *HypervisorConfig) error {
if err := config.valid(); err != nil {
if err := config.Valid(); err != nil {
return err
}
@@ -353,12 +353,12 @@ func (a *Acrn) setConfig(config *HypervisorConfig) error {
return nil
}
// createSandbox is the Hypervisor sandbox creation.
func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error {
// CreateVM is the VM creation
func (a *Acrn) CreateVM(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error {
// Save the tracing context
a.ctx = ctx
span, ctx := katatrace.Trace(ctx, a.Logger(), "createSandbox", acrnTracingTags, map[string]string{"sandbox_id": a.id})
span, ctx := katatrace.Trace(ctx, a.Logger(), "CreateVM", acrnTracingTags, map[string]string{"sandbox_id": a.id})
defer span.End()
if err := a.setup(ctx, id, hypervisorConfig); err != nil {
@@ -422,8 +422,8 @@ func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNa
}
// startSandbox will start the Sandbox's VM.
func (a *Acrn) startSandbox(ctx context.Context, timeoutSecs int) error {
span, ctx := katatrace.Trace(ctx, a.Logger(), "startSandbox", acrnTracingTags, map[string]string{"sandbox_id": a.id})
func (a *Acrn) StartVM(ctx context.Context, timeoutSecs int) error {
span, ctx := katatrace.Trace(ctx, a.Logger(), "StartVM", acrnTracingTags, map[string]string{"sandbox_id": a.id})
defer span.End()
if a.config.Debug {
@@ -459,7 +459,7 @@ func (a *Acrn) startSandbox(ctx context.Context, timeoutSecs int) error {
}
a.state.PID = PID
if err = a.waitSandbox(ctx, timeoutSecs); err != nil {
if err = a.waitVM(ctx, timeoutSecs); err != nil {
a.Logger().WithField("acrn wait failed:", err).Debug()
return err
}
@@ -467,9 +467,9 @@ func (a *Acrn) startSandbox(ctx context.Context, timeoutSecs int) error {
return nil
}
// waitSandbox will wait for the Sandbox's VM to be up and running.
func (a *Acrn) waitSandbox(ctx context.Context, timeoutSecs int) error {
span, _ := katatrace.Trace(ctx, a.Logger(), "waitSandbox", acrnTracingTags, map[string]string{"sandbox_id": a.id})
// waitVM will wait for the Sandbox's VM to be up and running.
func (a *Acrn) waitVM(ctx context.Context, timeoutSecs int) error {
span, _ := katatrace.Trace(ctx, a.Logger(), "waitVM", acrnTracingTags, map[string]string{"sandbox_id": a.id})
defer span.End()
if timeoutSecs < 0 {
@@ -482,15 +482,15 @@ func (a *Acrn) waitSandbox(ctx context.Context, timeoutSecs int) error {
}
// stopSandbox will stop the Sandbox's VM.
func (a *Acrn) stopSandbox(ctx context.Context, waitOnly bool) (err error) {
span, _ := katatrace.Trace(ctx, a.Logger(), "stopSandbox", acrnTracingTags, map[string]string{"sandbox_id": a.id})
func (a *Acrn) StopVM(ctx context.Context, waitOnly bool) (err error) {
span, _ := katatrace.Trace(ctx, a.Logger(), "StopVM", acrnTracingTags, map[string]string{"sandbox_id": a.id})
defer span.End()
a.Logger().Info("Stopping acrn VM")
defer func() {
if err != nil {
a.Logger().Info("stopSandbox failed")
a.Logger().Info("StopVM failed")
} else {
a.Logger().Info("acrn VM stopped")
}
@@ -501,7 +501,7 @@ func (a *Acrn) stopSandbox(ctx context.Context, waitOnly bool) (err error) {
Idx := acrnUUIDsToIdx[uuid]
if err = a.loadInfo(); err != nil {
a.Logger().Info("Failed to load UUID availabiity info")
a.Logger().Info("Failed to load UUID availability info")
return err
}
@@ -554,22 +554,22 @@ func (a *Acrn) updateBlockDevice(drive *config.BlockDrive) error {
return err
}
func (a *Acrn) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
span, _ := katatrace.Trace(ctx, a.Logger(), "hotplugAddDevice", acrnTracingTags, map[string]string{"sandbox_id": a.id})
func (a *Acrn) HotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
span, _ := katatrace.Trace(ctx, a.Logger(), "HotplugAddDevice", acrnTracingTags, map[string]string{"sandbox_id": a.id})
defer span.End()
switch devType {
case blockDev:
case BlockDev:
//The drive placeholder has to exist prior to Update
return nil, a.updateBlockDevice(devInfo.(*config.BlockDrive))
default:
return nil, fmt.Errorf("hotplugAddDevice: unsupported device: devInfo:%v, deviceType%v",
return nil, fmt.Errorf("HotplugAddDevice: unsupported device: devInfo:%v, deviceType%v",
devInfo, devType)
}
}
func (a *Acrn) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
span, _ := katatrace.Trace(ctx, a.Logger(), "hotplugRemoveDevice", acrnTracingTags, map[string]string{"sandbox_id": a.id})
func (a *Acrn) HotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
span, _ := katatrace.Trace(ctx, a.Logger(), "HotplugRemoveDevice", acrnTracingTags, map[string]string{"sandbox_id": a.id})
defer span.End()
// Not supported. return success
@@ -577,8 +577,8 @@ func (a *Acrn) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, dev
return nil, nil
}
func (a *Acrn) pauseSandbox(ctx context.Context) error {
span, _ := katatrace.Trace(ctx, a.Logger(), "pauseSandbox", acrnTracingTags, map[string]string{"sandbox_id": a.id})
func (a *Acrn) PauseVM(ctx context.Context) error {
span, _ := katatrace.Trace(ctx, a.Logger(), "PauseVM", acrnTracingTags, map[string]string{"sandbox_id": a.id})
defer span.End()
// Not supported. return success
@@ -586,8 +586,8 @@ func (a *Acrn) pauseSandbox(ctx context.Context) error {
return nil
}
func (a *Acrn) resumeSandbox(ctx context.Context) error {
span, _ := katatrace.Trace(ctx, a.Logger(), "resumeSandbox", acrnTracingTags, map[string]string{"sandbox_id": a.id})
func (a *Acrn) ResumeVM(ctx context.Context) error {
span, _ := katatrace.Trace(ctx, a.Logger(), "ResumeVM", acrnTracingTags, map[string]string{"sandbox_id": a.id})
defer span.End()
// Not supported. return success
@@ -596,9 +596,9 @@ func (a *Acrn) resumeSandbox(ctx context.Context) error {
}
// addDevice will add extra devices to acrn command line.
func (a *Acrn) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
func (a *Acrn) AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
var err error
span, _ := katatrace.Trace(ctx, a.Logger(), "addDevice", acrnTracingTags, map[string]string{"sandbox_id": a.id})
span, _ := katatrace.Trace(ctx, a.Logger(), "AddDevice", acrnTracingTags, map[string]string{"sandbox_id": a.id})
defer span.End()
switch v := devInfo.(type) {
@@ -630,8 +630,8 @@ func (a *Acrn) addDevice(ctx context.Context, devInfo interface{}, devType devic
// getSandboxConsole builds the path of the console where we can read
// logs coming from the sandbox.
func (a *Acrn) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
span, _ := katatrace.Trace(ctx, a.Logger(), "getSandboxConsole", acrnTracingTags, map[string]string{"sandbox_id": a.id})
func (a *Acrn) GetVMConsole(ctx context.Context, id string) (string, string, error) {
span, _ := katatrace.Trace(ctx, a.Logger(), "GetVMConsole", acrnTracingTags, map[string]string{"sandbox_id": a.id})
defer span.End()
consoleURL, err := utils.BuildSocketPath(a.store.RunVMStoragePath(), id, acrnConsoleSocket)
@@ -642,51 +642,51 @@ func (a *Acrn) getSandboxConsole(ctx context.Context, id string) (string, string
return consoleProtoUnix, consoleURL, nil
}
func (a *Acrn) saveSandbox() error {
a.Logger().Info("save sandbox")
func (a *Acrn) SaveVM() error {
a.Logger().Info("Save sandbox")
// Not supported. return success
return nil
}
func (a *Acrn) disconnect(ctx context.Context) {
span, _ := katatrace.Trace(ctx, a.Logger(), "disconnect", acrnTracingTags, map[string]string{"sandbox_id": a.id})
func (a *Acrn) Disconnect(ctx context.Context) {
span, _ := katatrace.Trace(ctx, a.Logger(), "Disconnect", acrnTracingTags, map[string]string{"sandbox_id": a.id})
defer span.End()
// Not supported.
}
func (a *Acrn) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {
span, _ := katatrace.Trace(ctx, a.Logger(), "getThreadIDs", acrnTracingTags, map[string]string{"sandbox_id": a.id})
func (a *Acrn) GetThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
span, _ := katatrace.Trace(ctx, a.Logger(), "GetThreadIDs", acrnTracingTags, map[string]string{"sandbox_id": a.id})
defer span.End()
// Not supported. return success
//Just allocating an empty map
return vcpuThreadIDs{}, nil
return VcpuThreadIDs{}, nil
}
func (a *Acrn) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
return 0, memoryDevice{}, nil
func (a *Acrn) ResizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
return 0, MemoryDevice{}, nil
}
func (a *Acrn) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
func (a *Acrn) ResizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
return 0, 0, nil
}
func (a *Acrn) cleanup(ctx context.Context) error {
span, _ := katatrace.Trace(ctx, a.Logger(), "cleanup", acrnTracingTags, map[string]string{"sandbox_id": a.id})
func (a *Acrn) Cleanup(ctx context.Context) error {
span, _ := katatrace.Trace(ctx, a.Logger(), "Cleanup", acrnTracingTags, map[string]string{"sandbox_id": a.id})
defer span.End()
return nil
}
func (a *Acrn) getPids() []int {
func (a *Acrn) GetPids() []int {
return []int{a.state.PID}
}
func (a *Acrn) getVirtioFsPid() *int {
func (a *Acrn) GetVirtioFsPid() *int {
return nil
}
@@ -698,19 +698,19 @@ func (a *Acrn) toGrpc(ctx context.Context) ([]byte, error) {
return nil, errors.New("acrn is not supported by VM cache")
}
func (a *Acrn) save() (s persistapi.HypervisorState) {
func (a *Acrn) Save() (s persistapi.HypervisorState) {
s.Pid = a.state.PID
s.Type = string(AcrnHypervisor)
s.UUID = a.state.UUID
return
}
func (a *Acrn) load(s persistapi.HypervisorState) {
func (a *Acrn) Load(s persistapi.HypervisorState) {
a.state.PID = s.Pid
a.state.UUID = s.UUID
}
func (a *Acrn) check() error {
func (a *Acrn) Check() error {
if err := syscall.Kill(a.state.PID, syscall.Signal(0)); err != nil {
return errors.Wrapf(err, "failed to ping acrn process")
}
@@ -718,7 +718,7 @@ func (a *Acrn) check() error {
return nil
}
func (a *Acrn) generateSocket(id string) (interface{}, error) {
func (a *Acrn) GenerateSocket(id string) (interface{}, error) {
return generateVMSocket(id, a.store.RunVMStoragePath())
}
@@ -810,7 +810,7 @@ func (a *Acrn) loadInfo() error {
return nil
}
func (a *Acrn) isRateLimiterBuiltin() bool {
func (a *Acrn) IsRateLimiterBuiltin() bool {
return false
}

View File

@@ -77,19 +77,19 @@ func TestAcrnCapabilities(t *testing.T) {
arch: &acrnArchBase{},
}
caps := a.capabilities(a.ctx)
caps := a.Capabilities(a.ctx)
assert.True(caps.IsBlockDeviceSupported())
assert.True(caps.IsBlockDeviceHotplugSupported())
}
func testAcrnAddDevice(t *testing.T, devInfo interface{}, devType deviceType, expected []Device) {
func testAcrnAddDevice(t *testing.T, devInfo interface{}, devType DeviceType, expected []Device) {
assert := assert.New(t)
a := &Acrn{
ctx: context.Background(),
arch: &acrnArchBase{},
}
err := a.addDevice(context.Background(), devInfo, devType)
err := a.AddDevice(context.Background(), devInfo, devType)
assert.NoError(err)
assert.Exactly(a.acrnConfig.Devices, expected)
}
@@ -112,7 +112,7 @@ func TestAcrnAddDeviceSerialPortDev(t *testing.T) {
Name: name,
}
testAcrnAddDevice(t, socket, serialPortDev, expectedOut)
testAcrnAddDevice(t, socket, SerialPortDev, expectedOut)
}
func TestAcrnAddDeviceBlockDev(t *testing.T) {
@@ -131,7 +131,7 @@ func TestAcrnAddDeviceBlockDev(t *testing.T) {
Index: index,
}
testAcrnAddDevice(t, drive, blockDev, expectedOut)
testAcrnAddDevice(t, drive, BlockDev, expectedOut)
}
func TestAcrnHotplugUnsupportedDeviceType(t *testing.T) {
@@ -144,7 +144,7 @@ func TestAcrnHotplugUnsupportedDeviceType(t *testing.T) {
config: acrnConfig,
}
_, err := a.hotplugAddDevice(a.ctx, &memoryDevice{0, 128, uint64(0), false}, fsDev)
_, err := a.HotplugAddDevice(a.ctx, &MemoryDevice{0, 128, uint64(0), false}, FsDev)
assert.Error(err)
}
@@ -205,13 +205,13 @@ func TestAcrnGetSandboxConsole(t *testing.T) {
sandboxID := "testSandboxID"
expected := filepath.Join(a.store.RunVMStoragePath(), sandboxID, consoleSocket)
proto, result, err := a.getSandboxConsole(a.ctx, sandboxID)
proto, result, err := a.GetVMConsole(a.ctx, sandboxID)
assert.NoError(err)
assert.Equal(result, expected)
assert.Equal(proto, consoleProtoUnix)
}
func TestAcrnCreateSandbox(t *testing.T) {
func TestAcrnCreateVM(t *testing.T) {
assert := assert.New(t)
acrnConfig := newAcrnConfig()
store, err := persist.GetDriver()
@@ -235,7 +235,7 @@ func TestAcrnCreateSandbox(t *testing.T) {
//set PID to 1 to ignore hypercall to get UUID and set a random UUID
a.state.PID = 1
a.state.UUID = "f81d4fae-7dec-11d0-a765-00a0c91e6bf6"
err = a.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
err = a.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
assert.NoError(err)
assert.Exactly(acrnConfig, a.config)
}

View File

@@ -140,10 +140,10 @@ type agent interface {
resumeContainer(ctx context.Context, sandbox *Sandbox, c Container) error
// configure will update agent settings based on provided arguments
configure(ctx context.Context, h hypervisor, id, sharePath string, config KataAgentConfig) error
configure(ctx context.Context, h Hypervisor, id, sharePath string, config KataAgentConfig) error
// configureFromGrpc will update agent settings based on provided arguments which from Grpc
configureFromGrpc(ctx context.Context, h hypervisor, id string, config KataAgentConfig) error
configureFromGrpc(ctx context.Context, h Hypervisor, id string, config KataAgentConfig) error
// reseedRNG will reseed the guest random number generator
reseedRNG(ctx context.Context, data []byte) error

View File

@@ -63,7 +63,7 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
return nil, err
}
// cleanup sandbox resources in case of any failure
// Cleanup sandbox resources in case of any failure
defer func() {
if err != nil {
s.Delete(ctx)

View File

@@ -9,13 +9,13 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/fs"
"os"
"path/filepath"
"strings"
"testing"
ktu "github.com/kata-containers/kata-containers/src/runtime/pkg/katatestutils"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/fs"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/mock"

View File

@@ -100,7 +100,7 @@ func (endpoint *BridgedMacvlanEndpoint) Attach(ctx context.Context, s *Sandbox)
return err
}
return h.addDevice(ctx, endpoint, netDev)
return h.AddDevice(ctx, endpoint, NetDev)
}
// Detach for the virtual endpoint tears down the tap and bridge
@@ -121,12 +121,12 @@ func (endpoint *BridgedMacvlanEndpoint) Detach(ctx context.Context, netNsCreated
}
// HotAttach for bridged macvlan endpoint not supported yet
func (endpoint *BridgedMacvlanEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
func (endpoint *BridgedMacvlanEndpoint) HotAttach(ctx context.Context, h Hypervisor) error {
return fmt.Errorf("BridgedMacvlanEndpoint does not support Hot attach")
}
// HotDetach for bridged macvlan endpoint not supported yet
func (endpoint *BridgedMacvlanEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
func (endpoint *BridgedMacvlanEndpoint) HotDetach(ctx context.Context, h Hypervisor, netNsCreated bool, netNsPath string) error {
return fmt.Errorf("BridgedMacvlanEndpoint does not support Hot detach")
}

View File

@@ -171,7 +171,7 @@ type cloudHypervisor struct {
var clhKernelParams = []Param{
{"root", "/dev/pmem0p1"},
{"panic", "1"}, // upon kernel panic wait 1 second before reboot
{"no_timer_check", ""}, // do not check broken timer IRQ resources
{"no_timer_check", ""}, // do not check broken timer IRQ resources
{"noreplace-smp", ""}, // do not replace SMP instructions
{"rootflags", "dax,data=ordered,errors=remount-ro ro"}, // mount the root filesystem as readonly
{"rootfstype", "ext4"},
@@ -189,7 +189,7 @@ var clhDebugKernelParams = []Param{
//###########################################################
func (clh *cloudHypervisor) setConfig(config *HypervisorConfig) error {
err := config.valid()
err := config.Valid()
if err != nil {
return err
}
@@ -200,11 +200,11 @@ func (clh *cloudHypervisor) setConfig(config *HypervisorConfig) error {
}
// For cloudHypervisor this call only sets the internal structure up.
// The VM will be created and started through startSandbox().
func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error {
// The VM will be created and started through StartVM().
func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error {
clh.ctx = ctx
span, newCtx := katatrace.Trace(clh.ctx, clh.Logger(), "createSandbox", clhTracingTags, map[string]string{"sandbox_id": clh.id})
span, newCtx := katatrace.Trace(clh.ctx, clh.Logger(), "CreateVM", clhTracingTags, map[string]string{"sandbox_id": clh.id})
clh.ctx = newCtx
defer span.End()
@@ -215,7 +215,7 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
clh.id = id
clh.state.state = clhNotReady
clh.Logger().WithField("function", "createSandbox").Info("creating Sandbox")
clh.Logger().WithField("function", "CreateVM").Info("creating Sandbox")
virtiofsdSocketPath, err := clh.virtioFsSocketPath(clh.id)
if err != nil {
@@ -223,7 +223,7 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
}
if clh.state.PID > 0 {
clh.Logger().WithField("function", "createSandbox").Info("Sandbox already exist, loading from state")
clh.Logger().WithField("function", "CreateVM").Info("Sandbox already exist, loading from state")
clh.virtiofsd = &virtiofsd{
PID: clh.state.VirtiofsdPID,
sourcePath: filepath.Join(getSharePath(clh.id)),
@@ -235,7 +235,7 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
// No need to return an error from there since there might be nothing
// to fetch if this is the first time the hypervisor is created.
clh.Logger().WithField("function", "createSandbox").Info("Sandbox not found creating")
clh.Logger().WithField("function", "CreateVM").Info("Sandbox not found creating")
// Make sure the kernel path is valid
kernelPath, err := clh.config.KernelAssetPath()
@@ -251,7 +251,7 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
clh.vmconfig.Memory.Shared = func(b bool) *bool { return &b }(true)
// Enable hugepages if needed
clh.vmconfig.Memory.Hugepages = func(b bool) *bool { return &b }(clh.config.HugePages)
hostMemKb, err := getHostMemorySizeKb(procMemInfo)
hostMemKb, err := GetHostMemorySizeKb(procMemInfo)
if err != nil {
return nil
}
@@ -365,14 +365,14 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
}
// startSandbox will start the VMM and boot the virtual machine for the given sandbox.
func (clh *cloudHypervisor) startSandbox(ctx context.Context, timeout int) error {
span, _ := katatrace.Trace(ctx, clh.Logger(), "startSandbox", clhTracingTags, map[string]string{"sandbox_id": clh.id})
func (clh *cloudHypervisor) StartVM(ctx context.Context, timeout int) error {
span, _ := katatrace.Trace(ctx, clh.Logger(), "StartVM", clhTracingTags, map[string]string{"sandbox_id": clh.id})
defer span.End()
ctx, cancel := context.WithTimeout(context.Background(), clhAPITimeout*time.Second)
defer cancel()
clh.Logger().WithField("function", "startSandbox").Info("starting Sandbox")
clh.Logger().WithField("function", "StartVM").Info("starting Sandbox")
vmPath := filepath.Join(clh.store.RunVMStoragePath(), clh.id)
err := os.MkdirAll(vmPath, DirMode)
@@ -394,9 +394,9 @@ func (clh *cloudHypervisor) startSandbox(ctx context.Context, timeout int) error
defer label.SetProcessLabel("")
if clh.config.SharedFS == config.VirtioFS {
clh.Logger().WithField("function", "startSandbox").Info("Starting virtiofsd")
clh.Logger().WithField("function", "StartVM").Info("Starting virtiofsd")
pid, err := clh.virtiofsd.Start(ctx, func() {
clh.stopSandbox(ctx, false)
clh.StopVM(ctx, false)
})
if err != nil {
return err
@@ -425,8 +425,8 @@ func (clh *cloudHypervisor) startSandbox(ctx context.Context, timeout int) error
// getSandboxConsole builds the path of the console where we can read
// logs coming from the sandbox.
func (clh *cloudHypervisor) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
clh.Logger().WithField("function", "getSandboxConsole").WithField("id", id).Info("Get Sandbox Console")
func (clh *cloudHypervisor) GetVMConsole(ctx context.Context, id string) (string, string, error) {
clh.Logger().WithField("function", "GetVMConsole").WithField("id", id).Info("Get Sandbox Console")
master, slave, err := console.NewPty()
if err != nil {
clh.Logger().WithError(err).Error("Error create pseudo tty")
@@ -437,15 +437,15 @@ func (clh *cloudHypervisor) getSandboxConsole(ctx context.Context, id string) (s
return consoleProtoPty, slave, nil
}
func (clh *cloudHypervisor) disconnect(ctx context.Context) {
clh.Logger().WithField("function", "disconnect").Info("Disconnecting Sandbox Console")
func (clh *cloudHypervisor) Disconnect(ctx context.Context) {
clh.Logger().WithField("function", "Disconnect").Info("Disconnecting Sandbox Console")
}
func (clh *cloudHypervisor) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {
func (clh *cloudHypervisor) GetThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
clh.Logger().WithField("function", "getThreadIDs").Info("get thread ID's")
clh.Logger().WithField("function", "GetThreadIDs").Info("get thread ID's")
var vcpuInfo vcpuThreadIDs
var vcpuInfo VcpuThreadIDs
vcpuInfo.vcpus = make(map[int]int)
@@ -550,15 +550,15 @@ func (clh *cloudHypervisor) hotPlugVFIODevice(device *config.VFIODev) error {
return err
}
func (clh *cloudHypervisor) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
span, _ := katatrace.Trace(ctx, clh.Logger(), "hotplugAddDevice", clhTracingTags, map[string]string{"sandbox_id": clh.id})
func (clh *cloudHypervisor) HotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
span, _ := katatrace.Trace(ctx, clh.Logger(), "HotplugAddDevice", clhTracingTags, map[string]string{"sandbox_id": clh.id})
defer span.End()
switch devType {
case blockDev:
case BlockDev:
drive := devInfo.(*config.BlockDrive)
return nil, clh.hotplugAddBlockDevice(drive)
case vfioDev:
case VfioDev:
device := devInfo.(*config.VFIODev)
return nil, clh.hotPlugVFIODevice(device)
default:
@@ -567,20 +567,20 @@ func (clh *cloudHypervisor) hotplugAddDevice(ctx context.Context, devInfo interf
}
func (clh *cloudHypervisor) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
span, _ := katatrace.Trace(ctx, clh.Logger(), "hotplugRemoveDevice", clhTracingTags, map[string]string{"sandbox_id": clh.id})
func (clh *cloudHypervisor) HotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
span, _ := katatrace.Trace(ctx, clh.Logger(), "HotplugRemoveDevice", clhTracingTags, map[string]string{"sandbox_id": clh.id})
defer span.End()
var deviceID string
switch devType {
case blockDev:
case BlockDev:
deviceID = clhDriveIndexToID(devInfo.(*config.BlockDrive).Index)
case vfioDev:
case VfioDev:
deviceID = devInfo.(*config.VFIODev).ID
default:
clh.Logger().WithFields(log.Fields{"devInfo": devInfo,
"deviceType": devType}).Error("hotplugRemoveDevice: unsupported device")
"deviceType": devType}).Error("HotplugRemoveDevice: unsupported device")
return nil, fmt.Errorf("Could not hot remove device: unsupported device: %v, type: %v",
devInfo, devType)
}
@@ -599,40 +599,40 @@ func (clh *cloudHypervisor) hotplugRemoveDevice(ctx context.Context, devInfo int
return nil, err
}
func (clh *cloudHypervisor) hypervisorConfig() HypervisorConfig {
func (clh *cloudHypervisor) HypervisorConfig() HypervisorConfig {
return clh.config
}
func (clh *cloudHypervisor) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
func (clh *cloudHypervisor) ResizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
// TODO: Add support for virtio-mem
if probe {
return 0, memoryDevice{}, errors.New("probe memory is not supported for cloud-hypervisor")
return 0, MemoryDevice{}, errors.New("probe memory is not supported for cloud-hypervisor")
}
if reqMemMB == 0 {
// This is a corner case if requested to resize to 0 means something went really wrong.
return 0, memoryDevice{}, errors.New("Can not resize memory to 0")
return 0, MemoryDevice{}, errors.New("Can not resize memory to 0")
}
info, err := clh.vmInfo()
if err != nil {
return 0, memoryDevice{}, err
return 0, MemoryDevice{}, err
}
currentMem := utils.MemUnit(info.Config.Memory.Size) * utils.Byte
newMem := utils.MemUnit(reqMemMB) * utils.MiB
// Early check to verify if boot memory is the same as requested
// Early check to verify if boot memory is the same as requested
if currentMem == newMem {
clh.Logger().WithField("memory", reqMemMB).Debugf("VM already has requested memory")
return uint32(currentMem.ToMiB()), memoryDevice{}, nil
return uint32(currentMem.ToMiB()), MemoryDevice{}, nil
}
if currentMem > newMem {
clh.Logger().Warn("Remove memory is not supported, nothing to do")
return uint32(currentMem.ToMiB()), memoryDevice{}, nil
return uint32(currentMem.ToMiB()), MemoryDevice{}, nil
}
blockSize := utils.MemUnit(memoryBlockSizeMB) * utils.MiB
@@ -645,11 +645,11 @@ func (clh *cloudHypervisor) resizeMemory(ctx context.Context, reqMemMB uint32, m
newMem = alignedRequest
}
// Check if memory is the same as requested, a second check is done
// Check if memory is the same as requested, a second check is done
// to consider the memory request now that is updated to be memory aligned
if currentMem == newMem {
clh.Logger().WithFields(log.Fields{"current-memory": currentMem, "new-memory": newMem}).Debug("VM already has requested memory(after alignment)")
return uint32(currentMem.ToMiB()), memoryDevice{}, nil
return uint32(currentMem.ToMiB()), MemoryDevice{}, nil
}
cl := clh.client()
@@ -663,33 +663,33 @@ func (clh *cloudHypervisor) resizeMemory(ctx context.Context, reqMemMB uint32, m
if _, err = cl.VmResizePut(ctx, resize); err != nil {
clh.Logger().WithError(err).WithFields(log.Fields{"current-memory": currentMem, "new-memory": newMem}).Warnf("failed to update memory %s", openAPIClientError(err))
err = fmt.Errorf("Failed to resize memory from %d to %d: %s", currentMem, newMem, openAPIClientError(err))
return uint32(currentMem.ToMiB()), memoryDevice{}, openAPIClientError(err)
return uint32(currentMem.ToMiB()), MemoryDevice{}, openAPIClientError(err)
}
return uint32(newMem.ToMiB()), memoryDevice{sizeMB: int(hotplugSize.ToMiB())}, nil
return uint32(newMem.ToMiB()), MemoryDevice{SizeMB: int(hotplugSize.ToMiB())}, nil
}
func (clh *cloudHypervisor) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
func (clh *cloudHypervisor) ResizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
cl := clh.client()
// Retrieve the number of current vCPUs via HTTP API
info, err := clh.vmInfo()
if err != nil {
clh.Logger().WithField("function", "resizeVCPUs").WithError(err).Info("[clh] vmInfo failed")
clh.Logger().WithField("function", "ResizeVCPUs").WithError(err).Info("[clh] vmInfo failed")
return 0, 0, openAPIClientError(err)
}
currentVCPUs = uint32(info.Config.Cpus.BootVcpus)
newVCPUs = currentVCPUs
// Sanity check
// Sanity check
if reqVCPUs == 0 {
clh.Logger().WithField("function", "resizeVCPUs").Debugf("Cannot resize vCPU to 0")
clh.Logger().WithField("function", "ResizeVCPUs").Debugf("Cannot resize vCPU to 0")
return currentVCPUs, newVCPUs, fmt.Errorf("Cannot resize vCPU to 0")
}
if reqVCPUs > uint32(info.Config.Cpus.MaxVcpus) {
clh.Logger().WithFields(log.Fields{
"function": "resizeVCPUs",
"function": "ResizeVCPUs",
"reqVCPUs": reqVCPUs,
"clhMaxVCPUs": info.Config.Cpus.MaxVcpus,
}).Warn("exceeding the 'clhMaxVCPUs' (resizing to 'clhMaxVCPUs')")
@@ -711,31 +711,31 @@ func (clh *cloudHypervisor) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (c
return currentVCPUs, newVCPUs, nil
}
func (clh *cloudHypervisor) cleanup(ctx context.Context) error {
clh.Logger().WithField("function", "cleanup").Info("cleanup")
func (clh *cloudHypervisor) Cleanup(ctx context.Context) error {
clh.Logger().WithField("function", "Cleanup").Info("Cleanup")
return nil
}
func (clh *cloudHypervisor) pauseSandbox(ctx context.Context) error {
clh.Logger().WithField("function", "pauseSandbox").Info("Pause Sandbox")
func (clh *cloudHypervisor) PauseVM(ctx context.Context) error {
clh.Logger().WithField("function", "PauseVM").Info("Pause Sandbox")
return nil
}
func (clh *cloudHypervisor) saveSandbox() error {
func (clh *cloudHypervisor) SaveVM() error {
clh.Logger().WithField("function", "SaveVM").Info("Save Sandbox")
return nil
}
func (clh *cloudHypervisor) resumeSandbox(ctx context.Context) error {
clh.Logger().WithField("function", "resumeSandbox").Info("Resume Sandbox")
func (clh *cloudHypervisor) ResumeVM(ctx context.Context) error {
clh.Logger().WithField("function", "ResumeVM").Info("Resume Sandbox")
return nil
}
// stopSandbox will stop the Sandbox's VM.
func (clh *cloudHypervisor) stopSandbox(ctx context.Context, waitOnly bool) (err error) {
span, _ := katatrace.Trace(ctx, clh.Logger(), "stopSandbox", clhTracingTags, map[string]string{"sandbox_id": clh.id})
func (clh *cloudHypervisor) StopVM(ctx context.Context, waitOnly bool) (err error) {
span, _ := katatrace.Trace(ctx, clh.Logger(), "StopVM", clhTracingTags, map[string]string{"sandbox_id": clh.id})
defer span.End()
clh.Logger().WithField("function", "stopSandbox").Info("Stop Sandbox")
clh.Logger().WithField("function", "StopVM").Info("Stop Sandbox")
return clh.terminate(ctx, waitOnly)
}
@@ -747,7 +747,7 @@ func (clh *cloudHypervisor) toGrpc(ctx context.Context) ([]byte, error) {
return nil, errors.New("cloudHypervisor is not supported by VM cache")
}
func (clh *cloudHypervisor) save() (s persistapi.HypervisorState) {
func (clh *cloudHypervisor) Save() (s persistapi.HypervisorState) {
s.Pid = clh.state.PID
s.Type = string(ClhHypervisor)
s.VirtiofsdPid = clh.state.VirtiofsdPID
@@ -755,13 +755,13 @@ func (clh *cloudHypervisor) save() (s persistapi.HypervisorState) {
return
}
func (clh *cloudHypervisor) load(s persistapi.HypervisorState) {
func (clh *cloudHypervisor) Load(s persistapi.HypervisorState) {
clh.state.PID = s.Pid
clh.state.VirtiofsdPID = s.VirtiofsdPid
clh.state.apiSocket = s.APISocket
}
func (clh *cloudHypervisor) check() error {
func (clh *cloudHypervisor) Check() error {
cl := clh.client()
ctx, cancel := context.WithTimeout(context.Background(), clhAPITimeout*time.Second)
defer cancel()
@@ -770,16 +770,16 @@ func (clh *cloudHypervisor) check() error {
return err
}
func (clh *cloudHypervisor) getPids() []int {
func (clh *cloudHypervisor) GetPids() []int {
return []int{clh.state.PID}
}
func (clh *cloudHypervisor) getVirtioFsPid() *int {
func (clh *cloudHypervisor) GetVirtioFsPid() *int {
return &clh.state.VirtiofsdPID
}
func (clh *cloudHypervisor) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
span, _ := katatrace.Trace(ctx, clh.Logger(), "addDevice", clhTracingTags, map[string]string{"sandbox_id": clh.id})
func (clh *cloudHypervisor) AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
span, _ := katatrace.Trace(ctx, clh.Logger(), "AddDevice", clhTracingTags, map[string]string{"sandbox_id": clh.id})
defer span.End()
var err error
@@ -794,7 +794,7 @@ func (clh *cloudHypervisor) addDevice(ctx context.Context, devInfo interface{},
case types.Volume:
err = clh.addVolume(v)
default:
clh.Logger().WithField("function", "addDevice").Warnf("Add device of type %v is not supported.", v)
clh.Logger().WithField("function", "AddDevice").Warnf("Add device of type %v is not supported.", v)
return fmt.Errorf("Not implemented support for %s", v)
}
@@ -812,11 +812,11 @@ func (clh *cloudHypervisor) Logger() *log.Entry {
}
// Adds all capabilities supported by cloudHypervisor implementation of hypervisor interface
func (clh *cloudHypervisor) capabilities(ctx context.Context) types.Capabilities {
span, _ := katatrace.Trace(ctx, clh.Logger(), "capabilities", clhTracingTags, map[string]string{"sandbox_id": clh.id})
func (clh *cloudHypervisor) Capabilities(ctx context.Context) types.Capabilities {
span, _ := katatrace.Trace(ctx, clh.Logger(), "Capabilities", clhTracingTags, map[string]string{"sandbox_id": clh.id})
defer span.End()
clh.Logger().WithField("function", "capabilities").Info("get Capabilities")
clh.Logger().WithField("function", "Capabilities").Info("get Capabilities")
var caps types.Capabilities
caps.SetFsSharingSupport()
caps.SetBlockDeviceHotplugSupport()
@@ -834,7 +834,7 @@ func (clh *cloudHypervisor) terminate(ctx context.Context, waitOnly bool) (err e
}
defer func() {
clh.Logger().Debug("cleanup VM")
clh.Logger().Debug("Cleanup VM")
if err1 := clh.cleanupVM(true); err1 != nil {
clh.Logger().WithError(err1).Error("failed to cleanupVM")
}
@@ -873,7 +873,7 @@ func (clh *cloudHypervisor) reset() {
clh.state.reset()
}
func (clh *cloudHypervisor) generateSocket(id string) (interface{}, error) {
func (clh *cloudHypervisor) GenerateSocket(id string) (interface{}, error) {
udsPath, err := clh.vsockSocketPath(id)
if err != nil {
clh.Logger().Info("Can't generate socket path for cloud-hypervisor")
@@ -1206,7 +1206,7 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
}
}
// cleanup vm path
// Cleanup vm path
dir := filepath.Join(clh.store.RunVMStoragePath(), clh.id)
// If it's a symlink, remove both dir and the target.
@@ -1218,7 +1218,7 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
clh.Logger().WithFields(log.Fields{
"link": link,
"dir": dir,
}).Infof("cleanup vm path")
}).Infof("Cleanup vm path")
if err := os.RemoveAll(dir); err != nil {
if !force {
@@ -1263,7 +1263,7 @@ func (clh *cloudHypervisor) vmInfo() (chclient.VmInfo, error) {
return info, openAPIClientError(err)
}
func (clh *cloudHypervisor) isRateLimiterBuiltin() bool {
func (clh *cloudHypervisor) IsRateLimiterBuiltin() bool {
return false
}

View File

@@ -226,7 +226,7 @@ func TestCloudHypervisorCleanupVM(t *testing.T) {
assert.True(os.IsNotExist(err), "persist.GetDriver() unexpected error")
}
func TestClhCreateSandbox(t *testing.T) {
func TestClhCreateVM(t *testing.T) {
assert := assert.New(t)
clhConfig, err := newClhConfig()
@@ -248,7 +248,7 @@ func TestClhCreateSandbox(t *testing.T) {
},
}
err = clh.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
err = clh.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
assert.NoError(err)
assert.Exactly(clhConfig, clh.config)
}
@@ -268,7 +268,7 @@ func TestClooudHypervisorStartSandbox(t *testing.T) {
store: store,
}
err = clh.startSandbox(context.Background(), 10)
err = clh.StartVM(context.Background(), 10)
assert.NoError(err)
}
@@ -282,13 +282,13 @@ func TestCloudHypervisorResizeMemory(t *testing.T) {
tests := []struct {
name string
args args
expectedMemDev memoryDevice
expectedMemDev MemoryDevice
wantErr bool
}{
{"Resize to zero", args{0, 128}, memoryDevice{probe: false, sizeMB: 0}, FAIL},
{"Resize to aligned size", args{clhConfig.MemorySize + 128, 128}, memoryDevice{probe: false, sizeMB: 128}, PASS},
{"Resize to aligned size", args{clhConfig.MemorySize + 129, 128}, memoryDevice{probe: false, sizeMB: 256}, PASS},
{"Resize to NOT aligned size", args{clhConfig.MemorySize + 125, 128}, memoryDevice{probe: false, sizeMB: 128}, PASS},
{"Resize to zero", args{0, 128}, MemoryDevice{Probe: false, SizeMB: 0}, FAIL},
{"Resize to aligned size", args{clhConfig.MemorySize + 128, 128}, MemoryDevice{Probe: false, SizeMB: 128}, PASS},
{"Resize to aligned size", args{clhConfig.MemorySize + 129, 128}, MemoryDevice{Probe: false, SizeMB: 256}, PASS},
{"Resize to NOT aligned size", args{clhConfig.MemorySize + 125, 128}, MemoryDevice{Probe: false, SizeMB: 128}, PASS},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -303,10 +303,10 @@ func TestCloudHypervisorResizeMemory(t *testing.T) {
clh.APIClient = mockClient
clh.config = clhConfig
newMem, memDev, err := clh.resizeMemory(context.Background(), tt.args.reqMemMB, tt.args.memoryBlockSizeMB, false)
newMem, memDev, err := clh.ResizeMemory(context.Background(), tt.args.reqMemMB, tt.args.memoryBlockSizeMB, false)
if (err != nil) != tt.wantErr {
t.Errorf("cloudHypervisor.resizeMemory() error = %v, expected to fail = %v", err, tt.wantErr)
t.Errorf("cloudHypervisor.ResizeMemory() error = %v, expected to fail = %v", err, tt.wantErr)
return
}
@@ -314,14 +314,14 @@ func TestCloudHypervisorResizeMemory(t *testing.T) {
return
}
expectedMem := clhConfig.MemorySize + uint32(tt.expectedMemDev.sizeMB)
expectedMem := clhConfig.MemorySize + uint32(tt.expectedMemDev.SizeMB)
if newMem != expectedMem {
t.Errorf("cloudHypervisor.resizeMemory() got = %+v, want %+v", newMem, expectedMem)
t.Errorf("cloudHypervisor.ResizeMemory() got = %+v, want %+v", newMem, expectedMem)
}
if !reflect.DeepEqual(memDev, tt.expectedMemDev) {
t.Errorf("cloudHypervisor.resizeMemory() got = %+v, want %+v", memDev, tt.expectedMemDev)
t.Errorf("cloudHypervisor.ResizeMemory() got = %+v, want %+v", memDev, tt.expectedMemDev)
}
})
}
@@ -359,13 +359,13 @@ func TestCloudHypervisorHotplugRemoveDevice(t *testing.T) {
clh.config = clhConfig
clh.APIClient = &clhClientMock{}
_, err = clh.hotplugRemoveDevice(context.Background(), &config.BlockDrive{}, blockDev)
_, err = clh.HotplugRemoveDevice(context.Background(), &config.BlockDrive{}, BlockDev)
assert.NoError(err, "Hotplug remove block device expected no error")
_, err = clh.hotplugRemoveDevice(context.Background(), &config.VFIODev{}, vfioDev)
_, err = clh.HotplugRemoveDevice(context.Background(), &config.VFIODev{}, VfioDev)
assert.NoError(err, "Hotplug remove vfio block device expected no error")
_, err = clh.hotplugRemoveDevice(context.Background(), nil, netDev)
_, err = clh.HotplugRemoveDevice(context.Background(), nil, NetDev)
assert.Error(err, "Hotplug remove pmem block device expected error")
}
@@ -381,7 +381,7 @@ func TestClhGenerateSocket(t *testing.T) {
clh.addVSock(1, "path")
s, err := clh.generateSocket("c")
s, err := clh.GenerateSocket("c")
assert.NoError(err)
assert.NotNil(s)

View File

@@ -392,7 +392,7 @@ func (c *Container) GetAnnotations() map[string]string {
// This OCI specification was patched when the sandbox was created
// by containerCapabilities(), SetEphemeralStorageType() and others
// in order to support:
// * capabilities
// * Capabilities
// * Ephemeral storage
// * k8s empty dir
// If you need the original (vanilla) OCI spec,
@@ -431,7 +431,7 @@ func (c *Container) shareFiles(ctx context.Context, m Mount, idx int) (string, b
// copy file to contaier's rootfs if filesystem sharing is not supported, otherwise
// bind mount it in the shared directory.
caps := c.sandbox.hypervisor.capabilities(ctx)
caps := c.sandbox.hypervisor.Capabilities(ctx)
if !caps.IsFsSharingSupported() {
c.Logger().Debug("filesystem sharing is not supported, files will be copied")
@@ -573,7 +573,7 @@ func (c *Container) mountSharedDirMounts(ctx context.Context, sharedDirMounts, i
// manually update the path that is mounted into the container).
// Based on this, let's make sure we update the sharedDirMount structure with the new watchable-mount as
// the source (this is what is utilized to update the OCI spec).
caps := c.sandbox.hypervisor.capabilities(ctx)
caps := c.sandbox.hypervisor.Capabilities(ctx)
if isWatchableMount(m.Source) && caps.IsFsSharingSupported() {
// Create path in shared directory for creating watchable mount:
@@ -663,7 +663,7 @@ func filterDevices(c *Container, devices []ContainerDevice) (ret []ContainerDevi
return
}
// Add any mount based block devices to the device manager and save the
// Add any mount based block devices to the device manager and Save the
// device ID for the particular mount. This'll occur when the mountpoint source
// is a block device.
func (c *Container) createBlockDevices(ctx context.Context) error {
@@ -705,7 +705,7 @@ func (c *Container) createBlockDevices(ctx context.Context) error {
Minor: int64(unix.Minor(stat.Rdev)),
ReadOnly: m.ReadOnly,
}
// check whether source can be used as a pmem device
// Check whether source can be used as a pmem device
} else if di, err = config.PmemDeviceInfo(m.Source, m.Destination); err != nil {
c.Logger().WithError(err).
WithField("mount-source", m.Source).
@@ -859,7 +859,7 @@ func (c *Container) rollbackFailingContainerCreation(ctx context.Context) {
func (c *Container) checkBlockDeviceSupport(ctx context.Context) bool {
if !c.sandbox.config.HypervisorConfig.DisableBlockDeviceUse {
agentCaps := c.sandbox.agent.capabilities()
hypervisorCaps := c.sandbox.hypervisor.capabilities(ctx)
hypervisorCaps := c.sandbox.hypervisor.Capabilities(ctx)
if agentCaps.IsBlockDeviceSupported() && hypervisorCaps.IsBlockDeviceHotplugSupported() {
return true
@@ -982,7 +982,7 @@ func (c *Container) checkSandboxRunning(cmd string) error {
}
func (c *Container) getSystemMountInfo() {
// check if /dev needs to be bind mounted from host /dev
// Check if /dev needs to be bind mounted from host /dev
c.systemMountsInfo.BindMountDev = false
for _, m := range c.mounts {
@@ -1055,7 +1055,7 @@ func (c *Container) stop(ctx context.Context, force bool) error {
// Save device and drive data.
// TODO: can we merge this saving with setContainerState()?
if err := c.sandbox.Save(); err != nil {
c.Logger().WithError(err).Info("save container state failed")
c.Logger().WithError(err).Info("Save container state failed")
}
}()

View File

@@ -99,7 +99,7 @@ func TestContainerRemoveDrive(t *testing.T) {
container.state.Fstype = ""
err := container.removeDrive(sandbox.ctx)
// hotplugRemoveDevice for hypervisor should not be called.
// HotplugRemoveDevice for hypervisor should not be called.
// test should pass without a hypervisor created for the container's sandbox.
assert.Nil(t, err, "remove drive should succeed")
@@ -329,7 +329,7 @@ func TestContainerAddDriveDir(t *testing.T) {
rootFs: RootFs{Target: fakeRootfs, Mounted: true},
}
// Make the checkStorageDriver func variable point to a fake check function
// Make the checkStorageDriver func variable point to a fake Check function
savedFunc := checkStorageDriver
checkStorageDriver = func(major, minor int) (bool, error) {
return true, nil
@@ -562,7 +562,7 @@ func TestMountSharedDirMounts(t *testing.T) {
// create a new shared directory for our test:
kataHostSharedDirSaved := kataHostSharedDir
testHostDir, err := ioutil.TempDir("", "kata-cleanup")
testHostDir, err := ioutil.TempDir("", "kata-Cleanup")
assert.NoError(err)
kataHostSharedDir = func() string {
return testHostDir

View File

@@ -26,8 +26,8 @@ type Endpoint interface {
SetPciPath(vcTypes.PciPath)
Attach(context.Context, *Sandbox) error
Detach(ctx context.Context, netNsCreated bool, netNsPath string) error
HotAttach(ctx context.Context, h hypervisor) error
HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error
HotAttach(ctx context.Context, h Hypervisor) error
HotDetach(ctx context.Context, h Hypervisor, netNsCreated bool, netNsPath string) error
save() persistapi.NetworkEndpoint
load(persistapi.NetworkEndpoint)

View File

@@ -87,7 +87,7 @@ func TestIncorrectEndpointTypeString(t *testing.T) {
func TestSaveLoadIfPair(t *testing.T) {
macAddr := net.HardwareAddr{0x02, 0x00, 0xCA, 0xFE, 0x00, 0x04}
tmpfile, err := ioutil.TempFile("", "vc-save-load-net-")
tmpfile, err := ioutil.TempFile("", "vc-Save-Load-net-")
assert.Nil(t, err)
defer os.Remove(tmpfile.Name())
@@ -109,7 +109,7 @@ func TestSaveLoadIfPair(t *testing.T) {
NetInterworkingModel: DefaultNetInterworkingModel,
}
// Save to disk then load it back.
// Save to disk then Load it back.
savedIfPair := saveNetIfPair(netPair)
loadedIfPair := loadNetIfPair(savedIfPair)

View File

@@ -62,7 +62,7 @@ const (
fcTimeout = 10
fcSocket = "firecracker.socket"
//Name of the files within jailer root
//Having predefined names helps with cleanup
//Having predefined names helps with Cleanup
fcKernel = "vmlinux"
fcRootfs = "rootfs"
fcStopSandboxTimeout = 15
@@ -166,7 +166,7 @@ type firecracker struct {
type firecrackerDevice struct {
dev interface{}
devType deviceType
devType DeviceType
}
// Logger returns a logrus logger appropriate for logging firecracker messages
@@ -188,7 +188,7 @@ func (fc *firecracker) truncateID(id string) string {
}
func (fc *firecracker) setConfig(config *HypervisorConfig) error {
err := config.valid()
err := config.Valid()
if err != nil {
return err
}
@@ -198,15 +198,15 @@ func (fc *firecracker) setConfig(config *HypervisorConfig) error {
return nil
}
// For firecracker this call only sets the internal structure up.
// CreateVM For firecracker this call only sets the internal structure up.
// The sandbox will be created and started through startSandbox().
func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error {
func (fc *firecracker) CreateVM(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error {
fc.ctx = ctx
span, _ := katatrace.Trace(ctx, fc.Logger(), "createSandbox", fcTracingTags, map[string]string{"sandbox_id": fc.id})
span, _ := katatrace.Trace(ctx, fc.Logger(), "CreateVM", fcTracingTags, map[string]string{"sandbox_id": fc.id})
defer span.End()
//TODO: check validity of the hypervisor config provided
//TODO: Check validity of the hypervisor config provided
//https://github.com/kata-containers/runtime/issues/1065
fc.id = fc.truncateID(id)
fc.state.set(notReady)
@@ -217,7 +217,7 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS N
fc.setPaths(&fc.config)
// So we need to repopulate this at startSandbox where it is valid
// So we need to repopulate this at StartVM where it is valid
fc.netNSPath = networkNS.NetNsPath
// Till we create lower privileged kata user run as root
@@ -303,7 +303,7 @@ func (fc *firecracker) getVersionNumber() (string, error) {
func (fc *firecracker) parseVersion(data string) (string, error) {
// Firecracker versions 0.25 and over contains multiline output on "version" command.
// So we have to check it and use first line of output to parse version.
// So we have to Check it and use first line of output to parse version.
lines := strings.Split(data, "\n")
var version string
@@ -359,7 +359,7 @@ func (fc *firecracker) fcInit(ctx context.Context, timeout int) error {
defer span.End()
var err error
//FC version set and check
//FC version set and Check
if fc.info.Version, err = fc.getVersionNumber(); err != nil {
return err
}
@@ -751,7 +751,7 @@ func (fc *firecracker) fcInitConfiguration(ctx context.Context) error {
fc.state.set(cfReady)
for _, d := range fc.pendingDevices {
if err := fc.addDevice(ctx, d.dev, d.devType); err != nil {
if err := fc.AddDevice(ctx, d.dev, d.devType); err != nil {
return err
}
}
@@ -765,8 +765,8 @@ func (fc *firecracker) fcInitConfiguration(ctx context.Context) error {
// startSandbox will start the hypervisor for the given sandbox.
// In the context of firecracker, this will start the hypervisor,
// for configuration, but not yet start the actual virtual machine
func (fc *firecracker) startSandbox(ctx context.Context, timeout int) error {
span, _ := katatrace.Trace(ctx, fc.Logger(), "startSandbox", fcTracingTags, map[string]string{"sandbox_id": fc.id})
func (fc *firecracker) StartVM(ctx context.Context, timeout int) error {
span, _ := katatrace.Trace(ctx, fc.Logger(), "StartVM", fcTracingTags, map[string]string{"sandbox_id": fc.id})
defer span.End()
if err := fc.fcInitConfiguration(ctx); err != nil {
@@ -878,22 +878,22 @@ func (fc *firecracker) cleanupJail(ctx context.Context) {
}
// stopSandbox will stop the Sandbox's VM.
func (fc *firecracker) stopSandbox(ctx context.Context, waitOnly bool) (err error) {
span, _ := katatrace.Trace(ctx, fc.Logger(), "stopSandbox", fcTracingTags, map[string]string{"sandbox_id": fc.id})
func (fc *firecracker) StopVM(ctx context.Context, waitOnly bool) (err error) {
span, _ := katatrace.Trace(ctx, fc.Logger(), "StopVM", fcTracingTags, map[string]string{"sandbox_id": fc.id})
defer span.End()
return fc.fcEnd(ctx, waitOnly)
}
func (fc *firecracker) pauseSandbox(ctx context.Context) error {
func (fc *firecracker) PauseVM(ctx context.Context) error {
return nil
}
func (fc *firecracker) saveSandbox() error {
func (fc *firecracker) SaveVM() error {
return nil
}
func (fc *firecracker) resumeSandbox(ctx context.Context) error {
func (fc *firecracker) ResumeVM(ctx context.Context) error {
return nil
}
@@ -1023,8 +1023,8 @@ func (fc *firecracker) fcUpdateBlockDrive(ctx context.Context, path, id string)
// addDevice will add extra devices to firecracker. Limited to configure before the
// virtual machine starts. Devices include drivers and network interfaces only.
func (fc *firecracker) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
span, _ := katatrace.Trace(ctx, fc.Logger(), "addDevice", fcTracingTags, map[string]string{"sandbox_id": fc.id})
func (fc *firecracker) AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
span, _ := katatrace.Trace(ctx, fc.Logger(), "AddDevice", fcTracingTags, map[string]string{"sandbox_id": fc.id})
defer span.End()
fc.state.RLock()
@@ -1060,7 +1060,7 @@ func (fc *firecracker) addDevice(ctx context.Context, devInfo interface{}, devTy
// hotplugBlockDevice supported in Firecracker VMM
// hot add or remove a block device.
func (fc *firecracker) hotplugBlockDevice(ctx context.Context, drive config.BlockDrive, op operation) (interface{}, error) {
func (fc *firecracker) hotplugBlockDevice(ctx context.Context, drive config.BlockDrive, op Operation) (interface{}, error) {
if drive.Swap {
return nil, fmt.Errorf("firecracker doesn't support swap")
}
@@ -1069,7 +1069,7 @@ func (fc *firecracker) hotplugBlockDevice(ctx context.Context, drive config.Bloc
var err error
driveID := fcDriveIndexToID(drive.Index)
if op == addDevice {
if op == AddDevice {
//The drive placeholder has to exist prior to Update
path, err = fc.fcJailResource(drive.File, driveID)
if err != nil {
@@ -1093,32 +1093,32 @@ func (fc *firecracker) hotplugBlockDevice(ctx context.Context, drive config.Bloc
}
// hotplugAddDevice supported in Firecracker VMM
func (fc *firecracker) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
span, _ := katatrace.Trace(ctx, fc.Logger(), "hotplugAddDevice", fcTracingTags, map[string]string{"sandbox_id": fc.id})
func (fc *firecracker) HotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
span, _ := katatrace.Trace(ctx, fc.Logger(), "HotplugAddDevice", fcTracingTags, map[string]string{"sandbox_id": fc.id})
defer span.End()
switch devType {
case blockDev:
return fc.hotplugBlockDevice(ctx, *devInfo.(*config.BlockDrive), addDevice)
case BlockDev:
return fc.hotplugBlockDevice(ctx, *devInfo.(*config.BlockDrive), AddDevice)
default:
fc.Logger().WithFields(logrus.Fields{"devInfo": devInfo,
"deviceType": devType}).Warn("hotplugAddDevice: unsupported device")
"deviceType": devType}).Warn("HotplugAddDevice: unsupported device")
return nil, fmt.Errorf("Could not hot add device: unsupported device: %v, type: %v",
devInfo, devType)
}
}
// hotplugRemoveDevice supported in Firecracker VMM
func (fc *firecracker) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
span, _ := katatrace.Trace(ctx, fc.Logger(), "hotplugRemoveDevice", fcTracingTags, map[string]string{"sandbox_id": fc.id})
func (fc *firecracker) HotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
span, _ := katatrace.Trace(ctx, fc.Logger(), "HotplugRemoveDevice", fcTracingTags, map[string]string{"sandbox_id": fc.id})
defer span.End()
switch devType {
case blockDev:
return fc.hotplugBlockDevice(ctx, *devInfo.(*config.BlockDrive), removeDevice)
case BlockDev:
return fc.hotplugBlockDevice(ctx, *devInfo.(*config.BlockDrive), RemoveDevice)
default:
fc.Logger().WithFields(logrus.Fields{"devInfo": devInfo,
"deviceType": devType}).Error("hotplugRemoveDevice: unsupported device")
"deviceType": devType}).Error("HotplugRemoveDevice: unsupported device")
return nil, fmt.Errorf("Could not hot remove device: unsupported device: %v, type: %v",
devInfo, devType)
}
@@ -1126,7 +1126,7 @@ func (fc *firecracker) hotplugRemoveDevice(ctx context.Context, devInfo interfac
// getSandboxConsole builds the path of the console where we can read
// logs coming from the sandbox.
func (fc *firecracker) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
func (fc *firecracker) GetVMConsole(ctx context.Context, id string) (string, string, error) {
master, slave, err := console.NewPty()
if err != nil {
fc.Logger().Debugf("Error create pseudo tty: %v", err)
@@ -1137,13 +1137,13 @@ func (fc *firecracker) getSandboxConsole(ctx context.Context, id string) (string
return consoleProtoPty, slave, nil
}
func (fc *firecracker) disconnect(ctx context.Context) {
func (fc *firecracker) Disconnect(ctx context.Context) {
fc.state.set(notReady)
}
// Adds all capabilities supported by firecracker implementation of hypervisor interface
func (fc *firecracker) capabilities(ctx context.Context) types.Capabilities {
span, _ := katatrace.Trace(ctx, fc.Logger(), "capabilities", fcTracingTags, map[string]string{"sandbox_id": fc.id})
func (fc *firecracker) Capabilities(ctx context.Context) types.Capabilities {
span, _ := katatrace.Trace(ctx, fc.Logger(), "Capabilities", fcTracingTags, map[string]string{"sandbox_id": fc.id})
defer span.End()
var caps types.Capabilities
caps.SetBlockDeviceHotplugSupport()
@@ -1151,15 +1151,15 @@ func (fc *firecracker) capabilities(ctx context.Context) types.Capabilities {
return caps
}
func (fc *firecracker) hypervisorConfig() HypervisorConfig {
func (fc *firecracker) HypervisorConfig() HypervisorConfig {
return fc.config
}
func (fc *firecracker) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
return 0, memoryDevice{}, nil
func (fc *firecracker) ResizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
return 0, MemoryDevice{}, nil
}
func (fc *firecracker) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
func (fc *firecracker) ResizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
return 0, 0, nil
}
@@ -1167,8 +1167,8 @@ func (fc *firecracker) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (curren
//
// As suggested by https://github.com/firecracker-microvm/firecracker/issues/718,
// let's use `ps -T -p <pid>` to get fc vcpu info.
func (fc *firecracker) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {
var vcpuInfo vcpuThreadIDs
func (fc *firecracker) GetThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
var vcpuInfo VcpuThreadIDs
vcpuInfo.vcpus = make(map[int]int)
parent, err := utils.NewProc(fc.info.PID)
@@ -1205,16 +1205,16 @@ func (fc *firecracker) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error)
return vcpuInfo, nil
}
func (fc *firecracker) cleanup(ctx context.Context) error {
func (fc *firecracker) Cleanup(ctx context.Context) error {
fc.cleanupJail(ctx)
return nil
}
func (fc *firecracker) getPids() []int {
func (fc *firecracker) GetPids() []int {
return []int{fc.info.PID}
}
func (fc *firecracker) getVirtioFsPid() *int {
func (fc *firecracker) GetVirtioFsPid() *int {
return nil
}
@@ -1226,17 +1226,17 @@ func (fc *firecracker) toGrpc(ctx context.Context) ([]byte, error) {
return nil, errors.New("firecracker is not supported by VM cache")
}
func (fc *firecracker) save() (s persistapi.HypervisorState) {
func (fc *firecracker) Save() (s persistapi.HypervisorState) {
s.Pid = fc.info.PID
s.Type = string(FirecrackerHypervisor)
return
}
func (fc *firecracker) load(s persistapi.HypervisorState) {
func (fc *firecracker) Load(s persistapi.HypervisorState) {
fc.info.PID = s.Pid
}
func (fc *firecracker) check() error {
func (fc *firecracker) Check() error {
if err := syscall.Kill(fc.info.PID, syscall.Signal(0)); err != nil {
return errors.Wrapf(err, "failed to ping fc process")
}
@@ -1244,7 +1244,7 @@ func (fc *firecracker) check() error {
return nil
}
func (fc *firecracker) generateSocket(id string) (interface{}, error) {
func (fc *firecracker) GenerateSocket(id string) (interface{}, error) {
fc.Logger().Debug("Using hybrid-vsock endpoint")
// Method is being run outside of the normal container workflow
@@ -1259,7 +1259,7 @@ func (fc *firecracker) generateSocket(id string) (interface{}, error) {
}, nil
}
func (fc *firecracker) isRateLimiterBuiltin() bool {
func (fc *firecracker) IsRateLimiterBuiltin() bool {
return true
}

View File

@@ -472,7 +472,7 @@ type PerformanceMetrics struct {
FullCreateSnapshot uint64 `json:"full_create_snapshot"`
// Measures the snapshot diff create time, at the API (user) level, in microseconds.
DiffCreateSnapshot uint64 `json:"diff_create_snapshot"`
// Measures the snapshot load time, at the API (user) level, in microseconds.
// Measures the snapshot Load time, at the API (user) level, in microseconds.
LoadSnapshot uint64 `json:"load_snapshot"`
// Measures the microVM pausing duration, at the API (user) level, in microseconds.
PauseVM uint64 `json:"pause_vm"`
@@ -482,7 +482,7 @@ type PerformanceMetrics struct {
VmmFullCreateSnapshot uint64 `json:"vmm_full_create_snapshot"`
// Measures the snapshot diff create time, at the VMM level, in microseconds.
VmmDiffCreateSnapshot uint64 `json:"vmm_diff_create_snapshot"`
// Measures the snapshot load time, at the VMM level, in microseconds.
// Measures the snapshot Load time, at the VMM level, in microseconds.
VmmLoadSnapshot uint64 `json:"vmm_load_snapshot"`
// Measures the microVM pausing duration, at the VMM level, in microseconds.
VmmPauseVM uint64 `json:"vmm_pause_vm"`

View File

@@ -17,7 +17,7 @@ func TestFCGenerateSocket(t *testing.T) {
assert := assert.New(t)
fc := firecracker{}
i, err := fc.generateSocket("a")
i, err := fc.GenerateSocket("a")
assert.NoError(err)
assert.NotNil(i)

View File

@@ -24,11 +24,11 @@ import (
// HypervisorType describes an hypervisor type.
type HypervisorType string
type operation int
type Operation int
const (
addDevice operation = iota
removeDevice
AddDevice Operation = iota
RemoveDevice
)
const (
@@ -98,50 +98,50 @@ var commonVirtioblkKernelRootParams = []Param{ //nolint: unused, deadcode, varch
{"rootfstype", "ext4"},
}
// deviceType describes a virtualized device type.
type deviceType int
// DeviceType describes a virtualized device type.
type DeviceType int
const (
// ImgDev is the image device type.
imgDev deviceType = iota
ImgDev DeviceType = iota
// FsDev is the filesystem device type.
fsDev
FsDev
// NetDev is the network device type.
netDev
NetDev
// BlockDev is the block device type.
blockDev
BlockDev
// SerialPortDev is the serial port device type.
serialPortDev
SerialPortDev
// vSockPCIDev is the vhost vsock PCI device type.
vSockPCIDev
// VSockPCIDev is the vhost vsock PCI device type.
VSockPCIDev
// VFIODevice is VFIO device type
vfioDev
VfioDev
// vhostuserDev is a Vhost-user device type
vhostuserDev
// VhostuserDev is a Vhost-user device type
VhostuserDev
// CPUDevice is CPU device type
cpuDev
CpuDev
// memoryDevice is memory device type
memoryDev
// MemoryDev is memory device type
MemoryDev
// hybridVirtioVsockDev is a hybrid virtio-vsock device supported
// HybridVirtioVsockDev is a hybrid virtio-vsock device supported
// only on certain hypervisors, like firecracker.
hybridVirtioVsockDev
HybridVirtioVsockDev
)
type memoryDevice struct {
slot int
sizeMB int
addr uint64
probe bool
type MemoryDevice struct {
Slot int
SizeMB int
Addr uint64
Probe bool
}
// Set sets an hypervisor type based on the input string.
@@ -186,7 +186,7 @@ func (hType *HypervisorType) String() string {
}
// NewHypervisor returns an hypervisor from and hypervisor type.
func NewHypervisor(hType HypervisorType) (hypervisor, error) {
func NewHypervisor(hType HypervisorType) (Hypervisor, error) {
store, err := persist.GetDriver()
if err != nil {
return nil, err
@@ -235,7 +235,7 @@ func GetHypervisorSocketTemplate(hType HypervisorType, config *HypervisorConfig)
// Tag that is used to represent the name of a sandbox
const sandboxID = "{ID}"
socket, err := hypervisor.generateSocket(sandboxID)
socket, err := hypervisor.GenerateSocket(sandboxID)
if err != nil {
return "", err
}
@@ -514,11 +514,11 @@ type HypervisorConfig struct {
}
// vcpu mapping from vcpu number to thread number
type vcpuThreadIDs struct {
type VcpuThreadIDs struct {
vcpus map[int]int
}
func (conf *HypervisorConfig) checkTemplateConfig() error {
func (conf *HypervisorConfig) CheckTemplateConfig() error {
if conf.BootToBeTemplate && conf.BootFromTemplate {
return fmt.Errorf("Cannot set both 'to be' and 'from' vm tempate")
}
@@ -529,14 +529,16 @@ func (conf *HypervisorConfig) checkTemplateConfig() error {
}
if conf.BootFromTemplate && conf.DevicesStatePath == "" {
return fmt.Errorf("Missing DevicesStatePath to load from vm template")
return fmt.Errorf("Missing DevicesStatePath to Load from vm template")
}
}
return nil
}
func (conf *HypervisorConfig) valid() error {
func (conf *HypervisorConfig) Valid() error {
// Kata specific checks. Should be done outside the hypervisor
if conf.KernelPath == "" {
return fmt.Errorf("Missing kernel path")
}
@@ -545,7 +547,7 @@ func (conf *HypervisorConfig) valid() error {
return fmt.Errorf("Missing image and initrd path")
}
if err := conf.checkTemplateConfig(); err != nil {
if err := conf.CheckTemplateConfig(); err != nil {
return err
}
@@ -590,7 +592,7 @@ func (conf *HypervisorConfig) AddKernelParam(p Param) error {
return nil
}
func (conf *HypervisorConfig) addCustomAsset(a *types.Asset) error {
func (conf *HypervisorConfig) AddCustomAsset(a *types.Asset) error {
if a == nil || a.Path() == "" {
// We did not get a custom asset, we will use the default one.
return nil
@@ -744,7 +746,7 @@ func DeserializeParams(parameters []string) []Param {
return params
}
func getHostMemorySizeKb(memInfoPath string) (uint64, error) {
func GetHostMemorySizeKb(memInfoPath string) (uint64, error) {
f, err := os.Open(memInfoPath)
if err != nil {
return 0, err
@@ -780,7 +782,7 @@ func getHostMemorySizeKb(memInfoPath string) (uint64, error) {
// CheckCmdline checks whether an option or parameter is present in the kernel command line.
// Search is case-insensitive.
// Takes path to file that contains the kernel command line, desired option, and permitted values
// (empty values to check for options).
// (empty values to Check for options).
func CheckCmdline(kernelCmdlinePath, searchParam string, searchValues []string) (bool, error) {
f, err := os.Open(kernelCmdlinePath)
if err != nil {
@@ -788,8 +790,8 @@ func CheckCmdline(kernelCmdlinePath, searchParam string, searchValues []string)
}
defer f.Close()
// Create check function -- either check for verbatim option
// or check for parameter and permitted values
// Create Check function -- either Check for verbatim option
// or Check for parameter and permitted values
var check func(string, string, []string) bool
if len(searchValues) == 0 {
check = func(option, searchParam string, _ []string) bool {
@@ -872,8 +874,8 @@ func RunningOnVMM(cpuInfoPath string) (bool, error) {
return false, nil
}
func getHypervisorPid(h hypervisor) int {
pids := h.getPids()
func GetHypervisorPid(h Hypervisor) int {
pids := h.GetPids()
if len(pids) == 0 {
return 0
}
@@ -895,43 +897,44 @@ func generateVMSocket(id string, vmStogarePath string) (interface{}, error) {
// hypervisor is the virtcontainers hypervisor interface.
// The default hypervisor implementation is Qemu.
type hypervisor interface {
setConfig(config *HypervisorConfig) error
createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error
startSandbox(ctx context.Context, timeout int) error
type Hypervisor interface {
CreateVM(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error
StartVM(ctx context.Context, timeout int) error
// If wait is set, don't actively stop the sandbox:
// just perform cleanup.
stopSandbox(ctx context.Context, waitOnly bool) error
pauseSandbox(ctx context.Context) error
saveSandbox() error
resumeSandbox(ctx context.Context) error
addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error
hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error)
hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error)
resizeMemory(ctx context.Context, memMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error)
resizeVCPUs(ctx context.Context, vcpus uint32) (uint32, uint32, error)
getSandboxConsole(ctx context.Context, sandboxID string) (string, string, error)
disconnect(ctx context.Context)
capabilities(ctx context.Context) types.Capabilities
hypervisorConfig() HypervisorConfig
getThreadIDs(ctx context.Context) (vcpuThreadIDs, error)
cleanup(ctx context.Context) error
StopVM(ctx context.Context, waitOnly bool) error
PauseVM(ctx context.Context) error
SaveVM() error
ResumeVM(ctx context.Context) error
AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error
HotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error)
HotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error)
ResizeMemory(ctx context.Context, memMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error)
ResizeVCPUs(ctx context.Context, vcpus uint32) (uint32, uint32, error)
GetVMConsole(ctx context.Context, sandboxID string) (string, string, error)
Disconnect(ctx context.Context)
Capabilities(ctx context.Context) types.Capabilities
HypervisorConfig() HypervisorConfig
GetThreadIDs(ctx context.Context) (VcpuThreadIDs, error)
Cleanup(ctx context.Context) error
// getPids returns a slice of hypervisor related process ids.
// The hypervisor pid must be put at index 0.
getPids() []int
getVirtioFsPid() *int
setConfig(config *HypervisorConfig) error
GetPids() []int
GetVirtioFsPid() *int
fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error
toGrpc(ctx context.Context) ([]byte, error)
check() error
Check() error
save() persistapi.HypervisorState
load(persistapi.HypervisorState)
Save() persistapi.HypervisorState
Load(persistapi.HypervisorState)
// generate the socket to communicate the host and guest
generateSocket(id string) (interface{}, error)
GenerateSocket(id string) (interface{}, error)
// check if hypervisor supports built-in rate limiter.
isRateLimiterBuiltin() bool
IsRateLimiterBuiltin() bool
setSandbox(sandbox *Sandbox)
}

View File

@@ -65,7 +65,7 @@ func TestStringFromUnknownHypervisorType(t *testing.T) {
testStringFromHypervisorType(t, hypervisorType, "")
}
func testNewHypervisorFromHypervisorType(t *testing.T, hypervisorType HypervisorType, expected hypervisor) {
func testNewHypervisorFromHypervisorType(t *testing.T, hypervisorType HypervisorType, expected Hypervisor) {
assert := assert.New(t)
hy, err := NewHypervisor(hypervisorType)
assert.NoError(err)
@@ -88,7 +88,7 @@ func TestNewHypervisorFromUnknownHypervisorType(t *testing.T) {
}
func testHypervisorConfigValid(t *testing.T, hypervisorConfig *HypervisorConfig, success bool) {
err := hypervisorConfig.valid()
err := hypervisorConfig.Valid()
assert := assert.New(t)
assert.False(success && err != nil)
assert.False(!success && err == nil)
@@ -385,7 +385,7 @@ func TestGetHostMemorySizeKb(t *testing.T) {
defer os.RemoveAll(dir)
file := filepath.Join(dir, "meminfo")
_, err = getHostMemorySizeKb(file)
_, err = GetHostMemorySizeKb(file)
assert.Error(err)
for _, d := range data {
@@ -393,7 +393,7 @@ func TestGetHostMemorySizeKb(t *testing.T) {
assert.NoError(err)
defer os.Remove(file)
hostMemKb, err := getHostMemorySizeKb(file)
hostMemKb, err := GetHostMemorySizeKb(file)
assert.False((d.expectError && err == nil))
assert.False((!d.expectError && err != nil))

View File

@@ -93,6 +93,7 @@ func (endpoint *IPVlanEndpoint) NetworkPair() *NetworkInterfacePair {
// Attach for ipvlan endpoint bridges the network pair and adds the
// tap interface of the network pair to the hypervisor.
// tap interface of the network pair to the Hypervisor.
func (endpoint *IPVlanEndpoint) Attach(ctx context.Context, s *Sandbox) error {
span, ctx := ipvlanTrace(ctx, "Attach", endpoint)
defer span.End()
@@ -103,7 +104,7 @@ func (endpoint *IPVlanEndpoint) Attach(ctx context.Context, s *Sandbox) error {
return err
}
return h.addDevice(ctx, endpoint, netDev)
return h.AddDevice(ctx, endpoint, NetDev)
}
// Detach for the ipvlan endpoint tears down the tap and bridge
@@ -124,12 +125,12 @@ func (endpoint *IPVlanEndpoint) Detach(ctx context.Context, netNsCreated bool, n
}
// HotAttach for ipvlan endpoint not supported yet
func (endpoint *IPVlanEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
func (endpoint *IPVlanEndpoint) HotAttach(ctx context.Context, h Hypervisor) error {
return fmt.Errorf("IPVlanEndpoint does not support Hot attach")
}
// HotDetach for ipvlan endpoint not supported yet
func (endpoint *IPVlanEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
func (endpoint *IPVlanEndpoint) HotDetach(ctx context.Context, h Hypervisor, netNsCreated bool, netNsPath string) error {
return fmt.Errorf("IPVlanEndpoint does not support Hot detach")
}

View File

@@ -297,7 +297,7 @@ func (k *kataAgent) handleTraceSettings(config KataAgentConfig) bool {
}
func (k *kataAgent) init(ctx context.Context, sandbox *Sandbox, config KataAgentConfig) (disableVMShutdown bool, err error) {
// save
// Save
k.ctx = sandbox.ctx
span, _ := katatrace.Trace(ctx, k.Logger(), "init", kataAgentTracingTags)
@@ -327,18 +327,18 @@ func (k *kataAgent) agentURL() (string, error) {
func (k *kataAgent) capabilities() types.Capabilities {
var caps types.Capabilities
// add all capabilities supported by agent
// add all Capabilities supported by agent
caps.SetBlockDeviceSupport()
return caps
}
func (k *kataAgent) internalConfigure(ctx context.Context, h hypervisor, id string, config KataAgentConfig) error {
func (k *kataAgent) internalConfigure(ctx context.Context, h Hypervisor, id string, config KataAgentConfig) error {
span, _ := katatrace.Trace(ctx, k.Logger(), "configure", kataAgentTracingTags)
defer span.End()
var err error
if k.vmSocket, err = h.generateSocket(id); err != nil {
if k.vmSocket, err = h.GenerateSocket(id); err != nil {
return err
}
k.keepConn = config.LongLiveConn
@@ -367,11 +367,11 @@ func (k *kataAgent) setupSandboxBindMounts(ctx context.Context, sandbox *Sandbox
if err != nil {
for _, mnt := range mountedList {
if derr := syscall.Unmount(mnt, syscall.MNT_DETACH|UmountNoFollow); derr != nil {
k.Logger().WithError(derr).Errorf("cleanup: couldn't unmount %s", mnt)
k.Logger().WithError(derr).Errorf("Cleanup: couldn't unmount %s", mnt)
}
}
if derr := os.RemoveAll(sandboxMountDir); derr != nil {
k.Logger().WithError(derr).Errorf("cleanup: failed to remove %s", sandboxMountDir)
k.Logger().WithError(derr).Errorf("Cleanup: failed to remove %s", sandboxMountDir)
}
}
@@ -421,7 +421,7 @@ func (k *kataAgent) cleanupSandboxBindMounts(sandbox *Sandbox) error {
return retErr
}
func (k *kataAgent) configure(ctx context.Context, h hypervisor, id, sharePath string, config KataAgentConfig) error {
func (k *kataAgent) configure(ctx context.Context, h Hypervisor, id, sharePath string, config KataAgentConfig) error {
span, ctx := katatrace.Trace(ctx, k.Logger(), "configure", kataAgentTracingTags)
defer span.End()
@@ -432,11 +432,11 @@ func (k *kataAgent) configure(ctx context.Context, h hypervisor, id, sharePath s
switch s := k.vmSocket.(type) {
case types.VSock:
if err = h.addDevice(ctx, s, vSockPCIDev); err != nil {
if err = h.AddDevice(ctx, s, VSockPCIDev); err != nil {
return err
}
case types.HybridVSock:
err = h.addDevice(ctx, s, hybridVirtioVsockDev)
err = h.AddDevice(ctx, s, HybridVirtioVsockDev)
if err != nil {
return err
}
@@ -447,7 +447,7 @@ func (k *kataAgent) configure(ctx context.Context, h hypervisor, id, sharePath s
// Neither create shared directory nor add 9p device if hypervisor
// doesn't support filesystem sharing.
caps := h.capabilities(ctx)
caps := h.Capabilities(ctx)
if !caps.IsFsSharingSupported() {
return nil
}
@@ -463,10 +463,10 @@ func (k *kataAgent) configure(ctx context.Context, h hypervisor, id, sharePath s
return err
}
return h.addDevice(ctx, sharedVolume, fsDev)
return h.AddDevice(ctx, sharedVolume, FsDev)
}
func (k *kataAgent) configureFromGrpc(ctx context.Context, h hypervisor, id string, config KataAgentConfig) error {
func (k *kataAgent) configureFromGrpc(ctx context.Context, h Hypervisor, id string, config KataAgentConfig) error {
return k.internalConfigure(ctx, h, id, config)
}
@@ -764,7 +764,7 @@ func (k *kataAgent) getDNS(sandbox *Sandbox) ([]string, error) {
}
func (k *kataAgent) startSandbox(ctx context.Context, sandbox *Sandbox) error {
span, ctx := katatrace.Trace(ctx, k.Logger(), "startSandbox", kataAgentTracingTags)
span, ctx := katatrace.Trace(ctx, k.Logger(), "StartVM", kataAgentTracingTags)
defer span.End()
if err := k.setAgentURL(); err != nil {
@@ -781,7 +781,7 @@ func (k *kataAgent) startSandbox(ctx context.Context, sandbox *Sandbox) error {
return err
}
// check grpc server is serving
// Check grpc server is serving
if err = k.check(ctx); err != nil {
return err
}
@@ -853,7 +853,7 @@ func setupKernelModules(kmodules []string) []*grpc.KernelModule {
func setupStorages(ctx context.Context, sandbox *Sandbox) []*grpc.Storage {
storages := []*grpc.Storage{}
caps := sandbox.hypervisor.capabilities(ctx)
caps := sandbox.hypervisor.Capabilities(ctx)
// append 9p shared volume to storages only if filesystem sharing is supported
if caps.IsFsSharingSupported() {
@@ -917,7 +917,7 @@ func setupStorages(ctx context.Context, sandbox *Sandbox) []*grpc.Storage {
}
func (k *kataAgent) stopSandbox(ctx context.Context, sandbox *Sandbox) error {
span, ctx := katatrace.Trace(ctx, k.Logger(), "stopSandbox", kataAgentTracingTags)
span, ctx := katatrace.Trace(ctx, k.Logger(), "StopVM", kataAgentTracingTags)
defer span.End()
req := &grpc.DestroySandboxRequest{}
@@ -1849,7 +1849,7 @@ func (k *kataAgent) connect(ctx context.Context) error {
}
func (k *kataAgent) disconnect(ctx context.Context) error {
span, _ := katatrace.Trace(ctx, k.Logger(), "disconnect", kataAgentTracingTags)
span, _ := katatrace.Trace(ctx, k.Logger(), "Disconnect", kataAgentTracingTags)
defer span.End()
k.Lock()
@@ -1873,7 +1873,7 @@ func (k *kataAgent) disconnect(ctx context.Context) error {
func (k *kataAgent) check(ctx context.Context) error {
_, err := k.sendReq(ctx, &grpc.CheckRequest{})
if err != nil {
err = fmt.Errorf("Failed to check if grpc server is working: %s", err)
err = fmt.Errorf("Failed to Check if grpc server is working: %s", err)
}
return err
}
@@ -2200,12 +2200,12 @@ func (k *kataAgent) markDead(ctx context.Context) {
func (k *kataAgent) cleanup(ctx context.Context, s *Sandbox) {
if err := k.cleanupSandboxBindMounts(s); err != nil {
k.Logger().WithError(err).Errorf("failed to cleanup sandbox bindmounts")
k.Logger().WithError(err).Errorf("failed to Cleanup sandbox bindmounts")
}
// Unmount shared path
path := getSharePath(s.id)
k.Logger().WithField("path", path).Infof("cleanup agent")
k.Logger().WithField("path", path).Infof("Cleanup agent")
if err := syscall.Unmount(path, syscall.MNT_DETACH|UmountNoFollow); err != nil {
k.Logger().WithError(err).Errorf("failed to unmount vm share path %s", path)
}
@@ -2216,7 +2216,7 @@ func (k *kataAgent) cleanup(ctx context.Context, s *Sandbox) {
k.Logger().WithError(err).Errorf("failed to unmount vm mount path %s", path)
}
if err := os.RemoveAll(getSandboxPath(s.id)); err != nil {
k.Logger().WithError(err).Errorf("failed to cleanup vm path %s", getSandboxPath(s.id))
k.Logger().WithError(err).Errorf("failed to Cleanup vm path %s", getSandboxPath(s.id))
}
}

View File

@@ -591,7 +591,7 @@ func TestConstraintGRPCSpec(t *testing.T) {
k := kataAgent{}
k.constraintGRPCSpec(g, true)
// check nil fields
// Check nil fields
assert.Nil(g.Hooks)
assert.NotNil(g.Linux.Seccomp)
assert.Nil(g.Linux.Resources.Devices)
@@ -603,17 +603,17 @@ func TestConstraintGRPCSpec(t *testing.T) {
assert.NotNil(g.Linux.Resources.CPU)
assert.Equal(g.Process.SelinuxLabel, "")
// check namespaces
// Check namespaces
assert.Len(g.Linux.Namespaces, 1)
assert.Empty(g.Linux.Namespaces[0].Path)
// check mounts
// Check mounts
assert.Len(g.Mounts, 1)
// check cgroup path
// Check cgroup path
assert.Equal(expectedCgroupPath, g.Linux.CgroupsPath)
// check Linux devices
// Check Linux devices
assert.Empty(g.Linux.Devices)
}
@@ -966,7 +966,7 @@ func TestKataCleanupSandbox(t *testing.T) {
kataHostSharedDirSaved := kataHostSharedDir
kataHostSharedDir = func() string {
td, _ := ioutil.TempDir("", "kata-cleanup")
td, _ := ioutil.TempDir("", "kata-Cleanup")
return td
}
defer func() {
@@ -1123,7 +1123,7 @@ func TestSandboxBindMount(t *testing.T) {
// create a new shared directory for our test:
kataHostSharedDirSaved := kataHostSharedDir
testHostDir, err := ioutil.TempDir("", "kata-cleanup")
testHostDir, err := ioutil.TempDir("", "kata-Cleanup")
assert.NoError(err)
kataHostSharedDir = func() string {
return testHostDir
@@ -1175,11 +1175,11 @@ func TestSandboxBindMount(t *testing.T) {
err = k.setupSandboxBindMounts(context.Background(), sandbox)
assert.NoError(err)
// Test the cleanup function. We expect it to succeed for the mount to be removed.
// Test the Cleanup function. We expect it to succeed for the mount to be removed.
err = k.cleanupSandboxBindMounts(sandbox)
assert.NoError(err)
// After successful cleanup, verify there are not any mounts left behind.
// After successful Cleanup, verify there are not any mounts left behind.
stat := syscall.Stat_t{}
mount1CheckPath := filepath.Join(getMountPath(sandbox.id), sandboxMountsDir, filepath.Base(m1Path))
err = syscall.Stat(mount1CheckPath, &stat)
@@ -1191,16 +1191,16 @@ func TestSandboxBindMount(t *testing.T) {
assert.Error(err)
assert.True(os.IsNotExist(err))
// Now, let's setup the cleanup to fail. Setup the sandbox bind mount twice, which will result in
// Now, let's setup the Cleanup to fail. Setup the sandbox bind mount twice, which will result in
// extra mounts being present that the sandbox description doesn't account for (ie, duplicate mounts).
// We expect cleanup to fail on the first time, since it cannot remove the sandbox-bindmount directory because
// We expect Cleanup to fail on the first time, since it cannot remove the sandbox-bindmount directory because
// there are leftover mounts. If we run it a second time, however, it should succeed since it'll remove the
// second set of mounts:
err = k.setupSandboxBindMounts(context.Background(), sandbox)
assert.NoError(err)
err = k.setupSandboxBindMounts(context.Background(), sandbox)
assert.NoError(err)
// Test the cleanup function. We expect it to succeed for the mount to be removed.
// Test the Cleanup function. We expect it to succeed for the mount to be removed.
err = k.cleanupSandboxBindMounts(sandbox)
assert.Error(err)
err = k.cleanupSandboxBindMounts(sandbox)

View File

@@ -69,20 +69,20 @@ func (endpoint *MacvtapEndpoint) Attach(ctx context.Context, s *Sandbox) error {
h := s.hypervisor
endpoint.VMFds, err = createMacvtapFds(endpoint.EndpointProperties.Iface.Index, int(h.hypervisorConfig().NumVCPUs))
endpoint.VMFds, err = createMacvtapFds(endpoint.EndpointProperties.Iface.Index, int(h.HypervisorConfig().NumVCPUs))
if err != nil {
return fmt.Errorf("Could not setup macvtap fds %s: %s", endpoint.EndpointProperties.Iface.Name, err)
}
if !h.hypervisorConfig().DisableVhostNet {
vhostFds, err := createVhostFds(int(h.hypervisorConfig().NumVCPUs))
if !h.HypervisorConfig().DisableVhostNet {
vhostFds, err := createVhostFds(int(h.HypervisorConfig().NumVCPUs))
if err != nil {
return fmt.Errorf("Could not setup vhost fds %s : %s", endpoint.EndpointProperties.Iface.Name, err)
}
endpoint.VhostFds = vhostFds
}
return h.addDevice(ctx, endpoint, netDev)
return h.AddDevice(ctx, endpoint, NetDev)
}
// Detach for macvtap endpoint does nothing.
@@ -91,12 +91,12 @@ func (endpoint *MacvtapEndpoint) Detach(ctx context.Context, netNsCreated bool,
}
// HotAttach for macvtap endpoint not supported yet
func (endpoint *MacvtapEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
func (endpoint *MacvtapEndpoint) HotAttach(ctx context.Context, h Hypervisor) error {
return fmt.Errorf("MacvtapEndpoint does not support Hot attach")
}
// HotDetach for macvtap endpoint not supported yet
func (endpoint *MacvtapEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
func (endpoint *MacvtapEndpoint) HotDetach(ctx context.Context, h Hypervisor, netNsCreated bool, netNsPath string) error {
return fmt.Errorf("MacvtapEndpoint does not support Hot detach")
}

View File

@@ -173,11 +173,11 @@ func (n *mockAgent) resumeContainer(ctx context.Context, sandbox *Sandbox, c Con
}
// configure is the Noop agent configuration implementation. It does nothing.
func (n *mockAgent) configure(ctx context.Context, h hypervisor, id, sharePath string, config KataAgentConfig) error {
func (n *mockAgent) configure(ctx context.Context, h Hypervisor, id, sharePath string, config KataAgentConfig) error {
return nil
}
func (n *mockAgent) configureFromGrpc(ctx context.Context, h hypervisor, id string, config KataAgentConfig) error {
func (n *mockAgent) configureFromGrpc(ctx context.Context, h Hypervisor, id string, config KataAgentConfig) error {
return nil
}

View File

@@ -20,25 +20,25 @@ type mockHypervisor struct {
mockPid int
}
func (m *mockHypervisor) capabilities(ctx context.Context) types.Capabilities {
func (m *mockHypervisor) Capabilities(ctx context.Context) types.Capabilities {
caps := types.Capabilities{}
caps.SetFsSharingSupport()
return caps
}
func (m *mockHypervisor) hypervisorConfig() HypervisorConfig {
func (m *mockHypervisor) HypervisorConfig() HypervisorConfig {
return HypervisorConfig{}
}
func (m *mockHypervisor) setConfig(config *HypervisorConfig) error {
if err := config.valid(); err != nil {
if err := config.Valid(); err != nil {
return err
}
return nil
}
func (m *mockHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error {
func (m *mockHypervisor) CreateVM(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error {
if err := m.setConfig(hypervisorConfig); err != nil {
return err
}
@@ -46,79 +46,79 @@ func (m *mockHypervisor) createSandbox(ctx context.Context, id string, networkNS
return nil
}
func (m *mockHypervisor) startSandbox(ctx context.Context, timeout int) error {
func (m *mockHypervisor) StartVM(ctx context.Context, timeout int) error {
return nil
}
func (m *mockHypervisor) stopSandbox(ctx context.Context, waitOnly bool) error {
func (m *mockHypervisor) StopVM(ctx context.Context, waitOnly bool) error {
return nil
}
func (m *mockHypervisor) pauseSandbox(ctx context.Context) error {
func (m *mockHypervisor) PauseVM(ctx context.Context) error {
return nil
}
func (m *mockHypervisor) resumeSandbox(ctx context.Context) error {
func (m *mockHypervisor) ResumeVM(ctx context.Context) error {
return nil
}
func (m *mockHypervisor) saveSandbox() error {
func (m *mockHypervisor) SaveVM() error {
return nil
}
func (m *mockHypervisor) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
func (m *mockHypervisor) AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
return nil
}
func (m *mockHypervisor) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
func (m *mockHypervisor) HotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
switch devType {
case cpuDev:
case CpuDev:
return devInfo.(uint32), nil
case memoryDev:
memdev := devInfo.(*memoryDevice)
return memdev.sizeMB, nil
case MemoryDev:
memdev := devInfo.(*MemoryDevice)
return memdev.SizeMB, nil
}
return nil, nil
}
func (m *mockHypervisor) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
func (m *mockHypervisor) HotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
switch devType {
case cpuDev:
case CpuDev:
return devInfo.(uint32), nil
case memoryDev:
case MemoryDev:
return 0, nil
}
return nil, nil
}
func (m *mockHypervisor) getSandboxConsole(ctx context.Context, sandboxID string) (string, string, error) {
func (m *mockHypervisor) GetVMConsole(ctx context.Context, sandboxID string) (string, string, error) {
return "", "", nil
}
func (m *mockHypervisor) resizeMemory(ctx context.Context, memMB uint32, memorySectionSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
return 0, memoryDevice{}, nil
func (m *mockHypervisor) ResizeMemory(ctx context.Context, memMB uint32, memorySectionSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
return 0, MemoryDevice{}, nil
}
func (m *mockHypervisor) resizeVCPUs(ctx context.Context, cpus uint32) (uint32, uint32, error) {
func (m *mockHypervisor) ResizeVCPUs(ctx context.Context, cpus uint32) (uint32, uint32, error) {
return 0, 0, nil
}
func (m *mockHypervisor) disconnect(ctx context.Context) {
func (m *mockHypervisor) Disconnect(ctx context.Context) {
}
func (m *mockHypervisor) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {
func (m *mockHypervisor) GetThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
vcpus := map[int]int{0: os.Getpid()}
return vcpuThreadIDs{vcpus}, nil
return VcpuThreadIDs{vcpus}, nil
}
func (m *mockHypervisor) cleanup(ctx context.Context) error {
func (m *mockHypervisor) Cleanup(ctx context.Context) error {
return nil
}
func (m *mockHypervisor) getPids() []int {
func (m *mockHypervisor) GetPids() []int {
return []int{m.mockPid}
}
func (m *mockHypervisor) getVirtioFsPid() *int {
func (m *mockHypervisor) GetVirtioFsPid() *int {
return nil
}
@@ -130,23 +130,23 @@ func (m *mockHypervisor) toGrpc(ctx context.Context) ([]byte, error) {
return nil, errors.New("mockHypervisor is not supported by VM cache")
}
func (m *mockHypervisor) save() (s persistapi.HypervisorState) {
func (m *mockHypervisor) Save() (s persistapi.HypervisorState) {
return
}
func (m *mockHypervisor) load(s persistapi.HypervisorState) {}
func (m *mockHypervisor) Load(s persistapi.HypervisorState) {}
func (m *mockHypervisor) check() error {
func (m *mockHypervisor) Check() error {
return nil
}
func (m *mockHypervisor) generateSocket(id string) (interface{}, error) {
func (m *mockHypervisor) GenerateSocket(id string) (interface{}, error) {
return types.MockHybridVSock{
UdsPath: MockHybridVSockPath,
}, nil
}
func (m *mockHypervisor) isRateLimiterBuiltin() bool {
func (m *mockHypervisor) IsRateLimiterBuiltin() bool {
return false
}

View File

@@ -13,7 +13,7 @@ import (
"github.com/stretchr/testify/assert"
)
func TestMockHypervisorCreateSandbox(t *testing.T) {
func TestMockHypervisorCreateVM(t *testing.T) {
var m *mockHypervisor
assert := assert.New(t)
@@ -31,7 +31,7 @@ func TestMockHypervisorCreateSandbox(t *testing.T) {
ctx := context.Background()
// wrong config
err := m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
err := m.CreateVM(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
assert.Error(err)
sandbox.config.HypervisorConfig = HypervisorConfig{
@@ -40,26 +40,26 @@ func TestMockHypervisorCreateSandbox(t *testing.T) {
HypervisorPath: fmt.Sprintf("%s/%s", testDir, testHypervisor),
}
err = m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
err = m.CreateVM(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
assert.NoError(err)
}
func TestMockHypervisorStartSandbox(t *testing.T) {
var m *mockHypervisor
assert.NoError(t, m.startSandbox(context.Background(), vmStartTimeout))
assert.NoError(t, m.StartVM(context.Background(), VmStartTimeout))
}
func TestMockHypervisorStopSandbox(t *testing.T) {
var m *mockHypervisor
assert.NoError(t, m.stopSandbox(context.Background(), false))
assert.NoError(t, m.StopVM(context.Background(), false))
}
func TestMockHypervisorAddDevice(t *testing.T) {
var m *mockHypervisor
assert.NoError(t, m.addDevice(context.Background(), nil, imgDev))
assert.NoError(t, m.AddDevice(context.Background(), nil, ImgDev))
}
func TestMockHypervisorGetSandboxConsole(t *testing.T) {
@@ -67,7 +67,7 @@ func TestMockHypervisorGetSandboxConsole(t *testing.T) {
expected := ""
expectedProto := ""
proto, result, err := m.getSandboxConsole(context.Background(), "testSandboxID")
proto, result, err := m.GetVMConsole(context.Background(), "testSandboxID")
assert.NoError(t, err)
assert.Equal(t, result, expected)
assert.Equal(t, proto, expectedProto)
@@ -76,25 +76,25 @@ func TestMockHypervisorGetSandboxConsole(t *testing.T) {
func TestMockHypervisorSaveSandbox(t *testing.T) {
var m *mockHypervisor
assert.NoError(t, m.saveSandbox())
assert.NoError(t, m.SaveVM())
}
func TestMockHypervisorDisconnect(t *testing.T) {
var m *mockHypervisor
m.disconnect(context.Background())
m.Disconnect(context.Background())
}
func TestMockHypervisorCheck(t *testing.T) {
var m *mockHypervisor
assert.NoError(t, m.check())
assert.NoError(t, m.Check())
}
func TestMockGenerateSocket(t *testing.T) {
var m *mockHypervisor
i, err := m.generateSocket("a")
i, err := m.GenerateSocket("a")
assert.NoError(t, err)
assert.NotNil(t, i)
}

View File

@@ -140,8 +140,9 @@ func (m *monitor) watchAgent(ctx context.Context) {
}
func (m *monitor) watchHypervisor(ctx context.Context) error {
if err := m.sandbox.hypervisor.check(); err != nil {
if err := m.sandbox.hypervisor.Check(); err != nil {
m.notify(ctx, errors.Wrapf(err, "failed to ping hypervisor process"))
m.notify(ctx, errors.Wrapf(err, "failed to ping Hypervisor process"))
return err
}
return nil

View File

@@ -495,7 +495,7 @@ func isSecret(path string) bool {
// files observed is greater than limit, break and return -1
func countFiles(path string, limit int) (numFiles int, err error) {
// First, check to see if the path exists
// First, Check to see if the path exists
file, err := os.Stat(path)
if os.IsNotExist(err) {
return 0, err
@@ -531,7 +531,7 @@ func countFiles(path string, limit int) (numFiles int, err error) {
func isWatchableMount(path string) bool {
if isSecret(path) || isConfigMap(path) {
// we have a cap on number of FDs which can be present in mount
// to determine if watchable. A similar check exists within the agent,
// to determine if watchable. A similar Check exists within the agent,
// which may or may not help handle case where extra files are added to
// a mount after the fact
count, _ := countFiles(path, 8)

View File

@@ -472,7 +472,7 @@ func TestBindUnmountContainerRootfsENOENTNotError(t *testing.T) {
cID := "contIDTest"
assert := assert.New(t)
// check to make sure the file doesn't exist
// Check to make sure the file doesn't exist
testPath := filepath.Join(testMnt, sID, cID, rootfsDir)
if _, err := os.Stat(testPath); !os.IsNotExist(err) {
assert.NoError(os.Remove(testPath))

View File

@@ -59,7 +59,7 @@ const (
// NetXConnectNoneModel can be used when the VM is in the host network namespace
NetXConnectNoneModel
// NetXConnectInvalidModel is the last item to check valid values by IsValid()
// NetXConnectInvalidModel is the last item to Check valid values by IsValid()
NetXConnectInvalidModel
)
@@ -426,7 +426,7 @@ func getLinkByName(netHandle *netlink.Handle, name string, expectedLink netlink.
}
// The endpoint type should dictate how the connection needs to happen.
func xConnectVMNetwork(ctx context.Context, endpoint Endpoint, h hypervisor) error {
func xConnectVMNetwork(ctx context.Context, endpoint Endpoint, h Hypervisor) error {
var err error
span, ctx := networkTrace(ctx, "xConnectVMNetwork", endpoint)
@@ -435,16 +435,16 @@ func xConnectVMNetwork(ctx context.Context, endpoint Endpoint, h hypervisor) err
netPair := endpoint.NetworkPair()
queues := 0
caps := h.capabilities(ctx)
caps := h.Capabilities(ctx)
if caps.IsMultiQueueSupported() {
queues = int(h.hypervisorConfig().NumVCPUs)
queues = int(h.HypervisorConfig().NumVCPUs)
}
var disableVhostNet bool
if rootless.IsRootless() {
disableVhostNet = true
} else {
disableVhostNet = h.hypervisorConfig().DisableVhostNet
disableVhostNet = h.HypervisorConfig().DisableVhostNet
}
if netPair.NetInterworkingModel == NetXConnectDefaultModel {
@@ -518,7 +518,7 @@ func createFds(device string, numFds int) ([]*os.File, error) {
//
// Till that bug is fixed we need to pick a random non conflicting index and try to
// create a link. If that fails, we need to try with another.
// All the kernel does not check if the link id conflicts with a link id on the host
// All the kernel does not Check if the link id conflicts with a link id on the host
// hence we need to offset the link id to prevent any overlaps with the host index
//
// Here the kernel will ensure that there is no race condition
@@ -701,7 +701,7 @@ func setupTCFiltering(ctx context.Context, endpoint Endpoint, queues int, disabl
attrs = link.Attrs()
// Save the veth MAC address to the TAP so that it can later be used
// to build the hypervisor command line. This MAC address has to be
// to build the Hypervisor command line. This MAC address has to be
// the one inside the VM in order to avoid any firewall issues. The
// bridge created by the network plugin on the host actually expects
// to see traffic from this MAC address and not another one.
@@ -1356,15 +1356,15 @@ func (n *Network) Add(ctx context.Context, config *NetworkConfig, s *Sandbox, ho
}
}
if !s.hypervisor.isRateLimiterBuiltin() {
rxRateLimiterMaxRate := s.hypervisor.hypervisorConfig().RxRateLimiterMaxRate
if !s.hypervisor.IsRateLimiterBuiltin() {
rxRateLimiterMaxRate := s.hypervisor.HypervisorConfig().RxRateLimiterMaxRate
if rxRateLimiterMaxRate > 0 {
networkLogger().Info("Add Rx Rate Limiter")
if err := addRxRateLimiter(endpoint, rxRateLimiterMaxRate); err != nil {
return err
}
}
txRateLimiterMaxRate := s.hypervisor.hypervisorConfig().TxRateLimiterMaxRate
txRateLimiterMaxRate := s.hypervisor.HypervisorConfig().TxRateLimiterMaxRate
if txRateLimiterMaxRate > 0 {
networkLogger().Info("Add Tx Rate Limiter")
if err := addTxRateLimiter(endpoint, txRateLimiterMaxRate); err != nil {
@@ -1413,7 +1413,7 @@ func (n *Network) PostAdd(ctx context.Context, ns *NetworkNamespace, hotplug boo
// Remove network endpoints in the network namespace. It also deletes the network
// namespace in case the namespace has been created by us.
func (n *Network) Remove(ctx context.Context, ns *NetworkNamespace, hypervisor hypervisor) error {
func (n *Network) Remove(ctx context.Context, ns *NetworkNamespace, hypervisor Hypervisor) error {
span, ctx := n.trace(ctx, "Remove")
defer span.End()
@@ -1560,7 +1560,7 @@ func addHTBQdisc(linkIndex int, maxRate uint64) error {
// By redirecting interface ingress traffic to ifb and treat it as egress traffic there,
// we could do network shaping to interface inbound traffic.
func addIFBDevice() (int, error) {
// check whether host supports ifb
// Check whether host supports ifb
if ok, err := utils.SupportsIfb(); !ok {
return -1, err
}

View File

@@ -59,7 +59,7 @@ func (s *Sandbox) dumpState(ss *persistapi.SandboxState, cs map[string]persistap
}
func (s *Sandbox) dumpHypervisor(ss *persistapi.SandboxState) {
ss.HypervisorState = s.hypervisor.save()
ss.HypervisorState = s.hypervisor.Save()
// BlockIndexMap will be moved from sandbox state to hypervisor state later
ss.HypervisorState.BlockIndexMap = s.state.BlockIndexMap
}
@@ -316,7 +316,7 @@ func (c *Container) loadContState(cs persistapi.ContainerState) {
}
func (s *Sandbox) loadHypervisor(hs persistapi.HypervisorState) {
s.hypervisor.load(hs)
s.hypervisor.Load(hs)
}
func (s *Sandbox) loadAgent(as persistapi.AgentState) {

View File

@@ -55,7 +55,7 @@ func TestSandboxRestore(t *testing.T) {
assert.Equal(sandbox.state.GuestMemoryBlockSizeMB, uint32(0))
assert.Equal(len(sandbox.state.BlockIndexMap), 0)
// set state data and save again
// set state data and Save again
sandbox.state.State = types.StateString("running")
sandbox.state.GuestMemoryBlockSizeMB = uint32(1024)
sandbox.state.BlockIndexMap[2] = struct{}{}

View File

@@ -121,12 +121,12 @@ func (endpoint *PhysicalEndpoint) Detach(ctx context.Context, netNsCreated bool,
}
// HotAttach for physical endpoint not supported yet
func (endpoint *PhysicalEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
func (endpoint *PhysicalEndpoint) HotAttach(ctx context.Context, h Hypervisor) error {
return fmt.Errorf("PhysicalEndpoint does not support Hot attach")
}
// HotDetach for physical endpoint not supported yet
func (endpoint *PhysicalEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
func (endpoint *PhysicalEndpoint) HotDetach(ctx context.Context, h Hypervisor, netNsCreated bool, netNsPath string) error {
return fmt.Errorf("PhysicalEndpoint does not support Hot detach")
}

View File

@@ -126,7 +126,7 @@ const (
// memory dump format will be set to elf
memoryDumpFormat = "elf"
qmpCapErrMsg = "Failed to negotiate QMP capabilities"
qmpCapErrMsg = "Failed to negotiate QMP Capabilities"
qmpExecCatCmd = "exec:cat"
scsiControllerID = "scsi0"
@@ -195,14 +195,14 @@ func (q *qemu) kernelParameters() string {
}
// Adds all capabilities supported by qemu implementation of hypervisor interface
func (q *qemu) capabilities(ctx context.Context) types.Capabilities {
span, _ := katatrace.Trace(ctx, q.Logger(), "capabilities", qemuTracingTags, map[string]string{"sandbox_id": q.id})
func (q *qemu) Capabilities(ctx context.Context) types.Capabilities {
span, _ := katatrace.Trace(ctx, q.Logger(), "Capabilities", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End()
return q.arch.capabilities()
}
func (q *qemu) hypervisorConfig() HypervisorConfig {
func (q *qemu) HypervisorConfig() HypervisorConfig {
return q.config
}
@@ -308,7 +308,7 @@ func (q *qemu) cpuTopology() govmmQemu.SMP {
}
func (q *qemu) hostMemMB() (uint64, error) {
hostMemKb, err := getHostMemorySizeKb(procMemInfo)
hostMemKb, err := GetHostMemorySizeKb(procMemInfo)
if err != nil {
return 0, fmt.Errorf("Unable to read memory info: %s", err)
}
@@ -388,7 +388,7 @@ func (q *qemu) createQmpSocket() ([]govmmQemu.QMPSocket, error) {
func (q *qemu) buildDevices(ctx context.Context, initrdPath string) ([]govmmQemu.Device, *govmmQemu.IOThread, error) {
var devices []govmmQemu.Device
_, console, err := q.getSandboxConsole(ctx, q.id)
_, console, err := q.GetVMConsole(ctx, q.id)
if err != nil {
return nil, nil, err
}
@@ -466,7 +466,7 @@ func (q *qemu) setupFileBackedMem(knobs *govmmQemu.Knobs, memory *govmmQemu.Memo
}
func (q *qemu) setConfig(config *HypervisorConfig) error {
err := config.valid()
err := config.Valid()
if err != nil {
return err
}
@@ -476,14 +476,15 @@ func (q *qemu) setConfig(config *HypervisorConfig) error {
return nil
}
// createSandbox is the Hypervisor sandbox creation implementation for govmmQemu.
func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error {
// CreateVM is the Hypervisor VM creation implementation for govmmQemu.
func (q *qemu) CreateVM(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error {
// Save the tracing context
q.ctx = ctx
span, ctx := katatrace.Trace(ctx, q.Logger(), "createSandbox", qemuTracingTags, map[string]string{"sandbox_id": q.id})
span, ctx := katatrace.Trace(ctx, q.Logger(), "CreateVM", qemuTracingTags, map[string]string{"VM_ID": q.id})
defer span.End()
// Breaks hypervisor abstraction Has Kata Specific logic: See within
if err := q.setup(ctx, id, hypervisorConfig); err != nil {
return err
}
@@ -523,6 +524,7 @@ func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNa
return err
}
// Breaks hypervisor abstration Has Kata Specific logic
kernel := govmmQemu.Kernel{
Path: kernelPath,
InitrdPath: initrdPath,
@@ -669,7 +671,7 @@ func (q *qemu) vhostFSSocketPath(id string) (string, error) {
func (q *qemu) setupVirtiofsd(ctx context.Context) (err error) {
pid, err := q.virtiofsd.Start(ctx, func() {
q.stopSandbox(ctx, false)
q.StopVM(ctx, false)
})
if err != nil {
return err
@@ -768,8 +770,8 @@ func (q *qemu) setupVirtioMem(ctx context.Context) error {
}
// startSandbox will start the Sandbox's VM.
func (q *qemu) startSandbox(ctx context.Context, timeout int) error {
span, ctx := katatrace.Trace(ctx, q.Logger(), "startSandbox", qemuTracingTags, map[string]string{"sandbox_id": q.id})
func (q *qemu) StartVM(ctx context.Context, timeout int) error {
span, ctx := katatrace.Trace(ctx, q.Logger(), "StartVM", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End()
if q.config.Debug {
@@ -849,7 +851,7 @@ func (q *qemu) startSandbox(ctx context.Context, timeout int) error {
return fmt.Errorf("failed to launch qemu: %s, error messages from qemu log: %s", err, strErr)
}
err = q.waitSandbox(ctx, timeout)
err = q.waitVM(ctx, timeout)
if err != nil {
return err
}
@@ -886,9 +888,9 @@ func (q *qemu) bootFromTemplate() error {
return q.waitMigration()
}
// waitSandbox will wait for the Sandbox's VM to be up and running.
func (q *qemu) waitSandbox(ctx context.Context, timeout int) error {
span, _ := katatrace.Trace(ctx, q.Logger(), "waitSandbox", qemuTracingTags, map[string]string{"sandbox_id": q.id})
// waitVM will wait for the Sandbox's VM to be up and running.
func (q *qemu) waitVM(ctx context.Context, timeout int) error {
span, _ := katatrace.Trace(ctx, q.Logger(), "waitVM", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End()
if timeout < 0 {
@@ -926,7 +928,7 @@ func (q *qemu) waitSandbox(ctx context.Context, timeout int) error {
"qmp-major-version": ver.Major,
"qmp-minor-version": ver.Minor,
"qmp-micro-version": ver.Micro,
"qmp-capabilities": strings.Join(ver.Capabilities, ","),
"qmp-Capabilities": strings.Join(ver.Capabilities, ","),
}).Infof("QMP details")
if err = q.qmpMonitorCh.qmp.ExecuteQMPCapabilities(q.qmpMonitorCh.ctx); err != nil {
@@ -938,8 +940,8 @@ func (q *qemu) waitSandbox(ctx context.Context, timeout int) error {
}
// stopSandbox will stop the Sandbox's VM.
func (q *qemu) stopSandbox(ctx context.Context, waitOnly bool) error {
span, _ := katatrace.Trace(ctx, q.Logger(), "stopSandbox", qemuTracingTags, map[string]string{"sandbox_id": q.id})
func (q *qemu) StopVM(ctx context.Context, waitOnly bool) error {
span, _ := katatrace.Trace(ctx, q.Logger(), "StopVM", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End()
q.Logger().Info("Stopping Sandbox")
@@ -971,7 +973,7 @@ func (q *qemu) stopSandbox(ctx context.Context, waitOnly bool) error {
}
if waitOnly {
pids := q.getPids()
pids := q.GetPids()
if len(pids) == 0 {
return errors.New("cannot determine QEMU PID")
}
@@ -999,17 +1001,17 @@ func (q *qemu) stopSandbox(ctx context.Context, waitOnly bool) error {
func (q *qemu) cleanupVM() error {
// cleanup vm path
// Cleanup vm path
dir := filepath.Join(q.store.RunVMStoragePath(), q.id)
// If it's a symlink, remove both dir and the target.
// This can happen when vm template links a sandbox to a vm.
link, err := filepath.EvalSymlinks(dir)
if err != nil {
// Well, it's just cleanup failure. Let's ignore it.
// Well, it's just Cleanup failure. Let's ignore it.
q.Logger().WithError(err).WithField("dir", dir).Warn("failed to resolve vm path")
}
q.Logger().WithField("link", link).WithField("dir", dir).Infof("cleanup vm path")
q.Logger().WithField("link", link).WithField("dir", dir).Infof("Cleanup vm path")
if err := os.RemoveAll(dir); err != nil {
q.Logger().WithError(err).Warnf("failed to remove vm path %s", dir)
@@ -1149,18 +1151,18 @@ func (q *qemu) dumpSandboxMetaInfo(dumpSavePath string) {
// copy state from /run/vc/sbs to memory dump directory
statePath := filepath.Join(q.store.RunStoragePath(), q.id)
command := []string{"/bin/cp", "-ar", statePath, dumpStatePath}
q.Logger().WithField("command", command).Info("try to save sandbox state")
q.Logger().WithField("command", command).Info("try to Save sandbox state")
if output, err := pkgUtils.RunCommandFull(command, true); err != nil {
q.Logger().WithError(err).WithField("output", output).Error("failed to save state")
q.Logger().WithError(err).WithField("output", output).Error("failed to Save state")
}
// save hypervisor meta information
// Save hypervisor meta information
fileName := filepath.Join(dumpSavePath, "hypervisor.conf")
data, _ := json.MarshalIndent(q.config, "", " ")
if err := ioutil.WriteFile(fileName, data, defaultFilePerms); err != nil {
q.Logger().WithError(err).WithField("hypervisor.conf", data).Error("write to hypervisor.conf file failed")
}
// save hypervisor version
// Save hypervisor version
hyperVisorVersion, err := pkgUtils.RunCommand([]string{q.config.HypervisorPath, "--version"})
if err != nil {
q.Logger().WithError(err).WithField("HypervisorPath", data).Error("failed to get hypervisor version")
@@ -1188,11 +1190,11 @@ func (q *qemu) dumpGuestMemory(dumpSavePath string) error {
return err
}
// save meta information for sandbox
// Save meta information for sandbox
q.dumpSandboxMetaInfo(dumpSavePath)
q.Logger().Info("dump sandbox meta information completed")
// check device free space and estimated dump size
// Check device free space and estimated dump size
if err := q.canDumpGuestMemory(dumpSavePath); err != nil {
q.Logger().Warnf("can't dump guest memory: %s", err.Error())
return err
@@ -1230,7 +1232,7 @@ func (q *qemu) qmpShutdown() {
}
}
func (q *qemu) hotplugAddBlockDevice(ctx context.Context, drive *config.BlockDrive, op operation, devID string) (err error) {
func (q *qemu) hotplugAddBlockDevice(ctx context.Context, drive *config.BlockDrive, op Operation, devID string) (err error) {
// drive can be a pmem device, in which case it's used as backing file for a nvdimm device
if q.config.BlockDeviceDriver == config.Nvdimm || drive.Pmem {
var blocksize int64
@@ -1351,7 +1353,7 @@ func (q *qemu) hotplugAddBlockDevice(ctx context.Context, drive *config.BlockDri
return nil
}
func (q *qemu) hotplugAddVhostUserBlkDevice(ctx context.Context, vAttr *config.VhostUserDeviceAttrs, op operation, devID string) (err error) {
func (q *qemu) hotplugAddVhostUserBlkDevice(ctx context.Context, vAttr *config.VhostUserDeviceAttrs, op Operation, devID string) (err error) {
err = q.qmpMonitorCh.qmp.ExecuteCharDevUnixSocketAdd(q.qmpMonitorCh.ctx, vAttr.DevID, vAttr.SocketPath, false, false)
if err != nil {
return err
@@ -1392,14 +1394,14 @@ func (q *qemu) hotplugAddVhostUserBlkDevice(ctx context.Context, vAttr *config.V
return nil
}
func (q *qemu) hotplugBlockDevice(ctx context.Context, drive *config.BlockDrive, op operation) error {
func (q *qemu) hotplugBlockDevice(ctx context.Context, drive *config.BlockDrive, op Operation) error {
if err := q.qmpSetup(); err != nil {
return err
}
devID := "virtio-" + drive.ID
if op == addDevice {
if op == AddDevice {
return q.hotplugAddBlockDevice(ctx, drive, op, devID)
}
if !drive.Swap && q.config.BlockDeviceDriver == config.VirtioBlock {
@@ -1415,14 +1417,14 @@ func (q *qemu) hotplugBlockDevice(ctx context.Context, drive *config.BlockDrive,
return q.qmpMonitorCh.qmp.ExecuteBlockdevDel(q.qmpMonitorCh.ctx, drive.ID)
}
func (q *qemu) hotplugVhostUserDevice(ctx context.Context, vAttr *config.VhostUserDeviceAttrs, op operation) error {
func (q *qemu) hotplugVhostUserDevice(ctx context.Context, vAttr *config.VhostUserDeviceAttrs, op Operation) error {
if err := q.qmpSetup(); err != nil {
return err
}
devID := "virtio-" + vAttr.DevID
if op == addDevice {
if op == AddDevice {
switch vAttr.Type {
case config.VhostUserBlk:
return q.hotplugAddVhostUserBlkDevice(ctx, vAttr, op, devID)
@@ -1505,15 +1507,15 @@ func (q *qemu) qomGetPciPath(qemuID string) (vcTypes.PciPath, error) {
return vcTypes.PciPathFromSlots(bridgeSlot, devSlot)
}
func (q *qemu) hotplugVFIODevice(ctx context.Context, device *config.VFIODev, op operation) (err error) {
func (q *qemu) hotplugVFIODevice(ctx context.Context, device *config.VFIODev, op Operation) (err error) {
if err = q.qmpSetup(); err != nil {
return err
}
devID := device.ID
machineType := q.hypervisorConfig().HypervisorMachineType
machineType := q.HypervisorConfig().HypervisorMachineType
if op == addDevice {
if op == AddDevice {
buf, _ := json.Marshal(device)
q.Logger().WithFields(logrus.Fields{
@@ -1623,7 +1625,7 @@ func (q *qemu) hotAddNetDevice(name, hardAddr string, VMFds, VhostFds []*os.File
return q.qmpMonitorCh.qmp.ExecuteNetdevAddByFds(q.qmpMonitorCh.ctx, "tap", name, VMFdNames, VhostFdNames)
}
func (q *qemu) hotplugNetDevice(ctx context.Context, endpoint Endpoint, op operation) (err error) {
func (q *qemu) hotplugNetDevice(ctx context.Context, endpoint Endpoint, op Operation) (err error) {
if err = q.qmpSetup(); err != nil {
return err
}
@@ -1641,7 +1643,7 @@ func (q *qemu) hotplugNetDevice(ctx context.Context, endpoint Endpoint, op opera
}
devID := "virtio-" + tap.ID
if op == addDevice {
if op == AddDevice {
if err = q.hotAddNetDevice(tap.Name, endpoint.HardwareAddr(), tap.VMFds, tap.VhostFds); err != nil {
return err
}
@@ -1698,24 +1700,24 @@ func (q *qemu) hotplugNetDevice(ctx context.Context, endpoint Endpoint, op opera
return q.qmpMonitorCh.qmp.ExecuteNetdevDel(q.qmpMonitorCh.ctx, tap.Name)
}
func (q *qemu) hotplugDevice(ctx context.Context, devInfo interface{}, devType deviceType, op operation) (interface{}, error) {
func (q *qemu) hotplugDevice(ctx context.Context, devInfo interface{}, devType DeviceType, op Operation) (interface{}, error) {
switch devType {
case blockDev:
case BlockDev:
drive := devInfo.(*config.BlockDrive)
return nil, q.hotplugBlockDevice(ctx, drive, op)
case cpuDev:
case CpuDev:
vcpus := devInfo.(uint32)
return q.hotplugCPUs(vcpus, op)
case vfioDev:
case VfioDev:
device := devInfo.(*config.VFIODev)
return nil, q.hotplugVFIODevice(ctx, device, op)
case memoryDev:
memdev := devInfo.(*memoryDevice)
case MemoryDev:
memdev := devInfo.(*MemoryDevice)
return q.hotplugMemory(memdev, op)
case netDev:
case NetDev:
device := devInfo.(Endpoint)
return nil, q.hotplugNetDevice(ctx, device, op)
case vhostuserDev:
case VhostuserDev:
vAttr := devInfo.(*config.VhostUserDeviceAttrs)
return nil, q.hotplugVhostUserDevice(ctx, vAttr, op)
default:
@@ -1723,12 +1725,12 @@ func (q *qemu) hotplugDevice(ctx context.Context, devInfo interface{}, devType d
}
}
func (q *qemu) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
span, ctx := katatrace.Trace(ctx, q.Logger(), "hotplugAddDevice", qemuTracingTags, map[string]string{"sandbox_id": q.id})
func (q *qemu) HotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
span, ctx := katatrace.Trace(ctx, q.Logger(), "HotplugAddDevice", qemuTracingTags, map[string]string{"sandbox_id": q.id})
katatrace.AddTag(span, "device", devInfo)
defer span.End()
data, err := q.hotplugDevice(ctx, devInfo, devType, addDevice)
data, err := q.hotplugDevice(ctx, devInfo, devType, AddDevice)
if err != nil {
return data, err
}
@@ -1736,12 +1738,12 @@ func (q *qemu) hotplugAddDevice(ctx context.Context, devInfo interface{}, devTyp
return data, nil
}
func (q *qemu) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
span, ctx := katatrace.Trace(ctx, q.Logger(), "hotplugRemoveDevice", qemuTracingTags, map[string]string{"sandbox_id": q.id})
func (q *qemu) HotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
span, ctx := katatrace.Trace(ctx, q.Logger(), "HotplugRemoveDevice", qemuTracingTags, map[string]string{"sandbox_id": q.id})
katatrace.AddTag(span, "device", devInfo)
defer span.End()
data, err := q.hotplugDevice(ctx, devInfo, devType, removeDevice)
data, err := q.hotplugDevice(ctx, devInfo, devType, RemoveDevice)
if err != nil {
return data, err
}
@@ -1749,7 +1751,7 @@ func (q *qemu) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, dev
return data, nil
}
func (q *qemu) hotplugCPUs(vcpus uint32, op operation) (uint32, error) {
func (q *qemu) hotplugCPUs(vcpus uint32, op Operation) (uint32, error) {
if vcpus == 0 {
q.Logger().Warnf("cannot hotplug 0 vCPUs")
return 0, nil
@@ -1759,7 +1761,7 @@ func (q *qemu) hotplugCPUs(vcpus uint32, op operation) (uint32, error) {
return 0, err
}
if op == addDevice {
if op == AddDevice {
return q.hotplugAddCPUs(vcpus)
}
@@ -1819,7 +1821,7 @@ func (q *qemu) hotplugAddCPUs(amount uint32) (uint32, error) {
continue
}
// a new vCPU was added, update list of hotplugged vCPUs and check if all vCPUs were added
// a new vCPU was added, update list of hotplugged vCPUs and Check if all vCPUs were added
q.state.HotpluggedVCPUs = append(q.state.HotpluggedVCPUs, CPUDevice{cpuID})
hotpluggedVCPUs++
if hotpluggedVCPUs == amount {
@@ -1854,46 +1856,46 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) {
return amount, nil
}
func (q *qemu) hotplugMemory(memDev *memoryDevice, op operation) (int, error) {
func (q *qemu) hotplugMemory(memDev *MemoryDevice, op Operation) (int, error) {
if !q.arch.supportGuestMemoryHotplug() {
return 0, noGuestMemHotplugErr
}
if memDev.sizeMB < 0 {
return 0, fmt.Errorf("cannot hotplug negative size (%d) memory", memDev.sizeMB)
if memDev.SizeMB < 0 {
return 0, fmt.Errorf("cannot hotplug negative size (%d) memory", memDev.SizeMB)
}
memLog := q.Logger().WithField("hotplug", "memory")
memLog.WithField("hotplug-memory-mb", memDev.sizeMB).Debug("requested memory hotplug")
memLog.WithField("hotplug-memory-mb", memDev.SizeMB).Debug("requested memory hotplug")
if err := q.qmpSetup(); err != nil {
return 0, err
}
currentMemory := int(q.config.MemorySize) + q.state.HotpluggedMemory
if memDev.sizeMB == 0 {
if memDev.SizeMB == 0 {
memLog.Debug("hotplug is not required")
return 0, nil
}
switch op {
case removeDevice:
memLog.WithField("operation", "remove").Debugf("Requested to remove memory: %d MB", memDev.sizeMB)
case RemoveDevice:
memLog.WithField("operation", "remove").Debugf("Requested to remove memory: %d MB", memDev.SizeMB)
// Dont fail but warn that this is not supported.
memLog.Warn("hot-remove VM memory not supported")
return 0, nil
case addDevice:
memLog.WithField("operation", "add").Debugf("Requested to add memory: %d MB", memDev.sizeMB)
case AddDevice:
memLog.WithField("operation", "add").Debugf("Requested to add memory: %d MB", memDev.SizeMB)
maxMem, err := q.hostMemMB()
if err != nil {
return 0, err
}
// Don't exceed the maximum amount of memory
if currentMemory+memDev.sizeMB > int(maxMem) {
if currentMemory+memDev.SizeMB > int(maxMem) {
// Fixme: return a typed error
return 0, fmt.Errorf("Unable to hotplug %d MiB memory, the SB has %d MiB and the maximum amount is %d MiB",
memDev.sizeMB, currentMemory, maxMem)
memDev.SizeMB, currentMemory, maxMem)
}
memoryAdded, err := q.hotplugAddMemory(memDev)
if err != nil {
@@ -1906,7 +1908,7 @@ func (q *qemu) hotplugMemory(memDev *memoryDevice, op operation) (int, error) {
}
func (q *qemu) hotplugAddMemory(memDev *memoryDevice) (int, error) {
func (q *qemu) hotplugAddMemory(memDev *MemoryDevice) (int, error) {
memoryDevices, err := q.qmpMonitorCh.qmp.ExecQueryMemoryDevices(q.qmpMonitorCh.ctx)
if err != nil {
return 0, fmt.Errorf("failed to query memory devices: %v", err)
@@ -1919,7 +1921,7 @@ func (q *qemu) hotplugAddMemory(memDev *memoryDevice) (int, error) {
maxSlot = device.Data.Slot
}
}
memDev.slot = maxSlot + 1
memDev.Slot = maxSlot + 1
}
share, target, memoryBack, err := q.getMemArgs()
@@ -1927,46 +1929,46 @@ func (q *qemu) hotplugAddMemory(memDev *memoryDevice) (int, error) {
return 0, err
}
err = q.qmpMonitorCh.qmp.ExecHotplugMemory(q.qmpMonitorCh.ctx, memoryBack, "mem"+strconv.Itoa(memDev.slot), target, memDev.sizeMB, share)
err = q.qmpMonitorCh.qmp.ExecHotplugMemory(q.qmpMonitorCh.ctx, memoryBack, "mem"+strconv.Itoa(memDev.Slot), target, memDev.SizeMB, share)
if err != nil {
q.Logger().WithError(err).Error("hotplug memory")
return 0, err
}
// if guest kernel only supports memory hotplug via probe interface, we need to get address of hot-add memory device
if memDev.probe {
if memDev.Probe {
memoryDevices, err := q.qmpMonitorCh.qmp.ExecQueryMemoryDevices(q.qmpMonitorCh.ctx)
if err != nil {
return 0, fmt.Errorf("failed to query memory devices: %v", err)
}
if len(memoryDevices) != 0 {
q.Logger().WithField("addr", fmt.Sprintf("0x%x", memoryDevices[len(memoryDevices)-1].Data.Addr)).Debug("recently hot-add memory device")
memDev.addr = memoryDevices[len(memoryDevices)-1].Data.Addr
memDev.Addr = memoryDevices[len(memoryDevices)-1].Data.Addr
} else {
return 0, fmt.Errorf("failed to probe address of recently hot-add memory device, no device exists")
}
}
q.state.HotpluggedMemory += memDev.sizeMB
return memDev.sizeMB, nil
q.state.HotpluggedMemory += memDev.SizeMB
return memDev.SizeMB, nil
}
func (q *qemu) pauseSandbox(ctx context.Context) error {
span, ctx := katatrace.Trace(ctx, q.Logger(), "pauseSandbox", qemuTracingTags, map[string]string{"sandbox_id": q.id})
func (q *qemu) PauseVM(ctx context.Context) error {
span, ctx := katatrace.Trace(ctx, q.Logger(), "PauseVM", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End()
return q.togglePauseSandbox(ctx, true)
}
func (q *qemu) resumeSandbox(ctx context.Context) error {
span, ctx := katatrace.Trace(ctx, q.Logger(), "resumeSandbox", qemuTracingTags, map[string]string{"sandbox_id": q.id})
func (q *qemu) ResumeVM(ctx context.Context) error {
span, ctx := katatrace.Trace(ctx, q.Logger(), "ResumeVM", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End()
return q.togglePauseSandbox(ctx, false)
}
// addDevice will add extra devices to Qemu command line.
func (q *qemu) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
func (q *qemu) AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
var err error
span, _ := katatrace.Trace(ctx, q.Logger(), "addDevice", qemuTracingTags, map[string]string{"sandbox_id": q.id})
span, _ := katatrace.Trace(ctx, q.Logger(), "AddDevice", qemuTracingTags, map[string]string{"sandbox_id": q.id})
katatrace.AddTag(span, "device", devInfo)
defer span.End()
@@ -2024,8 +2026,8 @@ func (q *qemu) addDevice(ctx context.Context, devInfo interface{}, devType devic
// getSandboxConsole builds the path of the console where we can read
// logs coming from the sandbox.
func (q *qemu) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
span, _ := katatrace.Trace(ctx, q.Logger(), "getSandboxConsole", qemuTracingTags, map[string]string{"sandbox_id": q.id})
func (q *qemu) GetVMConsole(ctx context.Context, id string) (string, string, error) {
span, _ := katatrace.Trace(ctx, q.Logger(), "GetVMConsole", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End()
consoleURL, err := utils.BuildSocketPath(q.store.RunVMStoragePath(), id, consoleSocket)
@@ -2036,8 +2038,8 @@ func (q *qemu) getSandboxConsole(ctx context.Context, id string) (string, string
return consoleProtoUnix, consoleURL, nil
}
func (q *qemu) saveSandbox() error {
q.Logger().Info("save sandbox")
func (q *qemu) SaveVM() error {
q.Logger().Info("Save sandbox")
if err := q.qmpSetup(); err != nil {
return err
@@ -2089,8 +2091,8 @@ func (q *qemu) waitMigration() error {
return nil
}
func (q *qemu) disconnect(ctx context.Context) {
span, _ := katatrace.Trace(ctx, q.Logger(), "disconnect", qemuTracingTags, map[string]string{"sandbox_id": q.id})
func (q *qemu) Disconnect(ctx context.Context) {
span, _ := katatrace.Trace(ctx, q.Logger(), "Disconnect", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End()
q.qmpShutdown()
@@ -2107,23 +2109,23 @@ func (q *qemu) disconnect(ctx context.Context) {
// the memory to remove has to be at least the size of one slot.
// To return memory back we are resizing the VM memory balloon.
// A longer term solution is evaluate solutions like virtio-mem
func (q *qemu) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
func (q *qemu) ResizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
currentMemory := q.config.MemorySize + uint32(q.state.HotpluggedMemory)
if err := q.qmpSetup(); err != nil {
return 0, memoryDevice{}, err
return 0, MemoryDevice{}, err
}
var addMemDevice memoryDevice
var addMemDevice MemoryDevice
if q.config.VirtioMem && currentMemory != reqMemMB {
q.Logger().WithField("hotplug", "memory").Debugf("resize memory from %dMB to %dMB", currentMemory, reqMemMB)
sizeByte := uint64(reqMemMB - q.config.MemorySize)
sizeByte = sizeByte * 1024 * 1024
err := q.qmpMonitorCh.qmp.ExecQomSet(q.qmpMonitorCh.ctx, "virtiomem0", "requested-size", sizeByte)
if err != nil {
return 0, memoryDevice{}, err
return 0, MemoryDevice{}, err
}
q.state.HotpluggedMemory = int(sizeByte / 1024 / 1024)
return reqMemMB, memoryDevice{}, nil
return reqMemMB, MemoryDevice{}, nil
}
switch {
@@ -2132,13 +2134,13 @@ func (q *qemu) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSiz
addMemMB := reqMemMB - currentMemory
memHotplugMB, err := calcHotplugMemMiBSize(addMemMB, memoryBlockSizeMB)
if err != nil {
return currentMemory, memoryDevice{}, err
return currentMemory, MemoryDevice{}, err
}
addMemDevice.sizeMB = int(memHotplugMB)
addMemDevice.probe = probe
addMemDevice.SizeMB = int(memHotplugMB)
addMemDevice.Probe = probe
data, err := q.hotplugAddDevice(ctx, &addMemDevice, memoryDev)
data, err := q.HotplugAddDevice(ctx, &addMemDevice, MemoryDev)
if err != nil {
return currentMemory, addMemDevice, err
}
@@ -2152,13 +2154,13 @@ func (q *qemu) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSiz
addMemMB := currentMemory - reqMemMB
memHotunplugMB, err := calcHotplugMemMiBSize(addMemMB, memoryBlockSizeMB)
if err != nil {
return currentMemory, memoryDevice{}, err
return currentMemory, MemoryDevice{}, err
}
addMemDevice.sizeMB = int(memHotunplugMB)
addMemDevice.probe = probe
addMemDevice.SizeMB = int(memHotunplugMB)
addMemDevice.Probe = probe
data, err := q.hotplugRemoveDevice(ctx, &addMemDevice, memoryDev)
data, err := q.HotplugRemoveDevice(ctx, &addMemDevice, MemoryDev)
if err != nil {
return currentMemory, addMemDevice, err
}
@@ -2166,7 +2168,7 @@ func (q *qemu) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSiz
if !ok {
return currentMemory, addMemDevice, fmt.Errorf("Could not get the memory removed, got %+v", data)
}
//FIXME: This is to check memory hotplugRemoveDevice reported 0, as this is not supported.
//FIXME: This is to Check memory HotplugRemoveDevice reported 0, as this is not supported.
// In the future if this is implemented this validation should be removed.
if memoryRemoved != 0 {
return currentMemory, addMemDevice, fmt.Errorf("memory hot unplug is not supported, something went wrong")
@@ -2308,11 +2310,11 @@ func genericAppendPCIeRootPort(devices []govmmQemu.Device, number uint32, machin
return devices
}
func (q *qemu) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {
span, _ := katatrace.Trace(ctx, q.Logger(), "getThreadIDs", qemuTracingTags, map[string]string{"sandbox_id": q.id})
func (q *qemu) GetThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
span, _ := katatrace.Trace(ctx, q.Logger(), "GetThreadIDs", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End()
tid := vcpuThreadIDs{}
tid := VcpuThreadIDs{}
if err := q.qmpSetup(); err != nil {
return tid, err
}
@@ -2340,7 +2342,7 @@ func calcHotplugMemMiBSize(mem uint32, memorySectionSizeMB uint32) (uint32, erro
return uint32(math.Ceil(float64(mem)/float64(memorySectionSizeMB))) * memorySectionSizeMB, nil
}
func (q *qemu) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
func (q *qemu) ResizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
currentVCPUs = q.config.NumVCPUs + uint32(len(q.state.HotpluggedVCPUs))
newVCPUs = currentVCPUs
@@ -2348,7 +2350,7 @@ func (q *qemu) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs u
case currentVCPUs < reqVCPUs:
//hotplug
addCPUs := reqVCPUs - currentVCPUs
data, err := q.hotplugAddDevice(ctx, addCPUs, cpuDev)
data, err := q.HotplugAddDevice(ctx, addCPUs, CpuDev)
if err != nil {
return currentVCPUs, newVCPUs, err
}
@@ -2360,7 +2362,7 @@ func (q *qemu) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs u
case currentVCPUs > reqVCPUs:
//hotunplug
removeCPUs := currentVCPUs - reqVCPUs
data, err := q.hotplugRemoveDevice(ctx, removeCPUs, cpuDev)
data, err := q.HotplugRemoveDevice(ctx, removeCPUs, CpuDev)
if err != nil {
return currentVCPUs, newVCPUs, err
}
@@ -2373,8 +2375,8 @@ func (q *qemu) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs u
return currentVCPUs, newVCPUs, nil
}
func (q *qemu) cleanup(ctx context.Context) error {
span, _ := katatrace.Trace(ctx, q.Logger(), "cleanup", qemuTracingTags, map[string]string{"sandbox_id": q.id})
func (q *qemu) Cleanup(ctx context.Context) error {
span, _ := katatrace.Trace(ctx, q.Logger(), "Cleanup", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End()
for _, fd := range q.fds {
@@ -2387,7 +2389,7 @@ func (q *qemu) cleanup(ctx context.Context) error {
return nil
}
func (q *qemu) getPids() []int {
func (q *qemu) GetPids() []int {
data, err := ioutil.ReadFile(q.qemuConfig.PidFile)
if err != nil {
q.Logger().WithError(err).Error("Could not read qemu pid file")
@@ -2408,7 +2410,7 @@ func (q *qemu) getPids() []int {
return pids
}
func (q *qemu) getVirtioFsPid() *int {
func (q *qemu) GetVirtioFsPid() *int {
return &q.state.VirtiofsdPid
}
@@ -2454,7 +2456,7 @@ func (q *qemu) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig,
func (q *qemu) toGrpc(ctx context.Context) ([]byte, error) {
q.qmpShutdown()
q.cleanup(ctx)
q.Cleanup(ctx)
qp := qemuGrpc{
ID: q.id,
QmpChannelpath: q.qmpMonitorCh.path,
@@ -2467,14 +2469,14 @@ func (q *qemu) toGrpc(ctx context.Context) ([]byte, error) {
return json.Marshal(&qp)
}
func (q *qemu) save() (s persistapi.HypervisorState) {
func (q *qemu) Save() (s persistapi.HypervisorState) {
// If QEMU isn't even running, there isn't any state to save
// If QEMU isn't even running, there isn't any state to Save
if q.stopped {
return
}
pids := q.getPids()
pids := q.GetPids()
if len(pids) != 0 {
s.Pid = pids[0]
}
@@ -2502,7 +2504,7 @@ func (q *qemu) save() (s persistapi.HypervisorState) {
return
}
func (q *qemu) load(s persistapi.HypervisorState) {
func (q *qemu) Load(s persistapi.HypervisorState) {
q.state.UUID = s.UUID
q.state.HotpluggedMemory = s.HotpluggedMemory
q.state.HotplugVFIOOnRootBus = s.HotplugVFIOOnRootBus
@@ -2520,7 +2522,7 @@ func (q *qemu) load(s persistapi.HypervisorState) {
}
}
func (q *qemu) check() error {
func (q *qemu) Check() error {
q.memoryDumpFlag.Lock()
defer q.memoryDumpFlag.Unlock()
@@ -2540,11 +2542,11 @@ func (q *qemu) check() error {
return nil
}
func (q *qemu) generateSocket(id string) (interface{}, error) {
func (q *qemu) GenerateSocket(id string) (interface{}, error) {
return generateVMSocket(id, q.store.RunVMStoragePath())
}
func (q *qemu) isRateLimiterBuiltin() bool {
func (q *qemu) IsRateLimiterBuiltin() bool {
return false
}

View File

@@ -119,7 +119,7 @@ func TestQemuAmd64AppendImage(t *testing.T) {
imageStat, err := f.Stat()
assert.NoError(err)
// save default supportedQemuMachines options
// Save default supportedQemuMachines options
machinesCopy := make([]govmmQemu.Machine, len(supportedQemuMachines))
assert.Equal(len(supportedQemuMachines), copy(machinesCopy, supportedQemuMachines))

View File

@@ -72,7 +72,7 @@ func TestQemuKernelParameters(t *testing.T) {
testQemuKernelParameters(t, params, expectedOut, false)
}
func TestQemuCreateSandbox(t *testing.T) {
func TestQemuCreateVM(t *testing.T) {
qemuConfig := newQemuConfig()
assert := assert.New(t)
@@ -98,13 +98,13 @@ func TestQemuCreateSandbox(t *testing.T) {
parentDir := filepath.Join(q.store.RunStoragePath(), sandbox.id)
assert.NoError(os.MkdirAll(parentDir, DirMode))
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
assert.NoError(err)
assert.NoError(os.RemoveAll(parentDir))
assert.Exactly(qemuConfig, q.config)
}
func TestQemuCreateSandboxMissingParentDirFail(t *testing.T) {
func TestQemuCreateVMMissingParentDirFail(t *testing.T) {
qemuConfig := newQemuConfig()
assert := assert.New(t)
@@ -130,7 +130,7 @@ func TestQemuCreateSandboxMissingParentDirFail(t *testing.T) {
parentDir := filepath.Join(q.store.RunStoragePath(), sandbox.id)
assert.NoError(os.RemoveAll(parentDir))
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
assert.NoError(err)
}
@@ -171,7 +171,7 @@ func TestQemuMemoryTopology(t *testing.T) {
},
}
hostMemKb, err := getHostMemorySizeKb(procMemInfo)
hostMemKb, err := GetHostMemorySizeKb(procMemInfo)
assert.NoError(err)
memMax := fmt.Sprintf("%dM", int(float64(hostMemKb)/1024))
@@ -195,7 +195,7 @@ func TestQemuKnobs(t *testing.T) {
q := &qemu{
store: sandbox.store,
}
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
assert.NoError(err)
assert.Equal(q.qemuConfig.Knobs.NoUserConfig, true)
@@ -204,14 +204,14 @@ func TestQemuKnobs(t *testing.T) {
assert.Equal(q.qemuConfig.Knobs.NoReboot, true)
}
func testQemuAddDevice(t *testing.T, devInfo interface{}, devType deviceType, expected []govmmQemu.Device) {
func testQemuAddDevice(t *testing.T, devInfo interface{}, devType DeviceType, expected []govmmQemu.Device) {
assert := assert.New(t)
q := &qemu{
ctx: context.Background(),
arch: &qemuArchBase{},
}
err := q.addDevice(context.Background(), devInfo, devType)
err := q.AddDevice(context.Background(), devInfo, devType)
assert.NoError(err)
assert.Exactly(q.qemuConfig.Devices, expected)
}
@@ -237,7 +237,7 @@ func TestQemuAddDeviceFsDev(t *testing.T) {
HostPath: hostPath,
}
testQemuAddDevice(t, volume, fsDev, expectedOut)
testQemuAddDevice(t, volume, FsDev, expectedOut)
}
func TestQemuAddDeviceVhostUserBlk(t *testing.T) {
@@ -258,7 +258,7 @@ func TestQemuAddDeviceVhostUserBlk(t *testing.T) {
Type: config.VhostUserBlk,
}
testQemuAddDevice(t, vDevice, vhostuserDev, expectedOut)
testQemuAddDevice(t, vDevice, VhostuserDev, expectedOut)
}
func TestQemuAddDeviceSerialPortDev(t *testing.T) {
@@ -285,7 +285,7 @@ func TestQemuAddDeviceSerialPortDev(t *testing.T) {
Name: name,
}
testQemuAddDevice(t, socket, serialPortDev, expectedOut)
testQemuAddDevice(t, socket, SerialPortDev, expectedOut)
}
func TestQemuAddDeviceKataVSOCK(t *testing.T) {
@@ -318,7 +318,7 @@ func TestQemuAddDeviceKataVSOCK(t *testing.T) {
VhostFd: vsockFile,
}
testQemuAddDevice(t, vsock, vSockPCIDev, expectedOut)
testQemuAddDevice(t, vsock, VSockPCIDev, expectedOut)
}
func TestQemuGetSandboxConsole(t *testing.T) {
@@ -332,7 +332,7 @@ func TestQemuGetSandboxConsole(t *testing.T) {
sandboxID := "testSandboxID"
expected := filepath.Join(q.store.RunVMStoragePath(), sandboxID, consoleSocket)
proto, result, err := q.getSandboxConsole(q.ctx, sandboxID)
proto, result, err := q.GetVMConsole(q.ctx, sandboxID)
assert.NoError(err)
assert.Equal(result, expected)
assert.Equal(proto, consoleProtoUnix)
@@ -345,7 +345,7 @@ func TestQemuCapabilities(t *testing.T) {
arch: &qemuArchBase{},
}
caps := q.capabilities(q.ctx)
caps := q.Capabilities(q.ctx)
assert.True(caps.IsBlockDeviceHotplugSupported())
}
@@ -401,9 +401,9 @@ func TestHotplugUnsupportedDeviceType(t *testing.T) {
config: qemuConfig,
}
_, err := q.hotplugAddDevice(context.Background(), &memoryDevice{0, 128, uint64(0), false}, fsDev)
_, err := q.HotplugAddDevice(context.Background(), &MemoryDevice{0, 128, uint64(0), false}, FsDev)
assert.Error(err)
_, err = q.hotplugRemoveDevice(context.Background(), &memoryDevice{0, 128, uint64(0), false}, fsDev)
_, err = q.HotplugRemoveDevice(context.Background(), &MemoryDevice{0, 128, uint64(0), false}, FsDev)
assert.Error(err)
}
@@ -430,7 +430,7 @@ func TestQemuCleanup(t *testing.T) {
config: newQemuConfig(),
}
err := q.cleanup(q.ctx)
err := q.Cleanup(q.ctx)
assert.Nil(err)
}
@@ -464,7 +464,7 @@ func TestQemuFileBackedMem(t *testing.T) {
store: sandbox.store,
}
sandbox.config.HypervisorConfig.SharedFS = config.VirtioFS
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
assert.NoError(err)
assert.Equal(q.qemuConfig.Knobs.FileBackedMem, true)
@@ -482,7 +482,7 @@ func TestQemuFileBackedMem(t *testing.T) {
sandbox.config.HypervisorConfig.SharedFS = config.VirtioFS
sandbox.config.HypervisorConfig.MemoryPath = fallbackFileBackedMemDir
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
expectErr := errors.New("VM templating has been enabled with either virtio-fs or file backed memory and this configuration will not work")
assert.Equal(expectErr.Error(), err.Error())
@@ -495,7 +495,7 @@ func TestQemuFileBackedMem(t *testing.T) {
store: sandbox.store,
}
sandbox.config.HypervisorConfig.FileBackedMemRootDir = "/tmp/xyzabc"
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
assert.NoError(err)
assert.Equal(q.qemuConfig.Knobs.FileBackedMem, false)
assert.Equal(q.qemuConfig.Knobs.MemShared, false)
@@ -510,7 +510,7 @@ func TestQemuFileBackedMem(t *testing.T) {
}
sandbox.config.HypervisorConfig.EnableVhostUserStore = true
sandbox.config.HypervisorConfig.HugePages = true
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
assert.NoError(err)
assert.Equal(q.qemuConfig.Knobs.MemShared, true)
@@ -523,7 +523,7 @@ func TestQemuFileBackedMem(t *testing.T) {
}
sandbox.config.HypervisorConfig.EnableVhostUserStore = true
sandbox.config.HypervisorConfig.HugePages = false
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
expectErr = errors.New("Vhost-user-blk/scsi is enabled without HugePages. This configuration will not work")
assert.Equal(expectErr.Error(), err.Error())
@@ -554,7 +554,7 @@ func TestQemuGetpids(t *testing.T) {
qemuConfig := newQemuConfig()
q := &qemu{}
pids := q.getPids()
pids := q.GetPids()
assert.NotNil(pids)
assert.True(len(pids) == 1)
assert.True(pids[0] == 0)
@@ -569,18 +569,18 @@ func TestQemuGetpids(t *testing.T) {
defer os.Remove(tmpfile)
q.qemuConfig.PidFile = tmpfile
pids = q.getPids()
pids = q.GetPids()
assert.True(len(pids) == 1)
assert.True(pids[0] == 0)
err = ioutil.WriteFile(tmpfile, []byte("100"), 0)
assert.Nil(err)
pids = q.getPids()
pids = q.GetPids()
assert.True(len(pids) == 1)
assert.True(pids[0] == 100)
q.state.VirtiofsdPid = 200
pids = q.getPids()
pids = q.GetPids()
assert.True(len(pids) == 2)
assert.True(pids[0] == 100)
assert.True(pids[1] == 200)

View File

@@ -54,9 +54,9 @@ var sandboxTracingTags = map[string]string{
}
const (
// vmStartTimeout represents the time in seconds a sandbox can wait before
// VmStartTimeout represents the time in seconds a sandbox can wait before
// to consider the VM starting operation failed.
vmStartTimeout = 10
VmStartTimeout = 10
// DirMode is the permission bits used for creating a directory
DirMode = os.FileMode(0750) | os.ModeDir
@@ -171,7 +171,7 @@ type Sandbox struct {
ctx context.Context
devManager api.DeviceManager
factory Factory
hypervisor hypervisor
hypervisor Hypervisor
agent agent
store persistapi.PersistDriver
@@ -259,7 +259,7 @@ func (s *Sandbox) GetNetNs() string {
// GetHypervisorPid returns the hypervisor's pid.
func (s *Sandbox) GetHypervisorPid() (int, error) {
pids := s.hypervisor.getPids()
pids := s.hypervisor.GetPids()
if len(pids) == 0 || pids[0] == 0 {
return -1, fmt.Errorf("Invalid hypervisor PID: %+v", pids)
}
@@ -294,7 +294,7 @@ func (s *Sandbox) Release(ctx context.Context) error {
if s.monitor != nil {
s.monitor.stop()
}
s.hypervisor.disconnect(ctx)
s.hypervisor.Disconnect(ctx)
return s.agent.disconnect(ctx)
}
@@ -409,7 +409,7 @@ func createAssets(ctx context.Context, sandboxConfig *SandboxConfig) error {
return err
}
if err := sandboxConfig.HypervisorConfig.addCustomAsset(a); err != nil {
if err := sandboxConfig.HypervisorConfig.AddCustomAsset(a); err != nil {
return err
}
}
@@ -474,7 +474,7 @@ func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac
return s, nil
}
// Below code path is called only during create, because of earlier check.
// Below code path is called only during create, because of earlier Check.
if err := s.agent.createSandbox(ctx, s); err != nil {
return nil, err
}
@@ -556,7 +556,7 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
}
// store doesn't require hypervisor to be stored immediately
if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig); err != nil {
if err = s.hypervisor.CreateVM(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig); err != nil {
return nil, err
}
@@ -755,7 +755,7 @@ func (s *Sandbox) Delete(ctx context.Context) error {
if !rootless.IsRootless() {
if err := s.cgroupsDelete(); err != nil {
s.Logger().WithError(err).Error("failed to cleanup cgroups")
s.Logger().WithError(err).Error("failed to Cleanup cgroups")
}
}
@@ -763,8 +763,8 @@ func (s *Sandbox) Delete(ctx context.Context) error {
s.monitor.stop()
}
if err := s.hypervisor.cleanup(ctx); err != nil {
s.Logger().WithError(err).Error("failed to cleanup hypervisor")
if err := s.hypervisor.Cleanup(ctx); err != nil {
s.Logger().WithError(err).Error("failed to Cleanup hypervisor")
}
s.agent.cleanup(ctx, s)
@@ -979,7 +979,7 @@ func newConsoleWatcher(ctx context.Context, s *Sandbox) (*consoleWatcher, error)
cw consoleWatcher
)
cw.proto, cw.consoleURL, err = s.hypervisor.getSandboxConsole(ctx, s.id)
cw.proto, cw.consoleURL, err = s.hypervisor.GetVMConsole(ctx, s.id)
if err != nil {
return nil, err
}
@@ -1036,7 +1036,7 @@ func (cw *consoleWatcher) start(s *Sandbox) (err error) {
return nil
}
// check if the console watcher has already watched the vm console.
// Check if the console watcher has already watched the vm console.
func (cw *consoleWatcher) consoleWatched() bool {
return cw.conn != nil || cw.ptyConsole != nil
}
@@ -1101,7 +1101,7 @@ func (s *Sandbox) addSwap(ctx context.Context, swapID string, size int64) (*conf
ID: swapID,
Swap: true,
}
_, err = s.hypervisor.hotplugAddDevice(ctx, blockDevice, blockDev)
_, err = s.hypervisor.HotplugAddDevice(ctx, blockDevice, BlockDev)
if err != nil {
err = fmt.Errorf("add swapfile %s device to VM fail %s", swapFile, err.Error())
s.Logger().WithError(err).Error("addSwap")
@@ -1109,7 +1109,7 @@ func (s *Sandbox) addSwap(ctx context.Context, swapID string, size int64) (*conf
}
defer func() {
if err != nil {
_, e := s.hypervisor.hotplugRemoveDevice(ctx, blockDevice, blockDev)
_, e := s.hypervisor.HotplugRemoveDevice(ctx, blockDevice, BlockDev)
if e != nil {
s.Logger().Errorf("remove swapfile %s to VM fail %s", swapFile, e.Error())
}
@@ -1181,7 +1181,7 @@ func (s *Sandbox) startVM(ctx context.Context) (err error) {
defer func() {
if err != nil {
s.hypervisor.stopSandbox(ctx, false)
s.hypervisor.StopVM(ctx, false)
}
}()
@@ -1199,7 +1199,7 @@ func (s *Sandbox) startVM(ctx context.Context) (err error) {
return vm.assignSandbox(s)
}
return s.hypervisor.startSandbox(ctx, vmStartTimeout)
return s.hypervisor.StartVM(ctx, VmStartTimeout)
}); err != nil {
return err
}
@@ -1264,7 +1264,7 @@ func (s *Sandbox) stopVM(ctx context.Context) error {
s.Logger().Info("Stopping VM")
return s.hypervisor.stopSandbox(ctx, s.disableVMShutdown)
return s.hypervisor.StopVM(ctx, s.disableVMShutdown)
}
func (s *Sandbox) addContainer(c *Container) error {
@@ -1539,7 +1539,7 @@ func (s *Sandbox) Stats(ctx context.Context) (SandboxStats, error) {
// TODO Do we want to aggregate the overhead cgroup stats to the sandbox ones?
stats.CgroupStats.CPUStats.CPUUsage.TotalUsage = metrics.CPU.Usage.Total
stats.CgroupStats.MemoryStats.Usage.Usage = metrics.Memory.Usage.Usage
tids, err := s.hypervisor.getThreadIDs(ctx)
tids, err := s.hypervisor.GetThreadIDs(ctx)
if err != nil {
return stats, err
}
@@ -1780,7 +1780,7 @@ func (s *Sandbox) HotplugAddDevice(ctx context.Context, device api.Device, devTy
// adding a group of VFIO devices
for _, dev := range vfioDevices {
if _, err := s.hypervisor.hotplugAddDevice(ctx, dev, vfioDev); err != nil {
if _, err := s.hypervisor.HotplugAddDevice(ctx, dev, VfioDev); err != nil {
s.Logger().
WithFields(logrus.Fields{
"sandbox": s.id,
@@ -1796,14 +1796,14 @@ func (s *Sandbox) HotplugAddDevice(ctx context.Context, device api.Device, devTy
if !ok {
return fmt.Errorf("device type mismatch, expect device type to be %s", devType)
}
_, err := s.hypervisor.hotplugAddDevice(ctx, blockDevice.BlockDrive, blockDev)
_, err := s.hypervisor.HotplugAddDevice(ctx, blockDevice.BlockDrive, BlockDev)
return err
case config.VhostUserBlk:
vhostUserBlkDevice, ok := device.(*drivers.VhostUserBlkDevice)
if !ok {
return fmt.Errorf("device type mismatch, expect device type to be %s", devType)
}
_, err := s.hypervisor.hotplugAddDevice(ctx, vhostUserBlkDevice.VhostUserDeviceAttrs, vhostuserDev)
_, err := s.hypervisor.HotplugAddDevice(ctx, vhostUserBlkDevice.VhostUserDeviceAttrs, VhostuserDev)
return err
case config.DeviceGeneric:
// TODO: what?
@@ -1831,7 +1831,7 @@ func (s *Sandbox) HotplugRemoveDevice(ctx context.Context, device api.Device, de
// remove a group of VFIO devices
for _, dev := range vfioDevices {
if _, err := s.hypervisor.hotplugRemoveDevice(ctx, dev, vfioDev); err != nil {
if _, err := s.hypervisor.HotplugRemoveDevice(ctx, dev, VfioDev); err != nil {
s.Logger().WithError(err).
WithFields(logrus.Fields{
"sandbox": s.id,
@@ -1852,14 +1852,14 @@ func (s *Sandbox) HotplugRemoveDevice(ctx context.Context, device api.Device, de
s.Logger().WithField("path", blockDrive.File).Infof("Skip device: cannot hot remove PMEM devices")
return nil
}
_, err := s.hypervisor.hotplugRemoveDevice(ctx, blockDrive, blockDev)
_, err := s.hypervisor.HotplugRemoveDevice(ctx, blockDrive, BlockDev)
return err
case config.VhostUserBlk:
vhostUserDeviceAttrs, ok := device.GetDeviceInfo().(*config.VhostUserDeviceAttrs)
if !ok {
return fmt.Errorf("device type mismatch, expect device type to be %s", devType)
}
_, err := s.hypervisor.hotplugRemoveDevice(ctx, vhostUserDeviceAttrs, vhostuserDev)
_, err := s.hypervisor.HotplugRemoveDevice(ctx, vhostUserDeviceAttrs, VhostuserDev)
return err
case config.DeviceGeneric:
// TODO: what?
@@ -1886,11 +1886,11 @@ func (s *Sandbox) UnsetSandboxBlockIndex(index int) error {
func (s *Sandbox) AppendDevice(ctx context.Context, device api.Device) error {
switch device.DeviceType() {
case config.VhostUserSCSI, config.VhostUserNet, config.VhostUserBlk, config.VhostUserFS:
return s.hypervisor.addDevice(ctx, device.GetDeviceInfo().(*config.VhostUserDeviceAttrs), vhostuserDev)
return s.hypervisor.AddDevice(ctx, device.GetDeviceInfo().(*config.VhostUserDeviceAttrs), VhostuserDev)
case config.DeviceVFIO:
vfioDevs := device.GetDeviceInfo().([]*config.VFIODev)
for _, d := range vfioDevs {
return s.hypervisor.addDevice(ctx, *d, vfioDev)
return s.hypervisor.AddDevice(ctx, *d, VfioDev)
}
default:
s.Logger().WithField("device-type", device.DeviceType()).
@@ -1949,11 +1949,11 @@ func (s *Sandbox) updateResources(ctx context.Context) error {
return err
}
// Add default vcpus for sandbox
sandboxVCPUs += s.hypervisor.hypervisorConfig().NumVCPUs
sandboxVCPUs += s.hypervisor.HypervisorConfig().NumVCPUs
sandboxMemoryByte, sandboxneedPodSwap, sandboxSwapByte := s.calculateSandboxMemory()
// Add default / rsvd memory for sandbox.
hypervisorMemoryByte := int64(s.hypervisor.hypervisorConfig().MemorySize) << utils.MibToBytesShift
hypervisorMemoryByte := int64(s.hypervisor.HypervisorConfig().MemorySize) << utils.MibToBytesShift
sandboxMemoryByte += hypervisorMemoryByte
if sandboxneedPodSwap {
sandboxSwapByte += hypervisorMemoryByte
@@ -1970,7 +1970,7 @@ func (s *Sandbox) updateResources(ctx context.Context) error {
// Update VCPUs
s.Logger().WithField("cpus-sandbox", sandboxVCPUs).Debugf("Request to hypervisor to update vCPUs")
oldCPUs, newCPUs, err := s.hypervisor.resizeVCPUs(ctx, sandboxVCPUs)
oldCPUs, newCPUs, err := s.hypervisor.ResizeVCPUs(ctx, sandboxVCPUs)
if err != nil {
return err
}
@@ -1988,7 +1988,7 @@ func (s *Sandbox) updateResources(ctx context.Context) error {
// Update Memory
s.Logger().WithField("memory-sandbox-size-byte", sandboxMemoryByte).Debugf("Request to hypervisor to update memory")
newMemory, updatedMemoryDevice, err := s.hypervisor.resizeMemory(ctx, uint32(sandboxMemoryByte>>utils.MibToBytesShift), s.state.GuestMemoryBlockSizeMB, s.state.GuestMemoryHotplugProbe)
newMemory, updatedMemoryDevice, err := s.hypervisor.ResizeMemory(ctx, uint32(sandboxMemoryByte>>utils.MibToBytesShift), s.state.GuestMemoryBlockSizeMB, s.state.GuestMemoryHotplugProbe)
if err != nil {
if err == noGuestMemHotplugErr {
s.Logger().Warnf("%s, memory specifications cannot be guaranteed", err)
@@ -1997,10 +1997,10 @@ func (s *Sandbox) updateResources(ctx context.Context) error {
}
}
s.Logger().Debugf("Sandbox memory size: %d MB", newMemory)
if s.state.GuestMemoryHotplugProbe && updatedMemoryDevice.addr != 0 {
if s.state.GuestMemoryHotplugProbe && updatedMemoryDevice.Addr != 0 {
// notify the guest kernel about memory hot-add event, before onlining them
s.Logger().Debugf("notify guest kernel memory hot-add event via probe interface, memory device located at 0x%x", updatedMemoryDevice.addr)
if err := s.agent.memHotplugByProbe(ctx, updatedMemoryDevice.addr, uint32(updatedMemoryDevice.sizeMB), s.state.GuestMemoryBlockSizeMB); err != nil {
s.Logger().Debugf("notify guest kernel memory hot-add event via probe interface, memory device located at 0x%x", updatedMemoryDevice.Addr)
if err := s.agent.memHotplugByProbe(ctx, updatedMemoryDevice.Addr, uint32(updatedMemoryDevice.SizeMB), s.state.GuestMemoryBlockSizeMB); err != nil {
return err
}
}
@@ -2157,7 +2157,7 @@ func (s *Sandbox) cgroupsDelete() error {
// constrainHypervisor will place the VMM and vCPU threads into cgroups.
func (s *Sandbox) constrainHypervisor(ctx context.Context) error {
tids, err := s.hypervisor.getThreadIDs(ctx)
tids, err := s.hypervisor.GetThreadIDs(ctx)
if err != nil {
return fmt.Errorf("failed to get thread ids from hypervisor: %v", err)
}
@@ -2197,7 +2197,7 @@ func (s *Sandbox) setupCgroups() error {
// This OCI specification was patched when the sandbox was created
// by containerCapabilities(), SetEphemeralStorageType() and others
// in order to support:
// * capabilities
// * Capabilities
// * Ephemeral storage
// * k8s empty dir
// If you need the original (vanilla) OCI spec,
@@ -2264,7 +2264,7 @@ func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err
var config SandboxConfig
// load sandbox config fromld store.
// Load sandbox config fromld store.
c, err := loadSandboxConfig(sandboxID)
if err != nil {
virtLog.WithError(err).Warning("failed to get sandbox config from store")

View File

@@ -132,7 +132,7 @@ func RegisterMetrics() {
// UpdateRuntimeMetrics update shim/hypervisor's metrics
func (s *Sandbox) UpdateRuntimeMetrics() error {
pids := s.hypervisor.getPids()
pids := s.hypervisor.GetPids()
if len(pids) == 0 {
return nil
}
@@ -183,7 +183,7 @@ func (s *Sandbox) UpdateRuntimeMetrics() error {
}
func (s *Sandbox) UpdateVirtiofsdMetrics() error {
vfsPid := s.hypervisor.getVirtioFsPid()
vfsPid := s.hypervisor.GetVirtioFsPid()
if vfsPid == nil {
// virtiofsd is not mandatory for a VMM.
return nil

View File

@@ -203,7 +203,7 @@ func testForceSandboxStateChangeAndCheck(t *testing.T, p *Sandbox, newSandboxSta
// force sandbox state change
err := p.setSandboxState(newSandboxState.State)
assert.NoError(t, err)
// check the in-memory state is correct
// Check the in-memory state is correct
if p.state.State != newSandboxState.State {
return fmt.Errorf("Expected state %v, got %v", newSandboxState.State, p.state.State)
}
@@ -216,7 +216,7 @@ func testForceContainerStateChangeAndCheck(t *testing.T, p *Sandbox, c *Containe
err := c.setContainerState(newContainerState.State)
assert.NoError(t, err)
// check the in-memory state is correct
// Check the in-memory state is correct
if c.state.State != newContainerState.State {
return fmt.Errorf("Expected state %v, got %v", newContainerState.State, c.state.State)
}
@@ -225,7 +225,7 @@ func testForceContainerStateChangeAndCheck(t *testing.T, p *Sandbox, c *Containe
}
func testCheckSandboxOnDiskState(p *Sandbox, sandboxState types.SandboxState) error {
// check on-disk state is correct
// Check on-disk state is correct
if p.state.State != sandboxState.State {
return fmt.Errorf("Expected state %v, got %v", sandboxState.State, p.state.State)
}
@@ -234,7 +234,7 @@ func testCheckSandboxOnDiskState(p *Sandbox, sandboxState types.SandboxState) er
}
func testCheckContainerOnDiskState(c *Container, containerState types.ContainerState) error {
// check on-disk state is correct
// Check on-disk state is correct
if c.state.State != containerState.State {
return fmt.Errorf("Expected state %v, got %v", containerState.State, c.state.State)
}
@@ -251,7 +251,7 @@ func writeContainerConfig() (string, error) {
{
"ociVersion": "1.0.0-rc2-dev",
"process": {
"capabilities": [
"Capabilities": [
]
}
}`
@@ -311,7 +311,7 @@ func TestSandboxSetSandboxAndContainerState(t *testing.T) {
c, err := p.findContainer(contID)
assert.NoError(err)
// check initial sandbox and container states
// Check initial sandbox and container states
if err := testCheckInitSandboxAndContainerStates(p, initialSandboxState, c, initialContainerState); err != nil {
t.Error(err)
}
@@ -1377,7 +1377,7 @@ func TestSandboxCreationFromConfigRollbackFromCreateSandbox(t *testing.T) {
// Fail at createSandbox: QEMU path does not exist, it is expected. Then rollback is called
assert.Error(err)
// check dirs
// Check dirs
err = checkSandboxRemains()
assert.NoError(err)
}

View File

@@ -90,18 +90,18 @@ func (endpoint *TapEndpoint) Detach(ctx context.Context, netNsCreated bool, netN
}
// HotAttach for the tap endpoint uses hot plug device
func (endpoint *TapEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
func (endpoint *TapEndpoint) HotAttach(ctx context.Context, h Hypervisor) error {
networkLogger().Info("Hot attaching tap endpoint")
span, ctx := tapTrace(ctx, "HotAttach", endpoint)
defer span.End()
if err := tapNetwork(endpoint, h.hypervisorConfig().NumVCPUs, h.hypervisorConfig().DisableVhostNet); err != nil {
if err := tapNetwork(endpoint, h.HypervisorConfig().NumVCPUs, h.HypervisorConfig().DisableVhostNet); err != nil {
networkLogger().WithError(err).Error("Error bridging tap ep")
return err
}
if _, err := h.hotplugAddDevice(ctx, endpoint, netDev); err != nil {
if _, err := h.HotplugAddDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error attach tap ep")
return err
}
@@ -109,7 +109,7 @@ func (endpoint *TapEndpoint) HotAttach(ctx context.Context, h hypervisor) error
}
// HotDetach for the tap endpoint uses hot pull device
func (endpoint *TapEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
func (endpoint *TapEndpoint) HotDetach(ctx context.Context, h Hypervisor, netNsCreated bool, netNsPath string) error {
networkLogger().Info("Hot detaching tap endpoint")
span, ctx := tapTrace(ctx, "HotDetach", endpoint)
@@ -121,7 +121,7 @@ func (endpoint *TapEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsC
networkLogger().WithError(err).Warn("Error un-bridging tap ep")
}
if _, err := h.hotplugRemoveDevice(ctx, endpoint, netDev); err != nil {
if _, err := h.HotplugRemoveDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error detach tap ep")
return err
}

View File

@@ -82,7 +82,7 @@ func (endpoint *TuntapEndpoint) Attach(ctx context.Context, s *Sandbox) error {
return err
}
return h.addDevice(ctx, endpoint, netDev)
return h.AddDevice(ctx, endpoint, NetDev)
}
// Detach for the tun/tap endpoint tears down the tap
@@ -101,18 +101,18 @@ func (endpoint *TuntapEndpoint) Detach(ctx context.Context, netNsCreated bool, n
}
// HotAttach for the tun/tap endpoint uses hot plug device
func (endpoint *TuntapEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
func (endpoint *TuntapEndpoint) HotAttach(ctx context.Context, h Hypervisor) error {
networkLogger().Info("Hot attaching tun/tap endpoint")
span, ctx := tuntapTrace(ctx, "HotAttach", endpoint)
defer span.End()
if err := tuntapNetwork(endpoint, h.hypervisorConfig().NumVCPUs, h.hypervisorConfig().DisableVhostNet); err != nil {
if err := tuntapNetwork(endpoint, h.HypervisorConfig().NumVCPUs, h.HypervisorConfig().DisableVhostNet); err != nil {
networkLogger().WithError(err).Error("Error bridging tun/tap ep")
return err
}
if _, err := h.hotplugAddDevice(ctx, endpoint, netDev); err != nil {
if _, err := h.HotplugAddDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error attach tun/tap ep")
return err
}
@@ -120,7 +120,7 @@ func (endpoint *TuntapEndpoint) HotAttach(ctx context.Context, h hypervisor) err
}
// HotDetach for the tun/tap endpoint uses hot pull device
func (endpoint *TuntapEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
func (endpoint *TuntapEndpoint) HotDetach(ctx context.Context, h Hypervisor, netNsCreated bool, netNsPath string) error {
networkLogger().Info("Hot detaching tun/tap endpoint")
span, ctx := tuntapTrace(ctx, "HotDetach", endpoint)
@@ -132,7 +132,7 @@ func (endpoint *TuntapEndpoint) HotDetach(ctx context.Context, h hypervisor, net
networkLogger().WithError(err).Warn("Error un-bridging tun/tap ep")
}
if _, err := h.hotplugRemoveDevice(ctx, endpoint, netDev); err != nil {
if _, err := h.HotplugRemoveDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error detach tun/tap ep")
return err
}

View File

@@ -103,7 +103,7 @@ func (endpoint *VethEndpoint) Attach(ctx context.Context, s *Sandbox) error {
return err
}
return h.addDevice(ctx, endpoint, netDev)
return h.AddDevice(ctx, endpoint, NetDev)
}
// Detach for the veth endpoint tears down the tap and bridge
@@ -124,7 +124,7 @@ func (endpoint *VethEndpoint) Detach(ctx context.Context, netNsCreated bool, net
}
// HotAttach for the veth endpoint uses hot plug device
func (endpoint *VethEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
func (endpoint *VethEndpoint) HotAttach(ctx context.Context, h Hypervisor) error {
span, ctx := vethTrace(ctx, "HotAttach", endpoint)
defer span.End()
@@ -133,7 +133,7 @@ func (endpoint *VethEndpoint) HotAttach(ctx context.Context, h hypervisor) error
return err
}
if _, err := h.hotplugAddDevice(ctx, endpoint, netDev); err != nil {
if _, err := h.HotplugAddDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error attach virtual ep")
return err
}
@@ -141,7 +141,7 @@ func (endpoint *VethEndpoint) HotAttach(ctx context.Context, h hypervisor) error
}
// HotDetach for the veth endpoint uses hot pull device
func (endpoint *VethEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
func (endpoint *VethEndpoint) HotDetach(ctx context.Context, h Hypervisor, netNsCreated bool, netNsPath string) error {
if !netNsCreated {
return nil
}
@@ -155,7 +155,7 @@ func (endpoint *VethEndpoint) HotDetach(ctx context.Context, h hypervisor, netNs
networkLogger().WithError(err).Warn("Error un-bridging virtual ep")
}
if _, err := h.hotplugRemoveDevice(ctx, endpoint, netDev); err != nil {
if _, err := h.HotplugRemoveDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error detach virtual ep")
return err
}

View File

@@ -96,7 +96,7 @@ func (endpoint *VhostUserEndpoint) Attach(ctx context.Context, s *Sandbox) error
Type: config.VhostUserNet,
}
return s.hypervisor.addDevice(ctx, d, vhostuserDev)
return s.hypervisor.AddDevice(ctx, d, VhostuserDev)
}
// Detach for vhostuser endpoint
@@ -105,12 +105,12 @@ func (endpoint *VhostUserEndpoint) Detach(ctx context.Context, netNsCreated bool
}
// HotAttach for vhostuser endpoint not supported yet
func (endpoint *VhostUserEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
func (endpoint *VhostUserEndpoint) HotAttach(ctx context.Context, h Hypervisor) error {
return fmt.Errorf("VhostUserEndpoint does not support Hot attach")
}
// HotDetach for vhostuser endpoint not supported yet
func (endpoint *VhostUserEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
func (endpoint *VhostUserEndpoint) HotDetach(ctx context.Context, h Hypervisor, netNsCreated bool, netNsPath string) error {
return fmt.Errorf("VhostUserEndpoint does not support Hot detach")
}
@@ -133,7 +133,7 @@ func findVhostUserNetSocketPath(netInfo NetworkInfo) (string, error) {
return "", nil
}
// check for socket file existence at known location.
// Check for socket file existence at known location.
for _, addr := range netInfo.Addrs {
socketPath := fmt.Sprintf(hostSocketSearchPath, addr.IPNet.IP)
if _, err := os.Stat(socketPath); err == nil {

View File

@@ -134,7 +134,7 @@ func TestMain(m *testing.M) {
}
utils.StartCmd = func(c *exec.Cmd) error {
//startSandbox will check if the hypervisor is alive and
//StartVM will check if the hypervisor is alive and
// checks for the PID is running, lets fake it using our
// own PID
c.Process = &os.Process{Pid: os.Getpid()}

View File

@@ -23,7 +23,7 @@ var urandomDev = "/dev/urandom"
// VM is abstraction of a virtual machine.
type VM struct {
hypervisor hypervisor
hypervisor Hypervisor
agent agent
store persistapi.PersistDriver
@@ -42,9 +42,9 @@ type VMConfig struct {
HypervisorConfig HypervisorConfig
}
// Valid check VMConfig validity.
// Valid checks VMConfig validity.
func (c *VMConfig) Valid() error {
return c.HypervisorConfig.valid()
return c.HypervisorConfig.Valid()
}
// ToGrpc convert VMConfig struct to grpc format pb.GrpcVMConfig.
@@ -111,7 +111,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
}
}()
if err = hypervisor.createSandbox(ctx, id, NetworkNamespace{}, &config.HypervisorConfig); err != nil {
if err = hypervisor.CreateVM(ctx, id, NetworkNamespace{}, &config.HypervisorConfig); err != nil {
return nil, err
}
@@ -130,21 +130,21 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
}
// 3. boot up guest vm
if err = hypervisor.startSandbox(ctx, vmStartTimeout); err != nil {
if err = hypervisor.StartVM(ctx, VmStartTimeout); err != nil {
return nil, err
}
defer func() {
if err != nil {
virtLog.WithField("vm", id).WithError(err).Info("clean up vm")
hypervisor.stopSandbox(ctx, false)
hypervisor.StopVM(ctx, false)
}
}()
// 4. check agent aliveness
// VMs booted from template are paused, do not check
// 4. Check agent aliveness
// VMs booted from template are paused, do not check
if !config.HypervisorConfig.BootFromTemplate {
virtLog.WithField("vm", id).Info("check agent status")
virtLog.WithField("vm", id).Info("Check agent status")
err = agent.check(ctx)
if err != nil {
return nil, err
@@ -215,25 +215,25 @@ func (v *VM) logger() logrus.FieldLogger {
// Pause pauses a VM.
func (v *VM) Pause(ctx context.Context) error {
v.logger().Info("pause vm")
return v.hypervisor.pauseSandbox(ctx)
return v.hypervisor.PauseVM(ctx)
}
// Save saves a VM to persistent disk.
func (v *VM) Save() error {
v.logger().Info("save vm")
return v.hypervisor.saveSandbox()
v.logger().Info("Save vm")
return v.hypervisor.SaveVM()
}
// Resume resumes a paused VM.
func (v *VM) Resume(ctx context.Context) error {
v.logger().Info("resume vm")
return v.hypervisor.resumeSandbox(ctx)
return v.hypervisor.ResumeVM(ctx)
}
// Start kicks off a configured VM.
func (v *VM) Start(ctx context.Context) error {
v.logger().Info("start vm")
return v.hypervisor.startSandbox(ctx, vmStartTimeout)
return v.hypervisor.StartVM(ctx, VmStartTimeout)
}
// Disconnect agent connections to a VM
@@ -241,7 +241,7 @@ func (v *VM) Disconnect(ctx context.Context) error {
v.logger().Info("kill vm")
if err := v.agent.disconnect(ctx); err != nil {
v.logger().WithError(err).Error("failed to disconnect agent")
v.logger().WithError(err).Error("failed to Disconnect agent")
}
return nil
@@ -251,7 +251,7 @@ func (v *VM) Disconnect(ctx context.Context) error {
func (v *VM) Stop(ctx context.Context) error {
v.logger().Info("stop vm")
if err := v.hypervisor.stopSandbox(ctx, false); err != nil {
if err := v.hypervisor.StopVM(ctx, false); err != nil {
return err
}
@@ -262,7 +262,7 @@ func (v *VM) Stop(ctx context.Context) error {
func (v *VM) AddCPUs(ctx context.Context, num uint32) error {
if num > 0 {
v.logger().Infof("hot adding %d vCPUs", num)
if _, err := v.hypervisor.hotplugAddDevice(ctx, num, cpuDev); err != nil {
if _, err := v.hypervisor.HotplugAddDevice(ctx, num, CpuDev); err != nil {
return err
}
v.cpuDelta += num
@@ -276,8 +276,8 @@ func (v *VM) AddCPUs(ctx context.Context, num uint32) error {
func (v *VM) AddMemory(ctx context.Context, numMB uint32) error {
if numMB > 0 {
v.logger().Infof("hot adding %d MB memory", numMB)
dev := &memoryDevice{1, int(numMB), 0, false}
if _, err := v.hypervisor.hotplugAddDevice(ctx, dev, memoryDev); err != nil {
dev := &MemoryDevice{1, int(numMB), 0, false}
if _, err := v.hypervisor.HotplugAddDevice(ctx, dev, MemoryDev); err != nil {
return err
}
}
@@ -381,7 +381,7 @@ func (v *VM) ToGrpc(ctx context.Context, config VMConfig) (*pb.GrpcVM, error) {
func (v *VM) GetVMStatus() *pb.GrpcVMStatus {
return &pb.GrpcVMStatus{
Pid: int64(getHypervisorPid(v.hypervisor)),
Pid: int64(GetHypervisorPid(v.hypervisor)),
Cpu: v.cpu,
Memory: v.memory,
}