From 4ac675453f17e0f5412157f5e2a94f45ae4f052d Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 13 Jul 2018 18:07:28 +0800 Subject: [PATCH 01/11] qemu: remove append9PVolumes It is not used and we actully cannot append multiple 9pfs volumes to a guest. Signed-off-by: Peng Tao --- virtcontainers/qemu.go | 1 - virtcontainers/qemu_arch_base.go | 12 --------- virtcontainers/qemu_arch_base_test.go | 39 --------------------------- 3 files changed, 52 deletions(-) diff --git a/virtcontainers/qemu.go b/virtcontainers/qemu.go index a018f578c..53dbffa78 100644 --- a/virtcontainers/qemu.go +++ b/virtcontainers/qemu.go @@ -359,7 +359,6 @@ func (q *qemu) createSandbox(sandboxConfig SandboxConfig) error { // bridge gets the first available PCI address i.e bridgePCIStartAddr devices = q.arch.appendBridges(devices, q.state.Bridges) - devices = q.arch.append9PVolumes(devices, sandboxConfig.Volumes) console, err := q.getSandboxConsole(sandboxConfig.ID) if err != nil { return err diff --git a/virtcontainers/qemu_arch_base.go b/virtcontainers/qemu_arch_base.go index 54dbc3d1e..e0188c4d3 100644 --- a/virtcontainers/qemu_arch_base.go +++ b/virtcontainers/qemu_arch_base.go @@ -50,9 +50,6 @@ type qemuArch interface { // memoryTopology returns the memory topology using the given amount of memoryMb and hostMemoryMb memoryTopology(memoryMb, hostMemoryMb uint64) govmmQemu.Memory - // append9PVolumes appends volumes to devices - append9PVolumes(devices []govmmQemu.Device, volumes []Volume) []govmmQemu.Device - // appendConsole appends a console to devices appendConsole(devices []govmmQemu.Device, path string) []govmmQemu.Device @@ -253,15 +250,6 @@ func (q *qemuArchBase) memoryTopology(memoryMb, hostMemoryMb uint64) govmmQemu.M return memory } -func (q *qemuArchBase) append9PVolumes(devices []govmmQemu.Device, volumes []Volume) []govmmQemu.Device { - // Add the shared volumes - for _, v := range volumes { - devices = q.append9PVolume(devices, v) - } - - return devices -} - func (q *qemuArchBase) appendConsole(devices []govmmQemu.Device, path string) []govmmQemu.Device { serial := govmmQemu.SerialDevice{ Driver: govmmQemu.VirtioSerial, diff --git a/virtcontainers/qemu_arch_base_test.go b/virtcontainers/qemu_arch_base_test.go index 906d354cc..77e3f880c 100644 --- a/virtcontainers/qemu_arch_base_test.go +++ b/virtcontainers/qemu_arch_base_test.go @@ -206,8 +206,6 @@ func testQemuArchBaseAppend(t *testing.T, structure interface{}, expected []govm devices = qemuArchBase.append9PVolume(devices, s) case Socket: devices = qemuArchBase.appendSocket(devices, s) - case []Volume: - devices = qemuArchBase.append9PVolumes(devices, s) case drivers.Drive: devices = qemuArchBase.appendBlockDevice(devices, s) case drivers.VFIODevice: @@ -219,43 +217,6 @@ func testQemuArchBaseAppend(t *testing.T, structure interface{}, expected []govm assert.Equal(devices, expected) } -func TestQemuArchBaseAppend9PVolumes(t *testing.T) { - volMountTag := "testVolMountTag" - volHostPath := "testVolHostPath" - - expectedOut := []govmmQemu.Device{ - govmmQemu.FSDevice{ - Driver: govmmQemu.Virtio9P, - FSDriver: govmmQemu.Local, - ID: fmt.Sprintf("extra-9p-%s", fmt.Sprintf("%s.1", volMountTag)), - Path: fmt.Sprintf("%s.1", volHostPath), - MountTag: fmt.Sprintf("%s.1", volMountTag), - SecurityModel: govmmQemu.None, - }, - govmmQemu.FSDevice{ - Driver: govmmQemu.Virtio9P, - FSDriver: govmmQemu.Local, - ID: fmt.Sprintf("extra-9p-%s", fmt.Sprintf("%s.2", volMountTag)), - Path: fmt.Sprintf("%s.2", volHostPath), - MountTag: fmt.Sprintf("%s.2", volMountTag), - 
SecurityModel: govmmQemu.None, - }, - } - - volumes := []Volume{ - { - MountTag: fmt.Sprintf("%s.1", volMountTag), - HostPath: fmt.Sprintf("%s.1", volHostPath), - }, - { - MountTag: fmt.Sprintf("%s.2", volMountTag), - HostPath: fmt.Sprintf("%s.2", volHostPath), - }, - } - - testQemuArchBaseAppend(t, volumes, expectedOut) -} - func TestQemuArchBaseAppendConsoles(t *testing.T) { var devices []govmmQemu.Device assert := assert.New(t) From 18e6a6effc1880a2a7cc21c1256c60ec579beeda Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 13 Jul 2018 18:12:08 +0800 Subject: [PATCH 02/11] hypervisor: decouple hypervisor from sandbox A hypervisor implementation does not need to depend on a sandbox structure. Decouple them in preparation for vm factory. Signed-off-by: Peng Tao --- virtcontainers/hypervisor.go | 4 +- virtcontainers/mock_hypervisor.go | 6 +- virtcontainers/mock_hypervisor_test.go | 10 +- virtcontainers/qemu.go | 147 +++++++++++++++---------- virtcontainers/qemu_arch_base_test.go | 2 +- virtcontainers/qemu_test.go | 6 +- virtcontainers/sandbox.go | 4 +- 7 files changed, 107 insertions(+), 72 deletions(-) diff --git a/virtcontainers/hypervisor.go b/virtcontainers/hypervisor.go index 75d2a915d..3950f890c 100644 --- a/virtcontainers/hypervisor.go +++ b/virtcontainers/hypervisor.go @@ -499,8 +499,8 @@ func RunningOnVMM(cpuInfoPath string) (bool, error) { // hypervisor is the virtcontainers hypervisor interface. // The default hypervisor implementation is Qemu. type hypervisor interface { - init(sandbox *Sandbox) error - createSandbox(sandboxConfig SandboxConfig) error + init(id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error + createSandbox() error startSandbox() error waitSandbox(timeout int) error stopSandbox() error diff --git a/virtcontainers/mock_hypervisor.go b/virtcontainers/mock_hypervisor.go index 89824cdfe..f86d37253 100644 --- a/virtcontainers/mock_hypervisor.go +++ b/virtcontainers/mock_hypervisor.go @@ -9,8 +9,8 @@ type mockHypervisor struct { vCPUs uint32 } -func (m *mockHypervisor) init(sandbox *Sandbox) error { - valid, err := sandbox.config.HypervisorConfig.valid() +func (m *mockHypervisor) init(id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error { + valid, err := hypervisorConfig.valid() if valid == false || err != nil { return err } @@ -22,7 +22,7 @@ func (m *mockHypervisor) capabilities() capabilities { return capabilities{} } -func (m *mockHypervisor) createSandbox(sandboxConfig SandboxConfig) error { +func (m *mockHypervisor) createSandbox() error { return nil } diff --git a/virtcontainers/mock_hypervisor_test.go b/virtcontainers/mock_hypervisor_test.go index 86a859035..fae4b73ca 100644 --- a/virtcontainers/mock_hypervisor_test.go +++ b/virtcontainers/mock_hypervisor_test.go @@ -15,16 +15,18 @@ func TestMockHypervisorInit(t *testing.T) { sandbox := &Sandbox{ config: &SandboxConfig{ + ID: "mock_sandbox", HypervisorConfig: HypervisorConfig{ KernelPath: "", ImagePath: "", HypervisorPath: "", }, }, + storage: &filesystem{}, } // wrong config - if err := m.init(sandbox); err == nil { + if err := m.init(sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err == nil { t.Fatal() } @@ -35,7 +37,7 @@ func TestMockHypervisorInit(t *testing.T) { } // right config - if err := m.init(sandbox); err != nil { + if err := m.init(sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil { t.Fatal(err) } } @@ 
-43,9 +45,7 @@ func TestMockHypervisorInit(t *testing.T) { func TestMockHypervisorCreateSandbox(t *testing.T) { var m *mockHypervisor - config := SandboxConfig{} - - if err := m.createSandbox(config); err != nil { + if err := m.createSandbox(); err != nil { t.Fatal(err) } } diff --git a/virtcontainers/qemu.go b/virtcontainers/qemu.go index 53dbffa78..f59aa3301 100644 --- a/virtcontainers/qemu.go +++ b/virtcontainers/qemu.go @@ -47,16 +47,18 @@ type QemuState struct { // qemu is an Hypervisor interface implementation for the Linux qemu hypervisor. type qemu struct { + id string + vmConfig Resources + storage resourceStorage + config HypervisorConfig qmpMonitorCh qmpChannel qemuConfig govmmQemu.Config - sandbox *Sandbox - state QemuState arch qemuArch @@ -66,7 +68,7 @@ const qmpCapErrMsg = "Failed to negoatiate QMP capabilities" const qmpSocket = "qmp.sock" -const defaultConsole = "console.sock" +const consoleSocket = "console.sock" var qemuMajorVersion int var qemuMinorVersion int @@ -170,25 +172,26 @@ func (q *qemu) qemuPath() (string, error) { } // init intializes the Qemu structure. -func (q *qemu) init(sandbox *Sandbox) error { - valid, err := sandbox.config.HypervisorConfig.valid() +func (q *qemu) init(id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error { + valid, err := hypervisorConfig.valid() if valid == false || err != nil { return err } - q.vmConfig = sandbox.config.VMConfig - q.config = sandbox.config.HypervisorConfig - q.sandbox = sandbox + q.id = id + q.storage = storage + q.vmConfig = vmConfig + q.config = *hypervisorConfig q.arch = newQemuArch(q.config) - if err = sandbox.storage.fetchHypervisorState(sandbox.id, &q.state); err != nil { + if err = q.storage.fetchHypervisorState(q.id, &q.state); err != nil { q.Logger().Debug("Creating bridges") q.state.Bridges = q.arch.bridges(q.config.DefaultBridges) q.Logger().Debug("Creating UUID") q.state.UUID = uuid.Generate().String() - if err = sandbox.storage.storeHypervisorState(sandbox.id, q.state); err != nil { + if err = q.storage.storeHypervisorState(q.id, q.state); err != nil { return err } } @@ -238,17 +241,17 @@ func (q *qemu) memoryTopology() (govmmQemu.Memory, error) { return q.arch.memoryTopology(memMb, hostMemMb), nil } -func (q *qemu) qmpSocketPath(sandboxID string) (string, error) { - return utils.BuildSocketPath(runStoragePath, sandboxID, qmpSocket) +func (q *qemu) qmpSocketPath(id string) (string, error) { + return utils.BuildSocketPath(runStoragePath, id, qmpSocket) } -func (q *qemu) getQemuMachine(sandboxConfig SandboxConfig) (govmmQemu.Machine, error) { +func (q *qemu) getQemuMachine() (govmmQemu.Machine, error) { machine, err := q.arch.machine() if err != nil { return govmmQemu.Machine{}, err } - accelerators := sandboxConfig.HypervisorConfig.MachineAccelerators + accelerators := q.config.MachineAccelerators if accelerators != "" { if !strings.HasPrefix(accelerators, ",") { accelerators = fmt.Sprintf(",%s", accelerators) @@ -275,11 +278,65 @@ func (q *qemu) appendImage(devices []govmmQemu.Device) ([]govmmQemu.Device, erro return devices, nil } -// createSandbox is the Hypervisor sandbox creation implementation for govmmQemu. 
-func (q *qemu) createSandbox(sandboxConfig SandboxConfig) error { +func (q *qemu) createQmpSocket() ([]govmmQemu.QMPSocket, error) { + monitorSockPath, err := q.qmpSocketPath(q.id) + if err != nil { + return nil, err + } + + q.qmpMonitorCh = qmpChannel{ + ctx: context.Background(), + path: monitorSockPath, + } + + err = os.MkdirAll(filepath.Dir(monitorSockPath), dirMode) + if err != nil { + return nil, err + } + + return []govmmQemu.QMPSocket{ + { + Type: "unix", + Name: q.qmpMonitorCh.path, + Server: true, + NoWait: true, + }, + }, nil +} + +func (q *qemu) buildDevices(initrdPath string) ([]govmmQemu.Device, *govmmQemu.IOThread, error) { var devices []govmmQemu.Device - machine, err := q.getQemuMachine(sandboxConfig) + console, err := q.getSandboxConsole(q.id) + if err != nil { + return nil, nil, err + } + + // Add bridges before any other devices. This way we make sure that + // bridge gets the first available PCI address i.e bridgePCIStartAddr + devices = q.arch.appendBridges(devices, q.state.Bridges) + + devices = q.arch.appendConsole(devices, console) + + if initrdPath == "" { + devices, err = q.appendImage(devices) + if err != nil { + return nil, nil, err + } + } + + var ioThread *govmmQemu.IOThread + if q.config.BlockDeviceDriver == VirtioSCSI { + devices, ioThread = q.arch.appendSCSIController(devices, q.config.EnableIOThreads) + } + + return devices, ioThread, nil + +} + +// createSandbox is the Hypervisor sandbox creation implementation for govmmQemu. +func (q *qemu) createSandbox() error { + machine, err := q.getQemuMachine() if err != nil { return err } @@ -314,7 +371,7 @@ func (q *qemu) createSandbox(sandboxConfig SandboxConfig) error { // Pass the sandbox name to the agent via the kernel command-line to // allow the agent to use it in log messages. - params := q.kernelParameters() + " " + "agent.sandbox=" + sandboxConfig.ID + params := q.kernelParameters() + " " + "agent.sandbox=" + q.id kernel := govmmQemu.Kernel{ Path: kernelPath, @@ -331,7 +388,7 @@ func (q *qemu) createSandbox(sandboxConfig SandboxConfig) error { return fmt.Errorf("UUID should not be empty") } - monitorSockPath, err := q.qmpSocketPath(sandboxConfig.ID) + monitorSockPath, err := q.qmpSocketPath(q.id) if err != nil { return err } @@ -346,41 +403,19 @@ func (q *qemu) createSandbox(sandboxConfig SandboxConfig) error { return err } - qmpSockets := []govmmQemu.QMPSocket{ - { - Type: "unix", - Name: q.qmpMonitorCh.path, - Server: true, - NoWait: true, - }, - } - - // Add bridges before any other devices. 
This way we make sure that - // bridge gets the first available PCI address i.e bridgePCIStartAddr - devices = q.arch.appendBridges(devices, q.state.Bridges) - - console, err := q.getSandboxConsole(sandboxConfig.ID) + qmpSockets, err := q.createQmpSocket() if err != nil { return err } - devices = q.arch.appendConsole(devices, console) - - if initrdPath == "" { - devices, err = q.appendImage(devices) - if err != nil { - return err - } - } - - var ioThread *govmmQemu.IOThread - if q.config.BlockDeviceDriver == VirtioSCSI { - devices, ioThread = q.arch.appendSCSIController(devices, q.config.EnableIOThreads) + devices, ioThread, err := q.buildDevices(initrdPath) + if err != nil { + return err } cpuModel := q.arch.cpuModel() - firmwarePath, err := sandboxConfig.HypervisorConfig.FirmwareAssetPath() + firmwarePath, err := q.config.FirmwareAssetPath() if err != nil { return err } @@ -391,7 +426,7 @@ func (q *qemu) createSandbox(sandboxConfig SandboxConfig) error { } qemuConfig := govmmQemu.Config{ - Name: fmt.Sprintf("sandbox-%s", sandboxConfig.ID), + Name: fmt.Sprintf("sandbox-%s", q.id), UUID: q.state.UUID, Path: qemuPath, Ctx: q.qmpMonitorCh.ctx, @@ -735,7 +770,7 @@ func (q *qemu) hotplugAddDevice(devInfo interface{}, devType deviceType) (interf return data, err } - return data, q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state) + return data, q.storage.storeHypervisorState(q.id, q.state) } func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) { @@ -744,7 +779,7 @@ func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (int return data, err } - return data, q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state) + return data, q.storage.storeHypervisorState(q.id, q.state) } func (q *qemu) hotplugCPUs(vcpus uint32, op operation) (uint32, error) { @@ -820,12 +855,12 @@ func (q *qemu) hotplugAddCPUs(amount uint32) (uint32, error) { hotpluggedVCPUs++ if hotpluggedVCPUs == amount { // All vCPUs were hotplugged - return amount, q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state) + return amount, q.storage.storeHypervisorState(q.id, q.state) } } // All vCPUs were NOT hotplugged - if err := q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state); err != nil { + if err := q.storage.storeHypervisorState(q.id, q.state); err != nil { q.Logger().Errorf("failed to save hypervisor state after hotplug %d vCPUs: %v", hotpluggedVCPUs, err) } @@ -845,7 +880,7 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) { // get the last vCPUs and try to remove it cpu := q.state.HotpluggedVCPUs[len(q.state.HotpluggedVCPUs)-1] if err := q.qmpMonitorCh.qmp.ExecuteDeviceDel(q.qmpMonitorCh.ctx, cpu.ID); err != nil { - _ = q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state) + _ = q.storage.storeHypervisorState(q.id, q.state) return i, fmt.Errorf("failed to hotunplug CPUs, only %d CPUs were hotunplugged: %v", i, err) } @@ -853,7 +888,7 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) { q.state.HotpluggedVCPUs = q.state.HotpluggedVCPUs[:len(q.state.HotpluggedVCPUs)-1] } - return amount, q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state) + return amount, q.storage.storeHypervisorState(q.id, q.state) } func (q *qemu) hotplugMemory(memDev *memoryDevice, op operation) error { @@ -910,7 +945,7 @@ func (q *qemu) hotplugAddMemory(memDev *memoryDevice) error { } q.state.HotpluggedMemory += memDev.sizeMB - return q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state) + return 
q.storage.storeHypervisorState(q.id, q.state) } func (q *qemu) pauseSandbox() error { @@ -951,8 +986,8 @@ func (q *qemu) addDevice(devInfo interface{}, devType deviceType) error { // getSandboxConsole builds the path of the console where we can read // logs coming from the sandbox. -func (q *qemu) getSandboxConsole(sandboxID string) (string, error) { - return utils.BuildSocketPath(runStoragePath, sandboxID, defaultConsole) +func (q *qemu) getSandboxConsole(id string) (string, error) { + return utils.BuildSocketPath(runStoragePath, id, consoleSocket) } // genericAppendBridges appends to devices the given bridges diff --git a/virtcontainers/qemu_arch_base_test.go b/virtcontainers/qemu_arch_base_test.go index 77e3f880c..518c8135d 100644 --- a/virtcontainers/qemu_arch_base_test.go +++ b/virtcontainers/qemu_arch_base_test.go @@ -222,7 +222,7 @@ func TestQemuArchBaseAppendConsoles(t *testing.T) { assert := assert.New(t) qemuArchBase := newQemuArchBase() - path := filepath.Join(runStoragePath, sandboxID, defaultConsole) + path := filepath.Join(runStoragePath, sandboxID, consoleSocket) expectedOut := []govmmQemu.Device{ govmmQemu.SerialDevice{ diff --git a/virtcontainers/qemu_test.go b/virtcontainers/qemu_test.go index 91e15fd4b..6c0225515 100644 --- a/virtcontainers/qemu_test.go +++ b/virtcontainers/qemu_test.go @@ -86,7 +86,7 @@ func TestQemuInit(t *testing.T) { t.Fatalf("Could not create parent directory %s: %v", parentDir, err) } - if err := q.init(sandbox); err != nil { + if err := q.init(sandbox.id, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil { t.Fatal(err) } @@ -117,7 +117,7 @@ func TestQemuInitMissingParentDirFail(t *testing.T) { t.Fatal(err) } - if err := q.init(sandbox); err == nil { + if err := q.init(sandbox.id, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err == nil { t.Fatal("Qemu init() expected to fail because of missing parent directory for storage") } } @@ -249,7 +249,7 @@ func TestQemuAddDeviceSerialPortDev(t *testing.T) { func TestQemuGetSandboxConsole(t *testing.T) { q := &qemu{} sandboxID := "testSandboxID" - expected := filepath.Join(runStoragePath, sandboxID, defaultConsole) + expected := filepath.Join(runStoragePath, sandboxID, consoleSocket) result, err := q.getSandboxConsole(sandboxID) if err != nil { diff --git a/virtcontainers/sandbox.go b/virtcontainers/sandbox.go index b289db576..5c7fb18da 100644 --- a/virtcontainers/sandbox.go +++ b/virtcontainers/sandbox.go @@ -771,11 +771,11 @@ func newSandbox(sandboxConfig SandboxConfig) (*Sandbox, error) { } }() - if err = s.hypervisor.init(s); err != nil { + if err = s.hypervisor.init(s.id, &sandboxConfig.HypervisorConfig, sandboxConfig.VMConfig, s.storage); err != nil { return nil, err } - if err = s.hypervisor.createSandbox(sandboxConfig); err != nil { + if err = s.hypervisor.createSandbox(); err != nil { return nil, err } From 7f20dd89a3ea5fa24f64ea7086d707c1e86cb485 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Sun, 13 May 2018 18:22:41 +0800 Subject: [PATCH 03/11] hypervisor: cleanup valid method The boolean return value is not necessary. 
Signed-off-by: Peng Tao --- virtcontainers/hypervisor.go | 8 ++++---- virtcontainers/hypervisor_test.go | 9 ++++++--- virtcontainers/mock_hypervisor.go | 4 ++-- virtcontainers/qemu.go | 4 ++-- 4 files changed, 14 insertions(+), 11 deletions(-) diff --git a/virtcontainers/hypervisor.go b/virtcontainers/hypervisor.go index 3950f890c..8f0c5997c 100644 --- a/virtcontainers/hypervisor.go +++ b/virtcontainers/hypervisor.go @@ -218,13 +218,13 @@ type HypervisorConfig struct { Msize9p uint32 } -func (conf *HypervisorConfig) valid() (bool, error) { +func (conf *HypervisorConfig) valid() error { if conf.KernelPath == "" { - return false, fmt.Errorf("Missing kernel path") + return fmt.Errorf("Missing kernel path") } if conf.ImagePath == "" && conf.InitrdPath == "" { - return false, fmt.Errorf("Missing image and initrd path") + return fmt.Errorf("Missing image and initrd path") } if conf.DefaultVCPUs == 0 { @@ -251,7 +251,7 @@ func (conf *HypervisorConfig) valid() (bool, error) { conf.Msize9p = defaultMsize9p } - return true, nil + return nil } // AddKernelParam allows the addition of new kernel parameters to an existing diff --git a/virtcontainers/hypervisor_test.go b/virtcontainers/hypervisor_test.go index e9f0caaba..e84899fb8 100644 --- a/virtcontainers/hypervisor_test.go +++ b/virtcontainers/hypervisor_test.go @@ -107,9 +107,12 @@ func TestNewHypervisorFromUnknownHypervisorType(t *testing.T) { } } -func testHypervisorConfigValid(t *testing.T, hypervisorConfig *HypervisorConfig, expected bool) { - ret, _ := hypervisorConfig.valid() - if ret != expected { +func testHypervisorConfigValid(t *testing.T, hypervisorConfig *HypervisorConfig, success bool) { + err := hypervisorConfig.valid() + if success && err != nil { + t.Fatal() + } + if !success && err == nil { t.Fatal() } } diff --git a/virtcontainers/mock_hypervisor.go b/virtcontainers/mock_hypervisor.go index f86d37253..d41714ea7 100644 --- a/virtcontainers/mock_hypervisor.go +++ b/virtcontainers/mock_hypervisor.go @@ -10,8 +10,8 @@ type mockHypervisor struct { } func (m *mockHypervisor) init(id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error { - valid, err := hypervisorConfig.valid() - if valid == false || err != nil { + err := hypervisorConfig.valid() + if err != nil { return err } diff --git a/virtcontainers/qemu.go b/virtcontainers/qemu.go index f59aa3301..94b935eb9 100644 --- a/virtcontainers/qemu.go +++ b/virtcontainers/qemu.go @@ -173,8 +173,8 @@ func (q *qemu) qemuPath() (string, error) { // init intializes the Qemu structure. func (q *qemu) init(id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error { - valid, err := hypervisorConfig.valid() - if valid == false || err != nil { + err := hypervisorConfig.valid() + if err != nil { return err } From 057214f0fed350b939aff179d7279d9a552f1bfa Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 13 Jul 2018 16:23:59 +0800 Subject: [PATCH 04/11] agent: prepare for vm factory There are a few changes we need on kata agent to introduce vm factory support: 1. decouple agent creation from sandbox config 2. setup agent without creating a sandbox 3. 
expose vm storage path and share mount point Signed-off-by: Peng Tao --- virtcontainers/agent.go | 17 +- virtcontainers/agent_test.go | 2 +- virtcontainers/filesystem.go | 18 +- virtcontainers/hyperstart_agent.go | 20 +- virtcontainers/hyperstart_agent_test.go | 42 ++++ virtcontainers/kata_agent.go | 141 +++++++----- virtcontainers/kata_agent_test.go | 273 +++++++++++++++++++++++- virtcontainers/noop_agent.go | 15 ++ virtcontainers/noop_agent_test.go | 53 +++++ virtcontainers/sandbox.go | 2 +- 10 files changed, 509 insertions(+), 74 deletions(-) diff --git a/virtcontainers/agent.go b/virtcontainers/agent.go index c65406e45..7e68a3e6c 100644 --- a/virtcontainers/agent.go +++ b/virtcontainers/agent.go @@ -96,20 +96,20 @@ func newAgent(agentType AgentType) agent { } // newAgentConfig returns an agent config from a generic SandboxConfig interface. -func newAgentConfig(config SandboxConfig) interface{} { - switch config.AgentType { +func newAgentConfig(agentType AgentType, agentConfig interface{}) interface{} { + switch agentType { case NoopAgentType: return nil case HyperstartAgent: var hyperConfig HyperConfig - err := mapstructure.Decode(config.AgentConfig, &hyperConfig) + err := mapstructure.Decode(agentConfig, &hyperConfig) if err != nil { return err } return hyperConfig case KataContainersAgent: var kataAgentConfig KataAgentConfig - err := mapstructure.Decode(config.AgentConfig, &kataAgentConfig) + err := mapstructure.Decode(agentConfig, &kataAgentConfig) if err != nil { return err } @@ -209,4 +209,13 @@ type agent interface { // resumeContainer will resume a paused container resumeContainer(sandbox *Sandbox, c Container) error + + // configure will update agent settings based on provided arguments + configure(h hypervisor, id, sharePath string, builtin bool, config interface{}) error + + // getVMPath will return the agent vm socket's directory path + getVMPath(id string) string + + // getSharePath will return the agent 9pfs share mount path + getSharePath(id string) string } diff --git a/virtcontainers/agent_test.go b/virtcontainers/agent_test.go index 108d69f93..a1fa120db 100644 --- a/virtcontainers/agent_test.go +++ b/virtcontainers/agent_test.go @@ -99,7 +99,7 @@ func TestNewAgentFromUnknownAgentType(t *testing.T) { } func testNewAgentConfig(t *testing.T, config SandboxConfig, expected interface{}) { - agentConfig := newAgentConfig(config) + agentConfig := newAgentConfig(config.AgentType, config.AgentConfig) if reflect.DeepEqual(agentConfig, expected) == false { t.Fatal() } diff --git a/virtcontainers/filesystem.go b/virtcontainers/filesystem.go index 90a5fd871..f9f607132 100644 --- a/virtcontainers/filesystem.go +++ b/virtcontainers/filesystem.go @@ -85,17 +85,27 @@ const dirMode = os.FileMode(0750) | os.ModeDir // storagePathSuffix is the suffix used for all storage paths // -// Note: this very brief path represents "virtcontainers sandboxes". It is as +// Note: this very brief path represents "virtcontainers". It is as // terse as possible to minimise path length. -const storagePathSuffix = "/vc/sbs" +const storagePathSuffix = "vc" + +// sandboxPathSuffix is the suffix used for sandbox storage +const sandboxPathSuffix = "sbs" + +// vmPathSuffix is the suffix used for guest VMs. +const vmPathSuffix = "vm" // configStoragePath is the sandbox configuration directory. // It will contain one config.json file for each created sandbox. 
-var configStoragePath = filepath.Join("/var/lib", storagePathSuffix) +var configStoragePath = filepath.Join("/var/lib", storagePathSuffix, sandboxPathSuffix) // runStoragePath is the sandbox runtime directory. // It will contain one state.json and one lock file for each created sandbox. -var runStoragePath = filepath.Join("/run", storagePathSuffix) +var runStoragePath = filepath.Join("/run", storagePathSuffix, sandboxPathSuffix) + +// RunVMStoragePath is the vm directory. +// It will contain all guest vm sockets and shared mountpoints. +var RunVMStoragePath = filepath.Join("/run", storagePathSuffix, vmPathSuffix) // resourceStorage is the virtcontainers resources (configuration, state, etc...) // storage interface. diff --git a/virtcontainers/hyperstart_agent.go b/virtcontainers/hyperstart_agent.go index 6c08b706a..cd2f9a358 100644 --- a/virtcontainers/hyperstart_agent.go +++ b/virtcontainers/hyperstart_agent.go @@ -262,9 +262,17 @@ func (h *hyper) init(sandbox *Sandbox, config interface{}) (err error) { return nil } -func (h *hyper) createSandbox(sandbox *Sandbox) (err error) { +func (h *hyper) getVMPath(id string) string { + return filepath.Join(runStoragePath, id) +} + +func (h *hyper) getSharePath(id string) string { + return filepath.Join(defaultSharedDir, id) +} + +func (h *hyper) configure(hv hypervisor, id, sharePath string, builtin bool, config interface{}) error { for _, socket := range h.sockets { - err := sandbox.hypervisor.addDevice(socket, serialPortDev) + err := hv.addDevice(socket, serialPortDev) if err != nil { return err } @@ -274,14 +282,18 @@ func (h *hyper) createSandbox(sandbox *Sandbox) (err error) { // This volume contains all bind mounted container bundles. sharedVolume := Volume{ MountTag: mountTag, - HostPath: filepath.Join(defaultSharedDir, sandbox.id), + HostPath: sharePath, } if err := os.MkdirAll(sharedVolume.HostPath, dirMode); err != nil { return err } - return sandbox.hypervisor.addDevice(sharedVolume, fsDev) + return hv.addDevice(sharedVolume, fsDev) +} + +func (h *hyper) createSandbox(sandbox *Sandbox) (err error) { + return h.configure(sandbox.hypervisor, "", h.getSharePath(sandbox.id), false, nil) } func (h *hyper) capabilities() capabilities { diff --git a/virtcontainers/hyperstart_agent_test.go b/virtcontainers/hyperstart_agent_test.go index aa0129bca..ea986aaff 100644 --- a/virtcontainers/hyperstart_agent_test.go +++ b/virtcontainers/hyperstart_agent_test.go @@ -7,11 +7,13 @@ package virtcontainers import ( "fmt" + "io/ioutil" "net" "reflect" "testing" "github.com/kata-containers/runtime/virtcontainers/pkg/hyperstart" + "github.com/stretchr/testify/assert" "github.com/vishvananda/netlink" ) @@ -156,3 +158,43 @@ func TestProcessHyperRouteDestIPv6Failure(t *testing.T) { testProcessHyperRoute(t, route, testRouteDeviceName, nil) } + +func TestHyperPathAPI(t *testing.T) { + assert := assert.New(t) + + h1 := &hyper{} + h2 := &hyper{} + id := "foobar" + + // getVMPath + path1 := h1.getVMPath(id) + path2 := h2.getVMPath(id) + assert.Equal(path1, path2) + + // getSharePath + path1 = h1.getSharePath(id) + path2 = h2.getSharePath(id) + assert.Equal(path1, path2) +} + +func TestHyperConfigure(t *testing.T) { + assert := assert.New(t) + + dir, err := ioutil.TempDir("", "hyperstart-test") + assert.Nil(err) + + h := &hyper{} + m := &mockHypervisor{} + c := HyperConfig{} + id := "foobar" + + invalidAgent := KataAgentConfig{} + err = h.configure(m, id, dir, true, invalidAgent) + assert.Nil(err) + + err = h.configure(m, id, dir, true, c) + assert.Nil(err) + + err = 
h.configure(m, id, dir, false, c) + assert.Nil(err) +} diff --git a/virtcontainers/kata_agent.go b/virtcontainers/kata_agent.go index 80e92833d..53559ca90 100644 --- a/virtcontainers/kata_agent.go +++ b/virtcontainers/kata_agent.go @@ -119,11 +119,19 @@ func parseVSOCKAddr(sock string) (uint32, uint32, error) { return uint32(cid), uint32(port), nil } -func (k *kataAgent) generateVMSocket(sandbox *Sandbox, c KataAgentConfig) error { +func (k *kataAgent) getVMPath(id string) string { + return filepath.Join(RunVMStoragePath, id) +} + +func (k *kataAgent) getSharePath(id string) string { + return filepath.Join(kataHostSharedDir, id) +} + +func (k *kataAgent) generateVMSocket(id string, c KataAgentConfig) error { cid, port, err := parseVSOCKAddr(c.GRPCSocket) if err != nil { // We need to generate a host UNIX socket path for the emulated serial port. - kataSock, err := utils.BuildSocketPath(runStoragePath, sandbox.id, defaultKataSocketName) + kataSock, err := utils.BuildSocketPath(k.getVMPath(id), defaultKataSocketName) if err != nil { return err } @@ -148,7 +156,7 @@ func (k *kataAgent) generateVMSocket(sandbox *Sandbox, c KataAgentConfig) error func (k *kataAgent) init(sandbox *Sandbox, config interface{}) (err error) { switch c := config.(type) { case KataAgentConfig: - if err := k.generateVMSocket(sandbox, c); err != nil { + if err := k.generateVMSocket(sandbox.id, c); err != nil { return err } k.keepConn = c.LongLiveConn @@ -196,10 +204,22 @@ func (k *kataAgent) capabilities() capabilities { return caps } -func (k *kataAgent) createSandbox(sandbox *Sandbox) error { +func (k *kataAgent) configure(h hypervisor, id, sharePath string, builtin bool, config interface{}) error { + if config != nil { + switch c := config.(type) { + case KataAgentConfig: + if err := k.generateVMSocket(id, c); err != nil { + return err + } + k.keepConn = c.LongLiveConn + default: + return fmt.Errorf("Invalid config type") + } + } + switch s := k.vmSocket.(type) { case Socket: - err := sandbox.hypervisor.addDevice(s, serialPortDev) + err := h.addDevice(s, serialPortDev) if err != nil { return err } @@ -209,18 +229,27 @@ func (k *kataAgent) createSandbox(sandbox *Sandbox) error { return fmt.Errorf("Invalid config type") } + if builtin { + k.proxyBuiltIn = true + k.state.URL, _ = k.agentURL() + } + // Adding the shared volume. // This volume contains all bind mounted container bundles. sharedVolume := Volume{ MountTag: mountGuest9pTag, - HostPath: filepath.Join(kataHostSharedDir, sandbox.id), + HostPath: sharePath, } if err := os.MkdirAll(sharedVolume.HostPath, dirMode); err != nil { return err } - return sandbox.hypervisor.addDevice(sharedVolume, fsDev) + return h.addDevice(sharedVolume, fsDev) +} + +func (k *kataAgent) createSandbox(sandbox *Sandbox) error { + return k.configure(sandbox.hypervisor, sandbox.id, k.getSharePath(sandbox.id), k.proxyBuiltIn, nil) } func cmdToKataProcess(cmd Cmd) (process *grpc.Process, err error) { @@ -719,35 +748,15 @@ func (k *kataAgent) rollbackFailingContainerCreation(c *Container) { } } -func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process, err error) { - ociSpecJSON, ok := c.config.Annotations[vcAnnotations.ConfigJSONKey] - if !ok { - return nil, errorMissingOCISpec - } - - var ctrStorages []*grpc.Storage - var ctrDevices []*grpc.Device - - // The rootfs storage volume represents the container rootfs - // mount point inside the guest. 
- // It can be a block based device (when using block based container - // overlay on the host) mount or a 9pfs one (for all other overlay - // implementations). - rootfs := &grpc.Storage{} - - // This is the guest absolute root path for that container. - rootPathParent := filepath.Join(kataGuestSharedDir, c.id) - rootPath := filepath.Join(rootPathParent, rootfsDir) - - // In case the container creation fails, the following defer statement - // takes care of rolling back actions previously performed. - defer func() { - if err != nil { - k.rollbackFailingContainerCreation(c) - } - }() - +func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) { if c.state.Fstype != "" { + // The rootfs storage volume represents the container rootfs + // mount point inside the guest. + // It can be a block based device (when using block based container + // overlay on the host) mount or a 9pfs one (for all other overlay + // implementations). + rootfs := &grpc.Storage{} + // This is a block based device rootfs. // Pass a drive name only in case of virtio-blk driver. @@ -773,24 +782,53 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process, rootfs.Options = []string{"nouuid"} } + return rootfs, nil + } + // This is not a block based device rootfs. + // We are going to bind mount it into the 9pfs + // shared drive between the host and the guest. + // With 9pfs we don't need to ask the agent to + // mount the rootfs as the shared directory + // (kataGuestSharedDir) is already mounted in the + // guest. We only need to mount the rootfs from + // the host and it will show up in the guest. + if err := bindMountContainerRootfs(kataHostSharedDir, sandbox.id, c.id, c.rootFs, false); err != nil { + return nil, err + } + + return nil, nil +} + +func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process, err error) { + ociSpecJSON, ok := c.config.Annotations[vcAnnotations.ConfigJSONKey] + if !ok { + return nil, errorMissingOCISpec + } + + var ctrStorages []*grpc.Storage + var ctrDevices []*grpc.Device + var rootfs *grpc.Storage + + // This is the guest absolute root path for that container. + rootPathParent := filepath.Join(kataGuestSharedDir, c.id) + rootPath := filepath.Join(rootPathParent, rootfsDir) + + // In case the container creation fails, the following defer statement + // takes care of rolling back actions previously performed. + defer func() { + if err != nil { + k.rollbackFailingContainerCreation(c) + } + }() + + if rootfs, err = k.buildContainerRootfs(sandbox, c, rootPathParent); err != nil { + return nil, err + } else if rootfs != nil { // Add rootfs to the list of container storage. // We only need to do this for block based rootfs, as we // want the agent to mount it into the right location // (kataGuestSharedDir/ctrID/ ctrStorages = append(ctrStorages, rootfs) - - } else { - // This is not a block based device rootfs. - // We are going to bind mount it into the 9pfs - // shared drive between the host and the guest. - // With 9pfs we don't need to ask the agent to - // mount the rootfs as the shared directory - // (kataGuestSharedDir) is already mounted in the - // guest. We only need to mount the rootfs from - // the host and it will show up in the guest. 
- if err = bindMountContainerRootfs(kataHostSharedDir, sandbox.id, c.id, c.rootFs, false); err != nil { - return nil, err - } } ociSpec := &specs.Spec{} @@ -861,11 +899,12 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process, createNSList := []ns.NSType{ns.NSTypePID} - enterNSList := []ns.Namespace{ - { + enterNSList := []ns.Namespace{} + if sandbox.networkNS.NetNsPath != "" { + enterNSList = append(enterNSList, ns.Namespace{ Path: sandbox.networkNS.NetNsPath, Type: ns.NSTypeNet, - }, + }) } return prepareAndStartShim(sandbox, k.shim, c.id, req.ExecId, diff --git a/virtcontainers/kata_agent_test.go b/virtcontainers/kata_agent_test.go index 0d10f66a8..6a0bb09dd 100644 --- a/virtcontainers/kata_agent_test.go +++ b/virtcontainers/kata_agent_test.go @@ -6,12 +6,15 @@ package virtcontainers import ( + "encoding/json" "fmt" "io/ioutil" "net" "os" "path/filepath" "reflect" + "strings" + "syscall" "testing" gpb "github.com/gogo/protobuf/types" @@ -25,6 +28,7 @@ import ( "github.com/kata-containers/runtime/virtcontainers/device/api" "github.com/kata-containers/runtime/virtcontainers/device/config" "github.com/kata-containers/runtime/virtcontainers/device/drivers" + vcAnnotations "github.com/kata-containers/runtime/virtcontainers/pkg/annotations" "github.com/kata-containers/runtime/virtcontainers/pkg/mock" ) @@ -243,6 +247,8 @@ var reqList = []interface{}{ } func TestKataAgentSendReq(t *testing.T) { + assert := assert.New(t) + impl := &gRPCProxy{} proxy := mock.ProxyGRPCMock{ @@ -251,15 +257,12 @@ func TestKataAgentSendReq(t *testing.T) { } sockDir, err := testGenerateKataProxySockDir() - if err != nil { - t.Fatal(err) - } + assert.Nil(err) defer os.RemoveAll(sockDir) testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir) - if err := proxy.Start(testKataProxyURL); err != nil { - t.Fatal(err) - } + err = proxy.Start(testKataProxyURL) + assert.Nil(err) defer proxy.Stop() k := &kataAgent{ @@ -269,10 +272,58 @@ func TestKataAgentSendReq(t *testing.T) { } for _, req := range reqList { - if _, err := k.sendReq(req); err != nil { - t.Fatal(err) - } + _, err = k.sendReq(req) + assert.Nil(err) } + + sandbox := &Sandbox{} + container := &Container{} + execid := "processFooBar" + + err = k.startContainer(sandbox, container) + assert.Nil(err) + + err = k.signalProcess(container, execid, syscall.SIGKILL, true) + assert.Nil(err) + + err = k.winsizeProcess(container, execid, 100, 200) + assert.Nil(err) + + _, err = k.processListContainer(sandbox, Container{}, ProcessListOptions{}) + assert.Nil(err) + + err = k.updateContainer(sandbox, Container{}, specs.LinuxResources{}) + assert.Nil(err) + + err = k.pauseContainer(sandbox, Container{}) + assert.Nil(err) + + err = k.resumeContainer(sandbox, Container{}) + assert.Nil(err) + + err = k.onlineCPUMem(1) + assert.Nil(err) + + _, err = k.statsContainer(sandbox, Container{}) + assert.Nil(err) + + err = k.check() + assert.Nil(err) + + _, err = k.waitProcess(container, execid) + assert.Nil(err) + + _, err = k.writeProcessStdin(container, execid, []byte{'c'}) + assert.Nil(err) + + err = k.closeProcessStdin(container, execid) + assert.Nil(err) + + _, err = k.readProcessStdout(container, execid, []byte{}) + assert.Nil(err) + + _, err = k.readProcessStderr(container, execid, []byte{}) + assert.Nil(err) } func TestGenerateInterfacesAndRoutes(t *testing.T) { @@ -594,3 +645,207 @@ func TestHandlePidNamespace(t *testing.T) { _, err = k.handlePidNamespace(g, sandbox) assert.NotNil(err) } + +func TestAgentPathAPI(t *testing.T) { + assert := 
assert.New(t) + + k1 := &kataAgent{} + k2 := &kataAgent{} + id := "foobar" + + // getVMPath + path1 := k1.getVMPath(id) + path2 := k2.getVMPath(id) + assert.Equal(path1, path2) + + // getSharePath + path1 = k1.getSharePath(id) + path2 = k2.getSharePath(id) + assert.Equal(path1, path2) + + // generateVMSocket + c := KataAgentConfig{} + err := k1.generateVMSocket(id, c) + assert.Nil(err) + err = k2.generateVMSocket(id, c) + assert.Nil(err) + assert.Equal(k1, k2) + + c.GRPCSocket = "unixsocket" + err = k1.generateVMSocket(id, c) + assert.Nil(err) + _, ok := k1.vmSocket.(Socket) + assert.True(ok) + + c.GRPCSocket = "vsock:100:200" + err = k2.generateVMSocket(id, c) + assert.Nil(err) + _, ok = k2.vmSocket.(kataVSOCK) + assert.True(ok) +} + +func TestAgentConfigure(t *testing.T) { + assert := assert.New(t) + + dir, err := ioutil.TempDir("", "kata-agent-test") + assert.Nil(err) + + k := &kataAgent{} + h := &mockHypervisor{} + c := KataAgentConfig{GRPCSocket: "vsock:100:200"} + id := "foobar" + + invalidAgent := HyperConfig{} + err = k.configure(h, id, dir, true, invalidAgent) + assert.Error(err) + + err = k.configure(h, id, dir, true, c) + assert.Nil(err) + + c.GRPCSocket = "foobarfoobar" + err = k.configure(h, id, dir, true, c) + assert.Nil(err) + + err = k.configure(h, id, dir, false, c) + assert.Nil(err) +} + +func TestParseVSOCKAddr(t *testing.T) { + assert := assert.New(t) + + sock := "randomfoobar" + _, _, err := parseVSOCKAddr(sock) + assert.Error(err) + + sock = "vsock://1:2" + _, _, err = parseVSOCKAddr(sock) + assert.Error(err) + + sock = "unix:1:2" + _, _, err = parseVSOCKAddr(sock) + assert.Error(err) + + sock = "vsock:foo:2" + _, _, err = parseVSOCKAddr(sock) + assert.Error(err) + + sock = "vsock:1:bar" + _, _, err = parseVSOCKAddr(sock) + assert.Error(err) + + sock = "vsock:1:2" + cid, port, err := parseVSOCKAddr(sock) + assert.Nil(err) + assert.Equal(cid, uint32(1)) + assert.Equal(port, uint32(2)) +} + +func TestCmdToKataProcess(t *testing.T) { + assert := assert.New(t) + + cmd := Cmd{ + Args: strings.Split("foo", " "), + Envs: []EnvVar{}, + WorkDir: "/", + User: "1000", + PrimaryGroup: "1000", + } + _, err := cmdToKataProcess(cmd) + assert.Nil(err) + + cmd1 := cmd + cmd1.User = "foobar" + _, err = cmdToKataProcess(cmd1) + assert.Error(err) + + cmd1 = cmd + cmd1.PrimaryGroup = "foobar" + _, err = cmdToKataProcess(cmd1) + assert.Error(err) + + cmd1 = cmd + cmd1.User = "foobar:1000" + _, err = cmdToKataProcess(cmd1) + assert.Error(err) + + cmd1 = cmd + cmd1.User = "1000:2000" + _, err = cmdToKataProcess(cmd1) + assert.Nil(err) + + cmd1 = cmd + cmd1.SupplementaryGroups = []string{"foo"} + _, err = cmdToKataProcess(cmd1) + assert.Error(err) + + cmd1 = cmd + cmd1.SupplementaryGroups = []string{"4000"} + _, err = cmdToKataProcess(cmd1) + assert.Nil(err) +} + +func TestAgentCreateContainer(t *testing.T) { + assert := assert.New(t) + + sandbox := &Sandbox{ + id: "foobar", + config: &SandboxConfig{ + ID: "foobar", + HypervisorType: MockHypervisor, + HypervisorConfig: HypervisorConfig{ + KernelPath: "foo", + ImagePath: "bar", + }, + }, + hypervisor: &mockHypervisor{}, + storage: &filesystem{}, + } + + container := &Container{ + id: "barfoo", + sandboxID: "foobar", + sandbox: sandbox, + state: State{ + Fstype: "xfs", + }, + config: &ContainerConfig{ + Annotations: map[string]string{}, + }, + } + + ociSpec, err := json.Marshal(&specs.Spec{}) + assert.Nil(err) + container.config.Annotations[vcAnnotations.ConfigJSONKey] = string(ociSpec[:]) + + impl := &gRPCProxy{} + + proxy := 
mock.ProxyGRPCMock{ + GRPCImplementer: impl, + GRPCRegister: gRPCRegister, + } + + sockDir, err := testGenerateKataProxySockDir() + assert.Nil(err) + defer os.RemoveAll(sockDir) + + testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir) + err = proxy.Start(testKataProxyURL) + assert.Nil(err) + defer proxy.Stop() + + k := &kataAgent{ + state: KataAgentState{ + URL: testKataProxyURL, + }, + } + + dir, err := ioutil.TempDir("", "kata-agent-test") + assert.Nil(err) + + err = k.configure(&mockHypervisor{}, sandbox.id, dir, true, KataAgentConfig{}) + assert.Nil(err) + + // We'll fail on container metadata file creation, but it helps increasing coverage... + _, err = k.createContainer(sandbox, container) + assert.Error(err) +} diff --git a/virtcontainers/noop_agent.go b/virtcontainers/noop_agent.go index ed8abdbc7..f4fbb8c13 100644 --- a/virtcontainers/noop_agent.go +++ b/virtcontainers/noop_agent.go @@ -145,3 +145,18 @@ func (n *noopAgent) pauseContainer(sandbox *Sandbox, c Container) error { func (n *noopAgent) resumeContainer(sandbox *Sandbox, c Container) error { return nil } + +// configHypervisor is the Noop agent hypervisor configuration implementation. It does nothing. +func (n *noopAgent) configure(h hypervisor, id, sharePath string, builtin bool, config interface{}) error { + return nil +} + +// getVMPath is the Noop agent vm path getter. It does nothing. +func (n *noopAgent) getVMPath(id string) string { + return "" +} + +// getVMPath is the Noop agent share path getter. It does nothing. +func (n *noopAgent) getSharePath(id string) string { + return "" +} diff --git a/virtcontainers/noop_agent_test.go b/virtcontainers/noop_agent_test.go index f4f076071..1453bbfc6 100644 --- a/virtcontainers/noop_agent_test.go +++ b/virtcontainers/noop_agent_test.go @@ -156,3 +156,56 @@ func TestNoopAgentResumeContainer(t *testing.T) { t.Fatal(err) } } + +func TestNoopAgentConfigure(t *testing.T) { + n := &noopAgent{} + h := &mockHypervisor{} + id := "foobar" + sharePath := "foobarDir" + err := n.configure(h, id, sharePath, true, nil) + if err != nil { + t.Fatal(err) + } +} + +func TestNoopAgentGetVMPath(t *testing.T) { + n := &noopAgent{} + path := n.getVMPath("") + if path != "" { + t.Fatal("getSharePath returns non empty path") + } +} + +func TestNoopAgentGetSharePath(t *testing.T) { + n := &noopAgent{} + path := n.getSharePath("") + if path != "" { + t.Fatal("getSharePath returns non empty path") + } +} + +func TestNoopAgentStartProxy(t *testing.T) { + n := &noopAgent{} + sandbox, _, err := testCreateNoopContainer() + if err != nil { + t.Fatal(err) + } + defer cleanUp() + err = n.startProxy(sandbox) + if err != nil { + t.Fatal(err) + } +} + +func TestNoopAgentProcessListContainer(t *testing.T) { + n := &noopAgent{} + sandbox, container, err := testCreateNoopContainer() + if err != nil { + t.Fatal(err) + } + defer cleanUp() + _, err = n.processListContainer(sandbox, *container, ProcessListOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/virtcontainers/sandbox.go b/virtcontainers/sandbox.go index 5c7fb18da..098bcaa32 100644 --- a/virtcontainers/sandbox.go +++ b/virtcontainers/sandbox.go @@ -779,7 +779,7 @@ func newSandbox(sandboxConfig SandboxConfig) (*Sandbox, error) { return nil, err } - agentConfig := newAgentConfig(sandboxConfig) + agentConfig := newAgentConfig(sandboxConfig.AgentType, sandboxConfig.AgentConfig) if err = s.agent.init(s, agentConfig); err != nil { return nil, err } From 28b6104710d74930099689fc1cf6e238ca212c8a Mon Sep 17 00:00:00 2001 From: Peng Tao Date: 
Fri, 13 Jul 2018 16:42:52 +0800 Subject: [PATCH 05/11] qemu: prepare for vm templating support 1. support qemu migration save operation 2. setup vm templating parameters per hypervisor config 3. create vm storage path when it does not exist. This can happen when an empty guest is created without a sandbox. Signed-off-by: Peng Tao --- virtcontainers/hypervisor.go | 37 ++++++++ virtcontainers/hypervisor_test.go | 24 ++++++ virtcontainers/mock_hypervisor.go | 4 + virtcontainers/mock_hypervisor_test.go | 8 ++ virtcontainers/qemu.go | 113 ++++++++++++++++++++++--- virtcontainers/qemu_test.go | 6 +- 6 files changed, 179 insertions(+), 13 deletions(-) diff --git a/virtcontainers/hypervisor.go b/virtcontainers/hypervisor.go index 8f0c5997c..81b27c509 100644 --- a/virtcontainers/hypervisor.go +++ b/virtcontainers/hypervisor.go @@ -216,6 +216,38 @@ type HypervisorConfig struct { // Msize9p is used as the msize for 9p shares Msize9p uint32 + + // BootToBeTemplate used to indicate if the VM is created to be a template VM + BootToBeTemplate bool + + // BootFromTemplate used to indicate if the VM should be created from a template VM + BootFromTemplate bool + + // MemoryPath is the memory file path of VM memory. Used when either BootToBeTemplate or + // BootFromTemplate is true. + MemoryPath string + + // DevicesStatePath is the VM device state file path. Used when either BootToBeTemplate or + // BootFromTemplate is true. + DevicesStatePath string +} + +func (conf *HypervisorConfig) checkTemplateConfig() error { + if conf.BootToBeTemplate && conf.BootFromTemplate { + return fmt.Errorf("Cannot set both 'to be' and 'from' vm tempate") + } + + if conf.BootToBeTemplate || conf.BootFromTemplate { + if conf.MemoryPath == "" { + return fmt.Errorf("Missing MemoryPath for vm template") + } + + if conf.BootFromTemplate && conf.DevicesStatePath == "" { + return fmt.Errorf("Missing DevicesStatePath to load from vm template") + } + } + + return nil } func (conf *HypervisorConfig) valid() error { @@ -227,6 +259,10 @@ func (conf *HypervisorConfig) valid() error { return fmt.Errorf("Missing image and initrd path") } + if err := conf.checkTemplateConfig(); err != nil { + return err + } + if conf.DefaultVCPUs == 0 { conf.DefaultVCPUs = defaultVCPUs } @@ -505,6 +541,7 @@ type hypervisor interface { waitSandbox(timeout int) error stopSandbox() error pauseSandbox() error + saveSandbox() error resumeSandbox() error addDevice(devInfo interface{}, devType deviceType) error hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) diff --git a/virtcontainers/hypervisor_test.go b/virtcontainers/hypervisor_test.go index e84899fb8..532a0a1db 100644 --- a/virtcontainers/hypervisor_test.go +++ b/virtcontainers/hypervisor_test.go @@ -157,6 +157,30 @@ func TestHypervisorConfigIsValid(t *testing.T) { testHypervisorConfigValid(t, hypervisorConfig, true) } +func TestHypervisorConfigValidTemplateConfig(t *testing.T) { + hypervisorConfig := &HypervisorConfig{ + KernelPath: fmt.Sprintf("%s/%s", testDir, testKernel), + ImagePath: fmt.Sprintf("%s/%s", testDir, testImage), + HypervisorPath: fmt.Sprintf("%s/%s", testDir, testHypervisor), + BootToBeTemplate: true, + BootFromTemplate: true, + } + testHypervisorConfigValid(t, hypervisorConfig, false) + + hypervisorConfig.BootToBeTemplate = false + testHypervisorConfigValid(t, hypervisorConfig, false) + hypervisorConfig.MemoryPath = "foobar" + testHypervisorConfigValid(t, hypervisorConfig, false) + hypervisorConfig.DevicesStatePath = "foobar" + testHypervisorConfigValid(t, 
hypervisorConfig, true) + + hypervisorConfig.BootFromTemplate = false + hypervisorConfig.BootToBeTemplate = true + testHypervisorConfigValid(t, hypervisorConfig, true) + hypervisorConfig.MemoryPath = "" + testHypervisorConfigValid(t, hypervisorConfig, false) +} + func TestHypervisorConfigDefaults(t *testing.T) { hypervisorConfig := &HypervisorConfig{ KernelPath: fmt.Sprintf("%s/%s", testDir, testKernel), diff --git a/virtcontainers/mock_hypervisor.go b/virtcontainers/mock_hypervisor.go index d41714ea7..38f9a44d7 100644 --- a/virtcontainers/mock_hypervisor.go +++ b/virtcontainers/mock_hypervisor.go @@ -46,6 +46,10 @@ func (m *mockHypervisor) resumeSandbox() error { return nil } +func (m *mockHypervisor) saveSandbox() error { + return nil +} + func (m *mockHypervisor) addDevice(devInfo interface{}, devType deviceType) error { return nil } diff --git a/virtcontainers/mock_hypervisor_test.go b/virtcontainers/mock_hypervisor_test.go index fae4b73ca..194939dc7 100644 --- a/virtcontainers/mock_hypervisor_test.go +++ b/virtcontainers/mock_hypervisor_test.go @@ -96,3 +96,11 @@ func TestMockHypervisorGetSandboxConsole(t *testing.T) { t.Fatalf("Got %s\nExpecting %s", result, expected) } } + +func TestMockHypervisorSaveSandbox(t *testing.T) { + var m *mockHypervisor + + if err := m.saveSandbox(); err != nil { + t.Fatal(err) + } +} diff --git a/virtcontainers/qemu.go b/virtcontainers/qemu.go index 94b935eb9..e7c97446c 100644 --- a/virtcontainers/qemu.go +++ b/virtcontainers/qemu.go @@ -64,11 +64,16 @@ type qemu struct { arch qemuArch } -const qmpCapErrMsg = "Failed to negoatiate QMP capabilities" +const ( + consoleSocket = "console.sock" + qmpSocket = "qmp.sock" -const qmpSocket = "qmp.sock" + qmpCapErrMsg = "Failed to negoatiate QMP capabilities" + qmpCapMigrationBypassSharedMemory = "bypass-shared-memory" + qmpExecCatCmd = "exec:cat" -const consoleSocket = "console.sock" + scsiControllerID = "scsi0" +) var qemuMajorVersion int var qemuMinorVersion int @@ -86,10 +91,6 @@ const ( removeDevice ) -const ( - scsiControllerID = "scsi0" -) - type qmpLogger struct { logger *logrus.Entry } @@ -191,6 +192,12 @@ func (q *qemu) init(id string, hypervisorConfig *HypervisorConfig, vmConfig Reso q.Logger().Debug("Creating UUID") q.state.UUID = uuid.Generate().String() + // The path might already exist, but in case of VM templating, + // we have to create it since the sandbox has not created it yet. 
+ if err = os.MkdirAll(filepath.Join(runStoragePath, id), dirMode); err != nil { + return err + } + if err = q.storage.storeHypervisorState(q.id, q.state); err != nil { return err } @@ -242,7 +249,7 @@ func (q *qemu) memoryTopology() (govmmQemu.Memory, error) { } func (q *qemu) qmpSocketPath(id string) (string, error) { - return utils.BuildSocketPath(runStoragePath, id, qmpSocket) + return utils.BuildSocketPath(RunVMStoragePath, id, qmpSocket) } func (q *qemu) getQemuMachine() (govmmQemu.Machine, error) { @@ -334,6 +341,26 @@ func (q *qemu) buildDevices(initrdPath string) ([]govmmQemu.Device, *govmmQemu.I } +func (q *qemu) setupTemplate(knobs *govmmQemu.Knobs, memory *govmmQemu.Memory) govmmQemu.Incoming { + incoming := govmmQemu.Incoming{} + + if q.config.BootToBeTemplate || q.config.BootFromTemplate { + knobs.FileBackedMem = true + memory.Path = q.config.MemoryPath + + if q.config.BootToBeTemplate { + knobs.FileBackedMemShared = true + } + + if q.config.BootFromTemplate { + incoming.MigrationType = govmmQemu.MigrationExec + incoming.Exec = "cat " + q.config.DevicesStatePath + } + } + + return incoming +} + // createSandbox is the Hypervisor sandbox creation implementation for govmmQemu. func (q *qemu) createSandbox() error { machine, err := q.getQemuMachine() @@ -379,6 +406,8 @@ func (q *qemu) createSandbox() error { Params: params, } + incoming := q.setupTemplate(&knobs, &memory) + rtc := govmmQemu.RTC{ Base: "utc", DriftFix: "slew", @@ -439,6 +468,7 @@ func (q *qemu) createSandbox() error { RTC: rtc, QMPSockets: qmpSockets, Knobs: knobs, + Incoming: incoming, VGA: "none", GlobalParam: "kvm-pit.lost_tick_policy=discard", Bios: firmwarePath, @@ -545,7 +575,18 @@ func (q *qemu) stopSandbox() error { return err } - return qmp.ExecuteQuit(q.qmpMonitorCh.ctx) + err = qmp.ExecuteQuit(q.qmpMonitorCh.ctx) + if err != nil { + q.Logger().WithError(err).Error("Fail to execute qmp QUIT") + return err + } + + err = os.RemoveAll(RunVMStoragePath + q.id) + if err != nil { + q.Logger().WithError(err).Error("Fail to clean up vm directory") + } + + return nil } func (q *qemu) togglePauseSandbox(pause bool) error { @@ -987,7 +1028,59 @@ func (q *qemu) addDevice(devInfo interface{}, devType deviceType) error { // getSandboxConsole builds the path of the console where we can read // logs coming from the sandbox. func (q *qemu) getSandboxConsole(id string) (string, error) { - return utils.BuildSocketPath(runStoragePath, id, consoleSocket) + return utils.BuildSocketPath(RunVMStoragePath, id, consoleSocket) +} + +func (q *qemu) saveSandbox() error { + defer func(qemu *qemu) { + if q.qmpMonitorCh.qmp != nil { + q.qmpMonitorCh.qmp.Shutdown() + } + }(q) + + q.Logger().Info("save sandbox") + + cfg := govmmQemu.QMPConfig{Logger: newQMPLogger()} + + // Auto-closed by QMPStart(). + disconnectCh := make(chan struct{}) + + qmp, _, err := govmmQemu.QMPStart(q.qmpMonitorCh.ctx, q.qmpMonitorCh.path, cfg, disconnectCh) + if err != nil { + q.Logger().WithError(err).Error("Failed to connect to QEMU instance") + return err + } + + q.qmpMonitorCh.qmp = qmp + + err = qmp.ExecuteQMPCapabilities(q.qmpMonitorCh.ctx) + if err != nil { + q.Logger().WithError(err).Error(qmpCapErrMsg) + return err + } + + // BootToBeTemplate sets the VM to be a template that other VMs can clone from. We would want to + // bypass shared memory when saving the VM to a local file through migration exec. 
+ if q.config.BootToBeTemplate { + err = q.qmpMonitorCh.qmp.ExecSetMigrationCaps(q.qmpMonitorCh.ctx, []map[string]interface{}{ + { + "capability": qmpCapMigrationBypassSharedMemory, + "state": true, + }, + }) + if err != nil { + q.Logger().WithError(err).Error("set migration bypass shared memory") + return err + } + } + + err = q.qmpMonitorCh.qmp.ExecSetMigrateArguments(q.qmpMonitorCh.ctx, fmt.Sprintf("%s>%s", qmpExecCatCmd, q.config.DevicesStatePath)) + if err != nil { + q.Logger().WithError(err).Error("exec migration") + return err + } + + return nil } // genericAppendBridges appends to devices the given bridges diff --git a/virtcontainers/qemu_test.go b/virtcontainers/qemu_test.go index 6c0225515..55b443ea7 100644 --- a/virtcontainers/qemu_test.go +++ b/virtcontainers/qemu_test.go @@ -117,8 +117,8 @@ func TestQemuInitMissingParentDirFail(t *testing.T) { t.Fatal(err) } - if err := q.init(sandbox.id, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err == nil { - t.Fatal("Qemu init() expected to fail because of missing parent directory for storage") + if err := q.init(sandbox.id, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil { + t.Fatalf("Qemu init() is not expected to fail because of missing parent directory for storage: %v", err) } } @@ -249,7 +249,7 @@ func TestQemuAddDeviceSerialPortDev(t *testing.T) { func TestQemuGetSandboxConsole(t *testing.T) { q := &qemu{} sandboxID := "testSandboxID" - expected := filepath.Join(runStoragePath, sandboxID, consoleSocket) + expected := filepath.Join(RunVMStoragePath, sandboxID, consoleSocket) result, err := q.getSandboxConsole(sandboxID) if err != nil { From 8dda2dd7a55e04ffa17a9038de0d975e20d111db Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 13 Jul 2018 16:47:45 +0800 Subject: [PATCH 06/11] virtcontainers: add a vm abstraction layer As representation of a guest without actual sandbox attached to it. This prepares for vm factory support. Signed-off-by: Peng Tao --- virtcontainers/vm.go | 227 ++++++++++++++++++++++++++++++++++++++ virtcontainers/vm_test.go | 83 ++++++++++++++ 2 files changed, 310 insertions(+) create mode 100644 virtcontainers/vm.go create mode 100644 virtcontainers/vm_test.go diff --git a/virtcontainers/vm.go b/virtcontainers/vm.go new file mode 100644 index 000000000..09c07d9f1 --- /dev/null +++ b/virtcontainers/vm.go @@ -0,0 +1,227 @@ +// Copyright (c) 2018 HyperHQ Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package virtcontainers + +import ( + "os" + "path/filepath" + + "github.com/kata-containers/runtime/virtcontainers/pkg/uuid" + "github.com/sirupsen/logrus" +) + +// VM is abstraction of a virtual machine. +type VM struct { + id string + + hypervisor hypervisor + agent agent + + cpu uint32 + memory uint32 + + cpuDelta uint32 +} + +// VMConfig is a collection of all info that a new blackbox VM needs. +type VMConfig struct { + HypervisorType HypervisorType + HypervisorConfig HypervisorConfig + + AgentType AgentType + AgentConfig interface{} +} + +// Valid check VMConfig validity. +func (c *VMConfig) Valid() error { + return c.HypervisorConfig.valid() +} + +// NewVM creates a new VM based on provided VMConfig. 
+func NewVM(config VMConfig) (*VM, error) { + hypervisor, err := newHypervisor(config.HypervisorType) + if err != nil { + return nil, err + } + + if err = config.Valid(); err != nil { + return nil, err + } + + id := uuid.Generate().String() + + virtLog.WithField("vm id", id).WithField("config", config).Info("create new vm") + + defer func() { + if err != nil { + virtLog.WithField("vm id", id).WithError(err).Error("failed to create new vm") + } + }() + + if err = hypervisor.init(id, &config.HypervisorConfig, Resources{}, &filesystem{}); err != nil { + return nil, err + } + + if err = hypervisor.createSandbox(); err != nil { + return nil, err + } + + agent := newAgent(config.AgentType) + agentConfig := newAgentConfig(config.AgentType, config.AgentConfig) + // do not keep connection for temp agent + if c, ok := agentConfig.(KataAgentConfig); ok { + c.LongLiveConn = false + } + vmSharePath := buildVMSharePath(id) + err = agent.configure(hypervisor, id, vmSharePath, true, agentConfig) + if err != nil { + return nil, err + } + + if err = hypervisor.startSandbox(); err != nil { + return nil, err + } + + defer func() { + if err != nil { + virtLog.WithField("vm id", id).WithError(err).Info("clean up vm") + hypervisor.stopSandbox() + } + }() + + // VMs booted from template are paused, do not check + if !config.HypervisorConfig.BootFromTemplate { + err = hypervisor.waitSandbox(vmStartTimeout) + if err != nil { + return nil, err + } + + virtLog.WithField("vm id", id).Info("check agent status") + err = agent.check() + if err != nil { + return nil, err + } + } + + return &VM{ + id: id, + hypervisor: hypervisor, + agent: agent, + cpu: config.HypervisorConfig.DefaultVCPUs, + memory: config.HypervisorConfig.DefaultMemSz, + }, nil +} + +func buildVMSharePath(id string) string { + return filepath.Join(RunVMStoragePath, id, "shared") +} + +func (v *VM) logger() logrus.FieldLogger { + return virtLog.WithField("vm id", v.id) +} + +// Pause pauses a VM. +func (v *VM) Pause() error { + v.logger().Info("pause vm") + return v.hypervisor.pauseSandbox() +} + +// Save saves a VM to persistent disk. +func (v *VM) Save() error { + v.logger().Info("save vm") + return v.hypervisor.saveSandbox() +} + +// Resume resumes a paused VM. +func (v *VM) Resume() error { + v.logger().Info("resume vm") + return v.hypervisor.resumeSandbox() +} + +// Start kicks off a configured VM. +func (v *VM) Start() error { + v.logger().Info("start vm") + return v.hypervisor.startSandbox() +} + +// Stop stops a VM process. +func (v *VM) Stop() error { + v.logger().Info("kill vm") + return v.hypervisor.stopSandbox() +} + +// AddCPUs adds num of CPUs to the VM. +func (v *VM) AddCPUs(num uint32) error { + if num > 0 { + v.logger().Infof("hot adding %d vCPUs", num) + if _, err := v.hypervisor.hotplugAddDevice(num, cpuDev); err != nil { + return err + } + v.cpuDelta += num + v.cpu += num + } + + return nil +} + +// AddMemory adds numMB of memory to the VM. +func (v *VM) AddMemory(numMB uint32) error { + if numMB > 0 { + v.logger().Infof("hot adding %d MB memory", numMB) + dev := &memoryDevice{1, int(numMB)} + if _, err := v.hypervisor.hotplugAddDevice(dev, memoryDev); err != nil { + return err + } + } + + return nil +} + +// OnlineCPUMemory puts the hotplugged CPU and memory online. 
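+// It is meant to run after AddCPUs/AddMemory so that the guest agent brings
+// the newly plugged resources online (see the factory GetVM path for the
+// typical sequence).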
+func (v *VM) OnlineCPUMemory() error { + v.logger().Infof("online CPU %d and memory", v.cpuDelta) + err := v.agent.onlineCPUMem(v.cpuDelta) + if err == nil { + v.cpuDelta = 0 + } + + return err +} + +func (v *VM) assignSandbox(s *Sandbox) error { + // add vm symlinks + // - link vm socket from sandbox dir (/run/vc/vm/sbid/) to vm dir (/run/vc/vm/vmid/) + // - link 9pfs share path from sandbox dir (/run/kata-containers/shared/sandboxes/sbid/) to vm dir (/run/vc/vm/vmid/shared/) + + vmSharePath := buildVMSharePath(v.id) + vmSockDir := v.agent.getVMPath(v.id) + sbSharePath := s.agent.getSharePath(s.id) + sbSockDir := s.agent.getVMPath(s.id) + + v.logger().WithFields(logrus.Fields{ + "vmSharePath": vmSharePath, + "vmSockDir": vmSockDir, + "sbSharePath": sbSharePath, + "sbSockDir": sbSockDir, + }).Infof("assign vm to sandbox %s", s.id) + + // First make sure the symlinks do not exist + os.RemoveAll(sbSharePath) + os.RemoveAll(sbSockDir) + + if err := os.Symlink(vmSharePath, sbSharePath); err != nil { + return err + } + + if err := os.Symlink(vmSockDir, sbSockDir); err != nil { + os.Remove(sbSharePath) + return err + } + + s.hypervisor = v.hypervisor + + return nil +} diff --git a/virtcontainers/vm_test.go b/virtcontainers/vm_test.go new file mode 100644 index 000000000..3031fc023 --- /dev/null +++ b/virtcontainers/vm_test.go @@ -0,0 +1,83 @@ +// Copyright (c) 2018 HyperHQ Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package virtcontainers + +import ( + "io/ioutil" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewVM(t *testing.T) { + assert := assert.New(t) + + testDir, _ := ioutil.TempDir("", "vmfactory-tmp-") + config := VMConfig{ + HypervisorType: MockHypervisor, + AgentType: NoopAgentType, + } + hyperConfig := HypervisorConfig{ + KernelPath: testDir, + ImagePath: testDir, + } + + var vm *VM + _, err := NewVM(config) + assert.Error(err) + + config.HypervisorConfig = hyperConfig + vm, err = NewVM(config) + assert.Nil(err) + + // VM operations + err = vm.Pause() + assert.Nil(err) + err = vm.Resume() + assert.Nil(err) + err = vm.Start() + assert.Nil(err) + err = vm.Save() + assert.Nil(err) + err = vm.Stop() + assert.Nil(err) + err = vm.AddCPUs(2) + assert.Nil(err) + err = vm.AddMemory(128) + assert.Nil(err) + err = vm.OnlineCPUMemory() + assert.Nil(err) + + // template VM + config.HypervisorConfig.BootFromTemplate = true + _, err = NewVM(config) + assert.Error(err) + + config.HypervisorConfig.MemoryPath = testDir + _, err = NewVM(config) + assert.Error(err) + + config.HypervisorConfig.DevicesStatePath = testDir + _, err = NewVM(config) + assert.Nil(err) +} + +func TestVMConfigValid(t *testing.T) { + assert := assert.New(t) + + config := VMConfig{} + + err := config.Valid() + assert.Error(err) + + testDir, _ := ioutil.TempDir("", "vmfactory-tmp-") + config.HypervisorConfig = HypervisorConfig{ + KernelPath: testDir, + InitrdPath: testDir, + } + err = config.Valid() + assert.Nil(err) +} From bdd5c66fc588855a7c8f5ccdad51fe6aa7b8b844 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 13 Jul 2018 17:26:47 +0800 Subject: [PATCH 07/11] virtcontainers: add vm factory support Add vm factory support per design in the VM Factory plugin section. The vm factory controls how a new vm is created: 1. direct: vm is created directly 2. template: vm is created via vm template. A template vm is pre-created and saved. Later vm is just a clone of the template vm so that they readonly share a portion of initial memory (including kernel, initramfs and the kata agent). 
CPU and memory are hot plugged when necessary. 3. cache: vm is created via vm caches. A set of cached vm are pre-created and maintained alive. New vms are created by just picking a cached vm. CPU and memory are hot plugged when necessary. Fixes: #303 Signed-off-by: Peng Tao --- virtcontainers/factory.go | 15 ++ virtcontainers/factory/base/base.go | 24 +++ virtcontainers/factory/cache/cache.go | 83 +++++++++ virtcontainers/factory/direct/direct.go | 46 +++++ virtcontainers/factory/factory.go | 176 ++++++++++++++++++++ virtcontainers/factory/factory_test.go | 49 ++++++ virtcontainers/factory/template/template.go | 143 ++++++++++++++++ 7 files changed, 536 insertions(+) create mode 100644 virtcontainers/factory.go create mode 100644 virtcontainers/factory/base/base.go create mode 100644 virtcontainers/factory/cache/cache.go create mode 100644 virtcontainers/factory/direct/direct.go create mode 100644 virtcontainers/factory/factory.go create mode 100644 virtcontainers/factory/factory_test.go create mode 100644 virtcontainers/factory/template/template.go diff --git a/virtcontainers/factory.go b/virtcontainers/factory.go new file mode 100644 index 000000000..ad4f22311 --- /dev/null +++ b/virtcontainers/factory.go @@ -0,0 +1,15 @@ +// Copyright (c) 2018 HyperHQ Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package virtcontainers + +// Factory controls how a new VM is created. +type Factory interface { + // GetVM gets a new VM from the factory. + GetVM(config VMConfig) (*VM, error) + + // CloseFactory closes and cleans up the factory. + CloseFactory() +} diff --git a/virtcontainers/factory/base/base.go b/virtcontainers/factory/base/base.go new file mode 100644 index 000000000..449bdddf6 --- /dev/null +++ b/virtcontainers/factory/base/base.go @@ -0,0 +1,24 @@ +// Copyright (c) 2018 HyperHQ Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package base + +import vc "github.com/kata-containers/runtime/virtcontainers" + +// FactoryBase is vm factory's internal base factory interfaces. +// The difference between FactoryBase and Factory is that the Factory +// also handles vm config validation/comparison and possible CPU/memory +// hotplugs. It's better to do it at the factory level instead of doing +// the same work in each of the factory implementations. +type FactoryBase interface { + // Config returns base factory config. + Config() vc.VMConfig + + // GetBaseVM returns a paused VM created by the base factory. + GetBaseVM() (*vc.VM, error) + + // CloseFactory closes the base factory. + CloseFactory() +} diff --git a/virtcontainers/factory/cache/cache.go b/virtcontainers/factory/cache/cache.go new file mode 100644 index 000000000..f008617d6 --- /dev/null +++ b/virtcontainers/factory/cache/cache.go @@ -0,0 +1,83 @@ +// Copyright (c) 2018 HyperHQ Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// cache implements base vm factory on top of other base vm factory. + +package cache + +import ( + "fmt" + "sync" + + vc "github.com/kata-containers/runtime/virtcontainers" + "github.com/kata-containers/runtime/virtcontainers/factory/base" +) + +type cache struct { + base base.FactoryBase + + cacheCh chan *vc.VM + closed chan<- int + wg sync.WaitGroup + closeOnce sync.Once +} + +// New creates a new cached vm factory. 
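+// It starts count producer goroutines on top of the base factory so that up
+// to count pre-created VMs are ready to be handed out by GetBaseVM; a count
+// of zero simply returns the base factory unchanged. A usage sketch:
+//
+//	f := cache.New(4, direct.New(vmConfig))
+//	vm, err := f.GetBaseVM()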
+func New(count uint, b base.FactoryBase) base.FactoryBase { + if count < 1 { + return b + } + + cacheCh := make(chan *vc.VM) + closed := make(chan int, count) + c := cache{base: b, cacheCh: cacheCh, closed: closed} + for i := 0; i < int(count); i++ { + c.wg.Add(1) + go func() { + for { + vm, err := b.GetBaseVM() + if err != nil { + c.wg.Done() + c.CloseFactory() + return + } + + select { + case cacheCh <- vm: + case <-closed: + vm.Stop() + c.wg.Done() + return + } + } + }() + } + return &c +} + +// Config returns cache vm factory's base factory config. +func (c *cache) Config() vc.VMConfig { + return c.base.Config() +} + +// GetBaseVM returns a base VM from cache factory's base factory. +func (c *cache) GetBaseVM() (*vc.VM, error) { + vm, ok := <-c.cacheCh + if ok { + return vm, nil + } + return nil, fmt.Errorf("cache factory is closed") +} + +// CloseFactory closes the cache factory. +func (c *cache) CloseFactory() { + c.closeOnce.Do(func() { + for len(c.closed) < cap(c.closed) { // send sufficient closed signal + c.closed <- 0 + } + c.wg.Wait() + close(c.cacheCh) + c.base.CloseFactory() + }) +} diff --git a/virtcontainers/factory/direct/direct.go b/virtcontainers/factory/direct/direct.go new file mode 100644 index 000000000..db1f6c854 --- /dev/null +++ b/virtcontainers/factory/direct/direct.go @@ -0,0 +1,46 @@ +// Copyright (c) 2018 HyperHQ Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// direct implements base vm factory without vm templating. + +package direct + +import ( + vc "github.com/kata-containers/runtime/virtcontainers" + "github.com/kata-containers/runtime/virtcontainers/factory/base" +) + +type direct struct { + config vc.VMConfig +} + +// New returns a new direct vm factory. +func New(config vc.VMConfig) base.FactoryBase { + return &direct{config} +} + +// Config returns the direct factory's configuration. +func (d *direct) Config() vc.VMConfig { + return d.config +} + +// GetBaseVM create a new VM directly. +func (d *direct) GetBaseVM() (*vc.VM, error) { + vm, err := vc.NewVM(d.config) + if err != nil { + return nil, err + } + + err = vm.Pause() + if err != nil { + vm.Stop() + return nil, err + } + + return vm, nil +} + +// CloseFactory closes the direct vm factory. +func (d *direct) CloseFactory() { +} diff --git a/virtcontainers/factory/factory.go b/virtcontainers/factory/factory.go new file mode 100644 index 000000000..0efc78364 --- /dev/null +++ b/virtcontainers/factory/factory.go @@ -0,0 +1,176 @@ +// Copyright (c) 2018 HyperHQ Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package factory + +import ( + "fmt" + "reflect" + + vc "github.com/kata-containers/runtime/virtcontainers" + "github.com/kata-containers/runtime/virtcontainers/factory/base" + "github.com/kata-containers/runtime/virtcontainers/factory/cache" + "github.com/kata-containers/runtime/virtcontainers/factory/direct" + "github.com/kata-containers/runtime/virtcontainers/factory/template" + "github.com/sirupsen/logrus" +) + +var factoryLogger = logrus.FieldLogger(logrus.New()) + +// Config is a collection of VM factory configurations. +type Config struct { + Template bool + Cache uint + + VMConfig vc.VMConfig +} + +func (f *Config) validate() error { + return f.VMConfig.Valid() +} + +type factory struct { + base base.FactoryBase +} + +// NewFactory returns a working factory. 
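+// Depending on Config it wraps a direct, template or cache based factory.
+// A usage sketch:
+//
+//	f, _ := NewFactory(Config{Template: true, Cache: 8, VMConfig: vmConfig}, false)
+//	vm, _ := f.GetVM(vmConfig)
+//	...
+//	f.CloseFactory()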
+func NewFactory(config Config, fetchOnly bool) (vc.Factory, error) { + err := config.validate() + if err != nil { + return nil, err + } + + if fetchOnly && config.Cache > 0 { + return nil, fmt.Errorf("cache factory does not support fetch") + } + + var b base.FactoryBase + if config.Template { + if fetchOnly { + b, err = template.Fetch(config.VMConfig) + if err != nil { + return nil, err + } + } else { + b = template.New(config.VMConfig) + } + } else { + b = direct.New(config.VMConfig) + } + + if config.Cache > 0 { + b = cache.New(config.Cache, b) + } + + return &factory{b}, nil +} + +func (f *factory) log() *logrus.Entry { + return factoryLogger.WithField("subsystem", "factory") +} + +func resetHypervisorConfig(config *vc.HypervisorConfig) { + config.DefaultVCPUs = 0 + config.DefaultMemSz = 0 + config.BootToBeTemplate = false + config.BootFromTemplate = false + config.MemoryPath = "" + config.DevicesStatePath = "" +} + +// It's important that baseConfig and newConfig are passed by value! +func checkVMConfig(config1, config2 vc.VMConfig) error { + if config1.HypervisorType != config2.HypervisorType { + return fmt.Errorf("hypervisor type does not match: %s vs. %s", config1.HypervisorType, config2.HypervisorType) + } + + if config1.AgentType != config2.AgentType { + return fmt.Errorf("agent type does not match: %s vs. %s", config1.AgentType, config2.AgentType) + } + + // check hypervisor config details + resetHypervisorConfig(&config1.HypervisorConfig) + resetHypervisorConfig(&config2.HypervisorConfig) + + if !reflect.DeepEqual(config1, config2) { + return fmt.Errorf("hypervisor config does not match, base: %+v. new: %+v", config1, config2) + } + + return nil +} + +func (f *factory) checkConfig(config vc.VMConfig) error { + baseConfig := f.base.Config() + + return checkVMConfig(config, baseConfig) +} + +// GetVM returns a working blank VM created by the factory. +func (f *factory) GetVM(config vc.VMConfig) (*vc.VM, error) { + hypervisorConfig := config.HypervisorConfig + err := config.Valid() + if err != nil { + f.log().WithError(err).Error("invalid hypervisor config") + return nil, err + } + + err = f.checkConfig(config) + if err != nil { + f.log().WithError(err).Info("fallback to direct factory vm") + return direct.New(config).GetBaseVM() + } + + f.log().Info("get base VM") + vm, err := f.base.GetBaseVM() + if err != nil { + f.log().WithError(err).Error("failed to get base VM") + return nil, err + } + + // cleanup upon error + defer func() { + if err != nil { + f.log().WithError(err).Error("clean up vm") + vm.Stop() + } + }() + + err = vm.Resume() + if err != nil { + return nil, err + } + + online := false + baseConfig := f.base.Config().HypervisorConfig + if baseConfig.DefaultVCPUs < hypervisorConfig.DefaultVCPUs { + err = vm.AddCPUs(hypervisorConfig.DefaultVCPUs - baseConfig.DefaultVCPUs) + if err != nil { + return nil, err + } + online = true + } + + if baseConfig.DefaultMemSz < hypervisorConfig.DefaultMemSz { + err = vm.AddMemory(hypervisorConfig.DefaultMemSz - baseConfig.DefaultMemSz) + if err != nil { + return nil, err + } + online = true + } + + if online { + err = vm.OnlineCPUMemory() + if err != nil { + return nil, err + } + } + + return vm, nil +} + +// CloseFactory closes the factory. 
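+// It delegates to the wrapped base factory; for a cache factory this also
+// stops any VMs still sitting in the cache.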
+func (f *factory) CloseFactory() { + f.base.CloseFactory() +} diff --git a/virtcontainers/factory/factory_test.go b/virtcontainers/factory/factory_test.go new file mode 100644 index 000000000..ee2612843 --- /dev/null +++ b/virtcontainers/factory/factory_test.go @@ -0,0 +1,49 @@ +// Copyright (c) 2018 HyperHQ Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package factory + +import ( + "io/ioutil" + "testing" + + "github.com/stretchr/testify/assert" + + vc "github.com/kata-containers/runtime/virtcontainers" +) + +func TestNewFactory(t *testing.T) { + var config Config + + assert := assert.New(t) + + _, err := NewFactory(config, true) + assert.Error(err) + _, err = NewFactory(config, false) + assert.Error(err) + + config.VMConfig = vc.VMConfig{ + HypervisorType: vc.MockHypervisor, + AgentType: vc.NoopAgentType, + } + + _, err = NewFactory(config, false) + assert.Error(err) + + testDir, err := ioutil.TempDir("", "vmfactory-tmp-") + assert.Nil(err) + + config.VMConfig.HypervisorConfig = vc.HypervisorConfig{ + KernelPath: testDir, + ImagePath: testDir, + } + + _, err = NewFactory(config, false) + assert.Nil(err) + + config.Cache = 10 + _, err = NewFactory(config, true) + assert.Error(err) +} diff --git a/virtcontainers/factory/template/template.go b/virtcontainers/factory/template/template.go new file mode 100644 index 000000000..55dddc67c --- /dev/null +++ b/virtcontainers/factory/template/template.go @@ -0,0 +1,143 @@ +// Copyright (c) 2018 HyperHQ Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// +// template implements base vm factory with vm templating. + +package template + +import ( + "fmt" + "os" + "syscall" + "time" + + vc "github.com/kata-containers/runtime/virtcontainers" + "github.com/kata-containers/runtime/virtcontainers/factory/base" + "github.com/kata-containers/runtime/virtcontainers/factory/direct" +) + +type template struct { + statePath string + config vc.VMConfig +} + +// Fetch finds and returns a pre-built template factory. +// TODO: save template metadata and fetch from storage. +func Fetch(config vc.VMConfig) (base.FactoryBase, error) { + statePath := vc.RunVMStoragePath + "/template" + t := &template{statePath, config} + + err := t.checkTemplateVM() + if err != nil { + return nil, err + } + + return t, nil +} + +// New creates a new VM template factory. +func New(config vc.VMConfig) base.FactoryBase { + statePath := vc.RunVMStoragePath + "/template" + t := &template{statePath, config} + + err := t.prepareTemplateFiles() + if err != nil { + // fallback to direct factory if template is not supported. + return direct.New(config) + } + + err = t.createTemplateVM() + if err != nil { + // fallback to direct factory if template is not supported. + return direct.New(config) + } + + return t +} + +// Config returns template factory's configuration. +func (t *template) Config() vc.VMConfig { + return t.config +} + +// GetBaseVM creates a new paused VM from the template VM. +func (t *template) GetBaseVM() (*vc.VM, error) { + return t.createFromTemplateVM() +} + +// CloseFactory cleans up the template VM. 
+func (t *template) CloseFactory() { + syscall.Unmount(t.statePath, 0) + os.RemoveAll(t.statePath) +} + +func (t *template) prepareTemplateFiles() error { + // create and mount tmpfs for the shared memory file + err := os.MkdirAll(t.statePath, 0700) + if err != nil { + return err + } + flags := uintptr(syscall.MS_NOSUID | syscall.MS_NODEV) + opts := fmt.Sprintf("size=%dM", t.config.HypervisorConfig.DefaultMemSz+8) + if err = syscall.Mount("tmpfs", t.statePath, "tmpfs", flags, opts); err != nil { + return err + } + f, err := os.Create(t.statePath + "/memory") + if err != nil { + return err + } + f.Close() + + return nil +} + +func (t *template) createTemplateVM() error { + // create the template vm + config := t.config + config.HypervisorConfig.BootToBeTemplate = true + config.HypervisorConfig.BootFromTemplate = false + config.HypervisorConfig.MemoryPath = t.statePath + "/memory" + config.HypervisorConfig.DevicesStatePath = t.statePath + "/state" + + vm, err := vc.NewVM(config) + if err != nil { + return err + } + defer vm.Stop() + + err = vm.Pause() + if err != nil { + return err + } + + err = vm.Save() + if err != nil { + return err + } + + // qemu QMP does not wait for migration to finish... + time.Sleep(1 * time.Second) + + return nil +} + +func (t *template) createFromTemplateVM() (*vc.VM, error) { + config := t.config + config.HypervisorConfig.BootToBeTemplate = false + config.HypervisorConfig.BootFromTemplate = true + config.HypervisorConfig.MemoryPath = t.statePath + "/memory" + config.HypervisorConfig.DevicesStatePath = t.statePath + "/state" + + return vc.NewVM(config) +} + +func (t *template) checkTemplateVM() error { + _, err := os.Stat(t.statePath + "/memory") + if err != nil { + return err + } + + _, err = os.Stat(t.statePath + "/state") + return err +} From a7d888febc43d7e981d973c21225018ba655c370 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 13 Jul 2018 17:39:23 +0800 Subject: [PATCH 08/11] virtconainers: add SetFactory API Add SetFactory to allow virtcontainers consumers to set a vm factory. And use it to create new VMs whenever the factory is set. Signed-off-by: Peng Tao --- virtcontainers/api.go | 12 ++--- virtcontainers/api_test.go | 70 +++++++++++++------------- virtcontainers/example_pod_run_test.go | 2 +- virtcontainers/hack/virtc/main.go | 4 +- virtcontainers/implementation.go | 10 +++- virtcontainers/interfaces.go | 1 + virtcontainers/noop_agent_test.go | 2 +- virtcontainers/pkg/vcmock/mock.go | 7 +++ virtcontainers/pkg/vcmock/mock_test.go | 37 +++++++++++++- virtcontainers/pkg/vcmock/types.go | 3 +- virtcontainers/sandbox.go | 32 ++++++++++-- virtcontainers/sandbox_test.go | 2 +- 12 files changed, 128 insertions(+), 54 deletions(-) diff --git a/virtcontainers/api.go b/virtcontainers/api.go index dfaf0c881..abaebb50e 100644 --- a/virtcontainers/api.go +++ b/virtcontainers/api.go @@ -34,13 +34,13 @@ func SetLogger(logger logrus.FieldLogger) { // CreateSandbox is the virtcontainers sandbox creation entry point. // CreateSandbox creates a sandbox and its containers. It does not start them. -func CreateSandbox(sandboxConfig SandboxConfig) (VCSandbox, error) { - return createSandboxFromConfig(sandboxConfig) +func CreateSandbox(sandboxConfig SandboxConfig, factory Factory) (VCSandbox, error) { + return createSandboxFromConfig(sandboxConfig, factory) } -func createSandboxFromConfig(sandboxConfig SandboxConfig) (*Sandbox, error) { +func createSandboxFromConfig(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) { // Create the sandbox. 
- s, err := createSandbox(sandboxConfig) + s, err := createSandbox(sandboxConfig, factory) if err != nil { return nil, err } @@ -206,8 +206,8 @@ func StopSandbox(sandboxID string) (VCSandbox, error) { // RunSandbox is the virtcontainers sandbox running entry point. // RunSandbox creates a sandbox and its containers and then it starts them. -func RunSandbox(sandboxConfig SandboxConfig) (VCSandbox, error) { - s, err := createSandboxFromConfig(sandboxConfig) +func RunSandbox(sandboxConfig SandboxConfig, factory Factory) (VCSandbox, error) { + s, err := createSandboxFromConfig(sandboxConfig, factory) if err != nil { return nil, err } diff --git a/virtcontainers/api_test.go b/virtcontainers/api_test.go index a7dc00703..c7a5ebe8d 100644 --- a/virtcontainers/api_test.go +++ b/virtcontainers/api_test.go @@ -244,7 +244,7 @@ func TestCreateSandboxNoopAgentSuccessful(t *testing.T) { config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -289,7 +289,7 @@ func TestCreateSandboxHyperstartAgentSuccessful(t *testing.T) { proxy.Start() defer proxy.Stop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -330,7 +330,7 @@ func TestCreateSandboxKataAgentSuccessful(t *testing.T) { } defer kataProxyMock.Stop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -347,7 +347,7 @@ func TestCreateSandboxFailing(t *testing.T) { config := SandboxConfig{} - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p.(*Sandbox) != nil || err == nil { t.Fatal() } @@ -358,7 +358,7 @@ func TestDeleteSandboxNoopAgentSuccessful(t *testing.T) { config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -401,7 +401,7 @@ func TestDeleteSandboxHyperstartAgentSuccessful(t *testing.T) { proxy.Start() defer proxy.Stop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -452,7 +452,7 @@ func TestDeleteSandboxKataAgentSuccessful(t *testing.T) { } defer kataProxyMock.Stop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -751,7 +751,7 @@ func TestRunSandboxNoopAgentSuccessful(t *testing.T) { config := newTestSandboxConfigNoop() - p, err := RunSandbox(config) + p, err := RunSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -787,7 +787,7 @@ func TestRunSandboxHyperstartAgentSuccessful(t *testing.T) { hyperConfig := config.AgentConfig.(HyperConfig) config.AgentConfig = hyperConfig - p, err := RunSandbox(config) + p, err := RunSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -833,7 +833,7 @@ func TestRunSandboxKataAgentSuccessful(t *testing.T) { } defer kataProxyMock.Stop() - p, err := RunSandbox(config) + p, err := RunSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -855,7 +855,7 @@ func TestRunSandboxFailing(t *testing.T) { config := SandboxConfig{} - p, err := RunSandbox(config) + p, err := RunSandbox(config, nil) if p != nil || err == nil { t.Fatal() } @@ -868,7 +868,7 @@ func TestListSandboxSuccessful(t *testing.T) { config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -928,7 +928,7 @@ func 
TestStatusSandboxSuccessfulStateReady(t *testing.T) { }, } - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -985,7 +985,7 @@ func TestStatusSandboxSuccessfulStateRunning(t *testing.T) { }, } - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1014,7 +1014,7 @@ func TestStatusSandboxFailingFetchSandboxConfig(t *testing.T) { config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1034,7 +1034,7 @@ func TestStatusPodSandboxFailingFetchSandboxState(t *testing.T) { config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1069,7 +1069,7 @@ func TestCreateContainerSuccessful(t *testing.T) { contID := "100" config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1100,7 +1100,7 @@ func TestCreateContainerFailingNoSandbox(t *testing.T) { contID := "100" config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1130,7 +1130,7 @@ func TestDeleteContainerSuccessful(t *testing.T) { contID := "100" config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1184,7 +1184,7 @@ func TestDeleteContainerFailingNoContainer(t *testing.T) { contID := "100" config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1249,7 +1249,7 @@ func TestStartContainerFailingNoContainer(t *testing.T) { contID := "100" config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1272,7 +1272,7 @@ func TestStartContainerFailingSandboxNotStarted(t *testing.T) { contID := "100" config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1493,7 +1493,7 @@ func TestStopContainerFailingNoContainer(t *testing.T) { contID := "100" config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1674,7 +1674,7 @@ func TestEnterContainerFailingNoContainer(t *testing.T) { contID := "100" config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1731,7 +1731,7 @@ func TestStatusContainerSuccessful(t *testing.T) { contID := "100" config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1782,7 +1782,7 @@ func TestStatusContainerStateReady(t *testing.T) { contID := "101" config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1845,7 +1845,7 @@ func TestStatusContainerStateRunning(t *testing.T) { contID := "101" config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { 
t.Fatal(err) } @@ -1917,7 +1917,7 @@ func TestStatusContainerFailing(t *testing.T) { contID := "100" config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1940,7 +1940,7 @@ func TestStatsContainerFailing(t *testing.T) { contID := "100" config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if p == nil || err != nil { t.Fatal(err) } @@ -1973,7 +1973,7 @@ func TestStatsContainer(t *testing.T) { assert.Error(err) config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) assert.NoError(err) assert.NotNil(p) @@ -2023,7 +2023,7 @@ func TestProcessListContainer(t *testing.T) { assert.Error(err) config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) assert.NoError(err) assert.NotNil(p) @@ -2117,7 +2117,7 @@ func createAndStartSandbox(config SandboxConfig) (sandbox VCSandbox, sandboxDir err error) { // Create sandbox - sandbox, err = CreateSandbox(config) + sandbox, err = CreateSandbox(config, nil) if sandbox == nil || err != nil { return nil, "", err } @@ -2158,7 +2158,7 @@ func createStartStopDeleteSandbox(b *testing.B, sandboxConfig SandboxConfig) { func createStartStopDeleteContainers(b *testing.B, sandboxConfig SandboxConfig, contConfigs []ContainerConfig) { // Create sandbox - p, err := CreateSandbox(sandboxConfig) + p, err := CreateSandbox(sandboxConfig, nil) if err != nil { b.Fatalf("Could not create sandbox: %s", err) } @@ -2326,7 +2326,7 @@ func TestFetchSandbox(t *testing.T) { config := newTestSandboxConfigNoop() - s, err := CreateSandbox(config) + s, err := CreateSandbox(config, nil) if s == nil || err != nil { t.Fatal(err) } @@ -2348,7 +2348,7 @@ func TestReleaseSandbox(t *testing.T) { config := newTestSandboxConfigNoop() - s, err := CreateSandbox(config) + s, err := CreateSandbox(config, nil) if s == nil || err != nil { t.Fatal(err) } diff --git a/virtcontainers/example_pod_run_test.go b/virtcontainers/example_pod_run_test.go index 7ac0658cf..55e32e03e 100644 --- a/virtcontainers/example_pod_run_test.go +++ b/virtcontainers/example_pod_run_test.go @@ -68,7 +68,7 @@ func Example_createAndStartSandbox() { Containers: []vc.ContainerConfig{container}, } - _, err := vc.RunSandbox(sandboxConfig) + _, err := vc.RunSandbox(sandboxConfig, nil) if err != nil { fmt.Printf("Could not run sandbox: %s", err) } diff --git a/virtcontainers/hack/virtc/main.go b/virtcontainers/hack/virtc/main.go index f4aa95a07..cf6457f9f 100644 --- a/virtcontainers/hack/virtc/main.go +++ b/virtcontainers/hack/virtc/main.go @@ -324,7 +324,7 @@ func runSandbox(context *cli.Context) error { return fmt.Errorf("Could not build sandbox config: %s", err) } - _, err = vc.RunSandbox(sandboxConfig) + _, err = vc.RunSandbox(sandboxConfig, nil) if err != nil { return fmt.Errorf("Could not run sandbox: %s", err) } @@ -338,7 +338,7 @@ func createSandbox(context *cli.Context) error { return fmt.Errorf("Could not build sandbox config: %s", err) } - p, err := vc.CreateSandbox(sandboxConfig) + p, err := vc.CreateSandbox(sandboxConfig, nil) if err != nil { return fmt.Errorf("Could not create sandbox: %s", err) } diff --git a/virtcontainers/implementation.go b/virtcontainers/implementation.go index f75348fa9..8e5f8fa03 100644 --- a/virtcontainers/implementation.go +++ b/virtcontainers/implementation.go @@ -18,6 +18,7 @@ import ( // VCImpl is the official virtcontainers function 
of the same name. type VCImpl struct { + factory Factory } // SetLogger implements the VC function of the same name. @@ -25,9 +26,14 @@ func (impl *VCImpl) SetLogger(logger logrus.FieldLogger) { SetLogger(logger) } +// SetFactory implements the VC function of the same name. +func (impl *VCImpl) SetFactory(factory Factory) { + impl.factory = factory +} + // CreateSandbox implements the VC function of the same name. func (impl *VCImpl) CreateSandbox(sandboxConfig SandboxConfig) (VCSandbox, error) { - return CreateSandbox(sandboxConfig) + return CreateSandbox(sandboxConfig, impl.factory) } // DeleteSandbox implements the VC function of the same name. @@ -47,7 +53,7 @@ func (impl *VCImpl) StopSandbox(sandboxID string) (VCSandbox, error) { // RunSandbox implements the VC function of the same name. func (impl *VCImpl) RunSandbox(sandboxConfig SandboxConfig) (VCSandbox, error) { - return RunSandbox(sandboxConfig) + return RunSandbox(sandboxConfig, impl.factory) } // ListSandbox implements the VC function of the same name. diff --git a/virtcontainers/interfaces.go b/virtcontainers/interfaces.go index 4a9f2a239..fc74b9799 100644 --- a/virtcontainers/interfaces.go +++ b/virtcontainers/interfaces.go @@ -16,6 +16,7 @@ import ( // VC is the Virtcontainers interface type VC interface { SetLogger(logger logrus.FieldLogger) + SetFactory(Factory) CreateSandbox(sandboxConfig SandboxConfig) (VCSandbox, error) DeleteSandbox(sandboxID string) (VCSandbox, error) diff --git a/virtcontainers/noop_agent_test.go b/virtcontainers/noop_agent_test.go index 1453bbfc6..7b30d425c 100644 --- a/virtcontainers/noop_agent_test.go +++ b/virtcontainers/noop_agent_test.go @@ -14,7 +14,7 @@ func testCreateNoopContainer() (*Sandbox, *Container, error) { contID := "100" config := newTestSandboxConfigNoop() - p, err := CreateSandbox(config) + p, err := CreateSandbox(config, nil) if err != nil { return nil, nil, err } diff --git a/virtcontainers/pkg/vcmock/mock.go b/virtcontainers/pkg/vcmock/mock.go index a4e0bcab3..c0413591b 100644 --- a/virtcontainers/pkg/vcmock/mock.go +++ b/virtcontainers/pkg/vcmock/mock.go @@ -35,6 +35,13 @@ func (m *VCMock) SetLogger(logger logrus.FieldLogger) { } } +// SetFactory implements the VC function of the same name. +func (m *VCMock) SetFactory(factory vc.Factory) { + if m.SetFactoryFunc != nil { + m.SetFactoryFunc(factory) + } +} + // CreateSandbox implements the VC function of the same name. 
func (m *VCMock) CreateSandbox(sandboxConfig vc.SandboxConfig) (vc.VCSandbox, error) { if m.CreateSandboxFunc != nil { diff --git a/virtcontainers/pkg/vcmock/mock_test.go b/virtcontainers/pkg/vcmock/mock_test.go index 22ae00b58..67c7bfe64 100644 --- a/virtcontainers/pkg/vcmock/mock_test.go +++ b/virtcontainers/pkg/vcmock/mock_test.go @@ -11,6 +11,7 @@ import ( "testing" vc "github.com/kata-containers/runtime/virtcontainers" + "github.com/kata-containers/runtime/virtcontainers/factory" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" ) @@ -20,7 +21,10 @@ const ( testContainerID = "testContainerID" ) -var loggerTriggered = 0 +var ( + loggerTriggered = 0 + factoryTriggered = 0 +) func TestVCImplementations(t *testing.T) { // official implementation @@ -675,3 +679,34 @@ func TestVCMockResumeContainer(t *testing.T) { assert.Error(err) assert.True(IsMockError(err)) } + +func TestVCMockSetVMFactory(t *testing.T) { + assert := assert.New(t) + + m := &VCMock{} + assert.Nil(m.SetFactoryFunc) + + hyperConfig := vc.HypervisorConfig{ + KernelPath: "foobar", + ImagePath: "foobar", + } + vmConfig := vc.VMConfig{ + HypervisorType: vc.MockHypervisor, + AgentType: vc.NoopAgentType, + HypervisorConfig: hyperConfig, + } + + f, err := factory.NewFactory(factory.Config{VMConfig: vmConfig}, false) + assert.Nil(err) + + assert.Equal(factoryTriggered, 0) + m.SetFactory(f) + assert.Equal(factoryTriggered, 0) + + m.SetFactoryFunc = func(factory vc.Factory) { + factoryTriggered = 1 + } + + m.SetFactory(f) + assert.Equal(factoryTriggered, 1) +} diff --git a/virtcontainers/pkg/vcmock/types.go b/virtcontainers/pkg/vcmock/types.go index c22352603..83b6546cc 100644 --- a/virtcontainers/pkg/vcmock/types.go +++ b/virtcontainers/pkg/vcmock/types.go @@ -35,7 +35,8 @@ type Container struct { // VCMock is a type that provides an implementation of the VC interface. // It is used for testing. type VCMock struct { - SetLoggerFunc func(logger logrus.FieldLogger) + SetLoggerFunc func(logger logrus.FieldLogger) + SetFactoryFunc func(factory vc.Factory) CreateSandboxFunc func(sandboxConfig vc.SandboxConfig) (vc.VCSandbox, error) DeleteSandboxFunc func(sandboxID string) (vc.VCSandbox, error) diff --git a/virtcontainers/sandbox.go b/virtcontainers/sandbox.go index 098bcaa32..316dbd72c 100644 --- a/virtcontainers/sandbox.go +++ b/virtcontainers/sandbox.go @@ -440,6 +440,7 @@ type Sandbox struct { id string sync.Mutex + factory Factory hypervisor hypervisor agent agent storage resourceStorage @@ -679,12 +680,12 @@ func createAssets(sandboxConfig *SandboxConfig) error { // It will create and store the sandbox structure, and then ask the hypervisor // to physically create that sandbox i.e. starts a VM for that sandbox to eventually // be started. 
-func createSandbox(sandboxConfig SandboxConfig) (*Sandbox, error) { +func createSandbox(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) { if err := createAssets(&sandboxConfig); err != nil { return nil, err } - s, err := newSandbox(sandboxConfig) + s, err := newSandbox(sandboxConfig, factory) if err != nil { return nil, err } @@ -718,7 +719,7 @@ func createSandbox(sandboxConfig SandboxConfig) (*Sandbox, error) { return s, nil } -func newSandbox(sandboxConfig SandboxConfig) (*Sandbox, error) { +func newSandbox(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) { if sandboxConfig.valid() == false { return nil, fmt.Errorf("Invalid sandbox configuration") } @@ -734,6 +735,7 @@ func newSandbox(sandboxConfig SandboxConfig) (*Sandbox, error) { s := &Sandbox{ id: sandboxConfig.ID, + factory: factory, hypervisor: hypervisor, agent: agent, storage: &filesystem{}, @@ -821,7 +823,8 @@ func fetchSandbox(sandboxID string) (sandbox *Sandbox, err error) { return nil, err } - sandbox, err = createSandbox(config) + // fetchSandbox is not suppose to create new sandbox VM. + sandbox, err = createSandbox(config, nil) if err != nil { return nil, fmt.Errorf("failed to create sandbox with config %+v: %v", config, err) } @@ -935,7 +938,28 @@ func (s *Sandbox) removeNetwork() error { func (s *Sandbox) startVM() error { s.Logger().Info("Starting VM") + // FIXME: This would break cached VMs. We need network hotplug and move + // oci hooks and netns handling to cli. See #273. if err := s.network.run(s.networkNS.NetNsPath, func() error { + if s.factory != nil { + vm, err := s.factory.GetVM(VMConfig{ + HypervisorType: s.config.HypervisorType, + HypervisorConfig: s.config.HypervisorConfig, + AgentType: s.config.AgentType, + AgentConfig: s.config.AgentConfig, + }) + if err != nil { + return err + } + err = vm.assignSandbox(s) + if err != nil { + return err + } + // FIXME: factory vm needs network hotplug to add Nics. + s.networkNS.NetNsPath = "" + return nil + } + return s.hypervisor.startSandbox() }); err != nil { return err diff --git a/virtcontainers/sandbox_test.go b/virtcontainers/sandbox_test.go index eec5a0d2a..d2f351742 100644 --- a/virtcontainers/sandbox_test.go +++ b/virtcontainers/sandbox_test.go @@ -52,7 +52,7 @@ func testCreateSandbox(t *testing.T, id string, Containers: containers, } - sandbox, err := createSandbox(sconfig) + sandbox, err := createSandbox(sconfig, nil) if err != nil { return nil, fmt.Errorf("Could not create sandbox: %s", err) } From 0309e59cf860266ca6484e0679f54d9e2dcab5ce Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 13 Jul 2018 17:58:10 +0800 Subject: [PATCH 09/11] cli: add vm factory management subcommand Add enable_template option to the config file. When it is set, enable the vm template factory. cache factory cannot be used by kata cli directly because it requires a running daemon to maintain the cache VMs. `kata-runtime factory init` would initialize the vm factory and `kata-runtime factory destroy` would destroy the vm factory. When configured, a vm factory is loaded before creating new sandboxes. 
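
For example, with enable_template = true set in the new [factory] section of
configuration.toml, "kata-runtime factory init" pre-creates the template VM
and "kata-runtime factory destroy" tears it down; container creation then
clones new sandbox VMs from that template.
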
Signed-off-by: Peng Tao --- cli/config.go | 15 ++++ cli/config/configuration.toml.in | 12 ++++ cli/config_test.go | 16 +++++ cli/create.go | 20 ++++++ cli/factory.go | 98 ++++++++++++++++++++++++++ cli/factory_test.go | 117 +++++++++++++++++++++++++++++++ cli/main.go | 1 + virtcontainers/pkg/oci/utils.go | 8 +++ 8 files changed, 287 insertions(+) create mode 100644 cli/factory.go create mode 100644 cli/factory_test.go diff --git a/cli/config.go b/cli/config.go index cc6e9bb00..a800f19f7 100644 --- a/cli/config.go +++ b/cli/config.go @@ -62,6 +62,11 @@ type tomlConfig struct { Shim map[string]shim Agent map[string]agent Runtime runtime + Factory factory +} + +type factory struct { + Template bool `toml:"enable_template"` } type hypervisor struct { @@ -353,6 +358,10 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) { }, nil } +func newFactoryConfig(f factory) (oci.FactoryConfig, error) { + return oci.FactoryConfig{Template: f.Template}, nil +} + func newShimConfig(s shim) (vc.ShimConfig, error) { path, err := s.path() if err != nil { @@ -423,6 +432,12 @@ func updateRuntimeConfig(configPath string, tomlConf tomlConfig, config *oci.Run config.ShimConfig = shConfig } + fConfig, err := newFactoryConfig(tomlConf.Factory) + if err != nil { + return fmt.Errorf("%v: %v", configPath, err) + } + config.FactoryConfig = fConfig + return nil } diff --git a/cli/config/configuration.toml.in b/cli/config/configuration.toml.in index b6e91a31a..b7517284e 100644 --- a/cli/config/configuration.toml.in +++ b/cli/config/configuration.toml.in @@ -134,6 +134,18 @@ enable_iothreads = @DEFENABLEIOTHREADS@ # used for 9p packet payload. #msize_9p = @DEFMSIZE9P@ +[factory] +# VM templating support. Once enabled, new VMs are created from template +# using vm cloning. They will share the same initial kernel, initramfs and +# agent memory by mapping it readonly. It helps speeding up new container +# creation and saves a lot of memory if there are many kata containers running +# on the same host. +# +# When disabled, new VMs are created from scratch. 
+# +# Default false +#enable_template = true + [proxy.@PROJECT_TYPE@] path = "@PROXYPATH@" diff --git a/cli/config_test.go b/cli/config_test.go index c301da3ef..9b79d5bab 100644 --- a/cli/config_test.go +++ b/cli/config_test.go @@ -1146,3 +1146,19 @@ func TestUpdateRuntimeConfigurationVMConfig(t *testing.T) { assert.Equal(expectedVMConfig, config.VMConfig) } + +func TestUpdateRuntimeConfigurationFactoryConfig(t *testing.T) { + assert := assert.New(t) + + config := oci.RuntimeConfig{} + expectedFactoryConfig := oci.FactoryConfig{ + Template: true, + } + + tomlConf := tomlConfig{Factory: factory{Template: true}} + + err := updateRuntimeConfig("", tomlConf, &config) + assert.NoError(err) + + assert.Equal(expectedFactoryConfig, config.FactoryConfig) +} diff --git a/cli/create.go b/cli/create.go index 1ff6cbcf5..982d5c31a 100644 --- a/cli/create.go +++ b/cli/create.go @@ -15,6 +15,7 @@ import ( "strings" vc "github.com/kata-containers/runtime/virtcontainers" + vf "github.com/kata-containers/runtime/virtcontainers/factory" "github.com/kata-containers/runtime/virtcontainers/pkg/oci" "github.com/urfave/cli" ) @@ -106,6 +107,25 @@ func create(containerID, bundlePath, console, pidFilePath string, detach bool, return err } + if runtimeConfig.FactoryConfig.Template { + factoryConfig := vf.Config{ + Template: true, + VMConfig: vc.VMConfig{ + HypervisorType: runtimeConfig.HypervisorType, + HypervisorConfig: runtimeConfig.HypervisorConfig, + AgentType: runtimeConfig.AgentType, + AgentConfig: runtimeConfig.AgentConfig, + }, + } + kataLog.WithField("factory", factoryConfig).Info("load vm factory") + f, err := vf.NewFactory(factoryConfig, true) + if err != nil { + kataLog.WithError(err).Info("load vm factory failed") + } else { + vci.SetFactory(f) + } + } + disableOutput := noNeedForOutput(detach, ociSpec.Process.Terminal) var process vc.Process diff --git a/cli/factory.go b/cli/factory.go new file mode 100644 index 000000000..c2c1adcff --- /dev/null +++ b/cli/factory.go @@ -0,0 +1,98 @@ +// Copyright (c) 2018 HyperHQ Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 +// + +package main + +import ( + "errors" + "fmt" + + vc "github.com/kata-containers/runtime/virtcontainers" + vf "github.com/kata-containers/runtime/virtcontainers/factory" + "github.com/kata-containers/runtime/virtcontainers/pkg/oci" + "github.com/urfave/cli" +) + +var factorySubCmds = []cli.Command{ + initFactoryCommand, + destroyFactoryCommand, +} + +var factoryCLICommand = cli.Command{ + Name: "factory", + Usage: "manage vm factory", + Subcommands: factorySubCmds, + Action: func(context *cli.Context) { + cli.ShowSubcommandHelp(context) + }, +} + +var initFactoryCommand = cli.Command{ + Name: "init", + Usage: "initialize a VM factory based on kata-runtime configuration", + Action: func(context *cli.Context) error { + runtimeConfig, ok := context.App.Metadata["runtimeConfig"].(oci.RuntimeConfig) + if !ok { + return errors.New("invalid runtime config") + } + + if runtimeConfig.FactoryConfig.Template { + factoryConfig := vf.Config{ + Template: true, + VMConfig: vc.VMConfig{ + HypervisorType: runtimeConfig.HypervisorType, + HypervisorConfig: runtimeConfig.HypervisorConfig, + AgentType: runtimeConfig.AgentType, + AgentConfig: runtimeConfig.AgentConfig, + }, + } + kataLog.WithField("factory", factoryConfig).Info("create vm factory") + _, err := vf.NewFactory(factoryConfig, false) + if err != nil { + kataLog.WithError(err).Error("create vm factory failed") + return err + } + fmt.Println("vm factory initialized") + } else { + kataLog.Error("vm factory is not enabled") + fmt.Println("vm factory is not enabled") + } + + return nil + }, +} + +var destroyFactoryCommand = cli.Command{ + Name: "destroy", + Usage: "destroy the VM factory", + Action: func(context *cli.Context) error { + runtimeConfig, ok := context.App.Metadata["runtimeConfig"].(oci.RuntimeConfig) + if !ok { + return errors.New("invalid runtime config") + } + + if runtimeConfig.FactoryConfig.Template { + factoryConfig := vf.Config{ + Template: true, + VMConfig: vc.VMConfig{ + HypervisorType: runtimeConfig.HypervisorType, + HypervisorConfig: runtimeConfig.HypervisorConfig, + AgentType: runtimeConfig.AgentType, + AgentConfig: runtimeConfig.AgentConfig, + }, + } + kataLog.WithField("factory", factoryConfig).Info("load vm factory") + f, err := vf.NewFactory(factoryConfig, true) + if err != nil { + kataLog.WithError(err).Error("load vm factory failed") + // ignore error + } else { + f.CloseFactory() + } + } + fmt.Println("vm factory destroyed") + return nil + }, +} diff --git a/cli/factory_test.go b/cli/factory_test.go new file mode 100644 index 000000000..ccf48856d --- /dev/null +++ b/cli/factory_test.go @@ -0,0 +1,117 @@ +// Copyright (c) 2018 HyperHQ Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 +// + +package main + +import ( + "flag" + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/urfave/cli" + + vc "github.com/kata-containers/runtime/virtcontainers" +) + +func TestFactoryCLIFunctionNoRuntimeConfig(t *testing.T) { + assert := assert.New(t) + + app := cli.NewApp() + ctx := cli.NewContext(app, nil, nil) + app.Name = "foo" + ctx.App.Metadata = map[string]interface{}{ + "foo": "bar", + } + + fn, ok := initFactoryCommand.Action.(func(context *cli.Context) error) + assert.True(ok) + err := fn(ctx) + // no runtime config in the Metadata + assert.Error(err) + + fn, ok = destroyFactoryCommand.Action.(func(context *cli.Context) error) + assert.True(ok) + err = fn(ctx) + // no runtime config in the Metadata + assert.Error(err) +} + +func TestFactoryCLIFunctionInit(t *testing.T) { + assert := assert.New(t) + + tmpdir, err := ioutil.TempDir("", "") + assert.NoError(err) + defer os.RemoveAll(tmpdir) + + runtimeConfig, err := newTestRuntimeConfig(tmpdir, testConsole, true) + assert.NoError(err) + + set := flag.NewFlagSet("", 0) + + set.String("console-socket", "", "") + + app := cli.NewApp() + ctx := cli.NewContext(app, set, nil) + app.Name = "foo" + + // No template + ctx.App.Metadata = map[string]interface{}{ + "runtimeConfig": runtimeConfig, + } + fn, ok := initFactoryCommand.Action.(func(context *cli.Context) error) + assert.True(ok) + err = fn(ctx) + assert.Nil(err) + + // With template + runtimeConfig.FactoryConfig.Template = true + runtimeConfig.HypervisorType = vc.MockHypervisor + runtimeConfig.AgentType = vc.NoopAgentType + ctx.App.Metadata["runtimeConfig"] = runtimeConfig + fn, ok = initFactoryCommand.Action.(func(context *cli.Context) error) + assert.True(ok) + err = fn(ctx) + assert.Nil(err) +} + +func TestFactoryCLIFunctionDestroy(t *testing.T) { + assert := assert.New(t) + + tmpdir, err := ioutil.TempDir("", "") + assert.NoError(err) + defer os.RemoveAll(tmpdir) + + runtimeConfig, err := newTestRuntimeConfig(tmpdir, testConsole, true) + assert.NoError(err) + + set := flag.NewFlagSet("", 0) + + set.String("console-socket", "", "") + + app := cli.NewApp() + ctx := cli.NewContext(app, set, nil) + app.Name = "foo" + + // No template + ctx.App.Metadata = map[string]interface{}{ + "runtimeConfig": runtimeConfig, + } + fn, ok := destroyFactoryCommand.Action.(func(context *cli.Context) error) + assert.True(ok) + err = fn(ctx) + assert.Nil(err) + + // With template + runtimeConfig.FactoryConfig.Template = true + runtimeConfig.HypervisorType = vc.MockHypervisor + runtimeConfig.AgentType = vc.NoopAgentType + ctx.App.Metadata["runtimeConfig"] = runtimeConfig + fn, ok = destroyFactoryCommand.Action.(func(context *cli.Context) error) + assert.True(ok) + err = fn(ctx) + assert.Nil(err) +} diff --git a/cli/main.go b/cli/main.go index ed3bcdf4d..76fbfb172 100644 --- a/cli/main.go +++ b/cli/main.go @@ -123,6 +123,7 @@ var runtimeCommands = []cli.Command{ // Kata Containers specific extensions kataCheckCLICommand, kataEnvCLICommand, + factoryCLICommand, } // runtimeBeforeSubcommands is the function to run before command-line diff --git a/virtcontainers/pkg/oci/utils.go b/virtcontainers/pkg/oci/utils.go index cc68cfc21..680354b1d 100644 --- a/virtcontainers/pkg/oci/utils.go +++ b/virtcontainers/pkg/oci/utils.go @@ -91,6 +91,12 @@ type CompatOCISpec struct { Process *CompatOCIProcess `json:"process,omitempty"` } +// FactoryConfig is a structure to set the VM factory configuration. 
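+// It mirrors the [factory] section of the runtime configuration file.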
+type FactoryConfig struct { + // Template enables VM templating support in VM factory. + Template bool +} + // RuntimeConfig aggregates all runtime specific settings type RuntimeConfig struct { VMConfig vc.Resources @@ -98,6 +104,8 @@ type RuntimeConfig struct { HypervisorType vc.HypervisorType HypervisorConfig vc.HypervisorConfig + FactoryConfig FactoryConfig + AgentType vc.AgentType AgentConfig interface{} From 7cdc0fe912fc1aae9f09a3734acdc7894f9164b9 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 13 Jul 2018 20:28:09 +0800 Subject: [PATCH 10/11] cli: do not set ip based kernel parameter For one thing, it is not used by any kata components. For another thing, it breaks vm factory hypervisor config check. Signed-off-by: Peng Tao --- cli/create.go | 11 +++-------- cli/create_test.go | 6 ++++-- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/cli/create.go b/cli/create.go index 982d5c31a..3bdfa7794 100644 --- a/cli/create.go +++ b/cli/create.go @@ -187,13 +187,8 @@ var systemdKernelParam = []vc.Param{ }, } -func getKernelParams(containerID string, needSystemd bool) []vc.Param { - p := []vc.Param{ - { - Key: "ip", - Value: fmt.Sprintf("::::::%s::off::", containerID), - }, - } +func getKernelParams(needSystemd bool) []vc.Param { + p := []vc.Param{} if needSystemd { p = append(p, systemdKernelParam...) @@ -209,7 +204,7 @@ func needSystemd(config vc.HypervisorConfig) bool { // setKernelParams adds the user-specified kernel parameters (from the // configuration file) to the defaults so that the former take priority. func setKernelParams(containerID string, runtimeConfig *oci.RuntimeConfig) error { - defaultKernelParams := getKernelParamsFunc(containerID, needSystemd(runtimeConfig.HypervisorConfig)) + defaultKernelParams := getKernelParamsFunc(needSystemd(runtimeConfig.HypervisorConfig)) if runtimeConfig.HypervisorConfig.Debug { strParams := vc.SerializeParams(defaultKernelParams, "=") diff --git a/cli/create_test.go b/cli/create_test.go index fd4dd892a..49ff3d34e 100644 --- a/cli/create_test.go +++ b/cli/create_test.go @@ -838,7 +838,7 @@ func TestCreateInvalidKernelParams(t *testing.T) { getKernelParamsFunc = savedFunc }() - getKernelParamsFunc = func(containerID string, needSystemd bool) []vc.Param { + getKernelParamsFunc = func(needSystemd bool) []vc.Param { return []vc.Param{ { Key: "", @@ -1135,7 +1135,9 @@ func TestSetKernelParams(t *testing.T) { err := setKernelParams(testContainerID, &config) assert.NoError(err) - assert.NotEmpty(config.HypervisorConfig.KernelParams) + if needSystemd(config.HypervisorConfig) { + assert.NotEmpty(config.HypervisorConfig.KernelParams) + } } func TestSetKernelParamsUserOptionTakesPriority(t *testing.T) { From 0a11230bfb95899f6988c7f9e8a055da7257217c Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Mon, 16 Jul 2018 17:32:08 +0800 Subject: [PATCH 11/11] factory: add UTs Add UTs to all factory components. 
Signed-off-by: Peng Tao --- virtcontainers/factory/cache/cache_test.go | 44 +++++ virtcontainers/factory/direct/direct_test.go | 43 +++++ virtcontainers/factory/factory_test.go | 164 +++++++++++++++++- .../factory/template/template_test.go | 70 ++++++++ 4 files changed, 319 insertions(+), 2 deletions(-) create mode 100644 virtcontainers/factory/cache/cache_test.go create mode 100644 virtcontainers/factory/direct/direct_test.go create mode 100644 virtcontainers/factory/template/template_test.go diff --git a/virtcontainers/factory/cache/cache_test.go b/virtcontainers/factory/cache/cache_test.go new file mode 100644 index 000000000..3ff1b32f6 --- /dev/null +++ b/virtcontainers/factory/cache/cache_test.go @@ -0,0 +1,44 @@ +// Copyright (c) 2018 HyperHQ Inc. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package cache + +import ( + "io/ioutil" + "testing" + + "github.com/stretchr/testify/assert" + + vc "github.com/kata-containers/runtime/virtcontainers" + "github.com/kata-containers/runtime/virtcontainers/factory/direct" +) + +func TestTemplateFactory(t *testing.T) { + assert := assert.New(t) + + testDir, _ := ioutil.TempDir("", "vmfactory-tmp-") + hyperConfig := vc.HypervisorConfig{ + KernelPath: testDir, + ImagePath: testDir, + } + vmConfig := vc.VMConfig{ + HypervisorType: vc.MockHypervisor, + AgentType: vc.NoopAgentType, + HypervisorConfig: hyperConfig, + } + + // New + f := New(2, direct.New(vmConfig)) + + // Config + assert.Equal(f.Config(), vmConfig) + + // GetBaseVM + _, err := f.GetBaseVM() + assert.Nil(err) + + // CloseFactory + f.CloseFactory() +} diff --git a/virtcontainers/factory/direct/direct_test.go b/virtcontainers/factory/direct/direct_test.go new file mode 100644 index 000000000..f12664c88 --- /dev/null +++ b/virtcontainers/factory/direct/direct_test.go @@ -0,0 +1,43 @@ +// Copyright (c) 2018 HyperHQ Inc. 
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package direct
+
+import (
+	"io/ioutil"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	vc "github.com/kata-containers/runtime/virtcontainers"
+)
+
+func TestTemplateFactory(t *testing.T) {
+	assert := assert.New(t)
+
+	testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
+	hyperConfig := vc.HypervisorConfig{
+		KernelPath: testDir,
+		ImagePath:  testDir,
+	}
+	vmConfig := vc.VMConfig{
+		HypervisorType:   vc.MockHypervisor,
+		AgentType:        vc.NoopAgentType,
+		HypervisorConfig: hyperConfig,
+	}
+
+	// New
+	f := New(vmConfig)
+
+	// Config
+	assert.Equal(f.Config(), vmConfig)
+
+	// GetBaseVM
+	_, err := f.GetBaseVM()
+	assert.Nil(err)
+
+	// CloseFactory
+	f.CloseFactory()
+}
diff --git a/virtcontainers/factory/factory_test.go b/virtcontainers/factory/factory_test.go
index ee2612843..c93d5ab31 100644
--- a/virtcontainers/factory/factory_test.go
+++ b/virtcontainers/factory/factory_test.go
@@ -32,18 +32,178 @@ func TestNewFactory(t *testing.T) {
 	_, err = NewFactory(config, false)
 	assert.Error(err)
 
-	testDir, err := ioutil.TempDir("", "vmfactory-tmp-")
-	assert.Nil(err)
+	testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
 
 	config.VMConfig.HypervisorConfig = vc.HypervisorConfig{
 		KernelPath: testDir,
 		ImagePath:  testDir,
 	}
 
+	// direct
 	_, err = NewFactory(config, false)
 	assert.Nil(err)
+	_, err = NewFactory(config, true)
+	assert.Nil(err)
+	// template
+	config.Template = true
+	_, err = NewFactory(config, false)
+	assert.Nil(err)
+	_, err = NewFactory(config, true)
+	assert.Error(err)
+
+	// Cache
 	config.Cache = 10
+	_, err = NewFactory(config, false)
+	assert.Nil(err)
+	_, err = NewFactory(config, true)
+	assert.Error(err)
+
+	config.Template = false
+	_, err = NewFactory(config, false)
+	assert.Nil(err)
 
 	_, err = NewFactory(config, true)
 	assert.Error(err)
 }
+
+func TestVMConfigValid(t *testing.T) {
+	assert := assert.New(t)
+
+	config := Config{}
+
+	err := config.validate()
+	assert.Error(err)
+
+	testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
+
+	config.VMConfig = vc.VMConfig{
+		HypervisorType:   vc.MockHypervisor,
+		AgentType:        vc.NoopAgentType,
+		HypervisorConfig: vc.HypervisorConfig{
+			KernelPath: testDir,
+			ImagePath:  testDir,
+		},
+	}
+
+	err = config.validate()
+	assert.Nil(err)
+}
+
+func TestCheckVMConfig(t *testing.T) {
+	assert := assert.New(t)
+
+	var config1, config2 vc.VMConfig
+
+	// default config should equal
+	err := checkVMConfig(config1, config2)
+	assert.Nil(err)
+
+	config1.HypervisorType = vc.MockHypervisor
+	err = checkVMConfig(config1, config2)
+	assert.Error(err)
+
+	config2.HypervisorType = vc.MockHypervisor
+	err = checkVMConfig(config1, config2)
+	assert.Nil(err)
+
+	config1.AgentType = vc.NoopAgentType
+	err = checkVMConfig(config1, config2)
+	assert.Error(err)
+
+	config2.AgentType = vc.NoopAgentType
+	err = checkVMConfig(config1, config2)
+	assert.Nil(err)
+
+	testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
+	config1.HypervisorConfig = vc.HypervisorConfig{
+		KernelPath: testDir,
+		ImagePath:  testDir,
+	}
+	err = checkVMConfig(config1, config2)
+	assert.Error(err)
+
+	config2.HypervisorConfig = vc.HypervisorConfig{
+		KernelPath: testDir,
+		ImagePath:  testDir,
+	}
+	err = checkVMConfig(config1, config2)
+	assert.Nil(err)
+}
+
+func TestFactoryGetVM(t *testing.T) {
+	assert := assert.New(t)
+
+	testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
+	hyperConfig := vc.HypervisorConfig{
+		KernelPath: testDir,
+		ImagePath:  testDir,
+	}
+	vmConfig := vc.VMConfig{
+		HypervisorType:   vc.MockHypervisor,
+		AgentType:        vc.NoopAgentType,
+		HypervisorConfig: hyperConfig,
+	}
+
+	// direct factory
+	f, err := NewFactory(Config{VMConfig: vmConfig}, false)
+	assert.Nil(err)
+
+	_, err = f.GetVM(vmConfig)
+	assert.Nil(err)
+
+	f.CloseFactory()
+
+	// template factory
+	f, err = NewFactory(Config{Template: true, VMConfig: vmConfig}, false)
+	assert.Nil(err)
+
+	_, err = f.GetVM(vmConfig)
+	assert.Nil(err)
+
+	f.CloseFactory()
+
+	// fetch template factory
+	f, err = NewFactory(Config{Template: true, VMConfig: vmConfig}, false)
+	assert.Nil(err)
+
+	_, err = NewFactory(Config{Template: true, VMConfig: vmConfig}, true)
+	assert.Error(err)
+
+	_, err = f.GetVM(vmConfig)
+	assert.Nil(err)
+
+	f.CloseFactory()
+
+	// cache factory over direct factory
+	f, err = NewFactory(Config{Cache: 2, VMConfig: vmConfig}, false)
+	assert.Nil(err)
+
+	_, err = f.GetVM(vmConfig)
+	assert.Nil(err)
+
+	f.CloseFactory()
+
+	// cache factory over template factory
+	f, err = NewFactory(Config{Template: true, Cache: 2, VMConfig: vmConfig}, false)
+	assert.Nil(err)
+
+	_, err = f.GetVM(vmConfig)
+	assert.Nil(err)
+
+	// CPU hotplug
+	vmConfig.HypervisorConfig.DefaultVCPUs++
+	_, err = f.GetVM(vmConfig)
+	assert.Nil(err)
+
+	// Memory hotplug
+	vmConfig.HypervisorConfig.DefaultMemSz += 128
+	_, err = f.GetVM(vmConfig)
+	assert.Nil(err)
+
+	// checkConfig fall back
+	vmConfig.HypervisorConfig.Mlock = true
+	_, err = f.GetVM(vmConfig)
+	assert.Nil(err)
+
+	f.CloseFactory()
+}
diff --git a/virtcontainers/factory/template/template_test.go b/virtcontainers/factory/template/template_test.go
new file mode 100644
index 000000000..a8b7c7ba7
--- /dev/null
+++ b/virtcontainers/factory/template/template_test.go
@@ -0,0 +1,70 @@
+// Copyright (c) 2018 HyperHQ Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package template
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	vc "github.com/kata-containers/runtime/virtcontainers"
+)
+
+func TestTemplateFactory(t *testing.T) {
+	assert := assert.New(t)
+
+	testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
+	hyperConfig := vc.HypervisorConfig{
+		KernelPath: testDir,
+		ImagePath:  testDir,
+	}
+	vmConfig := vc.VMConfig{
+		HypervisorType:   vc.MockHypervisor,
+		AgentType:        vc.NoopAgentType,
+		HypervisorConfig: hyperConfig,
+	}
+
+	// New
+	f := New(vmConfig)
+
+	// Config
+	assert.Equal(f.Config(), vmConfig)
+
+	// GetBaseVM
+	_, err := f.GetBaseVM()
+	assert.Nil(err)
+
+	// Fetch
+	tt := template{
+		statePath: testDir,
+		config:    vmConfig,
+	}
+
+	err = tt.checkTemplateVM()
+	assert.Error(err)
+
+	_, err = os.Create(tt.statePath + "/memory")
+	assert.Nil(err)
+	err = tt.checkTemplateVM()
+	assert.Error(err)
+
+	_, err = os.Create(tt.statePath + "/state")
+	assert.Nil(err)
+	err = tt.checkTemplateVM()
+	assert.Nil(err)
+
+	err = tt.createTemplateVM()
+	assert.Nil(err)
+
+	_, err = tt.GetBaseVM()
+	assert.Nil(err)
+
+	// CloseFactory
+	f.CloseFactory()
+	tt.CloseFactory()
+}
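
For reference, below is a minimal usage sketch of the factory API that the unit tests above exercise (NewFactory, GetVM, CloseFactory). It is not part of the patch series: the import path of the factory package is assumed from the package layout, and the mock hypervisor, noop agent and temporary kernel/image paths simply mirror the test setup.

// Illustrative sketch only -- not part of this patch series. It mirrors the
// factory API exercised by the unit tests above; the factory import path and
// the temporary-directory setup are assumptions taken from those tests.
package main

import (
	"fmt"
	"io/ioutil"

	vc "github.com/kata-containers/runtime/virtcontainers"
	"github.com/kata-containers/runtime/virtcontainers/factory"
)

func main() {
	// Stand-in kernel/image paths so the hypervisor config validates,
	// exactly as the tests do with a temporary directory.
	testDir, _ := ioutil.TempDir("", "vmfactory-example-")

	vmConfig := vc.VMConfig{
		HypervisorType: vc.MockHypervisor,
		AgentType:      vc.NoopAgentType,
		HypervisorConfig: vc.HypervisorConfig{
			KernelPath: testDir,
			ImagePath:  testDir,
		},
	}

	// Template plus Cache layers a cache factory on top of a template
	// factory; the second argument (false) creates the backing VM state
	// rather than fetching an existing one.
	f, err := factory.NewFactory(factory.Config{Template: true, Cache: 2, VMConfig: vmConfig}, false)
	if err != nil {
		fmt.Println("NewFactory:", err)
		return
	}
	defer f.CloseFactory()

	// GetVM hands out a VM derived from the factory's base VM; per
	// TestFactoryGetVM, requests whose config differs (e.g. more vCPUs
	// or memory) are still served.
	if _, err := f.GetVM(vmConfig); err != nil {
		fmt.Println("GetVM:", err)
	}
}

As TestNewFactory shows, passing true as the second argument of NewFactory succeeds only for the plain direct configuration in these tests; the template and cache configurations return an error in that case.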