Merge pull request #303 from bergwolf/vmfactory

add vm factory support
Authored by Lai Jiangshan on 2018-07-22 06:47:34 +08:00, committed by GitHub.
49 changed files with 2,387 additions and 279 deletions

View File

@@ -62,6 +62,11 @@ type tomlConfig struct {
Shim map[string]shim
Agent map[string]agent
Runtime runtime
Factory factory
}
type factory struct {
Template bool `toml:"enable_template"`
}
type hypervisor struct {
@@ -353,6 +358,10 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
}, nil
}
func newFactoryConfig(f factory) (oci.FactoryConfig, error) {
return oci.FactoryConfig{Template: f.Template}, nil
}
func newShimConfig(s shim) (vc.ShimConfig, error) {
path, err := s.path()
if err != nil {
@@ -423,6 +432,12 @@ func updateRuntimeConfig(configPath string, tomlConf tomlConfig, config *oci.Run
config.ShimConfig = shConfig
}
fConfig, err := newFactoryConfig(tomlConf.Factory)
if err != nil {
return fmt.Errorf("%v: %v", configPath, err)
}
config.FactoryConfig = fConfig
return nil
}
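A minimal sketch of how the new [factory] section reaches the runtime, assuming the BurntSushi/toml decoder implied by the struct tags above; the types mirror the tomlConfig and factory definitions from this file:

package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
)

type factory struct {
    Template bool `toml:"enable_template"`
}

type tomlConfig struct {
    Factory factory
}

func main() {
    const conf = "[factory]\nenable_template = true\n"

    var tc tomlConfig
    // Decode parses the TOML document into the tagged struct.
    if _, err := toml.Decode(conf, &tc); err != nil {
        panic(err)
    }
    fmt.Println(tc.Factory.Template) // prints: true
}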

View File

@@ -134,6 +134,18 @@ enable_iothreads = @DEFENABLEIOTHREADS@
# used for 9p packet payload.
#msize_9p = @DEFMSIZE9P@
[factory]
# VM templating support. Once enabled, new VMs are created from a template
# using vm cloning. They will share the same initial kernel, initramfs and
# agent memory by mapping it read-only. This helps speed up new container
# creation and saves a lot of memory if there are many Kata Containers running
# on the same host.
#
# When disabled, new VMs are created from scratch.
#
# Default false
#enable_template = true
[proxy.@PROJECT_TYPE@]
path = "@PROXYPATH@"

View File

@@ -1146,3 +1146,19 @@ func TestUpdateRuntimeConfigurationVMConfig(t *testing.T) {
assert.Equal(expectedVMConfig, config.VMConfig)
}
func TestUpdateRuntimeConfigurationFactoryConfig(t *testing.T) {
assert := assert.New(t)
config := oci.RuntimeConfig{}
expectedFactoryConfig := oci.FactoryConfig{
Template: true,
}
tomlConf := tomlConfig{Factory: factory{Template: true}}
err := updateRuntimeConfig("", tomlConf, &config)
assert.NoError(err)
assert.Equal(expectedFactoryConfig, config.FactoryConfig)
}

View File

@@ -15,6 +15,7 @@ import (
"strings"
vc "github.com/kata-containers/runtime/virtcontainers"
vf "github.com/kata-containers/runtime/virtcontainers/factory"
"github.com/kata-containers/runtime/virtcontainers/pkg/oci"
"github.com/urfave/cli"
)
@@ -106,6 +107,25 @@ func create(containerID, bundlePath, console, pidFilePath string, detach bool,
return err
}
if runtimeConfig.FactoryConfig.Template {
factoryConfig := vf.Config{
Template: true,
VMConfig: vc.VMConfig{
HypervisorType: runtimeConfig.HypervisorType,
HypervisorConfig: runtimeConfig.HypervisorConfig,
AgentType: runtimeConfig.AgentType,
AgentConfig: runtimeConfig.AgentConfig,
},
}
kataLog.WithField("factory", factoryConfig).Info("load vm factory")
f, err := vf.NewFactory(factoryConfig, true)
if err != nil {
kataLog.WithError(err).Info("load vm factory failed")
} else {
vci.SetFactory(f)
}
}
disableOutput := noNeedForOutput(detach, ociSpec.Process.Terminal)
var process vc.Process
@@ -167,13 +187,8 @@ var systemdKernelParam = []vc.Param{
},
}
func getKernelParams(containerID string, needSystemd bool) []vc.Param {
p := []vc.Param{
{
Key: "ip",
Value: fmt.Sprintf("::::::%s::off::", containerID),
},
}
func getKernelParams(needSystemd bool) []vc.Param {
p := []vc.Param{}
if needSystemd {
p = append(p, systemdKernelParam...)
@@ -189,7 +204,7 @@ func needSystemd(config vc.HypervisorConfig) bool {
// setKernelParams adds the user-specified kernel parameters (from the
// configuration file) to the defaults so that the former take priority.
func setKernelParams(containerID string, runtimeConfig *oci.RuntimeConfig) error {
defaultKernelParams := getKernelParamsFunc(containerID, needSystemd(runtimeConfig.HypervisorConfig))
defaultKernelParams := getKernelParamsFunc(needSystemd(runtimeConfig.HypervisorConfig))
if runtimeConfig.HypervisorConfig.Debug {
strParams := vc.SerializeParams(defaultKernelParams, "=")

View File

@@ -838,7 +838,7 @@ func TestCreateInvalidKernelParams(t *testing.T) {
getKernelParamsFunc = savedFunc
}()
getKernelParamsFunc = func(containerID string, needSystemd bool) []vc.Param {
getKernelParamsFunc = func(needSystemd bool) []vc.Param {
return []vc.Param{
{
Key: "",
@@ -1135,7 +1135,9 @@ func TestSetKernelParams(t *testing.T) {
err := setKernelParams(testContainerID, &config)
assert.NoError(err)
assert.NotEmpty(config.HypervisorConfig.KernelParams)
if needSystemd(config.HypervisorConfig) {
assert.NotEmpty(config.HypervisorConfig.KernelParams)
}
}
func TestSetKernelParamsUserOptionTakesPriority(t *testing.T) {

cli/factory.go (new file, 98 lines)
View File

@@ -0,0 +1,98 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package main
import (
"errors"
"fmt"
vc "github.com/kata-containers/runtime/virtcontainers"
vf "github.com/kata-containers/runtime/virtcontainers/factory"
"github.com/kata-containers/runtime/virtcontainers/pkg/oci"
"github.com/urfave/cli"
)
var factorySubCmds = []cli.Command{
initFactoryCommand,
destroyFactoryCommand,
}
var factoryCLICommand = cli.Command{
Name: "factory",
Usage: "manage vm factory",
Subcommands: factorySubCmds,
Action: func(context *cli.Context) {
cli.ShowSubcommandHelp(context)
},
}
var initFactoryCommand = cli.Command{
Name: "init",
Usage: "initialize a VM factory based on kata-runtime configuration",
Action: func(context *cli.Context) error {
runtimeConfig, ok := context.App.Metadata["runtimeConfig"].(oci.RuntimeConfig)
if !ok {
return errors.New("invalid runtime config")
}
if runtimeConfig.FactoryConfig.Template {
factoryConfig := vf.Config{
Template: true,
VMConfig: vc.VMConfig{
HypervisorType: runtimeConfig.HypervisorType,
HypervisorConfig: runtimeConfig.HypervisorConfig,
AgentType: runtimeConfig.AgentType,
AgentConfig: runtimeConfig.AgentConfig,
},
}
kataLog.WithField("factory", factoryConfig).Info("create vm factory")
_, err := vf.NewFactory(factoryConfig, false)
if err != nil {
kataLog.WithError(err).Error("create vm factory failed")
return err
}
fmt.Println("vm factory initialized")
} else {
kataLog.Error("vm factory is not enabled")
fmt.Println("vm factory is not enabled")
}
return nil
},
}
var destroyFactoryCommand = cli.Command{
Name: "destroy",
Usage: "destroy the VM factory",
Action: func(context *cli.Context) error {
runtimeConfig, ok := context.App.Metadata["runtimeConfig"].(oci.RuntimeConfig)
if !ok {
return errors.New("invalid runtime config")
}
if runtimeConfig.FactoryConfig.Template {
factoryConfig := vf.Config{
Template: true,
VMConfig: vc.VMConfig{
HypervisorType: runtimeConfig.HypervisorType,
HypervisorConfig: runtimeConfig.HypervisorConfig,
AgentType: runtimeConfig.AgentType,
AgentConfig: runtimeConfig.AgentConfig,
},
}
kataLog.WithField("factory", factoryConfig).Info("load vm factory")
f, err := vf.NewFactory(factoryConfig, true)
if err != nil {
kataLog.WithError(err).Error("load vm factory failed")
// ignore error
} else {
f.CloseFactory()
}
}
fmt.Println("vm factory destroyed")
return nil
},
}
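In practice, once enable_template is set in the configuration file, an operator can prepare the template VM ahead of time with `kata-runtime factory init` and tear it down with `kata-runtime factory destroy`. Both subcommands read the same runtime configuration as container creation, so the factory they manage is the one create later attaches to with fetchOnly set.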

cli/factory_test.go (new file, 117 lines)
View File

@@ -0,0 +1,117 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package main
import (
"flag"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/urfave/cli"
vc "github.com/kata-containers/runtime/virtcontainers"
)
func TestFactoryCLIFunctionNoRuntimeConfig(t *testing.T) {
assert := assert.New(t)
app := cli.NewApp()
ctx := cli.NewContext(app, nil, nil)
app.Name = "foo"
ctx.App.Metadata = map[string]interface{}{
"foo": "bar",
}
fn, ok := initFactoryCommand.Action.(func(context *cli.Context) error)
assert.True(ok)
err := fn(ctx)
// no runtime config in the Metadata
assert.Error(err)
fn, ok = destroyFactoryCommand.Action.(func(context *cli.Context) error)
assert.True(ok)
err = fn(ctx)
// no runtime config in the Metadata
assert.Error(err)
}
func TestFactoryCLIFunctionInit(t *testing.T) {
assert := assert.New(t)
tmpdir, err := ioutil.TempDir("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
runtimeConfig, err := newTestRuntimeConfig(tmpdir, testConsole, true)
assert.NoError(err)
set := flag.NewFlagSet("", 0)
set.String("console-socket", "", "")
app := cli.NewApp()
ctx := cli.NewContext(app, set, nil)
app.Name = "foo"
// No template
ctx.App.Metadata = map[string]interface{}{
"runtimeConfig": runtimeConfig,
}
fn, ok := initFactoryCommand.Action.(func(context *cli.Context) error)
assert.True(ok)
err = fn(ctx)
assert.Nil(err)
// With template
runtimeConfig.FactoryConfig.Template = true
runtimeConfig.HypervisorType = vc.MockHypervisor
runtimeConfig.AgentType = vc.NoopAgentType
ctx.App.Metadata["runtimeConfig"] = runtimeConfig
fn, ok = initFactoryCommand.Action.(func(context *cli.Context) error)
assert.True(ok)
err = fn(ctx)
assert.Nil(err)
}
func TestFactoryCLIFunctionDestroy(t *testing.T) {
assert := assert.New(t)
tmpdir, err := ioutil.TempDir("", "")
assert.NoError(err)
defer os.RemoveAll(tmpdir)
runtimeConfig, err := newTestRuntimeConfig(tmpdir, testConsole, true)
assert.NoError(err)
set := flag.NewFlagSet("", 0)
set.String("console-socket", "", "")
app := cli.NewApp()
ctx := cli.NewContext(app, set, nil)
app.Name = "foo"
// No template
ctx.App.Metadata = map[string]interface{}{
"runtimeConfig": runtimeConfig,
}
fn, ok := destroyFactoryCommand.Action.(func(context *cli.Context) error)
assert.True(ok)
err = fn(ctx)
assert.Nil(err)
// With template
runtimeConfig.FactoryConfig.Template = true
runtimeConfig.HypervisorType = vc.MockHypervisor
runtimeConfig.AgentType = vc.NoopAgentType
ctx.App.Metadata["runtimeConfig"] = runtimeConfig
fn, ok = destroyFactoryCommand.Action.(func(context *cli.Context) error)
assert.True(ok)
err = fn(ctx)
assert.Nil(err)
}

View File

@@ -123,6 +123,7 @@ var runtimeCommands = []cli.Command{
// Kata Containers specific extensions
kataCheckCLICommand,
kataEnvCLICommand,
factoryCLICommand,
}
// runtimeBeforeSubcommands is the function to run before command-line

View File

@@ -96,20 +96,20 @@ func newAgent(agentType AgentType) agent {
}
// newAgentConfig returns an agent config from a generic SandboxConfig interface.
func newAgentConfig(config SandboxConfig) interface{} {
switch config.AgentType {
func newAgentConfig(agentType AgentType, agentConfig interface{}) interface{} {
switch agentType {
case NoopAgentType:
return nil
case HyperstartAgent:
var hyperConfig HyperConfig
err := mapstructure.Decode(config.AgentConfig, &hyperConfig)
err := mapstructure.Decode(agentConfig, &hyperConfig)
if err != nil {
return err
}
return hyperConfig
case KataContainersAgent:
var kataAgentConfig KataAgentConfig
err := mapstructure.Decode(config.AgentConfig, &kataAgentConfig)
err := mapstructure.Decode(agentConfig, &kataAgentConfig)
if err != nil {
return err
}
@@ -209,4 +209,13 @@ type agent interface {
// resumeContainer will resume a paused container
resumeContainer(sandbox *Sandbox, c Container) error
// configure will update agent settings based on provided arguments
configure(h hypervisor, id, sharePath string, builtin bool, config interface{}) error
// getVMPath will return the agent vm socket's directory path
getVMPath(id string) string
// getSharePath will return the agent 9pfs share mount path
getSharePath(id string) string
}
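The three new methods decouple agent setup from a live Sandbox: configure works against a bare hypervisor, a VM id and a share path, while getVMPath and getSharePath derive the per-VM socket directory and the 9pfs share location from an id alone. The VM factory depends on this split, because a template VM is configured before any sandbox exists; each agent's createSandbox is then reimplemented on top of its own configure.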

View File

@@ -99,7 +99,7 @@ func TestNewAgentFromUnknownAgentType(t *testing.T) {
}
func testNewAgentConfig(t *testing.T, config SandboxConfig, expected interface{}) {
agentConfig := newAgentConfig(config)
agentConfig := newAgentConfig(config.AgentType, config.AgentConfig)
if reflect.DeepEqual(agentConfig, expected) == false {
t.Fatal()
}

View File

@@ -34,13 +34,13 @@ func SetLogger(logger logrus.FieldLogger) {
// CreateSandbox is the virtcontainers sandbox creation entry point.
// CreateSandbox creates a sandbox and its containers. It does not start them.
func CreateSandbox(sandboxConfig SandboxConfig) (VCSandbox, error) {
return createSandboxFromConfig(sandboxConfig)
func CreateSandbox(sandboxConfig SandboxConfig, factory Factory) (VCSandbox, error) {
return createSandboxFromConfig(sandboxConfig, factory)
}
func createSandboxFromConfig(sandboxConfig SandboxConfig) (*Sandbox, error) {
func createSandboxFromConfig(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) {
// Create the sandbox.
s, err := createSandbox(sandboxConfig)
s, err := createSandbox(sandboxConfig, factory)
if err != nil {
return nil, err
}
@@ -206,8 +206,8 @@ func StopSandbox(sandboxID string) (VCSandbox, error) {
// RunSandbox is the virtcontainers sandbox running entry point.
// RunSandbox creates a sandbox and its containers and then it starts them.
func RunSandbox(sandboxConfig SandboxConfig) (VCSandbox, error) {
s, err := createSandboxFromConfig(sandboxConfig)
func RunSandbox(sandboxConfig SandboxConfig, factory Factory) (VCSandbox, error) {
s, err := createSandboxFromConfig(sandboxConfig, factory)
if err != nil {
return nil, err
}

View File

@@ -244,7 +244,7 @@ func TestCreateSandboxNoopAgentSuccessful(t *testing.T) {
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -289,7 +289,7 @@ func TestCreateSandboxHyperstartAgentSuccessful(t *testing.T) {
proxy.Start()
defer proxy.Stop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -330,7 +330,7 @@ func TestCreateSandboxKataAgentSuccessful(t *testing.T) {
}
defer kataProxyMock.Stop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -347,7 +347,7 @@ func TestCreateSandboxFailing(t *testing.T) {
config := SandboxConfig{}
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p.(*Sandbox) != nil || err == nil {
t.Fatal()
}
@@ -358,7 +358,7 @@ func TestDeleteSandboxNoopAgentSuccessful(t *testing.T) {
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -401,7 +401,7 @@ func TestDeleteSandboxHyperstartAgentSuccessful(t *testing.T) {
proxy.Start()
defer proxy.Stop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -452,7 +452,7 @@ func TestDeleteSandboxKataAgentSuccessful(t *testing.T) {
}
defer kataProxyMock.Stop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -751,7 +751,7 @@ func TestRunSandboxNoopAgentSuccessful(t *testing.T) {
config := newTestSandboxConfigNoop()
p, err := RunSandbox(config)
p, err := RunSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -787,7 +787,7 @@ func TestRunSandboxHyperstartAgentSuccessful(t *testing.T) {
hyperConfig := config.AgentConfig.(HyperConfig)
config.AgentConfig = hyperConfig
p, err := RunSandbox(config)
p, err := RunSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -833,7 +833,7 @@ func TestRunSandboxKataAgentSuccessful(t *testing.T) {
}
defer kataProxyMock.Stop()
p, err := RunSandbox(config)
p, err := RunSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -855,7 +855,7 @@ func TestRunSandboxFailing(t *testing.T) {
config := SandboxConfig{}
p, err := RunSandbox(config)
p, err := RunSandbox(config, nil)
if p != nil || err == nil {
t.Fatal()
}
@@ -868,7 +868,7 @@ func TestListSandboxSuccessful(t *testing.T) {
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -928,7 +928,7 @@ func TestStatusSandboxSuccessfulStateReady(t *testing.T) {
},
}
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -985,7 +985,7 @@ func TestStatusSandboxSuccessfulStateRunning(t *testing.T) {
},
}
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1014,7 +1014,7 @@ func TestStatusSandboxFailingFetchSandboxConfig(t *testing.T) {
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1034,7 +1034,7 @@ func TestStatusPodSandboxFailingFetchSandboxState(t *testing.T) {
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1069,7 +1069,7 @@ func TestCreateContainerSuccessful(t *testing.T) {
contID := "100"
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1100,7 +1100,7 @@ func TestCreateContainerFailingNoSandbox(t *testing.T) {
contID := "100"
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1130,7 +1130,7 @@ func TestDeleteContainerSuccessful(t *testing.T) {
contID := "100"
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1184,7 +1184,7 @@ func TestDeleteContainerFailingNoContainer(t *testing.T) {
contID := "100"
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1249,7 +1249,7 @@ func TestStartContainerFailingNoContainer(t *testing.T) {
contID := "100"
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1272,7 +1272,7 @@ func TestStartContainerFailingSandboxNotStarted(t *testing.T) {
contID := "100"
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1493,7 +1493,7 @@ func TestStopContainerFailingNoContainer(t *testing.T) {
contID := "100"
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1674,7 +1674,7 @@ func TestEnterContainerFailingNoContainer(t *testing.T) {
contID := "100"
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1731,7 +1731,7 @@ func TestStatusContainerSuccessful(t *testing.T) {
contID := "100"
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1782,7 +1782,7 @@ func TestStatusContainerStateReady(t *testing.T) {
contID := "101"
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1845,7 +1845,7 @@ func TestStatusContainerStateRunning(t *testing.T) {
contID := "101"
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1917,7 +1917,7 @@ func TestStatusContainerFailing(t *testing.T) {
contID := "100"
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1940,7 +1940,7 @@ func TestStatsContainerFailing(t *testing.T) {
contID := "100"
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if p == nil || err != nil {
t.Fatal(err)
}
@@ -1973,7 +1973,7 @@ func TestStatsContainer(t *testing.T) {
assert.Error(err)
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
assert.NoError(err)
assert.NotNil(p)
@@ -2023,7 +2023,7 @@ func TestProcessListContainer(t *testing.T) {
assert.Error(err)
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
assert.NoError(err)
assert.NotNil(p)
@@ -2117,7 +2117,7 @@ func createAndStartSandbox(config SandboxConfig) (sandbox VCSandbox, sandboxDir
err error) {
// Create sandbox
sandbox, err = CreateSandbox(config)
sandbox, err = CreateSandbox(config, nil)
if sandbox == nil || err != nil {
return nil, "", err
}
@@ -2158,7 +2158,7 @@ func createStartStopDeleteSandbox(b *testing.B, sandboxConfig SandboxConfig) {
func createStartStopDeleteContainers(b *testing.B, sandboxConfig SandboxConfig, contConfigs []ContainerConfig) {
// Create sandbox
p, err := CreateSandbox(sandboxConfig)
p, err := CreateSandbox(sandboxConfig, nil)
if err != nil {
b.Fatalf("Could not create sandbox: %s", err)
}
@@ -2326,7 +2326,7 @@ func TestFetchSandbox(t *testing.T) {
config := newTestSandboxConfigNoop()
s, err := CreateSandbox(config)
s, err := CreateSandbox(config, nil)
if s == nil || err != nil {
t.Fatal(err)
}
@@ -2348,7 +2348,7 @@ func TestReleaseSandbox(t *testing.T) {
config := newTestSandboxConfigNoop()
s, err := CreateSandbox(config)
s, err := CreateSandbox(config, nil)
if s == nil || err != nil {
t.Fatal(err)
}

View File

@@ -68,7 +68,7 @@ func Example_createAndStartSandbox() {
Containers: []vc.ContainerConfig{container},
}
_, err := vc.RunSandbox(sandboxConfig)
_, err := vc.RunSandbox(sandboxConfig, nil)
if err != nil {
fmt.Printf("Could not run sandbox: %s", err)
}

virtcontainers/factory.go (new file, 15 lines)
View File

@@ -0,0 +1,15 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package virtcontainers
// Factory controls how a new VM is created.
type Factory interface {
// GetVM gets a new VM from the factory.
GetVM(config VMConfig) (*VM, error)
// CloseFactory closes and cleans up the factory.
CloseFactory()
}
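Any type with these two methods satisfies the interface, so callers can also decorate a factory. A minimal sketch of a hypothetical pass-through wrapper (the loggingFactory name and the log call are illustrative, not part of this change):

package wrapper

import (
    "log"

    vc "github.com/kata-containers/runtime/virtcontainers"
)

// loggingFactory wraps another Factory and records each VM request.
type loggingFactory struct {
    inner vc.Factory
}

// compile-time check that loggingFactory satisfies vc.Factory
var _ vc.Factory = (*loggingFactory)(nil)

func (lf *loggingFactory) GetVM(config vc.VMConfig) (*vc.VM, error) {
    vm, err := lf.inner.GetVM(config)
    log.Printf("GetVM: err=%v", err)
    return vm, err
}

func (lf *loggingFactory) CloseFactory() {
    lf.inner.CloseFactory()
}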

View File

@@ -0,0 +1,24 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package base
import vc "github.com/kata-containers/runtime/virtcontainers"
// FactoryBase is the vm factory's internal base factory interface.
// The difference between FactoryBase and Factory is that the Factory
// also handles vm config validation/comparison and possible CPU/memory
// hotplugs. It's better to do it at the factory level instead of doing
// the same work in each of the factory implementations.
type FactoryBase interface {
// Config returns base factory config.
Config() vc.VMConfig
// GetBaseVM returns a paused VM created by the base factory.
GetBaseVM() (*vc.VM, error)
// CloseFactory closes the base factory.
CloseFactory()
}

virtcontainers/factory/cache/cache.go (new file, 83 lines)
View File

@@ -0,0 +1,83 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
// cache implements a base vm factory on top of another base vm factory.
package cache
import (
"fmt"
"sync"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/factory/base"
)
type cache struct {
base base.FactoryBase
cacheCh chan *vc.VM
closed chan<- int
wg sync.WaitGroup
closeOnce sync.Once
}
// New creates a new cached vm factory.
func New(count uint, b base.FactoryBase) base.FactoryBase {
if count < 1 {
return b
}
cacheCh := make(chan *vc.VM)
closed := make(chan int, count)
c := cache{base: b, cacheCh: cacheCh, closed: closed}
for i := 0; i < int(count); i++ {
c.wg.Add(1)
go func() {
for {
vm, err := b.GetBaseVM()
if err != nil {
c.wg.Done()
c.CloseFactory()
return
}
select {
case cacheCh <- vm:
case <-closed:
vm.Stop()
c.wg.Done()
return
}
}
}()
}
return &c
}
// Config returns cache vm factory's base factory config.
func (c *cache) Config() vc.VMConfig {
return c.base.Config()
}
// GetBaseVM returns a base VM from cache factory's base factory.
func (c *cache) GetBaseVM() (*vc.VM, error) {
vm, ok := <-c.cacheCh
if ok {
return vm, nil
}
return nil, fmt.Errorf("cache factory is closed")
}
// CloseFactory closes the cache factory.
func (c *cache) CloseFactory() {
c.closeOnce.Do(func() {
for len(c.closed) < cap(c.closed) { // send enough close signals to stop all producers
c.closed <- 0
}
c.wg.Wait()
close(c.cacheCh)
c.base.CloseFactory()
})
}
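A minimal usage sketch, assuming a valid VMConfig: with an unbuffered cacheCh and count producer goroutines, up to count paused VMs are kept ready in the background, and GetBaseVM simply receives the next one:

package warm

import (
    vc "github.com/kata-containers/runtime/virtcontainers"
    "github.com/kata-containers/runtime/virtcontainers/factory/cache"
    "github.com/kata-containers/runtime/virtcontainers/factory/direct"
)

func warmVM(vmConfig vc.VMConfig) (*vc.VM, error) {
    // Keep up to 4 pre-built VMs warming in the background.
    f := cache.New(4, direct.New(vmConfig))
    // Stop the producer goroutines once we are done with the factory.
    defer f.CloseFactory()

    // Receive an already-built, paused VM from the cache channel.
    return f.GetBaseVM()
}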

View File

@@ -0,0 +1,44 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package cache
import (
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/factory/direct"
)
func TestTemplateFactory(t *testing.T) {
assert := assert.New(t)
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
hyperConfig := vc.HypervisorConfig{
KernelPath: testDir,
ImagePath: testDir,
}
vmConfig := vc.VMConfig{
HypervisorType: vc.MockHypervisor,
AgentType: vc.NoopAgentType,
HypervisorConfig: hyperConfig,
}
// New
f := New(2, direct.New(vmConfig))
// Config
assert.Equal(f.Config(), vmConfig)
// GetBaseVM
_, err := f.GetBaseVM()
assert.Nil(err)
// CloseFactory
f.CloseFactory()
}

View File

@@ -0,0 +1,46 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
// direct implements a base vm factory without vm templating.
package direct
import (
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/factory/base"
)
type direct struct {
config vc.VMConfig
}
// New returns a new direct vm factory.
func New(config vc.VMConfig) base.FactoryBase {
return &direct{config}
}
// Config returns the direct factory's configuration.
func (d *direct) Config() vc.VMConfig {
return d.config
}
// GetBaseVM creates a new VM directly.
func (d *direct) GetBaseVM() (*vc.VM, error) {
vm, err := vc.NewVM(d.config)
if err != nil {
return nil, err
}
err = vm.Pause()
if err != nil {
vm.Stop()
return nil, err
}
return vm, nil
}
// CloseFactory closes the direct vm factory.
func (d *direct) CloseFactory() {
}

View File

@@ -0,0 +1,43 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package direct
import (
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
vc "github.com/kata-containers/runtime/virtcontainers"
)
func TestTemplateFactory(t *testing.T) {
assert := assert.New(t)
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
hyperConfig := vc.HypervisorConfig{
KernelPath: testDir,
ImagePath: testDir,
}
vmConfig := vc.VMConfig{
HypervisorType: vc.MockHypervisor,
AgentType: vc.NoopAgentType,
HypervisorConfig: hyperConfig,
}
// New
f := New(vmConfig)
// Config
assert.Equal(f.Config(), vmConfig)
// GetBaseVM
_, err := f.GetBaseVM()
assert.Nil(err)
// CloseFactory
f.CloseFactory()
}

View File

@@ -0,0 +1,176 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package factory
import (
"fmt"
"reflect"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/factory/base"
"github.com/kata-containers/runtime/virtcontainers/factory/cache"
"github.com/kata-containers/runtime/virtcontainers/factory/direct"
"github.com/kata-containers/runtime/virtcontainers/factory/template"
"github.com/sirupsen/logrus"
)
var factoryLogger = logrus.FieldLogger(logrus.New())
// Config is a collection of VM factory configurations.
type Config struct {
Template bool
Cache uint
VMConfig vc.VMConfig
}
func (f *Config) validate() error {
return f.VMConfig.Valid()
}
type factory struct {
base base.FactoryBase
}
// NewFactory returns a working factory.
func NewFactory(config Config, fetchOnly bool) (vc.Factory, error) {
err := config.validate()
if err != nil {
return nil, err
}
if fetchOnly && config.Cache > 0 {
return nil, fmt.Errorf("cache factory does not support fetch")
}
var b base.FactoryBase
if config.Template {
if fetchOnly {
b, err = template.Fetch(config.VMConfig)
if err != nil {
return nil, err
}
} else {
b = template.New(config.VMConfig)
}
} else {
b = direct.New(config.VMConfig)
}
if config.Cache > 0 {
b = cache.New(config.Cache, b)
}
return &factory{b}, nil
}
func (f *factory) log() *logrus.Entry {
return factoryLogger.WithField("subsystem", "factory")
}
func resetHypervisorConfig(config *vc.HypervisorConfig) {
config.DefaultVCPUs = 0
config.DefaultMemSz = 0
config.BootToBeTemplate = false
config.BootFromTemplate = false
config.MemoryPath = ""
config.DevicesStatePath = ""
}
// It's important that config1 and config2 are passed by value!
func checkVMConfig(config1, config2 vc.VMConfig) error {
if config1.HypervisorType != config2.HypervisorType {
return fmt.Errorf("hypervisor type does not match: %s vs. %s", config1.HypervisorType, config2.HypervisorType)
}
if config1.AgentType != config2.AgentType {
return fmt.Errorf("agent type does not match: %s vs. %s", config1.AgentType, config2.AgentType)
}
// check hypervisor config details
resetHypervisorConfig(&config1.HypervisorConfig)
resetHypervisorConfig(&config2.HypervisorConfig)
if !reflect.DeepEqual(config1, config2) {
return fmt.Errorf("hypervisor config does not match, base: %+v. new: %+v", config1, config2)
}
return nil
}
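// Note: resetHypervisorConfig zeroes exactly the fields the factory manages
// on top of a base VM: the vCPU count and memory size, which GetVM can raise
// by hotplugging, and the template boot flags and state paths, which
// necessarily differ between creating a template and booting from it. After
// the reset, reflect.DeepEqual compares only the parts of the configuration
// that a factory-built VM cannot adjust.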
func (f *factory) checkConfig(config vc.VMConfig) error {
baseConfig := f.base.Config()
return checkVMConfig(config, baseConfig)
}
// GetVM returns a working blank VM created by the factory.
func (f *factory) GetVM(config vc.VMConfig) (*vc.VM, error) {
hypervisorConfig := config.HypervisorConfig
err := config.Valid()
if err != nil {
f.log().WithError(err).Error("invalid hypervisor config")
return nil, err
}
err = f.checkConfig(config)
if err != nil {
f.log().WithError(err).Info("fallback to direct factory vm")
return direct.New(config).GetBaseVM()
}
f.log().Info("get base VM")
vm, err := f.base.GetBaseVM()
if err != nil {
f.log().WithError(err).Error("failed to get base VM")
return nil, err
}
// cleanup upon error
defer func() {
if err != nil {
f.log().WithError(err).Error("clean up vm")
vm.Stop()
}
}()
err = vm.Resume()
if err != nil {
return nil, err
}
online := false
baseConfig := f.base.Config().HypervisorConfig
if baseConfig.DefaultVCPUs < hypervisorConfig.DefaultVCPUs {
err = vm.AddCPUs(hypervisorConfig.DefaultVCPUs - baseConfig.DefaultVCPUs)
if err != nil {
return nil, err
}
online = true
}
if baseConfig.DefaultMemSz < hypervisorConfig.DefaultMemSz {
err = vm.AddMemory(hypervisorConfig.DefaultMemSz - baseConfig.DefaultMemSz)
if err != nil {
return nil, err
}
online = true
}
if online {
err = vm.OnlineCPUMemory()
if err != nil {
return nil, err
}
}
return vm, nil
}
// CloseFactory closes the factory.
func (f *factory) CloseFactory() {
f.base.CloseFactory()
}
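As a worked example of the hotplug path above: if the base template was built with 1 vCPU and 128 MB of memory and a caller asks for 2 vCPUs and 256 MB, GetVM resumes the cloned VM, hotplugs the difference with AddCPUs(1) and AddMemory(128), and finally calls OnlineCPUMemory so the guest brings the added resources online.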

View File

@@ -0,0 +1,209 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package factory
import (
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
vc "github.com/kata-containers/runtime/virtcontainers"
)
func TestNewFactory(t *testing.T) {
var config Config
assert := assert.New(t)
_, err := NewFactory(config, true)
assert.Error(err)
_, err = NewFactory(config, false)
assert.Error(err)
config.VMConfig = vc.VMConfig{
HypervisorType: vc.MockHypervisor,
AgentType: vc.NoopAgentType,
}
_, err = NewFactory(config, false)
assert.Error(err)
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
config.VMConfig.HypervisorConfig = vc.HypervisorConfig{
KernelPath: testDir,
ImagePath: testDir,
}
// direct
_, err = NewFactory(config, false)
assert.Nil(err)
_, err = NewFactory(config, true)
assert.Nil(err)
// template
config.Template = true
_, err = NewFactory(config, false)
assert.Nil(err)
_, err = NewFactory(config, true)
assert.Error(err)
// Cache
config.Cache = 10
_, err = NewFactory(config, false)
assert.Nil(err)
_, err = NewFactory(config, true)
assert.Error(err)
config.Template = false
_, err = NewFactory(config, false)
assert.Nil(err)
_, err = NewFactory(config, true)
assert.Error(err)
}
func TestVMConfigValid(t *testing.T) {
assert := assert.New(t)
config := Config{}
err := config.validate()
assert.Error(err)
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
config.VMConfig = vc.VMConfig{
HypervisorType: vc.MockHypervisor,
AgentType: vc.NoopAgentType,
HypervisorConfig: vc.HypervisorConfig{
KernelPath: testDir,
ImagePath: testDir,
},
}
err = config.validate()
assert.Nil(err)
}
func TestCheckVMConfig(t *testing.T) {
assert := assert.New(t)
var config1, config2 vc.VMConfig
// default config should equal
err := checkVMConfig(config1, config2)
assert.Nil(err)
config1.HypervisorType = vc.MockHypervisor
err = checkVMConfig(config1, config2)
assert.Error(err)
config2.HypervisorType = vc.MockHypervisor
err = checkVMConfig(config1, config2)
assert.Nil(err)
config1.AgentType = vc.NoopAgentType
err = checkVMConfig(config1, config2)
assert.Error(err)
config2.AgentType = vc.NoopAgentType
err = checkVMConfig(config1, config2)
assert.Nil(err)
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
config1.HypervisorConfig = vc.HypervisorConfig{
KernelPath: testDir,
ImagePath: testDir,
}
err = checkVMConfig(config1, config2)
assert.Error(err)
config2.HypervisorConfig = vc.HypervisorConfig{
KernelPath: testDir,
ImagePath: testDir,
}
err = checkVMConfig(config1, config2)
assert.Nil(err)
}
func TestFactoryGetVM(t *testing.T) {
assert := assert.New(t)
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
hyperConfig := vc.HypervisorConfig{
KernelPath: testDir,
ImagePath: testDir,
}
vmConfig := vc.VMConfig{
HypervisorType: vc.MockHypervisor,
AgentType: vc.NoopAgentType,
HypervisorConfig: hyperConfig,
}
// direct factory
f, err := NewFactory(Config{VMConfig: vmConfig}, false)
assert.Nil(err)
_, err = f.GetVM(vmConfig)
assert.Nil(err)
f.CloseFactory()
// template factory
f, err = NewFactory(Config{Template: true, VMConfig: vmConfig}, false)
assert.Nil(err)
_, err = f.GetVM(vmConfig)
assert.Nil(err)
f.CloseFactory()
// fetch template factory
f, err = NewFactory(Config{Template: true, VMConfig: vmConfig}, false)
assert.Nil(err)
_, err = NewFactory(Config{Template: true, VMConfig: vmConfig}, true)
assert.Error(err)
_, err = f.GetVM(vmConfig)
assert.Nil(err)
f.CloseFactory()
// cache factory over direct factory
f, err = NewFactory(Config{Cache: 2, VMConfig: vmConfig}, false)
assert.Nil(err)
_, err = f.GetVM(vmConfig)
assert.Nil(err)
f.CloseFactory()
// cache factory over template factory
f, err = NewFactory(Config{Template: true, Cache: 2, VMConfig: vmConfig}, false)
assert.Nil(err)
_, err = f.GetVM(vmConfig)
assert.Nil(err)
// CPU hotplug
vmConfig.HypervisorConfig.DefaultVCPUs++
_, err = f.GetVM(vmConfig)
assert.Nil(err)
// Memory hotplug
vmConfig.HypervisorConfig.DefaultMemSz += 128
_, err = f.GetVM(vmConfig)
assert.Nil(err)
// checkConfig fall back
vmConfig.HypervisorConfig.Mlock = true
_, err = f.GetVM(vmConfig)
assert.Nil(err)
f.CloseFactory()
}

View File

@@ -0,0 +1,143 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
// template implements a base vm factory with vm templating.
package template
import (
"fmt"
"os"
"syscall"
"time"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/factory/base"
"github.com/kata-containers/runtime/virtcontainers/factory/direct"
)
type template struct {
statePath string
config vc.VMConfig
}
// Fetch finds and returns a pre-built template factory.
// TODO: save template metadata and fetch from storage.
func Fetch(config vc.VMConfig) (base.FactoryBase, error) {
statePath := vc.RunVMStoragePath + "/template"
t := &template{statePath, config}
err := t.checkTemplateVM()
if err != nil {
return nil, err
}
return t, nil
}
// New creates a new VM template factory.
func New(config vc.VMConfig) base.FactoryBase {
statePath := vc.RunVMStoragePath + "/template"
t := &template{statePath, config}
err := t.prepareTemplateFiles()
if err != nil {
// fallback to direct factory if template is not supported.
return direct.New(config)
}
err = t.createTemplateVM()
if err != nil {
// fallback to direct factory if template is not supported.
return direct.New(config)
}
return t
}
// Config returns template factory's configuration.
func (t *template) Config() vc.VMConfig {
return t.config
}
// GetBaseVM creates a new paused VM from the template VM.
func (t *template) GetBaseVM() (*vc.VM, error) {
return t.createFromTemplateVM()
}
// CloseFactory cleans up the template VM.
func (t *template) CloseFactory() {
syscall.Unmount(t.statePath, 0)
os.RemoveAll(t.statePath)
}
func (t *template) prepareTemplateFiles() error {
// create and mount tmpfs for the shared memory file
err := os.MkdirAll(t.statePath, 0700)
if err != nil {
return err
}
flags := uintptr(syscall.MS_NOSUID | syscall.MS_NODEV)
opts := fmt.Sprintf("size=%dM", t.config.HypervisorConfig.DefaultMemSz+8)
if err = syscall.Mount("tmpfs", t.statePath, "tmpfs", flags, opts); err != nil {
return err
}
f, err := os.Create(t.statePath + "/memory")
if err != nil {
return err
}
f.Close()
return nil
}
func (t *template) createTemplateVM() error {
// create the template vm
config := t.config
config.HypervisorConfig.BootToBeTemplate = true
config.HypervisorConfig.BootFromTemplate = false
config.HypervisorConfig.MemoryPath = t.statePath + "/memory"
config.HypervisorConfig.DevicesStatePath = t.statePath + "/state"
vm, err := vc.NewVM(config)
if err != nil {
return err
}
defer vm.Stop()
err = vm.Pause()
if err != nil {
return err
}
err = vm.Save()
if err != nil {
return err
}
// qemu QMP does not wait for migration to finish...
time.Sleep(1 * time.Second)
return nil
}
func (t *template) createFromTemplateVM() (*vc.VM, error) {
config := t.config
config.HypervisorConfig.BootToBeTemplate = false
config.HypervisorConfig.BootFromTemplate = true
config.HypervisorConfig.MemoryPath = t.statePath + "/memory"
config.HypervisorConfig.DevicesStatePath = t.statePath + "/state"
return vc.NewVM(config)
}
func (t *template) checkTemplateVM() error {
_, err := os.Stat(t.statePath + "/memory")
if err != nil {
return err
}
_, err = os.Stat(t.statePath + "/state")
return err
}
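A minimal usage sketch, assuming templating works on the host: New boots one template VM, pauses it and saves its memory and device state into the tmpfs-backed files under RunVMStoragePath/template, after which every GetBaseVM boots a clone from those files (Fetch instead attaches to a template prepared earlier, for example by `kata-runtime factory init`):

package clone

import (
    vc "github.com/kata-containers/runtime/virtcontainers"
    "github.com/kata-containers/runtime/virtcontainers/factory/template"
)

func cloneVM(vmConfig vc.VMConfig) (*vc.VM, error) {
    // New boots, pauses and saves a template VM; it silently falls back
    // to the direct factory if templating is not supported.
    f := template.New(vmConfig)

    // Each GetBaseVM boots a new paused VM from the saved template.
    vm, err := f.GetBaseVM()
    if err != nil {
        f.CloseFactory() // unmounts the tmpfs and removes the state
        return nil, err
    }
    return vm, nil
}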

View File

@@ -0,0 +1,70 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package template
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
vc "github.com/kata-containers/runtime/virtcontainers"
)
func TestTemplateFactory(t *testing.T) {
assert := assert.New(t)
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
hyperConfig := vc.HypervisorConfig{
KernelPath: testDir,
ImagePath: testDir,
}
vmConfig := vc.VMConfig{
HypervisorType: vc.MockHypervisor,
AgentType: vc.NoopAgentType,
HypervisorConfig: hyperConfig,
}
// New
f := New(vmConfig)
// Config
assert.Equal(f.Config(), vmConfig)
// GetBaseVM
_, err := f.GetBaseVM()
assert.Nil(err)
// Fetch
tt := template{
statePath: testDir,
config: vmConfig,
}
err = tt.checkTemplateVM()
assert.Error(err)
_, err = os.Create(tt.statePath + "/memory")
assert.Nil(err)
err = tt.checkTemplateVM()
assert.Error(err)
_, err = os.Create(tt.statePath + "/state")
assert.Nil(err)
err = tt.checkTemplateVM()
assert.Nil(err)
err = tt.createTemplateVM()
assert.Nil(err)
_, err = tt.GetBaseVM()
assert.Nil(err)
// CloseFactory
f.CloseFactory()
tt.CloseFactory()
}

View File

@@ -85,17 +85,27 @@ const dirMode = os.FileMode(0750) | os.ModeDir
// storagePathSuffix is the suffix used for all storage paths
//
// Note: this very brief path represents "virtcontainers sandboxes". It is as
// Note: this very brief path represents "virtcontainers". It is as
// terse as possible to minimise path length.
const storagePathSuffix = "/vc/sbs"
const storagePathSuffix = "vc"
// sandboxPathSuffix is the suffix used for sandbox storage
const sandboxPathSuffix = "sbs"
// vmPathSuffix is the suffix used for guest VMs.
const vmPathSuffix = "vm"
// configStoragePath is the sandbox configuration directory.
// It will contain one config.json file for each created sandbox.
var configStoragePath = filepath.Join("/var/lib", storagePathSuffix)
var configStoragePath = filepath.Join("/var/lib", storagePathSuffix, sandboxPathSuffix)
// runStoragePath is the sandbox runtime directory.
// It will contain one state.json and one lock file for each created sandbox.
var runStoragePath = filepath.Join("/run", storagePathSuffix)
var runStoragePath = filepath.Join("/run", storagePathSuffix, sandboxPathSuffix)
// RunVMStoragePath is the vm directory.
// It will contain all guest vm sockets and shared mountpoints.
var RunVMStoragePath = filepath.Join("/run", storagePathSuffix, vmPathSuffix)
// resourceStorage is the virtcontainers resources (configuration, state, etc...)
// storage interface.
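The resulting layout is /var/lib/vc/sbs for per-sandbox configuration, /run/vc/sbs for per-sandbox runtime state and locks, and /run/vc/vm for guest VM sockets and shared mount points. RunVMStoragePath is exported because the VM factory and the agents place per-VM files there before any sandbox exists.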

View File

@@ -324,7 +324,7 @@ func runSandbox(context *cli.Context) error {
return fmt.Errorf("Could not build sandbox config: %s", err)
}
_, err = vc.RunSandbox(sandboxConfig)
_, err = vc.RunSandbox(sandboxConfig, nil)
if err != nil {
return fmt.Errorf("Could not run sandbox: %s", err)
}
@@ -338,7 +338,7 @@ func createSandbox(context *cli.Context) error {
return fmt.Errorf("Could not build sandbox config: %s", err)
}
p, err := vc.CreateSandbox(sandboxConfig)
p, err := vc.CreateSandbox(sandboxConfig, nil)
if err != nil {
return fmt.Errorf("Could not create sandbox: %s", err)
}

View File

@@ -262,9 +262,17 @@ func (h *hyper) init(sandbox *Sandbox, config interface{}) (err error) {
return nil
}
func (h *hyper) createSandbox(sandbox *Sandbox) (err error) {
func (h *hyper) getVMPath(id string) string {
return filepath.Join(runStoragePath, id)
}
func (h *hyper) getSharePath(id string) string {
return filepath.Join(defaultSharedDir, id)
}
func (h *hyper) configure(hv hypervisor, id, sharePath string, builtin bool, config interface{}) error {
for _, socket := range h.sockets {
err := sandbox.hypervisor.addDevice(socket, serialPortDev)
err := hv.addDevice(socket, serialPortDev)
if err != nil {
return err
}
@@ -274,14 +282,18 @@ func (h *hyper) createSandbox(sandbox *Sandbox) (err error) {
// This volume contains all bind mounted container bundles.
sharedVolume := Volume{
MountTag: mountTag,
HostPath: filepath.Join(defaultSharedDir, sandbox.id),
HostPath: sharePath,
}
if err := os.MkdirAll(sharedVolume.HostPath, dirMode); err != nil {
return err
}
return sandbox.hypervisor.addDevice(sharedVolume, fsDev)
return hv.addDevice(sharedVolume, fsDev)
}
func (h *hyper) createSandbox(sandbox *Sandbox) (err error) {
return h.configure(sandbox.hypervisor, "", h.getSharePath(sandbox.id), false, nil)
}
func (h *hyper) capabilities() capabilities {

View File

@@ -7,11 +7,13 @@ package virtcontainers
import (
"fmt"
"io/ioutil"
"net"
"reflect"
"testing"
"github.com/kata-containers/runtime/virtcontainers/pkg/hyperstart"
"github.com/stretchr/testify/assert"
"github.com/vishvananda/netlink"
)
@@ -156,3 +158,43 @@ func TestProcessHyperRouteDestIPv6Failure(t *testing.T) {
testProcessHyperRoute(t, route, testRouteDeviceName, nil)
}
func TestHyperPathAPI(t *testing.T) {
assert := assert.New(t)
h1 := &hyper{}
h2 := &hyper{}
id := "foobar"
// getVMPath
path1 := h1.getVMPath(id)
path2 := h2.getVMPath(id)
assert.Equal(path1, path2)
// getSharePath
path1 = h1.getSharePath(id)
path2 = h2.getSharePath(id)
assert.Equal(path1, path2)
}
func TestHyperConfigure(t *testing.T) {
assert := assert.New(t)
dir, err := ioutil.TempDir("", "hyperstart-test")
assert.Nil(err)
h := &hyper{}
m := &mockHypervisor{}
c := HyperConfig{}
id := "foobar"
invalidAgent := KataAgentConfig{}
err = h.configure(m, id, dir, true, invalidAgent)
assert.Nil(err)
err = h.configure(m, id, dir, true, c)
assert.Nil(err)
err = h.configure(m, id, dir, false, c)
assert.Nil(err)
}

View File

@@ -216,15 +216,51 @@ type HypervisorConfig struct {
// Msize9p is used as the msize for 9p shares
Msize9p uint32
// BootToBeTemplate used to indicate if the VM is created to be a template VM
BootToBeTemplate bool
// BootFromTemplate used to indicate if the VM should be created from a template VM
BootFromTemplate bool
// MemoryPath is the memory file path of VM memory. Used when either BootToBeTemplate or
// BootFromTemplate is true.
MemoryPath string
// DevicesStatePath is the VM device state file path. Used when either BootToBeTemplate or
// BootFromTemplate is true.
DevicesStatePath string
}
func (conf *HypervisorConfig) valid() (bool, error) {
func (conf *HypervisorConfig) checkTemplateConfig() error {
if conf.BootToBeTemplate && conf.BootFromTemplate {
return fmt.Errorf("Cannot set both 'to be' and 'from' vm tempate")
}
if conf.BootToBeTemplate || conf.BootFromTemplate {
if conf.MemoryPath == "" {
return fmt.Errorf("Missing MemoryPath for vm template")
}
if conf.BootFromTemplate && conf.DevicesStatePath == "" {
return fmt.Errorf("Missing DevicesStatePath to load from vm template")
}
}
return nil
}
func (conf *HypervisorConfig) valid() error {
if conf.KernelPath == "" {
return false, fmt.Errorf("Missing kernel path")
return fmt.Errorf("Missing kernel path")
}
if conf.ImagePath == "" && conf.InitrdPath == "" {
return false, fmt.Errorf("Missing image and initrd path")
return fmt.Errorf("Missing image and initrd path")
}
if err := conf.checkTemplateConfig(); err != nil {
return err
}
if conf.DefaultVCPUs == 0 {
@@ -251,7 +287,7 @@ func (conf *HypervisorConfig) valid() (bool, error) {
conf.Msize9p = defaultMsize9p
}
return true, nil
return nil
}
// AddKernelParam allows the addition of new kernel parameters to an existing
@@ -499,12 +535,13 @@ func RunningOnVMM(cpuInfoPath string) (bool, error) {
// hypervisor is the virtcontainers hypervisor interface.
// The default hypervisor implementation is Qemu.
type hypervisor interface {
init(sandbox *Sandbox) error
createSandbox(sandboxConfig SandboxConfig) error
init(id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error
createSandbox() error
startSandbox() error
waitSandbox(timeout int) error
stopSandbox() error
pauseSandbox() error
saveSandbox() error
resumeSandbox() error
addDevice(devInfo interface{}, devType deviceType) error
hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error)
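A minimal sketch of the two shapes checkTemplateConfig accepts (all paths are hypothetical, for illustration): booting to be a template requires MemoryPath, booting from a template additionally requires DevicesStatePath, and setting both flags at once is rejected:

package hvconfig

import vc "github.com/kata-containers/runtime/virtcontainers"

func templateConfigs() (toBe, from vc.HypervisorConfig) {
    // Boot a VM that will become the template: only MemoryPath is required.
    toBe = vc.HypervisorConfig{
        KernelPath:       "/usr/share/kata/vmlinuz",  // hypothetical path
        ImagePath:        "/usr/share/kata/kata.img", // hypothetical path
        BootToBeTemplate: true,
        MemoryPath:       "/run/vc/vm/template/memory",
    }

    // Boot a clone from the template: device state is also required.
    from = vc.HypervisorConfig{
        KernelPath:       "/usr/share/kata/vmlinuz",
        ImagePath:        "/usr/share/kata/kata.img",
        BootFromTemplate: true,
        MemoryPath:       "/run/vc/vm/template/memory",
        DevicesStatePath: "/run/vc/vm/template/state",
    }
    return toBe, from
}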

View File

@@ -107,9 +107,12 @@ func TestNewHypervisorFromUnknownHypervisorType(t *testing.T) {
}
}
func testHypervisorConfigValid(t *testing.T, hypervisorConfig *HypervisorConfig, expected bool) {
ret, _ := hypervisorConfig.valid()
if ret != expected {
func testHypervisorConfigValid(t *testing.T, hypervisorConfig *HypervisorConfig, success bool) {
err := hypervisorConfig.valid()
if success && err != nil {
t.Fatal()
}
if !success && err == nil {
t.Fatal()
}
}
@@ -154,6 +157,30 @@ func TestHypervisorConfigIsValid(t *testing.T) {
testHypervisorConfigValid(t, hypervisorConfig, true)
}
func TestHypervisorConfigValidTemplateConfig(t *testing.T) {
hypervisorConfig := &HypervisorConfig{
KernelPath: fmt.Sprintf("%s/%s", testDir, testKernel),
ImagePath: fmt.Sprintf("%s/%s", testDir, testImage),
HypervisorPath: fmt.Sprintf("%s/%s", testDir, testHypervisor),
BootToBeTemplate: true,
BootFromTemplate: true,
}
testHypervisorConfigValid(t, hypervisorConfig, false)
hypervisorConfig.BootToBeTemplate = false
testHypervisorConfigValid(t, hypervisorConfig, false)
hypervisorConfig.MemoryPath = "foobar"
testHypervisorConfigValid(t, hypervisorConfig, false)
hypervisorConfig.DevicesStatePath = "foobar"
testHypervisorConfigValid(t, hypervisorConfig, true)
hypervisorConfig.BootFromTemplate = false
hypervisorConfig.BootToBeTemplate = true
testHypervisorConfigValid(t, hypervisorConfig, true)
hypervisorConfig.MemoryPath = ""
testHypervisorConfigValid(t, hypervisorConfig, false)
}
func TestHypervisorConfigDefaults(t *testing.T) {
hypervisorConfig := &HypervisorConfig{
KernelPath: fmt.Sprintf("%s/%s", testDir, testKernel),

View File

@@ -18,6 +18,7 @@ import (
// VCImpl is the official virtcontainers function of the same name.
type VCImpl struct {
factory Factory
}
// SetLogger implements the VC function of the same name.
@@ -25,9 +26,14 @@ func (impl *VCImpl) SetLogger(logger logrus.FieldLogger) {
SetLogger(logger)
}
// SetFactory implements the VC function of the same name.
func (impl *VCImpl) SetFactory(factory Factory) {
impl.factory = factory
}
// CreateSandbox implements the VC function of the same name.
func (impl *VCImpl) CreateSandbox(sandboxConfig SandboxConfig) (VCSandbox, error) {
return CreateSandbox(sandboxConfig)
return CreateSandbox(sandboxConfig, impl.factory)
}
// DeleteSandbox implements the VC function of the same name.
@@ -47,7 +53,7 @@ func (impl *VCImpl) StopSandbox(sandboxID string) (VCSandbox, error) {
// RunSandbox implements the VC function of the same name.
func (impl *VCImpl) RunSandbox(sandboxConfig SandboxConfig) (VCSandbox, error) {
return RunSandbox(sandboxConfig)
return RunSandbox(sandboxConfig, impl.factory)
}
// ListSandbox implements the VC function of the same name.

View File

@@ -16,6 +16,7 @@ import (
// VC is the Virtcontainers interface
type VC interface {
SetLogger(logger logrus.FieldLogger)
SetFactory(Factory)
CreateSandbox(sandboxConfig SandboxConfig) (VCSandbox, error)
DeleteSandbox(sandboxID string) (VCSandbox, error)

View File

@@ -119,11 +119,19 @@ func parseVSOCKAddr(sock string) (uint32, uint32, error) {
return uint32(cid), uint32(port), nil
}
func (k *kataAgent) generateVMSocket(sandbox *Sandbox, c KataAgentConfig) error {
func (k *kataAgent) getVMPath(id string) string {
return filepath.Join(RunVMStoragePath, id)
}
func (k *kataAgent) getSharePath(id string) string {
return filepath.Join(kataHostSharedDir, id)
}
func (k *kataAgent) generateVMSocket(id string, c KataAgentConfig) error {
cid, port, err := parseVSOCKAddr(c.GRPCSocket)
if err != nil {
// We need to generate a host UNIX socket path for the emulated serial port.
kataSock, err := utils.BuildSocketPath(runStoragePath, sandbox.id, defaultKataSocketName)
kataSock, err := utils.BuildSocketPath(k.getVMPath(id), defaultKataSocketName)
if err != nil {
return err
}
@@ -148,7 +156,7 @@ func (k *kataAgent) generateVMSocket(sandbox *Sandbox, c KataAgentConfig) error
func (k *kataAgent) init(sandbox *Sandbox, config interface{}) (err error) {
switch c := config.(type) {
case KataAgentConfig:
if err := k.generateVMSocket(sandbox, c); err != nil {
if err := k.generateVMSocket(sandbox.id, c); err != nil {
return err
}
k.keepConn = c.LongLiveConn
@@ -196,10 +204,22 @@ func (k *kataAgent) capabilities() capabilities {
return caps
}
func (k *kataAgent) createSandbox(sandbox *Sandbox) error {
func (k *kataAgent) configure(h hypervisor, id, sharePath string, builtin bool, config interface{}) error {
if config != nil {
switch c := config.(type) {
case KataAgentConfig:
if err := k.generateVMSocket(id, c); err != nil {
return err
}
k.keepConn = c.LongLiveConn
default:
return fmt.Errorf("Invalid config type")
}
}
switch s := k.vmSocket.(type) {
case Socket:
err := sandbox.hypervisor.addDevice(s, serialPortDev)
err := h.addDevice(s, serialPortDev)
if err != nil {
return err
}
@@ -209,18 +229,27 @@ func (k *kataAgent) createSandbox(sandbox *Sandbox) error {
return fmt.Errorf("Invalid config type")
}
if builtin {
k.proxyBuiltIn = true
k.state.URL, _ = k.agentURL()
}
// Adding the shared volume.
// This volume contains all bind mounted container bundles.
sharedVolume := Volume{
MountTag: mountGuest9pTag,
HostPath: filepath.Join(kataHostSharedDir, sandbox.id),
HostPath: sharePath,
}
if err := os.MkdirAll(sharedVolume.HostPath, dirMode); err != nil {
return err
}
return sandbox.hypervisor.addDevice(sharedVolume, fsDev)
return h.addDevice(sharedVolume, fsDev)
}
func (k *kataAgent) createSandbox(sandbox *Sandbox) error {
return k.configure(sandbox.hypervisor, sandbox.id, k.getSharePath(sandbox.id), k.proxyBuiltIn, nil)
}
func cmdToKataProcess(cmd Cmd) (process *grpc.Process, err error) {
@@ -719,35 +748,15 @@ func (k *kataAgent) rollbackFailingContainerCreation(c *Container) {
}
}
func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process, err error) {
ociSpecJSON, ok := c.config.Annotations[vcAnnotations.ConfigJSONKey]
if !ok {
return nil, errorMissingOCISpec
}
var ctrStorages []*grpc.Storage
var ctrDevices []*grpc.Device
// The rootfs storage volume represents the container rootfs
// mount point inside the guest.
// It can be a block based device (when using block based container
// overlay on the host) mount or a 9pfs one (for all other overlay
// implementations).
rootfs := &grpc.Storage{}
// This is the guest absolute root path for that container.
rootPathParent := filepath.Join(kataGuestSharedDir, c.id)
rootPath := filepath.Join(rootPathParent, rootfsDir)
// In case the container creation fails, the following defer statement
// takes care of rolling back actions previously performed.
defer func() {
if err != nil {
k.rollbackFailingContainerCreation(c)
}
}()
func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) {
if c.state.Fstype != "" {
// The rootfs storage volume represents the container rootfs
// mount point inside the guest.
// It can be a block based device (when using block based container
// overlay on the host) mount or a 9pfs one (for all other overlay
// implementations).
rootfs := &grpc.Storage{}
// This is a block based device rootfs.
// Pass a drive name only in case of virtio-blk driver.
@@ -773,24 +782,53 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
rootfs.Options = []string{"nouuid"}
}
return rootfs, nil
}
// This is not a block based device rootfs.
// We are going to bind mount it into the 9pfs
// shared drive between the host and the guest.
// With 9pfs we don't need to ask the agent to
// mount the rootfs as the shared directory
// (kataGuestSharedDir) is already mounted in the
// guest. We only need to mount the rootfs from
// the host and it will show up in the guest.
if err := bindMountContainerRootfs(kataHostSharedDir, sandbox.id, c.id, c.rootFs, false); err != nil {
return nil, err
}
return nil, nil
}
func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process, err error) {
ociSpecJSON, ok := c.config.Annotations[vcAnnotations.ConfigJSONKey]
if !ok {
return nil, errorMissingOCISpec
}
var ctrStorages []*grpc.Storage
var ctrDevices []*grpc.Device
var rootfs *grpc.Storage
// This is the guest absolute root path for that container.
rootPathParent := filepath.Join(kataGuestSharedDir, c.id)
rootPath := filepath.Join(rootPathParent, rootfsDir)
// In case the container creation fails, the following defer statement
// takes care of rolling back actions previously performed.
defer func() {
if err != nil {
k.rollbackFailingContainerCreation(c)
}
}()
if rootfs, err = k.buildContainerRootfs(sandbox, c, rootPathParent); err != nil {
return nil, err
} else if rootfs != nil {
// Add rootfs to the list of container storage.
// We only need to do this for block based rootfs, as we
// want the agent to mount it into the right location
// (kataGuestSharedDir/ctrID/).
ctrStorages = append(ctrStorages, rootfs)
} else {
// This is not a block based device rootfs.
// We are going to bind mount it into the 9pfs
// shared drive between the host and the guest.
// With 9pfs we don't need to ask the agent to
// mount the rootfs as the shared directory
// (kataGuestSharedDir) is already mounted in the
// guest. We only need to mount the rootfs from
// the host and it will show up in the guest.
if err = bindMountContainerRootfs(kataHostSharedDir, sandbox.id, c.id, c.rootFs, false); err != nil {
return nil, err
}
}
ociSpec := &specs.Spec{}
@@ -861,11 +899,12 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
createNSList := []ns.NSType{ns.NSTypePID}
enterNSList := []ns.Namespace{
{
enterNSList := []ns.Namespace{}
if sandbox.networkNS.NetNsPath != "" {
enterNSList = append(enterNSList, ns.Namespace{
Path: sandbox.networkNS.NetNsPath,
Type: ns.NSTypeNet,
},
})
}
return prepareAndStartShim(sandbox, k.shim, c.id, req.ExecId,

View File

@@ -6,12 +6,15 @@
package virtcontainers
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"reflect"
"strings"
"syscall"
"testing"
gpb "github.com/gogo/protobuf/types"
@@ -25,6 +28,7 @@ import (
"github.com/kata-containers/runtime/virtcontainers/device/api"
"github.com/kata-containers/runtime/virtcontainers/device/config"
"github.com/kata-containers/runtime/virtcontainers/device/drivers"
vcAnnotations "github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
"github.com/kata-containers/runtime/virtcontainers/pkg/mock"
)
@@ -243,6 +247,8 @@ var reqList = []interface{}{
}
func TestKataAgentSendReq(t *testing.T) {
assert := assert.New(t)
impl := &gRPCProxy{}
proxy := mock.ProxyGRPCMock{
@@ -251,15 +257,12 @@ func TestKataAgentSendReq(t *testing.T) {
}
sockDir, err := testGenerateKataProxySockDir()
if err != nil {
t.Fatal(err)
}
assert.Nil(err)
defer os.RemoveAll(sockDir)
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
if err := proxy.Start(testKataProxyURL); err != nil {
t.Fatal(err)
}
err = proxy.Start(testKataProxyURL)
assert.Nil(err)
defer proxy.Stop()
k := &kataAgent{
@@ -269,10 +272,58 @@ func TestKataAgentSendReq(t *testing.T) {
}
for _, req := range reqList {
if _, err := k.sendReq(req); err != nil {
t.Fatal(err)
}
_, err = k.sendReq(req)
assert.Nil(err)
}
sandbox := &Sandbox{}
container := &Container{}
execid := "processFooBar"
err = k.startContainer(sandbox, container)
assert.Nil(err)
err = k.signalProcess(container, execid, syscall.SIGKILL, true)
assert.Nil(err)
err = k.winsizeProcess(container, execid, 100, 200)
assert.Nil(err)
_, err = k.processListContainer(sandbox, Container{}, ProcessListOptions{})
assert.Nil(err)
err = k.updateContainer(sandbox, Container{}, specs.LinuxResources{})
assert.Nil(err)
err = k.pauseContainer(sandbox, Container{})
assert.Nil(err)
err = k.resumeContainer(sandbox, Container{})
assert.Nil(err)
err = k.onlineCPUMem(1)
assert.Nil(err)
_, err = k.statsContainer(sandbox, Container{})
assert.Nil(err)
err = k.check()
assert.Nil(err)
_, err = k.waitProcess(container, execid)
assert.Nil(err)
_, err = k.writeProcessStdin(container, execid, []byte{'c'})
assert.Nil(err)
err = k.closeProcessStdin(container, execid)
assert.Nil(err)
_, err = k.readProcessStdout(container, execid, []byte{})
assert.Nil(err)
_, err = k.readProcessStderr(container, execid, []byte{})
assert.Nil(err)
}
func TestGenerateInterfacesAndRoutes(t *testing.T) {
@@ -594,3 +645,207 @@ func TestHandlePidNamespace(t *testing.T) {
_, err = k.handlePidNamespace(g, sandbox)
assert.NotNil(err)
}
func TestAgentPathAPI(t *testing.T) {
assert := assert.New(t)
k1 := &kataAgent{}
k2 := &kataAgent{}
id := "foobar"
// getVMPath
path1 := k1.getVMPath(id)
path2 := k2.getVMPath(id)
assert.Equal(path1, path2)
// getSharePath
path1 = k1.getSharePath(id)
path2 = k2.getSharePath(id)
assert.Equal(path1, path2)
// generateVMSocket
c := KataAgentConfig{}
err := k1.generateVMSocket(id, c)
assert.Nil(err)
err = k2.generateVMSocket(id, c)
assert.Nil(err)
assert.Equal(k1, k2)
c.GRPCSocket = "unixsocket"
err = k1.generateVMSocket(id, c)
assert.Nil(err)
_, ok := k1.vmSocket.(Socket)
assert.True(ok)
c.GRPCSocket = "vsock:100:200"
err = k2.generateVMSocket(id, c)
assert.Nil(err)
_, ok = k2.vmSocket.(kataVSOCK)
assert.True(ok)
}
func TestAgentConfigure(t *testing.T) {
assert := assert.New(t)
dir, err := ioutil.TempDir("", "kata-agent-test")
assert.Nil(err)
k := &kataAgent{}
h := &mockHypervisor{}
c := KataAgentConfig{GRPCSocket: "vsock:100:200"}
id := "foobar"
invalidAgent := HyperConfig{}
err = k.configure(h, id, dir, true, invalidAgent)
assert.Error(err)
err = k.configure(h, id, dir, true, c)
assert.Nil(err)
c.GRPCSocket = "foobarfoobar"
err = k.configure(h, id, dir, true, c)
assert.Nil(err)
err = k.configure(h, id, dir, false, c)
assert.Nil(err)
}
func TestParseVSOCKAddr(t *testing.T) {
assert := assert.New(t)
sock := "randomfoobar"
_, _, err := parseVSOCKAddr(sock)
assert.Error(err)
sock = "vsock://1:2"
_, _, err = parseVSOCKAddr(sock)
assert.Error(err)
sock = "unix:1:2"
_, _, err = parseVSOCKAddr(sock)
assert.Error(err)
sock = "vsock:foo:2"
_, _, err = parseVSOCKAddr(sock)
assert.Error(err)
sock = "vsock:1:bar"
_, _, err = parseVSOCKAddr(sock)
assert.Error(err)
sock = "vsock:1:2"
cid, port, err := parseVSOCKAddr(sock)
assert.Nil(err)
assert.Equal(cid, uint32(1))
assert.Equal(port, uint32(2))
}
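The cases above pin down the accepted address format: "vsock:<cid>:<port>", with both fields parsing as 32-bit unsigned integers and no "//" after the scheme. A minimal standalone sketch of a parser satisfying exactly these cases (the helper name is hypothetical; the package's real parseVSOCKAddr may differ in error wording):

package sketch

import (
	"fmt"
	"strconv"
	"strings"
)

// parseVSOCKAddrSketch accepts "vsock:<cid>:<port>" and rejects everything
// else, mirroring the cases in TestParseVSOCKAddr above.
func parseVSOCKAddrSketch(sock string) (uint32, uint32, error) {
	sp := strings.Split(sock, ":")
	if len(sp) != 3 || sp[0] != "vsock" {
		return 0, 0, fmt.Errorf("invalid vsock address: %s", sock)
	}
	cid, err := strconv.ParseUint(sp[1], 10, 32)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid vsock context ID: %s", sp[1])
	}
	port, err := strconv.ParseUint(sp[2], 10, 32)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid vsock port: %s", sp[2])
	}
	return uint32(cid), uint32(port), nil
}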
func TestCmdToKataProcess(t *testing.T) {
assert := assert.New(t)
cmd := Cmd{
Args: strings.Split("foo", " "),
Envs: []EnvVar{},
WorkDir: "/",
User: "1000",
PrimaryGroup: "1000",
}
_, err := cmdToKataProcess(cmd)
assert.Nil(err)
cmd1 := cmd
cmd1.User = "foobar"
_, err = cmdToKataProcess(cmd1)
assert.Error(err)
cmd1 = cmd
cmd1.PrimaryGroup = "foobar"
_, err = cmdToKataProcess(cmd1)
assert.Error(err)
cmd1 = cmd
cmd1.User = "foobar:1000"
_, err = cmdToKataProcess(cmd1)
assert.Error(err)
cmd1 = cmd
cmd1.User = "1000:2000"
_, err = cmdToKataProcess(cmd1)
assert.Nil(err)
cmd1 = cmd
cmd1.SupplementaryGroups = []string{"foo"}
_, err = cmdToKataProcess(cmd1)
assert.Error(err)
cmd1 = cmd
cmd1.SupplementaryGroups = []string{"4000"}
_, err = cmdToKataProcess(cmd1)
assert.Nil(err)
}
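Taken together, these cases imply cmdToKataProcess accepts only numeric identities: User is "<uid>" or "<uid>:<gid>", and PrimaryGroup and SupplementaryGroups entries must be numeric as well. A hedged standalone sketch of the user-field parsing under that assumption (the helper name is illustrative, not the package's):

package sketch

import (
	"fmt"
	"strconv"
	"strings"
)

// parseNumericUser handles "<uid>" and "<uid>:<gid>", rejecting any
// non-numeric field, matching the pass/fail pattern of the test above.
func parseNumericUser(user string) (uid, gid uint32, err error) {
	sp := strings.SplitN(user, ":", 2)
	u, err := strconv.ParseUint(sp[0], 10, 32)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid uid %q: %v", sp[0], err)
	}
	uid = uint32(u)
	if len(sp) == 2 {
		g, err := strconv.ParseUint(sp[1], 10, 32)
		if err != nil {
			return 0, 0, fmt.Errorf("invalid gid %q: %v", sp[1], err)
		}
		gid = uint32(g)
	}
	return uid, gid, nil
}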
func TestAgentCreateContainer(t *testing.T) {
assert := assert.New(t)
sandbox := &Sandbox{
id: "foobar",
config: &SandboxConfig{
ID: "foobar",
HypervisorType: MockHypervisor,
HypervisorConfig: HypervisorConfig{
KernelPath: "foo",
ImagePath: "bar",
},
},
hypervisor: &mockHypervisor{},
storage: &filesystem{},
}
container := &Container{
id: "barfoo",
sandboxID: "foobar",
sandbox: sandbox,
state: State{
Fstype: "xfs",
},
config: &ContainerConfig{
Annotations: map[string]string{},
},
}
ociSpec, err := json.Marshal(&specs.Spec{})
assert.Nil(err)
container.config.Annotations[vcAnnotations.ConfigJSONKey] = string(ociSpec[:])
impl := &gRPCProxy{}
proxy := mock.ProxyGRPCMock{
GRPCImplementer: impl,
GRPCRegister: gRPCRegister,
}
sockDir, err := testGenerateKataProxySockDir()
assert.Nil(err)
defer os.RemoveAll(sockDir)
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
err = proxy.Start(testKataProxyURL)
assert.Nil(err)
defer proxy.Stop()
k := &kataAgent{
state: KataAgentState{
URL: testKataProxyURL,
},
}
dir, err := ioutil.TempDir("", "kata-agent-test")
assert.Nil(err)
err = k.configure(&mockHypervisor{}, sandbox.id, dir, true, KataAgentConfig{})
assert.Nil(err)
// We'll fail on container metadata file creation, but it helps increase coverage...
_, err = k.createContainer(sandbox, container)
assert.Error(err)
}

View File

@@ -9,9 +9,9 @@ type mockHypervisor struct {
vCPUs uint32
}
func (m *mockHypervisor) init(sandbox *Sandbox) error {
valid, err := sandbox.config.HypervisorConfig.valid()
if valid == false || err != nil {
func (m *mockHypervisor) init(id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error {
err := hypervisorConfig.valid()
if err != nil {
return err
}
@@ -22,7 +22,7 @@ func (m *mockHypervisor) capabilities() capabilities {
return capabilities{}
}
func (m *mockHypervisor) createSandbox(sandboxConfig SandboxConfig) error {
func (m *mockHypervisor) createSandbox() error {
return nil
}
@@ -46,6 +46,10 @@ func (m *mockHypervisor) resumeSandbox() error {
return nil
}
func (m *mockHypervisor) saveSandbox() error {
return nil
}
func (m *mockHypervisor) addDevice(devInfo interface{}, devType deviceType) error {
return nil
}

View File

@@ -15,16 +15,18 @@ func TestMockHypervisorInit(t *testing.T) {
sandbox := &Sandbox{
config: &SandboxConfig{
ID: "mock_sandbox",
HypervisorConfig: HypervisorConfig{
KernelPath: "",
ImagePath: "",
HypervisorPath: "",
},
},
storage: &filesystem{},
}
// wrong config
if err := m.init(sandbox); err == nil {
if err := m.init(sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err == nil {
t.Fatal()
}
@@ -35,7 +37,7 @@ func TestMockHypervisorInit(t *testing.T) {
}
// right config
if err := m.init(sandbox); err != nil {
if err := m.init(sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil {
t.Fatal(err)
}
}
@@ -43,9 +45,7 @@ func TestMockHypervisorInit(t *testing.T) {
func TestMockHypervisorCreateSandbox(t *testing.T) {
var m *mockHypervisor
config := SandboxConfig{}
if err := m.createSandbox(config); err != nil {
if err := m.createSandbox(); err != nil {
t.Fatal(err)
}
}
@@ -96,3 +96,11 @@ func TestMockHypervisorGetSandboxConsole(t *testing.T) {
t.Fatalf("Got %s\nExpecting %s", result, expected)
}
}
func TestMockHypervisorSaveSandbox(t *testing.T) {
var m *mockHypervisor
if err := m.saveSandbox(); err != nil {
t.Fatal(err)
}
}

View File

@@ -145,3 +145,18 @@ func (n *noopAgent) pauseContainer(sandbox *Sandbox, c Container) error {
func (n *noopAgent) resumeContainer(sandbox *Sandbox, c Container) error {
return nil
}
// configure is the Noop agent configuration implementation. It does nothing.
func (n *noopAgent) configure(h hypervisor, id, sharePath string, builtin bool, config interface{}) error {
return nil
}
// getVMPath is the Noop agent vm path getter. It does nothing.
func (n *noopAgent) getVMPath(id string) string {
return ""
}
// getSharePath is the Noop agent share path getter. It does nothing.
func (n *noopAgent) getSharePath(id string) string {
return ""
}

View File

@@ -14,7 +14,7 @@ func testCreateNoopContainer() (*Sandbox, *Container, error) {
contID := "100"
config := newTestSandboxConfigNoop()
p, err := CreateSandbox(config)
p, err := CreateSandbox(config, nil)
if err != nil {
return nil, nil, err
}
@@ -156,3 +156,56 @@ func TestNoopAgentResumeContainer(t *testing.T) {
t.Fatal(err)
}
}
func TestNoopAgentConfigure(t *testing.T) {
n := &noopAgent{}
h := &mockHypervisor{}
id := "foobar"
sharePath := "foobarDir"
err := n.configure(h, id, sharePath, true, nil)
if err != nil {
t.Fatal(err)
}
}
func TestNoopAgentGetVMPath(t *testing.T) {
n := &noopAgent{}
path := n.getVMPath("")
if path != "" {
t.Fatal("getSharePath returns non empty path")
}
}
func TestNoopAgentGetSharePath(t *testing.T) {
n := &noopAgent{}
path := n.getSharePath("")
if path != "" {
t.Fatal("getSharePath returns non empty path")
}
}
func TestNoopAgentStartProxy(t *testing.T) {
n := &noopAgent{}
sandbox, _, err := testCreateNoopContainer()
if err != nil {
t.Fatal(err)
}
defer cleanUp()
err = n.startProxy(sandbox)
if err != nil {
t.Fatal(err)
}
}
func TestNoopAgentProcessListContainer(t *testing.T) {
n := &noopAgent{}
sandbox, container, err := testCreateNoopContainer()
if err != nil {
t.Fatal(err)
}
defer cleanUp()
_, err = n.processListContainer(sandbox, *container, ProcessListOptions{})
if err != nil {
t.Fatal(err)
}
}

View File

@@ -91,6 +91,12 @@ type CompatOCISpec struct {
Process *CompatOCIProcess `json:"process,omitempty"`
}
// FactoryConfig is a structure to set the VM factory configuration.
type FactoryConfig struct {
// Template enables VM templating support in VM factory.
Template bool
}
// RuntimeConfig aggregates all runtime specific settings
type RuntimeConfig struct {
VMConfig vc.Resources
@@ -98,6 +104,8 @@ type RuntimeConfig struct {
HypervisorType vc.HypervisorType
HypervisorConfig vc.HypervisorConfig
FactoryConfig FactoryConfig
AgentType vc.AgentType
AgentConfig interface{}

View File

@@ -35,6 +35,13 @@ func (m *VCMock) SetLogger(logger logrus.FieldLogger) {
}
}
// SetFactory implements the VC function of the same name.
func (m *VCMock) SetFactory(factory vc.Factory) {
if m.SetFactoryFunc != nil {
m.SetFactoryFunc(factory)
}
}
// CreateSandbox implements the VC function of the same name.
func (m *VCMock) CreateSandbox(sandboxConfig vc.SandboxConfig) (vc.VCSandbox, error) {
if m.CreateSandboxFunc != nil {

View File

@@ -11,6 +11,7 @@ import (
"testing"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/factory"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
)
@@ -20,7 +21,10 @@ const (
testContainerID = "testContainerID"
)
var loggerTriggered = 0
var (
loggerTriggered = 0
factoryTriggered = 0
)
func TestVCImplementations(t *testing.T) {
// official implementation
@@ -675,3 +679,34 @@ func TestVCMockResumeContainer(t *testing.T) {
assert.Error(err)
assert.True(IsMockError(err))
}
func TestVCMockSetVMFactory(t *testing.T) {
assert := assert.New(t)
m := &VCMock{}
assert.Nil(m.SetFactoryFunc)
hyperConfig := vc.HypervisorConfig{
KernelPath: "foobar",
ImagePath: "foobar",
}
vmConfig := vc.VMConfig{
HypervisorType: vc.MockHypervisor,
AgentType: vc.NoopAgentType,
HypervisorConfig: hyperConfig,
}
f, err := factory.NewFactory(factory.Config{VMConfig: vmConfig}, false)
assert.Nil(err)
assert.Equal(factoryTriggered, 0)
m.SetFactory(f)
assert.Equal(factoryTriggered, 0)
m.SetFactoryFunc = func(factory vc.Factory) {
factoryTriggered = 1
}
m.SetFactory(f)
assert.Equal(factoryTriggered, 1)
}
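Beyond the mock, callers are expected to wire a factory the same way this test builds one: create it from a VMConfig, then install it with SetFactory before creating sandboxes so that startVM can request VMs from it. A minimal sketch under that assumption, reusing this file's vc and factory imports; vc.VCImpl is assumed to be the concrete implementation behind the vc.VC interface, and the final NewFactory argument is passed as false simply because the test above does the same:

func wireFactorySketch() error {
	vmConfig := vc.VMConfig{
		HypervisorType: vc.MockHypervisor,
		AgentType:      vc.NoopAgentType,
		HypervisorConfig: vc.HypervisorConfig{
			KernelPath: "foobar",
			ImagePath:  "foobar",
		},
	}
	f, err := factory.NewFactory(factory.Config{VMConfig: vmConfig}, false) // false, as in the test above
	if err != nil {
		return err
	}
	var impl vc.VC = &vc.VCImpl{} // assumption: concrete VC implementation name
	impl.SetFactory(f)
	return nil
}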

View File

@@ -35,7 +35,8 @@ type Container struct {
// VCMock is a type that provides an implementation of the VC interface.
// It is used for testing.
type VCMock struct {
SetLoggerFunc func(logger logrus.FieldLogger)
SetLoggerFunc func(logger logrus.FieldLogger)
SetFactoryFunc func(factory vc.Factory)
CreateSandboxFunc func(sandboxConfig vc.SandboxConfig) (vc.VCSandbox, error)
DeleteSandboxFunc func(sandboxID string) (vc.VCSandbox, error)

View File

@@ -47,26 +47,33 @@ type QemuState struct {
// qemu is a Hypervisor interface implementation for the Linux qemu hypervisor.
type qemu struct {
id string
vmConfig Resources
storage resourceStorage
config HypervisorConfig
qmpMonitorCh qmpChannel
qemuConfig govmmQemu.Config
sandbox *Sandbox
state QemuState
arch qemuArch
}
const qmpCapErrMsg = "Failed to negotiate QMP capabilities"
const (
consoleSocket = "console.sock"
qmpSocket = "qmp.sock"
const qmpSocket = "qmp.sock"
qmpCapErrMsg = "Failed to negotiate QMP capabilities"
qmpCapMigrationBypassSharedMemory = "bypass-shared-memory"
qmpExecCatCmd = "exec:cat"
const defaultConsole = "console.sock"
scsiControllerID = "scsi0"
)
var qemuMajorVersion int
var qemuMinorVersion int
@@ -84,10 +91,6 @@ const (
removeDevice
)
const (
scsiControllerID = "scsi0"
)
type qmpLogger struct {
logger *logrus.Entry
}
@@ -170,25 +173,32 @@ func (q *qemu) qemuPath() (string, error) {
}
// init initializes the Qemu structure.
func (q *qemu) init(sandbox *Sandbox) error {
valid, err := sandbox.config.HypervisorConfig.valid()
if valid == false || err != nil {
func (q *qemu) init(id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error {
err := hypervisorConfig.valid()
if err != nil {
return err
}
q.vmConfig = sandbox.config.VMConfig
q.config = sandbox.config.HypervisorConfig
q.sandbox = sandbox
q.id = id
q.storage = storage
q.vmConfig = vmConfig
q.config = *hypervisorConfig
q.arch = newQemuArch(q.config)
if err = sandbox.storage.fetchHypervisorState(sandbox.id, &q.state); err != nil {
if err = q.storage.fetchHypervisorState(q.id, &q.state); err != nil {
q.Logger().Debug("Creating bridges")
q.state.Bridges = q.arch.bridges(q.config.DefaultBridges)
q.Logger().Debug("Creating UUID")
q.state.UUID = uuid.Generate().String()
if err = sandbox.storage.storeHypervisorState(sandbox.id, q.state); err != nil {
// The path might already exist, but in case of VM templating,
// we have to create it since the sandbox has not created it yet.
if err = os.MkdirAll(filepath.Join(runStoragePath, id), dirMode); err != nil {
return err
}
if err = q.storage.storeHypervisorState(q.id, q.state); err != nil {
return err
}
}
@@ -238,17 +248,17 @@ func (q *qemu) memoryTopology() (govmmQemu.Memory, error) {
return q.arch.memoryTopology(memMb, hostMemMb), nil
}
func (q *qemu) qmpSocketPath(sandboxID string) (string, error) {
return utils.BuildSocketPath(runStoragePath, sandboxID, qmpSocket)
func (q *qemu) qmpSocketPath(id string) (string, error) {
return utils.BuildSocketPath(RunVMStoragePath, id, qmpSocket)
}
func (q *qemu) getQemuMachine(sandboxConfig SandboxConfig) (govmmQemu.Machine, error) {
func (q *qemu) getQemuMachine() (govmmQemu.Machine, error) {
machine, err := q.arch.machine()
if err != nil {
return govmmQemu.Machine{}, err
}
accelerators := sandboxConfig.HypervisorConfig.MachineAccelerators
accelerators := q.config.MachineAccelerators
if accelerators != "" {
if !strings.HasPrefix(accelerators, ",") {
accelerators = fmt.Sprintf(",%s", accelerators)
@@ -275,11 +285,85 @@ func (q *qemu) appendImage(devices []govmmQemu.Device) ([]govmmQemu.Device, erro
return devices, nil
}
// createSandbox is the Hypervisor sandbox creation implementation for govmmQemu.
func (q *qemu) createSandbox(sandboxConfig SandboxConfig) error {
func (q *qemu) createQmpSocket() ([]govmmQemu.QMPSocket, error) {
monitorSockPath, err := q.qmpSocketPath(q.id)
if err != nil {
return nil, err
}
q.qmpMonitorCh = qmpChannel{
ctx: context.Background(),
path: monitorSockPath,
}
err = os.MkdirAll(filepath.Dir(monitorSockPath), dirMode)
if err != nil {
return nil, err
}
return []govmmQemu.QMPSocket{
{
Type: "unix",
Name: q.qmpMonitorCh.path,
Server: true,
NoWait: true,
},
}, nil
}
func (q *qemu) buildDevices(initrdPath string) ([]govmmQemu.Device, *govmmQemu.IOThread, error) {
var devices []govmmQemu.Device
machine, err := q.getQemuMachine(sandboxConfig)
console, err := q.getSandboxConsole(q.id)
if err != nil {
return nil, nil, err
}
// Add bridges before any other devices. This way we make sure that
// the bridge gets the first available PCI address, i.e. bridgePCIStartAddr
devices = q.arch.appendBridges(devices, q.state.Bridges)
devices = q.arch.appendConsole(devices, console)
if initrdPath == "" {
devices, err = q.appendImage(devices)
if err != nil {
return nil, nil, err
}
}
var ioThread *govmmQemu.IOThread
if q.config.BlockDeviceDriver == VirtioSCSI {
devices, ioThread = q.arch.appendSCSIController(devices, q.config.EnableIOThreads)
}
return devices, ioThread, nil
}
func (q *qemu) setupTemplate(knobs *govmmQemu.Knobs, memory *govmmQemu.Memory) govmmQemu.Incoming {
incoming := govmmQemu.Incoming{}
if q.config.BootToBeTemplate || q.config.BootFromTemplate {
knobs.FileBackedMem = true
memory.Path = q.config.MemoryPath
if q.config.BootToBeTemplate {
knobs.FileBackedMemShared = true
}
if q.config.BootFromTemplate {
incoming.MigrationType = govmmQemu.MigrationExec
incoming.Exec = "cat " + q.config.DevicesStatePath
}
}
return incoming
}
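The two knobs handled above pair up across a save/restore cycle: a template VM boots with BootToBeTemplate and file-backed guest memory at MemoryPath, while clones boot with BootFromTemplate pointing at the same memory file plus the DevicesStatePath written by saveSandbox. A sketch of the paired configurations, with illustrative paths supplied by the caller:

func templateConfigsSketch(memPath, statePath string) (toBeTemplate, fromTemplate HypervisorConfig) {
	// Template VM: guest memory lives in a file other VMs can map.
	toBeTemplate.BootToBeTemplate = true
	toBeTemplate.MemoryPath = memPath

	// Cloned VM: map the same memory file and restore device state
	// through the "exec:cat <statePath>" incoming migration above.
	fromTemplate.BootFromTemplate = true
	fromTemplate.MemoryPath = memPath
	fromTemplate.DevicesStatePath = statePath
	return toBeTemplate, fromTemplate
}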
// createSandbox is the Hypervisor sandbox creation implementation for govmmQemu.
func (q *qemu) createSandbox() error {
machine, err := q.getQemuMachine()
if err != nil {
return err
}
@@ -314,7 +398,7 @@ func (q *qemu) createSandbox(sandboxConfig SandboxConfig) error {
// Pass the sandbox name to the agent via the kernel command-line to
// allow the agent to use it in log messages.
params := q.kernelParameters() + " " + "agent.sandbox=" + sandboxConfig.ID
params := q.kernelParameters() + " " + "agent.sandbox=" + q.id
kernel := govmmQemu.Kernel{
Path: kernelPath,
@@ -322,6 +406,8 @@ func (q *qemu) createSandbox(sandboxConfig SandboxConfig) error {
Params: params,
}
incoming := q.setupTemplate(&knobs, &memory)
rtc := govmmQemu.RTC{
Base: "utc",
DriftFix: "slew",
@@ -331,7 +417,7 @@ func (q *qemu) createSandbox(sandboxConfig SandboxConfig) error {
return fmt.Errorf("UUID should not be empty")
}
monitorSockPath, err := q.qmpSocketPath(sandboxConfig.ID)
monitorSockPath, err := q.qmpSocketPath(q.id)
if err != nil {
return err
}
@@ -346,42 +432,19 @@ func (q *qemu) createSandbox(sandboxConfig SandboxConfig) error {
return err
}
qmpSockets := []govmmQemu.QMPSocket{
{
Type: "unix",
Name: q.qmpMonitorCh.path,
Server: true,
NoWait: true,
},
}
// Add bridges before any other devices. This way we make sure that
// the bridge gets the first available PCI address, i.e. bridgePCIStartAddr
devices = q.arch.appendBridges(devices, q.state.Bridges)
devices = q.arch.append9PVolumes(devices, sandboxConfig.Volumes)
console, err := q.getSandboxConsole(sandboxConfig.ID)
qmpSockets, err := q.createQmpSocket()
if err != nil {
return err
}
devices = q.arch.appendConsole(devices, console)
if initrdPath == "" {
devices, err = q.appendImage(devices)
if err != nil {
return err
}
}
var ioThread *govmmQemu.IOThread
if q.config.BlockDeviceDriver == VirtioSCSI {
devices, ioThread = q.arch.appendSCSIController(devices, q.config.EnableIOThreads)
devices, ioThread, err := q.buildDevices(initrdPath)
if err != nil {
return err
}
cpuModel := q.arch.cpuModel()
firmwarePath, err := sandboxConfig.HypervisorConfig.FirmwareAssetPath()
firmwarePath, err := q.config.FirmwareAssetPath()
if err != nil {
return err
}
@@ -392,7 +455,7 @@ func (q *qemu) createSandbox(sandboxConfig SandboxConfig) error {
}
qemuConfig := govmmQemu.Config{
Name: fmt.Sprintf("sandbox-%s", sandboxConfig.ID),
Name: fmt.Sprintf("sandbox-%s", q.id),
UUID: q.state.UUID,
Path: qemuPath,
Ctx: q.qmpMonitorCh.ctx,
@@ -405,6 +468,7 @@ func (q *qemu) createSandbox(sandboxConfig SandboxConfig) error {
RTC: rtc,
QMPSockets: qmpSockets,
Knobs: knobs,
Incoming: incoming,
VGA: "none",
GlobalParam: "kvm-pit.lost_tick_policy=discard",
Bios: firmwarePath,
@@ -511,7 +575,18 @@ func (q *qemu) stopSandbox() error {
return err
}
return qmp.ExecuteQuit(q.qmpMonitorCh.ctx)
err = qmp.ExecuteQuit(q.qmpMonitorCh.ctx)
if err != nil {
q.Logger().WithError(err).Error("Fail to execute qmp QUIT")
return err
}
err = os.RemoveAll(filepath.Join(RunVMStoragePath, q.id))
if err != nil {
q.Logger().WithError(err).Error("Failed to clean up vm directory")
}
return nil
}
func (q *qemu) togglePauseSandbox(pause bool) error {
@@ -736,7 +811,7 @@ func (q *qemu) hotplugAddDevice(devInfo interface{}, devType deviceType) (interf
return data, err
}
return data, q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state)
return data, q.storage.storeHypervisorState(q.id, q.state)
}
func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
@@ -745,7 +820,7 @@ func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (int
return data, err
}
return data, q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state)
return data, q.storage.storeHypervisorState(q.id, q.state)
}
func (q *qemu) hotplugCPUs(vcpus uint32, op operation) (uint32, error) {
@@ -821,12 +896,12 @@ func (q *qemu) hotplugAddCPUs(amount uint32) (uint32, error) {
hotpluggedVCPUs++
if hotpluggedVCPUs == amount {
// All vCPUs were hotplugged
return amount, q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state)
return amount, q.storage.storeHypervisorState(q.id, q.state)
}
}
// All vCPUs were NOT hotplugged
if err := q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state); err != nil {
if err := q.storage.storeHypervisorState(q.id, q.state); err != nil {
q.Logger().Errorf("failed to save hypervisor state after hotplug %d vCPUs: %v", hotpluggedVCPUs, err)
}
@@ -846,7 +921,7 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) {
// get the last vCPUs and try to remove it
cpu := q.state.HotpluggedVCPUs[len(q.state.HotpluggedVCPUs)-1]
if err := q.qmpMonitorCh.qmp.ExecuteDeviceDel(q.qmpMonitorCh.ctx, cpu.ID); err != nil {
_ = q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state)
_ = q.storage.storeHypervisorState(q.id, q.state)
return i, fmt.Errorf("failed to hotunplug CPUs, only %d CPUs were hotunplugged: %v", i, err)
}
@@ -854,7 +929,7 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) {
q.state.HotpluggedVCPUs = q.state.HotpluggedVCPUs[:len(q.state.HotpluggedVCPUs)-1]
}
return amount, q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state)
return amount, q.storage.storeHypervisorState(q.id, q.state)
}
func (q *qemu) hotplugMemory(memDev *memoryDevice, op operation) error {
@@ -911,7 +986,7 @@ func (q *qemu) hotplugAddMemory(memDev *memoryDevice) error {
}
q.state.HotpluggedMemory += memDev.sizeMB
return q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state)
return q.storage.storeHypervisorState(q.id, q.state)
}
func (q *qemu) pauseSandbox() error {
@@ -952,8 +1027,60 @@ func (q *qemu) addDevice(devInfo interface{}, devType deviceType) error {
// getSandboxConsole builds the path of the console where we can read
// logs coming from the sandbox.
func (q *qemu) getSandboxConsole(sandboxID string) (string, error) {
return utils.BuildSocketPath(runStoragePath, sandboxID, defaultConsole)
func (q *qemu) getSandboxConsole(id string) (string, error) {
return utils.BuildSocketPath(RunVMStoragePath, id, consoleSocket)
}
func (q *qemu) saveSandbox() error {
defer func() {
if q.qmpMonitorCh.qmp != nil {
q.qmpMonitorCh.qmp.Shutdown()
}
}()
q.Logger().Info("save sandbox")
cfg := govmmQemu.QMPConfig{Logger: newQMPLogger()}
// Auto-closed by QMPStart().
disconnectCh := make(chan struct{})
qmp, _, err := govmmQemu.QMPStart(q.qmpMonitorCh.ctx, q.qmpMonitorCh.path, cfg, disconnectCh)
if err != nil {
q.Logger().WithError(err).Error("Failed to connect to QEMU instance")
return err
}
q.qmpMonitorCh.qmp = qmp
err = qmp.ExecuteQMPCapabilities(q.qmpMonitorCh.ctx)
if err != nil {
q.Logger().WithError(err).Error(qmpCapErrMsg)
return err
}
// BootToBeTemplate sets the VM to be a template that other VMs can clone from. We want to
// bypass shared memory when saving the VM to a local file through exec migration, since
// the guest memory is already persisted in the template's shared memory file.
if q.config.BootToBeTemplate {
err = q.qmpMonitorCh.qmp.ExecSetMigrationCaps(q.qmpMonitorCh.ctx, []map[string]interface{}{
{
"capability": qmpCapMigrationBypassSharedMemory,
"state": true,
},
})
if err != nil {
q.Logger().WithError(err).Error("set migration bypass shared memory")
return err
}
}
err = q.qmpMonitorCh.qmp.ExecSetMigrateArguments(q.qmpMonitorCh.ctx, fmt.Sprintf("%s>%s", qmpExecCatCmd, q.config.DevicesStatePath))
if err != nil {
q.Logger().WithError(err).Error("exec migration")
return err
}
return nil
}
// genericAppendBridges appends to devices the given bridges

View File

@@ -50,9 +50,6 @@ type qemuArch interface {
// memoryTopology returns the memory topology using the given amount of memoryMb and hostMemoryMb
memoryTopology(memoryMb, hostMemoryMb uint64) govmmQemu.Memory
// append9PVolumes appends volumes to devices
append9PVolumes(devices []govmmQemu.Device, volumes []Volume) []govmmQemu.Device
// appendConsole appends a console to devices
appendConsole(devices []govmmQemu.Device, path string) []govmmQemu.Device
@@ -253,15 +250,6 @@ func (q *qemuArchBase) memoryTopology(memoryMb, hostMemoryMb uint64) govmmQemu.M
return memory
}
func (q *qemuArchBase) append9PVolumes(devices []govmmQemu.Device, volumes []Volume) []govmmQemu.Device {
// Add the shared volumes
for _, v := range volumes {
devices = q.append9PVolume(devices, v)
}
return devices
}
func (q *qemuArchBase) appendConsole(devices []govmmQemu.Device, path string) []govmmQemu.Device {
serial := govmmQemu.SerialDevice{
Driver: govmmQemu.VirtioSerial,

View File

@@ -206,8 +206,6 @@ func testQemuArchBaseAppend(t *testing.T, structure interface{}, expected []govm
devices = qemuArchBase.append9PVolume(devices, s)
case Socket:
devices = qemuArchBase.appendSocket(devices, s)
case []Volume:
devices = qemuArchBase.append9PVolumes(devices, s)
case drivers.Drive:
devices = qemuArchBase.appendBlockDevice(devices, s)
case drivers.VFIODevice:
@@ -219,49 +217,12 @@ func testQemuArchBaseAppend(t *testing.T, structure interface{}, expected []govm
assert.Equal(devices, expected)
}
func TestQemuArchBaseAppend9PVolumes(t *testing.T) {
volMountTag := "testVolMountTag"
volHostPath := "testVolHostPath"
expectedOut := []govmmQemu.Device{
govmmQemu.FSDevice{
Driver: govmmQemu.Virtio9P,
FSDriver: govmmQemu.Local,
ID: fmt.Sprintf("extra-9p-%s", fmt.Sprintf("%s.1", volMountTag)),
Path: fmt.Sprintf("%s.1", volHostPath),
MountTag: fmt.Sprintf("%s.1", volMountTag),
SecurityModel: govmmQemu.None,
},
govmmQemu.FSDevice{
Driver: govmmQemu.Virtio9P,
FSDriver: govmmQemu.Local,
ID: fmt.Sprintf("extra-9p-%s", fmt.Sprintf("%s.2", volMountTag)),
Path: fmt.Sprintf("%s.2", volHostPath),
MountTag: fmt.Sprintf("%s.2", volMountTag),
SecurityModel: govmmQemu.None,
},
}
volumes := []Volume{
{
MountTag: fmt.Sprintf("%s.1", volMountTag),
HostPath: fmt.Sprintf("%s.1", volHostPath),
},
{
MountTag: fmt.Sprintf("%s.2", volMountTag),
HostPath: fmt.Sprintf("%s.2", volHostPath),
},
}
testQemuArchBaseAppend(t, volumes, expectedOut)
}
func TestQemuArchBaseAppendConsoles(t *testing.T) {
var devices []govmmQemu.Device
assert := assert.New(t)
qemuArchBase := newQemuArchBase()
path := filepath.Join(runStoragePath, sandboxID, defaultConsole)
path := filepath.Join(runStoragePath, sandboxID, consoleSocket)
expectedOut := []govmmQemu.Device{
govmmQemu.SerialDevice{

View File

@@ -86,7 +86,7 @@ func TestQemuInit(t *testing.T) {
t.Fatalf("Could not create parent directory %s: %v", parentDir, err)
}
if err := q.init(sandbox); err != nil {
if err := q.init(sandbox.id, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil {
t.Fatal(err)
}
@@ -117,8 +117,8 @@ func TestQemuInitMissingParentDirFail(t *testing.T) {
t.Fatal(err)
}
if err := q.init(sandbox); err == nil {
t.Fatal("Qemu init() expected to fail because of missing parent directory for storage")
if err := q.init(sandbox.id, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil {
t.Fatalf("Qemu init() is not expected to fail because of missing parent directory for storage: %v", err)
}
}
@@ -249,7 +249,7 @@ func TestQemuAddDeviceSerialPortDev(t *testing.T) {
func TestQemuGetSandboxConsole(t *testing.T) {
q := &qemu{}
sandboxID := "testSandboxID"
expected := filepath.Join(runStoragePath, sandboxID, defaultConsole)
expected := filepath.Join(RunVMStoragePath, sandboxID, consoleSocket)
result, err := q.getSandboxConsole(sandboxID)
if err != nil {

View File

@@ -440,6 +440,7 @@ type Sandbox struct {
id string
sync.Mutex
factory Factory
hypervisor hypervisor
agent agent
storage resourceStorage
@@ -679,12 +680,12 @@ func createAssets(sandboxConfig *SandboxConfig) error {
// It will create and store the sandbox structure, and then ask the hypervisor
// to physically create that sandbox, i.e. start a VM for that sandbox to eventually
// be started.
func createSandbox(sandboxConfig SandboxConfig) (*Sandbox, error) {
func createSandbox(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) {
if err := createAssets(&sandboxConfig); err != nil {
return nil, err
}
s, err := newSandbox(sandboxConfig)
s, err := newSandbox(sandboxConfig, factory)
if err != nil {
return nil, err
}
@@ -718,7 +719,7 @@ func createSandbox(sandboxConfig SandboxConfig) (*Sandbox, error) {
return s, nil
}
func newSandbox(sandboxConfig SandboxConfig) (*Sandbox, error) {
func newSandbox(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) {
if sandboxConfig.valid() == false {
return nil, fmt.Errorf("Invalid sandbox configuration")
}
@@ -734,6 +735,7 @@ func newSandbox(sandboxConfig SandboxConfig) (*Sandbox, error) {
s := &Sandbox{
id: sandboxConfig.ID,
factory: factory,
hypervisor: hypervisor,
agent: agent,
storage: &filesystem{},
@@ -771,15 +773,15 @@ func newSandbox(sandboxConfig SandboxConfig) (*Sandbox, error) {
}
}()
if err = s.hypervisor.init(s); err != nil {
if err = s.hypervisor.init(s.id, &sandboxConfig.HypervisorConfig, sandboxConfig.VMConfig, s.storage); err != nil {
return nil, err
}
if err = s.hypervisor.createSandbox(sandboxConfig); err != nil {
if err = s.hypervisor.createSandbox(); err != nil {
return nil, err
}
agentConfig := newAgentConfig(sandboxConfig)
agentConfig := newAgentConfig(sandboxConfig.AgentType, sandboxConfig.AgentConfig)
if err = s.agent.init(s, agentConfig); err != nil {
return nil, err
}
@@ -821,7 +823,8 @@ func fetchSandbox(sandboxID string) (sandbox *Sandbox, err error) {
return nil, err
}
sandbox, err = createSandbox(config)
// fetchSandbox is not supposed to create a new sandbox VM.
sandbox, err = createSandbox(config, nil)
if err != nil {
return nil, fmt.Errorf("failed to create sandbox with config %+v: %v", config, err)
}
@@ -935,7 +938,28 @@ func (s *Sandbox) removeNetwork() error {
func (s *Sandbox) startVM() error {
s.Logger().Info("Starting VM")
// FIXME: This would break cached VMs. We need network hotplug and move
// oci hooks and netns handling to cli. See #273.
if err := s.network.run(s.networkNS.NetNsPath, func() error {
if s.factory != nil {
vm, err := s.factory.GetVM(VMConfig{
HypervisorType: s.config.HypervisorType,
HypervisorConfig: s.config.HypervisorConfig,
AgentType: s.config.AgentType,
AgentConfig: s.config.AgentConfig,
})
if err != nil {
return err
}
err = vm.assignSandbox(s)
if err != nil {
return err
}
// FIXME: factory vm needs network hotplug to add NICs.
s.networkNS.NetNsPath = ""
return nil
}
return s.hypervisor.startSandbox()
}); err != nil {
return err

View File

@@ -52,7 +52,7 @@ func testCreateSandbox(t *testing.T, id string,
Containers: containers,
}
sandbox, err := createSandbox(sconfig)
sandbox, err := createSandbox(sconfig, nil)
if err != nil {
return nil, fmt.Errorf("Could not create sandbox: %s", err)
}

virtcontainers/vm.go (new file, 227 lines)
View File

@@ -0,0 +1,227 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package virtcontainers
import (
"os"
"path/filepath"
"github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
"github.com/sirupsen/logrus"
)
// VM is an abstraction of a virtual machine.
type VM struct {
id string
hypervisor hypervisor
agent agent
cpu uint32
memory uint32
cpuDelta uint32
}
// VMConfig is a collection of all info that a new blackbox VM needs.
type VMConfig struct {
HypervisorType HypervisorType
HypervisorConfig HypervisorConfig
AgentType AgentType
AgentConfig interface{}
}
// Valid checks VMConfig validity.
func (c *VMConfig) Valid() error {
return c.HypervisorConfig.valid()
}
// NewVM creates a new VM based on provided VMConfig.
func NewVM(config VMConfig) (*VM, error) {
hypervisor, err := newHypervisor(config.HypervisorType)
if err != nil {
return nil, err
}
if err = config.Valid(); err != nil {
return nil, err
}
id := uuid.Generate().String()
virtLog.WithField("vm id", id).WithField("config", config).Info("create new vm")
defer func() {
if err != nil {
virtLog.WithField("vm id", id).WithError(err).Error("failed to create new vm")
}
}()
if err = hypervisor.init(id, &config.HypervisorConfig, Resources{}, &filesystem{}); err != nil {
return nil, err
}
if err = hypervisor.createSandbox(); err != nil {
return nil, err
}
agent := newAgent(config.AgentType)
agentConfig := newAgentConfig(config.AgentType, config.AgentConfig)
// do not keep the connection alive for the temporary agent
if c, ok := agentConfig.(KataAgentConfig); ok {
c.LongLiveConn = false
agentConfig = c // the type assertion yields a copy, so write it back
}
vmSharePath := buildVMSharePath(id)
err = agent.configure(hypervisor, id, vmSharePath, true, agentConfig)
if err != nil {
return nil, err
}
if err = hypervisor.startSandbox(); err != nil {
return nil, err
}
defer func() {
if err != nil {
virtLog.WithField("vm id", id).WithError(err).Info("clean up vm")
hypervisor.stopSandbox()
}
}()
// VMs booted from a template start out paused, so skip the wait and agent check for them.
if !config.HypervisorConfig.BootFromTemplate {
err = hypervisor.waitSandbox(vmStartTimeout)
if err != nil {
return nil, err
}
virtLog.WithField("vm id", id).Info("check agent status")
err = agent.check()
if err != nil {
return nil, err
}
}
return &VM{
id: id,
hypervisor: hypervisor,
agent: agent,
cpu: config.HypervisorConfig.DefaultVCPUs,
memory: config.HypervisorConfig.DefaultMemSz,
}, nil
}
func buildVMSharePath(id string) string {
return filepath.Join(RunVMStoragePath, id, "shared")
}
func (v *VM) logger() logrus.FieldLogger {
return virtLog.WithField("vm id", v.id)
}
// Pause pauses a VM.
func (v *VM) Pause() error {
v.logger().Info("pause vm")
return v.hypervisor.pauseSandbox()
}
// Save saves a VM to persistent disk.
func (v *VM) Save() error {
v.logger().Info("save vm")
return v.hypervisor.saveSandbox()
}
// Resume resumes a paused VM.
func (v *VM) Resume() error {
v.logger().Info("resume vm")
return v.hypervisor.resumeSandbox()
}
// Start kicks off a configured VM.
func (v *VM) Start() error {
v.logger().Info("start vm")
return v.hypervisor.startSandbox()
}
// Stop stops a VM process.
func (v *VM) Stop() error {
v.logger().Info("kill vm")
return v.hypervisor.stopSandbox()
}
// AddCPUs adds num of CPUs to the VM.
func (v *VM) AddCPUs(num uint32) error {
if num > 0 {
v.logger().Infof("hot adding %d vCPUs", num)
if _, err := v.hypervisor.hotplugAddDevice(num, cpuDev); err != nil {
return err
}
v.cpuDelta += num
v.cpu += num
}
return nil
}
// AddMemory adds numMB of memory to the VM.
func (v *VM) AddMemory(numMB uint32) error {
if numMB > 0 {
v.logger().Infof("hot adding %d MB memory", numMB)
dev := &memoryDevice{1, int(numMB)}
if _, err := v.hypervisor.hotplugAddDevice(dev, memoryDev); err != nil {
return err
}
}
return nil
}
// OnlineCPUMemory puts the hotplugged CPU and memory online.
func (v *VM) OnlineCPUMemory() error {
v.logger().Infof("online CPU %d and memory", v.cpuDelta)
err := v.agent.onlineCPUMem(v.cpuDelta)
if err == nil {
v.cpuDelta = 0
}
return err
}
func (v *VM) assignSandbox(s *Sandbox) error {
// add vm symlinks
// - link vm socket from sandbox dir (/run/vc/vm/sbid/<kata.sock>) to vm dir (/run/vc/vm/vmid/<kata.sock>)
// - link 9pfs share path from sandbox dir (/run/kata-containers/shared/sandboxes/sbid/) to vm dir (/run/vc/vm/vmid/shared/)
vmSharePath := buildVMSharePath(v.id)
vmSockDir := v.agent.getVMPath(v.id)
sbSharePath := s.agent.getSharePath(s.id)
sbSockDir := s.agent.getVMPath(s.id)
v.logger().WithFields(logrus.Fields{
"vmSharePath": vmSharePath,
"vmSockDir": vmSockDir,
"sbSharePath": sbSharePath,
"sbSockDir": sbSockDir,
}).Infof("assign vm to sandbox %s", s.id)
// First make sure the symlinks do not exist
os.RemoveAll(sbSharePath)
os.RemoveAll(sbSockDir)
if err := os.Symlink(vmSharePath, sbSharePath); err != nil {
return err
}
if err := os.Symlink(vmSockDir, sbSockDir); err != nil {
os.Remove(sbSharePath)
return err
}
s.hypervisor = v.hypervisor
return nil
}
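Putting the pieces together, here is a hedged sketch of turning a VM into a template using only the API defined in this file; the paths are illustrative assumptions, and the real orchestration is expected to live in the factory package:

func makeTemplateVMSketch(config VMConfig) (*VM, error) {
	config.HypervisorConfig.BootToBeTemplate = true
	config.HypervisorConfig.MemoryPath = "/run/vc/vm/template/memory"      // illustrative
	config.HypervisorConfig.DevicesStatePath = "/run/vc/vm/template/state" // illustrative

	vm, err := NewVM(config) // boots with file-backed, shared guest memory
	if err != nil {
		return nil, err
	}
	if err := vm.Pause(); err != nil { // quiesce the guest before saving
		return nil, err
	}
	if err := vm.Save(); err != nil { // stream device state out via migrate "exec:cat"
		return nil, err
	}
	// Clones can now boot with BootFromTemplate=true and the same two paths.
	return vm, nil
}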

virtcontainers/vm_test.go (new file, 83 lines)
View File

@@ -0,0 +1,83 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package virtcontainers
import (
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewVM(t *testing.T) {
assert := assert.New(t)
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
config := VMConfig{
HypervisorType: MockHypervisor,
AgentType: NoopAgentType,
}
hyperConfig := HypervisorConfig{
KernelPath: testDir,
ImagePath: testDir,
}
var vm *VM
_, err := NewVM(config)
assert.Error(err)
config.HypervisorConfig = hyperConfig
vm, err = NewVM(config)
assert.Nil(err)
// VM operations
err = vm.Pause()
assert.Nil(err)
err = vm.Resume()
assert.Nil(err)
err = vm.Start()
assert.Nil(err)
err = vm.Save()
assert.Nil(err)
err = vm.Stop()
assert.Nil(err)
err = vm.AddCPUs(2)
assert.Nil(err)
err = vm.AddMemory(128)
assert.Nil(err)
err = vm.OnlineCPUMemory()
assert.Nil(err)
// template VM
config.HypervisorConfig.BootFromTemplate = true
_, err = NewVM(config)
assert.Error(err)
config.HypervisorConfig.MemoryPath = testDir
_, err = NewVM(config)
assert.Error(err)
config.HypervisorConfig.DevicesStatePath = testDir
_, err = NewVM(config)
assert.Nil(err)
}
func TestVMConfigValid(t *testing.T) {
assert := assert.New(t)
config := VMConfig{}
err := config.Valid()
assert.Error(err)
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
config.HypervisorConfig = HypervisorConfig{
KernelPath: testDir,
InitrdPath: testDir,
}
err = config.Valid()
assert.Nil(err)
}