mirror of
https://github.com/aljazceru/kata-containers.git
synced 2025-12-17 22:34:25 +01:00
kata 2.0: delete use_vsock option and proxy abstraction
With kata containers moving to 2.0, (hybrid-)vsock will be the only way to directly communicate between host and agent. And kata-proxy as additional component to handle the multiplexing on serial port is also no longer needed. Cleaning up related unit tests, and also add another mock socket type `MockHybridVSock` to deal with ttrpc-based hybrid-vsock mock server. Fixes: #389 Signed-off-by: Penny Zheng penny.zheng@arm.com
This commit is contained in:
@@ -142,9 +142,6 @@ ACRNCTLPATH := $(ACRNBINDIR)/$(ACRNCTLCMD)
|
||||
SHIMCMD := $(BIN_PREFIX)-shim
|
||||
SHIMPATH := $(PKGLIBEXECDIR)/$(SHIMCMD)
|
||||
|
||||
PROXYCMD := $(BIN_PREFIX)-proxy
|
||||
PROXYPATH := $(PKGLIBEXECDIR)/$(PROXYCMD)
|
||||
|
||||
NETMONCMD := $(BIN_PREFIX)-netmon
|
||||
NETMONPATH := $(PKGLIBEXECDIR)/$(NETMONCMD)
|
||||
|
||||
@@ -421,7 +418,6 @@ USER_VARS += PREFIX
|
||||
USER_VARS += PROJECT_NAME
|
||||
USER_VARS += PROJECT_PREFIX
|
||||
USER_VARS += PROJECT_TYPE
|
||||
USER_VARS += PROXYPATH
|
||||
USER_VARS += NETMONPATH
|
||||
USER_VARS += QEMUBINDIR
|
||||
USER_VARS += QEMUCMD
|
||||
@@ -629,7 +625,6 @@ $(GENERATED_FILES): %: %.in $(MAKEFILE_LIST) VERSION .git-commit
|
||||
-e "s|@LOCALSTATEDIR@|$(LOCALSTATEDIR)|g" \
|
||||
-e "s|@PKGLIBEXECDIR@|$(PKGLIBEXECDIR)|g" \
|
||||
-e "s|@PKGRUNDIR@|$(PKGRUNDIR)|g" \
|
||||
-e "s|@PROXYPATH@|$(PROXYPATH)|g" \
|
||||
-e "s|@NETMONPATH@|$(NETMONPATH)|g" \
|
||||
-e "s|@PROJECT_BUG_URL@|$(PROJECT_BUG_URL)|g" \
|
||||
-e "s|@PROJECT_URL@|$(PROJECT_URL)|g" \
|
||||
@@ -853,5 +848,5 @@ ifneq (,$(findstring $(HYPERVISOR_ACRN),$(KNOWN_HYPERVISORS)))
|
||||
@printf "\t$(HYPERVISOR_ACRN) hypervisor path (ACRNPATH) : %s\n" $(abspath $(ACRNPATH))
|
||||
endif
|
||||
@printf "\tassets path (PKGDATADIR) : %s\n" $(abspath $(PKGDATADIR))
|
||||
@printf "\tproxy+shim path (PKGLIBEXECDIR) : %s\n" $(abspath $(PKGLIBEXECDIR))
|
||||
@printf "\tshim path (PKGLIBEXECDIR) : %s\n" $(abspath $(PKGLIBEXECDIR))
|
||||
@printf "\n"
|
||||
|
||||
@@ -69,8 +69,7 @@ default_memory = @DEFMEMSZ@
|
||||
block_device_driver = "@DEFBLOCKSTORAGEDRIVER_ACRN@"
|
||||
|
||||
# This option changes the default hypervisor and kernel parameters
|
||||
# to enable debug output where available. This extra output is added
|
||||
# to the proxy logs, but only when proxy debug is also enabled.
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
@@ -102,13 +101,6 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_ACRN@"
|
||||
# but it will not abort container execution.
|
||||
#guest_hook_path = "/usr/share/oci/hooks"
|
||||
|
||||
[proxy.@PROJECT_TYPE@]
|
||||
path = "@PROXYPATH@"
|
||||
|
||||
# If enabled, proxy messages will be sent to the system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
|
||||
[shim.@PROJECT_TYPE@]
|
||||
path = "@SHIMPATH@"
|
||||
|
||||
|
||||
@@ -94,19 +94,11 @@ virtio_fs_cache = "@DEFVIRTIOFSCACHE@"
|
||||
block_device_driver = "virtio-blk"
|
||||
|
||||
# This option changes the default hypervisor and kernel parameters
|
||||
# to enable debug output where available. This extra output is added
|
||||
# to the proxy logs, but only when proxy debug is also enabled.
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
|
||||
[proxy.@PROJECT_TYPE@]
|
||||
path = "@PROXYPATH@"
|
||||
|
||||
# If enabled, proxy messages will be sent to the system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
|
||||
[shim.@PROJECT_TYPE@]
|
||||
path = "@SHIMPATH@"
|
||||
|
||||
|
||||
@@ -141,8 +141,7 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
|
||||
#enable_swap = true
|
||||
|
||||
# This option changes the default hypervisor and kernel parameters
|
||||
# to enable debug output where available. This extra output is added
|
||||
# to the proxy logs, but only when proxy debug is also enabled.
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
@@ -157,11 +156,6 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
|
||||
# used for 9p packet payload.
|
||||
#msize_9p = @DEFMSIZE9P@
|
||||
|
||||
# If true and vsocks are supported, use vsocks to communicate directly
|
||||
# with the agent (no proxy is started).
|
||||
# Default true
|
||||
use_vsock = true
|
||||
|
||||
# VFIO devices are hotplugged on a bridge by default.
|
||||
# Enable hotplugging on root bus. This may be required for devices with
|
||||
# a large PCI bar, as this is a current limitation with hotplugging on
|
||||
|
||||
@@ -206,8 +206,7 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
|
||||
#enable_swap = true
|
||||
|
||||
# This option changes the default hypervisor and kernel parameters
|
||||
# to enable debug output where available. This extra output is added
|
||||
# to the proxy logs, but only when proxy debug is also enabled.
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
@@ -222,12 +221,6 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
|
||||
# used for 9p packet payload.
|
||||
#msize_9p = @DEFMSIZE9P@
|
||||
|
||||
# If true and vsocks are supported, use vsocks to communicate directly
|
||||
# with the agent and no proxy is started, otherwise use unix
|
||||
# sockets and start a proxy to communicate with the agent.
|
||||
# Default false
|
||||
#use_vsock = true
|
||||
|
||||
# If false and nvdimm is supported, use nvdimm device to plug guest image.
|
||||
# Otherwise virtio-block device is used.
|
||||
# Default false
|
||||
@@ -316,13 +309,6 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
|
||||
# Default /var/run/kata-containers/cache.sock
|
||||
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
|
||||
|
||||
[proxy.@PROJECT_TYPE@]
|
||||
path = "@PROXYPATH@"
|
||||
|
||||
# If enabled, proxy messages will be sent to the system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
|
||||
[shim.@PROJECT_TYPE@]
|
||||
path = "@SHIMPATH@"
|
||||
|
||||
|
||||
@@ -213,8 +213,7 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
|
||||
#enable_swap = true
|
||||
|
||||
# This option changes the default hypervisor and kernel parameters
|
||||
# to enable debug output where available. This extra output is added
|
||||
# to the proxy logs, but only when proxy debug is also enabled.
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
@@ -229,12 +228,6 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
|
||||
# used for 9p packet payload.
|
||||
#msize_9p = @DEFMSIZE9P@
|
||||
|
||||
# If true and vsocks are supported, use vsocks to communicate directly
|
||||
# with the agent and no proxy is started, otherwise use unix
|
||||
# sockets and start a proxy to communicate with the agent.
|
||||
# Default false
|
||||
#use_vsock = true
|
||||
|
||||
# If false and nvdimm is supported, use nvdimm device to plug guest image.
|
||||
# Otherwise virtio-block device is used.
|
||||
# Default is false
|
||||
@@ -340,13 +333,6 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
|
||||
# Default /var/run/kata-containers/cache.sock
|
||||
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
|
||||
|
||||
[proxy.@PROJECT_TYPE@]
|
||||
path = "@PROXYPATH@"
|
||||
|
||||
# If enabled, proxy messages will be sent to the system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
|
||||
[shim.@PROJECT_TYPE@]
|
||||
path = "@SHIMPATH@"
|
||||
|
||||
|
||||
@@ -167,8 +167,6 @@ var initFactoryCommand = cli.Command{
|
||||
HypervisorType: runtimeConfig.HypervisorType,
|
||||
HypervisorConfig: runtimeConfig.HypervisorConfig,
|
||||
AgentConfig: runtimeConfig.AgentConfig,
|
||||
ProxyType: runtimeConfig.ProxyType,
|
||||
ProxyConfig: runtimeConfig.ProxyConfig,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -256,8 +254,6 @@ var destroyFactoryCommand = cli.Command{
|
||||
HypervisorType: runtimeConfig.HypervisorType,
|
||||
HypervisorConfig: runtimeConfig.HypervisorConfig,
|
||||
AgentConfig: runtimeConfig.AgentConfig,
|
||||
ProxyType: runtimeConfig.ProxyType,
|
||||
ProxyConfig: runtimeConfig.ProxyConfig,
|
||||
},
|
||||
}
|
||||
kataLog.WithField("factory", factoryConfig).Info("load vm factory")
|
||||
@@ -313,8 +309,6 @@ var statusFactoryCommand = cli.Command{
|
||||
HypervisorType: runtimeConfig.HypervisorType,
|
||||
HypervisorConfig: runtimeConfig.HypervisorConfig,
|
||||
AgentConfig: runtimeConfig.AgentConfig,
|
||||
ProxyType: runtimeConfig.ProxyType,
|
||||
ProxyConfig: runtimeConfig.ProxyConfig,
|
||||
},
|
||||
}
|
||||
kataLog.WithField("factory", factoryConfig).Info("load vm factory")
|
||||
|
||||
@@ -74,7 +74,6 @@ func TestFactoryCLIFunctionInit(t *testing.T) {
|
||||
runtimeConfig.FactoryConfig.Template = true
|
||||
runtimeConfig.FactoryConfig.TemplatePath = "/run/vc/vm/template"
|
||||
runtimeConfig.HypervisorType = vc.MockHypervisor
|
||||
runtimeConfig.ProxyType = vc.NoopProxyType
|
||||
ctx.App.Metadata["runtimeConfig"] = runtimeConfig
|
||||
fn, ok = initFactoryCommand.Action.(func(context *cli.Context) error)
|
||||
assert.True(ok)
|
||||
|
||||
@@ -101,15 +101,6 @@ type HypervisorInfo struct {
|
||||
PCIeRootPort uint32
|
||||
HotplugVFIOOnRootBus bool
|
||||
Debug bool
|
||||
UseVSock bool
|
||||
}
|
||||
|
||||
// ProxyInfo stores proxy details
|
||||
type ProxyInfo struct {
|
||||
Type string
|
||||
Version VersionInfo
|
||||
Path string
|
||||
Debug bool
|
||||
}
|
||||
|
||||
// AgentInfo stores agent details
|
||||
@@ -155,7 +146,6 @@ type EnvInfo struct {
|
||||
Image ImageInfo
|
||||
Kernel KernelInfo
|
||||
Initrd InitrdInfo
|
||||
Proxy ProxyInfo
|
||||
Agent AgentInfo
|
||||
Host HostInfo
|
||||
Netmon NetmonInfo
|
||||
@@ -234,42 +224,20 @@ func getHostInfo() (HostInfo, error) {
|
||||
Model: cpuModel,
|
||||
}
|
||||
|
||||
supportVSocks, _ := vcUtils.SupportsVsocks()
|
||||
|
||||
host := HostInfo{
|
||||
Kernel: hostKernelVersion,
|
||||
Architecture: arch,
|
||||
Distro: hostDistro,
|
||||
CPU: hostCPU,
|
||||
VMContainerCapable: hostVMContainerCapable,
|
||||
SupportVSocks: vcUtils.SupportsVsocks(),
|
||||
SupportVSocks: supportVSocks,
|
||||
}
|
||||
|
||||
return host, nil
|
||||
}
|
||||
|
||||
func getProxyInfo(config oci.RuntimeConfig) ProxyInfo {
|
||||
if config.ProxyType == vc.NoProxyType {
|
||||
return ProxyInfo{Type: string(config.ProxyType)}
|
||||
}
|
||||
|
||||
proxyConfig := config.ProxyConfig
|
||||
|
||||
var proxyVersionInfo VersionInfo
|
||||
if version, err := getCommandVersion(proxyConfig.Path); err != nil {
|
||||
proxyVersionInfo = unknownVersionInfo
|
||||
} else {
|
||||
proxyVersionInfo = constructVersionInfo(version)
|
||||
}
|
||||
|
||||
proxy := ProxyInfo{
|
||||
Type: string(config.ProxyType),
|
||||
Version: proxyVersionInfo,
|
||||
Path: proxyConfig.Path,
|
||||
Debug: proxyConfig.Debug,
|
||||
}
|
||||
|
||||
return proxy
|
||||
}
|
||||
|
||||
func getNetmonInfo(config oci.RuntimeConfig) NetmonInfo {
|
||||
netmonConfig := config.NetmonConfig
|
||||
|
||||
@@ -321,7 +289,6 @@ func getHypervisorInfo(config oci.RuntimeConfig) HypervisorInfo {
|
||||
Path: hypervisorPath,
|
||||
BlockDeviceDriver: config.HypervisorConfig.BlockDeviceDriver,
|
||||
Msize9p: config.HypervisorConfig.Msize9p,
|
||||
UseVSock: config.HypervisorConfig.UseVSock,
|
||||
MemorySlots: config.HypervisorConfig.MemSlots,
|
||||
EntropySource: config.HypervisorConfig.EntropySource,
|
||||
SharedFS: config.HypervisorConfig.SharedFS,
|
||||
@@ -347,8 +314,6 @@ func getEnvInfo(configFile string, config oci.RuntimeConfig) (env EnvInfo, err e
|
||||
return EnvInfo{}, err
|
||||
}
|
||||
|
||||
proxy := getProxyInfo(config)
|
||||
|
||||
netmon := getNetmonInfo(config)
|
||||
|
||||
agent, err := getAgentInfo(config)
|
||||
@@ -378,7 +343,6 @@ func getEnvInfo(configFile string, config oci.RuntimeConfig) (env EnvInfo, err e
|
||||
Image: image,
|
||||
Kernel: kernel,
|
||||
Initrd: initrd,
|
||||
Proxy: proxy,
|
||||
Agent: agent,
|
||||
Host: host,
|
||||
Netmon: netmon,
|
||||
|
||||
@@ -31,7 +31,6 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
testProxyVersion = "proxy version 0.1"
|
||||
testNetmonVersion = "netmon version 0.1"
|
||||
testHypervisorVersion = "QEMU emulator version 2.7.0+git.741f430a96-6.1, Copyright (c) 2003-2016 Fabrice Bellard and the QEMU Project developers"
|
||||
)
|
||||
@@ -39,7 +38,6 @@ var (
|
||||
var (
|
||||
hypervisorDebug = false
|
||||
enableVirtioFS = false
|
||||
proxyDebug = false
|
||||
runtimeDebug = false
|
||||
runtimeTrace = false
|
||||
netmonDebug = false
|
||||
@@ -84,7 +82,6 @@ func makeRuntimeConfig(prefixDir string) (configFile string, config oci.RuntimeC
|
||||
imagePath := filepath.Join(prefixDir, "image")
|
||||
kernelParams := "foo=bar xyz"
|
||||
machineType := "machineType"
|
||||
proxyPath := filepath.Join(prefixDir, "proxy")
|
||||
netmonPath := filepath.Join(prefixDir, "netmon")
|
||||
disableBlock := true
|
||||
blockStorageDriver := "virtio-scsi"
|
||||
@@ -109,11 +106,6 @@ func makeRuntimeConfig(prefixDir string) (configFile string, config oci.RuntimeC
|
||||
}
|
||||
}
|
||||
|
||||
err = makeVersionBinary(proxyPath, testProxyVersion)
|
||||
if err != nil {
|
||||
return "", oci.RuntimeConfig{}, err
|
||||
}
|
||||
|
||||
err = makeVersionBinary(netmonPath, testNetmonVersion)
|
||||
if err != nil {
|
||||
return "", oci.RuntimeConfig{}, err
|
||||
@@ -137,7 +129,6 @@ func makeRuntimeConfig(prefixDir string) (configFile string, config oci.RuntimeC
|
||||
ImagePath: imagePath,
|
||||
KernelParams: kernelParams,
|
||||
MachineType: machineType,
|
||||
ProxyPath: proxyPath,
|
||||
NetmonPath: netmonPath,
|
||||
LogPath: logPath,
|
||||
DefaultGuestHookPath: hypConfig.GuestHookPath,
|
||||
@@ -154,7 +145,6 @@ func makeRuntimeConfig(prefixDir string) (configFile string, config oci.RuntimeC
|
||||
HypervisorDebug: hypervisorDebug,
|
||||
RuntimeDebug: runtimeDebug,
|
||||
RuntimeTrace: runtimeTrace,
|
||||
ProxyDebug: proxyDebug,
|
||||
NetmonDebug: netmonDebug,
|
||||
AgentDebug: agentDebug,
|
||||
AgentTrace: agentTrace,
|
||||
@@ -178,15 +168,6 @@ func makeRuntimeConfig(prefixDir string) (configFile string, config oci.RuntimeC
|
||||
return configFile, config, nil
|
||||
}
|
||||
|
||||
func getExpectedProxyDetails(config oci.RuntimeConfig) (ProxyInfo, error) {
|
||||
return ProxyInfo{
|
||||
Type: string(config.ProxyType),
|
||||
Version: constructVersionInfo(testProxyVersion),
|
||||
Path: config.ProxyConfig.Path,
|
||||
Debug: config.ProxyConfig.Debug,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getExpectedNetmonDetails(config oci.RuntimeConfig) (NetmonInfo, error) {
|
||||
return NetmonInfo{
|
||||
Version: constructVersionInfo(testNetmonVersion),
|
||||
@@ -229,13 +210,14 @@ func genericGetExpectedHostDetails(tmpdir string, expectedVendor string, expecte
|
||||
Model: expectedModel,
|
||||
}
|
||||
|
||||
expectedSupportVSocks, _ := vcUtils.SupportsVsocks()
|
||||
expectedHostDetails := HostInfo{
|
||||
Kernel: expectedKernelVersion,
|
||||
Architecture: expectedArch,
|
||||
Distro: expectedDistro,
|
||||
CPU: expectedCPU,
|
||||
VMContainerCapable: expectedVMContainerCapable,
|
||||
SupportVSocks: vcUtils.SupportsVsocks(),
|
||||
SupportVSocks: expectedSupportVSocks,
|
||||
}
|
||||
|
||||
testProcCPUInfo := filepath.Join(tmpdir, "cpuinfo")
|
||||
@@ -349,11 +331,6 @@ func getExpectedSettings(config oci.RuntimeConfig, tmpdir, configFile string) (E
|
||||
|
||||
runtime := getExpectedRuntimeDetails(config, configFile)
|
||||
|
||||
proxy, err := getExpectedProxyDetails(config)
|
||||
if err != nil {
|
||||
return EnvInfo{}, err
|
||||
}
|
||||
|
||||
agent, err := getExpectedAgentDetails(config)
|
||||
if err != nil {
|
||||
return EnvInfo{}, err
|
||||
@@ -379,7 +356,6 @@ func getExpectedSettings(config oci.RuntimeConfig, tmpdir, configFile string) (E
|
||||
Hypervisor: hypervisor,
|
||||
Image: image,
|
||||
Kernel: kernel,
|
||||
Proxy: proxy,
|
||||
Agent: agent,
|
||||
Host: host,
|
||||
Netmon: netmon,
|
||||
@@ -481,7 +457,6 @@ func TestEnvGetEnvInfo(t *testing.T) {
|
||||
for _, toggle := range []bool{false, true} {
|
||||
hypervisorDebug = toggle
|
||||
enableVirtioFS = toggle
|
||||
proxyDebug = toggle
|
||||
runtimeDebug = toggle
|
||||
runtimeTrace = toggle
|
||||
agentDebug = toggle
|
||||
@@ -611,50 +586,6 @@ func TestEnvGetRuntimeInfo(t *testing.T) {
|
||||
assert.Equal(t, expectedRuntime, runtime)
|
||||
}
|
||||
|
||||
func TestEnvGetProxyInfo(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
_, config, err := makeRuntimeConfig(tmpdir)
|
||||
assert.NoError(t, err)
|
||||
|
||||
expectedProxy, err := getExpectedProxyDetails(config)
|
||||
assert.NoError(t, err)
|
||||
|
||||
proxy := getProxyInfo(config)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, expectedProxy, proxy)
|
||||
}
|
||||
|
||||
func TestEnvGetProxyInfoNoVersion(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
_, config, err := makeRuntimeConfig(tmpdir)
|
||||
assert.NoError(t, err)
|
||||
|
||||
expectedProxy, err := getExpectedProxyDetails(config)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// remove the proxy ensuring its version cannot be queried
|
||||
err = os.Remove(config.ProxyConfig.Path)
|
||||
assert.NoError(t, err)
|
||||
|
||||
expectedProxy.Version = unknownVersionInfo
|
||||
|
||||
proxy := getProxyInfo(config)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, expectedProxy, proxy)
|
||||
}
|
||||
|
||||
func TestEnvGetNetmonInfo(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
@@ -754,13 +685,6 @@ func testEnvShowTOMLSettings(t *testing.T, tmpdir string, tmpfile *os.File) erro
|
||||
Parameters: "foo=bar xyz",
|
||||
}
|
||||
|
||||
proxy := ProxyInfo{
|
||||
Type: "proxy-type",
|
||||
Version: constructVersionInfo(testProxyVersion),
|
||||
Path: "file:///proxy-url",
|
||||
Debug: false,
|
||||
}
|
||||
|
||||
agent := AgentInfo{}
|
||||
|
||||
expectedHostDetails, err := getExpectedHostDetails(tmpdir)
|
||||
@@ -771,7 +695,6 @@ func testEnvShowTOMLSettings(t *testing.T, tmpdir string, tmpfile *os.File) erro
|
||||
Hypervisor: hypervisor,
|
||||
Image: image,
|
||||
Kernel: kernel,
|
||||
Proxy: proxy,
|
||||
Agent: agent,
|
||||
Host: expectedHostDetails,
|
||||
}
|
||||
@@ -814,13 +737,6 @@ func testEnvShowJSONSettings(t *testing.T, tmpdir string, tmpfile *os.File) erro
|
||||
Parameters: "foo=bar xyz",
|
||||
}
|
||||
|
||||
proxy := ProxyInfo{
|
||||
Type: "proxy-type",
|
||||
Version: constructVersionInfo(testProxyVersion),
|
||||
Path: "file:///proxy-url",
|
||||
Debug: false,
|
||||
}
|
||||
|
||||
agent := AgentInfo{}
|
||||
|
||||
expectedHostDetails, err := getExpectedHostDetails(tmpdir)
|
||||
@@ -831,7 +747,6 @@ func testEnvShowJSONSettings(t *testing.T, tmpdir string, tmpfile *os.File) erro
|
||||
Hypervisor: hypervisor,
|
||||
Image: image,
|
||||
Kernel: kernel,
|
||||
Proxy: proxy,
|
||||
Agent: agent,
|
||||
Host: expectedHostDetails,
|
||||
}
|
||||
|
||||
@@ -242,7 +242,6 @@ func newTestRuntimeConfig(dir, consolePath string, create bool) (oci.RuntimeConf
|
||||
return oci.RuntimeConfig{
|
||||
HypervisorType: vc.QemuHypervisor,
|
||||
HypervisorConfig: hypervisorConfig,
|
||||
ProxyType: vc.KataProxyType,
|
||||
Console: consolePath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -384,7 +384,6 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config string, err err
|
||||
kernelParams := "foo=bar xyz"
|
||||
imagePath := path.Join(dir, "image")
|
||||
shimPath := path.Join(dir, "shim")
|
||||
proxyPath := path.Join(dir, "proxy")
|
||||
netmonPath := path.Join(dir, "netmon")
|
||||
logDir := path.Join(dir, "logs")
|
||||
logPath := path.Join(logDir, "runtime.log")
|
||||
@@ -406,7 +405,6 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config string, err err
|
||||
KernelParams: kernelParams,
|
||||
MachineType: machineType,
|
||||
ShimPath: shimPath,
|
||||
ProxyPath: proxyPath,
|
||||
NetmonPath: netmonPath,
|
||||
LogPath: logPath,
|
||||
DisableBlock: disableBlockDevice,
|
||||
@@ -427,7 +425,7 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config string, err err
|
||||
return "", err
|
||||
}
|
||||
|
||||
files := []string{hypervisorPath, kernelPath, imagePath, shimPath, proxyPath}
|
||||
files := []string{hypervisorPath, kernelPath, imagePath, shimPath}
|
||||
|
||||
for _, file := range files {
|
||||
// create the resource (which must be >0 bytes)
|
||||
|
||||
@@ -209,7 +209,6 @@ func newTestRuntimeConfig(dir, consolePath string, create bool) (oci.RuntimeConf
|
||||
return oci.RuntimeConfig{
|
||||
HypervisorType: vc.QemuHypervisor,
|
||||
HypervisorConfig: hypervisorConfig,
|
||||
ProxyType: vc.KataBuiltInProxyType,
|
||||
Console: consolePath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@ typeset -r unknown="unknown"
|
||||
typeset -r osbuilder_file="/var/lib/osbuilder/osbuilder.yaml"
|
||||
|
||||
# Maximum number of errors to show for a single system component
|
||||
# (such as runtime or proxy).
|
||||
# (such as runtime).
|
||||
PROBLEM_LIMIT=${PROBLEM_LIMIT:-50}
|
||||
|
||||
# List of patterns used to detect problems in logfiles.
|
||||
@@ -296,17 +296,6 @@ show_containerd_shimv2_log_details()
|
||||
end_section
|
||||
}
|
||||
|
||||
show_proxy_log_details()
|
||||
{
|
||||
local title="Proxy logs"
|
||||
|
||||
subheading "$title"
|
||||
|
||||
start_section "$title"
|
||||
find_system_journal_problems "proxy" "@PROJECT_TYPE@-proxy"
|
||||
end_section
|
||||
}
|
||||
|
||||
show_runtime_log_details()
|
||||
{
|
||||
local title="Runtime logs"
|
||||
@@ -347,7 +336,6 @@ show_log_details()
|
||||
heading "$title"
|
||||
|
||||
show_runtime_log_details
|
||||
show_proxy_log_details
|
||||
show_shim_log_details
|
||||
show_throttler_log_details
|
||||
show_containerd_shimv2_log_details
|
||||
@@ -377,7 +365,6 @@ show_package_versions()
|
||||
# core components
|
||||
for project in @PROJECT_TYPE@
|
||||
do
|
||||
pattern+="|${project}-proxy"
|
||||
pattern+="|${project}-runtime"
|
||||
pattern+="|${project}-shim"
|
||||
pattern+="|${project}-ksm-throttler"
|
||||
|
||||
@@ -21,7 +21,6 @@ type RuntimeConfigOptions struct {
|
||||
KernelParams string
|
||||
MachineType string
|
||||
ShimPath string
|
||||
ProxyPath string
|
||||
NetmonPath string
|
||||
LogPath string
|
||||
BlockDeviceDriver string
|
||||
@@ -37,7 +36,6 @@ type RuntimeConfigOptions struct {
|
||||
HypervisorDebug bool
|
||||
RuntimeDebug bool
|
||||
RuntimeTrace bool
|
||||
ProxyDebug bool
|
||||
ShimDebug bool
|
||||
NetmonDebug bool
|
||||
AgentDebug bool
|
||||
@@ -69,10 +67,6 @@ func MakeRuntimeConfigFileData(config RuntimeConfigOptions) string {
|
||||
shared_fs = "` + config.SharedFS + `"
|
||||
virtio_fs_daemon = "` + config.VirtioFSDaemon + `"
|
||||
|
||||
[proxy.kata]
|
||||
enable_debug = ` + strconv.FormatBool(config.ProxyDebug) + `
|
||||
path = "` + config.ProxyPath + `"
|
||||
|
||||
[shim.kata]
|
||||
path = "` + config.ShimPath + `"
|
||||
enable_debug = ` + strconv.FormatBool(config.ShimDebug) + `
|
||||
|
||||
@@ -66,5 +66,4 @@ var defaultRuntimeConfiguration = "@CONFIG_PATH@"
|
||||
var defaultSysConfRuntimeConfiguration = "@SYSCONFIG@"
|
||||
|
||||
var name = "kata"
|
||||
var defaultProxyPath = "/usr/libexec/kata-containers/kata-proxy"
|
||||
var defaultNetmonPath = "/usr/libexec/kata-containers/kata-netmon"
|
||||
|
||||
@@ -28,8 +28,6 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
defaultProxy = vc.KataProxyType
|
||||
|
||||
// if true, enable opentracing support.
|
||||
tracing = false
|
||||
)
|
||||
@@ -40,9 +38,9 @@ var (
|
||||
//
|
||||
// [<component>.<type>]
|
||||
//
|
||||
// The components are hypervisor, proxy and agent. For example,
|
||||
// The components are hypervisor, and agent. For example,
|
||||
//
|
||||
// [proxy.kata]
|
||||
// [agent.kata]
|
||||
//
|
||||
// The currently supported types are listed below:
|
||||
const (
|
||||
@@ -52,16 +50,12 @@ const (
|
||||
qemuHypervisorTableType = "qemu"
|
||||
acrnHypervisorTableType = "acrn"
|
||||
|
||||
// supported proxy component types
|
||||
kataProxyTableType = "kata"
|
||||
|
||||
// the maximum amount of PCI bridges that can be cold plugged in a VM
|
||||
maxPCIBridges uint32 = 5
|
||||
)
|
||||
|
||||
type tomlConfig struct {
|
||||
Hypervisor map[string]hypervisor
|
||||
Proxy map[string]proxy
|
||||
Agent map[string]agent
|
||||
Runtime runtime
|
||||
Factory factory
|
||||
@@ -117,7 +111,6 @@ type hypervisor struct {
|
||||
Debug bool `toml:"enable_debug"`
|
||||
DisableNestingChecks bool `toml:"disable_nesting_checks"`
|
||||
EnableIOThreads bool `toml:"enable_iothreads"`
|
||||
UseVSock bool `toml:"use_vsock"`
|
||||
DisableImageNvdimm bool `toml:"disable_image_nvdimm"`
|
||||
HotplugVFIOOnRootBus bool `toml:"hotplug_vfio_on_root_bus"`
|
||||
DisableVhostNet bool `toml:"disable_vhost_net"`
|
||||
@@ -126,11 +119,6 @@ type hypervisor struct {
|
||||
TxRateLimiterMaxRate uint64 `toml:"tx_rate_limiter_max_rate"`
|
||||
}
|
||||
|
||||
type proxy struct {
|
||||
Path string `toml:"path"`
|
||||
Debug bool `toml:"enable_debug"`
|
||||
}
|
||||
|
||||
type runtime struct {
|
||||
Debug bool `toml:"enable_debug"`
|
||||
Tracing bool `toml:"enable_tracing"`
|
||||
@@ -397,10 +385,6 @@ func (h hypervisor) msize9p() uint32 {
|
||||
return h.Msize9p
|
||||
}
|
||||
|
||||
func (h hypervisor) useVSock() bool {
|
||||
return h.UseVSock
|
||||
}
|
||||
|
||||
func (h hypervisor) guestHookPath() string {
|
||||
if h.GuestHookPath == "" {
|
||||
return defaultGuestHookPath
|
||||
@@ -447,19 +431,6 @@ func (h hypervisor) getTxRateLimiterCfg() (uint64, error) {
|
||||
return h.TxRateLimiterMaxRate, nil
|
||||
}
|
||||
|
||||
func (p proxy) path() (string, error) {
|
||||
path := p.Path
|
||||
if path == "" {
|
||||
path = defaultProxyPath
|
||||
}
|
||||
|
||||
return ResolvePath(path)
|
||||
}
|
||||
|
||||
func (p proxy) debug() bool {
|
||||
return p.Debug
|
||||
}
|
||||
|
||||
func (a agent) debug() bool {
|
||||
return a.Debug
|
||||
}
|
||||
@@ -561,7 +532,6 @@ func newFirecrackerHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
|
||||
BlockDeviceDriver: blockDriver,
|
||||
EnableIOThreads: h.EnableIOThreads,
|
||||
DisableVhostNet: true, // vhost-net backend is not supported in Firecracker
|
||||
UseVSock: true,
|
||||
GuestHookPath: h.guestHookPath(),
|
||||
RxRateLimiterMaxRate: rxRateLimiterMaxRate,
|
||||
TxRateLimiterMaxRate: txRateLimiterMaxRate,
|
||||
@@ -627,14 +597,8 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
|
||||
errors.New("cannot enable virtio-fs without daemon path in configuration file")
|
||||
}
|
||||
|
||||
useVSock := false
|
||||
if h.useVSock() {
|
||||
if utils.SupportsVsocks() {
|
||||
kataUtilsLogger.Info("vsock supported")
|
||||
useVSock = true
|
||||
} else {
|
||||
kataUtilsLogger.Warn("No vsock support, falling back to legacy serial port")
|
||||
}
|
||||
if vSock, err := utils.SupportsVsocks(); !vSock {
|
||||
return vc.HypervisorConfig{}, err
|
||||
}
|
||||
|
||||
rxRateLimiterMaxRate, err := h.getRxRateLimiterCfg()
|
||||
@@ -684,7 +648,6 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
|
||||
BlockDeviceCacheNoflush: h.BlockDeviceCacheNoflush,
|
||||
EnableIOThreads: h.EnableIOThreads,
|
||||
Msize9p: h.msize9p(),
|
||||
UseVSock: useVSock,
|
||||
DisableImageNvdimm: h.DisableImageNvdimm,
|
||||
HotplugVFIOOnRootBus: h.HotplugVFIOOnRootBus,
|
||||
PCIeRootPort: h.PCIeRootPort,
|
||||
@@ -842,7 +805,6 @@ func newClhHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
|
||||
HotplugVFIOOnRootBus: h.HotplugVFIOOnRootBus,
|
||||
PCIeRootPort: h.PCIeRootPort,
|
||||
DisableVhostNet: true,
|
||||
UseVSock: true,
|
||||
VirtioFSExtraArgs: h.VirtioFSExtraArgs,
|
||||
}, nil
|
||||
}
|
||||
@@ -892,42 +854,10 @@ func updateRuntimeConfigHypervisor(configPath string, tomlConf tomlConfig, confi
|
||||
return nil
|
||||
}
|
||||
|
||||
func updateRuntimeConfigProxy(configPath string, tomlConf tomlConfig, config *oci.RuntimeConfig, builtIn bool) error {
|
||||
if builtIn {
|
||||
config.ProxyType = vc.KataBuiltInProxyType
|
||||
config.ProxyConfig = vc.ProxyConfig{
|
||||
Debug: config.Debug,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
for k, proxy := range tomlConf.Proxy {
|
||||
switch k {
|
||||
case kataProxyTableType:
|
||||
config.ProxyType = vc.KataProxyType
|
||||
default:
|
||||
return fmt.Errorf("%s proxy type not supported", k)
|
||||
}
|
||||
|
||||
path, err := proxy.path()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config.ProxyConfig = vc.ProxyConfig{
|
||||
Path: path,
|
||||
Debug: proxy.debug(),
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func updateRuntimeConfigAgent(configPath string, tomlConf tomlConfig, config *oci.RuntimeConfig, builtIn bool) error {
|
||||
if builtIn {
|
||||
config.AgentConfig = vc.KataAgentConfig{
|
||||
LongLiveConn: true,
|
||||
UseVSock: config.HypervisorConfig.UseVSock,
|
||||
Debug: config.AgentConfig.Debug,
|
||||
KernelModules: config.AgentConfig.KernelModules,
|
||||
}
|
||||
@@ -937,7 +867,6 @@ func updateRuntimeConfigAgent(configPath string, tomlConf tomlConfig, config *oc
|
||||
|
||||
for _, agent := range tomlConf.Agent {
|
||||
config.AgentConfig = vc.KataAgentConfig{
|
||||
UseVSock: config.HypervisorConfig.UseVSock,
|
||||
Debug: agent.debug(),
|
||||
Trace: agent.trace(),
|
||||
TraceMode: agent.traceMode(),
|
||||
@@ -1013,10 +942,6 @@ func updateRuntimeConfig(configPath string, tomlConf tomlConfig, config *oci.Run
|
||||
return err
|
||||
}
|
||||
|
||||
if err := updateRuntimeConfigProxy(configPath, tomlConf, config, builtIn); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := updateRuntimeConfigAgent(configPath, tomlConf, config, builtIn); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1095,7 +1020,6 @@ func initConfig() (config oci.RuntimeConfig, err error) {
|
||||
HypervisorType: defaultHypervisor,
|
||||
HypervisorConfig: GetDefaultHypervisorConfig(),
|
||||
AgentConfig: defaultAgentConfig,
|
||||
ProxyType: defaultProxy,
|
||||
}
|
||||
|
||||
return config, nil
|
||||
@@ -1157,13 +1081,6 @@ func LoadConfiguration(configPath string, ignoreLogging, builtIn bool) (resolved
|
||||
|
||||
config.DisableGuestSeccomp = tomlConf.Runtime.DisableGuestSeccomp
|
||||
|
||||
// use no proxy if HypervisorConfig.UseVSock is true
|
||||
if config.HypervisorConfig.UseVSock {
|
||||
kataUtilsLogger.Info("VSOCK supported, configure to not use proxy")
|
||||
config.ProxyType = vc.NoProxyType
|
||||
config.ProxyConfig = vc.ProxyConfig{Debug: config.Debug}
|
||||
}
|
||||
|
||||
config.SandboxCgroupOnly = tomlConf.Runtime.SandboxCgroupOnly
|
||||
config.DisableNewNetNs = tomlConf.Runtime.DisableNewNetNs
|
||||
config.EnablePprof = tomlConf.Runtime.EnablePprof
|
||||
|
||||
@@ -28,7 +28,6 @@ import (
|
||||
|
||||
var (
|
||||
hypervisorDebug = false
|
||||
proxyDebug = false
|
||||
runtimeDebug = false
|
||||
runtimeTrace = false
|
||||
netmonDebug = false
|
||||
@@ -72,7 +71,6 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf
|
||||
kernelPath := path.Join(dir, "kernel")
|
||||
kernelParams := "foo=bar xyz"
|
||||
imagePath := path.Join(dir, "image")
|
||||
proxyPath := path.Join(dir, "proxy")
|
||||
netmonPath := path.Join(dir, "netmon")
|
||||
logDir := path.Join(dir, "logs")
|
||||
logPath := path.Join(logDir, "runtime.log")
|
||||
@@ -93,7 +91,6 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf
|
||||
ImagePath: imagePath,
|
||||
KernelParams: kernelParams,
|
||||
MachineType: machineType,
|
||||
ProxyPath: proxyPath,
|
||||
NetmonPath: netmonPath,
|
||||
LogPath: logPath,
|
||||
DefaultGuestHookPath: defaultGuestHookPath,
|
||||
@@ -110,7 +107,6 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf
|
||||
HypervisorDebug: hypervisorDebug,
|
||||
RuntimeDebug: runtimeDebug,
|
||||
RuntimeTrace: runtimeTrace,
|
||||
ProxyDebug: proxyDebug,
|
||||
NetmonDebug: netmonDebug,
|
||||
AgentDebug: agentDebug,
|
||||
AgentTrace: agentTrace,
|
||||
@@ -135,7 +131,7 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf
|
||||
return config, err
|
||||
}
|
||||
|
||||
files := []string{hypervisorPath, kernelPath, imagePath, proxyPath}
|
||||
files := []string{hypervisorPath, kernelPath, imagePath}
|
||||
|
||||
for _, file := range files {
|
||||
// create the resource (which must be >0 bytes)
|
||||
@@ -173,10 +169,6 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf
|
||||
|
||||
agentConfig := vc.KataAgentConfig{}
|
||||
|
||||
proxyConfig := vc.ProxyConfig{
|
||||
Path: proxyPath,
|
||||
}
|
||||
|
||||
netmonConfig := vc.NetmonConfig{
|
||||
Path: netmonPath,
|
||||
Debug: false,
|
||||
@@ -194,9 +186,6 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf
|
||||
|
||||
AgentConfig: agentConfig,
|
||||
|
||||
ProxyType: defaultProxy,
|
||||
ProxyConfig: proxyConfig,
|
||||
|
||||
NetmonConfig: netmonConfig,
|
||||
DisableNewNetNs: disableNewNetNs,
|
||||
EnablePprof: enablePprof,
|
||||
@@ -486,7 +475,6 @@ func TestMinimalRuntimeConfig(t *testing.T) {
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
proxyPath := path.Join(dir, "proxy")
|
||||
hypervisorPath := path.Join(dir, "hypervisor")
|
||||
defaultHypervisorPath = hypervisorPath
|
||||
jailerPath := path.Join(dir, "jailer")
|
||||
@@ -530,26 +518,24 @@ func TestMinimalRuntimeConfig(t *testing.T) {
|
||||
runtimeMinimalConfig := `
|
||||
# Runtime configuration file
|
||||
|
||||
[proxy.kata]
|
||||
path = "` + proxyPath + `"
|
||||
|
||||
[agent.kata]
|
||||
|
||||
[netmon]
|
||||
path = "` + netmonPath + `"
|
||||
`
|
||||
|
||||
orgVHostVSockDevicePath := utils.VHostVSockDevicePath
|
||||
defer func() {
|
||||
utils.VHostVSockDevicePath = orgVHostVSockDevicePath
|
||||
}()
|
||||
utils.VHostVSockDevicePath = "/dev/null"
|
||||
|
||||
configPath := path.Join(dir, "runtime.toml")
|
||||
err = createConfig(configPath, runtimeMinimalConfig)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = createEmptyFile(proxyPath)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
err = createEmptyFile(hypervisorPath)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
@@ -592,10 +578,6 @@ func TestMinimalRuntimeConfig(t *testing.T) {
|
||||
|
||||
expectedAgentConfig := vc.KataAgentConfig{}
|
||||
|
||||
expectedProxyConfig := vc.ProxyConfig{
|
||||
Path: proxyPath,
|
||||
}
|
||||
|
||||
expectedNetmonConfig := vc.NetmonConfig{
|
||||
Path: netmonPath,
|
||||
Debug: false,
|
||||
@@ -613,9 +595,6 @@ func TestMinimalRuntimeConfig(t *testing.T) {
|
||||
|
||||
AgentConfig: expectedAgentConfig,
|
||||
|
||||
ProxyType: defaultProxy,
|
||||
ProxyConfig: expectedProxyConfig,
|
||||
|
||||
NetmonConfig: expectedNetmonConfig,
|
||||
|
||||
FactoryConfig: expectedFactoryConfig,
|
||||
@@ -630,87 +609,6 @@ func TestMinimalRuntimeConfig(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestMinimalRuntimeConfigWithVsock(t *testing.T) {
|
||||
dir, err := ioutil.TempDir(testDir, "minimal-runtime-config-")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
imagePath := path.Join(dir, "image.img")
|
||||
initrdPath := path.Join(dir, "initrd.img")
|
||||
proxyPath := path.Join(dir, "proxy")
|
||||
hypervisorPath := path.Join(dir, "hypervisor")
|
||||
kernelPath := path.Join(dir, "kernel")
|
||||
|
||||
savedDefaultImagePath := defaultImagePath
|
||||
savedDefaultInitrdPath := defaultInitrdPath
|
||||
savedDefaultHypervisorPath := defaultHypervisorPath
|
||||
savedDefaultKernelPath := defaultKernelPath
|
||||
|
||||
defer func() {
|
||||
defaultImagePath = savedDefaultImagePath
|
||||
defaultInitrdPath = savedDefaultInitrdPath
|
||||
defaultHypervisorPath = savedDefaultHypervisorPath
|
||||
defaultKernelPath = savedDefaultKernelPath
|
||||
}()
|
||||
|
||||
// Temporarily change the defaults to avoid this test using the real
|
||||
// resource files that might be installed on the system!
|
||||
defaultImagePath = imagePath
|
||||
defaultInitrdPath = initrdPath
|
||||
defaultHypervisorPath = hypervisorPath
|
||||
defaultKernelPath = kernelPath
|
||||
|
||||
for _, file := range []string{proxyPath, hypervisorPath, kernelPath, imagePath} {
|
||||
err = WriteFile(file, "foo", testFileMode)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// minimal config with vsock enabled
|
||||
runtimeMinimalConfig := `
|
||||
# Runtime configuration file
|
||||
[hypervisor.qemu]
|
||||
use_vsock = true
|
||||
image = "` + imagePath + `"
|
||||
|
||||
[proxy.kata]
|
||||
path = "` + proxyPath + `"
|
||||
|
||||
[agent.kata]
|
||||
`
|
||||
orgVHostVSockDevicePath := utils.VHostVSockDevicePath
|
||||
defer func() {
|
||||
utils.VHostVSockDevicePath = orgVHostVSockDevicePath
|
||||
}()
|
||||
utils.VHostVSockDevicePath = "/dev/null"
|
||||
|
||||
configPath := path.Join(dir, "runtime.toml")
|
||||
err = createConfig(configPath, runtimeMinimalConfig)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, config, err := LoadConfiguration(configPath, false, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if config.ProxyType != vc.NoProxyType {
|
||||
t.Fatalf("Proxy type must be NoProxy, got %+v", config.ProxyType)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(config.ProxyConfig, vc.ProxyConfig{}) {
|
||||
t.Fatalf("Got %+v\n expecting %+v", config.ProxyConfig, vc.ProxyConfig{})
|
||||
}
|
||||
|
||||
if config.HypervisorConfig.UseVSock != true {
|
||||
t.Fatalf("use_vsock must be true, got %v", config.HypervisorConfig.UseVSock)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewQemuHypervisorConfig(t *testing.T) {
|
||||
dir, err := ioutil.TempDir(testDir, "hypervisor-config-")
|
||||
if err != nil {
|
||||
@@ -730,7 +628,7 @@ func TestNewQemuHypervisorConfig(t *testing.T) {
|
||||
defer func() {
|
||||
utils.VHostVSockDevicePath = orgVHostVSockDevicePath
|
||||
}()
|
||||
utils.VHostVSockDevicePath = "/dev/abc/xyz"
|
||||
utils.VHostVSockDevicePath = "/dev/null"
|
||||
// 10Mbits/sec
|
||||
rxRateLimiterMaxRate := uint64(10000000)
|
||||
txRateLimiterMaxRate := uint64(10000000)
|
||||
@@ -744,7 +642,6 @@ func TestNewQemuHypervisorConfig(t *testing.T) {
|
||||
EnableIOThreads: enableIOThreads,
|
||||
HotplugVFIOOnRootBus: hotplugVFIOOnRootBus,
|
||||
PCIeRootPort: pcieRootPort,
|
||||
UseVSock: true,
|
||||
RxRateLimiterMaxRate: rxRateLimiterMaxRate,
|
||||
TxRateLimiterMaxRate: txRateLimiterMaxRate,
|
||||
}
|
||||
@@ -766,16 +663,8 @@ func TestNewQemuHypervisorConfig(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// falling back to legacy serial port
|
||||
config, err := newQemuHypervisorConfig(hypervisor)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
utils.VHostVSockDevicePath = "/dev/null"
|
||||
|
||||
// all paths exist now
|
||||
config, err = newQemuHypervisorConfig(hypervisor)
|
||||
config, err := newQemuHypervisorConfig(hypervisor)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -830,7 +719,6 @@ func TestNewFirecrackerHypervisorConfig(t *testing.T) {
|
||||
jailerPath := path.Join(dir, "jailer")
|
||||
disableBlockDeviceUse := false
|
||||
disableVhostNet := true
|
||||
useVSock := true
|
||||
blockDeviceDriver := "virtio-mmio"
|
||||
// !0Mbits/sec
|
||||
rxRateLimiterMaxRate := uint64(10000000)
|
||||
@@ -902,10 +790,6 @@ func TestNewFirecrackerHypervisorConfig(t *testing.T) {
|
||||
t.Errorf("Expected value for disable vhost net usage %v, got %v", disableVhostNet, config.DisableVhostNet)
|
||||
}
|
||||
|
||||
if config.UseVSock != useVSock {
|
||||
t.Errorf("Expected value for vsock usage %v, got %v", useVSock, config.UseVSock)
|
||||
}
|
||||
|
||||
if config.RxRateLimiterMaxRate != rxRateLimiterMaxRate {
|
||||
t.Errorf("Expected value for rx rate limiter %v, got %v", rxRateLimiterMaxRate, config.RxRateLimiterMaxRate)
|
||||
}
|
||||
@@ -1002,10 +886,6 @@ func TestNewClhHypervisorConfig(t *testing.T) {
|
||||
t.Errorf("Expected image path %v, got %v", hypervisor.Image, config.ImagePath)
|
||||
}
|
||||
|
||||
if config.UseVSock != true {
|
||||
t.Errorf("Expected UseVSock %v, got %v", true, config.UseVSock)
|
||||
}
|
||||
|
||||
if config.DisableVhostNet != true {
|
||||
t.Errorf("Expected DisableVhostNet %v, got %v", true, config.DisableVhostNet)
|
||||
}
|
||||
@@ -1238,46 +1118,6 @@ func TestHypervisorDefaultsVhostUserStorePath(t *testing.T) {
|
||||
assert.Equal(vhostUserStorePath, testVhostUserStorePath, "custom vhost-user store path wrong")
|
||||
}
|
||||
|
||||
func TestProxyDefaults(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
tmpdir, err := ioutil.TempDir(testDir, "")
|
||||
assert.NoError(err)
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
testProxyPath := filepath.Join(tmpdir, "proxy")
|
||||
testProxyLinkPath := filepath.Join(tmpdir, "proxy-link")
|
||||
|
||||
err = createEmptyFile(testProxyPath)
|
||||
assert.NoError(err)
|
||||
|
||||
err = syscall.Symlink(testProxyPath, testProxyLinkPath)
|
||||
assert.NoError(err)
|
||||
|
||||
savedProxyPath := defaultProxyPath
|
||||
|
||||
defer func() {
|
||||
defaultProxyPath = savedProxyPath
|
||||
}()
|
||||
|
||||
defaultProxyPath = testProxyPath
|
||||
p := proxy{}
|
||||
path, err := p.path()
|
||||
assert.NoError(err)
|
||||
assert.Equal(path, defaultProxyPath, "default proxy path wrong")
|
||||
|
||||
// test path resolution
|
||||
defaultProxyPath = testProxyLinkPath
|
||||
p = proxy{}
|
||||
path, err = p.path()
|
||||
assert.NoError(err)
|
||||
assert.Equal(path, testProxyPath)
|
||||
|
||||
assert.False(p.debug())
|
||||
p.Debug = true
|
||||
assert.True(p.debug())
|
||||
}
|
||||
|
||||
func TestAgentDefaults(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
|
||||
@@ -65,8 +65,6 @@ func HandleFactory(ctx context.Context, vci vc.VC, runtimeConfig *oci.RuntimeCon
|
||||
HypervisorType: runtimeConfig.HypervisorType,
|
||||
HypervisorConfig: runtimeConfig.HypervisorConfig,
|
||||
AgentConfig: runtimeConfig.AgentConfig,
|
||||
ProxyType: runtimeConfig.ProxyType,
|
||||
ProxyConfig: runtimeConfig.ProxyConfig,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -109,7 +109,6 @@ func newTestRuntimeConfig(dir, consolePath string, create bool) (oci.RuntimeConf
|
||||
return oci.RuntimeConfig{
|
||||
HypervisorType: vc.QemuHypervisor,
|
||||
HypervisorConfig: hypervisorConfig,
|
||||
ProxyType: vc.KataProxyType,
|
||||
Console: consolePath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ Table of Contents
|
||||
* [Hypervisors](#hypervisors)
|
||||
* [Agents](#agents)
|
||||
* [Shim](#shim)
|
||||
* [Proxy](#proxy)
|
||||
* [API](#api)
|
||||
* [Sandbox API](#sandbox-api)
|
||||
* [Container API](#container-api)
|
||||
@@ -101,12 +100,6 @@ monitoring. In cases where they assume containers are simply regular host proces
|
||||
layer is needed to translate host specific semantics into e.g. agent controlled virtual
|
||||
machine ones.
|
||||
|
||||
## Proxy
|
||||
|
||||
When hardware virtualized containers have limited I/O multiplexing capabilities,
|
||||
runtimes may decide to rely on an external host proxy to support cases where several
|
||||
runtime instances are talking to the same container.
|
||||
|
||||
# API
|
||||
|
||||
The high level `virtcontainers` API is the following one:
|
||||
|
||||
@@ -722,8 +722,8 @@ func (a *Acrn) check() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Acrn) generateSocket(id string, useVsock bool) (interface{}, error) {
|
||||
return generateVMSocket(id, useVsock, a.store.RunVMStoragePath())
|
||||
func (a *Acrn) generateSocket(id string) (interface{}, error) {
|
||||
return generateVMSocket(id, a.store.RunVMStoragePath())
|
||||
}
|
||||
|
||||
// GetACRNUUIDBytes returns UUID bytes that is used for VM creation
|
||||
|
||||
@@ -107,7 +107,7 @@ func TestAcrnArchBaseAppendConsoles(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
acrnArchBase := newAcrnArchBase()
|
||||
|
||||
path := filepath.Join(filepath.Join(fs.MockRunStoragePath(), sandboxID), consoleSocket)
|
||||
path := filepath.Join(filepath.Join(fs.MockRunStoragePath(), "test"), consoleSocket)
|
||||
|
||||
expectedOut := []Device{
|
||||
ConsoleDevice{
|
||||
|
||||
@@ -56,10 +56,6 @@ type ProcessList []byte
|
||||
const (
|
||||
// SocketTypeVSOCK is a VSOCK socket type for talking to an agent.
|
||||
SocketTypeVSOCK = "vsock"
|
||||
|
||||
// SocketTypeUNIX is a UNIX socket type for talking to an agent.
|
||||
// It typically means the agent is living behind a host proxy.
|
||||
SocketTypeUNIX = "unix"
|
||||
)
|
||||
|
||||
// agent is the virtcontainers agent interface.
|
||||
@@ -86,18 +82,12 @@ type agent interface {
|
||||
// disconnect will disconnect the connection to the agent
|
||||
disconnect() error
|
||||
|
||||
// start the proxy
|
||||
startProxy(sandbox *Sandbox) error
|
||||
|
||||
// set to use an existing proxy
|
||||
setProxy(sandbox *Sandbox, proxy proxy, pid int, url string) error
|
||||
|
||||
// set to use an existing proxy from Grpc
|
||||
setProxyFromGrpc(proxy proxy, pid int, url string)
|
||||
|
||||
// get agent url
|
||||
getAgentURL() (string, error)
|
||||
|
||||
// set agent url
|
||||
setAgentURL() error
|
||||
|
||||
// update the agent using some elements from another agent
|
||||
reuseAgent(agent agent) error
|
||||
|
||||
@@ -173,10 +163,10 @@ type agent interface {
|
||||
resumeContainer(sandbox *Sandbox, c Container) error
|
||||
|
||||
// configure will update agent settings based on provided arguments
|
||||
configure(h hypervisor, id, sharePath string, builtin bool, config interface{}) error
|
||||
configure(h hypervisor, id, sharePath string, config interface{}) error
|
||||
|
||||
// configureFromGrpc will update agent settings based on provided arguments which from Grpc
|
||||
configureFromGrpc(h hypervisor, id string, builtin bool, config interface{}) error
|
||||
configureFromGrpc(h hypervisor, id string, config interface{}) error
|
||||
|
||||
// reseedRNG will reseed the guest random number generator
|
||||
reseedRNG(data []byte) error
|
||||
|
||||
@@ -192,16 +192,6 @@ func FetchSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the agent is long live connection, it needs to restart the proxy to
|
||||
// watch the guest console if it hadn't been watched.
|
||||
if s.agent.longLiveConn() {
|
||||
err = s.startProxy()
|
||||
if err != nil {
|
||||
s.Release()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -116,7 +116,6 @@ func newTestSandboxConfigNoop() SandboxConfig {
|
||||
|
||||
Annotations: sandboxAnnotations,
|
||||
|
||||
ProxyType: NoopProxyType,
|
||||
AgentConfig: KataAgentConfig{},
|
||||
}
|
||||
|
||||
@@ -171,23 +170,10 @@ func TestCreateSandboxKataAgentSuccessful(t *testing.T) {
|
||||
|
||||
config := newTestSandboxConfigKataAgent()
|
||||
|
||||
sockDir, err := testGenerateKataProxySockDir()
|
||||
hybridVSockTTRPCMock := mock.HybridVSockTTRPCMock{}
|
||||
err := hybridVSockTTRPCMock.Start(fmt.Sprintf("mock://%s", MockHybridVSockPath))
|
||||
assert.NoError(err)
|
||||
|
||||
defer os.RemoveAll(sockDir)
|
||||
|
||||
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
|
||||
noopProxyURL = testKataProxyURL
|
||||
|
||||
impl := &gRPCProxy{}
|
||||
|
||||
kataProxyMock := mock.ProxyGRPCMock{
|
||||
GRPCImplementer: impl,
|
||||
GRPCRegister: gRPCRegister,
|
||||
}
|
||||
err = kataProxyMock.Start(testKataProxyURL)
|
||||
assert.NoError(err)
|
||||
defer kataProxyMock.Stop()
|
||||
defer hybridVSockTTRPCMock.Stop()
|
||||
|
||||
ctx := WithNewAgentFunc(context.Background(), newMockAgent)
|
||||
p, err := CreateSandbox(ctx, config, nil)
|
||||
@@ -234,8 +220,6 @@ func createNewSandboxConfig(hType HypervisorType) SandboxConfig {
|
||||
AgentConfig: KataAgentConfig{},
|
||||
|
||||
NetworkConfig: netConfig,
|
||||
|
||||
ProxyType: NoopProxyType,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -772,11 +772,7 @@ func (clh *cloudHypervisor) reset() {
|
||||
clh.state.reset()
|
||||
}
|
||||
|
||||
func (clh *cloudHypervisor) generateSocket(id string, useVsock bool) (interface{}, error) {
|
||||
if !useVsock {
|
||||
return nil, fmt.Errorf("Can't generate hybrid vsocket for cloud-hypervisor: vsocks is disabled")
|
||||
}
|
||||
|
||||
func (clh *cloudHypervisor) generateSocket(id string) (interface{}, error) {
|
||||
udsPath, err := clh.vsockSocketPath(id)
|
||||
if err != nil {
|
||||
clh.Logger().Info("Can't generate socket path for cloud-hypervisor")
|
||||
|
||||
@@ -28,7 +28,6 @@ func TestTemplateFactory(t *testing.T) {
|
||||
}
|
||||
vmConfig := vc.VMConfig{
|
||||
HypervisorType: vc.MockHypervisor,
|
||||
ProxyType: vc.NoopProxyType,
|
||||
HypervisorConfig: hyperConfig,
|
||||
}
|
||||
|
||||
|
||||
@@ -27,7 +27,6 @@ func TestTemplateFactory(t *testing.T) {
|
||||
}
|
||||
vmConfig := vc.VMConfig{
|
||||
HypervisorType: vc.MockHypervisor,
|
||||
ProxyType: vc.NoopProxyType,
|
||||
HypervisorConfig: hyperConfig,
|
||||
}
|
||||
|
||||
|
||||
@@ -112,7 +112,6 @@ func resetHypervisorConfig(config *vc.VMConfig) {
|
||||
config.HypervisorConfig.BootFromTemplate = false
|
||||
config.HypervisorConfig.MemoryPath = ""
|
||||
config.HypervisorConfig.DevicesStatePath = ""
|
||||
config.ProxyConfig = vc.ProxyConfig{}
|
||||
}
|
||||
|
||||
// It's important that baseConfig and newConfig are passed by value!
|
||||
@@ -138,27 +137,18 @@ func (f *factory) checkConfig(config vc.VMConfig) error {
|
||||
return checkVMConfig(baseConfig, config)
|
||||
}
|
||||
|
||||
func (f *factory) validateNewVMConfig(config vc.VMConfig) error {
|
||||
if len(config.ProxyType.String()) == 0 {
|
||||
return fmt.Errorf("Missing proxy type")
|
||||
}
|
||||
|
||||
return config.Valid()
|
||||
}
|
||||
|
||||
// GetVM returns a working blank VM created by the factory.
|
||||
func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error) {
|
||||
span, _ := trace(ctx, "GetVM")
|
||||
defer span.Finish()
|
||||
|
||||
hypervisorConfig := config.HypervisorConfig
|
||||
err := f.validateNewVMConfig(config)
|
||||
if err != nil {
|
||||
if err := config.Valid(); err != nil {
|
||||
f.log().WithError(err).Error("invalid hypervisor config")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = f.checkConfig(config)
|
||||
err := f.checkConfig(config)
|
||||
if err != nil {
|
||||
f.log().WithError(err).Info("fallback to direct factory vm")
|
||||
return direct.New(ctx, config).GetBaseVM(ctx, config)
|
||||
|
||||
@@ -7,12 +7,14 @@ package factory
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
|
||||
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory/base"
|
||||
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/fs"
|
||||
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/mock"
|
||||
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/utils"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -33,7 +35,6 @@ func TestNewFactory(t *testing.T) {
|
||||
|
||||
config.VMConfig = vc.VMConfig{
|
||||
HypervisorType: vc.MockHypervisor,
|
||||
ProxyType: vc.NoopProxyType,
|
||||
}
|
||||
|
||||
_, err = NewFactory(ctx, config, false)
|
||||
@@ -58,6 +59,11 @@ func TestNewFactory(t *testing.T) {
|
||||
t.Skip(testDisabledAsNonRoot)
|
||||
}
|
||||
|
||||
hybridVSockTTRPCMock := mock.HybridVSockTTRPCMock{}
|
||||
err = hybridVSockTTRPCMock.Start(fmt.Sprintf("mock://%s", vc.MockHybridVSockPath))
|
||||
assert.NoError(err)
|
||||
defer hybridVSockTTRPCMock.Stop()
|
||||
|
||||
config.Template = true
|
||||
config.TemplatePath = fs.MockStorageRootPath()
|
||||
f, err = NewFactory(ctx, config, false)
|
||||
@@ -104,31 +110,6 @@ func TestFactorySetLogger(t *testing.T) {
|
||||
assert.Equal(f.log().Logger.Level, testLog.Logger.Level)
|
||||
}
|
||||
|
||||
func TestVMConfigValid(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
defer fs.MockStorageDestroy()
|
||||
config := vc.VMConfig{
|
||||
HypervisorType: vc.MockHypervisor,
|
||||
HypervisorConfig: vc.HypervisorConfig{
|
||||
KernelPath: fs.MockStorageRootPath(),
|
||||
ImagePath: fs.MockStorageRootPath(),
|
||||
},
|
||||
}
|
||||
|
||||
f := factory{}
|
||||
|
||||
err := f.validateNewVMConfig(config)
|
||||
assert.NotNil(err)
|
||||
|
||||
err = f.validateNewVMConfig(config)
|
||||
assert.NotNil(err)
|
||||
|
||||
config.ProxyType = vc.NoopProxyType
|
||||
err = f.validateNewVMConfig(config)
|
||||
assert.Nil(err)
|
||||
}
|
||||
|
||||
func TestCheckVMConfig(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
@@ -180,7 +161,6 @@ func TestFactoryGetVM(t *testing.T) {
|
||||
vmConfig := vc.VMConfig{
|
||||
HypervisorType: vc.MockHypervisor,
|
||||
HypervisorConfig: hyperConfig,
|
||||
ProxyType: vc.NoopProxyType,
|
||||
}
|
||||
|
||||
err := vmConfig.Valid()
|
||||
@@ -193,6 +173,11 @@ func TestFactoryGetVM(t *testing.T) {
|
||||
t.Skip(testDisabledAsNonRoot)
|
||||
}
|
||||
|
||||
hybridVSockTTRPCMock := mock.HybridVSockTTRPCMock{}
|
||||
err = hybridVSockTTRPCMock.Start(fmt.Sprintf("mock://%s", vc.MockHybridVSockPath))
|
||||
assert.NoError(err)
|
||||
defer hybridVSockTTRPCMock.Stop()
|
||||
|
||||
f, err := NewFactory(ctx, Config{VMConfig: vmConfig}, false)
|
||||
assert.Nil(err)
|
||||
|
||||
@@ -327,7 +312,6 @@ func TestDeepCompare(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
config.VMConfig = vc.VMConfig{
|
||||
HypervisorType: vc.MockHypervisor,
|
||||
ProxyType: vc.NoopProxyType,
|
||||
}
|
||||
testDir := fs.MockStorageRootPath()
|
||||
defer fs.MockStorageDestroy()
|
||||
|
||||
@@ -156,9 +156,6 @@ func (t *template) createFromTemplateVM(ctx context.Context, c vc.VMConfig) (*vc
|
||||
config.HypervisorConfig.BootFromTemplate = true
|
||||
config.HypervisorConfig.MemoryPath = t.statePath + "/memory"
|
||||
config.HypervisorConfig.DevicesStatePath = t.statePath + "/state"
|
||||
config.ProxyType = c.ProxyType
|
||||
config.ProxyConfig = c.ProxyConfig
|
||||
|
||||
return vc.NewVM(ctx, config)
|
||||
}
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@ package template
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -15,6 +16,7 @@ import (
|
||||
|
||||
vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
|
||||
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/fs"
|
||||
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/mock"
|
||||
)
|
||||
|
||||
const testDisabledAsNonRoot = "Test disabled as requires root privileges"
|
||||
@@ -38,7 +40,6 @@ func TestTemplateFactory(t *testing.T) {
|
||||
vmConfig := vc.VMConfig{
|
||||
HypervisorType: vc.MockHypervisor,
|
||||
HypervisorConfig: hyperConfig,
|
||||
ProxyType: vc.NoopProxyType,
|
||||
}
|
||||
|
||||
err := vmConfig.Valid()
|
||||
@@ -46,6 +47,11 @@ func TestTemplateFactory(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
hybridVSockTTRPCMock := mock.HybridVSockTTRPCMock{}
|
||||
err = hybridVSockTTRPCMock.Start(fmt.Sprintf("mock://%s", vc.MockHybridVSockPath))
|
||||
assert.NoError(err)
|
||||
defer hybridVSockTTRPCMock.Stop()
|
||||
|
||||
// New
|
||||
f, err := New(ctx, vmConfig, testDir)
|
||||
assert.Nil(err)
|
||||
|
||||
@@ -1204,11 +1204,7 @@ func (fc *firecracker) check() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fc *firecracker) generateSocket(id string, useVsock bool) (interface{}, error) {
|
||||
if !useVsock {
|
||||
return nil, fmt.Errorf("Can't start firecracker: vsocks is disabled")
|
||||
}
|
||||
|
||||
func (fc *firecracker) generateSocket(id string) (interface{}, error) {
|
||||
fc.Logger().Debug("Using hybrid-vsock endpoint")
|
||||
udsPath := filepath.Join(fc.jailerRoot, defaultHybridVSocketName)
|
||||
|
||||
|
||||
@@ -16,11 +16,7 @@ func TestFCGenerateSocket(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
fc := firecracker{}
|
||||
i, err := fc.generateSocket("a", false)
|
||||
assert.Error(err)
|
||||
assert.Nil(i)
|
||||
|
||||
i, err = fc.generateSocket("a", true)
|
||||
i, err := fc.generateSocket("a")
|
||||
assert.NoError(err)
|
||||
assert.NotNil(i)
|
||||
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -63,11 +62,6 @@ const (
|
||||
|
||||
defaultBlockDriver = config.VirtioSCSI
|
||||
|
||||
defaultSocketName = "kata.sock"
|
||||
defaultSocketDeviceID = "channel0"
|
||||
defaultSocketChannelName = "agent.channel.0"
|
||||
defaultSocketID = "charch0"
|
||||
|
||||
// port numbers below 1024 are called privileged ports. Only a process with
|
||||
// CAP_NET_BIND_SERVICE capability may bind to these port numbers.
|
||||
vSockPort = 1024
|
||||
@@ -376,9 +370,6 @@ type HypervisorConfig struct {
|
||||
// when running on top of another VMM.
|
||||
DisableNestingChecks bool
|
||||
|
||||
// UseVSock use a vsock for agent communication
|
||||
UseVSock bool
|
||||
|
||||
// DisableImageNvdimm is used to disable guest rootfs image nvdimm devices
|
||||
DisableImageNvdimm bool
|
||||
|
||||
@@ -746,8 +737,7 @@ func getHypervisorPid(h hypervisor) int {
|
||||
return pids[0]
|
||||
}
|
||||
|
||||
func generateVMSocket(id string, useVsock bool, vmStogarePath string) (interface{}, error) {
|
||||
if useVsock {
|
||||
func generateVMSocket(id string, vmStogarePath string) (interface{}, error) {
|
||||
vhostFd, contextID, err := utils.FindContextID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -758,19 +748,6 @@ func generateVMSocket(id string, useVsock bool, vmStogarePath string) (interface
|
||||
ContextID: contextID,
|
||||
Port: uint32(vSockPort),
|
||||
}, nil
|
||||
}
|
||||
|
||||
path, err := utils.BuildSocketPath(filepath.Join(vmStogarePath, id), defaultSocketName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return types.Socket{
|
||||
DeviceID: defaultSocketDeviceID,
|
||||
ID: defaultSocketID,
|
||||
HostPath: path,
|
||||
Name: defaultSocketChannelName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// hypervisor is the virtcontainers hypervisor interface.
|
||||
@@ -804,7 +781,7 @@ type hypervisor interface {
|
||||
load(persistapi.HypervisorState)
|
||||
|
||||
// generate the socket to communicate the host and guest
|
||||
generateSocket(id string, useVsock bool) (interface{}, error)
|
||||
generateSocket(id string) (interface{}, error)
|
||||
|
||||
// check if hypervisor supports built-in rate limiter.
|
||||
isRateLimiterBuiltin() bool
|
||||
|
||||
@@ -435,19 +435,10 @@ func genericTestRunningOnVMM(t *testing.T, data []testNestedVMMData) {
|
||||
func TestGenerateVMSocket(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
s, err := generateVMSocket("a", false, "")
|
||||
assert.NoError(err)
|
||||
socket, ok := s.(types.Socket)
|
||||
assert.True(ok)
|
||||
assert.NotEmpty(socket.DeviceID)
|
||||
assert.NotEmpty(socket.ID)
|
||||
assert.NotEmpty(socket.HostPath)
|
||||
assert.NotEmpty(socket.Name)
|
||||
|
||||
if tc.NotValid(ktu.NeedRoot()) {
|
||||
t.Skip(testDisabledAsNonRoot)
|
||||
}
|
||||
s, err = generateVMSocket("a", true, "")
|
||||
s, err := generateVMSocket("a", "")
|
||||
assert.NoError(err)
|
||||
vsock, ok := s.(types.VSock)
|
||||
assert.True(ok)
|
||||
|
||||
@@ -57,7 +57,6 @@ const (
|
||||
var (
|
||||
checkRequestTimeout = 30 * time.Second
|
||||
defaultRequestTimeout = 60 * time.Second
|
||||
errorMissingProxy = errors.New("Missing proxy pointer")
|
||||
errorMissingOCISpec = errors.New("Missing OCI specification")
|
||||
defaultKataHostSharedDir = "/run/kata-containers/shared/sandboxes/"
|
||||
defaultKataGuestSharedDir = "/run/kata-containers/shared/containers/"
|
||||
@@ -197,7 +196,6 @@ func ephemeralPath() string {
|
||||
// to reach the Kata Containers agent.
|
||||
type KataAgentConfig struct {
|
||||
LongLiveConn bool
|
||||
UseVSock bool
|
||||
Debug bool
|
||||
Trace bool
|
||||
ContainerPipeSize uint32
|
||||
@@ -209,13 +207,10 @@ type KataAgentConfig struct {
|
||||
// KataAgentState is the structure describing the data stored from this
|
||||
// agent implementation.
|
||||
type KataAgentState struct {
|
||||
ProxyPid int
|
||||
URL string
|
||||
}
|
||||
|
||||
type kataAgent struct {
|
||||
proxy proxy
|
||||
|
||||
// lock protects the client pointer
|
||||
sync.Mutex
|
||||
client *kataclient.AgentClient
|
||||
@@ -223,7 +218,6 @@ type kataAgent struct {
|
||||
reqHandlers map[string]reqFunc
|
||||
state KataAgentState
|
||||
keepConn bool
|
||||
proxyBuiltIn bool
|
||||
dynamicTracing bool
|
||||
dead bool
|
||||
kmodules []string
|
||||
@@ -331,24 +325,17 @@ func (k *kataAgent) init(ctx context.Context, sandbox *Sandbox, config KataAgent
|
||||
k.keepConn = config.LongLiveConn
|
||||
k.kmodules = config.KernelModules
|
||||
|
||||
k.proxy, err = newProxy(sandbox.config.ProxyType)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
k.proxyBuiltIn = isProxyBuiltIn(sandbox.config.ProxyType)
|
||||
|
||||
return disableVMShutdown, nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) agentURL() (string, error) {
|
||||
switch s := k.vmSocket.(type) {
|
||||
case types.Socket:
|
||||
return s.HostPath, nil
|
||||
case types.VSock:
|
||||
return s.String(), nil
|
||||
case types.HybridVSock:
|
||||
return s.String(), nil
|
||||
case types.MockHybridVSock:
|
||||
return s.String(), nil
|
||||
default:
|
||||
return "", fmt.Errorf("Invalid socket type")
|
||||
}
|
||||
@@ -363,12 +350,12 @@ func (k *kataAgent) capabilities() types.Capabilities {
|
||||
return caps
|
||||
}
|
||||
|
||||
func (k *kataAgent) internalConfigure(h hypervisor, id string, builtin bool, config interface{}) error {
|
||||
func (k *kataAgent) internalConfigure(h hypervisor, id string, config interface{}) error {
|
||||
var err error
|
||||
if config != nil {
|
||||
switch c := config.(type) {
|
||||
case KataAgentConfig:
|
||||
if k.vmSocket, err = h.generateSocket(id, c.UseVSock); err != nil {
|
||||
if k.vmSocket, err = h.generateSocket(id); err != nil {
|
||||
return err
|
||||
}
|
||||
k.keepConn = c.LongLiveConn
|
||||
@@ -377,25 +364,16 @@ func (k *kataAgent) internalConfigure(h hypervisor, id string, builtin bool, con
|
||||
}
|
||||
}
|
||||
|
||||
if builtin {
|
||||
k.proxyBuiltIn = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) configure(h hypervisor, id, sharePath string, builtin bool, config interface{}) error {
|
||||
err := k.internalConfigure(h, id, builtin, config)
|
||||
func (k *kataAgent) configure(h hypervisor, id, sharePath string, config interface{}) error {
|
||||
err := k.internalConfigure(h, id, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch s := k.vmSocket.(type) {
|
||||
case types.Socket:
|
||||
err = h.addDevice(s, serialPortDev)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case types.VSock:
|
||||
if err = h.addDevice(s, vSockPCIDev); err != nil {
|
||||
return err
|
||||
@@ -405,6 +383,7 @@ func (k *kataAgent) configure(h hypervisor, id, sharePath string, builtin bool,
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case types.MockHybridVSock:
|
||||
default:
|
||||
return vcTypes.ErrInvalidConfigType
|
||||
}
|
||||
@@ -430,8 +409,8 @@ func (k *kataAgent) configure(h hypervisor, id, sharePath string, builtin bool,
|
||||
return h.addDevice(sharedVolume, fsDev)
|
||||
}
|
||||
|
||||
func (k *kataAgent) configureFromGrpc(h hypervisor, id string, builtin bool, config interface{}) error {
|
||||
return k.internalConfigure(h, id, builtin, config)
|
||||
func (k *kataAgent) configureFromGrpc(h hypervisor, id string, config interface{}) error {
|
||||
return k.internalConfigure(h, id, config)
|
||||
}
|
||||
|
||||
func (k *kataAgent) setupSharedPath(sandbox *Sandbox) error {
|
||||
@@ -460,7 +439,7 @@ func (k *kataAgent) createSandbox(sandbox *Sandbox) error {
|
||||
if err := k.setupSharedPath(sandbox); err != nil {
|
||||
return err
|
||||
}
|
||||
return k.configure(sandbox.hypervisor, sandbox.id, getSharePath(sandbox.id), k.proxyBuiltIn, sandbox.config.AgentConfig)
|
||||
return k.configure(sandbox.hypervisor, sandbox.id, getSharePath(sandbox.id), sandbox.config.AgentConfig)
|
||||
}
|
||||
|
||||
func cmdToKataProcess(cmd types.Cmd) (process *grpc.Process, err error) {
|
||||
@@ -664,93 +643,19 @@ func (k *kataAgent) listRoutes() ([]*pbTypes.Route, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (k *kataAgent) startProxy(sandbox *Sandbox) error {
|
||||
span, _ := k.trace("startProxy")
|
||||
defer span.Finish()
|
||||
|
||||
var err error
|
||||
var agentURL string
|
||||
|
||||
if k.proxy == nil {
|
||||
return errorMissingProxy
|
||||
}
|
||||
|
||||
if k.proxy.consoleWatched() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if k.state.URL != "" {
|
||||
// For keepConn case, when k.state.URL isn't nil, it means shimv2 had disconnected from
|
||||
// sandbox and try to relaunch sandbox again. Here it needs to start proxy again to watch
|
||||
// the hypervisor console.
|
||||
if k.keepConn {
|
||||
agentURL = k.state.URL
|
||||
} else {
|
||||
k.Logger().WithFields(logrus.Fields{
|
||||
"sandbox": sandbox.id,
|
||||
"proxy-pid": k.state.ProxyPid,
|
||||
"proxy-url": k.state.URL,
|
||||
}).Infof("proxy already started")
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
// Get agent socket path to provide it to the proxy.
|
||||
agentURL, err = k.agentURL()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
consoleURL, err := sandbox.hypervisor.getSandboxConsole(sandbox.id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
proxyParams := proxyParams{
|
||||
id: sandbox.id,
|
||||
hid: getHypervisorPid(sandbox.hypervisor),
|
||||
path: sandbox.config.ProxyConfig.Path,
|
||||
agentURL: agentURL,
|
||||
consoleURL: consoleURL,
|
||||
logger: k.Logger().WithField("sandbox", sandbox.id),
|
||||
// Disable debug so proxy doesn't read console if we want to
|
||||
// debug the agent console ourselves.
|
||||
debug: sandbox.config.ProxyConfig.Debug &&
|
||||
!k.hasAgentDebugConsole(sandbox),
|
||||
}
|
||||
|
||||
// Start the proxy here
|
||||
pid, uri, err := k.proxy.start(proxyParams)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If error occurs after kata-proxy process start,
|
||||
// then rollback to kill kata-proxy process
|
||||
defer func() {
|
||||
if err != nil {
|
||||
k.proxy.stop(pid)
|
||||
}
|
||||
}()
|
||||
|
||||
// Fill agent state with proxy information, and store them.
|
||||
if err = k.setProxy(sandbox, k.proxy, pid, uri); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
k.Logger().WithFields(logrus.Fields{
|
||||
"sandbox": sandbox.id,
|
||||
"proxy-pid": pid,
|
||||
"proxy-url": uri,
|
||||
}).Info("proxy started")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) getAgentURL() (string, error) {
|
||||
return k.agentURL()
|
||||
}
|
||||
|
||||
func (k *kataAgent) setAgentURL() error {
|
||||
var err error
|
||||
if k.state.URL, err = k.agentURL(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) reuseAgent(agent agent) error {
|
||||
a, ok := agent.(*kataAgent)
|
||||
if !ok {
|
||||
@@ -762,31 +667,6 @@ func (k *kataAgent) reuseAgent(agent agent) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) setProxy(sandbox *Sandbox, proxy proxy, pid int, url string) error {
|
||||
if url == "" {
|
||||
var err error
|
||||
if url, err = k.agentURL(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Are we setting the same proxy again?
|
||||
if k.proxy != nil && k.state.URL != "" && k.state.URL != url {
|
||||
k.proxy.stop(k.state.ProxyPid)
|
||||
}
|
||||
|
||||
k.proxy = proxy
|
||||
k.state.ProxyPid = pid
|
||||
k.state.URL = url
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) setProxyFromGrpc(proxy proxy, pid int, url string) {
|
||||
k.proxy = proxy
|
||||
k.state.ProxyPid = pid
|
||||
k.state.URL = url
|
||||
}
|
||||
|
||||
func (k *kataAgent) getDNS(sandbox *Sandbox) ([]string, error) {
|
||||
ociSpec := sandbox.GetPatchedOCISpec()
|
||||
if ociSpec == nil {
|
||||
@@ -815,16 +695,10 @@ func (k *kataAgent) startSandbox(sandbox *Sandbox) error {
|
||||
span, _ := k.trace("startSandbox")
|
||||
defer span.Finish()
|
||||
|
||||
err := k.startProxy(sandbox)
|
||||
if err != nil {
|
||||
if err := k.setAgentURL(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
k.proxy.stop(k.state.ProxyPid)
|
||||
}
|
||||
}()
|
||||
hostname := sandbox.config.Hostname
|
||||
if len(hostname) > maxHostnameLen {
|
||||
hostname = hostname[:maxHostnameLen]
|
||||
@@ -976,10 +850,6 @@ func (k *kataAgent) stopSandbox(sandbox *Sandbox) error {
|
||||
span, _ := k.trace("stopSandbox")
|
||||
defer span.Finish()
|
||||
|
||||
if k.proxy == nil {
|
||||
return errorMissingProxy
|
||||
}
|
||||
|
||||
req := &grpc.DestroySandboxRequest{}
|
||||
|
||||
if _, err := k.sendReq(req); err != nil {
|
||||
@@ -993,13 +863,6 @@ func (k *kataAgent) stopSandbox(sandbox *Sandbox) error {
|
||||
}
|
||||
}
|
||||
|
||||
if err := k.proxy.stop(k.state.ProxyPid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// clean up agent state
|
||||
k.state.ProxyPid = -1
|
||||
k.state.URL = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1861,15 +1724,8 @@ func (k *kataAgent) connect() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
if k.state.ProxyPid > 0 {
|
||||
// check that proxy is running before talk with it avoiding long timeouts
|
||||
if err := syscall.Kill(k.state.ProxyPid, syscall.Signal(0)); err != nil {
|
||||
return errors.New("Proxy is not running")
|
||||
}
|
||||
}
|
||||
|
||||
k.Logger().WithField("url", k.state.URL).WithField("proxy", k.state.ProxyPid).Info("New client")
|
||||
client, err := kataclient.NewAgentClient(k.ctx, k.state.URL, k.proxyBuiltIn)
|
||||
k.Logger().WithField("url", k.state.URL).Info("New client")
|
||||
client, err := kataclient.NewAgentClient(k.ctx, k.state.URL)
|
||||
if err != nil {
|
||||
k.dead = true
|
||||
return err
|
||||
@@ -2249,13 +2105,11 @@ func (k *kataAgent) cleanup(s *Sandbox) {
|
||||
|
||||
func (k *kataAgent) save() persistapi.AgentState {
|
||||
return persistapi.AgentState{
|
||||
ProxyPid: k.state.ProxyPid,
|
||||
URL: k.state.URL,
|
||||
}
|
||||
}
|
||||
|
||||
func (k *kataAgent) load(s persistapi.AgentState) {
|
||||
k.state.ProxyPid = s.ProxyPid
|
||||
k.state.URL = s.URL
|
||||
}
|
||||
|
||||
|
||||
@@ -18,8 +18,6 @@ import (
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/ttrpc"
|
||||
gpb "github.com/gogo/protobuf/types"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
@@ -28,7 +26,6 @@ import (
|
||||
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/drivers"
|
||||
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/manager"
|
||||
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
|
||||
aTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols"
|
||||
pbTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols"
|
||||
pb "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
|
||||
vcAnnotations "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
|
||||
@@ -38,7 +35,6 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
testKataProxyURLTempl = "unix://%s/kata-proxy-test.sock"
|
||||
testBlkDriveFormat = "testBlkDriveFormat"
|
||||
testBlockDeviceCtrPath = "testBlockDeviceCtrPath"
|
||||
testDevNo = "testDevNo"
|
||||
@@ -48,35 +44,21 @@ var (
|
||||
testVirtPath = "testVirtPath"
|
||||
)
|
||||
|
||||
func testGenerateKataProxySockDir() (string, error) {
|
||||
dir, err := ioutil.TempDir("", "kata-proxy-test")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
func TestKataAgentConnect(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
proxy := mock.ProxyGRPCMock{
|
||||
GRPCImplementer: &gRPCProxy{},
|
||||
GRPCRegister: gRPCRegister,
|
||||
}
|
||||
|
||||
sockDir, err := testGenerateKataProxySockDir()
|
||||
url, err := mock.GenerateKataMockHybridVSock()
|
||||
assert.NoError(err)
|
||||
defer os.RemoveAll(sockDir)
|
||||
|
||||
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
|
||||
err = proxy.Start(testKataProxyURL)
|
||||
hybridVSockTTRPCMock := mock.HybridVSockTTRPCMock{}
|
||||
err = hybridVSockTTRPCMock.Start(url)
|
||||
assert.NoError(err)
|
||||
defer proxy.Stop()
|
||||
defer hybridVSockTTRPCMock.Stop()
|
||||
|
||||
k := &kataAgent{
|
||||
ctx: context.Background(),
|
||||
state: KataAgentState{
|
||||
URL: testKataProxyURL,
|
||||
URL: url,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -87,24 +69,19 @@ func TestKataAgentConnect(t *testing.T) {
|
||||
|
||||
func TestKataAgentDisconnect(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
proxy := mock.ProxyGRPCMock{
|
||||
GRPCImplementer: &gRPCProxy{},
|
||||
GRPCRegister: gRPCRegister,
|
||||
}
|
||||
|
||||
sockDir, err := testGenerateKataProxySockDir()
|
||||
url, err := mock.GenerateKataMockHybridVSock()
|
||||
assert.NoError(err)
|
||||
defer os.RemoveAll(sockDir)
|
||||
|
||||
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
|
||||
err = proxy.Start(testKataProxyURL)
|
||||
hybridVSockTTRPCMock := mock.HybridVSockTTRPCMock{}
|
||||
err = hybridVSockTTRPCMock.Start(url)
|
||||
assert.NoError(err)
|
||||
defer proxy.Stop()
|
||||
defer hybridVSockTTRPCMock.Stop()
|
||||
|
||||
k := &kataAgent{
|
||||
ctx: context.Background(),
|
||||
state: KataAgentState{
|
||||
URL: testKataProxyURL,
|
||||
URL: url,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -113,159 +90,6 @@ func TestKataAgentDisconnect(t *testing.T) {
|
||||
assert.Nil(k.client)
|
||||
}
|
||||
|
||||
type gRPCProxy struct{}
|
||||
|
||||
var emptyResp = &gpb.Empty{}
|
||||
|
||||
func (p *gRPCProxy) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) ExecProcess(ctx context.Context, req *pb.ExecProcessRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) SignalProcess(ctx context.Context, req *pb.SignalProcessRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) WaitProcess(ctx context.Context, req *pb.WaitProcessRequest) (*pb.WaitProcessResponse, error) {
|
||||
return &pb.WaitProcessResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) ListProcesses(ctx context.Context, req *pb.ListProcessesRequest) (*pb.ListProcessesResponse, error) {
|
||||
return &pb.ListProcessesResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) UpdateContainer(ctx context.Context, req *pb.UpdateContainerRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) WriteStdin(ctx context.Context, req *pb.WriteStreamRequest) (*pb.WriteStreamResponse, error) {
|
||||
return &pb.WriteStreamResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) ReadStdout(ctx context.Context, req *pb.ReadStreamRequest) (*pb.ReadStreamResponse, error) {
|
||||
return &pb.ReadStreamResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) ReadStderr(ctx context.Context, req *pb.ReadStreamRequest) (*pb.ReadStreamResponse, error) {
|
||||
return &pb.ReadStreamResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) CloseStdin(ctx context.Context, req *pb.CloseStdinRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) TtyWinResize(ctx context.Context, req *pb.TtyWinResizeRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) CreateSandbox(ctx context.Context, req *pb.CreateSandboxRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) DestroySandbox(ctx context.Context, req *pb.DestroySandboxRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) UpdateInterface(ctx context.Context, req *pb.UpdateInterfaceRequest) (*aTypes.Interface, error) {
|
||||
return &aTypes.Interface{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) UpdateRoutes(ctx context.Context, req *pb.UpdateRoutesRequest) (*pb.Routes, error) {
|
||||
return &pb.Routes{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) ListInterfaces(ctx context.Context, req *pb.ListInterfacesRequest) (*pb.Interfaces, error) {
|
||||
return &pb.Interfaces{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) ListRoutes(ctx context.Context, req *pb.ListRoutesRequest) (*pb.Routes, error) {
|
||||
return &pb.Routes{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) AddARPNeighbors(ctx context.Context, req *pb.AddARPNeighborsRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) OnlineCPUMem(ctx context.Context, req *pb.OnlineCPUMemRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) StatsContainer(ctx context.Context, req *pb.StatsContainerRequest) (*pb.StatsContainerResponse, error) {
|
||||
return &pb.StatsContainerResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) Check(ctx context.Context, req *pb.CheckRequest) (*pb.HealthCheckResponse, error) {
|
||||
return &pb.HealthCheckResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) Version(ctx context.Context, req *pb.CheckRequest) (*pb.VersionCheckResponse, error) {
|
||||
return &pb.VersionCheckResponse{}, nil
|
||||
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) PauseContainer(ctx context.Context, req *pb.PauseContainerRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) ResumeContainer(ctx context.Context, req *pb.ResumeContainerRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) ReseedRandomDev(ctx context.Context, req *pb.ReseedRandomDevRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) GetGuestDetails(ctx context.Context, req *pb.GuestDetailsRequest) (*pb.GuestDetailsResponse, error) {
|
||||
return &pb.GuestDetailsResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) SetGuestDateTime(ctx context.Context, req *pb.SetGuestDateTimeRequest) (*gpb.Empty, error) {
|
||||
return &gpb.Empty{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) CopyFile(ctx context.Context, req *pb.CopyFileRequest) (*gpb.Empty, error) {
|
||||
return &gpb.Empty{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) StartTracing(ctx context.Context, req *pb.StartTracingRequest) (*gpb.Empty, error) {
|
||||
return &gpb.Empty{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) StopTracing(ctx context.Context, req *pb.StopTracingRequest) (*gpb.Empty, error) {
|
||||
return &gpb.Empty{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) MemHotplugByProbe(ctx context.Context, req *pb.MemHotplugByProbeRequest) (*gpb.Empty, error) {
|
||||
return &gpb.Empty{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) GetOOMEvent(ctx context.Context, req *pb.GetOOMEventRequest) (*pb.OOMEvent, error) {
|
||||
return &pb.OOMEvent{}, nil
|
||||
}
|
||||
|
||||
func (p *gRPCProxy) GetMetrics(ctx context.Context, req *pb.GetMetricsRequest) (*pb.Metrics, error) {
|
||||
return &pb.Metrics{}, nil
|
||||
}
|
||||
|
||||
func gRPCRegister(s *ttrpc.Server, srv interface{}) {
|
||||
switch g := srv.(type) {
|
||||
case *gRPCProxy:
|
||||
pb.RegisterAgentServiceService(s, g)
|
||||
pb.RegisterHealthService(s, g)
|
||||
}
|
||||
}
|
||||
|
||||
var reqList = []interface{}{
|
||||
&pb.CreateSandboxRequest{},
|
||||
&pb.DestroySandboxRequest{},
|
||||
@@ -283,26 +107,18 @@ var reqList = []interface{}{
|
||||
func TestKataAgentSendReq(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
impl := &gRPCProxy{}
|
||||
url, err := mock.GenerateKataMockHybridVSock()
|
||||
assert.NoError(err)
|
||||
|
||||
proxy := mock.ProxyGRPCMock{
|
||||
GRPCImplementer: impl,
|
||||
GRPCRegister: gRPCRegister,
|
||||
}
|
||||
|
||||
sockDir, err := testGenerateKataProxySockDir()
|
||||
assert.Nil(err)
|
||||
defer os.RemoveAll(sockDir)
|
||||
|
||||
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
|
||||
err = proxy.Start(testKataProxyURL)
|
||||
assert.Nil(err)
|
||||
defer proxy.Stop()
|
||||
hybridVSockTTRPCMock := mock.HybridVSockTTRPCMock{}
|
||||
err = hybridVSockTTRPCMock.Start(url)
|
||||
assert.NoError(err)
|
||||
defer hybridVSockTTRPCMock.Stop()
|
||||
|
||||
k := &kataAgent{
|
||||
ctx: context.Background(),
|
||||
state: KataAgentState{
|
||||
URL: testKataProxyURL,
|
||||
URL: url,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -905,14 +721,14 @@ func TestAgentConfigure(t *testing.T) {
|
||||
c := KataAgentConfig{}
|
||||
id := "foobar"
|
||||
|
||||
err = k.configure(h, id, dir, true, c)
|
||||
err = k.configure(h, id, dir, c)
|
||||
assert.Nil(err)
|
||||
|
||||
err = k.configure(h, id, dir, true, c)
|
||||
err = k.configure(h, id, dir, c)
|
||||
assert.Nil(err)
|
||||
assert.Empty(k.state.URL)
|
||||
|
||||
err = k.configure(h, id, dir, false, c)
|
||||
err = k.configure(h, id, dir, c)
|
||||
assert.Nil(err)
|
||||
}
|
||||
|
||||
@@ -996,26 +812,18 @@ func TestAgentCreateContainer(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
impl := &gRPCProxy{}
|
||||
url, err := mock.GenerateKataMockHybridVSock()
|
||||
assert.NoError(err)
|
||||
|
||||
proxy := mock.ProxyGRPCMock{
|
||||
GRPCImplementer: impl,
|
||||
GRPCRegister: gRPCRegister,
|
||||
}
|
||||
|
||||
sockDir, err := testGenerateKataProxySockDir()
|
||||
assert.Nil(err)
|
||||
defer os.RemoveAll(sockDir)
|
||||
|
||||
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
|
||||
err = proxy.Start(testKataProxyURL)
|
||||
assert.Nil(err)
|
||||
defer proxy.Stop()
|
||||
hybridVSockTTRPCMock := mock.HybridVSockTTRPCMock{}
|
||||
err = hybridVSockTTRPCMock.Start(url)
|
||||
assert.NoError(err)
|
||||
defer hybridVSockTTRPCMock.Stop()
|
||||
|
||||
k := &kataAgent{
|
||||
ctx: context.Background(),
|
||||
state: KataAgentState{
|
||||
URL: testKataProxyURL,
|
||||
URL: url,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1023,7 +831,7 @@ func TestAgentCreateContainer(t *testing.T) {
|
||||
assert.Nil(err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
err = k.configure(&mockHypervisor{}, sandbox.id, dir, true, KataAgentConfig{})
|
||||
err = k.configure(&mockHypervisor{}, sandbox.id, dir, KataAgentConfig{})
|
||||
assert.Nil(err)
|
||||
|
||||
// We'll fail on container metadata file creation, but it helps increasing coverage...
|
||||
@@ -1034,25 +842,18 @@ func TestAgentCreateContainer(t *testing.T) {
|
||||
func TestAgentNetworkOperation(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
impl := &gRPCProxy{}
|
||||
|
||||
proxy := mock.ProxyGRPCMock{
|
||||
GRPCImplementer: impl,
|
||||
GRPCRegister: gRPCRegister,
|
||||
}
|
||||
|
||||
sockDir, err := testGenerateKataProxySockDir()
|
||||
url, err := mock.GenerateKataMockHybridVSock()
|
||||
assert.NoError(err)
|
||||
defer os.RemoveAll(sockDir)
|
||||
|
||||
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
|
||||
assert.NoError(proxy.Start(testKataProxyURL))
|
||||
defer proxy.Stop()
|
||||
hybridVSockTTRPCMock := mock.HybridVSockTTRPCMock{}
|
||||
err = hybridVSockTTRPCMock.Start(url)
|
||||
assert.NoError(err)
|
||||
defer hybridVSockTTRPCMock.Stop()
|
||||
|
||||
k := &kataAgent{
|
||||
ctx: context.Background(),
|
||||
state: KataAgentState{
|
||||
URL: testKataProxyURL,
|
||||
URL: url,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1069,31 +870,17 @@ func TestAgentNetworkOperation(t *testing.T) {
|
||||
assert.Nil(err)
|
||||
}
|
||||
|
||||
func TestKataAgentSetProxy(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
k := &kataAgent{ctx: context.Background()}
|
||||
p := &kataBuiltInProxy{}
|
||||
s := &Sandbox{
|
||||
ctx: context.Background(),
|
||||
id: "foobar",
|
||||
}
|
||||
|
||||
err := k.setProxy(s, p, 0, "")
|
||||
assert.Error(err)
|
||||
}
|
||||
|
||||
func TestKataGetAgentUrl(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
var err error
|
||||
|
||||
k := &kataAgent{vmSocket: types.Socket{HostPath: "/abc"}}
|
||||
k := &kataAgent{vmSocket: types.VSock{}}
|
||||
assert.NoError(err)
|
||||
url, err := k.getAgentURL()
|
||||
assert.Nil(err)
|
||||
assert.NotEmpty(url)
|
||||
|
||||
k.vmSocket = types.VSock{}
|
||||
k.vmSocket = types.HybridVSock{}
|
||||
assert.NoError(err)
|
||||
url, err = k.getAgentURL()
|
||||
assert.Nil(err)
|
||||
@@ -1103,26 +890,18 @@ func TestKataGetAgentUrl(t *testing.T) {
|
||||
func TestKataCopyFile(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
impl := &gRPCProxy{}
|
||||
|
||||
proxy := mock.ProxyGRPCMock{
|
||||
GRPCImplementer: impl,
|
||||
GRPCRegister: gRPCRegister,
|
||||
}
|
||||
|
||||
sockDir, err := testGenerateKataProxySockDir()
|
||||
url, err := mock.GenerateKataMockHybridVSock()
|
||||
assert.NoError(err)
|
||||
defer os.RemoveAll(sockDir)
|
||||
|
||||
testKataProxyURL := fmt.Sprintf(testKataProxyURLTempl, sockDir)
|
||||
err = proxy.Start(testKataProxyURL)
|
||||
hybridVSockTTRPCMock := mock.HybridVSockTTRPCMock{}
|
||||
err = hybridVSockTTRPCMock.Start(url)
|
||||
assert.NoError(err)
|
||||
defer proxy.Stop()
|
||||
defer hybridVSockTTRPCMock.Stop()
|
||||
|
||||
k := &kataAgent{
|
||||
ctx: context.Background(),
|
||||
state: KataAgentState{
|
||||
URL: testKataProxyURL,
|
||||
URL: url,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
// Copyright (c) 2018 HyperHQ Inc.
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
package virtcontainers
|
||||
|
||||
import "fmt"
|
||||
|
||||
// This is a kata builtin proxy implementation of the proxy interface. Kata proxy
|
||||
// functionality is implemented inside the virtcontainers library.
|
||||
type kataBuiltInProxy struct {
|
||||
proxyBuiltin
|
||||
}
|
||||
|
||||
func (p *kataBuiltInProxy) validateParams(params proxyParams) error {
|
||||
if len(params.id) == 0 || len(params.agentURL) == 0 || len(params.consoleURL) == 0 {
|
||||
return fmt.Errorf("Invalid proxy parameters %+v", params)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// start is the proxy start implementation for kata builtin proxy.
|
||||
// It starts the console watcher for the guest.
|
||||
// It returns agentURL to let agent connect directly.
|
||||
func (p *kataBuiltInProxy) start(params proxyParams) (int, string, error) {
|
||||
if err := p.validateParams(params); err != nil {
|
||||
return -1, "", err
|
||||
}
|
||||
|
||||
return p.proxyBuiltin.start(params)
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
// Copyright (c) 2018 HyperHQ Inc.
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
package virtcontainers
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestKataBuiltinProxy(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
p := kataBuiltInProxy{}
|
||||
|
||||
params := proxyParams{debug: true}
|
||||
|
||||
err := p.validateParams(params)
|
||||
assert.NotNil(err)
|
||||
|
||||
params.id = "foobarproxy"
|
||||
err = p.validateParams(params)
|
||||
assert.NotNil(err)
|
||||
|
||||
params.agentURL = "foobaragent"
|
||||
err = p.validateParams(params)
|
||||
assert.NotNil(err)
|
||||
|
||||
params.consoleURL = "foobarconsole"
|
||||
err = p.validateParams(params)
|
||||
assert.Nil(err)
|
||||
|
||||
params.logger = logrus.WithField("proxy", params.id)
|
||||
buildinProxyConsoleProto = "foobarproto"
|
||||
_, _, err = p.start(params)
|
||||
assert.NotNil(err)
|
||||
assert.Empty(p.sandboxID)
|
||||
|
||||
err = p.stop(0)
|
||||
assert.Nil(err)
|
||||
|
||||
assert.False(p.consoleWatched())
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
// Copyright (c) 2017 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
package virtcontainers
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// This is the Kata Containers implementation of the proxy interface.
|
||||
// This is pretty simple since it provides the same interface to both
|
||||
// runtime and shim as if they were talking directly to the agent.
|
||||
type kataProxy struct {
|
||||
}
|
||||
|
||||
// The kata proxy doesn't need to watch the vm console, thus return false always.
|
||||
func (p *kataProxy) consoleWatched() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// start is kataProxy start implementation for proxy interface.
|
||||
func (p *kataProxy) start(params proxyParams) (int, string, error) {
|
||||
if err := validateProxyParams(params); err != nil {
|
||||
return -1, "", err
|
||||
}
|
||||
|
||||
params.logger.Debug("Starting regular Kata proxy rather than built-in")
|
||||
|
||||
// construct the socket path the proxy instance will use
|
||||
proxyURL, err := defaultProxyURL(params.id, SocketTypeUNIX)
|
||||
if err != nil {
|
||||
return -1, "", err
|
||||
}
|
||||
|
||||
args := []string{
|
||||
params.path,
|
||||
"-listen-socket", proxyURL,
|
||||
"-mux-socket", params.agentURL,
|
||||
"-sandbox", params.id,
|
||||
}
|
||||
|
||||
if params.debug {
|
||||
args = append(args, "-log", "debug", "-agent-logs-socket", params.consoleURL)
|
||||
}
|
||||
|
||||
cmd := exec.Command(args[0], args[1:]...)
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{
|
||||
Setsid: true,
|
||||
}
|
||||
if err := cmd.Start(); err != nil {
|
||||
return -1, "", err
|
||||
}
|
||||
|
||||
go cmd.Wait()
|
||||
|
||||
return cmd.Process.Pid, proxyURL, nil
|
||||
}
|
||||
|
||||
// stop is kataProxy stop implementation for proxy interface.
|
||||
func (p *kataProxy) stop(pid int) error {
|
||||
// Signal the proxy with SIGTERM.
|
||||
return syscall.Kill(pid, syscall.SIGTERM)
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
// Copyright (c) 2018 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
package virtcontainers
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestKataProxyStart(t *testing.T) {
|
||||
agent := &kataAgent{}
|
||||
proxy := &kataProxy{}
|
||||
|
||||
testProxyStart(t, agent, proxy)
|
||||
}
|
||||
@@ -26,11 +26,6 @@ func NewMockAgent() agent {
|
||||
return &mockAgent{}
|
||||
}
|
||||
|
||||
//start the proxy to watch the vm console. It does nothing.
|
||||
func (n *mockAgent) startProxy(sandbox *Sandbox) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// init initializes the Noop agent, i.e. it does nothing.
|
||||
func (n *mockAgent) init(ctx context.Context, sandbox *Sandbox, config KataAgentConfig) (bool, error) {
|
||||
return false, nil
|
||||
@@ -181,11 +176,11 @@ func (n *mockAgent) resumeContainer(sandbox *Sandbox, c Container) error {
|
||||
}
|
||||
|
||||
// configHypervisor is the Noop agent hypervisor configuration implementation. It does nothing.
|
||||
func (n *mockAgent) configure(h hypervisor, id, sharePath string, builtin bool, config interface{}) error {
|
||||
func (n *mockAgent) configure(h hypervisor, id, sharePath string, config interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *mockAgent) configureFromGrpc(h hypervisor, id string, builtin bool, config interface{}) error {
|
||||
func (n *mockAgent) configureFromGrpc(h hypervisor, id string, config interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -204,14 +199,11 @@ func (n *mockAgent) getAgentURL() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// setProxy is the Noop agent proxy setter. It does nothing.
|
||||
func (n *mockAgent) setProxy(sandbox *Sandbox, proxy proxy, pid int, url string) error {
|
||||
// setAgentURL is the Noop agent url setter. It does nothing.
|
||||
func (n *mockAgent) setAgentURL() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *mockAgent) setProxyFromGrpc(proxy proxy, pid int, url string) {
|
||||
}
|
||||
|
||||
// getGuestDetails is the Noop agent GuestDetails queryer. It does nothing.
|
||||
func (n *mockAgent) getGuestDetails(*grpc.GuestDetailsRequest) (*grpc.GuestDetailsResponse, error) {
|
||||
return nil, nil
|
||||
|
||||
@@ -14,6 +14,8 @@ import (
|
||||
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
|
||||
)
|
||||
|
||||
var MockHybridVSockPath = "/tmp/kata-mock-hybrid-vsock.socket"
|
||||
|
||||
type mockHypervisor struct {
|
||||
mockPid int
|
||||
}
|
||||
@@ -125,8 +127,10 @@ func (m *mockHypervisor) check() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) generateSocket(id string, useVsock bool) (interface{}, error) {
|
||||
return types.Socket{HostPath: "/tmp/socket", Name: "socket"}, nil
|
||||
func (m *mockHypervisor) generateSocket(id string) (interface{}, error) {
|
||||
return types.MockHybridVSock{
|
||||
UdsPath: MockHybridVSockPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) isRateLimiterBuiltin() bool {
|
||||
|
||||
@@ -92,7 +92,7 @@ func TestMockHypervisorCheck(t *testing.T) {
|
||||
func TestMockGenerateSocket(t *testing.T) {
|
||||
var m *mockHypervisor
|
||||
|
||||
i, err := m.generateSocket("a", true)
|
||||
i, err := m.generateSocket("a")
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, i)
|
||||
}
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
// Copyright (c) 2017 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
package virtcontainers
|
||||
|
||||
// This is the no proxy implementation of the proxy interface. This
|
||||
// is a generic implementation for any case (basically any agent),
|
||||
// where no actual proxy is needed. This happens when the combination
|
||||
// of the VM and the agent can handle multiple connections without
|
||||
// additional component to handle the multiplexing. Both the runtime
|
||||
// and the shim will connect to the agent through the VM, bypassing
|
||||
// the proxy model.
|
||||
// That's why this implementation is very generic, and all it does
|
||||
// is to provide both shim and runtime the correct URL to connect
|
||||
// directly to the VM.
|
||||
type noProxy struct {
|
||||
proxyBuiltin
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
// Copyright (c) 2017 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
package virtcontainers
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNoProxyStart(t *testing.T) {
|
||||
p := &noProxy{}
|
||||
assert := assert.New(t)
|
||||
|
||||
agentURL := "agentURL"
|
||||
_, _, err := p.start(proxyParams{
|
||||
agentURL: agentURL,
|
||||
})
|
||||
assert.NotNil(err)
|
||||
|
||||
pid, vmURL, err := p.start(proxyParams{
|
||||
agentURL: agentURL,
|
||||
logger: testDefaultLogger,
|
||||
})
|
||||
assert.Nil(err)
|
||||
assert.Equal(vmURL, agentURL)
|
||||
assert.Equal(pid, 0)
|
||||
|
||||
err = p.stop(0)
|
||||
assert.Nil(err)
|
||||
|
||||
assert.False(p.consoleWatched())
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
// Copyright (c) 2017 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
package virtcontainers
|
||||
|
||||
// This is a dummy proxy implementation of the proxy interface, only
|
||||
// used for testing purpose.
|
||||
type noopProxy struct{}
|
||||
|
||||
var noopProxyURL = "noopProxyURL"
|
||||
|
||||
// register is the proxy start implementation for testing purpose.
|
||||
// It does nothing.
|
||||
func (p *noopProxy) start(params proxyParams) (int, string, error) {
|
||||
return params.hid, noopProxyURL, nil
|
||||
}
|
||||
|
||||
// stop is the proxy stop implementation for testing purpose.
|
||||
// It does nothing.
|
||||
func (p *noopProxy) stop(pid int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// The noopproxy doesn't need to watch the vm console, thus return false always.
|
||||
func (p *noopProxy) consoleWatched() bool {
|
||||
return false
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
// Copyright (c) 2018 HyperHQ Inc.
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
package virtcontainers
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNoopProxy(t *testing.T) {
|
||||
n := &noopProxy{}
|
||||
assert := assert.New(t)
|
||||
|
||||
_, url, err := n.start(proxyParams{})
|
||||
assert.Nil(err)
|
||||
assert.Equal(url, noopProxyURL)
|
||||
|
||||
err = n.stop(0)
|
||||
assert.Nil(err)
|
||||
|
||||
assert.False(n.consoleWatched())
|
||||
}
|
||||
@@ -176,11 +176,6 @@ func (s *Sandbox) dumpConfig(ss *persistapi.SandboxState) {
|
||||
sconfig := s.config
|
||||
ss.Config = persistapi.SandboxConfig{
|
||||
HypervisorType: string(sconfig.HypervisorType),
|
||||
ProxyType: string(sconfig.ProxyType),
|
||||
ProxyConfig: persistapi.ProxyConfig{
|
||||
Path: sconfig.ProxyConfig.Path,
|
||||
Debug: sconfig.ProxyConfig.Debug,
|
||||
},
|
||||
NetworkConfig: persistapi.NetworkConfig{
|
||||
NetNSPath: sconfig.NetworkConfig.NetNSPath,
|
||||
NetNsCreated: sconfig.NetworkConfig.NetNsCreated,
|
||||
@@ -240,7 +235,6 @@ func (s *Sandbox) dumpConfig(ss *persistapi.SandboxState) {
|
||||
Realtime: sconfig.HypervisorConfig.Realtime,
|
||||
Mlock: sconfig.HypervisorConfig.Mlock,
|
||||
DisableNestingChecks: sconfig.HypervisorConfig.DisableNestingChecks,
|
||||
UseVSock: sconfig.HypervisorConfig.UseVSock,
|
||||
DisableImageNvdimm: sconfig.HypervisorConfig.DisableImageNvdimm,
|
||||
HotplugVFIOOnRootBus: sconfig.HypervisorConfig.HotplugVFIOOnRootBus,
|
||||
PCIeRootPort: sconfig.HypervisorConfig.PCIeRootPort,
|
||||
@@ -257,7 +251,6 @@ func (s *Sandbox) dumpConfig(ss *persistapi.SandboxState) {
|
||||
|
||||
ss.Config.KataAgentConfig = &persistapi.KataAgentConfig{
|
||||
LongLiveConn: sconfig.AgentConfig.LongLiveConn,
|
||||
UseVSock: sconfig.AgentConfig.UseVSock,
|
||||
}
|
||||
|
||||
for _, contConf := range sconfig.Containers {
|
||||
@@ -443,11 +436,6 @@ func loadSandboxConfig(id string) (*SandboxConfig, error) {
|
||||
sconfig := &SandboxConfig{
|
||||
ID: id,
|
||||
HypervisorType: HypervisorType(savedConf.HypervisorType),
|
||||
ProxyType: ProxyType(savedConf.ProxyType),
|
||||
ProxyConfig: ProxyConfig{
|
||||
Path: savedConf.ProxyConfig.Path,
|
||||
Debug: savedConf.ProxyConfig.Debug,
|
||||
},
|
||||
NetworkConfig: NetworkConfig{
|
||||
NetNSPath: savedConf.NetworkConfig.NetNSPath,
|
||||
NetNsCreated: savedConf.NetworkConfig.NetNsCreated,
|
||||
@@ -508,7 +496,6 @@ func loadSandboxConfig(id string) (*SandboxConfig, error) {
|
||||
Realtime: hconf.Realtime,
|
||||
Mlock: hconf.Mlock,
|
||||
DisableNestingChecks: hconf.DisableNestingChecks,
|
||||
UseVSock: hconf.UseVSock,
|
||||
DisableImageNvdimm: hconf.DisableImageNvdimm,
|
||||
HotplugVFIOOnRootBus: hconf.HotplugVFIOOnRootBus,
|
||||
PCIeRootPort: hconf.PCIeRootPort,
|
||||
@@ -525,7 +512,6 @@ func loadSandboxConfig(id string) (*SandboxConfig, error) {
|
||||
|
||||
sconfig.AgentConfig = KataAgentConfig{
|
||||
LongLiveConn: savedConf.KataAgentConfig.LongLiveConn,
|
||||
UseVSock: savedConf.KataAgentConfig.UseVSock,
|
||||
}
|
||||
|
||||
for _, contConf := range savedConf.ContainerConfigs {
|
||||
|
||||
@@ -146,9 +146,6 @@ type HypervisorConfig struct {
|
||||
// when running on top of another VMM.
|
||||
DisableNestingChecks bool
|
||||
|
||||
// UseVSock use a vsock for agent communication
|
||||
UseVSock bool
|
||||
|
||||
// DisableImageNvdimm disables nvdimm for guest rootfs image
|
||||
DisableImageNvdimm bool
|
||||
|
||||
@@ -194,14 +191,6 @@ type HypervisorConfig struct {
|
||||
// to reach the Kata Containers agent.
|
||||
type KataAgentConfig struct {
|
||||
LongLiveConn bool
|
||||
UseVSock bool
|
||||
}
|
||||
|
||||
// ProxyConfig is a structure storing information needed from any
|
||||
// proxy in order to be properly initialized.
|
||||
type ProxyConfig struct {
|
||||
Path string
|
||||
Debug bool
|
||||
}
|
||||
|
||||
// ShimConfig is the structure providing specific configuration
|
||||
@@ -236,9 +225,6 @@ type SandboxConfig struct {
|
||||
// only one agent config can be non-nil according to agent type
|
||||
KataAgentConfig *KataAgentConfig `json:",omitempty"`
|
||||
|
||||
ProxyType string
|
||||
ProxyConfig ProxyConfig
|
||||
|
||||
ShimType string
|
||||
KataShimConfig *ShimConfig
|
||||
|
||||
|
||||
@@ -10,9 +10,6 @@ package persistapi
|
||||
|
||||
// AgentState save agent state data
|
||||
type AgentState struct {
|
||||
// Pid of proxy process
|
||||
ProxyPid int
|
||||
|
||||
// URL to connect to agent
|
||||
URL string
|
||||
}
|
||||
|
||||
@@ -18,22 +18,21 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/yamux"
|
||||
"github.com/mdlayher/vsock"
|
||||
// opentracing "github.com/opentracing/opentracing-go"
|
||||
// opentracing "github.com/opentracing/opentracing-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
// "google.golang.org/grpc"
|
||||
// "google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
grpcStatus "google.golang.org/grpc/status"
|
||||
|
||||
agentgrpc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
|
||||
"github.com/containerd/ttrpc"
|
||||
agentgrpc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
|
||||
)
|
||||
|
||||
const (
|
||||
UnixSocketScheme = "unix"
|
||||
VSockSocketScheme = "vsock"
|
||||
HybridVSockScheme = "hvsock"
|
||||
MockHybridVSockScheme = "mock"
|
||||
)
|
||||
|
||||
var defaultDialTimeout = 15 * time.Second
|
||||
@@ -56,46 +55,17 @@ type AgentClient struct {
|
||||
conn *ttrpc.Client
|
||||
}
|
||||
|
||||
type yamuxSessionStream struct {
|
||||
net.Conn
|
||||
session *yamux.Session
|
||||
}
|
||||
|
||||
func (y *yamuxSessionStream) Close() error {
|
||||
waitCh := y.session.CloseChan()
|
||||
timeout := time.NewTimer(defaultCloseTimeout)
|
||||
|
||||
if err := y.Conn.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := y.session.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// block until session is really closed
|
||||
select {
|
||||
case <-waitCh:
|
||||
timeout.Stop()
|
||||
case <-timeout.C:
|
||||
return fmt.Errorf("timeout waiting for session close")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type dialer func(string, time.Duration) (net.Conn, error)
|
||||
|
||||
// NewAgentClient creates a new agent gRPC client and handles both unix and vsock addresses.
|
||||
//
|
||||
// Supported sock address formats are:
|
||||
// - unix://<unix socket path>
|
||||
// - vsock://<cid>:<port>
|
||||
// - <unix socket path>
|
||||
// - hvsock://<path>:<port>. Firecracker implements the virtio-vsock device
|
||||
// model, and mediates communication between AF_UNIX sockets (on the host end)
|
||||
// and AF_VSOCK sockets (on the guest end).
|
||||
func NewAgentClient(ctx context.Context, sock string, enableYamux bool) (*AgentClient, error) {
|
||||
// - mock://<path>. just for test use.
|
||||
func NewAgentClient(ctx context.Context, sock string) (*AgentClient, error) {
|
||||
grpcAddr, parsedAddr, err := parse(sock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -103,12 +73,12 @@ func NewAgentClient(ctx context.Context, sock string, enableYamux bool) (*AgentC
|
||||
|
||||
var conn net.Conn
|
||||
var d dialer
|
||||
d = agentDialer(parsedAddr, enableYamux)
|
||||
d = agentDialer(parsedAddr)
|
||||
conn, err = d(grpcAddr, defaultDialTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
/*
|
||||
/*
|
||||
dialOpts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithBlock()}
|
||||
dialOpts = append(dialOpts, grpc.WithDialer(agentDialer(parsedAddr, enableYamux)))
|
||||
|
||||
@@ -132,7 +102,7 @@ func NewAgentClient(ctx context.Context, sock string, enableYamux bool) (*AgentC
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
*/
|
||||
*/
|
||||
client := ttrpc.NewClient(conn)
|
||||
|
||||
return &AgentClient{
|
||||
@@ -176,17 +146,6 @@ func parse(sock string) (string, *url.URL, error) {
|
||||
return "", nil, grpcStatus.Errorf(codes.InvalidArgument, "Invalid vsock port: %s", sock)
|
||||
}
|
||||
grpcAddr = VSockSocketScheme + ":" + addr.Host
|
||||
case UnixSocketScheme:
|
||||
fallthrough
|
||||
case "":
|
||||
if (addr.Host == "" && addr.Path == "") || addr.Port() != "" {
|
||||
return "", nil, grpcStatus.Errorf(codes.InvalidArgument, "Invalid unix scheme: %s", sock)
|
||||
}
|
||||
if addr.Host == "" {
|
||||
grpcAddr = UnixSocketScheme + ":///" + addr.Path
|
||||
} else {
|
||||
grpcAddr = UnixSocketScheme + ":///" + addr.Host + "/" + addr.Path
|
||||
}
|
||||
case HybridVSockScheme:
|
||||
if addr.Path == "" {
|
||||
return "", nil, grpcStatus.Errorf(codes.InvalidArgument, "Invalid hybrid vsock scheme: %s", sock)
|
||||
@@ -202,6 +161,13 @@ func parse(sock string) (string, *url.URL, error) {
|
||||
}
|
||||
hybridVSockPort = uint32(port)
|
||||
grpcAddr = HybridVSockScheme + ":" + hvsocket[0]
|
||||
// just for tests use.
|
||||
case MockHybridVSockScheme:
|
||||
if addr.Path == "" {
|
||||
return "", nil, grpcStatus.Errorf(codes.InvalidArgument, "Invalid mock hybrid vsock scheme: %s", sock)
|
||||
}
|
||||
// e.g. mock:/tmp/socket
|
||||
grpcAddr = MockHybridVSockScheme + ":" + addr.Path
|
||||
default:
|
||||
return "", nil, grpcStatus.Errorf(codes.InvalidArgument, "Invalid scheme: %s", sock)
|
||||
}
|
||||
@@ -209,94 +175,17 @@ func parse(sock string) (string, *url.URL, error) {
|
||||
return grpcAddr, addr, nil
|
||||
}
|
||||
|
||||
// This function is meant to run in a go routine since it will send ping
|
||||
// commands every second. It behaves as a heartbeat to maintain a proper
|
||||
// communication state with the Yamux server in the agent.
|
||||
func heartBeat(session *yamux.Session) {
|
||||
if session == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
if session.IsClosed() {
|
||||
break
|
||||
}
|
||||
|
||||
session.Ping()
|
||||
|
||||
// 1 Hz heartbeat
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
func agentDialer(addr *url.URL, enableYamux bool) dialer {
|
||||
var d dialer
|
||||
func agentDialer(addr *url.URL) dialer {
|
||||
switch addr.Scheme {
|
||||
case VSockSocketScheme:
|
||||
d = vsockDialer
|
||||
return vsockDialer
|
||||
case HybridVSockScheme:
|
||||
d = HybridVSockDialer
|
||||
case UnixSocketScheme:
|
||||
fallthrough
|
||||
return HybridVSockDialer
|
||||
case MockHybridVSockScheme:
|
||||
return MockHybridVSockDialer
|
||||
default:
|
||||
d = unixDialer
|
||||
return nil
|
||||
}
|
||||
|
||||
if !enableYamux {
|
||||
return d
|
||||
}
|
||||
|
||||
// yamux dialer
|
||||
return func(sock string, timeout time.Duration) (net.Conn, error) {
|
||||
conn, err := d(sock, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
var session *yamux.Session
|
||||
sessionConfig := yamux.DefaultConfig()
|
||||
// Disable keepAlive since we don't know how much time a container can be paused
|
||||
sessionConfig.EnableKeepAlive = false
|
||||
sessionConfig.ConnectionWriteTimeout = time.Second
|
||||
session, err = yamux.Client(conn, sessionConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Start the heartbeat in a separate go routine
|
||||
go heartBeat(session)
|
||||
|
||||
var stream net.Conn
|
||||
stream, err = session.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
y := &yamuxSessionStream{
|
||||
Conn: stream.(net.Conn),
|
||||
session: session,
|
||||
}
|
||||
|
||||
return y, nil
|
||||
}
|
||||
}
|
||||
|
||||
func unixDialer(sock string, timeout time.Duration) (net.Conn, error) {
|
||||
if strings.HasPrefix(sock, "unix:") {
|
||||
sock = strings.Trim(sock, "unix:")
|
||||
}
|
||||
|
||||
dialFunc := func() (net.Conn, error) {
|
||||
return net.DialTimeout("unix", sock, timeout)
|
||||
}
|
||||
|
||||
timeoutErr := grpcStatus.Errorf(codes.DeadlineExceeded, "timed out connecting to unix socket %s", sock)
|
||||
return commonDialer(timeout, dialFunc, timeoutErr)
|
||||
}
|
||||
|
||||
func parseGrpcVsockAddr(sock string) (uint32, uint32, error) {
|
||||
@@ -471,3 +360,17 @@ func HybridVSockDialer(sock string, timeout time.Duration) (net.Conn, error) {
|
||||
timeoutErr := grpcStatus.Errorf(codes.DeadlineExceeded, "timed out connecting to hybrid vsocket %s", sock)
|
||||
return commonDialer(timeout, dialFunc, timeoutErr)
|
||||
}
|
||||
|
||||
// just for tests use.
|
||||
func MockHybridVSockDialer(sock string, timeout time.Duration) (net.Conn, error) {
|
||||
if strings.HasPrefix(sock, "mock:") {
|
||||
sock = strings.TrimPrefix(sock, "mock:")
|
||||
}
|
||||
|
||||
dialFunc := func() (net.Conn, error) {
|
||||
return net.DialTimeout("unix", sock, timeout)
|
||||
}
|
||||
|
||||
timeoutErr := grpcStatus.Errorf(codes.DeadlineExceeded, "timed out connecting to mock hybrid vsocket %s", sock)
|
||||
return commonDialer(timeout, dialFunc, timeoutErr)
|
||||
}
|
||||
|
||||
@@ -97,9 +97,6 @@ const (
|
||||
// GuestHookPath is a sandbox annotation to specify the path within the VM that will be used for 'drop-in' hooks.
|
||||
GuestHookPath = kataAnnotHypervisorPrefix + "guest_hook_path"
|
||||
|
||||
// UseVSock is a sandbox annotation to specify use of vsock for agent communication.
|
||||
UseVSock = kataAnnotHypervisorPrefix + "use_vsock"
|
||||
|
||||
// DisableImageNvdimm is a sandbox annotation to specify use of nvdimm device for guest rootfs image.
|
||||
DisableImageNvdimm = kataAnnotHypervisorPrefix + "disable_image_nvdimm"
|
||||
|
||||
|
||||
@@ -16,6 +16,9 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containerd/ttrpc"
|
||||
gpb "github.com/gogo/protobuf/types"
|
||||
aTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols"
|
||||
pb "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
|
||||
)
|
||||
|
||||
// DefaultMockKataShimBinPath is populated at link time.
|
||||
@@ -100,119 +103,211 @@ func StartShim(config ShimMockConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProxyMock is the proxy mock interface.
|
||||
// It allows for implementing different kind
|
||||
// of containers proxies front end.
|
||||
type ProxyMock interface {
|
||||
Start(URL string) error
|
||||
Stop() error
|
||||
var testKataMockHybridVSockURLTempl = "mock://%s/kata-mock-hybrid-vsock.sock"
|
||||
|
||||
func GenerateKataMockHybridVSock() (string, error) {
|
||||
dir, err := ioutil.TempDir("", "kata-mock-hybrid-vsock-test")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return fmt.Sprintf(testKataMockHybridVSockURLTempl, dir), nil
|
||||
}
|
||||
|
||||
// ProxyUnixMock is the UNIX proxy mock
|
||||
type ProxyUnixMock struct {
|
||||
ClientHandler func(c net.Conn)
|
||||
// HybridVSockTTRPCMock is the ttrpc-based mock hybrid-vsock backend implementation
|
||||
type HybridVSockTTRPCMock struct {
|
||||
// HybridVSockTTRPCMockImp is the structure implementing
|
||||
// the ttrpc interface we want the mock hybrid-vsock server to serve.
|
||||
HybridVSockTTRPCMockImp
|
||||
|
||||
listener net.Listener
|
||||
}
|
||||
|
||||
// ProxyGRPCMock is the gRPC proxy mock
|
||||
type ProxyGRPCMock struct {
|
||||
// GRPCImplementer is the structure implementing
|
||||
// the GRPC interface we want the proxy to serve.
|
||||
GRPCImplementer interface{}
|
||||
|
||||
// GRPCRegister is the registration routine for
|
||||
// the GRPC service.
|
||||
GRPCRegister func(s *ttrpc.Server, srv interface{})
|
||||
|
||||
listener net.Listener
|
||||
func (hv *HybridVSockTTRPCMock) ttrpcRegister(s *ttrpc.Server) {
|
||||
pb.RegisterAgentServiceService(s, &hv.HybridVSockTTRPCMockImp)
|
||||
pb.RegisterHealthService(s, &hv.HybridVSockTTRPCMockImp)
|
||||
}
|
||||
|
||||
// Start starts the UNIX proxy mock
|
||||
func (p *ProxyUnixMock) Start(URL string) error {
|
||||
if p.ClientHandler == nil {
|
||||
return fmt.Errorf("Missing client handler")
|
||||
// Start starts the ttrpc-based mock hybrid-vsock server
|
||||
func (hv *HybridVSockTTRPCMock) Start(socketAddr string) error {
|
||||
if socketAddr == "" {
|
||||
return fmt.Errorf("Missing Socket Address")
|
||||
}
|
||||
|
||||
url, err := url.Parse(URL)
|
||||
url, err := url.Parse(socketAddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l, err := net.Listen(url.Scheme, url.Path)
|
||||
l, err := net.Listen("unix", url.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.listener = l
|
||||
hv.listener = l
|
||||
|
||||
ttrpcServer, err := ttrpc.NewServer()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hv.ttrpcRegister(ttrpcServer)
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
l.Close()
|
||||
}()
|
||||
|
||||
for {
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
go p.ClientHandler(conn)
|
||||
}
|
||||
ttrpcServer.Serve(context.Background(), l)
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the UNIX proxy mock
|
||||
func (p *ProxyUnixMock) Stop() error {
|
||||
if p.listener == nil {
|
||||
return fmt.Errorf("Missing proxy listener")
|
||||
// Stop stops the ttrpc-based mock hybrid-vsock server
|
||||
func (hv *HybridVSockTTRPCMock) Stop() error {
|
||||
if hv.listener == nil {
|
||||
return fmt.Errorf("Missing mock hvbrid vsock listener")
|
||||
}
|
||||
|
||||
return p.listener.Close()
|
||||
return hv.listener.Close()
|
||||
}
|
||||
|
||||
// Start starts the gRPC proxy mock
|
||||
func (p *ProxyGRPCMock) Start(URL string) error {
|
||||
if p.GRPCImplementer == nil {
|
||||
return fmt.Errorf("Missing gRPC handler")
|
||||
}
|
||||
type HybridVSockTTRPCMockImp struct{}
|
||||
|
||||
if p.GRPCRegister == nil {
|
||||
return fmt.Errorf("Missing gRPC registration routine")
|
||||
}
|
||||
var emptyResp = &gpb.Empty{}
|
||||
|
||||
url, err := url.Parse(URL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l, err := net.Listen(url.Scheme, url.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.listener = l
|
||||
|
||||
grpcServer, err := ttrpc.NewServer()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.GRPCRegister(grpcServer, p.GRPCImplementer)
|
||||
|
||||
go func() {
|
||||
grpcServer.Serve(context.Background(), l)
|
||||
}()
|
||||
|
||||
return nil
|
||||
func (p *HybridVSockTTRPCMockImp) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
// Stop stops the gRPC proxy mock
|
||||
func (p *ProxyGRPCMock) Stop() error {
|
||||
if p.listener == nil {
|
||||
return fmt.Errorf("Missing proxy listener")
|
||||
}
|
||||
|
||||
return p.listener.Close()
|
||||
func (p *HybridVSockTTRPCMockImp) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) ExecProcess(ctx context.Context, req *pb.ExecProcessRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) SignalProcess(ctx context.Context, req *pb.SignalProcessRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) WaitProcess(ctx context.Context, req *pb.WaitProcessRequest) (*pb.WaitProcessResponse, error) {
|
||||
return &pb.WaitProcessResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) ListProcesses(ctx context.Context, req *pb.ListProcessesRequest) (*pb.ListProcessesResponse, error) {
|
||||
return &pb.ListProcessesResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) UpdateContainer(ctx context.Context, req *pb.UpdateContainerRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) WriteStdin(ctx context.Context, req *pb.WriteStreamRequest) (*pb.WriteStreamResponse, error) {
|
||||
return &pb.WriteStreamResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) ReadStdout(ctx context.Context, req *pb.ReadStreamRequest) (*pb.ReadStreamResponse, error) {
|
||||
return &pb.ReadStreamResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) ReadStderr(ctx context.Context, req *pb.ReadStreamRequest) (*pb.ReadStreamResponse, error) {
|
||||
return &pb.ReadStreamResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) CloseStdin(ctx context.Context, req *pb.CloseStdinRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) TtyWinResize(ctx context.Context, req *pb.TtyWinResizeRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) CreateSandbox(ctx context.Context, req *pb.CreateSandboxRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) DestroySandbox(ctx context.Context, req *pb.DestroySandboxRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) UpdateInterface(ctx context.Context, req *pb.UpdateInterfaceRequest) (*aTypes.Interface, error) {
|
||||
return &aTypes.Interface{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) UpdateRoutes(ctx context.Context, req *pb.UpdateRoutesRequest) (*pb.Routes, error) {
|
||||
return &pb.Routes{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) ListInterfaces(ctx context.Context, req *pb.ListInterfacesRequest) (*pb.Interfaces, error) {
|
||||
return &pb.Interfaces{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) ListRoutes(ctx context.Context, req *pb.ListRoutesRequest) (*pb.Routes, error) {
|
||||
return &pb.Routes{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) AddARPNeighbors(ctx context.Context, req *pb.AddARPNeighborsRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) OnlineCPUMem(ctx context.Context, req *pb.OnlineCPUMemRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) StatsContainer(ctx context.Context, req *pb.StatsContainerRequest) (*pb.StatsContainerResponse, error) {
|
||||
return &pb.StatsContainerResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) Check(ctx context.Context, req *pb.CheckRequest) (*pb.HealthCheckResponse, error) {
|
||||
return &pb.HealthCheckResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) Version(ctx context.Context, req *pb.CheckRequest) (*pb.VersionCheckResponse, error) {
|
||||
return &pb.VersionCheckResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) PauseContainer(ctx context.Context, req *pb.PauseContainerRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) ResumeContainer(ctx context.Context, req *pb.ResumeContainerRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) ReseedRandomDev(ctx context.Context, req *pb.ReseedRandomDevRequest) (*gpb.Empty, error) {
|
||||
return emptyResp, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) GetGuestDetails(ctx context.Context, req *pb.GuestDetailsRequest) (*pb.GuestDetailsResponse, error) {
|
||||
return &pb.GuestDetailsResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) SetGuestDateTime(ctx context.Context, req *pb.SetGuestDateTimeRequest) (*gpb.Empty, error) {
|
||||
return &gpb.Empty{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) CopyFile(ctx context.Context, req *pb.CopyFileRequest) (*gpb.Empty, error) {
|
||||
return &gpb.Empty{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) StartTracing(ctx context.Context, req *pb.StartTracingRequest) (*gpb.Empty, error) {
|
||||
return &gpb.Empty{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) StopTracing(ctx context.Context, req *pb.StopTracingRequest) (*gpb.Empty, error) {
|
||||
return &gpb.Empty{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) MemHotplugByProbe(ctx context.Context, req *pb.MemHotplugByProbeRequest) (*gpb.Empty, error) {
|
||||
return &gpb.Empty{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) GetOOMEvent(ctx context.Context, req *pb.GetOOMEventRequest) (*pb.OOMEvent, error) {
|
||||
return &pb.OOMEvent{}, nil
|
||||
}
|
||||
|
||||
func (p *HybridVSockTTRPCMockImp) GetMetrics(ctx context.Context, req *pb.GetMetricsRequest) (*pb.Metrics, error) {
|
||||
return &pb.Metrics{}, nil
|
||||
}
|
||||
|
||||
@@ -98,9 +98,6 @@ type RuntimeConfig struct {
|
||||
|
||||
AgentConfig vc.KataAgentConfig
|
||||
|
||||
ProxyType vc.ProxyType
|
||||
ProxyConfig vc.ProxyConfig
|
||||
|
||||
Console string
|
||||
|
||||
//Determines how the VM should be connected to the
|
||||
@@ -414,15 +411,6 @@ func addHypervisorConfigOverrides(ocispec specs.Spec, config *vc.SandboxConfig)
|
||||
}
|
||||
}
|
||||
|
||||
if value, ok := ocispec.Annotations[vcAnnotations.UseVSock]; ok {
|
||||
useVsock, err := strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error parsing annotation for use_vsock: Please specify boolean value 'true|false'")
|
||||
}
|
||||
|
||||
config.HypervisorConfig.UseVSock = useVsock
|
||||
}
|
||||
|
||||
if value, ok := ocispec.Annotations[vcAnnotations.DisableImageNvdimm]; ok {
|
||||
disableNvdimm, err := strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
@@ -851,9 +839,6 @@ func SandboxConfig(ocispec specs.Spec, runtime RuntimeConfig, bundlePath, cid, c
|
||||
|
||||
AgentConfig: runtime.AgentConfig,
|
||||
|
||||
ProxyType: runtime.ProxyType,
|
||||
ProxyConfig: runtime.ProxyConfig,
|
||||
|
||||
NetworkConfig: networkConfig,
|
||||
|
||||
Containers: []vc.ContainerConfig{containerConfig},
|
||||
|
||||
@@ -71,7 +71,6 @@ func TestMinimalSandboxConfig(t *testing.T) {
|
||||
|
||||
runtimeConfig := RuntimeConfig{
|
||||
HypervisorType: vc.QemuHypervisor,
|
||||
ProxyType: vc.KataProxyType,
|
||||
Console: consolePath,
|
||||
}
|
||||
|
||||
@@ -169,7 +168,6 @@ func TestMinimalSandboxConfig(t *testing.T) {
|
||||
Hostname: "testHostname",
|
||||
|
||||
HypervisorType: vc.QemuHypervisor,
|
||||
ProxyType: vc.KataProxyType,
|
||||
|
||||
NetworkConfig: expectedNetworkConfig,
|
||||
|
||||
@@ -783,7 +781,6 @@ func TestAddHypervisorAnnotations(t *testing.T) {
|
||||
ocispec.Annotations[vcAnnotations.CPUFeatures] = "pmu=off"
|
||||
ocispec.Annotations[vcAnnotations.DisableVhostNet] = "true"
|
||||
ocispec.Annotations[vcAnnotations.GuestHookPath] = "/usr/bin/"
|
||||
ocispec.Annotations[vcAnnotations.UseVSock] = "true"
|
||||
ocispec.Annotations[vcAnnotations.DisableImageNvdimm] = "true"
|
||||
ocispec.Annotations[vcAnnotations.HotplugVFIOOnRootBus] = "true"
|
||||
ocispec.Annotations[vcAnnotations.PCIeRootPort] = "2"
|
||||
@@ -819,7 +816,6 @@ func TestAddHypervisorAnnotations(t *testing.T) {
|
||||
assert.Equal(config.HypervisorConfig.CPUFeatures, "pmu=off")
|
||||
assert.Equal(config.HypervisorConfig.DisableVhostNet, true)
|
||||
assert.Equal(config.HypervisorConfig.GuestHookPath, "/usr/bin/")
|
||||
assert.Equal(config.HypervisorConfig.UseVSock, true)
|
||||
assert.Equal(config.HypervisorConfig.DisableImageNvdimm, true)
|
||||
assert.Equal(config.HypervisorConfig.HotplugVFIOOnRootBus, true)
|
||||
assert.Equal(config.HypervisorConfig.PCIeRootPort, uint32(2))
|
||||
|
||||
@@ -1,270 +0,0 @@
|
||||
// Copyright (c) 2017 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
package virtcontainers
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
|
||||
kataclient "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/client"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var buildinProxyConsoleProto = consoleProtoUnix
|
||||
|
||||
type proxyBuiltin struct {
|
||||
sandboxID string
|
||||
conn net.Conn
|
||||
}
|
||||
|
||||
// ProxyConfig is a structure storing information needed from any
|
||||
// proxy in order to be properly initialized.
|
||||
type ProxyConfig struct {
|
||||
Path string
|
||||
Debug bool
|
||||
}
|
||||
|
||||
// proxyParams is the structure providing specific parameters needed
|
||||
// for the execution of the proxy binary.
|
||||
type proxyParams struct {
|
||||
id string
|
||||
path string
|
||||
agentURL string
|
||||
consoleURL string
|
||||
logger *logrus.Entry
|
||||
hid int
|
||||
debug bool
|
||||
}
|
||||
|
||||
// ProxyType describes a proxy type.
type ProxyType string

const (
	// NoopProxyType is the noopProxy.
	NoopProxyType ProxyType = "noopProxy"

	// NoProxyType is the noProxy.
	NoProxyType ProxyType = "noProxy"

	// KataProxyType is the kataProxy.
	KataProxyType ProxyType = "kataProxy"

	// KataBuiltInProxyType is the kataBuiltInProxy.
	KataBuiltInProxyType ProxyType = "kataBuiltInProxy"
)

const (
	// consoleProtoUnix is the unix socket type of console.
	consoleProtoUnix = "unix"

	// consoleProtoPty is the pty type of console. Used mostly by kvmtools.
	consoleProtoPty = "pty"
)

// Set sets a proxy type based on the input string. An unrecognized
// value yields an error and leaves the receiver untouched.
func (pType *ProxyType) Set(value string) error {
	switch t := ProxyType(value); t {
	case NoopProxyType, NoProxyType, KataProxyType, KataBuiltInProxyType:
		*pType = t
		return nil
	default:
		return fmt.Errorf("Unknown proxy type %s", value)
	}
}

// String converts a proxy type to a string, returning "" for any value
// that is not one of the known proxy types.
func (pType *ProxyType) String() string {
	switch *pType {
	case NoopProxyType, NoProxyType, KataProxyType, KataBuiltInProxyType:
		return string(*pType)
	default:
		return ""
	}
}
|
||||
|
||||
// newProxy returns a proxy from a proxy type.
|
||||
func newProxy(pType ProxyType) (proxy, error) {
|
||||
switch pType {
|
||||
case "":
|
||||
return &kataBuiltInProxy{}, nil
|
||||
case NoopProxyType:
|
||||
return &noopProxy{}, nil
|
||||
case NoProxyType:
|
||||
return &noProxy{}, nil
|
||||
case KataProxyType:
|
||||
return &kataProxy{}, nil
|
||||
case KataBuiltInProxyType:
|
||||
return &kataBuiltInProxy{}, nil
|
||||
default:
|
||||
return &noopProxy{}, fmt.Errorf("Invalid proxy type: %s", pType)
|
||||
}
|
||||
}
|
||||
|
||||
func validateProxyParams(p proxyParams) error {
|
||||
if len(p.path) == 0 || len(p.id) == 0 || len(p.agentURL) == 0 || len(p.consoleURL) == 0 {
|
||||
return fmt.Errorf("Invalid proxy parameters %+v", p)
|
||||
}
|
||||
|
||||
if p.logger == nil {
|
||||
return fmt.Errorf("Invalid proxy parameter: proxy logger is not set")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateProxyConfig(proxyConfig ProxyConfig) error {
|
||||
if len(proxyConfig.Path) == 0 {
|
||||
return fmt.Errorf("Proxy path cannot be empty")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func defaultProxyURL(id, socketType string) (string, error) {
|
||||
switch socketType {
|
||||
case SocketTypeUNIX:
|
||||
store, err := persist.GetDriver()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
socketPath := filepath.Join(filepath.Join(store.RunStoragePath(), id), "proxy.sock")
|
||||
return fmt.Sprintf("unix://%s", socketPath), nil
|
||||
case SocketTypeVSOCK:
|
||||
// TODO Build the VSOCK default URL
|
||||
return "", nil
|
||||
default:
|
||||
return "", fmt.Errorf("Unknown socket type: %s", socketType)
|
||||
}
|
||||
}
|
||||
|
||||
func isProxyBuiltIn(pType ProxyType) bool {
|
||||
return pType == KataBuiltInProxyType
|
||||
}
|
||||
|
||||
// proxy is the virtcontainers proxy interface.
|
||||
type proxy interface {
|
||||
// start launches a proxy instance with specified parameters, returning
|
||||
// the PID of the process and the URL used to connect to it.
|
||||
start(params proxyParams) (int, string, error)
|
||||
|
||||
// stop terminates a proxy instance after all communications with the
|
||||
// agent inside the VM have been properly stopped.
|
||||
stop(pid int) error
|
||||
|
||||
//check if the proxy has watched the vm console.
|
||||
consoleWatched() bool
|
||||
}
|
||||
|
||||
func (p *proxyBuiltin) watchConsole(proto, console string, logger *logrus.Entry) (err error) {
|
||||
var (
|
||||
scanner *bufio.Scanner
|
||||
conn net.Conn
|
||||
)
|
||||
|
||||
switch proto {
|
||||
case consoleProtoUnix:
|
||||
conn, err = net.Dial("unix", console)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO: please see
|
||||
// https://github.com/kata-containers/runtime/issues/1940.
|
||||
case consoleProtoPty:
|
||||
fallthrough
|
||||
default:
|
||||
return fmt.Errorf("unknown console proto %s", proto)
|
||||
}
|
||||
|
||||
p.conn = conn
|
||||
|
||||
go func() {
|
||||
scanner = bufio.NewScanner(conn)
|
||||
for scanner.Scan() {
|
||||
logger.WithFields(logrus.Fields{
|
||||
"sandbox": p.sandboxID,
|
||||
"vmconsole": scanner.Text(),
|
||||
}).Debug("reading guest console")
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
if err == io.EOF {
|
||||
logger.Info("console watcher quits")
|
||||
} else {
|
||||
logger.WithError(err).WithFields(logrus.Fields{
|
||||
"console-protocol": proto,
|
||||
"console-socket": console,
|
||||
}).Error("Failed to read agent logs")
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// check if the proxy has watched the vm console.
|
||||
func (p *proxyBuiltin) consoleWatched() bool {
|
||||
return p.conn != nil
|
||||
}
|
||||
|
||||
// start is the proxy start implementation for builtin proxy.
|
||||
// It starts the console watcher for the guest.
|
||||
// It returns agentURL to let agent connect directly.
|
||||
func (p *proxyBuiltin) start(params proxyParams) (int, string, error) {
|
||||
if params.logger == nil {
|
||||
return -1, "", fmt.Errorf("Invalid proxy parameter: proxy logger is not set")
|
||||
}
|
||||
|
||||
if p.consoleWatched() {
|
||||
return -1, "", fmt.Errorf("The console has been watched for sandbox %s", params.id)
|
||||
}
|
||||
|
||||
params.logger.Debug("Start to watch the console")
|
||||
|
||||
p.sandboxID = params.id
|
||||
|
||||
// For firecracker, it hasn't support the console watching and it's consoleURL
|
||||
// will be set empty.
|
||||
// TODO: add support for hybrid vsocks, see https://github.com/kata-containers/runtime/issues/2098
|
||||
if params.debug && params.consoleURL != "" && !strings.HasPrefix(params.consoleURL, kataclient.HybridVSockScheme) {
|
||||
err := p.watchConsole(buildinProxyConsoleProto, params.consoleURL, params.logger)
|
||||
if err != nil {
|
||||
p.sandboxID = ""
|
||||
return -1, "", err
|
||||
}
|
||||
}
|
||||
|
||||
return params.hid, params.agentURL, nil
|
||||
}
|
||||
|
||||
// stop is the proxy stop implementation for builtin proxy.
|
||||
func (p *proxyBuiltin) stop(pid int) error {
|
||||
if p.conn != nil {
|
||||
p.conn.Close()
|
||||
p.conn = nil
|
||||
p.sandboxID = ""
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,297 +0,0 @@
|
||||
// Copyright (c) 2017 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
package virtcontainers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/fs"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var testDefaultLogger = logrus.WithField("proxy", "test")
|
||||
|
||||
func testSetProxyType(t *testing.T, value string, expected ProxyType) {
|
||||
var proxyType ProxyType
|
||||
assert := assert.New(t)
|
||||
|
||||
err := (&proxyType).Set(value)
|
||||
assert.NoError(err)
|
||||
assert.Equal(proxyType, expected)
|
||||
}
|
||||
|
||||
func TestSetKataProxyType(t *testing.T) {
|
||||
testSetProxyType(t, "kataProxy", KataProxyType)
|
||||
}
|
||||
|
||||
func TestSetNoopProxyType(t *testing.T) {
|
||||
testSetProxyType(t, "noopProxy", NoopProxyType)
|
||||
}
|
||||
|
||||
func TestSetNoProxyType(t *testing.T) {
|
||||
testSetProxyType(t, "noProxy", NoProxyType)
|
||||
}
|
||||
|
||||
func TestSetKataBuiltInProxyType(t *testing.T) {
|
||||
testSetProxyType(t, "kataBuiltInProxy", KataBuiltInProxyType)
|
||||
}
|
||||
|
||||
func TestSetUnknownProxyType(t *testing.T) {
|
||||
var proxyType ProxyType
|
||||
assert := assert.New(t)
|
||||
|
||||
unknownType := "unknown"
|
||||
|
||||
err := (&proxyType).Set(unknownType)
|
||||
assert.Error(err)
|
||||
assert.NotEqual(proxyType, NoopProxyType)
|
||||
assert.NotEqual(proxyType, NoProxyType)
|
||||
assert.NotEqual(proxyType, KataProxyType)
|
||||
}
|
||||
|
||||
func testStringFromProxyType(t *testing.T, proxyType ProxyType, expected string) {
|
||||
proxyTypeStr := (&proxyType).String()
|
||||
assert.Equal(t, proxyTypeStr, expected)
|
||||
}
|
||||
|
||||
func TestStringFromKataProxyType(t *testing.T) {
|
||||
proxyType := KataProxyType
|
||||
testStringFromProxyType(t, proxyType, "kataProxy")
|
||||
}
|
||||
|
||||
func TestStringFromNoProxyType(t *testing.T) {
|
||||
proxyType := NoProxyType
|
||||
testStringFromProxyType(t, proxyType, "noProxy")
|
||||
}
|
||||
|
||||
func TestStringFromNoopProxyType(t *testing.T) {
|
||||
proxyType := NoopProxyType
|
||||
testStringFromProxyType(t, proxyType, "noopProxy")
|
||||
}
|
||||
|
||||
func TestStringFromKataBuiltInProxyType(t *testing.T) {
|
||||
proxyType := KataBuiltInProxyType
|
||||
testStringFromProxyType(t, proxyType, "kataBuiltInProxy")
|
||||
}
|
||||
|
||||
func TestStringFromUnknownProxyType(t *testing.T) {
|
||||
var proxyType ProxyType
|
||||
testStringFromProxyType(t, proxyType, "")
|
||||
}
|
||||
|
||||
func testNewProxyFromProxyType(t *testing.T, proxyType ProxyType, expected proxy) {
|
||||
result, err := newProxy(proxyType)
|
||||
assert := assert.New(t)
|
||||
assert.NoError(err)
|
||||
assert.Exactly(result, expected)
|
||||
}
|
||||
|
||||
func TestNewProxyFromKataProxyType(t *testing.T) {
|
||||
proxyType := KataProxyType
|
||||
expectedProxy := &kataProxy{}
|
||||
testNewProxyFromProxyType(t, proxyType, expectedProxy)
|
||||
}
|
||||
|
||||
func TestNewProxyFromNoProxyType(t *testing.T) {
|
||||
proxyType := NoProxyType
|
||||
expectedProxy := &noProxy{}
|
||||
testNewProxyFromProxyType(t, proxyType, expectedProxy)
|
||||
}
|
||||
|
||||
func TestNewProxyFromNoopProxyType(t *testing.T) {
|
||||
proxyType := NoopProxyType
|
||||
expectedProxy := &noopProxy{}
|
||||
testNewProxyFromProxyType(t, proxyType, expectedProxy)
|
||||
}
|
||||
|
||||
func TestNewProxyFromKataBuiltInProxyType(t *testing.T) {
|
||||
proxyType := KataBuiltInProxyType
|
||||
expectedProxy := &kataBuiltInProxy{}
|
||||
testNewProxyFromProxyType(t, proxyType, expectedProxy)
|
||||
}
|
||||
|
||||
func TestNewProxyFromUnknownProxyType(t *testing.T) {
|
||||
var proxyType ProxyType
|
||||
_, err := newProxy(proxyType)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func testNewProxyFromSandboxConfig(t *testing.T, sandboxConfig SandboxConfig) {
|
||||
assert := assert.New(t)
|
||||
|
||||
_, err := newProxy(sandboxConfig.ProxyType)
|
||||
assert.NoError(err)
|
||||
|
||||
err = validateProxyConfig(sandboxConfig.ProxyConfig)
|
||||
assert.NoError(err)
|
||||
}
|
||||
|
||||
var testProxyPath = "proxy-path"
|
||||
|
||||
func TestNewProxyConfigFromKataProxySandboxConfig(t *testing.T) {
|
||||
proxyConfig := ProxyConfig{
|
||||
Path: testProxyPath,
|
||||
}
|
||||
|
||||
sandboxConfig := SandboxConfig{
|
||||
ProxyType: KataProxyType,
|
||||
ProxyConfig: proxyConfig,
|
||||
}
|
||||
|
||||
testNewProxyFromSandboxConfig(t, sandboxConfig)
|
||||
}
|
||||
|
||||
func TestNewProxyConfigNoPathFailure(t *testing.T) {
|
||||
assert.Error(t, validateProxyConfig(ProxyConfig{}))
|
||||
}
|
||||
|
||||
const sandboxID = "123456789"
|
||||
|
||||
func testDefaultProxyURL(expectedURL string, socketType string, sandboxID string) error {
|
||||
sandbox := &Sandbox{
|
||||
id: sandboxID,
|
||||
}
|
||||
|
||||
url, err := defaultProxyURL(sandbox.id, socketType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if url != expectedURL {
|
||||
return fmt.Errorf("Mismatched URL: %s vs %s", url, expectedURL)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestDefaultProxyURLUnix(t *testing.T) {
|
||||
path := filepath.Join(filepath.Join(fs.MockRunStoragePath(), sandboxID), "proxy.sock")
|
||||
socketPath := fmt.Sprintf("unix://%s", path)
|
||||
assert.NoError(t, testDefaultProxyURL(socketPath, SocketTypeUNIX, sandboxID))
|
||||
}
|
||||
|
||||
func TestDefaultProxyURLVSock(t *testing.T) {
|
||||
assert.NoError(t, testDefaultProxyURL("", SocketTypeVSOCK, sandboxID))
|
||||
}
|
||||
|
||||
func TestDefaultProxyURLUnknown(t *testing.T) {
|
||||
path := filepath.Join(filepath.Join(fs.MockRunStoragePath(), sandboxID), "proxy.sock")
|
||||
socketPath := fmt.Sprintf("unix://%s", path)
|
||||
assert.Error(t, testDefaultProxyURL(socketPath, "foobar", sandboxID))
|
||||
}
|
||||
|
||||
func testProxyStart(t *testing.T, agent agent, proxy proxy) {
|
||||
assert := assert.New(t)
|
||||
|
||||
assert.NotNil(proxy)
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "")
|
||||
assert.NoError(err)
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
type testData struct {
|
||||
params proxyParams
|
||||
expectedURI string
|
||||
expectError bool
|
||||
}
|
||||
|
||||
invalidPath := filepath.Join(tmpdir, "enoent")
|
||||
expectedSocketPath := filepath.Join(filepath.Join(fs.MockRunStoragePath(), testSandboxID), "proxy.sock")
|
||||
expectedURI := fmt.Sprintf("unix://%s", expectedSocketPath)
|
||||
|
||||
data := []testData{
|
||||
{proxyParams{}, "", true},
|
||||
{
|
||||
// no path
|
||||
proxyParams{
|
||||
id: "foobar",
|
||||
agentURL: "agentURL",
|
||||
consoleURL: "consoleURL",
|
||||
logger: testDefaultLogger,
|
||||
},
|
||||
"", true,
|
||||
},
|
||||
{
|
||||
// invalid path
|
||||
proxyParams{
|
||||
id: "foobar",
|
||||
path: invalidPath,
|
||||
agentURL: "agentURL",
|
||||
consoleURL: "consoleURL",
|
||||
logger: testDefaultLogger,
|
||||
},
|
||||
"", true,
|
||||
},
|
||||
{
|
||||
// good case
|
||||
proxyParams{
|
||||
id: testSandboxID,
|
||||
path: "echo",
|
||||
agentURL: "agentURL",
|
||||
consoleURL: "consoleURL",
|
||||
logger: testDefaultLogger,
|
||||
},
|
||||
expectedURI, false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, d := range data {
|
||||
pid, uri, err := proxy.start(d.params)
|
||||
if d.expectError {
|
||||
assert.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
assert.NoError(err)
|
||||
assert.True(pid > 0)
|
||||
assert.Equal(d.expectedURI, uri)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateProxyConfig(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
config := ProxyConfig{}
|
||||
err := validateProxyConfig(config)
|
||||
assert.Error(err)
|
||||
|
||||
config.Path = "foobar"
|
||||
err = validateProxyConfig(config)
|
||||
assert.Nil(err)
|
||||
}
|
||||
|
||||
func TestValidateProxyParams(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
p := proxyParams{}
|
||||
err := validateProxyParams(p)
|
||||
assert.Error(err)
|
||||
|
||||
p.path = "foobar"
|
||||
err = validateProxyParams(p)
|
||||
assert.Error(err)
|
||||
|
||||
p.id = "foobar1"
|
||||
err = validateProxyParams(p)
|
||||
assert.Error(err)
|
||||
|
||||
p.agentURL = "foobar2"
|
||||
err = validateProxyParams(p)
|
||||
assert.Error(err)
|
||||
|
||||
p.consoleURL = "foobar3"
|
||||
err = validateProxyParams(p)
|
||||
assert.Error(err)
|
||||
|
||||
p.logger = &logrus.Entry{}
|
||||
err = validateProxyParams(p)
|
||||
assert.Nil(err)
|
||||
}
|
||||
@@ -164,11 +164,6 @@ func (q *qemu) kernelParameters() string {
|
||||
// set the maximum number of vCPUs
|
||||
params = append(params, Param{"nr_cpus", fmt.Sprintf("%d", q.config.DefaultMaxVCPUs)})
|
||||
|
||||
// Add a kernel param to indicate if vsock is being used.
|
||||
// This will be consumed by the agent to determine if it needs to listen on
|
||||
// a serial or vsock channel
|
||||
params = append(params, Param{vsockKernelOption, strconv.FormatBool(q.config.UseVSock)})
|
||||
|
||||
// add the params specified by the provided config. As the kernel
|
||||
// honours the last parameter value set and since the config-provided
|
||||
// params are added here, they will take priority over the defaults.
|
||||
@@ -2259,8 +2254,8 @@ func (q *qemu) check() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *qemu) generateSocket(id string, useVsock bool) (interface{}, error) {
|
||||
return generateVMSocket(id, useVsock, q.store.RunVMStoragePath())
|
||||
func (q *qemu) generateSocket(id string) (interface{}, error) {
|
||||
return generateVMSocket(id, q.store.RunVMStoragePath())
|
||||
}
|
||||
|
||||
func (q *qemu) isRateLimiterBuiltin() bool {
|
||||
|
||||
@@ -236,7 +236,7 @@ func TestQemuArchBaseAppendConsoles(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
qemuArchBase := newQemuArchBase()
|
||||
|
||||
path := filepath.Join(filepath.Join(fs.MockRunStoragePath(), sandboxID), consoleSocket)
|
||||
path := filepath.Join(filepath.Join(fs.MockRunStoragePath(), "test"), consoleSocket)
|
||||
|
||||
expectedOut := []govmmQemu.Device{
|
||||
govmmQemu.SerialDevice{
|
||||
|
||||
@@ -57,7 +57,7 @@ func testQemuKernelParameters(t *testing.T, kernelParams []Param, expected strin
|
||||
}
|
||||
|
||||
func TestQemuKernelParameters(t *testing.T) {
|
||||
expectedOut := fmt.Sprintf("panic=1 nr_cpus=%d agent.use_vsock=false foo=foo bar=bar", MaxQemuVCPUs())
|
||||
expectedOut := fmt.Sprintf("panic=1 nr_cpus=%d foo=foo bar=bar", MaxQemuVCPUs())
|
||||
params := []Param{
|
||||
{
|
||||
Key: "foo",
|
||||
|
||||
@@ -83,9 +83,6 @@ type SandboxConfig struct {
|
||||
|
||||
AgentConfig KataAgentConfig
|
||||
|
||||
ProxyType ProxyType
|
||||
ProxyConfig ProxyConfig
|
||||
|
||||
NetworkConfig NetworkConfig
|
||||
|
||||
// Volumes is a list of shared volumes between the host and the Sandbox.
|
||||
@@ -135,17 +132,6 @@ func (s *Sandbox) trace(name string) (opentracing.Span, context.Context) {
|
||||
return span, ctx
|
||||
}
|
||||
|
||||
func (s *Sandbox) startProxy() error {
|
||||
|
||||
// If the proxy is KataBuiltInProxyType type, it needs to restart the proxy
|
||||
// to watch the guest console if it hadn't been watched.
|
||||
if s.agent == nil {
|
||||
return fmt.Errorf("sandbox %s missed agent pointer", s.ID())
|
||||
}
|
||||
|
||||
return s.agent.startProxy(s)
|
||||
}
|
||||
|
||||
// valid checks that the sandbox configuration is valid.
|
||||
func (sandboxConfig *SandboxConfig) valid() bool {
|
||||
if sandboxConfig.ID == "" {
|
||||
@@ -956,8 +942,6 @@ func (s *Sandbox) startVM() (err error) {
|
||||
HypervisorType: s.config.HypervisorType,
|
||||
HypervisorConfig: s.config.HypervisorConfig,
|
||||
AgentConfig: s.config.AgentConfig,
|
||||
ProxyType: s.config.ProxyType,
|
||||
ProxyConfig: s.config.ProxyConfig,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1397,7 +1381,7 @@ func (s *Sandbox) ResumeContainer(containerID string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// createContainers registers all containers to the proxy, create the
|
||||
// createContainers registers all containers, create the
|
||||
// containers in the guest and starts one shim per container.
|
||||
func (s *Sandbox) createContainers() error {
|
||||
span, _ := s.trace("createContainers")
|
||||
|
||||
@@ -32,6 +32,7 @@ const (
|
||||
|
||||
const (
|
||||
HybridVSockScheme = "hvsock"
|
||||
MockHybridVSockScheme = "mock"
|
||||
VSockScheme = "vsock"
|
||||
)
|
||||
|
||||
@@ -200,6 +201,15 @@ func (s *HybridVSock) String() string {
|
||||
return fmt.Sprintf("%s://%s:%d", HybridVSockScheme, s.UdsPath, s.Port)
|
||||
}
|
||||
|
||||
// MockHybridVSock defines a mock hybrid vsocket for tests only.
|
||||
type MockHybridVSock struct {
|
||||
UdsPath string
|
||||
}
|
||||
|
||||
func (s *MockHybridVSock) String() string {
|
||||
return fmt.Sprintf("%s://%s", MockHybridVSockScheme, s.UdsPath)
|
||||
}
|
||||
|
||||
// Socket defines a socket to communicate between
|
||||
// the host and any process inside the VM.
|
||||
type Socket struct {
|
||||
|
||||
@@ -235,12 +235,12 @@ func BuildSocketPath(elements ...string) (string, error) {
|
||||
}
|
||||
|
||||
// SupportsVsocks returns true if vsocks are supported, otherwise false
|
||||
func SupportsVsocks() bool {
|
||||
func SupportsVsocks() (bool, error) {
|
||||
if _, err := os.Stat(VHostVSockDevicePath); err != nil {
|
||||
return false
|
||||
return false, fmt.Errorf("host system doesn't support vsock: %v", err)
|
||||
}
|
||||
|
||||
return true
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SupportsIfb returns true if ifb are supported, otherwise false
|
||||
|
||||
@@ -26,10 +26,6 @@ type VM struct {
|
||||
hypervisor hypervisor
|
||||
agent agent
|
||||
|
||||
proxy proxy
|
||||
proxyPid int
|
||||
proxyURL string
|
||||
|
||||
cpu uint32
|
||||
memory uint32
|
||||
|
||||
@@ -44,9 +40,6 @@ type VMConfig struct {
|
||||
HypervisorConfig HypervisorConfig
|
||||
|
||||
AgentConfig KataAgentConfig
|
||||
|
||||
ProxyType ProxyType
|
||||
ProxyConfig ProxyConfig
|
||||
}
|
||||
|
||||
// Valid check VMConfig validity.
|
||||
@@ -89,50 +82,8 @@ func GrpcToVMConfig(j *pb.GrpcVMConfig) (*VMConfig, error) {
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
func setupProxy(h hypervisor, agent agent, config VMConfig, id string) (int, string, proxy, error) {
|
||||
consoleURL, err := h.getSandboxConsole(id)
|
||||
if err != nil {
|
||||
return -1, "", nil, err
|
||||
}
|
||||
agentURL, err := agent.getAgentURL()
|
||||
if err != nil {
|
||||
return -1, "", nil, err
|
||||
}
|
||||
|
||||
proxy, err := newProxy(config.ProxyType)
|
||||
if err != nil {
|
||||
return -1, "", nil, err
|
||||
}
|
||||
|
||||
proxyParams := proxyParams{
|
||||
id: id,
|
||||
path: config.ProxyConfig.Path,
|
||||
agentURL: agentURL,
|
||||
consoleURL: consoleURL,
|
||||
logger: virtLog.WithField("vm", id),
|
||||
debug: config.ProxyConfig.Debug,
|
||||
}
|
||||
pid, url, err := proxy.start(proxyParams)
|
||||
if err != nil {
|
||||
virtLog.WithFields(logrus.Fields{
|
||||
"vm": id,
|
||||
"proxy type": config.ProxyType,
|
||||
"params": proxyParams,
|
||||
}).WithError(err).Error("failed to start proxy")
|
||||
return -1, "", nil, err
|
||||
}
|
||||
|
||||
return pid, url, proxy, nil
|
||||
}
|
||||
|
||||
// NewVM creates a new VM based on provided VMConfig.
|
||||
func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
|
||||
var (
|
||||
proxy proxy
|
||||
pid int
|
||||
url string
|
||||
)
|
||||
|
||||
// 1. setup hypervisor
|
||||
hypervisor, err := newHypervisor(config.HypervisorType)
|
||||
if err != nil {
|
||||
@@ -169,7 +120,11 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
|
||||
agent := newAagentFunc()
|
||||
|
||||
vmSharePath := buildVMSharePath(id, store.RunVMStoragePath())
|
||||
err = agent.configure(hypervisor, id, vmSharePath, isProxyBuiltIn(config.ProxyType), config.AgentConfig)
|
||||
err = agent.configure(hypervisor, id, vmSharePath, config.AgentConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = agent.setAgentURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -186,22 +141,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
|
||||
}
|
||||
}()
|
||||
|
||||
// 4. setup proxy
|
||||
pid, url, proxy, err = setupProxy(hypervisor, agent, config, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
virtLog.WithField("vm", id).WithError(err).Info("clean up proxy")
|
||||
proxy.stop(pid)
|
||||
}
|
||||
}()
|
||||
if err = agent.setProxy(nil, proxy, pid, url); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 5. check agent aliveness
|
||||
// 4. check agent aliveness
|
||||
// VMs booted from template are paused, do not check
|
||||
if !config.HypervisorConfig.BootFromTemplate {
|
||||
virtLog.WithField("vm", id).Info("check agent status")
|
||||
@@ -215,9 +155,6 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
|
||||
id: id,
|
||||
hypervisor: hypervisor,
|
||||
agent: agent,
|
||||
proxy: proxy,
|
||||
proxyPid: pid,
|
||||
proxyURL: url,
|
||||
cpu: config.HypervisorConfig.NumVCPUs,
|
||||
memory: config.HypervisorConfig.MemorySize,
|
||||
store: store,
|
||||
@@ -254,21 +191,12 @@ func NewVMFromGrpc(ctx context.Context, v *pb.GrpcVM, config VMConfig) (*VM, err
|
||||
// create agent instance
|
||||
newAagentFunc := getNewAgentFunc(ctx)
|
||||
agent := newAagentFunc()
|
||||
agent.configureFromGrpc(hypervisor, v.Id, isProxyBuiltIn(config.ProxyType), config.AgentConfig)
|
||||
|
||||
proxy, err := newProxy(config.ProxyType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
agent.setProxyFromGrpc(proxy, int(v.ProxyPid), v.ProxyURL)
|
||||
agent.configureFromGrpc(hypervisor, v.Id, config.AgentConfig)
|
||||
|
||||
return &VM{
|
||||
id: v.Id,
|
||||
hypervisor: hypervisor,
|
||||
agent: agent,
|
||||
proxy: proxy,
|
||||
proxyPid: int(v.ProxyPid),
|
||||
proxyURL: v.ProxyURL,
|
||||
cpu: v.Cpu,
|
||||
memory: v.Memory,
|
||||
cpuDelta: v.CpuDelta,
|
||||
@@ -308,16 +236,13 @@ func (v *VM) Start() error {
|
||||
return v.hypervisor.startSandbox(vmStartTimeout)
|
||||
}
|
||||
|
||||
// Disconnect agent and proxy connections to a VM
|
||||
// Disconnect agent connections to a VM
|
||||
func (v *VM) Disconnect() error {
|
||||
v.logger().Info("kill vm")
|
||||
|
||||
if err := v.agent.disconnect(); err != nil {
|
||||
v.logger().WithError(err).Error("failed to disconnect agent")
|
||||
}
|
||||
if err := v.proxy.stop(v.proxyPid); err != nil {
|
||||
v.logger().WithError(err).Error("failed to stop proxy")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -413,14 +338,8 @@ func (v *VM) assignSandbox(s *Sandbox) error {
|
||||
"vmSockDir": vmSockDir,
|
||||
"sbSharePath": sbSharePath,
|
||||
"sbSockDir": sbSockDir,
|
||||
"proxy-pid": v.proxyPid,
|
||||
"proxy-url": v.proxyURL,
|
||||
}).Infof("assign vm to sandbox %s", s.id)
|
||||
|
||||
if err := s.agent.setProxy(s, v.proxy, v.proxyPid, v.proxyURL); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.agent.reuseAgent(v.agent); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -455,9 +374,6 @@ func (v *VM) ToGrpc(config VMConfig) (*pb.GrpcVM, error) {
|
||||
Id: v.id,
|
||||
Hypervisor: hJSON,
|
||||
|
||||
ProxyPid: int64(v.proxyPid),
|
||||
ProxyURL: v.proxyURL,
|
||||
|
||||
Cpu: v.cpu,
|
||||
Memory: v.memory,
|
||||
CpuDelta: v.cpuDelta,
|
||||
|
||||
@@ -24,7 +24,6 @@ func TestNewVM(t *testing.T) {
|
||||
|
||||
config := VMConfig{
|
||||
HypervisorType: MockHypervisor,
|
||||
ProxyType: NoopProxyType,
|
||||
}
|
||||
hyperConfig := HypervisorConfig{
|
||||
KernelPath: testDir,
|
||||
@@ -97,33 +96,12 @@ func TestVMConfigValid(t *testing.T) {
|
||||
assert.Nil(err)
|
||||
}
|
||||
|
||||
func TestSetupProxy(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
config := VMConfig{
|
||||
HypervisorType: MockHypervisor,
|
||||
}
|
||||
|
||||
hypervisor := &mockHypervisor{}
|
||||
agent := &mockAgent{}
|
||||
|
||||
// wrong proxy type
|
||||
config.ProxyType = ProxyType("invalidProxyType")
|
||||
_, _, _, err := setupProxy(hypervisor, agent, config, "foobar")
|
||||
assert.NotNil(err)
|
||||
|
||||
config.ProxyType = NoopProxyType
|
||||
_, _, _, err = setupProxy(hypervisor, agent, config, "foobar")
|
||||
assert.Nil(err)
|
||||
}
|
||||
|
||||
func TestVMConfigGrpc(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
config := VMConfig{
|
||||
HypervisorType: QemuHypervisor,
|
||||
HypervisorConfig: newQemuConfig(),
|
||||
AgentConfig: KataAgentConfig{false, true, false, false, 0, "", "", []string{}},
|
||||
ProxyType: NoopProxyType,
|
||||
AgentConfig: KataAgentConfig{true, false, false, 0, "", "", []string{}},
|
||||
}
|
||||
|
||||
p, err := config.ToGrpc()
|
||||
|
||||
Reference in New Issue
Block a user