Merge pull request #324 from bergwolf/runtime-1.11.1-for-2.0

port runtime stable commits
This commit is contained in:
Julio Montes
2020-06-23 07:12:44 -05:00
committed by GitHub
17 changed files with 164 additions and 96 deletions

1
src/agent/Cargo.lock generated
View File

@@ -232,7 +232,6 @@ dependencies = [
"slog-async",
"slog-json",
"slog-scope",
"tempfile",
]
[[package]]

View File

@@ -224,9 +224,6 @@ type agent interface {
// configureFromGrpc will update agent settings based on provided arguments, which come from gRPC
configureFromGrpc(h hypervisor, id string, builtin bool, config interface{}) error
// getSharePath will return the agent 9pfs share mount path
getSharePath(id string) string
// reseedRNG will reseed the guest random number generator
reseedRNG(data []byte) error

View File

@@ -16,6 +16,7 @@ import (
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/compatoci"
vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/types"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/store"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
specs "github.com/opencontainers/runtime-spec/specs-go"
opentracing "github.com/opentracing/opentracing-go"
@@ -50,6 +51,8 @@ func SetLogger(ctx context.Context, logger *logrus.Entry) {
deviceApi.SetLogger(virtLog)
compatoci.SetLogger(virtLog)
store.SetLogger(virtLog)
deviceConfig.SetLogger(virtLog)
}
// CreateSandbox is the virtcontainers sandbox creation entry point.

View File

@@ -340,7 +340,7 @@ func TestStartSandboxKataAgentSuccessful(t *testing.T) {
// TODO: defaultSharedDir is a hyper var = /run/hyper/shared/sandboxes
// do we need to unmount sandboxes and containers?
err = bindUnmountAllRootfs(ctx, testDir, pImpl)
err = bindUnmountAllRootfs(ctx, filepath.Join(testDir, p.ID()), pImpl)
assert.NoError(err)
}
@@ -474,7 +474,7 @@ func TestRunSandboxKataAgentSuccessful(t *testing.T) {
_, err = os.Stat(sandboxDir)
assert.NoError(err)
err = bindUnmountAllRootfs(ctx, testDir, pImpl)
err = bindUnmountAllRootfs(ctx, filepath.Join(testDir, p.ID()), pImpl)
assert.NoError(err)
}

View File

@@ -203,7 +203,7 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
clh.Logger().WithField("function", "createSandbox").Info("Sandbox already exist, loading from state")
clh.virtiofsd = &virtiofsd{
PID: clh.state.VirtiofsdPID,
sourcePath: filepath.Join(kataHostSharedDir(), clh.id),
sourcePath: filepath.Join(getSharePath(clh.id)),
debug: clh.config.Debug,
socketPath: virtiofsdSocketPath,
}
@@ -308,7 +308,7 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
clh.virtiofsd = &virtiofsd{
path: clh.config.VirtioFSDaemon,
sourcePath: filepath.Join(kataHostSharedDir(), clh.id),
sourcePath: filepath.Join(getSharePath(clh.id)),
socketPath: virtiofsdSocketPath,
extraArgs: clh.config.VirtioFSExtraArgs,
debug: clh.config.Debug,

View File

@@ -480,7 +480,7 @@ func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, guestSharedDir s
}
} else {
// These mounts are created in the shared dir
mountDest := filepath.Join(hostSharedDir, c.sandbox.id, filename)
mountDest := filepath.Join(hostSharedDir, filename)
if err := bindMount(c.ctx, m.Source, mountDest, false, "private"); err != nil {
return "", false, err
}
@@ -850,7 +850,7 @@ func (c *Container) rollbackFailingContainerCreation() {
if err := c.unmountHostMounts(); err != nil {
c.Logger().WithError(err).Error("rollback failed unmountHostMounts()")
}
if err := bindUnmountContainerRootfs(c.ctx, kataHostSharedDir(), c.sandbox.id, c.id); err != nil {
if err := bindUnmountContainerRootfs(c.ctx, getMountPath(c.sandbox.id), c.id); err != nil {
c.Logger().WithError(err).Error("rollback failed bindUnmountContainerRootfs()")
}
}
@@ -875,6 +875,7 @@ func (c *Container) create() (err error) {
// of rolling back all the actions previously performed.
defer func() {
if err != nil {
c.Logger().WithError(err).Error("container create failed")
c.rollbackFailingContainerCreation()
}
}()
@@ -1119,7 +1120,7 @@ func (c *Container) stop(force bool) error {
return err
}
if err := bindUnmountContainerRootfs(c.ctx, kataHostSharedDir(), c.sandbox.id, c.id); err != nil && !force {
if err := bindUnmountContainerRootfs(c.ctx, getMountPath(c.sandbox.id), c.id); err != nil && !force {
return err
}

View File

@@ -29,6 +29,13 @@ var (
pmemLog = logrus.WithField("source", "virtcontainers/device/config")
)
// SetLogger sets up a logger for this pkg
func SetLogger(logger *logrus.Entry) {
	// Keep the fields already attached to the package logger (e.g. the
	// "source" field set at init) while switching to the caller's entry.
	pmemLog = logger.WithFields(pmemLog.Data)
}
// PmemDeviceInfo returns a DeviceInfo if a loop device
// is mounted on source, and the backing file of the loop device
// has the PFN signature.

View File

@@ -138,6 +138,26 @@ var kataHostSharedDir = func() string {
return defaultKataHostSharedDir
}
// Shared path handling:
// 1. create two directories for each sandbox:
// -. /run/kata-containers/shared/sandboxes/$sbx_id/mounts/, a directory to hold all host/guest shared mounts
// -. /run/kata-containers/shared/sandboxes/$sbx_id/shared/, a host/guest shared directory (9pfs/virtiofs source dir)
//
// 2. /run/kata-containers/shared/sandboxes/$sbx_id/mounts/ is bind mounted readonly to /run/kata-containers/shared/sandboxes/$sbx_id/shared/, so guest cannot modify it
//
// 3. host-guest shared files/directories are mounted one-level under /run/kata-containers/shared/sandboxes/$sbx_id/mounts/ and thus present to guest at one level under /run/kata-containers/shared/sandboxes/$sbx_id/shared/
// getSharePath returns the host-side host/guest shared directory for the
// given sandbox id (the 9pfs/virtiofs source directory presented to the guest).
func getSharePath(id string) string {
	sandboxDir := filepath.Join(kataHostSharedDir(), id)
	return filepath.Join(sandboxDir, "shared")
}
// getMountPath returns the host-side directory under which all host/guest
// shared mounts for the given sandbox id are created.
func getMountPath(id string) string {
	sandboxDir := filepath.Join(kataHostSharedDir(), id)
	return filepath.Join(sandboxDir, "mounts")
}
// getSandboxPath returns the per-sandbox host directory that holds both the
// "shared" and "mounts" subdirectories for the given sandbox id.
func getSandboxPath(id string) string {
	base := kataHostSharedDir()
	return filepath.Join(base, id)
}
// The function is declared this way for mocking in unit tests
var kataGuestSharedDir = func() string {
if rootless.IsRootless() {
@@ -156,6 +176,10 @@ var kataGuestSandboxDir = func() string {
return defaultKataGuestSandboxDir
}
// kataGuestSandboxStorageDir returns the in-guest directory used as a staging
// area for sandbox storage mounts (e.g. the temporary mount points created for
// Storage objects before the OCI mounts take over).
// NOTE(review): declared as a func var, presumably for mocking in unit tests
// like the neighboring kataGuestSharedDir — confirm.
var kataGuestSandboxStorageDir = func() string {
	return filepath.Join(defaultKataGuestSandboxDir, "storage")
}
func ephemeralPath() string {
if rootless.IsRootless() {
return filepath.Join(kataGuestSandboxDir(), kataEphemeralDevType)
@@ -221,10 +245,6 @@ func (k *kataAgent) Logger() *logrus.Entry {
return virtLog.WithField("subsystem", "kata_agent")
}
func (k *kataAgent) getSharePath(id string) string {
return filepath.Join(kataHostSharedDir(), id)
}
func (k *kataAgent) longLiveConn() bool {
return k.keepConn
}
@@ -354,7 +374,7 @@ func (k *kataAgent) capabilities() types.Capabilities {
return caps
}
func (k *kataAgent) internalConfigure(h hypervisor, id, sharePath string, builtin bool, config interface{}) error {
func (k *kataAgent) internalConfigure(h hypervisor, id string, builtin bool, config interface{}) error {
var err error
if config != nil {
switch c := config.(type) {
@@ -376,7 +396,7 @@ func (k *kataAgent) internalConfigure(h hypervisor, id, sharePath string, builti
}
func (k *kataAgent) configure(h hypervisor, id, sharePath string, builtin bool, config interface{}) error {
err := k.internalConfigure(h, id, sharePath, builtin, config)
err := k.internalConfigure(h, id, builtin, config)
if err != nil {
return err
}
@@ -422,14 +442,36 @@ func (k *kataAgent) configure(h hypervisor, id, sharePath string, builtin bool,
}
func (k *kataAgent) configureFromGrpc(h hypervisor, id string, builtin bool, config interface{}) error {
return k.internalConfigure(h, id, "", builtin, config)
return k.internalConfigure(h, id, builtin, config)
}
// setupSharedPath creates the host-side directory layout backing the
// host/guest shared filesystem for the given sandbox:
//   - getSharePath(id): the 9pfs/virtiofs source directory exposed to the guest
//   - getMountPath(id): where the runtime bind mounts all host/guest shared files
// getMountPath is then bind mounted read-only onto getSharePath so the guest
// can see the shared content but cannot modify it.
func (k *kataAgent) setupSharedPath(sandbox *Sandbox) error {
	// create shared path structure
	sharePath := getSharePath(sandbox.id)
	mountPath := getMountPath(sandbox.id)
	if err := os.MkdirAll(sharePath, DirMode); err != nil {
		return err
	}
	if err := os.MkdirAll(mountPath, DirMode); err != nil {
		return err
	}
	// Read-only bind mount with "slave" propagation, so that mount points
	// created later under mountPath are also shown in sharePath, while
	// mounts made on the sharePath side do not propagate back.
	if err := bindMount(context.Background(), mountPath, sharePath, true, "slave"); err != nil {
		return err
	}
	return nil
}
func (k *kataAgent) createSandbox(sandbox *Sandbox) error {
span, _ := k.trace("createSandbox")
defer span.Finish()
return k.configure(sandbox.hypervisor, sandbox.id, k.getSharePath(sandbox.id), k.proxyBuiltIn, sandbox.config.AgentConfig)
if err := k.setupSharedPath(sandbox); err != nil {
return err
}
return k.configure(sandbox.hypervisor, sandbox.id, getSharePath(sandbox.id), k.proxyBuiltIn, sandbox.config.AgentConfig)
}
func cmdToKataProcess(cmd types.Cmd) (process *grpc.Process, err error) {
@@ -1001,7 +1043,7 @@ func (k *kataAgent) replaceOCIMountsForStorages(spec *specs.Spec, volumeStorages
// Create a temporary location to mount the Storage. Mounting to the correct location
// will be handled by the OCI mount structure.
filename := fmt.Sprintf("%s-%s", uuid.Generate().String(), filepath.Base(m.Destination))
path := filepath.Join(kataGuestSharedDir(), filename)
path := filepath.Join(kataGuestSandboxStorageDir(), filename)
k.Logger().Debugf("Replacing OCI mount source (%s) with %s", m.Source, path)
ociMounts[index].Source = path
@@ -1088,22 +1130,32 @@ func (k *kataAgent) constraintGRPCSpec(grpcSpec *grpc.Spec, passSeccomp bool) {
grpcSpec.Linux.Devices = linuxDevices
}
func (k *kataAgent) handleShm(grpcSpec *grpc.Spec, sandbox *Sandbox) {
for idx, mnt := range grpcSpec.Mounts {
func (k *kataAgent) handleShm(mounts []specs.Mount, sandbox *Sandbox) {
for idx, mnt := range mounts {
if mnt.Destination != "/dev/shm" {
continue
}
// If /dev/shm for a container is backed by an ephemeral volume, skip
// bind-mounting it to the sandbox shm.
// A later call to handleEphemeralStorage should take care of setting up /dev/shm correctly.
if mnt.Type == KataEphemeralDevType {
continue
}
// A container shm mount is shared with sandbox shm mount.
if sandbox.shmSize > 0 {
grpcSpec.Mounts[idx].Type = "bind"
grpcSpec.Mounts[idx].Options = []string{"rbind"}
grpcSpec.Mounts[idx].Source = filepath.Join(kataGuestSandboxDir(), shmDir)
mounts[idx].Type = "bind"
mounts[idx].Options = []string{"rbind"}
mounts[idx].Source = filepath.Join(kataGuestSandboxDir(), shmDir)
k.Logger().WithField("shm-size", sandbox.shmSize).Info("Using sandbox shm")
} else {
// This should typically not happen, as a sandbox shm mount is always set up by the
// upper stack.
sizeOption := fmt.Sprintf("size=%d", DefaultShmSize)
grpcSpec.Mounts[idx].Type = "tmpfs"
grpcSpec.Mounts[idx].Source = "shm"
grpcSpec.Mounts[idx].Options = []string{"noexec", "nosuid", "nodev", "mode=1777", sizeOption}
mounts[idx].Type = "tmpfs"
mounts[idx].Source = "shm"
mounts[idx].Options = []string{"noexec", "nosuid", "nodev", "mode=1777", sizeOption}
k.Logger().WithField("shm-size", sizeOption).Info("Setting up a separate shm for container")
}
}
@@ -1206,7 +1258,7 @@ func (k *kataAgent) rollbackFailingContainerCreation(c *Container) {
k.Logger().WithError(err2).Error("rollback failed unmountHostMounts()")
}
if err2 := bindUnmountContainerRootfs(k.ctx, kataHostSharedDir(), c.sandbox.id, c.id); err2 != nil {
if err2 := bindUnmountContainerRootfs(k.ctx, getMountPath(c.sandbox.id), c.id); err2 != nil {
k.Logger().WithError(err2).Error("rollback failed bindUnmountContainerRootfs()")
}
}
@@ -1263,6 +1315,13 @@ func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPat
rootfs.Options = []string{"nouuid"}
}
// Ensure container mount destination exists
// TODO: remove dependency on shared fs path. shared fs is just one kind of storage sources.
// we should not always use shared fs path for all kinds of storage. Instead, all storage
// should be bind mounted to a tmpfs path for containers to use.
if err := os.MkdirAll(filepath.Join(getMountPath(c.sandbox.id), c.id, c.rootfsSuffix), DirMode); err != nil {
return nil, err
}
return rootfs, nil
}
@@ -1274,7 +1333,7 @@ func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPat
// (kataGuestSharedDir) is already mounted in the
// guest. We only need to mount the rootfs from
// the host and it will show up in the guest.
if err := bindMountContainerRootfs(k.ctx, kataHostSharedDir(), sandbox.id, c.id, c.rootFs.Target, false); err != nil {
if err := bindMountContainerRootfs(k.ctx, getMountPath(sandbox.id), c.id, c.rootFs.Target, false); err != nil {
return nil, err
}
@@ -1307,6 +1366,7 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
// takes care of rolling back actions previously performed.
defer func() {
if err != nil {
k.Logger().WithError(err).Error("createContainer failed")
k.rollbackFailingContainerCreation(c)
}
}()
@@ -1327,11 +1387,13 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
}
// Handle container mounts
newMounts, ignoredMounts, err := c.mountSharedDirMounts(kataHostSharedDir(), kataGuestSharedDir())
newMounts, ignoredMounts, err := c.mountSharedDirMounts(getMountPath(sandbox.id), kataGuestSharedDir())
if err != nil {
return nil, err
}
k.handleShm(ociSpec.Mounts, sandbox)
epheStorages := k.handleEphemeralStorage(ociSpec.Mounts)
ctrStorages = append(ctrStorages, epheStorages...)
@@ -1382,8 +1444,6 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
// irrelevant information to the agent.
k.constraintGRPCSpec(grpcSpec, passSeccomp)
k.handleShm(grpcSpec, sandbox)
req := &grpc.CreateContainerRequest{
ContainerId: c.id,
ExecId: c.id,
@@ -2290,13 +2350,20 @@ func (k *kataAgent) markDead() {
}
func (k *kataAgent) cleanup(s *Sandbox) {
path := k.getSharePath(s.id)
// Unmount shared path
path := getSharePath(s.id)
k.Logger().WithField("path", path).Infof("cleanup agent")
if err := bindUnmountAllRootfs(k.ctx, path, s); err != nil {
k.Logger().WithError(err).Errorf("failed to unmount container share path %s", path)
if err := syscall.Unmount(path, syscall.MNT_DETACH|UmountNoFollow); err != nil {
k.Logger().WithError(err).Errorf("failed to unmount vm share path %s", path)
}
if err := os.RemoveAll(path); err != nil {
k.Logger().WithError(err).Errorf("failed to cleanup vm share path %s", path)
// Unmount mount path
path = getMountPath(s.id)
if err := bindUnmountAllRootfs(k.ctx, path, s); err != nil {
k.Logger().WithError(err).Errorf("failed to unmount vm mount path %s", path)
}
if err := os.RemoveAll(getSandboxPath(s.id)); err != nil {
k.Logger().WithError(err).Errorf("failed to cleanup vm path %s", getSandboxPath(s.id))
}
}

View File

@@ -663,33 +663,50 @@ func TestHandleShm(t *testing.T) {
shmSize: 8192,
}
g := &pb.Spec{
Hooks: &pb.Hooks{},
Mounts: []pb.Mount{
{Destination: "/dev/shm"},
},
var ociMounts []specs.Mount
mount := specs.Mount{
Type: "bind",
Destination: "/dev/shm",
}
k.handleShm(g, sandbox)
ociMounts = append(ociMounts, mount)
k.handleShm(ociMounts, sandbox)
assert.Len(g.Mounts, 1)
assert.NotEmpty(g.Mounts[0].Destination)
assert.Equal(g.Mounts[0].Destination, "/dev/shm")
assert.Equal(g.Mounts[0].Type, "bind")
assert.NotEmpty(g.Mounts[0].Source, filepath.Join(kataGuestSharedDir(), shmDir))
assert.Equal(g.Mounts[0].Options, []string{"rbind"})
assert.Len(ociMounts, 1)
assert.NotEmpty(ociMounts[0].Destination)
assert.Equal(ociMounts[0].Destination, "/dev/shm")
assert.Equal(ociMounts[0].Type, "bind")
assert.NotEmpty(ociMounts[0].Source, filepath.Join(kataGuestSharedDir(), shmDir))
assert.Equal(ociMounts[0].Options, []string{"rbind"})
sandbox.shmSize = 0
k.handleShm(g, sandbox)
assert.Len(g.Mounts, 1)
assert.NotEmpty(g.Mounts[0].Destination)
assert.Equal(g.Mounts[0].Destination, "/dev/shm")
assert.Equal(g.Mounts[0].Type, "tmpfs")
assert.Equal(g.Mounts[0].Source, "shm")
k.handleShm(ociMounts, sandbox)
assert.Len(ociMounts, 1)
assert.Equal(ociMounts[0].Destination, "/dev/shm")
assert.Equal(ociMounts[0].Type, "tmpfs")
assert.Equal(ociMounts[0].Source, "shm")
sizeOption := fmt.Sprintf("size=%d", DefaultShmSize)
assert.Equal(g.Mounts[0].Options, []string{"noexec", "nosuid", "nodev", "mode=1777", sizeOption})
assert.Equal(ociMounts[0].Options, []string{"noexec", "nosuid", "nodev", "mode=1777", sizeOption})
// In case the type of mount is ephemeral, the container mount is not
// shared with the sandbox shm.
ociMounts[0].Type = KataEphemeralDevType
mountSource := "/tmp/mountPoint"
ociMounts[0].Source = mountSource
k.handleShm(ociMounts, sandbox)
assert.Len(ociMounts, 1)
assert.Equal(ociMounts[0].Type, KataEphemeralDevType)
assert.NotEmpty(ociMounts[0].Source, mountSource)
epheStorages := k.handleEphemeralStorage(ociMounts)
epheMountPoint := epheStorages[0].MountPoint
expected := filepath.Join(ephemeralPath(), filepath.Base(mountSource))
assert.Equal(epheMountPoint, expected,
"Ephemeral mount point didn't match: got %s, expecting %s", epheMountPoint, expected)
}
func testIsPidNamespacePresent(grpcSpec *pb.Spec) bool {
@@ -756,19 +773,6 @@ func TestHandlePidNamespace(t *testing.T) {
assert.False(testIsPidNamespacePresent(g))
}
func TestAgentPathAPI(t *testing.T) {
assert := assert.New(t)
k1 := &kataAgent{}
k2 := &kataAgent{}
id := "foobar"
// getSharePath
path1 := k1.getSharePath(id)
path2 := k2.getSharePath(id)
assert.Equal(path1, path2)
}
func TestAgentConfigure(t *testing.T) {
assert := assert.New(t)

View File

@@ -275,11 +275,11 @@ func remount(ctx context.Context, mountflags uintptr, src string) error {
// bindMountContainerRootfs bind mounts a container rootfs into a 9pfs shared
// directory between the guest and the host.
func bindMountContainerRootfs(ctx context.Context, sharedDir, sandboxID, cID, cRootFs string, readonly bool) error {
func bindMountContainerRootfs(ctx context.Context, shareDir, cid, cRootFs string, readonly bool) error {
span, _ := trace(ctx, "bindMountContainerRootfs")
defer span.Finish()
rootfsDest := filepath.Join(sharedDir, sandboxID, cID, rootfsDir)
rootfsDest := filepath.Join(shareDir, cid, rootfsDir)
return bindMount(ctx, cRootFs, rootfsDest, readonly, "private")
}
@@ -315,12 +315,12 @@ func isSymlink(path string) bool {
return stat.Mode()&os.ModeSymlink != 0
}
func bindUnmountContainerRootfs(ctx context.Context, sharedDir, sandboxID, cID string) error {
func bindUnmountContainerRootfs(ctx context.Context, sharedDir, cID string) error {
span, _ := trace(ctx, "bindUnmountContainerRootfs")
defer span.Finish()
rootfsDest := filepath.Join(sharedDir, sandboxID, cID, rootfsDir)
if isSymlink(filepath.Join(sharedDir, sandboxID, cID)) || isSymlink(rootfsDest) {
rootfsDest := filepath.Join(sharedDir, cID, rootfsDir)
if isSymlink(filepath.Join(sharedDir, cID)) || isSymlink(rootfsDest) {
logrus.Warnf("container dir %s is a symlink, malicious guest?", cID)
return nil
}
@@ -343,7 +343,7 @@ func bindUnmountAllRootfs(ctx context.Context, sharedDir string, sandbox *Sandbo
var errors *merr.Error
for _, c := range sandbox.containers {
if isSymlink(filepath.Join(sharedDir, sandbox.id, c.id)) {
if isSymlink(filepath.Join(sharedDir, c.id)) {
logrus.Warnf("container dir %s is a symlink, malicious guest?", c.id)
continue
}
@@ -351,7 +351,7 @@ func bindUnmountAllRootfs(ctx context.Context, sharedDir string, sandbox *Sandbo
if c.state.Fstype == "" {
// even if error found, don't break out of loop until all mounts attempted
// to be unmounted, and collect all errors
errors = merr.Append(errors, bindUnmountContainerRootfs(c.ctx, sharedDir, sandbox.id, c.id))
errors = merr.Append(errors, bindUnmountContainerRootfs(c.ctx, sharedDir, c.id))
}
}
return errors.ErrorOrNil()

View File

@@ -386,7 +386,7 @@ func TestBindUnmountContainerRootfsENOENTNotError(t *testing.T) {
assert.NoError(os.Remove(testPath))
}
err := bindUnmountContainerRootfs(context.Background(), testMnt, sID, cID)
err := bindUnmountContainerRootfs(context.Background(), filepath.Join(testMnt, sID), cID)
assert.NoError(err)
}
@@ -407,7 +407,7 @@ func TestBindUnmountContainerRootfsRemoveRootfsDest(t *testing.T) {
assert.NoError(err)
defer os.RemoveAll(filepath.Join(testDir, sID))
bindUnmountContainerRootfs(context.Background(), testDir, sID, cID)
bindUnmountContainerRootfs(context.Background(), filepath.Join(testDir, sID), cID)
if _, err := os.Stat(testPath); err == nil {
t.Fatal("empty rootfs dest should be removed")

View File

@@ -185,11 +185,6 @@ func (n *noopAgent) configureFromGrpc(h hypervisor, id string, builtin bool, con
return nil
}
// getSharePath is the Noop agent share path getter. It does nothing.
func (n *noopAgent) getSharePath(id string) string {
return ""
}
// reseedRNG is the Noop agent RND reseeder. It does nothing.
func (n *noopAgent) reseedRNG(data []byte) error {
return nil

View File

@@ -156,13 +156,6 @@ func TestNoopAgentConfigure(t *testing.T) {
assert.NoError(err)
}
func TestNoopAgentGetSharePath(t *testing.T) {
n := &noopAgent{}
path := n.getSharePath("")
assert := assert.New(t)
assert.Empty(path)
}
func TestNoopAgentStartProxy(t *testing.T) {
assert := assert.New(t)
n := &noopAgent{}

View File

@@ -617,7 +617,7 @@ func (q *qemu) virtiofsdArgs(fd uintptr) []string {
// The daemon will terminate when the vhost-user socket
// connection with QEMU closes. Therefore we do not keep track
// of this child process after returning from this function.
sourcePath := filepath.Join(kataHostSharedDir(), q.id)
sourcePath := filepath.Join(getSharePath(q.id))
args := []string{
fmt.Sprintf("--fd=%v", fd),
"-o", "source=" + sourcePath,

View File

@@ -555,12 +555,12 @@ func TestQemuVirtiofsdArgs(t *testing.T) {
kataHostSharedDir = savedKataHostSharedDir
}()
result := "--fd=123 -o source=test-share-dir/foo -o cache=none --syslog -o no_posix_lock -d"
result := "--fd=123 -o source=test-share-dir/foo/shared -o cache=none --syslog -o no_posix_lock -d"
args := q.virtiofsdArgs(123)
assert.Equal(strings.Join(args, " "), result)
q.config.Debug = false
result = "--fd=123 -o source=test-share-dir/foo -o cache=none --syslog -o no_posix_lock -f"
result = "--fd=123 -o source=test-share-dir/foo/shared -o cache=none --syslog -o no_posix_lock -f"
args = q.virtiofsdArgs(123)
assert.Equal(strings.Join(args, " "), result)
}

View File

@@ -13,6 +13,7 @@ import (
"os"
"os/exec"
"path/filepath"
"syscall"
"testing"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
@@ -59,6 +60,7 @@ func cleanUp() {
globalSandboxList.removeSandbox(testSandboxID)
os.RemoveAll(fs.MockRunStoragePath())
os.RemoveAll(fs.MockRunVMStoragePath())
syscall.Unmount(getSharePath(testSandboxID), syscall.MNT_DETACH|UmountNoFollow)
os.RemoveAll(testDir)
os.MkdirAll(testDir, DirMode)

View File

@@ -412,7 +412,7 @@ func (v *VM) assignSandbox(s *Sandbox) error {
vmSharePath := buildVMSharePath(v.id, v.store.RunVMStoragePath())
vmSockDir := filepath.Join(v.store.RunVMStoragePath(), v.id)
sbSharePath := s.agent.getSharePath(s.id)
sbSharePath := getMountPath(s.id)
sbSockDir := filepath.Join(v.store.RunVMStoragePath(), s.id)
v.logger().WithFields(logrus.Fields{