From 687f2dbe84e06c1e3430755e4b2f6711725fa9d0 Mon Sep 17 00:00:00 2001
From: Wei Zhang
Date: Wed, 20 Nov 2019 21:48:08 +0800
Subject: [PATCH 01/11] persist: move "newstore" out of experimental

Fixes #803

Move the "newstore" feature out of the experimental feature list; from
this commit on, "newstore" is enabled by default.

Signed-off-by: Wei Zhang
---
 .gitignore                                 |  1 +
 cli/config/configuration-acrn.toml.in      |  4 +-
 cli/config/configuration-clh.toml.in       |  4 +-
 cli/config/configuration-fc.toml.in        |  4 +-
 .../configuration-qemu-virtiofs.toml.in    |  4 +-
 cli/config/configuration-qemu.toml.in      |  4 +-
 virtcontainers/api_test.go                 | 18 ++++----
 virtcontainers/factory/cache/cache_test.go | 11 +++--
 virtcontainers/persist.go                  |  7 +---
 virtcontainers/persist_test.go             | 42 -------------------
 virtcontainers/sandbox.go                  | 20 ++++-----
 11 files changed, 33 insertions(+), 86 deletions(-)

diff --git a/.gitignore b/.gitignore
index 55a515bc3..336240fcb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,6 +9,7 @@
 /cli/config/configuration-nemu.toml
 /cli/config/configuration-qemu.toml
 /cli/config/configuration-qemu-virtiofs.toml
+/cli/config/configuration-clh.toml
 /cli/config-generated.go
 /cli/coverage.html
 /containerd-shim-kata-v2
diff --git a/cli/config/configuration-acrn.toml.in b/cli/config/configuration-acrn.toml.in
index 0fad4fc5b..98d91082b 100644
--- a/cli/config/configuration-acrn.toml.in
+++ b/cli/config/configuration-acrn.toml.in
@@ -231,9 +231,7 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
 
 # Enabled experimental feature list, format: ["a", "b"].
 # Experimental features are features not stable enough for production,
-# They may break compatibility, and are prepared for a big version bump.
+# they may break compatibility, and are prepared for a big version bump.
 # Supported experimental features:
-# 1. "newstore": new persist storage driver which breaks backward compatibility,
-# expected to move out of experimental in 2.0.0.
 # (default: [])
 experimental=@DEFAULTEXPFEATURES@
diff --git a/cli/config/configuration-clh.toml.in b/cli/config/configuration-clh.toml.in
index 532c0b4b8..7692584c4 100644
--- a/cli/config/configuration-clh.toml.in
+++ b/cli/config/configuration-clh.toml.in
@@ -207,9 +207,7 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
 
 # Enabled experimental feature list, format: ["a", "b"].
 # Experimental features are features not stable enough for production,
-# They may break compatibility, and are prepared for a big version bump.
+# they may break compatibility, and are prepared for a big version bump.
 # Supported experimental features:
-# 1. "newstore": new persist storage driver which breaks backward compatibility,
-# expected to move out of experimental in 2.0.0.
 # (default: [])
 experimental=@DEFAULTEXPFEATURES@
diff --git a/cli/config/configuration-fc.toml.in b/cli/config/configuration-fc.toml.in
index f595245f0..99d1a487e 100644
--- a/cli/config/configuration-fc.toml.in
+++ b/cli/config/configuration-fc.toml.in
@@ -333,9 +333,7 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
 
 # Enabled experimental feature list, format: ["a", "b"].
 # Experimental features are features not stable enough for production,
-# They may break compatibility, and are prepared for a big version bump.
+# they may break compatibility, and are prepared for a big version bump.
 # Supported experimental features:
-# 1. "newstore": new persist storage driver which breaks backward compatibility,
-# expected to move out of experimental in 2.0.0.
# (default: []) experimental=@DEFAULTEXPFEATURES@ diff --git a/cli/config/configuration-qemu-virtiofs.toml.in b/cli/config/configuration-qemu-virtiofs.toml.in index b0e3bc55f..a6e403004 100644 --- a/cli/config/configuration-qemu-virtiofs.toml.in +++ b/cli/config/configuration-qemu-virtiofs.toml.in @@ -435,9 +435,7 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@ # Enabled experimental feature list, format: ["a", "b"]. # Experimental features are features not stable enough for production, -# They may break compatibility, and are prepared for a big version bump. +# they may break compatibility, and are prepared for a big version bump. # Supported experimental features: -# 1. "newstore": new persist storage driver which breaks backward compatibility, -# expected to move out of experimental in 2.0.0. # (default: []) experimental=@DEFAULTEXPFEATURES@ diff --git a/cli/config/configuration-qemu.toml.in b/cli/config/configuration-qemu.toml.in index c42218d56..d87e5269f 100644 --- a/cli/config/configuration-qemu.toml.in +++ b/cli/config/configuration-qemu.toml.in @@ -430,9 +430,7 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@ # Enabled experimental feature list, format: ["a", "b"]. # Experimental features are features not stable enough for production, -# They may break compatibility, and are prepared for a big version bump. +# they may break compatibility, and are prepared for a big version bump. # Supported experimental features: -# 1. "newstore": new persist storage driver which breaks backward compatibility, -# expected to move out of experimental in 2.0.0. # (default: []) experimental=@DEFAULTEXPFEATURES@ diff --git a/virtcontainers/api_test.go b/virtcontainers/api_test.go index c6bb1a27c..740b9f181 100644 --- a/virtcontainers/api_test.go +++ b/virtcontainers/api_test.go @@ -16,6 +16,7 @@ import ( "testing" ktu "github.com/kata-containers/runtime/pkg/katatestutils" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/kata-containers/runtime/virtcontainers/pkg/annotations" "github.com/kata-containers/runtime/virtcontainers/pkg/mock" vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types" @@ -517,12 +518,12 @@ func TestStatusSandboxSuccessfulStateReady(t *testing.T) { expectedStatus := SandboxStatus{ ID: testSandboxID, State: types.SandboxState{ - State: types.StateReady, + State: types.StateReady, + PersistVersion: 2, }, Hypervisor: MockHypervisor, HypervisorConfig: hypervisorConfig, Agent: NoopAgentType, - Annotations: sandboxAnnotations, ContainersStatus: []ContainerStatus{ { ID: containerID, @@ -576,12 +577,12 @@ func TestStatusSandboxSuccessfulStateRunning(t *testing.T) { expectedStatus := SandboxStatus{ ID: testSandboxID, State: types.SandboxState{ - State: types.StateRunning, + State: types.StateRunning, + PersistVersion: 2, }, Hypervisor: MockHypervisor, HypervisorConfig: hypervisorConfig, Agent: NoopAgentType, - Annotations: sandboxAnnotations, ContainersStatus: []ContainerStatus{ { ID: containerID, @@ -616,6 +617,8 @@ func TestStatusSandboxSuccessfulStateRunning(t *testing.T) { assert.Exactly(status, expectedStatus) } +/*FIXME: replace DeleteAll with newstore.Destroy + func TestStatusSandboxFailingFetchSandboxConfig(t *testing.T) { defer cleanUp() assert := assert.New(t) @@ -650,7 +653,7 @@ func TestStatusPodSandboxFailingFetchSandboxState(t *testing.T) { _, err = StatusSandbox(ctx, p.ID()) assert.Error(err) -} +}*/ func newTestContainerConfigNoop(contID string) ContainerConfig { // Define the container command and bundle. 
@@ -708,7 +711,7 @@ func TestCreateContainerFailingNoSandbox(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.Error(err) @@ -1243,6 +1246,7 @@ func TestStatusContainerStateRunning(t *testing.T) { assert.Exactly(status, expectedStatus) } +/* FIXME: replace DeleteAll with newstore.Destroy func TestStatusContainerFailing(t *testing.T) { defer cleanUp() assert := assert.New(t) @@ -1260,7 +1264,7 @@ func TestStatusContainerFailing(t *testing.T) { _, err = StatusContainer(ctx, p.ID(), contID) assert.Error(err) -} +}*/ func TestStatsContainerFailing(t *testing.T) { defer cleanUp() diff --git a/virtcontainers/factory/cache/cache_test.go b/virtcontainers/factory/cache/cache_test.go index 37fddc009..65189374b 100644 --- a/virtcontainers/factory/cache/cache_test.go +++ b/virtcontainers/factory/cache/cache_test.go @@ -8,6 +8,7 @@ package cache import ( "context" "io/ioutil" + "path/filepath" "testing" "github.com/stretchr/testify/assert" @@ -34,10 +35,14 @@ func TestTemplateFactory(t *testing.T) { ctx := context.Background() - var savedStorePath = store.VCStorePrefix - store.VCStorePrefix = testDir + ConfigStoragePathSaved := store.ConfigStoragePath + RunStoragePathSaved := store.RunStoragePath + // allow the tests to run without affecting the host system. + store.ConfigStoragePath = func() string { return filepath.Join(testDir, store.StoragePathSuffix, "config") } + store.RunStoragePath = func() string { return filepath.Join(testDir, store.StoragePathSuffix, "run") } defer func() { - store.VCStorePrefix = savedStorePath + store.ConfigStoragePath = ConfigStoragePathSaved + store.RunStoragePath = RunStoragePathSaved }() // New diff --git a/virtcontainers/persist.go b/virtcontainers/persist.go index 39bede130..c3ed4f9a0 100644 --- a/virtcontainers/persist.go +++ b/virtcontainers/persist.go @@ -444,12 +444,7 @@ func (c *Container) Restore() error { } func (s *Sandbox) supportNewStore() bool { - for _, f := range s.config.Experimental { - if f == persist.NewStoreFeature && exp.Get("newstore") != nil { - return true - } - } - return false + return true } func loadSandboxConfig(id string) (*SandboxConfig, error) { diff --git a/virtcontainers/persist_test.go b/virtcontainers/persist_test.go index 3f98e71fc..03b6252f4 100644 --- a/virtcontainers/persist_test.go +++ b/virtcontainers/persist_test.go @@ -7,7 +7,6 @@ package virtcontainers import ( "context" - "fmt" "os" "testing" @@ -19,47 +18,6 @@ import ( "github.com/kata-containers/runtime/virtcontainers/types" ) -func testCreateExpSandbox() (*Sandbox, error) { - sconfig := SandboxConfig{ - ID: "test-exp", - HypervisorType: MockHypervisor, - HypervisorConfig: newHypervisorConfig(nil, nil), - AgentType: NoopAgentType, - NetworkConfig: NetworkConfig{}, - Volumes: nil, - Containers: nil, - Experimental: []exp.Feature{persist.NewStoreFeature}, - } - - // support experimental - sandbox, err := createSandbox(context.Background(), sconfig, nil) - if err != nil { - return nil, fmt.Errorf("Could not create sandbox: %s", err) - } - - if err := sandbox.agent.startSandbox(sandbox); err != nil { - return nil, err - } - - return sandbox, nil -} - -func TestSupportNewStore(t *testing.T) { - assert := assert.New(t) - hConfig := newHypervisorConfig(nil, nil) - sandbox, err := testCreateSandbox(t, testSandboxID, MockHypervisor, hConfig, NoopAgentType, NetworkConfig{}, nil, nil) - assert.NoError(err) - defer cleanUp() - - 
// not support experimental - assert.False(sandbox.supportNewStore()) - - // support experimental - sandbox, err = testCreateExpSandbox() - assert.NoError(err) - assert.True(sandbox.supportNewStore()) -} - func TestSandboxRestore(t *testing.T) { var err error assert := assert.New(t) diff --git a/virtcontainers/sandbox.go b/virtcontainers/sandbox.go index f0a3e688d..ae20cfe6b 100644 --- a/virtcontainers/sandbox.go +++ b/virtcontainers/sandbox.go @@ -677,19 +677,7 @@ func unlockSandbox(ctx context.Context, sandboxID, token string) error { } func supportNewStore(ctx context.Context) bool { - if exp.Get("newstore") == nil { - return false - } - - // check if client context enabled "newstore" feature - exps := exp.ExpFromContext(ctx) - for _, v := range exps { - if v == "newstore" { - return true - } - } - - return false + return true } // fetchSandbox fetches a sandbox config from a sandbox ID and returns a sandbox. @@ -812,6 +800,12 @@ func (s *Sandbox) Delete() error { s.agent.cleanup(s) + if s.supportNewStore() { + if err := s.newStore.Destroy(); err != nil { + return err + } + } + return s.store.Delete() } From 633748aa763ed82589e7fa195ea6fbe8d7e68673 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 25 Nov 2019 15:13:48 +0800 Subject: [PATCH 02/11] persist: remove VCStore from hypervisor Remove usage of VCStore from hypervisors. Signed-off-by: Wei Zhang --- cli/kata-check_amd64.go | 9 +- virtcontainers/acrn.go | 139 +++++++++++++++++-------- virtcontainers/acrn_test.go | 2 +- virtcontainers/clh.go | 36 ++----- virtcontainers/fc.go | 30 +----- virtcontainers/hypervisor.go | 4 +- virtcontainers/mock_hypervisor.go | 5 +- virtcontainers/mock_hypervisor_test.go | 4 +- virtcontainers/qemu.go | 52 ++------- virtcontainers/qemu_test.go | 18 ++-- virtcontainers/sandbox.go | 36 ++----- virtcontainers/vm.go | 27 +---- 12 files changed, 142 insertions(+), 220 deletions(-) diff --git a/cli/kata-check_amd64.go b/cli/kata-check_amd64.go index 1464b9090..8376293a8 100644 --- a/cli/kata-check_amd64.go +++ b/cli/kata-check_amd64.go @@ -6,7 +6,6 @@ package main import ( - "context" "fmt" "io/ioutil" "strings" @@ -14,7 +13,6 @@ import ( "unsafe" vc "github.com/kata-containers/runtime/virtcontainers" - "github.com/kata-containers/runtime/virtcontainers/store" "github.com/sirupsen/logrus" ) @@ -238,12 +236,7 @@ func acrnIsUsable() error { kataLog.WithField("device", acrnDevice).Info("device available") acrnInst := vc.Acrn{} - vcStore, err := store.NewVCSandboxStore(context.Background(), "kata-check") - if err != nil { - return err - } - - uuidStr, err := acrnInst.GetNextAvailableUUID(vcStore) + uuidStr, err := acrnInst.GetNextAvailableUUID() if err != nil { return err } diff --git a/virtcontainers/acrn.go b/virtcontainers/acrn.go index 5787a25f4..85b821d47 100644 --- a/virtcontainers/acrn.go +++ b/virtcontainers/acrn.go @@ -7,6 +7,7 @@ package virtcontainers import ( "context" + "encoding/json" "fmt" "os" "os/exec" @@ -20,6 +21,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" + "github.com/kata-containers/runtime/pkg/rootless" "github.com/kata-containers/runtime/virtcontainers/device/config" persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api" "github.com/kata-containers/runtime/virtcontainers/pkg/uuid" @@ -28,6 +30,25 @@ import ( "github.com/kata-containers/runtime/virtcontainers/utils" ) +// Since ACRN is using the store in a quite abnormal way, let's first draw it back from store to here + +// UUIDPathSuffix is the suffix used for uuid storage +const ( + 
UUIDPathSuffix = "uuid" + uuidFile = "uuid.json" +) + +// VMUUIDStoragePath is the uuid directory. +// It will contain all uuid info used by guest vm. +var VMUUIDStoragePath = func() string { + path := filepath.Join("/run/vc", UUIDPathSuffix) + if rootless.IsRootless() { + return filepath.Join(rootless.GetRootlessDir(), path) + } + return path + +} + // ACRN currently supports only known UUIDs for security // reasons (FuSa). When launching VM, only these pre-defined // UUID should be used else VM launch will fail. The main @@ -73,7 +94,6 @@ type AcrnState struct { // Acrn is an Hypervisor interface implementation for the Linux acrn hypervisor. type Acrn struct { id string - store *store.VCStore config HypervisorConfig acrnConfig Config state AcrnState @@ -276,7 +296,7 @@ func (a *Acrn) buildDevices(imagePath string) ([]Device, error) { } // setup sets the Acrn structure up. -func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore) error { +func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig) error { span, _ := a.trace("setup") defer span.Finish() @@ -286,24 +306,19 @@ func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto } a.id = id - a.store = vcStore a.config = *hypervisorConfig a.arch = newAcrnArch(a.config) var create bool var uuid string - if a.store != nil { //use old store - if err = a.store.Load(store.Hypervisor, &a.state); err != nil { - create = true - } - } else if a.state.UUID == "" { // new store + if a.state.UUID == "" { create = true } if create { a.Logger().Debug("Setting UUID") - if uuid, err = a.GetNextAvailableUUID(nil); err != nil { + if uuid, err = a.GetNextAvailableUUID(); err != nil { return err } a.state.UUID = uuid @@ -316,10 +331,6 @@ func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto return err } - if err = a.storeState(); err != nil { - return err - } - if err = a.storeInfo(); err != nil { return err } @@ -348,14 +359,14 @@ func (a *Acrn) createDummyVirtioBlkDev(devices []Device) ([]Device, error) { } // createSandbox is the Hypervisor sandbox creation. 
-func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, store *store.VCStore, stateful bool) error { +func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error { // Save the tracing context a.ctx = ctx span, _ := a.trace("createSandbox") defer span.Finish() - if err := a.setup(id, hypervisorConfig, store); err != nil { + if err := a.setup(id, hypervisorConfig); err != nil { return err } @@ -458,11 +469,6 @@ func (a *Acrn) startSandbox(timeoutSecs int) error { return err } - //Store VMM information - if err = a.storeState(); err != nil { - return err - } - return nil } @@ -499,7 +505,7 @@ func (a *Acrn) stopSandbox() (err error) { uuid := a.state.UUID Idx := acrnUUIDsToIdx[uuid] - if err = a.store.Load(store.UUID, &a.info); err != nil { + if err = a.loadInfo(); err != nil { a.Logger().Info("Failed to load UUID availabiity info") return err } @@ -698,7 +704,7 @@ func (a *Acrn) getPids() []int { return []int{a.state.PID} } -func (a *Acrn) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error { +func (a *Acrn) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error { return errors.New("acrn is not supported by VM cache") } @@ -737,20 +743,14 @@ func (a *Acrn) GetACRNUUIDBytes(uid string) (uuid.UUID, error) { // GetNextAvailableUUID returns next available UUID VM creation // If no validl UUIDs are available it returns err. -func (a *Acrn) GetNextAvailableUUID(uuidstore *store.VCStore) (string, error) { +func (a *Acrn) GetNextAvailableUUID() (string, error) { var MaxVMSupported uint8 var Idx uint8 var uuidStr string var err error - if uuidstore == nil { - uuidstore = a.store - } - - if uuidstore != nil { //use old store - if err = uuidstore.Load(store.UUID, &a.info); err != nil { - a.Logger().Infof("Load UUID store failed") - } + if err = a.loadInfo(); err != nil { + a.Logger().Infof("Load UUID store failed") } if MaxVMSupported, err = a.GetMaxSupportedACRNVM(); err != nil { @@ -795,22 +795,79 @@ func (a *Acrn) GetMaxSupportedACRNVM() (uint8, error) { return platformInfo.maxKataContainers, nil } -func (a *Acrn) storeState() error { - if a.store != nil { - if err := a.store.Store(store.Hypervisor, a.state); err != nil { - a.Logger().WithError(err).Error("failed to store acrn state") +func (a *Acrn) storeInfo() error { + dirPath := VMUUIDStoragePath() + + _, err := os.Stat(dirPath) + if os.IsNotExist(err) { + // Root directory + a.Logger().WithField("path", dirPath).Debugf("Creating UUID directory") + if err := os.MkdirAll(dirPath, store.DirMode); err != nil { return err } + } else if err != nil { + return err } + + dirf, err := os.Open(dirPath) + if err != nil { + return err + } + defer dirf.Close() + + if err := syscall.Flock(int(dirf.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { + return err + } + + // write data + f, err := os.OpenFile(filepath.Join(dirPath, uuidFile), os.O_RDWR|os.O_CREATE, 0755) + if err != nil { + return fmt.Errorf("failed to store information into uuid.json: %v", err) + } + defer f.Close() + + jsonOut, err := json.Marshal(a.info) + if err != nil { + return fmt.Errorf("Could not marshall data: %s", err) + } + f.Write(jsonOut) return nil } -func (a *Acrn) storeInfo() error { - if a.store != nil { - if err := a.store.Store(store.UUID, a.info); err != nil { - a.Logger().WithError(err).Error("failed to store acrn info") - return err - } 
+func (a *Acrn) loadInfo() error {
+	dirPath := VMUUIDStoragePath()
+
+	_, err := os.Stat(dirPath)
+	if err != nil {
+		return fmt.Errorf("failed to load ACRN information: %v", err)
+	}
+
+	dirf, err := os.Open(dirPath)
+	if err != nil {
+		return err
+	}
+
+	if err := syscall.Flock(int(dirf.Fd()), syscall.LOCK_SH|syscall.LOCK_NB); err != nil {
+		dirf.Close()
+		return err
+	}
+
+	defer dirf.Close()
+
+	// read data
+	f, err := os.Open(filepath.Join(dirPath, uuidFile))
+	if err != nil {
+		return fmt.Errorf("failed to load information from uuid.json: %v", err)
+	}
+
+	dec := json.NewDecoder(f)
+	if dec == nil {
+		return fmt.Errorf("failed to create json decoder")
+	}
+
+	err = dec.Decode(&a.info)
+	if err != nil {
+		return fmt.Errorf("could not decode data: %v", err)
 	}
 	return nil
 }
diff --git a/virtcontainers/acrn_test.go b/virtcontainers/acrn_test.go
index 12a0ea5fd..a0dde2048 100644
--- a/virtcontainers/acrn_test.go
+++ b/virtcontainers/acrn_test.go
@@ -230,7 +230,7 @@ func TestAcrnCreateSandbox(t *testing.T) {
 	//set PID to 1 to ignore hypercall to get UUID and set a random UUID
 	a.state.PID = 1
 	a.state.UUID = "f81d4fae-7dec-11d0-a765-00a0c91e6bf6"
-	err = a.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, nil, false)
+	err = a.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
 	assert.NoError(err)
 	assert.Exactly(acrnConfig, a.config)
 }
diff --git a/virtcontainers/clh.go b/virtcontainers/clh.go
index 3dd1da84c..49daa2898 100644
--- a/virtcontainers/clh.go
+++ b/virtcontainers/clh.go
@@ -104,7 +104,6 @@ func (s *CloudHypervisorState) reset() {
 type cloudHypervisor struct {
 	id        string
 	state     CloudHypervisorState
-	store     *store.VCStore
 	config    HypervisorConfig
 	ctx       context.Context
 	APIClient clhClient
@@ -139,7 +138,7 @@ var clhDebugKernelParams = []Param{
 
 // For cloudHypervisor this call only sets the internal structure up.
 // The VM will be created and started through startSandbox().
-func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore, stateful bool) error {
+func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error {
 	clh.ctx = ctx
 
 	span, _ := clh.trace("createSandbox")
@@ -151,7 +150,6 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
 	}
 
 	clh.id = id
-	clh.store = vcStore
 	clh.config = *hypervisorConfig
 	clh.state.state = clhNotReady
 
@@ -187,12 +185,7 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
 
 	}
 
-	// No need to return an error from there since there might be nothing
-	// to fetch if this is the first time the hypervisor is created.
-	err = clh.store.Load(store.Hypervisor, &clh.state)
-	if err != nil {
-		clh.Logger().WithField("function", "createSandbox").WithError(err).Info("Sandbox not found creating ")
-	} else {
+	if clh.state.PID > 0 {
 		clh.Logger().WithField("function", "createSandbox").Info("Sandbox already exist, loading from state")
 		clh.virtiofsd = &virtiofsd{
 			PID:        clh.state.VirtiofsdPID,
@@ -203,6 +196,10 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
 		return nil
 	}
 
+	// No need to return an error from there since there might be nothing
+	// to fetch if this is the first time the hypervisor is created.
+ clh.Logger().WithField("function", "createSandbox").WithError(err).Info("Sandbox not found creating ") + // Set initial memomory size of the virtual machine clh.vmconfig.Memory.Size = int64(clh.config.MemorySize) << utils.MibToBytesShift clh.vmconfig.Memory.File = "/dev/shm" @@ -323,9 +320,6 @@ func (clh *cloudHypervisor) startSandbox(timeout int) error { return err } clh.state.VirtiofsdPID = pid - if err = clh.storeState(); err != nil { - return err - } } else { return errors.New("cloud-hypervisor only supports virtio based file sharing") } @@ -350,10 +344,6 @@ func (clh *cloudHypervisor) startSandbox(timeout int) error { } clh.state.state = clhReady - if err = clh.storeState(); err != nil { - return err - } - return nil } @@ -431,7 +421,7 @@ func (clh *cloudHypervisor) stopSandbox() (err error) { return clh.terminate() } -func (clh *cloudHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error { +func (clh *cloudHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error { return errors.New("cloudHypervisor is not supported by VM cache") } @@ -442,6 +432,7 @@ func (clh *cloudHypervisor) toGrpc() ([]byte, error) { func (clh *cloudHypervisor) save() (s persistapi.HypervisorState) { s.Pid = clh.state.PID s.Type = string(ClhHypervisor) + s.VirtiofsdPid = clh.state.VirtiofsdPID return } @@ -589,7 +580,6 @@ func (clh *cloudHypervisor) terminate() (err error) { func (clh *cloudHypervisor) reset() { clh.state.reset() - clh.storeState() } func (clh *cloudHypervisor) generateSocket(id string, useVsock bool) (interface{}, error) { @@ -633,17 +623,7 @@ func (clh *cloudHypervisor) logFilePath(id string) (string, error) { return utils.BuildSocketPath(store.RunVMStoragePath(), id, clhLogFile) } -func (clh *cloudHypervisor) storeState() error { - if clh.store != nil { - if err := clh.store.Store(store.Hypervisor, clh.state); err != nil { - return err - } - } - return nil -} - func (clh *cloudHypervisor) waitVMM(timeout uint) error { - clhRunning, err := clh.isClhRunning(timeout) if err != nil { diff --git a/virtcontainers/fc.go b/virtcontainers/fc.go index 81eaa5ab9..97570a456 100644 --- a/virtcontainers/fc.go +++ b/virtcontainers/fc.go @@ -143,7 +143,6 @@ type firecracker struct { firecrackerd *exec.Cmd //Tracks the firecracker process itself connection *client.Firecracker //Tracks the current active connection - store *store.VCStore ctx context.Context config HypervisorConfig pendingDevices []firecrackerDevice // Devices to be added before the FC VM ready @@ -222,7 +221,7 @@ func (fc *firecracker) bindMount(ctx context.Context, source, destination string // For firecracker this call only sets the internal structure up. // The sandbox will be created and started through startSandbox(). 
-func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore, stateful bool) error { +func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error { fc.ctx = ctx span, _ := fc.trace("createSandbox") @@ -231,7 +230,6 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS N //TODO: check validity of the hypervisor config provided //https://github.com/kata-containers/runtime/issues/1065 fc.id = id - fc.store = vcStore fc.state.set(notReady) fc.config = *hypervisorConfig fc.stateful = stateful @@ -263,15 +261,6 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS N fc.fcConfig = &types.FcConfig{} fc.fcConfigPath = filepath.Join(fc.vmPath, defaultFcConfig) - - // No need to return an error from there since there might be nothing - // to fetch if this is the first time the hypervisor is created. - if fc.store != nil { - if err := fc.store.Load(store.Hypervisor, &fc.info); err != nil { - fc.Logger().WithField("function", "init").WithError(err).Info("No info could be fetched") - } - } - return nil } @@ -385,16 +374,6 @@ func (fc *firecracker) fcInit(timeout int) error { } // Fetch sandbox network to be able to access it from the sandbox structure. - var networkNS NetworkNamespace - if fc.store != nil { - if err := fc.store.Load(store.Network, &networkNS); err == nil { - if networkNS.NetNsPath == "" { - fc.Logger().WithField("NETWORK NAMESPACE NULL", networkNS).Warn() - } - fc.netNSPath = networkNS.NetNsPath - } - } - err := os.MkdirAll(fc.jailerRoot, store.DirMode) if err != nil { return err @@ -476,11 +455,6 @@ func (fc *firecracker) fcInit(timeout int) error { fc.Logger().WithField("fcInit failed:", err).Debug() return err } - - // Store VMM information - if fc.store != nil { - return fc.store.Store(store.Hypervisor, fc.info) - } return nil } @@ -1155,7 +1129,7 @@ func (fc *firecracker) getPids() []int { return []int{fc.info.PID} } -func (fc *firecracker) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error { +func (fc *firecracker) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error { return errors.New("firecracker is not supported by VM cache") } diff --git a/virtcontainers/hypervisor.go b/virtcontainers/hypervisor.go index d96232dc8..38f87d024 100644 --- a/virtcontainers/hypervisor.go +++ b/virtcontainers/hypervisor.go @@ -736,7 +736,7 @@ func generateVMSocket(id string, useVsock bool) (interface{}, error) { // hypervisor is the virtcontainers hypervisor interface. // The default hypervisor implementation is Qemu. type hypervisor interface { - createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, store *store.VCStore, stateful bool) error + createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error startSandbox(timeout int) error stopSandbox() error pauseSandbox() error @@ -756,7 +756,7 @@ type hypervisor interface { // getPids returns a slice of hypervisor related process ids. // The hypervisor pid must be put at index 0. 
getPids() []int - fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error + fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error toGrpc() ([]byte, error) check() error diff --git a/virtcontainers/mock_hypervisor.go b/virtcontainers/mock_hypervisor.go index 30bd38cb4..672c1ddd3 100644 --- a/virtcontainers/mock_hypervisor.go +++ b/virtcontainers/mock_hypervisor.go @@ -11,7 +11,6 @@ import ( "os" persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api" - "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" ) @@ -27,7 +26,7 @@ func (m *mockHypervisor) hypervisorConfig() HypervisorConfig { return HypervisorConfig{} } -func (m *mockHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, store *store.VCStore, stateful bool) error { +func (m *mockHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error { err := hypervisorConfig.valid() if err != nil { return err @@ -108,7 +107,7 @@ func (m *mockHypervisor) getPids() []int { return []int{m.mockPid} } -func (m *mockHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error { +func (m *mockHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error { return errors.New("mockHypervisor is not supported by VM cache") } diff --git a/virtcontainers/mock_hypervisor_test.go b/virtcontainers/mock_hypervisor_test.go index 10c6a90cd..b73b28f2d 100644 --- a/virtcontainers/mock_hypervisor_test.go +++ b/virtcontainers/mock_hypervisor_test.go @@ -31,7 +31,7 @@ func TestMockHypervisorCreateSandbox(t *testing.T) { ctx := context.Background() // wrong config - err := m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig, nil, false) + err := m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false) assert.Error(err) sandbox.config.HypervisorConfig = HypervisorConfig{ @@ -40,7 +40,7 @@ func TestMockHypervisorCreateSandbox(t *testing.T) { HypervisorPath: fmt.Sprintf("%s/%s", testDir, testHypervisor), } - err = m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig, nil, false) + err = m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false) assert.NoError(err) } diff --git a/virtcontainers/qemu.go b/virtcontainers/qemu.go index 056f574da..754c9dcc9 100644 --- a/virtcontainers/qemu.go +++ b/virtcontainers/qemu.go @@ -77,8 +77,6 @@ type QemuState struct { type qemu struct { id string - store *store.VCStore - config HypervisorConfig qmpMonitorCh qmpChannel @@ -226,7 +224,7 @@ func (q *qemu) trace(name string) (opentracing.Span, context.Context) { } // setup sets the Qemu structure up. 
-func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore) error { +func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig) error { span, _ := q.trace("setup") defer span.Finish() @@ -236,7 +234,6 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto } q.id = id - q.store = vcStore q.config = *hypervisorConfig q.arch = newQemuArch(q.config) @@ -255,12 +252,7 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto } var create bool - if q.store != nil { //use old store - if err := q.store.Load(store.Hypervisor, &q.state); err != nil { - // hypervisor doesn't exist, create new one - create = true - } - } else if q.state.UUID == "" { // new store + if q.state.UUID == "" { create = true } @@ -280,10 +272,6 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto if err = os.MkdirAll(store.SandboxRuntimeRootPath(id), store.DirMode); err != nil { return err } - - if err = q.storeState(); err != nil { - return err - } } nested, err := RunningOnVMM(procCPUInfo) @@ -463,14 +451,14 @@ func (q *qemu) setupFileBackedMem(knobs *govmmQemu.Knobs, memory *govmmQemu.Memo } // createSandbox is the Hypervisor sandbox creation implementation for govmmQemu. -func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore, stateful bool) error { +func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error { // Save the tracing context q.ctx = ctx span, _ := q.trace("createSandbox") defer span.Finish() - if err := q.setup(id, hypervisorConfig, vcStore); err != nil { + if err := q.setup(id, hypervisorConfig); err != nil { return err } @@ -729,9 +717,6 @@ func (q *qemu) startSandbox(timeout int) error { if err != nil { return err } - if err = q.storeState(); err != nil { - return err - } } var strErr string @@ -1289,7 +1274,7 @@ func (q *qemu) hotplugAddDevice(devInfo interface{}, devType deviceType) (interf return data, err } - return data, q.storeState() + return data, nil } func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) { @@ -1301,7 +1286,7 @@ func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (int return data, err } - return data, q.storeState() + return data, nil } func (q *qemu) hotplugCPUs(vcpus uint32, op operation) (uint32, error) { @@ -1383,15 +1368,10 @@ func (q *qemu) hotplugAddCPUs(amount uint32) (uint32, error) { hotpluggedVCPUs++ if hotpluggedVCPUs == amount { // All vCPUs were hotplugged - return amount, q.storeState() + return amount, nil } } - // All vCPUs were NOT hotplugged - if err := q.storeState(); err != nil { - q.Logger().Errorf("failed to save hypervisor state after hotplug %d vCPUs: %v", hotpluggedVCPUs, err) - } - return hotpluggedVCPUs, fmt.Errorf("failed to hot add vCPUs: only %d vCPUs of %d were added", hotpluggedVCPUs, amount) } @@ -1408,7 +1388,6 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) { // get the last vCPUs and try to remove it cpu := q.state.HotpluggedVCPUs[len(q.state.HotpluggedVCPUs)-1] if err := q.qmpMonitorCh.qmp.ExecuteDeviceDel(q.qmpMonitorCh.ctx, cpu.ID); err != nil { - q.storeState() return i, fmt.Errorf("failed to hotunplug CPUs, only %d CPUs were hotunplugged: %v", i, err) } @@ -1416,7 +1395,7 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) { 
q.state.HotpluggedVCPUs = q.state.HotpluggedVCPUs[:len(q.state.HotpluggedVCPUs)-1] } - return amount, q.storeState() + return amount, nil } func (q *qemu) hotplugMemory(memDev *memoryDevice, op operation) (int, error) { @@ -1522,7 +1501,7 @@ func (q *qemu) hotplugAddMemory(memDev *memoryDevice) (int, error) { } } q.state.HotpluggedMemory += memDev.sizeMB - return memDev.sizeMB, q.storeState() + return memDev.sizeMB, nil } func (q *qemu) pauseSandbox() error { @@ -1938,7 +1917,7 @@ type qemuGrpc struct { QemuSMP govmmQemu.SMP } -func (q *qemu) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error { +func (q *qemu) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error { var qp qemuGrpc err := json.Unmarshal(j, &qp) if err != nil { @@ -1946,7 +1925,6 @@ func (q *qemu) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, } q.id = qp.ID - q.store = store q.config = *hypervisorConfig q.qmpMonitorCh.ctx = ctx q.qmpMonitorCh.path = qp.QmpChannelpath @@ -1978,16 +1956,6 @@ func (q *qemu) toGrpc() ([]byte, error) { return json.Marshal(&qp) } -func (q *qemu) storeState() error { - if q.store != nil { - q.state.Bridges = q.arch.getBridges() - if err := q.store.Store(store.Hypervisor, q.state); err != nil { - return err - } - } - return nil -} - func (q *qemu) save() (s persistapi.HypervisorState) { pids := q.getPids() if len(pids) != 0 { diff --git a/virtcontainers/qemu_test.go b/virtcontainers/qemu_test.go index a503bb4f7..fd90c5cb3 100644 --- a/virtcontainers/qemu_test.go +++ b/virtcontainers/qemu_test.go @@ -99,7 +99,7 @@ func TestQemuCreateSandbox(t *testing.T) { parentDir := store.SandboxConfigurationRootPath(sandbox.id) assert.NoError(os.MkdirAll(parentDir, store.DirMode)) - err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false) + err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false) assert.NoError(err) assert.NoError(os.RemoveAll(parentDir)) assert.Exactly(qemuConfig, q.config) @@ -131,7 +131,7 @@ func TestQemuCreateSandboxMissingParentDirFail(t *testing.T) { parentDir := store.SandboxConfigurationRootPath(sandbox.id) assert.NoError(os.RemoveAll(parentDir)) - err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false) + err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false) assert.NoError(err) } @@ -364,11 +364,7 @@ func TestHotplugUnsupportedDeviceType(t *testing.T) { config: qemuConfig, } - vcStore, err := store.NewVCSandboxStore(q.ctx, q.id) - assert.NoError(err) - q.store = vcStore - - _, err = q.hotplugAddDevice(&memoryDevice{0, 128, uint64(0), false}, fsDev) + _, err := q.hotplugAddDevice(&memoryDevice{0, 128, uint64(0), false}, fsDev) assert.Error(err) _, err = q.hotplugRemoveDevice(&memoryDevice{0, 128, uint64(0), false}, fsDev) assert.Error(err) @@ -414,7 +410,7 @@ func TestQemuGrpc(t *testing.T) { assert.Nil(err) var q2 qemu - err = q2.fromGrpc(context.Background(), &config, nil, json) + err = q2.fromGrpc(context.Background(), &config, json) assert.Nil(err) assert.True(q.id == q2.id) @@ -429,7 +425,7 @@ func TestQemuFileBackedMem(t *testing.T) { q := &qemu{} sandbox.config.HypervisorConfig.SharedFS = config.VirtioFS - err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, 
sandbox.store, false) + err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false) assert.NoError(err) assert.Equal(q.qemuConfig.Knobs.FileBackedMem, true) @@ -445,7 +441,7 @@ func TestQemuFileBackedMem(t *testing.T) { sandbox.config.HypervisorConfig.SharedFS = config.VirtioFS sandbox.config.HypervisorConfig.MemoryPath = fallbackFileBackedMemDir - err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false) + err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false) expectErr := errors.New("VM templating has been enabled with either virtio-fs or file backed memory and this configuration will not work") assert.Equal(expectErr.Error(), err.Error()) @@ -456,7 +452,7 @@ func TestQemuFileBackedMem(t *testing.T) { q = &qemu{} sandbox.config.HypervisorConfig.FileBackedMemRootDir = "/tmp/xyzabc" - err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false) + err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false) assert.NoError(err) assert.Equal(q.qemuConfig.Knobs.FileBackedMem, false) assert.Equal(q.qemuConfig.Knobs.MemShared, false) diff --git a/virtcontainers/sandbox.go b/virtcontainers/sandbox.go index ae20cfe6b..9dfebf07b 100644 --- a/virtcontainers/sandbox.go +++ b/virtcontainers/sandbox.go @@ -566,38 +566,14 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor } }() - if s.supportNewStore() { - s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, nil) + s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, nil) - // Ignore the error. Restore can fail for a new sandbox - s.Restore() + // Ignore the error. Restore can fail for a new sandbox + s.Restore() - // new store doesn't require hypervisor to be stored immediately - if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig, nil, s.stateful); err != nil { - return nil, err - } - } else { - // Fetch sandbox network to be able to access it from the sandbox structure. - var networkNS NetworkNamespace - if err = s.store.Load(store.Network, &networkNS); err == nil { - s.networkNS = networkNS - } - - devices, err := s.store.LoadDevices() - if err != nil { - s.Logger().WithError(err).WithField("sandboxid", s.id).Warning("load sandbox devices failed") - } - s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, devices) - - // Load sandbox state. The hypervisor.createSandbox call, may need to access statei. 
- state, err := s.store.LoadState() - if err == nil { - s.state = state - } - - if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig, s.store, s.stateful); err != nil { - return nil, err - } + // new store doesn't require hypervisor to be stored immediately + if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig, s.stateful); err != nil { + return nil, err } agentConfig, err := newAgentConfig(sandboxConfig.AgentType, sandboxConfig.AgentConfig) diff --git a/virtcontainers/vm.go b/virtcontainers/vm.go index cdfbb703d..292cf877d 100644 --- a/virtcontainers/vm.go +++ b/virtcontainers/vm.go @@ -34,8 +34,6 @@ type VM struct { memory uint32 cpuDelta uint32 - - store *store.VCStore } // VMConfig is a collection of all info that a new blackbox VM needs. @@ -157,22 +155,13 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) { virtLog.WithField("vm", id).WithField("config", config).Info("create new vm") - vcStore, err := store.NewVCStore(ctx, - store.SandboxConfigurationRoot(id), - store.SandboxRuntimeRoot(id)) - if err != nil { - return nil, err - } - defer func() { if err != nil { virtLog.WithField("vm", id).WithError(err).Error("failed to create new vm") - virtLog.WithField("vm", id).Errorf("Deleting store for %s", id) - vcStore.Delete() } }() - if err = hypervisor.createSandbox(ctx, id, NetworkNamespace{}, &config.HypervisorConfig, vcStore, false); err != nil { + if err = hypervisor.createSandbox(ctx, id, NetworkNamespace{}, &config.HypervisorConfig, false); err != nil { return nil, err } @@ -230,7 +219,6 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) { proxyURL: url, cpu: config.HypervisorConfig.NumVCPUs, memory: config.HypervisorConfig.MemorySize, - store: vcStore, }, nil } @@ -243,22 +231,13 @@ func NewVMFromGrpc(ctx context.Context, v *pb.GrpcVM, config VMConfig) (*VM, err return nil, err } - vcStore, err := store.NewVCStore(ctx, - store.SandboxConfigurationRoot(v.Id), - store.SandboxRuntimeRoot(v.Id)) - if err != nil { - return nil, err - } - defer func() { if err != nil { virtLog.WithField("vm", v.Id).WithError(err).Error("failed to create new vm from Grpc") - virtLog.WithField("vm", v.Id).Errorf("Deleting store for %s", v.Id) - vcStore.Delete() } }() - err = hypervisor.fromGrpc(ctx, &config.HypervisorConfig, vcStore, v.Hypervisor) + err = hypervisor.fromGrpc(ctx, &config.HypervisorConfig, v.Hypervisor) if err != nil { return nil, err } @@ -339,7 +318,7 @@ func (v *VM) Stop() error { return err } - return v.store.Delete() + return nil } // AddCPUs adds num of CPUs to the VM. From 29b55ab88b0b6e9fee91e4938428bcb0c9c94179 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 25 Nov 2019 19:09:33 +0800 Subject: [PATCH 03/11] persist: remove VCStore from container Remove VCStore from container struct. 
Signed-off-by: Wei Zhang --- virtcontainers/api_test.go | 42 +++--- virtcontainers/container.go | 189 +++--------------------- virtcontainers/container_test.go | 17 --- virtcontainers/kata_agent.go | 7 - virtcontainers/persist/api/interface.go | 5 +- virtcontainers/persist/fs/fs.go | 106 +++++++------ virtcontainers/persist/fs/fs_test.go | 47 ++++-- virtcontainers/sandbox.go | 42 ++---- virtcontainers/sandbox_test.go | 44 ------ virtcontainers/store/vc.go | 6 - virtcontainers/virtcontainers_test.go | 2 +- 11 files changed, 152 insertions(+), 355 deletions(-) diff --git a/virtcontainers/api_test.go b/virtcontainers/api_test.go index 740b9f181..85c1ba762 100644 --- a/virtcontainers/api_test.go +++ b/virtcontainers/api_test.go @@ -139,7 +139,7 @@ func TestCreateSandboxNoopAgentSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) } @@ -176,7 +176,7 @@ func TestCreateSandboxKataAgentSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) } @@ -203,7 +203,7 @@ func TestDeleteSandboxNoopAgentSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -248,7 +248,7 @@ func TestDeleteSandboxKataAgentSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -264,7 +264,7 @@ func TestDeleteSandboxFailing(t *testing.T) { defer cleanUp() assert := assert.New(t) - sandboxDir := store.SandboxConfigurationRootPath(testSandboxID) + sandboxDir := store.SandboxRuntimeRootPath(testSandboxID) os.Remove(sandboxDir) p, err := DeleteSandbox(context.Background(), testSandboxID) @@ -328,7 +328,7 @@ func TestStartSandboxFailing(t *testing.T) { defer cleanUp() assert := assert.New(t) - sandboxDir := store.SandboxConfigurationRootPath(testSandboxID) + sandboxDir := store.SandboxRuntimeRootPath(testSandboxID) os.Remove(sandboxDir) p, err := StartSandbox(context.Background(), testSandboxID) @@ -395,7 +395,7 @@ func TestStopSandboxKataAgentSuccessful(t *testing.T) { func TestStopSandboxFailing(t *testing.T) { defer cleanUp() - sandboxDir := store.SandboxConfigurationRootPath(testSandboxID) + sandboxDir := store.SandboxRuntimeRootPath(testSandboxID) os.Remove(sandboxDir) p, err := StopSandbox(context.Background(), testSandboxID, false) @@ -413,7 +413,7 @@ func TestRunSandboxNoopAgentSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) } @@ -451,7 +451,7 @@ func TestRunSandboxKataAgentSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -680,7 +680,7 @@ func TestCreateContainerSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := 
store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -734,7 +734,7 @@ func TestDeleteContainerSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -778,7 +778,7 @@ func TestDeleteContainerFailingNoContainer(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -835,7 +835,7 @@ func TestStartContainerFailingNoContainer(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -856,7 +856,7 @@ func TestStartContainerFailingSandboxNotStarted(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -936,7 +936,7 @@ func TestStopContainerFailingNoContainer(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -1040,7 +1040,7 @@ func TestEnterContainerFailingNoContainer(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -1093,7 +1093,7 @@ func TestStatusContainerSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -1136,7 +1136,7 @@ func TestStatusContainerStateReady(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -1199,7 +1199,7 @@ func TestStatusContainerStateRunning(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -1414,7 +1414,7 @@ func createAndStartSandbox(ctx context.Context, config SandboxConfig) (sandbox V return nil, "", err } - sandboxDir = store.SandboxConfigurationRootPath(sandbox.ID()) + sandboxDir = store.SandboxRuntimeRootPath(sandbox.ID()) _, err = os.Stat(sandboxDir) if err != nil { return nil, "", err @@ -1699,7 +1699,7 @@ func TestCleanupContainer(t *testing.T) { CleanupContainer(ctx, p.ID(), c.ID(), true) } - sandboxDir := store.SandboxConfigurationRootPath(p.ID()) + sandboxDir := store.SandboxRuntimeRootPath(p.ID()) _, err = os.Stat(sandboxDir) if err == nil { diff --git a/virtcontainers/container.go b/virtcontainers/container.go index 66f71fcb1..f9fe271d0 100644 --- a/virtcontainers/container.go +++ b/virtcontainers/container.go @@ -338,8 +338,6 @@ type Container struct { systemMountsInfo SystemMountsInfo ctx context.Context - - store *store.VCStore } // ID returns the container identifier string. 
@@ -391,13 +389,6 @@ func (c *Container) GetPid() int { func (c *Container) setStateFstype(fstype string) error { c.state.Fstype = fstype - if !c.sandbox.supportNewStore() { - // experimental runtime use "persist.json" which doesn't need "state.json" anymore - if err := c.storeState(); err != nil { - return err - } - } - return nil } @@ -421,48 +412,10 @@ func (c *Container) GetPatchedOCISpec() *specs.Spec { // storeContainer stores a container config. func (c *Container) storeContainer() error { - if c.sandbox.supportNewStore() { - if err := c.sandbox.Save(); err != nil { - return err - } - return nil + if err := c.sandbox.Save(); err != nil { + return err } - return c.store.Store(store.Configuration, *(c.config)) -} - -func (c *Container) storeProcess() error { - return c.store.Store(store.Process, c.process) -} - -func (c *Container) storeMounts() error { - return c.store.Store(store.Mounts, c.mounts) -} - -func (c *Container) storeDevices() error { - return c.store.Store(store.DeviceIDs, c.devices) -} - -func (c *Container) storeState() error { - return c.store.Store(store.State, c.state) -} - -func (c *Container) loadMounts() ([]Mount, error) { - var mounts []Mount - if err := c.store.Load(store.Mounts, &mounts); err != nil { - return []Mount{}, err - } - - return mounts, nil -} - -func (c *Container) loadDevices() ([]ContainerDevice, error) { - var devices []ContainerDevice - - if err := c.store.Load(store.DeviceIDs, &devices); err != nil { - return []ContainerDevice{}, err - } - - return devices, nil + return nil } // setContainerState sets both the in-memory and on-disk state of the @@ -476,17 +429,9 @@ func (c *Container) setContainerState(state types.StateString) error { // update in-memory state c.state.State = state - if c.sandbox.supportNewStore() { - // flush data to storage - if err := c.sandbox.Save(); err != nil { - return err - } - } else { - // experimental runtime use "persist.json" which doesn't need "state.json" anymore - // update on-disk state - if err := c.store.Store(store.State, c.state); err != nil { - return err - } + // flush data to storage + if err := c.sandbox.Save(); err != nil { + return err } return nil @@ -571,13 +516,6 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, guestSharedDir string) ( if err := c.sandbox.devManager.AttachDevice(m.BlockDeviceID, c.sandbox); err != nil { return nil, nil, err } - - if !c.sandbox.supportNewStore() { - if err := c.sandbox.storeSandboxDevices(); err != nil { - //TODO: roll back? 
- return nil, nil, err - } - } continue } @@ -620,12 +558,6 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, guestSharedDir string) ( sharedDirMounts[sharedDirMount.Destination] = sharedDirMount } - if !c.sandbox.supportNewStore() { - if err := c.storeMounts(); err != nil { - return nil, nil, err - } - } - return sharedDirMounts, ignoredMounts, nil } @@ -751,46 +683,19 @@ func newContainer(sandbox *Sandbox, contConfig *ContainerConfig) (*Container, er ctx: sandbox.ctx, } - storeAlreadyExists := store.VCContainerStoreExists(sandbox.ctx, c.sandboxID, c.id) - ctrStore, err := store.NewVCContainerStore(sandbox.ctx, c.sandboxID, c.id) - if err != nil { + // experimental runtime use "persist.json" instead of legacy "state.json" as storage + err := c.Restore() + if err == nil { + //container restored + return c, nil + } + + // Unexpected error + if !os.IsNotExist(err) && err != errContainerPersistNotExist { return nil, err } - defer func() { - if err != nil && !storeAlreadyExists { - if delerr := c.store.Delete(); delerr != nil { - c.Logger().WithError(delerr).WithField("cid", c.id).Error("delete store failed") - } - } - }() - - c.store = ctrStore - - // experimental runtime use "persist.json" instead of legacy "state.json" as storage - if c.sandbox.supportNewStore() { - err := c.Restore() - if err == nil { - //container restored - return c, nil - } - - // Unexpected error - if !os.IsNotExist(err) && err != errContainerPersistNotExist { - return nil, err - } - // Go to next step for first created container - } else { - state, err := c.store.LoadContainerState() - if err == nil { - c.state = state - } - - var process Process - if err := c.store.Load(store.Process, &process); err == nil { - c.process = process - } - } + // Go to next step for first created container if err = c.createMounts(); err != nil { return nil, err } @@ -803,17 +708,6 @@ func newContainer(sandbox *Sandbox, contConfig *ContainerConfig) (*Container, er } func (c *Container) createMounts() error { - // If sandbox supports "newstore", only newly created container can reach this function, - // so we don't call restore when `supportNewStore` is true - if !c.sandbox.supportNewStore() { - mounts, err := c.loadMounts() - if err == nil { - // restore mounts from disk - c.mounts = mounts - return nil - } - } - // Create block devices for newly created container if err := c.createBlockDevices(); err != nil { return err @@ -823,18 +717,6 @@ func (c *Container) createMounts() error { } func (c *Container) createDevices(contConfig *ContainerConfig) error { - // If sandbox supports "newstore", only newly created container can reach this function, - // so we don't call restore when `supportNewStore` is true - if !c.sandbox.supportNewStore() { - // Devices will be found in storage after create stage has completed. - // We load devices from storage at all other stages. - storedDevices, err := c.loadDevices() - if err == nil { - c.devices = storedDevices - return nil - } - } - // If devices were not found in storage, create Device implementations // from the configuration. This should happen at create. 
var storedDevices []ContainerDevice @@ -914,12 +796,6 @@ func (c *Container) create() (err error) { // inside the VM c.getSystemMountInfo() - if !c.sandbox.supportNewStore() { - if err = c.storeDevices(); err != nil { - return - } - } - process, err := c.sandbox.agent.createContainer(c.sandbox, c) if err != nil { return err @@ -932,13 +808,6 @@ func (c *Container) create() (err error) { } } - if !c.sandbox.supportNewStore() { - // Store the container process returned by the agent. - if err = c.storeProcess(); err != nil { - return - } - } - if err = c.setContainerState(types.StateReady); err != nil { return } @@ -964,7 +833,7 @@ func (c *Container) delete() error { } } - return c.store.Delete() + return c.sandbox.storeSandbox() } // checkSandboxRunning validates the container state. @@ -1383,12 +1252,6 @@ func (c *Container) plugDevice(devicePath string) error { if err := c.sandbox.devManager.AttachDevice(b.DeviceID(), c.sandbox); err != nil { return err } - - if !c.sandbox.supportNewStore() { - if err := c.sandbox.storeSandboxDevices(); err != nil { - return err - } - } } return nil } @@ -1419,12 +1282,6 @@ func (c *Container) removeDrive() (err error) { return err } } - - if !c.sandbox.supportNewStore() { - if err := c.sandbox.storeSandboxDevices(); err != nil { - return err - } - } } return nil @@ -1439,12 +1296,6 @@ func (c *Container) attachDevices() error { return err } } - - if !c.sandbox.supportNewStore() { - if err := c.sandbox.storeSandboxDevices(); err != nil { - return err - } - } return nil } @@ -1467,12 +1318,6 @@ func (c *Container) detachDevices() error { } } } - - if !c.sandbox.supportNewStore() { - if err := c.sandbox.storeSandboxDevices(); err != nil { - return err - } - } return nil } diff --git a/virtcontainers/container_test.go b/virtcontainers/container_test.go index 995fccee6..2ace0a726 100644 --- a/virtcontainers/container_test.go +++ b/virtcontainers/container_test.go @@ -341,18 +341,6 @@ func TestContainerAddDriveDir(t *testing.T) { rootFs: RootFs{Target: fakeRootfs, Mounted: true}, } - containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, container.id) - assert.Nil(err) - container.store = containerStore - - // create state file - path := store.ContainerRuntimeRootPath(testSandboxID, container.ID()) - stateFilePath := filepath.Join(path, store.StateFile) - os.Remove(stateFilePath) - - _, err = os.Create(stateFilePath) - assert.NoError(err) - // Make the checkStorageDriver func variable point to a fake check function savedFunc := checkStorageDriver checkStorageDriver = func(major, minor int) (bool, error) { @@ -405,11 +393,6 @@ func TestContainerRootfsPath(t *testing.T) { rootFs: RootFs{Target: fakeRootfs, Mounted: true}, rootfsSuffix: "rootfs", } - cvcstore, err := store.NewVCContainerStore(context.Background(), - sandbox.id, - container.id) - assert.Nil(t, err) - container.store = cvcstore container.hotplugDrive() assert.Empty(t, container.rootfsSuffix) diff --git a/virtcontainers/kata_agent.go b/virtcontainers/kata_agent.go index 141f5402d..a08d8325e 100644 --- a/virtcontainers/kata_agent.go +++ b/virtcontainers/kata_agent.go @@ -1444,13 +1444,6 @@ func (k *kataAgent) handleBlockVolumes(c *Container) ([]*grpc.Storage, error) { // device is detached with detachDevices() for a container. 
c.devices = append(c.devices, ContainerDevice{ID: id, ContainerPath: m.Destination}) - if !c.sandbox.supportNewStore() { - if err := c.storeDevices(); err != nil { - k.Logger().WithField("device", id).WithError(err).Error("store device failed") - return nil, err - } - } - vol := &grpc.Storage{} device := c.sandbox.devManager.GetDeviceByID(id) diff --git a/virtcontainers/persist/api/interface.go b/virtcontainers/persist/api/interface.go index a14dbc5e1..433e5ad41 100644 --- a/virtcontainers/persist/api/interface.go +++ b/virtcontainers/persist/api/interface.go @@ -13,5 +13,8 @@ type PersistDriver interface { // We only support get data for one whole sandbox FromDisk(sid string) (SandboxState, map[string]ContainerState, error) // Destroy will remove everything from storage - Destroy() error + Destroy(sid string) error + // Lock locks the persist driver, "exclusive" decides whether the lock is exclusive or shared. + // It returns Unlock Function and errors + Lock(sid string, exclusive bool) (func() error, error) } diff --git a/virtcontainers/persist/fs/fs.go b/virtcontainers/persist/fs/fs.go index afc1c570c..ac8d1c48b 100644 --- a/virtcontainers/persist/fs/fs.go +++ b/virtcontainers/persist/fs/fs.go @@ -51,8 +51,6 @@ var RunStoragePath = func() string { type FS struct { sandboxState *persistapi.SandboxState containerState map[string]persistapi.ContainerState - - lockFile *os.File } var fsLog = logrus.WithField("source", "virtcontainers/persist/fs") @@ -77,21 +75,21 @@ func Init() (persistapi.PersistDriver, error) { }, nil } -func (fs *FS) sandboxDir() (string, error) { - id := fs.sandboxState.SandboxContainer - if id == "" { - return "", fmt.Errorf("sandbox container id required") - } - - return filepath.Join(RunStoragePath(), id), nil +func (fs *FS) sandboxDir(sandboxID string) (string, error) { + return filepath.Join(RunStoragePath(), sandboxID), nil } // ToDisk sandboxState and containerState to disk func (fs *FS) ToDisk(ss persistapi.SandboxState, cs map[string]persistapi.ContainerState) (retErr error) { + id := ss.SandboxContainer + if id == "" { + return fmt.Errorf("sandbox container id required") + } + fs.sandboxState = &ss fs.containerState = cs - sandboxDir, err := fs.sandboxDir() + sandboxDir, err := fs.sandboxDir(id) if err != nil { return err } @@ -100,15 +98,10 @@ func (fs *FS) ToDisk(ss persistapi.SandboxState, cs map[string]persistapi.Contai return err } - if err := fs.lock(); err != nil { - return err - } - defer fs.unlock() - // if error happened, destroy all dirs defer func() { if retErr != nil { - if err := fs.Destroy(); err != nil { + if err := fs.Destroy(id); err != nil { fs.Logger().WithError(err).Errorf("failed to destroy dirs") } } @@ -155,6 +148,27 @@ func (fs *FS) ToDisk(ss persistapi.SandboxState, cs map[string]persistapi.Contai } } + // Walk sandbox dir and find container. + files, err := ioutil.ReadDir(sandboxDir) + if err != nil { + return err + } + + // Remove non-existing containers + for _, file := range files { + if !file.IsDir() { + continue + } + // Container dir exists. + cid := file.Name() + + // Container should be removed when container id doesn't exist in cs. 
+ if _, ok := cs[cid]; !ok { + if err := os.RemoveAll(filepath.Join(sandboxDir, cid)); err != nil { + return err + } + } + } return nil } @@ -165,18 +179,11 @@ func (fs *FS) FromDisk(sid string) (persistapi.SandboxState, map[string]persista return ss, nil, fmt.Errorf("restore requires sandbox id") } - fs.sandboxState.SandboxContainer = sid - - sandboxDir, err := fs.sandboxDir() + sandboxDir, err := fs.sandboxDir(sid) if err != nil { return ss, nil, err } - if err := fs.lock(); err != nil { - return ss, nil, err - } - defer fs.unlock() - // get sandbox configuration from persist data sandboxFile := filepath.Join(sandboxDir, persistFile) f, err := os.OpenFile(sandboxFile, os.O_RDONLY, fileMode) @@ -224,8 +231,12 @@ func (fs *FS) FromDisk(sid string) (persistapi.SandboxState, map[string]persista } // Destroy removes everything from disk -func (fs *FS) Destroy() error { - sandboxDir, err := fs.sandboxDir() +func (fs *FS) Destroy(sandboxID string) error { + if sandboxID == "" { + return fmt.Errorf("sandbox container id required") + } + + sandboxDir, err := fs.sandboxDir(sandboxID) if err != nil { return err } @@ -236,39 +247,42 @@ func (fs *FS) Destroy() error { return nil } -func (fs *FS) lock() error { - sandboxDir, err := fs.sandboxDir() +func (fs *FS) Lock(sandboxID string, exclusive bool) (func() error, error) { + if sandboxID == "" { + return nil, fmt.Errorf("sandbox container id required") + } + + sandboxDir, err := fs.sandboxDir(sandboxID) if err != nil { - return err + return nil, err } f, err := os.Open(sandboxDir) if err != nil { - return err + return nil, err } - if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { + var lockType int + if exclusive { + lockType = syscall.LOCK_EX | syscall.LOCK_NB + } else { + lockType = syscall.LOCK_SH | syscall.LOCK_NB + } + + if err := syscall.Flock(int(f.Fd()), lockType); err != nil { f.Close() - return err + return nil, err } - fs.lockFile = f - return nil -} + unlockFunc := func() error { + defer f.Close() + if err := syscall.Flock(int(f.Fd()), syscall.LOCK_UN); err != nil { + return err + } -func (fs *FS) unlock() error { - if fs.lockFile == nil { return nil } - - lockFile := fs.lockFile - defer lockFile.Close() - fs.lockFile = nil - if err := syscall.Flock(int(lockFile.Fd()), syscall.LOCK_UN); err != nil { - return err - } - - return nil + return unlockFunc, nil } // TestSetRunStoragePath set RunStoragePath to path diff --git a/virtcontainers/persist/fs/fs_test.go b/virtcontainers/persist/fs/fs_test.go index 4b5d853f5..75bb4d879 100644 --- a/virtcontainers/persist/fs/fs_test.go +++ b/virtcontainers/persist/fs/fs_test.go @@ -40,18 +40,25 @@ func TestFsLock(t *testing.T) { os.RemoveAll(testDir) }() - fs.sandboxState.SandboxContainer = "test-fs-driver" - sandboxDir, err := fs.sandboxDir() + sid := "test-fs-driver" + fs.sandboxState.SandboxContainer = sid + sandboxDir, err := fs.sandboxDir(sid) assert.Nil(t, err) err = os.MkdirAll(sandboxDir, dirMode) assert.Nil(t, err) - assert.Nil(t, fs.lock()) - assert.NotNil(t, fs.lock()) + unlockFunc, err := fs.Lock(sid, false) + assert.Nil(t, err) + unlockFunc2, err := fs.Lock(sid, false) + assert.Nil(t, err) + _, err = fs.Lock(sid, true) + assert.NotNil(t, err) - assert.Nil(t, fs.unlock()) - assert.Nil(t, fs.unlock()) + assert.Nil(t, unlockFunc()) + // double unlock should return error + assert.NotNil(t, unlockFunc()) + assert.Nil(t, unlockFunc2()) } func TestFsDriver(t *testing.T) { @@ -88,7 +95,7 @@ func TestFsDriver(t *testing.T) { assert.Equal(t, ss.SandboxContainer, id) 
assert.Equal(t, ss.State, "") - // flush all to disk + // flush all to disk. ss.State = "running" assert.Nil(t, fs.ToDisk(ss, cs)) ss, cs, err = fs.FromDisk(id) @@ -99,9 +106,31 @@ func TestFsDriver(t *testing.T) { assert.Equal(t, ss.SandboxContainer, id) assert.Equal(t, ss.State, "running") - assert.Nil(t, fs.Destroy()) + // add new container state. + cs["test-container"] = persistapi.ContainerState{ + State: "ready", + } + assert.Nil(t, fs.ToDisk(ss, cs)) + ss, cs, err = fs.FromDisk(id) + assert.Nil(t, err) + assert.NotNil(t, ss) + assert.Equal(t, len(cs), 1) + c, ok := cs["test-container"] + assert.True(t, ok) + assert.Equal(t, c.State, "ready") - dir, err := fs.sandboxDir() + // clean all container. + cs = make(map[string]persistapi.ContainerState) + assert.Nil(t, fs.ToDisk(ss, cs)) + ss, cs, err = fs.FromDisk(id) + assert.Nil(t, err) + assert.NotNil(t, ss) + assert.Equal(t, len(cs), 0) + + // destroy whole sandbox dir. + assert.Nil(t, fs.Destroy(id)) + + dir, err := fs.sandboxDir(id) assert.Nil(t, err) assert.NotEqual(t, len(dir), 0) diff --git a/virtcontainers/sandbox.go b/virtcontainers/sandbox.go index 9dfebf07b..dea3cfe65 100644 --- a/virtcontainers/sandbox.go +++ b/virtcontainers/sandbox.go @@ -597,23 +597,9 @@ func (s *Sandbox) storeSandbox() error { span, _ := s.trace("storeSandbox") defer span.Finish() - if s.supportNewStore() { - // flush data to storage - if err := s.Save(); err != nil { - return err - } - } else { - err := s.store.Store(store.Configuration, *(s.config)) - if err != nil { - return err - } - - for _, container := range s.containers { - err = container.store.Store(store.Configuration, *(container.config)) - if err != nil { - return err - } - } + // flush data to storage + if err := s.Save(); err != nil { + return err } return nil } @@ -777,7 +763,7 @@ func (s *Sandbox) Delete() error { s.agent.cleanup(s) if s.supportNewStore() { - if err := s.newStore.Destroy(); err != nil { + if err := s.newStore.Destroy(s.id); err != nil { return err } } @@ -1070,7 +1056,6 @@ func (s *Sandbox) stopVM() error { if s.disableVMShutdown { // Do not kill the VM - allow the agent to shut it down // (only used to support static agent tracing). - s.Logger().Info("Not stopping VM") return nil } @@ -1118,7 +1103,6 @@ func (s *Sandbox) fetchContainers() error { // This should be called only when the sandbox is already created. // It will add new container config to sandbox.config.Containers func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, error) { - storeAlreadyExists := store.VCContainerStoreExists(s.ctx, s.id, contConfig.ID) // Create the container. c, err := newContainer(s, &contConfig) if err != nil { @@ -1134,11 +1118,6 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro // delete container config s.config.Containers = s.config.Containers[:len(s.config.Containers)-1] } - if !storeAlreadyExists { - if delerr := c.store.Delete(); delerr != nil { - c.Logger().WithError(delerr).WithField("cid", c.id).Error("delete store failed") - } - } } }() @@ -1152,6 +1131,13 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro return nil, err } + defer func() { + // Rollback if error happens. + if err != nil { + s.removeContainer(c.id) + } + }() + // Sandbox is reponsable to update VM resources needed by Containers // Update resources after having added containers to the sandbox, since // container status is requiered to know if more resources should be added. 
@@ -1160,12 +1146,6 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro return nil, err } - // Store it. - err = c.storeContainer() - if err != nil { - return nil, err - } - if err = s.cgroupsUpdate(); err != nil { return nil, err } diff --git a/virtcontainers/sandbox_test.go b/virtcontainers/sandbox_test.go index 08d43449d..40cb284f4 100644 --- a/virtcontainers/sandbox_test.go +++ b/virtcontainers/sandbox_test.go @@ -7,7 +7,6 @@ package virtcontainers import ( "context" - "encoding/json" "fmt" "io/ioutil" "os" @@ -668,17 +667,6 @@ func TestContainerStateSetFstype(t *testing.T) { cImpl, ok := c.(*Container) assert.True(ok) - containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, c.ID()) - assert.NoError(err) - - cImpl.store = containerStore - - path := store.ContainerRuntimeRootPath(testSandboxID, c.ID()) - stateFilePath := filepath.Join(path, store.StateFile) - - f, err := os.Create(stateFilePath) - assert.NoError(err) - state := types.ContainerState{ State: "ready", Fstype: "vfs", @@ -686,34 +674,10 @@ func TestContainerStateSetFstype(t *testing.T) { cImpl.state = state - stateData := `{ - "state":"ready", - "fstype":"vfs", - }` - - n, err := f.WriteString(stateData) - defer f.Close() - assert.NoError(err) - assert.Equal(n, len(stateData)) - newFstype := "ext4" err = cImpl.setStateFstype(newFstype) assert.NoError(err) assert.Equal(cImpl.state.Fstype, newFstype) - - fileData, err := ioutil.ReadFile(stateFilePath) - assert.NoError(err) - - // experimental features doesn't write state.json - if sandbox.supportNewStore() { - return - } - - var res types.ContainerState - err = json.Unmarshal([]byte(string(fileData)), &res) - assert.NoError(err) - assert.Equal(res.Fstype, newFstype) - assert.Equal(res.State, state.State) } const vfioPath = "/dev/vfio/" @@ -916,8 +880,6 @@ func TestCreateContainer(t *testing.T) { _, err = s.CreateContainer(contConfig) assert.NotNil(t, err, "Should failed to create a duplicated container") assert.Equal(t, len(s.config.Containers), 1, "Container config list length from sandbox structure should be 1") - ret := store.VCContainerStoreExists(s.ctx, testSandboxID, contID) - assert.True(t, ret, "Should not delete container store that already existed") } func TestDeleteContainer(t *testing.T) { @@ -1023,8 +985,6 @@ func TestDeleteStoreWhenCreateContainerFail(t *testing.T) { s.state.CgroupPath = filepath.Join(testDir, "bad-cgroup") _, err = s.CreateContainer(contConfig) assert.NotNil(t, err, "Should fail to create container due to wrong cgroup") - ret := store.VCContainerStoreExists(s.ctx, testSandboxID, contID) - assert.False(t, ret, "Should delete configuration root after failed to create a container") } func TestDeleteStoreWhenNewContainerFail(t *testing.T) { @@ -1307,10 +1267,6 @@ func TestPreAddDevice(t *testing.T) { } container.state.State = types.StateReady - containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, container.id) - assert.Nil(t, err) - container.store = containerStore - // create state file path := store.ContainerRuntimeRootPath(testSandboxID, container.ID()) err = os.MkdirAll(path, store.DirMode) diff --git a/virtcontainers/store/vc.go b/virtcontainers/store/vc.go index 79642b059..92e7eb368 100644 --- a/virtcontainers/store/vc.go +++ b/virtcontainers/store/vc.go @@ -334,9 +334,3 @@ func VCSandboxStoreExists(ctx context.Context, sandboxID string) bool { s := stores.findStore(SandboxConfigurationRoot(sandboxID)) return s != nil } - -// VCContainerStoreExists returns true if a 
container store already exists. -func VCContainerStoreExists(ctx context.Context, sandboxID string, containerID string) bool { - s := stores.findStore(ContainerConfigurationRoot(sandboxID, containerID)) - return s != nil -} diff --git a/virtcontainers/virtcontainers_test.go b/virtcontainers/virtcontainers_test.go index e9af85826..1a6f41c64 100644 --- a/virtcontainers/virtcontainers_test.go +++ b/virtcontainers/virtcontainers_test.go @@ -175,7 +175,7 @@ func TestMain(m *testing.M) { // allow the tests to run without affecting the host system. store.ConfigStoragePath = func() string { return filepath.Join(testDir, store.StoragePathSuffix, "config") } store.RunStoragePath = func() string { return filepath.Join(testDir, store.StoragePathSuffix, "run") } - fs.TestSetRunStoragePath(filepath.Join(testDir, "vc", "sbs")) + fs.TestSetRunStoragePath(filepath.Join(testDir, "vc", "run")) defer func() { store.ConfigStoragePath = ConfigStoragePathSaved From 508101bc0fcd9f73a48566ba9795273b9dba36f2 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 26 Nov 2019 12:21:26 +0800 Subject: [PATCH 04/11] persist: fix vmtemplate storage leak Fix VM template storage leak by adding delete operations, we need to delete sandbox storage dirs when stop VM. Signed-off-by: Wei Zhang --- virtcontainers/vm.go | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/virtcontainers/vm.go b/virtcontainers/vm.go index 292cf877d..99fda8b17 100644 --- a/virtcontainers/vm.go +++ b/virtcontainers/vm.go @@ -14,6 +14,8 @@ import ( "time" pb "github.com/kata-containers/runtime/protocols/cache" + "github.com/kata-containers/runtime/virtcontainers/persist" + persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api" "github.com/kata-containers/runtime/virtcontainers/pkg/uuid" "github.com/kata-containers/runtime/virtcontainers/store" "github.com/sirupsen/logrus" @@ -34,6 +36,8 @@ type VM struct { memory uint32 cpuDelta uint32 + + store persistapi.PersistDriver } // VMConfig is a collection of all info that a new blackbox VM needs. @@ -155,9 +159,16 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) { virtLog.WithField("vm", id).WithField("config", config).Info("create new vm") + store, err := persist.GetDriver("fs") + if err != nil { + return nil, err + } + defer func() { if err != nil { virtLog.WithField("vm", id).WithError(err).Error("failed to create new vm") + virtLog.WithField("vm", id).Errorf("Deleting store for %s", id) + store.Destroy(id) } }() @@ -219,6 +230,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) { proxyURL: url, cpu: config.HypervisorConfig.NumVCPUs, memory: config.HypervisorConfig.MemorySize, + store: store, }, nil } @@ -231,9 +243,16 @@ func NewVMFromGrpc(ctx context.Context, v *pb.GrpcVM, config VMConfig) (*VM, err return nil, err } + store, err := persist.GetDriver("fs") + if err != nil { + return nil, err + } + defer func() { if err != nil { virtLog.WithField("vm", v.Id).WithError(err).Error("failed to create new vm from Grpc") + virtLog.WithField("vm", v.Id).Errorf("Deleting store for %s", v.Id) + store.Destroy(v.Id) } }() @@ -261,6 +280,7 @@ func NewVMFromGrpc(ctx context.Context, v *pb.GrpcVM, config VMConfig) (*VM, err cpu: v.Cpu, memory: v.Memory, cpuDelta: v.CpuDelta, + store: store, }, nil } @@ -318,7 +338,7 @@ func (v *VM) Stop() error { return err } - return nil + return v.store.Destroy(v.id) } // AddCPUs adds num of CPUs to the VM. 
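For reference, a minimal sketch (not part of any patch in this series) of how a caller is expected to use the PersistDriver API after these changes: Destroy() now takes a sandbox ID and Lock() returns an unlock closure instead of keeping a lock file on the driver. Only persist.GetDriver("fs"), Lock, ToDisk and Destroy come from the patches themselves; the function name, the main wrapper and the "example-sandbox" ID below are made up purely for illustration.

package main

import (
	"fmt"

	"github.com/kata-containers/runtime/virtcontainers/persist"
)

// teardownSandboxState is a hypothetical caller, written only to show the
// Lock/Destroy calling convention introduced by this series.
func teardownSandboxState(sid string) error {
	// "fs" is the only persist driver wired up by these patches.
	store, err := persist.GetDriver("fs")
	if err != nil {
		return fmt.Errorf("failed to get fs persist driver: %v", err)
	}

	// Take the per-sandbox flock; true requests an exclusive lock,
	// false a shared one. Lock returns an unlock closure that releases
	// the flock and closes the lock fd.
	unlock, err := store.Lock(sid, true)
	if err != nil {
		return err
	}
	defer unlock()

	// ... mutate sandbox/container state and flush it with ToDisk() ...

	// On final teardown, remove everything under the sandbox run directory.
	return store.Destroy(sid)
}

func main() {
	if err := teardownSandboxState("example-sandbox"); err != nil {
		fmt.Println(err)
	}
}

Returning an unlock closure, rather than stashing lockFile on the FS struct, is what lets a single driver instance hand out both shared and exclusive per-sandbox locks; the next patch relies on that when it replaces rLockSandbox and rwLockSandbox.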
From b63e517f6d8ec79fe9df5d878b3ae9075691dee1 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 27 Nov 2019 17:04:53 +0800 Subject: [PATCH 05/11] persist: replace sandbox lock with newstore.Lock Replace rLockSandbox and rwLockSandbox with new store lock functions. Signed-off-by: Wei Zhang --- virtcontainers/api.go | 108 +++++++++++++-------------- virtcontainers/persist/fs/fs.go | 4 +- virtcontainers/persist/fs/fs_test.go | 35 +++++++-- virtcontainers/sandbox.go | 32 ++------ 4 files changed, 90 insertions(+), 89 deletions(-) diff --git a/virtcontainers/api.go b/virtcontainers/api.go index 7072926e2..3b86a03eb 100644 --- a/virtcontainers/api.go +++ b/virtcontainers/api.go @@ -145,11 +145,11 @@ func DeleteSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) { return nil, vcTypes.ErrNeedSandboxID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() // Fetch the sandbox from storage and create it. s, err := fetchSandbox(ctx, sandboxID) @@ -178,11 +178,11 @@ func FetchSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) { return nil, vcTypes.ErrNeedSandboxID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() // Fetch the sandbox from storage and create it. s, err := fetchSandbox(ctx, sandboxID) @@ -215,11 +215,11 @@ func StartSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) { return nil, vcTypes.ErrNeedSandboxID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() // Fetch the sandbox from storage and create it. s, err := fetchSandbox(ctx, sandboxID) @@ -251,11 +251,11 @@ func StopSandbox(ctx context.Context, sandboxID string, force bool) (VCSandbox, return nil, vcTypes.ErrNeedSandbox } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() // Fetch the sandbox from storage and create it. 
s, err := fetchSandbox(ctx, sandboxID) @@ -290,11 +290,11 @@ func RunSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor } defer s.releaseStatelessSandbox() - lockFile, err := rwLockSandbox(ctx, s.id) + unlock, err := rwLockSandbox(s.id) if err != nil { return nil, err } - defer unlockSandbox(ctx, s.id, lockFile) + defer unlock() // Start the sandbox err = s.Start() @@ -310,12 +310,7 @@ func ListSandbox(ctx context.Context) ([]SandboxStatus, error) { span, ctx := trace(ctx, "ListSandbox") defer span.Finish() - var sbsdir string - if supportNewStore(ctx) { - sbsdir = fs.RunStoragePath() - } else { - sbsdir = store.RunStoragePath() - } + sbsdir := fs.RunStoragePath() dir, err := os.Open(sbsdir) if err != nil { @@ -356,15 +351,14 @@ func StatusSandbox(ctx context.Context, sandboxID string) (SandboxStatus, error) return SandboxStatus{}, vcTypes.ErrNeedSandboxID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return SandboxStatus{}, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { - unlockSandbox(ctx, sandboxID, lockFile) return SandboxStatus{}, err } defer s.releaseStatelessSandbox() @@ -402,11 +396,11 @@ func CreateContainer(ctx context.Context, sandboxID string, containerConfig Cont return nil, nil, vcTypes.ErrNeedSandboxID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return nil, nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -441,11 +435,11 @@ func DeleteContainer(ctx context.Context, sandboxID, containerID string) (VCCont return nil, vcTypes.ErrNeedContainerID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -470,11 +464,11 @@ func StartContainer(ctx context.Context, sandboxID, containerID string) (VCConta return nil, vcTypes.ErrNeedContainerID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -499,11 +493,11 @@ func StopContainer(ctx context.Context, sandboxID, containerID string) (VCContai return nil, vcTypes.ErrNeedContainerID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -528,11 +522,11 @@ func EnterContainer(ctx context.Context, sandboxID, containerID string, cmd type return nil, nil, nil, vcTypes.ErrNeedContainerID } - lockFile, err := rLockSandbox(ctx, sandboxID) + unlock, err := rLockSandbox(sandboxID) if err != nil { return nil, nil, nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -562,15 +556,14 @@ func StatusContainer(ctx context.Context, sandboxID, containerID string) (Contai return ContainerStatus{}, vcTypes.ErrNeedContainerID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return ContainerStatus{}, err } - defer unlockSandbox(ctx, 
sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { - unlockSandbox(ctx, sandboxID, lockFile) return ContainerStatus{}, err } defer s.releaseStatelessSandbox() @@ -646,11 +639,11 @@ func KillContainer(ctx context.Context, sandboxID, containerID string, signal sy return vcTypes.ErrNeedContainerID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -675,11 +668,11 @@ func ProcessListContainer(ctx context.Context, sandboxID, containerID string, op return nil, vcTypes.ErrNeedContainerID } - lockFile, err := rLockSandbox(ctx, sandboxID) + unlock, err := rLockSandbox(sandboxID) if err != nil { return nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -704,11 +697,11 @@ func UpdateContainer(ctx context.Context, sandboxID, containerID string, resourc return vcTypes.ErrNeedContainerID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -732,12 +725,12 @@ func StatsContainer(ctx context.Context, sandboxID, containerID string) (Contain if containerID == "" { return ContainerStats{}, vcTypes.ErrNeedContainerID } - lockFile, err := rLockSandbox(ctx, sandboxID) + + unlock, err := rLockSandbox(sandboxID) if err != nil { return ContainerStats{}, err } - - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -758,12 +751,11 @@ func StatsSandbox(ctx context.Context, sandboxID string) (SandboxStats, []Contai return SandboxStats{}, []ContainerStats{}, vcTypes.ErrNeedSandboxID } - lockFile, err := rLockSandbox(ctx, sandboxID) + unlock, err := rLockSandbox(sandboxID) if err != nil { return SandboxStats{}, []ContainerStats{}, err } - - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -797,11 +789,11 @@ func togglePauseContainer(ctx context.Context, sandboxID, containerID string, pa return vcTypes.ErrNeedContainerID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -841,11 +833,11 @@ func AddDevice(ctx context.Context, sandboxID string, info deviceConfig.DeviceIn return nil, vcTypes.ErrNeedSandboxID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -861,11 +853,11 @@ func toggleInterface(ctx context.Context, sandboxID string, inf *vcTypes.Interfa return nil, vcTypes.ErrNeedSandboxID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -905,11 +897,11 @@ func ListInterfaces(ctx context.Context, sandboxID string) ([]*vcTypes.Interface return nil, vcTypes.ErrNeedSandboxID } - lockFile, err := 
rLockSandbox(ctx, sandboxID) + unlock, err := rLockSandbox(sandboxID) if err != nil { return nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -929,11 +921,11 @@ func UpdateRoutes(ctx context.Context, sandboxID string, routes []*vcTypes.Route return nil, vcTypes.ErrNeedSandboxID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -953,11 +945,11 @@ func ListRoutes(ctx context.Context, sandboxID string) ([]*vcTypes.Route, error) return nil, vcTypes.ErrNeedSandboxID } - lockFile, err := rLockSandbox(ctx, sandboxID) + unlock, err := rLockSandbox(sandboxID) if err != nil { return nil, err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { @@ -983,11 +975,11 @@ func CleanupContainer(ctx context.Context, sandboxID, containerID string, force return vcTypes.ErrNeedContainerID } - lockFile, err := rwLockSandbox(ctx, sandboxID) + unlock, err := rwLockSandbox(sandboxID) if err != nil { return err } - defer unlockSandbox(ctx, sandboxID, lockFile) + defer unlock() s, err := fetchSandbox(ctx, sandboxID) if err != nil { diff --git a/virtcontainers/persist/fs/fs.go b/virtcontainers/persist/fs/fs.go index ac8d1c48b..d5ac624ec 100644 --- a/virtcontainers/persist/fs/fs.go +++ b/virtcontainers/persist/fs/fs.go @@ -264,9 +264,9 @@ func (fs *FS) Lock(sandboxID string, exclusive bool) (func() error, error) { var lockType int if exclusive { - lockType = syscall.LOCK_EX | syscall.LOCK_NB + lockType = syscall.LOCK_EX } else { - lockType = syscall.LOCK_SH | syscall.LOCK_NB + lockType = syscall.LOCK_SH } if err := syscall.Flock(int(f.Fd()), lockType); err != nil { diff --git a/virtcontainers/persist/fs/fs_test.go b/virtcontainers/persist/fs/fs_test.go index 75bb4d879..9fe889674 100644 --- a/virtcontainers/persist/fs/fs_test.go +++ b/virtcontainers/persist/fs/fs_test.go @@ -28,7 +28,7 @@ func getFsDriver() (*FS, error) { return fs, nil } -func TestFsLock(t *testing.T) { +func TestFsLockShared(t *testing.T) { fs, err := getFsDriver() assert.Nil(t, err) assert.NotNil(t, fs) @@ -48,17 +48,42 @@ func TestFsLock(t *testing.T) { err = os.MkdirAll(sandboxDir, dirMode) assert.Nil(t, err) + // Take 2 shared locks unlockFunc, err := fs.Lock(sid, false) assert.Nil(t, err) + unlockFunc2, err := fs.Lock(sid, false) assert.Nil(t, err) - _, err = fs.Lock(sid, true) - assert.NotNil(t, err) assert.Nil(t, unlockFunc()) - // double unlock should return error - assert.NotNil(t, unlockFunc()) assert.Nil(t, unlockFunc2()) + assert.NotNil(t, unlockFunc2()) +} + +func TestFsLockExclusive(t *testing.T) { + fs, err := getFsDriver() + assert.Nil(t, err) + assert.NotNil(t, fs) + + sid := "test-fs-driver" + fs.sandboxState.SandboxContainer = sid + sandboxDir, err := fs.sandboxDir(sid) + assert.Nil(t, err) + + err = os.MkdirAll(sandboxDir, dirMode) + assert.Nil(t, err) + + // Take 1 exclusive lock + unlockFunc, err := fs.Lock(sid, true) + assert.Nil(t, err) + + assert.Nil(t, unlockFunc()) + + unlockFunc, err = fs.Lock(sid, true) + assert.Nil(t, err) + + assert.Nil(t, unlockFunc()) + assert.NotNil(t, unlockFunc()) } func TestFsDriver(t *testing.T) { diff --git a/virtcontainers/sandbox.go b/virtcontainers/sandbox.go index dea3cfe65..570dae6f9 100644 --- a/virtcontainers/sandbox.go +++ 
b/virtcontainers/sandbox.go @@ -604,38 +604,22 @@ func (s *Sandbox) storeSandbox() error { return nil } -func rLockSandbox(ctx context.Context, sandboxID string) (string, error) { - store, err := store.NewVCSandboxStore(ctx, sandboxID) +func rLockSandbox(sandboxID string) (func() error, error) { + store, err := persist.GetDriver("fs") if err != nil { - return "", err + return nil, fmt.Errorf("failed to get fs persist driver: %v", err) } - return store.RLock() + return store.Lock(sandboxID, false) } -func rwLockSandbox(ctx context.Context, sandboxID string) (string, error) { - store, err := store.NewVCSandboxStore(ctx, sandboxID) +func rwLockSandbox(sandboxID string) (func() error, error) { + store, err := persist.GetDriver("fs") if err != nil { - return "", err + return nil, fmt.Errorf("failed to get fs persist driver: %v", err) } - return store.Lock() -} - -func unlockSandbox(ctx context.Context, sandboxID, token string) error { - // If the store no longer exists, we won't be able to unlock. - // Creating a new store for locking an item that does not even exist - // does not make sense. - if !store.VCSandboxStoreExists(ctx, sandboxID) { - return nil - } - - store, err := store.NewVCSandboxStore(ctx, sandboxID) - if err != nil { - return err - } - - return store.Unlock(token) + return store.Lock(sandboxID, true) } func supportNewStore(ctx context.Context) bool { From 01b4a64be2130c21091056fdeedf28fdf32a3cc9 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 26 Nov 2019 22:09:03 +0800 Subject: [PATCH 06/11] persist: remove VCStore from sandbox/apis Remove VCStore usage from sandbox. Signed-off-by: Wei Zhang --- virtcontainers/acrn_test.go | 6 +- virtcontainers/container.go | 6 +- virtcontainers/container_test.go | 21 +---- virtcontainers/kata_agent.go | 20 ----- virtcontainers/kata_agent_test.go | 17 ++--- virtcontainers/persist.go | 4 - virtcontainers/qemu_test.go | 22 ++---- virtcontainers/sandbox.go | 122 +++--------------------------- virtcontainers/sandbox_test.go | 43 +---------- 9 files changed, 35 insertions(+), 226 deletions(-) diff --git a/virtcontainers/acrn_test.go b/virtcontainers/acrn_test.go index a0dde2048..5c0e563b4 100644 --- a/virtcontainers/acrn_test.go +++ b/virtcontainers/acrn_test.go @@ -218,11 +218,7 @@ func TestAcrnCreateSandbox(t *testing.T) { }, } - vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) - assert.NoError(err) - sandbox.store = vcStore - - err = globalSandboxList.addSandbox(sandbox) + err := globalSandboxList.addSandbox(sandbox) assert.NoError(err) defer globalSandboxList.removeSandbox(sandbox.id) diff --git a/virtcontainers/container.go b/virtcontainers/container.go index f9fe271d0..45b94be0e 100644 --- a/virtcontainers/container.go +++ b/virtcontainers/container.go @@ -968,10 +968,8 @@ func (c *Container) stop(force bool) error { defer func() { // Save device and drive data. // TODO: can we merge this saving with setContainerState()? 
- if c.sandbox.supportNewStore() { - if err := c.sandbox.Save(); err != nil { - c.Logger().WithError(err).Info("save container state failed") - } + if err := c.sandbox.Save(); err != nil { + c.Logger().WithError(err).Info("save container state failed") } }() diff --git a/virtcontainers/container_test.go b/virtcontainers/container_test.go index 2ace0a726..d4fea83d5 100644 --- a/virtcontainers/container_test.go +++ b/virtcontainers/container_test.go @@ -92,18 +92,13 @@ func TestContainerRemoveDrive(t *testing.T) { config: &SandboxConfig{}, } - vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) - assert.Nil(t, err) - - sandbox.store = vcStore - container := Container{ sandbox: sandbox, id: "testContainer", } container.state.Fstype = "" - err = container.removeDrive() + err := container.removeDrive() // hotplugRemoveDevice for hypervisor should not be called. // test should pass without a hypervisor created for the container's sandbox. @@ -124,8 +119,6 @@ func TestContainerRemoveDrive(t *testing.T) { assert.True(t, ok) err = device.Attach(devReceiver) assert.Nil(t, err) - err = sandbox.storeSandboxDevices() - assert.Nil(t, err) container.state.Fstype = "xfs" container.state.BlockDeviceID = device.DeviceID() @@ -324,16 +317,12 @@ func TestContainerAddDriveDir(t *testing.T) { }, } - defer store.DeleteAll() - - sandboxStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) - assert.Nil(err) - sandbox.store = sandboxStore - sandbox.newStore, err = persist.GetDriver("fs") assert.NoError(err) assert.NotNil(sandbox.newStore) + defer sandbox.newStore.Destroy(sandbox.id) + contID := "100" container := Container{ sandbox: sandbox, @@ -384,9 +373,7 @@ func TestContainerRootfsPath(t *testing.T) { }, }, } - vcstore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) - sandbox.store = vcstore - assert.Nil(t, err) + container := Container{ id: "rootfstestcontainerid", sandbox: sandbox, diff --git a/virtcontainers/kata_agent.go b/virtcontainers/kata_agent.go index a08d8325e..c7fb9102f 100644 --- a/virtcontainers/kata_agent.go +++ b/virtcontainers/kata_agent.go @@ -317,13 +317,6 @@ func (k *kataAgent) init(ctx context.Context, sandbox *Sandbox, config interface k.proxyBuiltIn = isProxyBuiltIn(sandbox.config.ProxyType) - // Fetch agent runtime info. 
- if !sandbox.supportNewStore() { - if err := sandbox.store.Load(store.Agent, &k.state); err != nil { - k.Logger().Debug("Could not retrieve anything from storage") - } - } - return disableVMShutdown, nil } @@ -730,12 +723,6 @@ func (k *kataAgent) setProxy(sandbox *Sandbox, proxy proxy, pid int, url string) k.proxy = proxy k.state.ProxyPid = pid k.state.URL = url - if sandbox != nil && !sandbox.supportNewStore() { - if err := sandbox.store.Store(store.Agent, k.state); err != nil { - return err - } - } - return nil } @@ -952,13 +939,6 @@ func (k *kataAgent) stopSandbox(sandbox *Sandbox) error { // clean up agent state k.state.ProxyPid = -1 k.state.URL = "" - if !sandbox.supportNewStore() { - if err := sandbox.store.Store(store.Agent, k.state); err != nil { - // ignore error - k.Logger().WithError(err).WithField("sandbox", sandbox.id).Error("failed to clean up agent state") - } - } - return nil } diff --git a/virtcontainers/kata_agent_test.go b/virtcontainers/kata_agent_test.go index 7f5e99d4e..5dc880c74 100644 --- a/virtcontainers/kata_agent_test.go +++ b/virtcontainers/kata_agent_test.go @@ -31,9 +31,9 @@ import ( "github.com/kata-containers/runtime/virtcontainers/device/config" "github.com/kata-containers/runtime/virtcontainers/device/drivers" "github.com/kata-containers/runtime/virtcontainers/device/manager" + "github.com/kata-containers/runtime/virtcontainers/persist" "github.com/kata-containers/runtime/virtcontainers/pkg/mock" vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types" - "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" ) @@ -714,10 +714,10 @@ func TestAgentCreateContainer(t *testing.T) { hypervisor: &mockHypervisor{}, } - vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) - assert.Nil(err) - - sandbox.store = vcStore + newStore, err := persist.GetDriver("fs") + assert.NoError(err) + assert.NotNil(newStore) + sandbox.newStore = newStore container := &Container{ ctx: sandbox.ctx, @@ -815,12 +815,7 @@ func TestKataAgentSetProxy(t *testing.T) { id: "foobar", } - vcStore, err := store.NewVCSandboxStore(s.ctx, s.id) - assert.Nil(err) - - s.store = vcStore - - err = k.setProxy(s, p, 0, "") + err := k.setProxy(s, p, 0, "") assert.Error(err) } diff --git a/virtcontainers/persist.go b/virtcontainers/persist.go index c3ed4f9a0..95cdb5df6 100644 --- a/virtcontainers/persist.go +++ b/virtcontainers/persist.go @@ -443,10 +443,6 @@ func (c *Container) Restore() error { return nil } -func (s *Sandbox) supportNewStore() bool { - return true -} - func loadSandboxConfig(id string) (*SandboxConfig, error) { store, err := persist.GetDriver("fs") if err != nil || store == nil { diff --git a/virtcontainers/qemu_test.go b/virtcontainers/qemu_test.go index fd90c5cb3..1afb744a7 100644 --- a/virtcontainers/qemu_test.go +++ b/virtcontainers/qemu_test.go @@ -16,6 +16,7 @@ import ( govmmQemu "github.com/intel/govmm/qemu" "github.com/kata-containers/runtime/virtcontainers/device/config" + "github.com/kata-containers/runtime/virtcontainers/persist" "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/pkg/errors" @@ -85,18 +86,13 @@ func TestQemuCreateSandbox(t *testing.T) { }, } - vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) - assert.NoError(err) - - sandbox.store = vcStore - // Create the hypervisor fake binary testQemuPath := filepath.Join(testDir, testHypervisor) - _, err = os.Create(testQemuPath) + _, err := 
os.Create(testQemuPath) assert.NoError(err) // Create parent dir path for hypervisor.json - parentDir := store.SandboxConfigurationRootPath(sandbox.id) + parentDir := store.SandboxRuntimeRootPath(sandbox.id) assert.NoError(os.MkdirAll(parentDir, store.DirMode)) err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false) @@ -118,17 +114,13 @@ func TestQemuCreateSandboxMissingParentDirFail(t *testing.T) { }, } - vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) - assert.NoError(err) - sandbox.store = vcStore - // Create the hypervisor fake binary testQemuPath := filepath.Join(testDir, testHypervisor) - _, err = os.Create(testQemuPath) + _, err := os.Create(testQemuPath) assert.NoError(err) // Ensure parent dir path for hypervisor.json does not exist. - parentDir := store.SandboxConfigurationRootPath(sandbox.id) + parentDir := store.SandboxRuntimeRootPath(sandbox.id) assert.NoError(os.RemoveAll(parentDir)) err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false) @@ -470,11 +462,11 @@ func createQemuSandboxConfig() (*Sandbox, error) { }, } - vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) + newStore, err := persist.GetDriver("fs") if err != nil { return &Sandbox{}, err } - sandbox.store = vcStore + sandbox.newStore = newStore return &sandbox, nil } diff --git a/virtcontainers/sandbox.go b/virtcontainers/sandbox.go index 570dae6f9..4bf81bb0a 100644 --- a/virtcontainers/sandbox.go +++ b/virtcontainers/sandbox.go @@ -175,9 +175,7 @@ type Sandbox struct { factory Factory hypervisor hypervisor agent agent - store *store.VCStore - // store is used to replace VCStore step by step - newStore persistapi.PersistDriver + newStore persistapi.PersistDriver network Network monitor *monitor @@ -244,10 +242,6 @@ func (s *Sandbox) SetAnnotations(annotations map[string]string) error { for k, v := range annotations { s.config.Annotations[k] = v } - - if !s.supportNewStore() { - return s.store.Store(store.Configuration, *(s.config)) - } return nil } @@ -454,12 +448,6 @@ func (s *Sandbox) getAndStoreGuestDetails() error { s.seccompSupported = guestDetailRes.AgentDetails.SupportsSeccomp } s.state.GuestMemoryHotplugProbe = guestDetailRes.SupportMemHotplugProbe - - if !s.supportNewStore() { - if err = s.store.Store(store.State, s.state); err != nil { - return err - } - } } return nil @@ -543,15 +531,8 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor ctx: ctx, } - vcStore, err := store.NewVCSandboxStore(ctx, s.id) - if err != nil { - return nil, err - } - - s.store = vcStore - if s.newStore, err = persist.GetDriver("fs"); err != nil || s.newStore == nil { - return nil, fmt.Errorf("failed to get fs persist driver") + return nil, fmt.Errorf("failed to get fs persist driver: %v", err) } if err = globalSandboxList.addSandbox(s); err != nil { @@ -562,7 +543,7 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor if err != nil { s.Logger().WithError(err).WithField("sandboxid", s.id).Error("Create new sandbox failed") globalSandboxList.removeSandbox(s.id) - s.store.Delete() + s.newStore.Destroy(s.id) } }() @@ -588,10 +569,6 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor return s, nil } -func (s *Sandbox) storeSandboxDevices() error { - return s.store.StoreDevices(s.devManager.GetAllDevices()) -} - // storeSandbox stores a sandbox config. 
func (s *Sandbox) storeSandbox() error { span, _ := s.trace("storeSandbox") @@ -622,10 +599,6 @@ func rwLockSandbox(sandboxID string) (func() error, error) { return store.Lock(sandboxID, true) } -func supportNewStore(ctx context.Context) bool { - return true -} - // fetchSandbox fetches a sandbox config from a sandbox ID and returns a sandbox. func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err error) { virtLog.Info("fetch sandbox") @@ -640,23 +613,11 @@ func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err var config SandboxConfig - if supportNewStore(ctx) { - c, err := loadSandboxConfig(sandboxID) - if err != nil { - return nil, err - } - config = *c - } else { - // We're bootstrapping - vcStore, err := store.NewVCSandboxStore(ctx, sandboxID) - if err != nil { - return nil, err - } - - if err := vcStore.Load(store.Configuration, &config); err != nil { - return nil, err - } + c, err := loadSandboxConfig(sandboxID) + if err != nil { + return nil, err } + config = *c // fetchSandbox is not suppose to create new sandbox VM. sandbox, err = createSandbox(ctx, config, nil) @@ -746,13 +707,7 @@ func (s *Sandbox) Delete() error { s.agent.cleanup(s) - if s.supportNewStore() { - if err := s.newStore.Destroy(s.id); err != nil { - return err - } - } - - return s.store.Delete() + return s.newStore.Destroy(s.id) } func (s *Sandbox) startNetworkMonitor() error { @@ -820,11 +775,6 @@ func (s *Sandbox) createNetwork() error { } } } - - // Store the network - if !s.supportNewStore() { - return s.store.Store(store.Network, s.networkNS) - } return nil } @@ -898,14 +848,8 @@ func (s *Sandbox) AddInterface(inf *vcTypes.Interface) (*vcTypes.Interface, erro // Update the sandbox storage s.networkNS.Endpoints = append(s.networkNS.Endpoints, endpoint) - if s.supportNewStore() { - if err := s.Save(); err != nil { - return nil, err - } - } else { - if err := s.store.Store(store.Network, s.networkNS); err != nil { - return nil, err - } + if err := s.Save(); err != nil { + return nil, err } // Add network for vm @@ -923,14 +867,8 @@ func (s *Sandbox) RemoveInterface(inf *vcTypes.Interface) (*vcTypes.Interface, e } s.networkNS.Endpoints = append(s.networkNS.Endpoints[:i], s.networkNS.Endpoints[i+1:]...) 
- if s.supportNewStore() { - if err := s.Save(); err != nil { - return inf, err - } - } else { - if err := s.store.Store(store.Network, s.networkNS); err != nil { - return inf, err - } + if err := s.Save(); err != nil { + return inf, err } break @@ -1004,12 +942,6 @@ func (s *Sandbox) startVM() (err error) { return err } } - - if !s.supportNewStore() { - if err := s.store.Store(store.Network, s.networkNS); err != nil { - return err - } - } } s.Logger().Info("VM started") @@ -1316,9 +1248,6 @@ func (s *Sandbox) UpdateContainer(containerID string, resources specs.LinuxResou return err } - if err := c.storeContainer(); err != nil { - return err - } if err = s.storeSandbox(); err != nil { return err } @@ -1550,11 +1479,6 @@ func (s *Sandbox) setSandboxState(state types.StateString) error { // update in-memory state s.state.State = state - - // update on-disk state - if !s.supportNewStore() { - return s.store.Store(store.State, s.state) - } return nil } @@ -1573,14 +1497,6 @@ func (s *Sandbox) getAndSetSandboxBlockIndex() (int, error) { // Increment so that container gets incremented block index s.state.BlockIndex++ - if !s.supportNewStore() { - // experimental runtime use "persist.json" which doesn't need "state.json" anymore - // update on-disk state - if err := s.store.Store(store.State, s.state); err != nil { - return -1, err - } - } - return currentIndex, nil } @@ -1596,14 +1512,6 @@ func (s *Sandbox) decrementSandboxBlockIndex() error { } }() - if !s.supportNewStore() { - // experimental runtime use "persist.json" which doesn't need "state.json" anymore - // update on-disk state - if err = s.store.Store(store.State, s.state); err != nil { - return err - } - } - return nil } @@ -1733,12 +1641,6 @@ func (s *Sandbox) AddDevice(info config.DeviceInfo) (api.Device, error) { } }() - if !s.supportNewStore() { - if err = s.storeSandboxDevices(); err != nil { - return nil, err - } - } - return b, nil } diff --git a/virtcontainers/sandbox_test.go b/virtcontainers/sandbox_test.go index 40cb284f4..af8a40dda 100644 --- a/virtcontainers/sandbox_test.go +++ b/virtcontainers/sandbox_test.go @@ -574,11 +574,6 @@ func TestSetAnnotations(t *testing.T) { }, } - vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) - assert.NoError(err) - - sandbox.store = vcStore - keyAnnotation := "annotation2" valueAnnotation := "xyz" newAnnotations := map[string]string{ @@ -657,10 +652,6 @@ func TestContainerStateSetFstype(t *testing.T) { assert.Nil(err) defer cleanUp() - vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) - assert.Nil(err) - sandbox.store = vcStore - c := sandbox.GetContainer("100") assert.NotNil(c) @@ -737,14 +728,8 @@ func TestSandboxAttachDevicesVFIO(t *testing.T) { config: &SandboxConfig{}, } - store, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) - assert.Nil(t, err) - sandbox.store = store - containers[c.id].sandbox = &sandbox - err = sandbox.storeSandboxDevices() - assert.Nil(t, err, "Error while store sandbox devices %s", err) err = containers[c.id].attachDevices() assert.Nil(t, err, "Error while attaching devices %s", err) @@ -1005,7 +990,7 @@ func TestDeleteStoreWhenNewContainerFail(t *testing.T) { } _, err = newContainer(p, &contConfig) assert.NotNil(t, err, "New container with invalid device info should fail") - storePath := store.ContainerConfigurationRootPath(testSandboxID, contID) + storePath := store.ContainerRuntimeRootPath(testSandboxID, contID) _, err = os.Stat(storePath) assert.NotNil(t, err, "Should delete configuration root after failed to create a 
container") } @@ -1168,10 +1153,6 @@ func TestAttachBlockDevice(t *testing.T) { ctx: context.Background(), } - vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) - assert.Nil(t, err) - sandbox.store = vcStore - contID := "100" container := Container{ sandbox: sandbox, @@ -1180,18 +1161,11 @@ func TestAttachBlockDevice(t *testing.T) { // create state file path := store.ContainerRuntimeRootPath(testSandboxID, container.ID()) - err = os.MkdirAll(path, store.DirMode) + err := os.MkdirAll(path, store.DirMode) assert.NoError(t, err) defer os.RemoveAll(path) - stateFilePath := filepath.Join(path, store.StateFile) - os.Remove(stateFilePath) - - _, err = os.Create(stateFilePath) - assert.NoError(t, err) - defer os.Remove(stateFilePath) - path = "/dev/hda" deviceInfo := config.DeviceInfo{ HostPath: path, @@ -1255,10 +1229,6 @@ func TestPreAddDevice(t *testing.T) { ctx: context.Background(), } - vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) - assert.Nil(t, err) - sandbox.store = vcStore - contID := "100" container := Container{ sandbox: sandbox, @@ -1269,18 +1239,11 @@ func TestPreAddDevice(t *testing.T) { // create state file path := store.ContainerRuntimeRootPath(testSandboxID, container.ID()) - err = os.MkdirAll(path, store.DirMode) + err := os.MkdirAll(path, store.DirMode) assert.NoError(t, err) defer os.RemoveAll(path) - stateFilePath := filepath.Join(path, store.StateFile) - os.Remove(stateFilePath) - - _, err = os.Create(stateFilePath) - assert.NoError(t, err) - defer os.Remove(stateFilePath) - path = "/dev/hda" deviceInfo := config.DeviceInfo{ HostPath: path, From 8e88859ee45d5e9f496455980a8686ec328411f1 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 28 Nov 2019 14:59:26 +0800 Subject: [PATCH 07/11] persist: remove all usage of VCStore Remove VCStore usage from all modules Signed-off-by: Wei Zhang --- virtcontainers/acrn.go | 12 ++-- virtcontainers/acrn_arch_base_test.go | 4 +- virtcontainers/acrn_test.go | 4 +- virtcontainers/api.go | 2 - virtcontainers/api_test.go | 72 ++++++++++++---------- virtcontainers/clh.go | 27 +++----- virtcontainers/clh_test.go | 29 +-------- virtcontainers/container.go | 5 -- virtcontainers/container_test.go | 3 +- virtcontainers/factory/cache/cache_test.go | 11 ++-- virtcontainers/fc.go | 9 +-- virtcontainers/hypervisor.go | 4 +- virtcontainers/kata_agent.go | 8 +-- virtcontainers/persist/fs/fs.go | 14 +++++ virtcontainers/proxy.go | 4 +- virtcontainers/proxy_test.go | 8 +-- virtcontainers/qemu.go | 24 +++----- virtcontainers/qemu_arch_base_test.go | 4 +- virtcontainers/qemu_test.go | 10 +-- virtcontainers/sandbox.go | 9 +-- virtcontainers/sandbox_test.go | 17 +++-- virtcontainers/virtcontainers_test.go | 41 +++--------- virtcontainers/vm.go | 4 +- 23 files changed, 135 insertions(+), 190 deletions(-) diff --git a/virtcontainers/acrn.go b/virtcontainers/acrn.go index 85b821d47..6ac68c76d 100644 --- a/virtcontainers/acrn.go +++ b/virtcontainers/acrn.go @@ -24,8 +24,8 @@ import ( "github.com/kata-containers/runtime/pkg/rootless" "github.com/kata-containers/runtime/virtcontainers/device/config" persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/kata-containers/runtime/virtcontainers/pkg/uuid" - "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/kata-containers/runtime/virtcontainers/utils" ) @@ -327,7 +327,7 @@ func (a *Acrn) setup(id string, 
hypervisorConfig *HypervisorConfig) error { // The path might already exist, but in case of VM templating, // we have to create it since the sandbox has not created it yet. - if err = os.MkdirAll(store.SandboxRuntimeRootPath(id), store.DirMode); err != nil { + if err = os.MkdirAll(filepath.Join(fs.RunStoragePath(), id), DirMode); err != nil { return err } @@ -443,8 +443,8 @@ func (a *Acrn) startSandbox(timeoutSecs int) error { a.Logger().WithField("default-kernel-parameters", formatted).Debug() } - vmPath := filepath.Join(store.RunVMStoragePath(), a.id) - err := os.MkdirAll(vmPath, store.DirMode) + vmPath := filepath.Join(fs.RunVMStoragePath(), a.id) + err := os.MkdirAll(vmPath, DirMode) if err != nil { return err } @@ -657,7 +657,7 @@ func (a *Acrn) getSandboxConsole(id string) (string, error) { span, _ := a.trace("getSandboxConsole") defer span.Finish() - return utils.BuildSocketPath(store.RunVMStoragePath(), id, acrnConsoleSocket) + return utils.BuildSocketPath(fs.RunVMStoragePath(), id, acrnConsoleSocket) } func (a *Acrn) saveSandbox() error { @@ -802,7 +802,7 @@ func (a *Acrn) storeInfo() error { if os.IsNotExist(err) { // Root directory a.Logger().WithField("path", dirPath).Debugf("Creating UUID directory") - if err := os.MkdirAll(dirPath, store.DirMode); err != nil { + if err := os.MkdirAll(dirPath, DirMode); err != nil { return err } } else if err != nil { diff --git a/virtcontainers/acrn_arch_base_test.go b/virtcontainers/acrn_arch_base_test.go index 1ba57ac63..69fa55093 100644 --- a/virtcontainers/acrn_arch_base_test.go +++ b/virtcontainers/acrn_arch_base_test.go @@ -13,7 +13,7 @@ import ( "testing" "github.com/kata-containers/runtime/virtcontainers/device/config" - "github.com/kata-containers/runtime/virtcontainers/store" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/stretchr/testify/assert" ) @@ -106,7 +106,7 @@ func TestAcrnArchBaseAppendConsoles(t *testing.T) { assert := assert.New(t) acrnArchBase := newAcrnArchBase() - path := filepath.Join(store.SandboxRuntimeRootPath(sandboxID), consoleSocket) + path := filepath.Join(filepath.Join(fs.RunStoragePath(), sandboxID), consoleSocket) expectedOut := []Device{ ConsoleDevice{ diff --git a/virtcontainers/acrn_test.go b/virtcontainers/acrn_test.go index 5c0e563b4..52cba5521 100644 --- a/virtcontainers/acrn_test.go +++ b/virtcontainers/acrn_test.go @@ -12,7 +12,7 @@ import ( "testing" "github.com/kata-containers/runtime/virtcontainers/device/config" - "github.com/kata-containers/runtime/virtcontainers/store" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/stretchr/testify/assert" ) @@ -198,7 +198,7 @@ func TestAcrnGetSandboxConsole(t *testing.T) { ctx: context.Background(), } sandboxID := "testSandboxID" - expected := filepath.Join(store.RunVMStoragePath(), sandboxID, consoleSocket) + expected := filepath.Join(fs.RunVMStoragePath(), sandboxID, consoleSocket) result, err := a.getSandboxConsole(sandboxID) assert.NoError(err) diff --git a/virtcontainers/api.go b/virtcontainers/api.go index 3b86a03eb..1024d28e4 100644 --- a/virtcontainers/api.go +++ b/virtcontainers/api.go @@ -16,7 +16,6 @@ import ( "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/kata-containers/runtime/virtcontainers/pkg/compatoci" vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types" - "github.com/kata-containers/runtime/virtcontainers/store" 
"github.com/kata-containers/runtime/virtcontainers/types" specs "github.com/opencontainers/runtime-spec/specs-go" opentracing "github.com/opentracing/opentracing-go" @@ -50,7 +49,6 @@ func SetLogger(ctx context.Context, logger *logrus.Entry) { virtLog = logger.WithFields(fields) deviceApi.SetLogger(virtLog) - store.SetLogger(virtLog) compatoci.SetLogger(virtLog) } diff --git a/virtcontainers/api_test.go b/virtcontainers/api_test.go index 85c1ba762..b695fb390 100644 --- a/virtcontainers/api_test.go +++ b/virtcontainers/api_test.go @@ -16,11 +16,11 @@ import ( "testing" ktu "github.com/kata-containers/runtime/pkg/katatestutils" + "github.com/kata-containers/runtime/virtcontainers/persist" "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/kata-containers/runtime/virtcontainers/pkg/annotations" "github.com/kata-containers/runtime/virtcontainers/pkg/mock" vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types" - "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/kata-containers/runtime/virtcontainers/utils" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -69,6 +69,16 @@ func newBasicTestCmd() types.Cmd { return cmd } +func rmSandboxDir(sid string) error { + store, err := persist.GetDriver("fs") + if err != nil { + return fmt.Errorf("failed to get fs persist driver: %v", err) + } + + store.Destroy(sid) + return nil +} + func newTestSandboxConfigNoop() SandboxConfig { bundlePath := filepath.Join(testDir, testBundle) containerAnnotations[annotations.BundlePathKey] = bundlePath @@ -139,7 +149,7 @@ func TestCreateSandboxNoopAgentSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) } @@ -176,7 +186,7 @@ func TestCreateSandboxKataAgentSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) } @@ -203,7 +213,7 @@ func TestDeleteSandboxNoopAgentSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -248,7 +258,7 @@ func TestDeleteSandboxKataAgentSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -264,7 +274,7 @@ func TestDeleteSandboxFailing(t *testing.T) { defer cleanUp() assert := assert.New(t) - sandboxDir := store.SandboxRuntimeRootPath(testSandboxID) + sandboxDir := filepath.Join(fs.RunStoragePath(), testSandboxID) os.Remove(sandboxDir) p, err := DeleteSandbox(context.Background(), testSandboxID) @@ -328,7 +338,7 @@ func TestStartSandboxFailing(t *testing.T) { defer cleanUp() assert := assert.New(t) - sandboxDir := store.SandboxRuntimeRootPath(testSandboxID) + sandboxDir := filepath.Join(fs.RunStoragePath(), testSandboxID) os.Remove(sandboxDir) p, err := StartSandbox(context.Background(), testSandboxID) @@ -395,7 +405,7 @@ func TestStopSandboxKataAgentSuccessful(t *testing.T) { func TestStopSandboxFailing(t *testing.T) { defer cleanUp() - sandboxDir := store.SandboxRuntimeRootPath(testSandboxID) + 
sandboxDir := filepath.Join(fs.RunStoragePath(), testSandboxID) os.Remove(sandboxDir) p, err := StopSandbox(context.Background(), testSandboxID, false) @@ -413,7 +423,7 @@ func TestRunSandboxNoopAgentSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) } @@ -451,7 +461,7 @@ func TestRunSandboxKataAgentSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -617,8 +627,6 @@ func TestStatusSandboxSuccessfulStateRunning(t *testing.T) { assert.Exactly(status, expectedStatus) } -/*FIXME: replace DeleteAll with newstore.Destroy - func TestStatusSandboxFailingFetchSandboxConfig(t *testing.T) { defer cleanUp() assert := assert.New(t) @@ -630,7 +638,7 @@ func TestStatusSandboxFailingFetchSandboxConfig(t *testing.T) { assert.NoError(err) assert.NotNil(p) - store.DeleteAll() + rmSandboxDir(p.ID()) globalSandboxList.removeSandbox(p.ID()) _, err = StatusSandbox(ctx, p.ID()) @@ -648,12 +656,12 @@ func TestStatusPodSandboxFailingFetchSandboxState(t *testing.T) { assert.NoError(err) assert.NotNil(p) - store.DeleteAll() + rmSandboxDir(p.ID()) globalSandboxList.removeSandbox(p.ID()) _, err = StatusSandbox(ctx, p.ID()) assert.Error(err) -}*/ +} func newTestContainerConfigNoop(contID string) ContainerConfig { // Define the container command and bundle. @@ -680,7 +688,7 @@ func TestCreateContainerSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -734,7 +742,7 @@ func TestDeleteContainerSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -778,7 +786,7 @@ func TestDeleteContainerFailingNoContainer(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -835,7 +843,7 @@ func TestStartContainerFailingNoContainer(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -856,7 +864,7 @@ func TestStartContainerFailingSandboxNotStarted(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -936,7 +944,7 @@ func TestStopContainerFailingNoContainer(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -1040,7 +1048,7 @@ func TestEnterContainerFailingNoContainer(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -1093,7 +1101,7 @@ func 
TestStatusContainerSuccessful(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -1136,7 +1144,7 @@ func TestStatusContainerStateReady(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -1199,7 +1207,7 @@ func TestStatusContainerStateRunning(t *testing.T) { assert.NoError(err) assert.NotNil(p) - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) assert.NoError(err) @@ -1246,7 +1254,6 @@ func TestStatusContainerStateRunning(t *testing.T) { assert.Exactly(status, expectedStatus) } -/* FIXME: replace DeleteAll with newstore.Destroy func TestStatusContainerFailing(t *testing.T) { defer cleanUp() assert := assert.New(t) @@ -1259,12 +1266,12 @@ func TestStatusContainerFailing(t *testing.T) { assert.NoError(err) assert.NotNil(p) - store.DeleteAll() + rmSandboxDir(p.ID()) globalSandboxList.removeSandbox(p.ID()) _, err = StatusContainer(ctx, p.ID(), contID) assert.Error(err) -}*/ +} func TestStatsContainerFailing(t *testing.T) { defer cleanUp() @@ -1278,7 +1285,7 @@ func TestStatsContainerFailing(t *testing.T) { assert.NoError(err) assert.NotNil(p) - store.DeleteAll() + rmSandboxDir(p.ID()) globalSandboxList.removeSandbox(p.ID()) _, err = StatsContainer(ctx, p.ID(), contID) @@ -1312,7 +1319,6 @@ func TestStatsContainer(t *testing.T) { pImpl, ok := p.(*Sandbox) assert.True(ok) - defer store.DeleteAll() contConfig := newTestContainerConfigNoop(contID) _, c, err := CreateContainer(ctx, p.ID(), contConfig) @@ -1358,7 +1364,7 @@ func TestProcessListContainer(t *testing.T) { pImpl, ok := p.(*Sandbox) assert.True(ok) - defer store.DeleteAll() + // defer store.DeleteAll() contConfig := newTestContainerConfigNoop(contID) _, c, err := CreateContainer(ctx, p.ID(), contConfig) @@ -1414,7 +1420,7 @@ func createAndStartSandbox(ctx context.Context, config SandboxConfig) (sandbox V return nil, "", err } - sandboxDir = store.SandboxRuntimeRootPath(sandbox.ID()) + sandboxDir = filepath.Join(fs.RunStoragePath(), sandbox.ID()) _, err = os.Stat(sandboxDir) if err != nil { return nil, "", err @@ -1699,7 +1705,7 @@ func TestCleanupContainer(t *testing.T) { CleanupContainer(ctx, p.ID(), c.ID(), true) } - sandboxDir := store.SandboxRuntimeRootPath(p.ID()) + sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID()) _, err = os.Stat(sandboxDir) if err == nil { diff --git a/virtcontainers/clh.go b/virtcontainers/clh.go index 49daa2898..56e796b54 100644 --- a/virtcontainers/clh.go +++ b/virtcontainers/clh.go @@ -21,13 +21,13 @@ import ( "time" persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" chclient "github.com/kata-containers/runtime/virtcontainers/pkg/cloud-hypervisor/client" opentracing "github.com/opentracing/opentracing-go" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/kata-containers/runtime/virtcontainers/device/config" - "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/kata-containers/runtime/virtcontainers/utils" ) @@ -303,8 +303,8 @@ func (clh *cloudHypervisor) startSandbox(timeout int) error { 
clh.Logger().WithField("function", "startSandbox").Info("starting Sandbox") - vmPath := filepath.Join(store.RunVMStoragePath(), clh.id) - err := os.MkdirAll(vmPath, store.DirMode) + vmPath := filepath.Join(fs.RunVMStoragePath(), clh.id) + err := os.MkdirAll(vmPath, DirMode) if err != nil { return err } @@ -604,23 +604,23 @@ func (clh *cloudHypervisor) generateSocket(id string, useVsock bool) (interface{ } func (clh *cloudHypervisor) virtioFsSocketPath(id string) (string, error) { - return utils.BuildSocketPath(store.RunVMStoragePath(), id, virtioFsSocket) + return utils.BuildSocketPath(fs.RunVMStoragePath(), id, virtioFsSocket) } func (clh *cloudHypervisor) vsockSocketPath(id string) (string, error) { - return utils.BuildSocketPath(store.RunVMStoragePath(), id, clhSocket) + return utils.BuildSocketPath(fs.RunVMStoragePath(), id, clhSocket) } func (clh *cloudHypervisor) serialPath(id string) (string, error) { - return utils.BuildSocketPath(store.RunVMStoragePath(), id, clhSerial) + return utils.BuildSocketPath(fs.RunVMStoragePath(), id, clhSerial) } func (clh *cloudHypervisor) apiSocketPath(id string) (string, error) { - return utils.BuildSocketPath(store.RunVMStoragePath(), id, clhAPISocket) + return utils.BuildSocketPath(fs.RunVMStoragePath(), id, clhAPISocket) } func (clh *cloudHypervisor) logFilePath(id string) (string, error) { - return utils.BuildSocketPath(store.RunVMStoragePath(), id, clhLogFile) + return utils.BuildSocketPath(fs.RunVMStoragePath(), id, clhLogFile) } func (clh *cloudHypervisor) waitVMM(timeout uint) error { @@ -998,7 +998,7 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error { } // cleanup vm path - dir := filepath.Join(store.RunVMStoragePath(), clh.id) + dir := filepath.Join(fs.RunVMStoragePath(), clh.id) // If it's a symlink, remove both dir and the target. 
link, err := filepath.EvalSymlinks(dir) @@ -1027,14 +1027,7 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error { } if clh.config.VMid != "" { - dir = store.SandboxConfigurationRootPath(clh.config.VMid) - if err := os.RemoveAll(dir); err != nil { - if !force { - return err - } - clh.Logger().WithError(err).WithField("path", dir).Warnf("failed to remove vm path") - } - dir = store.SandboxRuntimeRootPath(clh.config.VMid) + dir = filepath.Join(fs.RunStoragePath(), clh.config.VMid) if err := os.RemoveAll(dir); err != nil { if !force { return err diff --git a/virtcontainers/clh_test.go b/virtcontainers/clh_test.go index 0d336ae00..380d1073b 100644 --- a/virtcontainers/clh_test.go +++ b/virtcontainers/clh_test.go @@ -219,18 +219,8 @@ func TestClhCreateSandbox(t *testing.T) { }, } - vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) + err = clh.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false) assert.NoError(err) - - sandbox.store = vcStore - - // Create parent dir path for hypervisor.json - parentDir := store.SandboxConfigurationRootPath(sandbox.id) - assert.NoError(os.MkdirAll(parentDir, store.DirMode)) - - err = clh.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false) - assert.NoError(err) - assert.NoError(os.RemoveAll(parentDir)) assert.Exactly(clhConfig, clh.config) } @@ -245,23 +235,6 @@ func TestClooudHypervisorStartSandbox(t *testing.T) { virtiofsd: &virtiofsdMock{}, } - sandbox := &Sandbox{ - ctx: context.Background(), - id: "testSandbox", - config: &SandboxConfig{ - HypervisorConfig: clhConfig, - }, - } - - vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id) - assert.NoError(err) - - sandbox.store = vcStore - - // Create parent dir path for hypervisor.json - parentDir := store.SandboxConfigurationRootPath(sandbox.id) - assert.NoError(os.MkdirAll(parentDir, store.DirMode)) - err = clh.startSandbox(10) assert.NoError(err) } diff --git a/virtcontainers/container.go b/virtcontainers/container.go index 45b94be0e..eedfe225b 100644 --- a/virtcontainers/container.go +++ b/virtcontainers/container.go @@ -29,7 +29,6 @@ import ( "github.com/kata-containers/runtime/pkg/rootless" "github.com/kata-containers/runtime/virtcontainers/device/config" "github.com/kata-containers/runtime/virtcontainers/device/manager" - "github.com/kata-containers/runtime/virtcontainers/store" ) // https://github.com/torvalds/linux/blob/master/include/uapi/linux/major.h @@ -322,8 +321,6 @@ type Container struct { sandbox *Sandbox - runPath string - configPath string containerPath string rootfsSuffix string @@ -673,8 +670,6 @@ func newContainer(sandbox *Sandbox, contConfig *ContainerConfig) (*Container, er rootFs: contConfig.RootFs, config: contConfig, sandbox: sandbox, - runPath: store.ContainerRuntimeRootPath(sandbox.id, contConfig.ID), - configPath: store.ContainerConfigurationRootPath(sandbox.id, contConfig.ID), containerPath: filepath.Join(sandbox.id, contConfig.ID), rootfsSuffix: "rootfs", state: types.ContainerState{}, diff --git a/virtcontainers/container_test.go b/virtcontainers/container_test.go index d4fea83d5..9b003b66e 100644 --- a/virtcontainers/container_test.go +++ b/virtcontainers/container_test.go @@ -21,7 +21,6 @@ import ( "github.com/kata-containers/runtime/virtcontainers/device/drivers" "github.com/kata-containers/runtime/virtcontainers/device/manager" "github.com/kata-containers/runtime/virtcontainers/persist" - 
"github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/stretchr/testify/assert" ) @@ -267,7 +266,7 @@ func testSetupFakeRootfs(t *testing.T) (testRawFile, loopDev, mntDir string, err assert.NoError(err) mntDir = filepath.Join(tmpDir, "rootfs") - err = os.Mkdir(mntDir, store.DirMode) + err = os.Mkdir(mntDir, DirMode) assert.NoError(err) err = syscall.Mount(loopDev, mntDir, "ext4", uintptr(0), "") diff --git a/virtcontainers/factory/cache/cache_test.go b/virtcontainers/factory/cache/cache_test.go index 65189374b..5d8ab24ca 100644 --- a/virtcontainers/factory/cache/cache_test.go +++ b/virtcontainers/factory/cache/cache_test.go @@ -15,7 +15,7 @@ import ( vc "github.com/kata-containers/runtime/virtcontainers" "github.com/kata-containers/runtime/virtcontainers/factory/direct" - "github.com/kata-containers/runtime/virtcontainers/store" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" ) func TestTemplateFactory(t *testing.T) { @@ -35,14 +35,11 @@ func TestTemplateFactory(t *testing.T) { ctx := context.Background() - ConfigStoragePathSaved := store.ConfigStoragePath - RunStoragePathSaved := store.RunStoragePath + runPathSave := fs.RunStoragePath() + fs.TestSetRunStoragePath(filepath.Join(testDir, "vc", "run")) // allow the tests to run without affecting the host system. - store.ConfigStoragePath = func() string { return filepath.Join(testDir, store.StoragePathSuffix, "config") } - store.RunStoragePath = func() string { return filepath.Join(testDir, store.StoragePathSuffix, "run") } defer func() { - store.ConfigStoragePath = ConfigStoragePathSaved - store.RunStoragePath = RunStoragePathSaved + fs.TestSetRunStoragePath(runPathSave) }() // New diff --git a/virtcontainers/fc.go b/virtcontainers/fc.go index 97570a456..21c5086c7 100644 --- a/virtcontainers/fc.go +++ b/virtcontainers/fc.go @@ -38,7 +38,6 @@ import ( "github.com/blang/semver" "github.com/containerd/console" "github.com/kata-containers/runtime/virtcontainers/device/config" - "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/kata-containers/runtime/virtcontainers/utils" ) @@ -76,6 +75,8 @@ const ( fcMetricsFifo = "metrics.fifo" defaultFcConfig = "fcConfig.json" + // storagePathSuffix mirrors persist/fs/fs.go:storagePathSuffix + storagePathSuffix = "vc" ) // Specify the minimum version of firecracker supported @@ -244,8 +245,8 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS N // Also jailer based on the id implicitly sets up cgroups under // /// hypervisorName := filepath.Base(hypervisorConfig.HypervisorPath) - //store.ConfigStoragePath cannot be used as we need exec perms - fc.chrootBaseDir = filepath.Join("/var/lib/", store.StoragePathSuffix) + //fs.RunStoragePath cannot be used as we need exec perms + fc.chrootBaseDir = filepath.Join("/run", storagePathSuffix) fc.vmPath = filepath.Join(fc.chrootBaseDir, hypervisorName, fc.id) fc.jailerRoot = filepath.Join(fc.vmPath, "root") // auto created by jailer @@ -374,7 +375,7 @@ func (fc *firecracker) fcInit(timeout int) error { } // Fetch sandbox network to be able to access it from the sandbox structure. 
- err := os.MkdirAll(fc.jailerRoot, store.DirMode) + err := os.MkdirAll(fc.jailerRoot, DirMode) if err != nil { return err } diff --git a/virtcontainers/hypervisor.go b/virtcontainers/hypervisor.go index 38f87d024..e376d0ccd 100644 --- a/virtcontainers/hypervisor.go +++ b/virtcontainers/hypervisor.go @@ -17,7 +17,7 @@ import ( "github.com/kata-containers/runtime/virtcontainers/device/config" persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api" - "github.com/kata-containers/runtime/virtcontainers/store" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/kata-containers/runtime/virtcontainers/utils" ) @@ -720,7 +720,7 @@ func generateVMSocket(id string, useVsock bool) (interface{}, error) { }, nil } - path, err := utils.BuildSocketPath(filepath.Join(store.RunVMStoragePath(), id), defaultSocketName) + path, err := utils.BuildSocketPath(filepath.Join(fs.RunVMStoragePath(), id), defaultSocketName) if err != nil { return nil, err } diff --git a/virtcontainers/kata_agent.go b/virtcontainers/kata_agent.go index c7fb9102f..a3fa65fe2 100644 --- a/virtcontainers/kata_agent.go +++ b/virtcontainers/kata_agent.go @@ -26,11 +26,11 @@ import ( "github.com/kata-containers/runtime/pkg/rootless" "github.com/kata-containers/runtime/virtcontainers/device/config" persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" vcAnnotations "github.com/kata-containers/runtime/virtcontainers/pkg/annotations" ns "github.com/kata-containers/runtime/virtcontainers/pkg/nsenter" vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types" "github.com/kata-containers/runtime/virtcontainers/pkg/uuid" - "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/opencontainers/runtime-spec/specs-go" opentracing "github.com/opentracing/opentracing-go" @@ -217,7 +217,7 @@ func (k *kataAgent) Logger() *logrus.Entry { } func (k *kataAgent) getVMPath(id string) string { - return filepath.Join(store.RunVMStoragePath(), id) + return filepath.Join(fs.RunVMStoragePath(), id) } func (k *kataAgent) getSharePath(id string) string { @@ -402,7 +402,7 @@ func (k *kataAgent) configure(h hypervisor, id, sharePath string, builtin bool, HostPath: sharePath, } - if err = os.MkdirAll(sharedVolume.HostPath, store.DirMode); err != nil { + if err = os.MkdirAll(sharedVolume.HostPath, DirMode); err != nil { return err } @@ -2126,7 +2126,7 @@ func (k *kataAgent) copyFile(src, dst string) error { cpReq := &grpc.CopyFileRequest{ Path: dst, - DirMode: uint32(store.DirMode), + DirMode: uint32(DirMode), FileMode: st.Mode, FileSize: fileSize, Uid: int32(st.Uid), diff --git a/virtcontainers/persist/fs/fs.go b/virtcontainers/persist/fs/fs.go index d5ac624ec..8290d6e52 100644 --- a/virtcontainers/persist/fs/fs.go +++ b/virtcontainers/persist/fs/fs.go @@ -37,6 +37,9 @@ const storagePathSuffix = "vc" // sandboxPathSuffix is the suffix used for sandbox storage const sandboxPathSuffix = "sbs" +// vmPathSuffix is the suffix used for guest VMs. +const vmPathSuffix = "vm" + // RunStoragePath is the sandbox runtime directory. // It will contain one state.json and one lock file for each created sandbox. var RunStoragePath = func() string { @@ -47,6 +50,17 @@ var RunStoragePath = func() string { return path } +// RunVMStoragePath is the vm directory. 
+// It will contain all guest vm sockets and shared mountpoints. +// The function is declared this way for mocking in unit tests +var RunVMStoragePath = func() string { + path := filepath.Join("/run", storagePathSuffix, vmPathSuffix) + if rootless.IsRootless() { + return filepath.Join(rootless.GetRootlessDir(), path) + } + return path +} + // FS storage driver implementation type FS struct { sandboxState *persistapi.SandboxState diff --git a/virtcontainers/proxy.go b/virtcontainers/proxy.go index e4e26cdfd..86de050d9 100644 --- a/virtcontainers/proxy.go +++ b/virtcontainers/proxy.go @@ -14,7 +14,7 @@ import ( "strings" kataclient "github.com/kata-containers/agent/protocols/client" - "github.com/kata-containers/runtime/virtcontainers/store" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/sirupsen/logrus" ) @@ -146,7 +146,7 @@ func validateProxyConfig(proxyConfig ProxyConfig) error { func defaultProxyURL(id, socketType string) (string, error) { switch socketType { case SocketTypeUNIX: - socketPath := filepath.Join(store.SandboxRuntimeRootPath(id), "proxy.sock") + socketPath := filepath.Join(filepath.Join(fs.RunStoragePath(), id), "proxy.sock") return fmt.Sprintf("unix://%s", socketPath), nil case SocketTypeVSOCK: // TODO Build the VSOCK default URL diff --git a/virtcontainers/proxy_test.go b/virtcontainers/proxy_test.go index 09b558695..af251154c 100644 --- a/virtcontainers/proxy_test.go +++ b/virtcontainers/proxy_test.go @@ -12,7 +12,7 @@ import ( "path/filepath" "testing" - "github.com/kata-containers/runtime/virtcontainers/store" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" ) @@ -173,7 +173,7 @@ func testDefaultProxyURL(expectedURL string, socketType string, sandboxID string } func TestDefaultProxyURLUnix(t *testing.T) { - path := filepath.Join(store.SandboxRuntimeRootPath(sandboxID), "proxy.sock") + path := filepath.Join(filepath.Join(fs.RunStoragePath(), sandboxID), "proxy.sock") socketPath := fmt.Sprintf("unix://%s", path) assert.NoError(t, testDefaultProxyURL(socketPath, SocketTypeUNIX, sandboxID)) } @@ -183,7 +183,7 @@ func TestDefaultProxyURLVSock(t *testing.T) { } func TestDefaultProxyURLUnknown(t *testing.T) { - path := filepath.Join(store.SandboxRuntimeRootPath(sandboxID), "proxy.sock") + path := filepath.Join(filepath.Join(fs.RunStoragePath(), sandboxID), "proxy.sock") socketPath := fmt.Sprintf("unix://%s", path) assert.Error(t, testDefaultProxyURL(socketPath, "foobar", sandboxID)) } @@ -204,7 +204,7 @@ func testProxyStart(t *testing.T, agent agent, proxy proxy) { } invalidPath := filepath.Join(tmpdir, "enoent") - expectedSocketPath := filepath.Join(store.SandboxRuntimeRootPath(testSandboxID), "proxy.sock") + expectedSocketPath := filepath.Join(filepath.Join(fs.RunStoragePath(), testSandboxID), "proxy.sock") expectedURI := fmt.Sprintf("unix://%s", expectedSocketPath) data := []testData{ diff --git a/virtcontainers/qemu.go b/virtcontainers/qemu.go index 754c9dcc9..22a1d274e 100644 --- a/virtcontainers/qemu.go +++ b/virtcontainers/qemu.go @@ -32,8 +32,8 @@ import ( "github.com/kata-containers/runtime/virtcontainers/device/config" persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/kata-containers/runtime/virtcontainers/pkg/uuid" - "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" 
"github.com/kata-containers/runtime/virtcontainers/utils" ) @@ -269,7 +269,7 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig) error { // The path might already exist, but in case of VM templating, // we have to create it since the sandbox has not created it yet. - if err = os.MkdirAll(store.SandboxRuntimeRootPath(id), store.DirMode); err != nil { + if err = os.MkdirAll(filepath.Join(fs.RunStoragePath(), id), DirMode); err != nil { return err } } @@ -324,7 +324,7 @@ func (q *qemu) memoryTopology() (govmmQemu.Memory, error) { } func (q *qemu) qmpSocketPath(id string) (string, error) { - return utils.BuildSocketPath(store.RunVMStoragePath(), id, qmpSocket) + return utils.BuildSocketPath(fs.RunVMStoragePath(), id, qmpSocket) } func (q *qemu) getQemuMachine() (govmmQemu.Machine, error) { @@ -568,7 +568,7 @@ func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNa VGA: "none", GlobalParam: "kvm-pit.lost_tick_policy=discard", Bios: firmwarePath, - PidFile: filepath.Join(store.RunVMStoragePath(), q.id, "pid"), + PidFile: filepath.Join(fs.RunVMStoragePath(), q.id, "pid"), } if ioThread != nil { @@ -590,7 +590,7 @@ func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNa } func (q *qemu) vhostFSSocketPath(id string) (string, error) { - return utils.BuildSocketPath(store.RunVMStoragePath(), id, vhostFSSocket) + return utils.BuildSocketPath(fs.RunVMStoragePath(), id, vhostFSSocket) } func (q *qemu) virtiofsdArgs(fd uintptr) []string { @@ -694,8 +694,8 @@ func (q *qemu) startSandbox(timeout int) error { q.fds = []*os.File{} }() - vmPath := filepath.Join(store.RunVMStoragePath(), q.id) - err := os.MkdirAll(vmPath, store.DirMode) + vmPath := filepath.Join(fs.RunVMStoragePath(), q.id) + err := os.MkdirAll(vmPath, DirMode) if err != nil { return err } @@ -867,7 +867,7 @@ func (q *qemu) stopSandbox() error { func (q *qemu) cleanupVM() error { // cleanup vm path - dir := filepath.Join(store.RunVMStoragePath(), q.id) + dir := filepath.Join(fs.RunVMStoragePath(), q.id) // If it's a symlink, remove both dir and the target. // This can happen when vm template links a sandbox to a vm. 
@@ -888,11 +888,7 @@ func (q *qemu) cleanupVM() error { } if q.config.VMid != "" { - dir = store.SandboxConfigurationRootPath(q.config.VMid) - if err := os.RemoveAll(dir); err != nil { - q.Logger().WithError(err).WithField("path", dir).Warnf("failed to remove vm path") - } - dir = store.SandboxRuntimeRootPath(q.config.VMid) + dir = filepath.Join(fs.RunStoragePath(), q.config.VMid) if err := os.RemoveAll(dir); err != nil { q.Logger().WithError(err).WithField("path", dir).Warnf("failed to remove vm path") } @@ -1582,7 +1578,7 @@ func (q *qemu) getSandboxConsole(id string) (string, error) { span, _ := q.trace("getSandboxConsole") defer span.Finish() - return utils.BuildSocketPath(store.RunVMStoragePath(), id, consoleSocket) + return utils.BuildSocketPath(fs.RunVMStoragePath(), id, consoleSocket) } func (q *qemu) saveSandbox() error { diff --git a/virtcontainers/qemu_arch_base_test.go b/virtcontainers/qemu_arch_base_test.go index 008e903c7..5a0432eff 100644 --- a/virtcontainers/qemu_arch_base_test.go +++ b/virtcontainers/qemu_arch_base_test.go @@ -16,7 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/kata-containers/runtime/virtcontainers/device/config" - "github.com/kata-containers/runtime/virtcontainers/store" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/pkg/errors" ) @@ -259,7 +259,7 @@ func TestQemuArchBaseAppendConsoles(t *testing.T) { assert := assert.New(t) qemuArchBase := newQemuArchBase() - path := filepath.Join(store.SandboxRuntimeRootPath(sandboxID), consoleSocket) + path := filepath.Join(filepath.Join(fs.RunStoragePath(), sandboxID), consoleSocket) expectedOut := []govmmQemu.Device{ govmmQemu.SerialDevice{ diff --git a/virtcontainers/qemu_test.go b/virtcontainers/qemu_test.go index 1afb744a7..c4772f6ee 100644 --- a/virtcontainers/qemu_test.go +++ b/virtcontainers/qemu_test.go @@ -17,7 +17,7 @@ import ( govmmQemu "github.com/intel/govmm/qemu" "github.com/kata-containers/runtime/virtcontainers/device/config" "github.com/kata-containers/runtime/virtcontainers/persist" - "github.com/kata-containers/runtime/virtcontainers/store" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/pkg/errors" "github.com/stretchr/testify/assert" @@ -92,8 +92,8 @@ func TestQemuCreateSandbox(t *testing.T) { assert.NoError(err) // Create parent dir path for hypervisor.json - parentDir := store.SandboxRuntimeRootPath(sandbox.id) - assert.NoError(os.MkdirAll(parentDir, store.DirMode)) + parentDir := filepath.Join(fs.RunStoragePath(), sandbox.id) + assert.NoError(os.MkdirAll(parentDir, DirMode)) err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false) assert.NoError(err) @@ -120,7 +120,7 @@ func TestQemuCreateSandboxMissingParentDirFail(t *testing.T) { assert.NoError(err) // Ensure parent dir path for hypervisor.json does not exist. 
- parentDir := store.SandboxRuntimeRootPath(sandbox.id) + parentDir := filepath.Join(fs.RunStoragePath(), sandbox.id) assert.NoError(os.RemoveAll(parentDir)) err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false) @@ -280,7 +280,7 @@ func TestQemuGetSandboxConsole(t *testing.T) { ctx: context.Background(), } sandboxID := "testSandboxID" - expected := filepath.Join(store.RunVMStoragePath(), sandboxID, consoleSocket) + expected := filepath.Join(fs.RunVMStoragePath(), sandboxID, consoleSocket) result, err := q.getSandboxConsole(sandboxID) assert.NoError(err) diff --git a/virtcontainers/sandbox.go b/virtcontainers/sandbox.go index 4bf81bb0a..e14a040ee 100644 --- a/virtcontainers/sandbox.go +++ b/virtcontainers/sandbox.go @@ -37,7 +37,6 @@ import ( "github.com/kata-containers/runtime/virtcontainers/pkg/annotations" "github.com/kata-containers/runtime/virtcontainers/pkg/compatoci" vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types" - "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/kata-containers/runtime/virtcontainers/utils" ) @@ -46,6 +45,9 @@ const ( // vmStartTimeout represents the time in seconds a sandbox can wait before // to consider the VM starting operation failed. vmStartTimeout = 10 + + // DirMode is the permission bits used for creating a directory + DirMode = os.FileMode(0750) | os.ModeDir ) // SandboxStatus describes a sandbox status. @@ -188,9 +190,6 @@ type Sandbox struct { containers map[string]*Container - runPath string - configPath string - state types.SandboxState networkNS NetworkNamespace @@ -519,8 +518,6 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor config: &sandboxConfig, volumes: sandboxConfig.Volumes, containers: map[string]*Container{}, - runPath: store.SandboxRuntimeRootPath(sandboxConfig.ID), - configPath: store.SandboxConfigurationRootPath(sandboxConfig.ID), state: types.SandboxState{}, annotationsLock: &sync.RWMutex{}, wg: &sync.WaitGroup{}, diff --git a/virtcontainers/sandbox_test.go b/virtcontainers/sandbox_test.go index af8a40dda..054db6331 100644 --- a/virtcontainers/sandbox_test.go +++ b/virtcontainers/sandbox_test.go @@ -21,8 +21,8 @@ import ( "github.com/kata-containers/runtime/virtcontainers/device/drivers" "github.com/kata-containers/runtime/virtcontainers/device/manager" exp "github.com/kata-containers/runtime/virtcontainers/experimental" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/kata-containers/runtime/virtcontainers/pkg/annotations" - "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/stretchr/testify/assert" @@ -682,7 +682,7 @@ func TestSandboxAttachDevicesVFIO(t *testing.T) { testDeviceBDFPath := "0000:00:1c.0" devicesDir := filepath.Join(tmpDir, testFDIOGroup, "devices") - err = os.MkdirAll(devicesDir, store.DirMode) + err = os.MkdirAll(devicesDir, DirMode) assert.Nil(t, err) deviceFile := filepath.Join(devicesDir, testDeviceBDFPath) @@ -990,7 +990,7 @@ func TestDeleteStoreWhenNewContainerFail(t *testing.T) { } _, err = newContainer(p, &contConfig) assert.NotNil(t, err, "New container with invalid device info should fail") - storePath := store.ContainerRuntimeRootPath(testSandboxID, contID) + storePath := filepath.Join(fs.RunStoragePath(), testSandboxID, contID) _, err = os.Stat(storePath) 
assert.NotNil(t, err, "Should delete configuration root after failed to create a container") } @@ -1160,8 +1160,8 @@ func TestAttachBlockDevice(t *testing.T) { } // create state file - path := store.ContainerRuntimeRootPath(testSandboxID, container.ID()) - err := os.MkdirAll(path, store.DirMode) + path := filepath.Join(fs.RunStoragePath(), testSandboxID, container.ID()) + err := os.MkdirAll(path, DirMode) assert.NoError(t, err) defer os.RemoveAll(path) @@ -1238,8 +1238,8 @@ func TestPreAddDevice(t *testing.T) { container.state.State = types.StateReady // create state file - path := store.ContainerRuntimeRootPath(testSandboxID, container.ID()) - err := os.MkdirAll(path, store.DirMode) + path := filepath.Join(fs.RunStoragePath(), testSandboxID, container.ID()) + err := os.MkdirAll(path, DirMode) assert.NoError(t, err) defer os.RemoveAll(path) @@ -1336,9 +1336,6 @@ func checkDirNotExist(path string) error { func checkSandboxRemains() error { var err error - if err = checkDirNotExist(sandboxDirConfig); err != nil { - return fmt.Errorf("%s still exists", sandboxDirConfig) - } if err = checkDirNotExist(sandboxDirState); err != nil { return fmt.Errorf("%s still exists", sandboxDirState) } diff --git a/virtcontainers/virtcontainers_test.go b/virtcontainers/virtcontainers_test.go index 1a6f41c64..7bda40069 100644 --- a/virtcontainers/virtcontainers_test.go +++ b/virtcontainers/virtcontainers_test.go @@ -16,7 +16,6 @@ import ( "testing" "github.com/kata-containers/runtime/virtcontainers/persist/fs" - "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/utils" "github.com/sirupsen/logrus" ) @@ -35,12 +34,7 @@ const testDisabledAsNonRoot = "Test disabled as requires root privileges" // package variables set in TestMain var testDir = "" -var sandboxDirConfig = "" -var sandboxFileConfig = "" var sandboxDirState = "" -var sandboxDirLock = "" -var sandboxFileState = "" -var sandboxFileLock = "" var testQemuKernelPath = "" var testQemuInitrdPath = "" var testQemuImagePath = "" @@ -63,22 +57,16 @@ var savedRunVMStoragePathFunc func() string // the next test to run. 
func cleanUp() { globalSandboxList.removeSandbox(testSandboxID) - store.DeleteAll() + os.RemoveAll(fs.RunStoragePath()) + os.RemoveAll(fs.RunVMStoragePath()) os.RemoveAll(testDir) - store.VCStorePrefix = "" - store.RunVMStoragePath = savedRunVMStoragePathFunc + os.MkdirAll(testDir, DirMode) setup() } func setup() { - store.VCStorePrefix = testDir - savedRunVMStoragePathFunc = store.RunVMStoragePath - store.RunVMStoragePath = func() string { - return filepath.Join("testDir", "vm") - } - os.MkdirAll(store.RunVMStoragePath(), store.DirMode) - os.MkdirAll(filepath.Join(testDir, testBundle), store.DirMode) + os.Mkdir(filepath.Join(testDir, testBundle), DirMode) for _, filename := range []string{testQemuKernelPath, testQemuInitrdPath, testQemuImagePath, testQemuPath} { _, err := os.Create(filename) @@ -90,7 +78,7 @@ func setup() { } func setupAcrn() { - os.Mkdir(filepath.Join(testDir, testBundle), store.DirMode) + os.Mkdir(filepath.Join(testDir, testBundle), DirMode) for _, filename := range []string{testAcrnKernelPath, testAcrnImagePath, testAcrnPath, testAcrnCtlPath} { _, err := os.Create(filename) @@ -102,7 +90,7 @@ func setupAcrn() { } func setupClh() { - os.Mkdir(filepath.Join(testDir, testBundle), store.DirMode) + os.Mkdir(filepath.Join(testDir, testBundle), DirMode) for _, filename := range []string{testClhKernelPath, testClhImagePath, testClhPath, testVirtiofsdPath} { _, err := os.Create(filename) @@ -135,7 +123,7 @@ func TestMain(m *testing.M) { } fmt.Printf("INFO: Creating virtcontainers test directory %s\n", testDir) - err = os.MkdirAll(testDir, store.DirMode) + err = os.MkdirAll(testDir, DirMode) if err != nil { fmt.Println("Could not create test directories:", err) os.Exit(1) @@ -170,25 +158,16 @@ func TestMain(m *testing.M) { setupClh() - ConfigStoragePathSaved := store.ConfigStoragePath - RunStoragePathSaved := store.RunStoragePath // allow the tests to run without affecting the host system. - store.ConfigStoragePath = func() string { return filepath.Join(testDir, store.StoragePathSuffix, "config") } - store.RunStoragePath = func() string { return filepath.Join(testDir, store.StoragePathSuffix, "run") } + runPathSave := fs.RunStoragePath() fs.TestSetRunStoragePath(filepath.Join(testDir, "vc", "run")) defer func() { - store.ConfigStoragePath = ConfigStoragePathSaved - store.RunStoragePath = RunStoragePathSaved + fs.TestSetRunStoragePath(runPathSave) }() // set now that configStoragePath has been overridden. 
- sandboxDirConfig = filepath.Join(store.ConfigStoragePath(), testSandboxID) - sandboxFileConfig = filepath.Join(store.ConfigStoragePath(), testSandboxID, store.ConfigurationFile) - sandboxDirState = filepath.Join(store.RunStoragePath(), testSandboxID) - sandboxDirLock = filepath.Join(store.RunStoragePath(), testSandboxID) - sandboxFileState = filepath.Join(store.RunStoragePath(), testSandboxID, store.StateFile) - sandboxFileLock = filepath.Join(store.RunStoragePath(), testSandboxID, store.LockFile) + sandboxDirState = filepath.Join(fs.RunStoragePath(), testSandboxID) testHyperstartCtlSocket = filepath.Join(testDir, "test_hyper.sock") testHyperstartTtySocket = filepath.Join(testDir, "test_tty.sock") diff --git a/virtcontainers/vm.go b/virtcontainers/vm.go index 99fda8b17..e76273d03 100644 --- a/virtcontainers/vm.go +++ b/virtcontainers/vm.go @@ -16,8 +16,8 @@ import ( pb "github.com/kata-containers/runtime/protocols/cache" "github.com/kata-containers/runtime/virtcontainers/persist" persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/kata-containers/runtime/virtcontainers/pkg/uuid" - "github.com/kata-containers/runtime/virtcontainers/store" "github.com/sirupsen/logrus" ) @@ -285,7 +285,7 @@ func NewVMFromGrpc(ctx context.Context, v *pb.GrpcVM, config VMConfig) (*VM, err } func buildVMSharePath(id string) string { - return filepath.Join(store.RunVMStoragePath(), id, "shared") + return filepath.Join(fs.RunVMStoragePath(), id, "shared") } func (v *VM) logger() logrus.FieldLogger { From ed4a1954e40675f5ea54411d4ac6a7bd96430694 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 28 Nov 2019 15:05:35 +0800 Subject: [PATCH 08/11] persist: remove unused struct Remove unused struct from persist structures. Signed-off-by: Wei Zhang --- virtcontainers/persist/api/config.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/virtcontainers/persist/api/config.go b/virtcontainers/persist/api/config.go index 5cc12195d..6b2143e47 100644 --- a/virtcontainers/persist/api/config.go +++ b/virtcontainers/persist/api/config.go @@ -173,13 +173,6 @@ type KataAgentConfig struct { UseVSock bool } -// HyperstartConfig is a structure storing information needed for -// hyperstart agent initialization. -type HyperstartConfig struct { - SockCtlName string - SockTtyName string -} - // ProxyConfig is a structure storing information needed from any // proxy in order to be properly initialized. type ProxyConfig struct { From d33b154dd7582363f457f665e95102cb5f66d5d3 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 29 Nov 2019 22:43:03 +0800 Subject: [PATCH 09/11] persist: add interface for global read/write Add two interfaces for fs storage driver for supporting global writing and reading, which is used by ACRN. 
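For reference, a rough usage sketch follows (not part of this patch; the "demo/hello.json" path and payload are made up for illustration). A caller asks the persist driver for the "fs" backend and lets it resolve everything under fs.StorageRootPath(), i.e. "/run/vc" on the host or the rootless equivalent:

    package main

    import (
        "fmt"

        "github.com/kata-containers/runtime/virtcontainers/persist"
    )

    func main() {
        // Grab the "fs" driver, the same way storeInfo()/loadInfo() in acrn.go do.
        store, err := persist.GetDriver("fs")
        if err != nil {
            panic(err)
        }

        // Writes <StorageRootPath>/demo/hello.json, creating parent dirs as needed.
        if err := store.GlobalWrite("demo/hello.json", []byte(`{"hello":"world"}`)); err != nil {
            panic(err)
        }

        // Read it back.
        data, err := store.GlobalRead("demo/hello.json")
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\n", data)
    }
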
Signed-off-by: Wei Zhang --- virtcontainers/acrn.go | 71 +++++---------------- virtcontainers/api_test.go | 1 - virtcontainers/persist/api/interface.go | 8 +++ virtcontainers/persist/fs/fs.go | 85 ++++++++++++++++++++----- virtcontainers/persist/fs/fs_test.go | 47 ++++++++++++++ virtcontainers/sandbox.go | 4 +- 6 files changed, 144 insertions(+), 72 deletions(-) diff --git a/virtcontainers/acrn.go b/virtcontainers/acrn.go index 6ac68c76d..ac3ba20ec 100644 --- a/virtcontainers/acrn.go +++ b/virtcontainers/acrn.go @@ -23,6 +23,7 @@ import ( "github.com/kata-containers/runtime/pkg/rootless" "github.com/kata-containers/runtime/virtcontainers/device/config" + "github.com/kata-containers/runtime/virtcontainers/persist" persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api" "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/kata-containers/runtime/virtcontainers/pkg/uuid" @@ -41,7 +42,7 @@ const ( // VMUUIDStoragePath is the uuid directory. // It will contain all uuid info used by guest vm. var VMUUIDStoragePath = func() string { - path := filepath.Join("/run/vc", UUIDPathSuffix) + path := filepath.Join(fs.StorageRootPath(), UUIDPathSuffix) if rootless.IsRootless() { return filepath.Join(rootless.GetRootlessDir(), path) } @@ -742,7 +743,7 @@ func (a *Acrn) GetACRNUUIDBytes(uid string) (uuid.UUID, error) { } // GetNextAvailableUUID returns next available UUID VM creation -// If no validl UUIDs are available it returns err. +// If no valid UUIDs are available it returns err. func (a *Acrn) GetNextAvailableUUID() (string, error) { var MaxVMSupported uint8 var Idx uint8 @@ -796,78 +797,38 @@ func (a *Acrn) GetMaxSupportedACRNVM() (uint8, error) { } func (a *Acrn) storeInfo() error { - dirPath := VMUUIDStoragePath() - - _, err := os.Stat(dirPath) - if os.IsNotExist(err) { - // Root directory - a.Logger().WithField("path", dirPath).Debugf("Creating UUID directory") - if err := os.MkdirAll(dirPath, DirMode); err != nil { - return err - } - } else if err != nil { - return err - } - - dirf, err := os.Open(dirPath) + store, err := persist.GetDriver("fs") if err != nil { return err } - defer dirf.Close() - - if err := syscall.Flock(int(dirf.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { - return err - } - - // write data - f, err := os.OpenFile(filepath.Join(dirPath, uuidFile), os.O_RDWR|os.O_CREATE, 0755) - if err != nil { - return fmt.Errorf("failed to store information into uuid.json: %v", err) - } - defer f.Close() + relPath := filepath.Join(UUIDPathSuffix, uuidFile) jsonOut, err := json.Marshal(a.info) if err != nil { return fmt.Errorf("Could not marshall data: %s", err) } - f.Write(jsonOut) + + if err := store.GlobalWrite(relPath, jsonOut); err != nil { + return fmt.Errorf("failed to write uuid to file: %v", err) + } + return nil } func (a *Acrn) loadInfo() error { - dirPath := VMUUIDStoragePath() - - _, err := os.Stat(dirPath) - if err != nil { - return fmt.Errorf("failed to load ACRN information: %v", err) - } - - dirf, err := os.Open(dirPath) + store, err := persist.GetDriver("fs") if err != nil { return err } + relPath := filepath.Join(UUIDPathSuffix, uuidFile) - if err := syscall.Flock(int(dirf.Fd()), syscall.LOCK_SH|syscall.LOCK_NB); err != nil { - dirf.Close() - return err - } - - defer dirf.Close() - - // write data - f, err := os.Open(filepath.Join(dirPath, uuidFile)) + data, err := store.GlobalRead(relPath) if err != nil { - return fmt.Errorf("failed to load information into uuid.json: %v", err) + return fmt.Errorf("failed to read uuid from 
file: %v", err) } - dec := json.NewDecoder(f) - if dec != nil { - return fmt.Errorf("failed to create json decoder") - } - - err = dec.Decode(&a.info) - if err != nil { - return fmt.Errorf("could not decode data: %v", err) + if err := json.Unmarshal(data, &a.info); err != nil { + return fmt.Errorf("failed to unmarshal uuid info: %v", err) } return nil } diff --git a/virtcontainers/api_test.go b/virtcontainers/api_test.go index b695fb390..c570b4ad8 100644 --- a/virtcontainers/api_test.go +++ b/virtcontainers/api_test.go @@ -1364,7 +1364,6 @@ func TestProcessListContainer(t *testing.T) { pImpl, ok := p.(*Sandbox) assert.True(ok) - // defer store.DeleteAll() contConfig := newTestContainerConfigNoop(contID) _, c, err := CreateContainer(ctx, p.ID(), contConfig) diff --git a/virtcontainers/persist/api/interface.go b/virtcontainers/persist/api/interface.go index 433e5ad41..ea26dfbc3 100644 --- a/virtcontainers/persist/api/interface.go +++ b/virtcontainers/persist/api/interface.go @@ -17,4 +17,12 @@ type PersistDriver interface { // Lock locks the persist driver, "exclusive" decides whether the lock is exclusive or shared. // It returns Unlock Function and errors Lock(sid string, exclusive bool) (func() error, error) + + // GlobalWrite writes "data" to "StorageRootPath"/"relativePath"; + // GlobalRead reads "data" from "StorageRootPath"/"relativePath"; + // these functions are used for writing/reading some global data, + // they are specially designed for ACRN so far. + // Don't use them too much unless you have no other choice! @weizhang555 + GlobalWrite(relativePath string, data []byte) error + GlobalRead(relativePath string) ([]byte, error) } diff --git a/virtcontainers/persist/fs/fs.go b/virtcontainers/persist/fs/fs.go index 8290d6e52..aa124d7c1 100644 --- a/virtcontainers/persist/fs/fs.go +++ b/virtcontainers/persist/fs/fs.go @@ -23,10 +23,10 @@ import ( const persistFile = "persist.json" // dirMode is the permission bits used for creating a directory -const dirMode = os.FileMode(0700) +const dirMode = os.FileMode(0700) | os.ModeDir // fileMode is the permission bits used for creating a file -const fileMode = os.FileMode(0640) +const fileMode = os.FileMode(0600) // storagePathSuffix is the suffix used for all storage paths // @@ -40,25 +40,33 @@ const sandboxPathSuffix = "sbs" // vmPathSuffix is the suffix used for guest VMs. const vmPathSuffix = "vm" -// RunStoragePath is the sandbox runtime directory. -// It will contain one state.json and one lock file for each created sandbox. -var RunStoragePath = func() string { - path := filepath.Join("/run", storagePathSuffix, sandboxPathSuffix) +var StorageRootPath = func() string { + path := filepath.Join("/run", storagePathSuffix) if rootless.IsRootless() { return filepath.Join(rootless.GetRootlessDir(), path) } return path } +// RunStoragePath is the sandbox runtime directory. +// It will contain one state.json and one lock file for each created sandbox. +var RunStoragePath = func() string { + return filepath.Join(StorageRootPath(), sandboxPathSuffix) +} + // RunVMStoragePath is the vm directory. // It will contain all guest vm sockets and shared mountpoints. 
// The function is declared this way for mocking in unit tests var RunVMStoragePath = func() string { - path := filepath.Join("/run", storagePathSuffix, vmPathSuffix) - if rootless.IsRootless() { - return filepath.Join(rootless.GetRootlessDir(), path) + return filepath.Join(StorageRootPath(), vmPathSuffix) +} + +// TestSetRunStoragePath set RunStoragePath to path +// this function is only used for testing purpose +func TestSetRunStoragePath(path string) { + RunStoragePath = func() string { + return path - } - return path } // FS storage driver implementation @@ -299,10 +307,57 @@ func (fs *FS) Lock(sandboxID string, exclusive bool) (func() error, error) { return unlockFunc, nil } -// TestSetRunStoragePath set RunStoragePath to path -// this function is only used for testing purpose -func TestSetRunStoragePath(path string) { - RunStoragePath = func() string { - return path +func (fs *FS) GlobalWrite(relativePath string, data []byte) error { + path := filepath.Join(StorageRootPath(), relativePath) + path, err := filepath.Abs(filepath.Clean(path)) + if err != nil { + return fmt.Errorf("failed to find abs path for %q: %v", relativePath, err) } + + dir := filepath.Dir(path) + + _, err = os.Stat(dir) + if os.IsNotExist(err) { + if err := os.MkdirAll(dir, dirMode); err != nil { + fs.Logger().WithError(err).Errorf("failed to create dir %q", dir) + return err + } + } else if err != nil { + return err + } + + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, fileMode) + if err != nil { + fs.Logger().WithError(err).Errorf("failed to open file %q for writing", path) + return err + } + defer f.Close() + + if _, err := f.Write(data); err != nil { + fs.Logger().WithError(err).Errorf("failed to write file %q: %v ", path, err) + return err + } + return nil +} + +func (fs *FS) GlobalRead(relativePath string) ([]byte, error) { + path := filepath.Join(StorageRootPath(), relativePath) + path, err := filepath.Abs(filepath.Clean(path)) + if err != nil { + return nil, fmt.Errorf("failed to find abs path for %q: %v", relativePath, err) + } + + f, err := os.Open(path) + if err != nil { + fs.Logger().WithError(err).Errorf("failed to open file %q for reading", path) + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + fs.Logger().WithError(err).Errorf("failed to read file %q: %v ", path, err) + return nil, err + } + return data, nil } diff --git a/virtcontainers/persist/fs/fs_test.go b/virtcontainers/persist/fs/fs_test.go index 9fe889674..72c836cba 100644 --- a/virtcontainers/persist/fs/fs_test.go +++ b/virtcontainers/persist/fs/fs_test.go @@ -9,6 +9,7 @@ import ( "fmt" "io/ioutil" "os" + "path/filepath" "testing" persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api" @@ -28,7 +29,27 @@ func getFsDriver() (*FS, error) { return fs, nil } +func initTestDir() func() { + testDir, _ := ioutil.TempDir("", "vc-tmp-") + // allow the tests to run without affecting the host system.
+ rootSave := StorageRootPath() + StorageRootPath = func() string { + return filepath.Join(testDir, "vc") + } + + os.MkdirAll(testDir, dirMode) + + return func() { + StorageRootPath = func() string { + return rootSave + } + os.RemoveAll(testDir) + } +} + func TestFsLockShared(t *testing.T) { + defer initTestDir()() + fs, err := getFsDriver() assert.Nil(t, err) assert.NotNil(t, fs) @@ -61,6 +82,8 @@ func TestFsLockShared(t *testing.T) { } func TestFsLockExclusive(t *testing.T) { + defer initTestDir()() + fs, err := getFsDriver() assert.Nil(t, err) assert.NotNil(t, fs) @@ -87,6 +110,8 @@ func TestFsLockExclusive(t *testing.T) { } func TestFsDriver(t *testing.T) { + defer initTestDir()() + fs, err := getFsDriver() assert.Nil(t, err) assert.NotNil(t, fs) @@ -163,3 +188,25 @@ func TestFsDriver(t *testing.T) { assert.NotNil(t, err) assert.True(t, os.IsNotExist(err)) } + +func TestGlobalReadWrite(t *testing.T) { + defer initTestDir()() + + relPath := "test/123/aaa.json" + data := "hello this is testing global read write" + + fs, err := getFsDriver() + assert.Nil(t, err) + assert.NotNil(t, fs) + + err = fs.GlobalWrite(relPath, []byte(data)) + assert.Nil(t, err) + + out, err := fs.GlobalRead(relPath) + assert.Nil(t, err) + assert.Equal(t, string(out), data) + + out, err = fs.GlobalRead("nonexist") + assert.NotNil(t, err) + assert.Nil(t, out) +} diff --git a/virtcontainers/sandbox.go b/virtcontainers/sandbox.go index e14a040ee..18363fc37 100644 --- a/virtcontainers/sandbox.go +++ b/virtcontainers/sandbox.go @@ -547,7 +547,9 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, nil) // Ignore the error. Restore can fail for a new sandbox - s.Restore() + if err := s.Restore(); err != nil { + s.Logger().WithError(err).Debug("restore sandbox failed") + } // new store doesn't require hypervisor to be stored immediately if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig, s.stateful); err != nil { From 4a298cb9b74c02852b36257ce3dca6c3419c0f0d Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 1 Dec 2019 20:39:02 +0800 Subject: [PATCH 10/11] persist: address comments Address some comments. 
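Most of the test changes below apply one idiom to the factory, oci and fs tests: save the current storage root, point it at a per-test temporary directory so the tests never write under the host's /run/vc, and restore it on cleanup. Roughly (fragment only; testDir is whatever temporary directory the test created):

    rootPathSave := fs.StorageRootPath()
    fs.TestSetStorageRootPath(filepath.Join(testDir, "vc"))

    defer func() {
        os.RemoveAll(testDir)
        fs.TestSetStorageRootPath(rootPathSave)
    }()
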
Signed-off-by: Wei Zhang --- virtcontainers/acrn.go | 2 +- virtcontainers/clh_test.go | 4 +- virtcontainers/factory/cache/cache_test.go | 10 +++++ virtcontainers/factory/direct/direct_test.go | 21 +++++++++-- virtcontainers/factory/factory_test.go | 37 +++++++++++++++++++ .../factory/template/template_test.go | 11 ++++++ virtcontainers/persist/fs/fs.go | 16 +++++--- virtcontainers/persist/fs/fs_test.go | 22 +---------- virtcontainers/pkg/oci/utils_test.go | 31 ++++++++++++---- virtcontainers/virtcontainers_test.go | 5 ++- 10 files changed, 117 insertions(+), 42 deletions(-) diff --git a/virtcontainers/acrn.go b/virtcontainers/acrn.go index ac3ba20ec..1b1c7f7df 100644 --- a/virtcontainers/acrn.go +++ b/virtcontainers/acrn.go @@ -805,7 +805,7 @@ func (a *Acrn) storeInfo() error { jsonOut, err := json.Marshal(a.info) if err != nil { - return fmt.Errorf("Could not marshall data: %s", err) + return fmt.Errorf("Could not marshal data: %s", err) } if err := store.GlobalWrite(relPath, jsonOut); err != nil { diff --git a/virtcontainers/clh_test.go b/virtcontainers/clh_test.go index 380d1073b..0e96186f0 100644 --- a/virtcontainers/clh_test.go +++ b/virtcontainers/clh_test.go @@ -13,8 +13,8 @@ import ( "testing" "github.com/kata-containers/runtime/virtcontainers/device/config" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" chclient "github.com/kata-containers/runtime/virtcontainers/pkg/cloud-hypervisor/client" - "github.com/kata-containers/runtime/virtcontainers/store" "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) @@ -184,7 +184,7 @@ func TestCloudHypervisorCleanupVM(t *testing.T) { t.Errorf("cloudHypervisor.cleanupVM() expected error != %v", err) } - dir := filepath.Join(store.RunVMStoragePath(), clh.id) + dir := filepath.Join(fs.RunVMStoragePath(), clh.id) os.MkdirAll(dir, os.ModePerm) if err := clh.cleanupVM(false); err != nil { diff --git a/virtcontainers/factory/cache/cache_test.go b/virtcontainers/factory/cache/cache_test.go index 5d8ab24ca..8489fe42d 100644 --- a/virtcontainers/factory/cache/cache_test.go +++ b/virtcontainers/factory/cache/cache_test.go @@ -8,6 +8,7 @@ package cache import ( "context" "io/ioutil" + "os" "path/filepath" "testing" @@ -18,10 +19,19 @@ import ( "github.com/kata-containers/runtime/virtcontainers/persist/fs" ) +var rootPathSave = fs.StorageRootPath() + func TestTemplateFactory(t *testing.T) { assert := assert.New(t) testDir, _ := ioutil.TempDir("", "vmfactory-tmp-") + fs.TestSetStorageRootPath(filepath.Join(testDir, "vc")) + + defer func() { + os.RemoveAll(testDir) + fs.TestSetStorageRootPath(rootPathSave) + }() + hyperConfig := vc.HypervisorConfig{ KernelPath: testDir, ImagePath: testDir, diff --git a/virtcontainers/factory/direct/direct_test.go b/virtcontainers/factory/direct/direct_test.go index 5ca7df298..20eec0eb8 100644 --- a/virtcontainers/factory/direct/direct_test.go +++ b/virtcontainers/factory/direct/direct_test.go @@ -9,23 +9,36 @@ import ( "context" "io/ioutil" "os" + "path/filepath" "testing" "github.com/stretchr/testify/assert" vc "github.com/kata-containers/runtime/virtcontainers" - "github.com/kata-containers/runtime/virtcontainers/store" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" ) +var rootPathSave = fs.StorageRootPath() + func TestTemplateFactory(t *testing.T) { assert := assert.New(t) testDir, err := ioutil.TempDir("", "vmfactory-tmp-") - assert.Nil(err) - store.VCStorePrefix = testDir + fs.TestSetStorageRootPath(filepath.Join(testDir, "vc")) + defer func() { os.RemoveAll(testDir) - 
store.VCStorePrefix = "" + fs.TestSetStorageRootPath(rootPathSave) + }() + + assert.Nil(err) + + runPathSave := fs.RunStoragePath() + fs.TestSetRunStoragePath(filepath.Join(testDir, "vc", "run")) + + defer func() { + os.RemoveAll(testDir) + fs.TestSetRunStoragePath(runPathSave) }() hyperConfig := vc.HypervisorConfig{ diff --git a/virtcontainers/factory/factory_test.go b/virtcontainers/factory/factory_test.go index e04fee41e..91e0a2c32 100644 --- a/virtcontainers/factory/factory_test.go +++ b/virtcontainers/factory/factory_test.go @@ -9,10 +9,12 @@ import ( "context" "io/ioutil" "os" + "path/filepath" "testing" vc "github.com/kata-containers/runtime/virtcontainers" "github.com/kata-containers/runtime/virtcontainers/factory/base" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" "github.com/kata-containers/runtime/virtcontainers/utils" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -20,6 +22,8 @@ import ( const testDisabledAsNonRoot = "Test disabled as requires root privileges" +var rootPathSave = fs.StorageRootPath() + func TestNewFactory(t *testing.T) { var config Config @@ -41,6 +45,12 @@ func TestNewFactory(t *testing.T) { assert.Error(err) testDir, _ := ioutil.TempDir("", "vmfactory-tmp-") + fs.TestSetStorageRootPath(filepath.Join(testDir, "vc")) + + defer func() { + os.RemoveAll(testDir) + fs.TestSetStorageRootPath(rootPathSave) + }() config.VMConfig.HypervisorConfig = vc.HypervisorConfig{ KernelPath: testDir, @@ -110,6 +120,12 @@ func TestVMConfigValid(t *testing.T) { assert := assert.New(t) testDir, _ := ioutil.TempDir("", "vmfactory-tmp-") + fs.TestSetStorageRootPath(filepath.Join(testDir, "vc")) + + defer func() { + os.RemoveAll(testDir) + fs.TestSetStorageRootPath(rootPathSave) + }() config := vc.VMConfig{ HypervisorType: vc.MockHypervisor, @@ -159,6 +175,13 @@ func TestCheckVMConfig(t *testing.T) { assert.Nil(err) testDir, _ := ioutil.TempDir("", "vmfactory-tmp-") + fs.TestSetStorageRootPath(filepath.Join(testDir, "vc")) + + defer func() { + os.RemoveAll(testDir) + fs.TestSetStorageRootPath(rootPathSave) + }() + config1.HypervisorConfig = vc.HypervisorConfig{ KernelPath: testDir, ImagePath: testDir, @@ -178,6 +201,13 @@ func TestFactoryGetVM(t *testing.T) { assert := assert.New(t) testDir, _ := ioutil.TempDir("", "vmfactory-tmp-") + fs.TestSetStorageRootPath(filepath.Join(testDir, "vc")) + + defer func() { + os.RemoveAll(testDir) + fs.TestSetStorageRootPath(rootPathSave) + }() + hyperConfig := vc.HypervisorConfig{ KernelPath: testDir, ImagePath: testDir, @@ -337,6 +367,13 @@ func TestDeepCompare(t *testing.T) { ProxyType: vc.NoopProxyType, } testDir, _ := ioutil.TempDir("", "vmfactory-tmp-") + fs.TestSetStorageRootPath(filepath.Join(testDir, "vc")) + + defer func() { + os.RemoveAll(testDir) + fs.TestSetStorageRootPath(rootPathSave) + }() + config.VMConfig.HypervisorConfig = vc.HypervisorConfig{ KernelPath: testDir, ImagePath: testDir, diff --git a/virtcontainers/factory/template/template_test.go b/virtcontainers/factory/template/template_test.go index 3f61ba3a3..150d81e2b 100644 --- a/virtcontainers/factory/template/template_test.go +++ b/virtcontainers/factory/template/template_test.go @@ -9,16 +9,20 @@ import ( "context" "io/ioutil" "os" + "path/filepath" "testing" "time" "github.com/stretchr/testify/assert" vc "github.com/kata-containers/runtime/virtcontainers" + "github.com/kata-containers/runtime/virtcontainers/persist/fs" ) const testDisabledAsNonRoot = "Test disabled as requires root privileges" +var rootPathSave = fs.StorageRootPath() 
+
 func TestTemplateFactory(t *testing.T) {
 	if os.Geteuid() != 0 {
 		t.Skip(testDisabledAsNonRoot)
@@ -29,6 +33,13 @@ func TestTemplateFactory(t *testing.T) {
 	templateWaitForAgent = 1 * time.Microsecond
 
 	testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
+	fs.TestSetStorageRootPath(filepath.Join(testDir, "vc"))
+
+	defer func() {
+		os.RemoveAll(testDir)
+		fs.TestSetStorageRootPath(rootPathSave)
+	}()
+
 	hyperConfig := vc.HypervisorConfig{
 		KernelPath: testDir,
 		ImagePath:  testDir,
diff --git a/virtcontainers/persist/fs/fs.go b/virtcontainers/persist/fs/fs.go
index aa124d7c1..33acebecd 100644
--- a/virtcontainers/persist/fs/fs.go
+++ b/virtcontainers/persist/fs/fs.go
@@ -69,6 +69,12 @@ func TestSetRunStoragePath(path string) {
 	}
 }
 
+func TestSetStorageRootPath(path string) {
+	StorageRootPath = func() string {
+		return path
+	}
+}
+
 // FS storage driver implementation
 type FS struct {
 	sandboxState *persistapi.SandboxState
@@ -319,7 +325,7 @@ func (fs *FS) GlobalWrite(relativePath string, data []byte) error {
 	_, err = os.Stat(dir)
 	if os.IsNotExist(err) {
 		if err := os.MkdirAll(dir, dirMode); err != nil {
-			fs.Logger().WithError(err).Errorf("failed to create dir %q", dir)
+			fs.Logger().WithError(err).WithField("directory", dir).Error("failed to create dir")
 			return err
 		}
 	} else if err != nil {
@@ -328,13 +334,13 @@ func (fs *FS) GlobalWrite(relativePath string, data []byte) error {
 
 	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, fileMode)
 	if err != nil {
-		fs.Logger().WithError(err).Errorf("failed to open file %q for writting", path)
+		fs.Logger().WithError(err).WithField("file", path).Error("failed to open file for writing")
 		return err
 	}
 	defer f.Close()
 
 	if _, err := f.Write(data); err != nil {
-		fs.Logger().WithError(err).Errorf("failed to write file %q: %v ", path, err)
+		fs.Logger().WithError(err).WithField("file", path).Error("failed to write file")
 		return err
 	}
 	return nil
@@ -349,14 +355,14 @@ func (fs *FS) GlobalRead(relativePath string) ([]byte, error) {
 
 	f, err := os.Open(path)
 	if err != nil {
-		fs.Logger().WithError(err).Errorf("failed to open file %q for reading", path)
+		fs.Logger().WithError(err).WithField("file", path).Error("failed to open file for reading")
 		return nil, err
 	}
 	defer f.Close()
 
 	data, err := ioutil.ReadAll(f)
 	if err != nil {
-		fs.Logger().WithError(err).Errorf("failed to read file %q: %v ", path, err)
+		fs.Logger().WithError(err).WithField("file", path).Error("failed to read file")
 		return nil, err
 	}
 	return data, nil
diff --git a/virtcontainers/persist/fs/fs_test.go b/virtcontainers/persist/fs/fs_test.go
index 72c836cba..431baa9a0 100644
--- a/virtcontainers/persist/fs/fs_test.go
+++ b/virtcontainers/persist/fs/fs_test.go
@@ -33,16 +33,12 @@ func initTestDir() func() {
 	testDir, _ := ioutil.TempDir("", "vc-tmp-")
 
 	// allow the tests to run without affecting the host system.
rootSave := StorageRootPath() - StorageRootPath = func() string { - return filepath.Join(testDir, "vc") - } + TestSetStorageRootPath(filepath.Join(testDir, "vc")) os.MkdirAll(testDir, dirMode) return func() { - StorageRootPath = func() string { - return rootSave - } + TestSetStorageRootPath(rootSave) os.RemoveAll(testDir) } } @@ -54,13 +50,6 @@ func TestFsLockShared(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, fs) - testDir, err := ioutil.TempDir("", "fs-tmp-") - assert.Nil(t, err) - TestSetRunStoragePath(testDir) - defer func() { - os.RemoveAll(testDir) - }() - sid := "test-fs-driver" fs.sandboxState.SandboxContainer = sid sandboxDir, err := fs.sandboxDir(sid) @@ -116,13 +105,6 @@ func TestFsDriver(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, fs) - testDir, err := ioutil.TempDir("", "fs-tmp-") - assert.Nil(t, err) - TestSetRunStoragePath(testDir) - defer func() { - os.RemoveAll(testDir) - }() - ss := persistapi.SandboxState{} cs := make(map[string]persistapi.ContainerState) // missing sandbox container id diff --git a/virtcontainers/pkg/oci/utils_test.go b/virtcontainers/pkg/oci/utils_test.go index 592cf67d5..49976fcf5 100644 --- a/virtcontainers/pkg/oci/utils_test.go +++ b/virtcontainers/pkg/oci/utils_test.go @@ -29,11 +29,15 @@ import ( ) const ( - tempBundlePath = "/tmp/virtc/ocibundle/" - containerID = "virtc-oci-test" - consolePath = "/tmp/virtc/console" - fileMode = os.FileMode(0640) - dirMode = os.FileMode(0750) + containerID = "virtc-oci-test" + fileMode = os.FileMode(0640) + dirMode = os.FileMode(0750) +) + +var ( + tempRoot = "" + tempBundlePath = "" + consolePath = "" ) func createConfig(fileName string, fileData string) (string, error) { @@ -633,16 +637,27 @@ func TestGetShmSizeBindMounted(t *testing.T) { } func TestMain(m *testing.M) { + var err error + tempRoot, err = ioutil.TempDir("", "virtc-") + if err != nil { + panic(err) + } + + tempBundlePath = filepath.Join(tempRoot, "ocibundle") + consolePath = filepath.Join(tempRoot, "console") + /* Create temp bundle directory if necessary */ - err := os.MkdirAll(tempBundlePath, dirMode) + err = os.MkdirAll(tempBundlePath, dirMode) if err != nil { fmt.Printf("Unable to create %s %v\n", tempBundlePath, err) os.Exit(1) } - defer os.RemoveAll(tempBundlePath) + ret := m.Run() - os.Exit(m.Run()) + os.RemoveAll(tempRoot) + + os.Exit(ret) } func TestAddAssetAnnotations(t *testing.T) { diff --git a/virtcontainers/virtcontainers_test.go b/virtcontainers/virtcontainers_test.go index 7bda40069..a690e438c 100644 --- a/virtcontainers/virtcontainers_test.go +++ b/virtcontainers/virtcontainers_test.go @@ -51,8 +51,6 @@ var testVirtiofsdPath = "" var testHyperstartCtlSocket = "" var testHyperstartTtySocket = "" -var savedRunVMStoragePathFunc func() string - // cleanUp Removes any stale sandbox/container state that can affect // the next test to run. func cleanUp() { @@ -160,10 +158,13 @@ func TestMain(m *testing.M) { // allow the tests to run without affecting the host system. runPathSave := fs.RunStoragePath() + rootPathSave := fs.StorageRootPath() fs.TestSetRunStoragePath(filepath.Join(testDir, "vc", "run")) + fs.TestSetStorageRootPath(filepath.Join(testDir, "vc")) defer func() { fs.TestSetRunStoragePath(runPathSave) + fs.TestSetStorageRootPath(rootPathSave) }() // set now that configStoragePath has been overridden. 
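Note: after the patches above, the tests that exercise the persist "fs"
driver all save the storage paths, point them at a private temporary
directory and restore them when the test finishes. A minimal,
self-contained sketch of that pattern in Go (the package and helper names
below are illustrative, not part of the tree):

// Package vctest is a placeholder name for this sketch.
package vctest

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/kata-containers/runtime/virtcontainers/persist/fs"
)

// withTestStorageRoot points the persist storage roots at a private
// temporary directory and returns a cleanup function that restores the
// saved paths and removes the directory, so tests never touch the host's
// real storage.
func withTestStorageRoot(t *testing.T) func() {
	rootSave := fs.StorageRootPath()
	runSave := fs.RunStoragePath()

	testDir, err := ioutil.TempDir("", "vc-tmp-")
	if err != nil {
		t.Fatal(err)
	}

	fs.TestSetStorageRootPath(filepath.Join(testDir, "vc"))
	fs.TestSetRunStoragePath(filepath.Join(testDir, "vc", "run"))

	return func() {
		fs.TestSetStorageRootPath(rootSave)
		fs.TestSetRunStoragePath(runSave)
		os.RemoveAll(testDir)
	}
}

A test would then simply run "defer withTestStorageRoot(t)()" before
touching the driver, which is the same save/override/restore sequence
spelled out inline in the hunks above.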
From 290339da6b9d094a8c73e1252ccafa42ca75c6b1 Mon Sep 17 00:00:00 2001
From: Wei Zhang
Date: Wed, 8 Jan 2020 22:40:21 +0800
Subject: [PATCH 11/11] compatibility: keep oldstore for compatibility

Keep the old store restore functions for backward compatibility: if old
store files are found on disk, restore the sandbox from the old store
first.

Signed-off-by: Wei Zhang
---
 virtcontainers/container.go           | 96 +++++++++++++++++++++++----
 virtcontainers/kata_agent.go          |  7 ++
 virtcontainers/persist.go             | 24 +++++++
 virtcontainers/sandbox.go             | 70 ++++++++++++++++---
 virtcontainers/virtcontainers_test.go |  5 ++
 5 files changed, 177 insertions(+), 25 deletions(-)

diff --git a/virtcontainers/container.go b/virtcontainers/container.go
index eedfe225b..6c3d759ed 100644
--- a/virtcontainers/container.go
+++ b/virtcontainers/container.go
@@ -29,6 +29,7 @@ import (
 	"github.com/kata-containers/runtime/pkg/rootless"
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
 	"github.com/kata-containers/runtime/virtcontainers/device/manager"
+	"github.com/kata-containers/runtime/virtcontainers/store"
 )
 
 // https://github.com/torvalds/linux/blob/master/include/uapi/linux/major.h
@@ -335,6 +336,8 @@ type Container struct {
 	systemMountsInfo SystemMountsInfo
 
 	ctx context.Context
+
+	store *store.VCStore
 }
 
 // ID returns the container identifier string.
@@ -426,9 +429,17 @@ func (c *Container) setContainerState(state types.StateString) error {
 	// update in-memory state
 	c.state.State = state
 
-	// flush data to storage
-	if err := c.sandbox.Save(); err != nil {
-		return err
+	if useOldStore(c.sandbox.ctx) {
+		// experimental runtime uses "persist.json" which doesn't need "state.json" anymore
+		// update on-disk state
+		if err := c.store.Store(store.State, c.state); err != nil {
+			return err
+		}
+	} else {
+		// flush data to storage
+		if err := c.sandbox.Save(); err != nil {
+			return err
+		}
 	}
 
 	return nil
@@ -678,31 +689,76 @@ func newContainer(sandbox *Sandbox, contConfig *ContainerConfig) (*Container, er
 		ctx:           sandbox.ctx,
 	}
 
-	// experimental runtime use "persist.json" instead of legacy "state.json" as storage
-	err := c.Restore()
-	if err == nil {
-		//container restored
-		return c, nil
-	}
+	if useOldStore(sandbox.ctx) {
+		ctrStore, err := store.NewVCContainerStore(sandbox.ctx, c.sandboxID, c.id)
+		if err != nil {
+			return nil, err
+		}
+		c.store = ctrStore
+		state, err := c.store.LoadContainerState()
+		if err == nil {
+			c.state = state
+		}
 
-	// Unexpected error
-	if !os.IsNotExist(err) && err != errContainerPersistNotExist {
-		return nil, err
+		var process Process
+		if err := c.store.Load(store.Process, &process); err == nil {
+			c.process = process
+		}
+	} else {
+		// experimental runtime uses "persist.json" instead of legacy "state.json" as storage
+		err := c.Restore()
+		if err == nil {
+			//container restored
+			return c, nil
+		}
+
+		// Unexpected error
+		if !os.IsNotExist(err) && err != errContainerPersistNotExist {
+			return nil, err
+		}
 	}
 
 	// Go to next step for first created container
-	if err = c.createMounts(); err != nil {
+	if err := c.createMounts(); err != nil {
 		return nil, err
 	}
 
-	if err = c.createDevices(contConfig); err != nil {
+	if err := c.createDevices(contConfig); err != nil {
 		return nil, err
 	}
 
 	return c, nil
 }
 
+func (c *Container) loadMounts() ([]Mount, error) {
+	var mounts []Mount
+	if err := c.store.Load(store.Mounts, &mounts); err != nil {
+		return []Mount{}, err
+	}
+
+	return mounts, nil
+}
+
+func (c *Container) loadDevices() ([]ContainerDevice, error) {
+	var devices []ContainerDevice
+
+	if err := c.store.Load(store.DeviceIDs, &devices); err != nil {
+		return []ContainerDevice{}, err
+	}
+
+	return devices, nil
+}
+
 func (c *Container) createMounts() error {
+	if useOldStore(c.sandbox.ctx) {
+		mounts, err := c.loadMounts()
+		if err == nil {
+			// restore mounts from disk
+			c.mounts = mounts
+			return nil
+		}
+	}
+
 	// Create block devices for newly created container
 	if err := c.createBlockDevices(); err != nil {
 		return err
@@ -712,6 +768,18 @@ func (c *Container) createDevices(contConfig *ContainerConfig) error {
+	// If the sandbox uses the new store, only newly created containers can reach this function,
+	// so we only restore devices here when the old store is in use
+	if useOldStore(c.sandbox.ctx) {
+		// Devices will be found in storage after create stage has completed.
+		// We load devices from storage at all other stages.
+		storedDevices, err := c.loadDevices()
+		if err == nil {
+			c.devices = storedDevices
+			return nil
+		}
+	}
+
 	// If devices were not found in storage, create Device implementations
 	// from the configuration. This should happen at create.
 	var storedDevices []ContainerDevice
diff --git a/virtcontainers/kata_agent.go b/virtcontainers/kata_agent.go
index a3fa65fe2..1c72b5b52 100644
--- a/virtcontainers/kata_agent.go
+++ b/virtcontainers/kata_agent.go
@@ -31,6 +31,7 @@ import (
 	ns "github.com/kata-containers/runtime/virtcontainers/pkg/nsenter"
 	vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
 	"github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
+	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	opentracing "github.com/opentracing/opentracing-go"
@@ -317,6 +318,12 @@ func (k *kataAgent) init(ctx context.Context, sandbox *Sandbox, config interface
 	k.proxyBuiltIn = isProxyBuiltIn(sandbox.config.ProxyType)
 
+	// Fetch agent runtime info.
+ if useOldStore(sandbox.ctx) { + if err := sandbox.store.Load(store.Agent, &k.state); err != nil { + k.Logger().Debug("Could not retrieve anything from storage") + } + } return disableVMShutdown, nil } diff --git a/virtcontainers/persist.go b/virtcontainers/persist.go index 95cdb5df6..e9475a33a 100644 --- a/virtcontainers/persist.go +++ b/virtcontainers/persist.go @@ -6,12 +6,14 @@ package virtcontainers import ( + "context" "errors" "github.com/kata-containers/runtime/virtcontainers/device/api" exp "github.com/kata-containers/runtime/virtcontainers/experimental" "github.com/kata-containers/runtime/virtcontainers/persist" persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/mitchellh/mapstructure" ) @@ -559,3 +561,25 @@ func loadSandboxConfig(id string) (*SandboxConfig, error) { } return sconfig, nil } + +var oldstoreKey = struct{}{} + +func loadSandboxConfigFromOldStore(ctx context.Context, sid string) (*SandboxConfig, context.Context, error) { + var config SandboxConfig + // We're bootstrapping + vcStore, err := store.NewVCSandboxStore(ctx, sid) + if err != nil { + return nil, ctx, err + } + + if err := vcStore.Load(store.Configuration, &config); err != nil { + return nil, ctx, err + } + + return &config, context.WithValue(ctx, oldstoreKey, true), nil +} + +func useOldStore(ctx context.Context) bool { + v := ctx.Value(oldstoreKey) + return v != nil +} diff --git a/virtcontainers/sandbox.go b/virtcontainers/sandbox.go index 18363fc37..46b9ea9c0 100644 --- a/virtcontainers/sandbox.go +++ b/virtcontainers/sandbox.go @@ -37,6 +37,7 @@ import ( "github.com/kata-containers/runtime/virtcontainers/pkg/annotations" "github.com/kata-containers/runtime/virtcontainers/pkg/compatoci" vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types" + "github.com/kata-containers/runtime/virtcontainers/store" "github.com/kata-containers/runtime/virtcontainers/types" "github.com/kata-containers/runtime/virtcontainers/utils" ) @@ -177,7 +178,9 @@ type Sandbox struct { factory Factory hypervisor hypervisor agent agent - newStore persistapi.PersistDriver + store *store.VCStore + // store is used to replace VCStore step by step + newStore persistapi.PersistDriver network Network monitor *monitor @@ -544,16 +547,47 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor } }() - s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, nil) + if useOldStore(ctx) { + vcStore, err := store.NewVCSandboxStore(ctx, s.id) + if err != nil { + return nil, err + } - // Ignore the error. Restore can fail for a new sandbox - if err := s.Restore(); err != nil { - s.Logger().WithError(err).Debug("restore sandbox failed") - } + s.store = vcStore - // new store doesn't require hypervisor to be stored immediately - if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig, s.stateful); err != nil { - return nil, err + // Fetch sandbox network to be able to access it from the sandbox structure. 
+		var networkNS NetworkNamespace
+		if err = s.store.Load(store.Network, &networkNS); err == nil {
+			s.networkNS = networkNS
+		}
+
+		devices, err := s.store.LoadDevices()
+		if err != nil {
+			s.Logger().WithError(err).WithField("sandboxid", s.id).Warning("load sandbox devices failed")
+		}
+		s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, devices)
+
+		// Load sandbox state. The hypervisor.createSandbox call may need to access state.
+		state, err := s.store.LoadState()
+		if err == nil {
+			s.state = state
+		}
+
+		if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig, s.stateful); err != nil {
+			return nil, err
+		}
+	} else {
+		s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, nil)
+
+		// Ignore the error. Restore can fail for a new sandbox
+		if err := s.Restore(); err != nil {
+			s.Logger().WithError(err).Debug("restore sandbox failed")
+		}
+
+		// new store doesn't require hypervisor to be stored immediately
+		if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig, s.stateful); err != nil {
+			return nil, err
+		}
 	}
 
 	agentConfig, err := newAgentConfig(sandboxConfig.AgentType, sandboxConfig.AgentConfig)
@@ -612,12 +646,22 @@ func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err
 
 	var config SandboxConfig
 
-	c, err := loadSandboxConfig(sandboxID)
+	// Try to load sandbox config from the old store first.
+	c, ctx, err := loadSandboxConfigFromOldStore(ctx, sandboxID)
 	if err != nil {
-		return nil, err
+		virtLog.Warningf("failed to get sandbox config from old store: %v", err)
+		// If we failed to load sandbox config from old store, try again with new store.
+		c, err = loadSandboxConfig(sandboxID)
+		if err != nil {
+			virtLog.Warningf("failed to get sandbox config from new store: %v", err)
+			return nil, err
+		}
 	}
 	config = *c
 
+	if useOldStore(ctx) {
+		virtLog.Infof("Warning: old store has been deprecated.")
+	}
 	// fetchSandbox is not suppose to create new sandbox VM.
 	sandbox, err = createSandbox(ctx, config, nil)
 	if err != nil {
@@ -1478,6 +1522,10 @@ func (s *Sandbox) setSandboxState(state types.StateString) error {
 	// update in-memory state
 	s.state.State = state
+
+	if useOldStore(s.ctx) {
+		return s.store.Store(store.State, s.state)
+	}
 	return nil
 }
 
diff --git a/virtcontainers/virtcontainers_test.go b/virtcontainers/virtcontainers_test.go
index 7bda40069..f7a17bdf3 100644
--- a/virtcontainers/virtcontainers_test.go
+++ b/virtcontainers/virtcontainers_test.go
@@ -16,6 +16,7 @@ import (
 	"testing"
 
 	"github.com/kata-containers/runtime/virtcontainers/persist/fs"
+	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/kata-containers/runtime/virtcontainers/utils"
 	"github.com/sirupsen/logrus"
 )
@@ -60,10 +61,14 @@ func cleanUp() {
 	os.RemoveAll(testDir)
 	os.MkdirAll(testDir, DirMode)
 
+	store.DeleteAll()
+	store.VCStorePrefix = ""
+
 	setup()
 }
 
 func setup() {
+	store.VCStorePrefix = testDir
 	os.Mkdir(filepath.Join(testDir, testBundle), DirMode)
 
 	for _, filename := range []string{testQemuKernelPath, testQemuInitrdPath, testQemuImagePath, testQemuPath} {