Merge pull request #2239 from WeiZhang555/persist-storage

Land experimental "newstore" as formal feature
Peng Tao authored this merge; committed by GitHub on 2020-01-14 13:12:05 +08:00
43 changed files with 803 additions and 1070 deletions

.gitignore (vendored): 1 line changed

@@ -9,6 +9,7 @@
/cli/config/configuration-nemu.toml
/cli/config/configuration-qemu.toml
/cli/config/configuration-qemu-virtiofs.toml
/cli/config/configuration-clh.toml
/cli/config-generated.go
/cli/coverage.html
/containerd-shim-kata-v2


@@ -231,9 +231,7 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# They may break compatibility, and are prepared for a big version bump.
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# 1. "newstore": new persist storage driver which breaks backward compatibility,
# expected to move out of experimental in 2.0.0.
# (default: [])
experimental=@DEFAULTEXPFEATURES@


@@ -207,9 +207,7 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# They may break compatibility, and are prepared for a big version bump.
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# 1. "newstore": new persist storage driver which breaks backward compatibility,
# expected to move out of experimental in 2.0.0.
# (default: [])
experimental=@DEFAULTEXPFEATURES@


@@ -333,9 +333,7 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# They may break compatibility, and are prepared for a big version bump.
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# 1. "newstore": new persist storage driver which breaks backward compatibility,
# expected to move out of experimental in 2.0.0.
# (default: [])
experimental=@DEFAULTEXPFEATURES@


@@ -435,9 +435,7 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# They may break compatibility, and are prepared for a big version bump.
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# 1. "newstore": new persist storage driver which breaks backward compatibility,
# expected to move out of experimental in 2.0.0.
# (default: [])
experimental=@DEFAULTEXPFEATURES@


@@ -430,9 +430,7 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# They may break compatibility, and are prepared for a big version bump.
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# 1. "newstore": new persist storage driver which breaks backward compatibility,
# expected to move out of experimental in 2.0.0.
# (default: [])
experimental=@DEFAULTEXPFEATURES@


@@ -6,7 +6,6 @@
package main
import (
"context"
"fmt"
"io/ioutil"
"strings"
@@ -14,7 +13,6 @@ import (
"unsafe"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/sirupsen/logrus"
)
@@ -238,12 +236,7 @@ func acrnIsUsable() error {
kataLog.WithField("device", acrnDevice).Info("device available")
acrnInst := vc.Acrn{}
vcStore, err := store.NewVCSandboxStore(context.Background(), "kata-check")
if err != nil {
return err
}
uuidStr, err := acrnInst.GetNextAvailableUUID(vcStore)
uuidStr, err := acrnInst.GetNextAvailableUUID()
if err != nil {
return err
}
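For illustration, a caller outside virtcontainers now needs no store plumbing at all; the check reduces to something like the sketch below (the log field name and message are illustrative, not taken from the diff):

    acrnInst := vc.Acrn{}
    uuidStr, err := acrnInst.GetNextAvailableUUID()
    if err != nil {
        return err
    }
    kataLog.WithField("uuid", uuidStr).Info("next available ACRN UUID")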


@@ -7,6 +7,7 @@ package virtcontainers
import (
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
@@ -20,14 +21,35 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/kata-containers/runtime/pkg/rootless"
"github.com/kata-containers/runtime/virtcontainers/device/config"
"github.com/kata-containers/runtime/virtcontainers/persist"
persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
"github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/types"
"github.com/kata-containers/runtime/virtcontainers/utils"
)
// Since ACRN is using the store in a quite abnormal way, let's first draw it back from store to here
// UUIDPathSuffix is the suffix used for uuid storage
const (
UUIDPathSuffix = "uuid"
uuidFile = "uuid.json"
)
// VMUUIDStoragePath is the uuid directory.
// It will contain all uuid info used by guest vm.
var VMUUIDStoragePath = func() string {
path := filepath.Join(fs.StorageRootPath(), UUIDPathSuffix)
if rootless.IsRootless() {
return filepath.Join(rootless.GetRootlessDir(), path)
}
return path
}
// ACRN currently supports only known UUIDs for security
// reasons (FuSa). When launching VM, only these pre-defined
// UUID should be used else VM launch will fail. The main
@@ -73,7 +95,6 @@ type AcrnState struct {
// Acrn is an Hypervisor interface implementation for the Linux acrn hypervisor.
type Acrn struct {
id string
store *store.VCStore
config HypervisorConfig
acrnConfig Config
state AcrnState
@@ -276,7 +297,7 @@ func (a *Acrn) buildDevices(imagePath string) ([]Device, error) {
}
// setup sets the Acrn structure up.
func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore) error {
func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig) error {
span, _ := a.trace("setup")
defer span.Finish()
@@ -286,24 +307,19 @@ func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto
}
a.id = id
a.store = vcStore
a.config = *hypervisorConfig
a.arch = newAcrnArch(a.config)
var create bool
var uuid string
if a.store != nil { //use old store
if err = a.store.Load(store.Hypervisor, &a.state); err != nil {
create = true
}
} else if a.state.UUID == "" { // new store
if a.state.UUID == "" {
create = true
}
if create {
a.Logger().Debug("Setting UUID")
if uuid, err = a.GetNextAvailableUUID(nil); err != nil {
if uuid, err = a.GetNextAvailableUUID(); err != nil {
return err
}
a.state.UUID = uuid
@@ -312,11 +328,7 @@ func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto
// The path might already exist, but in case of VM templating,
// we have to create it since the sandbox has not created it yet.
if err = os.MkdirAll(store.SandboxRuntimeRootPath(id), store.DirMode); err != nil {
return err
}
if err = a.storeState(); err != nil {
if err = os.MkdirAll(filepath.Join(fs.RunStoragePath(), id), DirMode); err != nil {
return err
}
@@ -348,14 +360,14 @@ func (a *Acrn) createDummyVirtioBlkDev(devices []Device) ([]Device, error) {
}
// createSandbox is the Hypervisor sandbox creation.
func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, store *store.VCStore, stateful bool) error {
func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error {
// Save the tracing context
a.ctx = ctx
span, _ := a.trace("createSandbox")
defer span.Finish()
if err := a.setup(id, hypervisorConfig, store); err != nil {
if err := a.setup(id, hypervisorConfig); err != nil {
return err
}
@@ -432,8 +444,8 @@ func (a *Acrn) startSandbox(timeoutSecs int) error {
a.Logger().WithField("default-kernel-parameters", formatted).Debug()
}
vmPath := filepath.Join(store.RunVMStoragePath(), a.id)
err := os.MkdirAll(vmPath, store.DirMode)
vmPath := filepath.Join(fs.RunVMStoragePath(), a.id)
err := os.MkdirAll(vmPath, DirMode)
if err != nil {
return err
}
@@ -458,11 +470,6 @@ func (a *Acrn) startSandbox(timeoutSecs int) error {
return err
}
//Store VMM information
if err = a.storeState(); err != nil {
return err
}
return nil
}
@@ -499,7 +506,7 @@ func (a *Acrn) stopSandbox() (err error) {
uuid := a.state.UUID
Idx := acrnUUIDsToIdx[uuid]
if err = a.store.Load(store.UUID, &a.info); err != nil {
if err = a.loadInfo(); err != nil {
a.Logger().Info("Failed to load UUID availability info")
return err
}
@@ -651,7 +658,7 @@ func (a *Acrn) getSandboxConsole(id string) (string, error) {
span, _ := a.trace("getSandboxConsole")
defer span.Finish()
return utils.BuildSocketPath(store.RunVMStoragePath(), id, acrnConsoleSocket)
return utils.BuildSocketPath(fs.RunVMStoragePath(), id, acrnConsoleSocket)
}
func (a *Acrn) saveSandbox() error {
@@ -698,7 +705,7 @@ func (a *Acrn) getPids() []int {
return []int{a.state.PID}
}
func (a *Acrn) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error {
func (a *Acrn) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error {
return errors.New("acrn is not supported by VM cache")
}
@@ -736,21 +743,15 @@ func (a *Acrn) GetACRNUUIDBytes(uid string) (uuid.UUID, error) {
}
// GetNextAvailableUUID returns next available UUID VM creation
// If no validl UUIDs are available it returns err.
func (a *Acrn) GetNextAvailableUUID(uuidstore *store.VCStore) (string, error) {
// If no valid UUIDs are available it returns err.
func (a *Acrn) GetNextAvailableUUID() (string, error) {
var MaxVMSupported uint8
var Idx uint8
var uuidStr string
var err error
if uuidstore == nil {
uuidstore = a.store
}
if uuidstore != nil { //use old store
if err = uuidstore.Load(store.UUID, &a.info); err != nil {
a.Logger().Infof("Load UUID store failed")
}
if err = a.loadInfo(); err != nil {
a.Logger().Infof("Load UUID store failed")
}
if MaxVMSupported, err = a.GetMaxSupportedACRNVM(); err != nil {
@@ -795,22 +796,39 @@ func (a *Acrn) GetMaxSupportedACRNVM() (uint8, error) {
return platformInfo.maxKataContainers, nil
}
func (a *Acrn) storeState() error {
if a.store != nil {
if err := a.store.Store(store.Hypervisor, a.state); err != nil {
a.Logger().WithError(err).Error("failed to store acrn state")
return err
}
func (a *Acrn) storeInfo() error {
store, err := persist.GetDriver("fs")
if err != nil {
return err
}
relPath := filepath.Join(UUIDPathSuffix, uuidFile)
jsonOut, err := json.Marshal(a.info)
if err != nil {
return fmt.Errorf("Could not marshal data: %s", err)
}
if err := store.GlobalWrite(relPath, jsonOut); err != nil {
return fmt.Errorf("failed to write uuid to file: %v", err)
}
return nil
}
func (a *Acrn) storeInfo() error {
if a.store != nil {
if err := a.store.Store(store.UUID, a.info); err != nil {
a.Logger().WithError(err).Error("failed to store acrn info")
return err
}
func (a *Acrn) loadInfo() error {
store, err := persist.GetDriver("fs")
if err != nil {
return err
}
relPath := filepath.Join(UUIDPathSuffix, uuidFile)
data, err := store.GlobalRead(relPath)
if err != nil {
return fmt.Errorf("failed to read uuid from file: %v", err)
}
if err := json.Unmarshal(data, &a.info); err != nil {
return fmt.Errorf("failed to unmarshal uuid info: %v", err)
}
return nil
}
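Taken together, storeInfo and loadInfo follow the fs persist driver's global read/write pattern. The same pair can persist any small piece of global (non-sandbox-scoped) state; a minimal round-trip sketch, where "example/state.json" and the payload are hypothetical and GlobalWrite/GlobalRead are assumed to resolve relative paths under the driver's storage root as the code above implies:

    store, err := persist.GetDriver("fs")
    if err != nil {
        return err
    }
    state := map[string]string{"example": "value"} // placeholder payload
    out, err := json.Marshal(state)
    if err != nil {
        return err
    }
    if err := store.GlobalWrite("example/state.json", out); err != nil { // hypothetical relative path
        return err
    }
    data, err := store.GlobalRead("example/state.json")
    if err != nil {
        return err
    }
    return json.Unmarshal(data, &state)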


@@ -13,7 +13,7 @@ import (
"testing"
"github.com/kata-containers/runtime/virtcontainers/device/config"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
"github.com/kata-containers/runtime/virtcontainers/types"
"github.com/stretchr/testify/assert"
)
@@ -106,7 +106,7 @@ func TestAcrnArchBaseAppendConsoles(t *testing.T) {
assert := assert.New(t)
acrnArchBase := newAcrnArchBase()
path := filepath.Join(store.SandboxRuntimeRootPath(sandboxID), consoleSocket)
path := filepath.Join(filepath.Join(fs.RunStoragePath(), sandboxID), consoleSocket)
expectedOut := []Device{
ConsoleDevice{


@@ -12,7 +12,7 @@ import (
"testing"
"github.com/kata-containers/runtime/virtcontainers/device/config"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
"github.com/kata-containers/runtime/virtcontainers/types"
"github.com/stretchr/testify/assert"
)
@@ -198,7 +198,7 @@ func TestAcrnGetSandboxConsole(t *testing.T) {
ctx: context.Background(),
}
sandboxID := "testSandboxID"
expected := filepath.Join(store.RunVMStoragePath(), sandboxID, consoleSocket)
expected := filepath.Join(fs.RunVMStoragePath(), sandboxID, consoleSocket)
result, err := a.getSandboxConsole(sandboxID)
assert.NoError(err)
@@ -218,11 +218,7 @@ func TestAcrnCreateSandbox(t *testing.T) {
},
}
vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
assert.NoError(err)
sandbox.store = vcStore
err = globalSandboxList.addSandbox(sandbox)
err := globalSandboxList.addSandbox(sandbox)
assert.NoError(err)
defer globalSandboxList.removeSandbox(sandbox.id)
@@ -230,7 +226,7 @@ func TestAcrnCreateSandbox(t *testing.T) {
//set PID to 1 to ignore hypercall to get UUID and set a random UUID
a.state.PID = 1
a.state.UUID = "f81d4fae-7dec-11d0-a765-00a0c91e6bf6"
err = a.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, nil, false)
err = a.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
assert.NoError(err)
assert.Exactly(acrnConfig, a.config)
}


@@ -16,7 +16,6 @@ import (
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
"github.com/kata-containers/runtime/virtcontainers/pkg/compatoci"
vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/types"
specs "github.com/opencontainers/runtime-spec/specs-go"
opentracing "github.com/opentracing/opentracing-go"
@@ -50,7 +49,6 @@ func SetLogger(ctx context.Context, logger *logrus.Entry) {
virtLog = logger.WithFields(fields)
deviceApi.SetLogger(virtLog)
store.SetLogger(virtLog)
compatoci.SetLogger(virtLog)
}
@@ -145,11 +143,11 @@ func DeleteSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) {
return nil, vcTypes.ErrNeedSandboxID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
// Fetch the sandbox from storage and create it.
s, err := fetchSandbox(ctx, sandboxID)
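Throughout this file the lock helpers now hand back their own release function, so call sites simply defer unlock() instead of threading a lock file into unlockSandbox. For illustration only, one way such a helper can be built on flock(2); imports os and syscall assumed, and the real implementation lives in the persist layer and may differ:

    func lockSandbox(lockPath string, exclusive bool) (func() error, error) {
        f, err := os.OpenFile(lockPath, os.O_RDONLY|os.O_CREATE, 0600)
        if err != nil {
            return nil, err
        }
        how := syscall.LOCK_SH // shared lock for readers (rLockSandbox)
        if exclusive {
            how = syscall.LOCK_EX // exclusive lock for writers (rwLockSandbox)
        }
        if err := syscall.Flock(int(f.Fd()), how); err != nil {
            f.Close()
            return nil, err
        }
        // The returned closure is what call sites defer as "unlock()".
        return func() error {
            defer f.Close()
            return syscall.Flock(int(f.Fd()), syscall.LOCK_UN)
        }, nil
    }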
@@ -178,11 +176,11 @@ func FetchSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) {
return nil, vcTypes.ErrNeedSandboxID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
// Fetch the sandbox from storage and create it.
s, err := fetchSandbox(ctx, sandboxID)
@@ -215,11 +213,11 @@ func StartSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) {
return nil, vcTypes.ErrNeedSandboxID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
// Fetch the sandbox from storage and create it.
s, err := fetchSandbox(ctx, sandboxID)
@@ -251,11 +249,11 @@ func StopSandbox(ctx context.Context, sandboxID string, force bool) (VCSandbox,
return nil, vcTypes.ErrNeedSandbox
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
// Fetch the sandbox from storage and create it.
s, err := fetchSandbox(ctx, sandboxID)
@@ -290,11 +288,11 @@ func RunSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
}
defer s.releaseStatelessSandbox()
lockFile, err := rwLockSandbox(ctx, s.id)
unlock, err := rwLockSandbox(s.id)
if err != nil {
return nil, err
}
defer unlockSandbox(ctx, s.id, lockFile)
defer unlock()
// Start the sandbox
err = s.Start()
@@ -310,12 +308,7 @@ func ListSandbox(ctx context.Context) ([]SandboxStatus, error) {
span, ctx := trace(ctx, "ListSandbox")
defer span.Finish()
var sbsdir string
if supportNewStore(ctx) {
sbsdir = fs.RunStoragePath()
} else {
sbsdir = store.RunStoragePath()
}
sbsdir := fs.RunStoragePath()
dir, err := os.Open(sbsdir)
if err != nil {
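With a single storage root, listing sandboxes comes down to enumerating the per-sandbox directories under fs.RunStoragePath(). A condensed sketch, assuming one directory per sandbox named after its ID (as the rest of this diff suggests):

    dir, err := os.Open(fs.RunStoragePath())
    if err != nil {
        return nil, err
    }
    defer dir.Close()
    ids, err := dir.Readdirnames(0) // one entry per sandbox ID
    if err != nil {
        return nil, err
    }
    // each id can then be passed to fetchSandbox (or StatusSandbox) to build the list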
@@ -356,15 +349,14 @@ func StatusSandbox(ctx context.Context, sandboxID string) (SandboxStatus, error)
return SandboxStatus{}, vcTypes.ErrNeedSandboxID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return SandboxStatus{}, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
unlockSandbox(ctx, sandboxID, lockFile)
return SandboxStatus{}, err
}
defer s.releaseStatelessSandbox()
@@ -402,11 +394,11 @@ func CreateContainer(ctx context.Context, sandboxID string, containerConfig Cont
return nil, nil, vcTypes.ErrNeedSandboxID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return nil, nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -441,11 +433,11 @@ func DeleteContainer(ctx context.Context, sandboxID, containerID string) (VCCont
return nil, vcTypes.ErrNeedContainerID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -470,11 +462,11 @@ func StartContainer(ctx context.Context, sandboxID, containerID string) (VCConta
return nil, vcTypes.ErrNeedContainerID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -499,11 +491,11 @@ func StopContainer(ctx context.Context, sandboxID, containerID string) (VCContai
return nil, vcTypes.ErrNeedContainerID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -528,11 +520,11 @@ func EnterContainer(ctx context.Context, sandboxID, containerID string, cmd type
return nil, nil, nil, vcTypes.ErrNeedContainerID
}
lockFile, err := rLockSandbox(ctx, sandboxID)
unlock, err := rLockSandbox(sandboxID)
if err != nil {
return nil, nil, nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -562,15 +554,14 @@ func StatusContainer(ctx context.Context, sandboxID, containerID string) (Contai
return ContainerStatus{}, vcTypes.ErrNeedContainerID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return ContainerStatus{}, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
unlockSandbox(ctx, sandboxID, lockFile)
return ContainerStatus{}, err
}
defer s.releaseStatelessSandbox()
@@ -646,11 +637,11 @@ func KillContainer(ctx context.Context, sandboxID, containerID string, signal sy
return vcTypes.ErrNeedContainerID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -675,11 +666,11 @@ func ProcessListContainer(ctx context.Context, sandboxID, containerID string, op
return nil, vcTypes.ErrNeedContainerID
}
lockFile, err := rLockSandbox(ctx, sandboxID)
unlock, err := rLockSandbox(sandboxID)
if err != nil {
return nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -704,11 +695,11 @@ func UpdateContainer(ctx context.Context, sandboxID, containerID string, resourc
return vcTypes.ErrNeedContainerID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -732,12 +723,12 @@ func StatsContainer(ctx context.Context, sandboxID, containerID string) (Contain
if containerID == "" {
return ContainerStats{}, vcTypes.ErrNeedContainerID
}
lockFile, err := rLockSandbox(ctx, sandboxID)
unlock, err := rLockSandbox(sandboxID)
if err != nil {
return ContainerStats{}, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -758,12 +749,11 @@ func StatsSandbox(ctx context.Context, sandboxID string) (SandboxStats, []Contai
return SandboxStats{}, []ContainerStats{}, vcTypes.ErrNeedSandboxID
}
lockFile, err := rLockSandbox(ctx, sandboxID)
unlock, err := rLockSandbox(sandboxID)
if err != nil {
return SandboxStats{}, []ContainerStats{}, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -797,11 +787,11 @@ func togglePauseContainer(ctx context.Context, sandboxID, containerID string, pa
return vcTypes.ErrNeedContainerID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -841,11 +831,11 @@ func AddDevice(ctx context.Context, sandboxID string, info deviceConfig.DeviceIn
return nil, vcTypes.ErrNeedSandboxID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -861,11 +851,11 @@ func toggleInterface(ctx context.Context, sandboxID string, inf *vcTypes.Interfa
return nil, vcTypes.ErrNeedSandboxID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -905,11 +895,11 @@ func ListInterfaces(ctx context.Context, sandboxID string) ([]*vcTypes.Interface
return nil, vcTypes.ErrNeedSandboxID
}
lockFile, err := rLockSandbox(ctx, sandboxID)
unlock, err := rLockSandbox(sandboxID)
if err != nil {
return nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -929,11 +919,11 @@ func UpdateRoutes(ctx context.Context, sandboxID string, routes []*vcTypes.Route
return nil, vcTypes.ErrNeedSandboxID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -953,11 +943,11 @@ func ListRoutes(ctx context.Context, sandboxID string) ([]*vcTypes.Route, error)
return nil, vcTypes.ErrNeedSandboxID
}
lockFile, err := rLockSandbox(ctx, sandboxID)
unlock, err := rLockSandbox(sandboxID)
if err != nil {
return nil, err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {
@@ -983,11 +973,11 @@ func CleanupContainer(ctx context.Context, sandboxID, containerID string, force
return vcTypes.ErrNeedContainerID
}
lockFile, err := rwLockSandbox(ctx, sandboxID)
unlock, err := rwLockSandbox(sandboxID)
if err != nil {
return err
}
defer unlockSandbox(ctx, sandboxID, lockFile)
defer unlock()
s, err := fetchSandbox(ctx, sandboxID)
if err != nil {


@@ -16,10 +16,11 @@ import (
"testing"
ktu "github.com/kata-containers/runtime/pkg/katatestutils"
"github.com/kata-containers/runtime/virtcontainers/persist"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
"github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
"github.com/kata-containers/runtime/virtcontainers/pkg/mock"
vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/types"
"github.com/kata-containers/runtime/virtcontainers/utils"
specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -68,6 +69,16 @@ func newBasicTestCmd() types.Cmd {
return cmd
}
func rmSandboxDir(sid string) error {
store, err := persist.GetDriver("fs")
if err != nil {
return fmt.Errorf("failed to get fs persist driver: %v", err)
}
store.Destroy(sid)
return nil
}
func newTestSandboxConfigNoop() SandboxConfig {
bundlePath := filepath.Join(testDir, testBundle)
containerAnnotations[annotations.BundlePathKey] = bundlePath
@@ -138,7 +149,7 @@ func TestCreateSandboxNoopAgentSuccessful(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
}
@@ -175,7 +186,7 @@ func TestCreateSandboxKataAgentSuccessful(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
}
@@ -202,7 +213,7 @@ func TestDeleteSandboxNoopAgentSuccessful(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
@@ -247,7 +258,7 @@ func TestDeleteSandboxKataAgentSuccessful(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
@@ -263,7 +274,7 @@ func TestDeleteSandboxFailing(t *testing.T) {
defer cleanUp()
assert := assert.New(t)
sandboxDir := store.SandboxConfigurationRootPath(testSandboxID)
sandboxDir := filepath.Join(fs.RunStoragePath(), testSandboxID)
os.Remove(sandboxDir)
p, err := DeleteSandbox(context.Background(), testSandboxID)
@@ -327,7 +338,7 @@ func TestStartSandboxFailing(t *testing.T) {
defer cleanUp()
assert := assert.New(t)
sandboxDir := store.SandboxConfigurationRootPath(testSandboxID)
sandboxDir := filepath.Join(fs.RunStoragePath(), testSandboxID)
os.Remove(sandboxDir)
p, err := StartSandbox(context.Background(), testSandboxID)
@@ -394,7 +405,7 @@ func TestStopSandboxKataAgentSuccessful(t *testing.T) {
func TestStopSandboxFailing(t *testing.T) {
defer cleanUp()
sandboxDir := store.SandboxConfigurationRootPath(testSandboxID)
sandboxDir := filepath.Join(fs.RunStoragePath(), testSandboxID)
os.Remove(sandboxDir)
p, err := StopSandbox(context.Background(), testSandboxID, false)
@@ -412,7 +423,7 @@ func TestRunSandboxNoopAgentSuccessful(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
}
@@ -450,7 +461,7 @@ func TestRunSandboxKataAgentSuccessful(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
@@ -517,12 +528,12 @@ func TestStatusSandboxSuccessfulStateReady(t *testing.T) {
expectedStatus := SandboxStatus{
ID: testSandboxID,
State: types.SandboxState{
State: types.StateReady,
State: types.StateReady,
PersistVersion: 2,
},
Hypervisor: MockHypervisor,
HypervisorConfig: hypervisorConfig,
Agent: NoopAgentType,
Annotations: sandboxAnnotations,
ContainersStatus: []ContainerStatus{
{
ID: containerID,
@@ -576,12 +587,12 @@ func TestStatusSandboxSuccessfulStateRunning(t *testing.T) {
expectedStatus := SandboxStatus{
ID: testSandboxID,
State: types.SandboxState{
State: types.StateRunning,
State: types.StateRunning,
PersistVersion: 2,
},
Hypervisor: MockHypervisor,
HypervisorConfig: hypervisorConfig,
Agent: NoopAgentType,
Annotations: sandboxAnnotations,
ContainersStatus: []ContainerStatus{
{
ID: containerID,
@@ -627,7 +638,7 @@ func TestStatusSandboxFailingFetchSandboxConfig(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
store.DeleteAll()
rmSandboxDir(p.ID())
globalSandboxList.removeSandbox(p.ID())
_, err = StatusSandbox(ctx, p.ID())
@@ -645,7 +656,7 @@ func TestStatusPodSandboxFailingFetchSandboxState(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
store.DeleteAll()
rmSandboxDir(p.ID())
globalSandboxList.removeSandbox(p.ID())
_, err = StatusSandbox(ctx, p.ID())
@@ -677,7 +688,7 @@ func TestCreateContainerSuccessful(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
@@ -708,7 +719,7 @@ func TestCreateContainerFailingNoSandbox(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.Error(err)
@@ -731,7 +742,7 @@ func TestDeleteContainerSuccessful(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
@@ -775,7 +786,7 @@ func TestDeleteContainerFailingNoContainer(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
@@ -832,7 +843,7 @@ func TestStartContainerFailingNoContainer(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
@@ -853,7 +864,7 @@ func TestStartContainerFailingSandboxNotStarted(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
@@ -933,7 +944,7 @@ func TestStopContainerFailingNoContainer(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
@@ -1037,7 +1048,7 @@ func TestEnterContainerFailingNoContainer(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
@@ -1090,7 +1101,7 @@ func TestStatusContainerSuccessful(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
@@ -1133,7 +1144,7 @@ func TestStatusContainerStateReady(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
@@ -1196,7 +1207,7 @@ func TestStatusContainerStateRunning(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
assert.NoError(err)
@@ -1255,7 +1266,7 @@ func TestStatusContainerFailing(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
store.DeleteAll()
rmSandboxDir(p.ID())
globalSandboxList.removeSandbox(p.ID())
_, err = StatusContainer(ctx, p.ID(), contID)
@@ -1274,7 +1285,7 @@ func TestStatsContainerFailing(t *testing.T) {
assert.NoError(err)
assert.NotNil(p)
store.DeleteAll()
rmSandboxDir(p.ID())
globalSandboxList.removeSandbox(p.ID())
_, err = StatsContainer(ctx, p.ID(), contID)
@@ -1308,7 +1319,6 @@ func TestStatsContainer(t *testing.T) {
pImpl, ok := p.(*Sandbox)
assert.True(ok)
defer store.DeleteAll()
contConfig := newTestContainerConfigNoop(contID)
_, c, err := CreateContainer(ctx, p.ID(), contConfig)
@@ -1354,7 +1364,6 @@ func TestProcessListContainer(t *testing.T) {
pImpl, ok := p.(*Sandbox)
assert.True(ok)
defer store.DeleteAll()
contConfig := newTestContainerConfigNoop(contID)
_, c, err := CreateContainer(ctx, p.ID(), contConfig)
@@ -1410,7 +1419,7 @@ func createAndStartSandbox(ctx context.Context, config SandboxConfig) (sandbox V
return nil, "", err
}
sandboxDir = store.SandboxConfigurationRootPath(sandbox.ID())
sandboxDir = filepath.Join(fs.RunStoragePath(), sandbox.ID())
_, err = os.Stat(sandboxDir)
if err != nil {
return nil, "", err
@@ -1695,7 +1704,7 @@ func TestCleanupContainer(t *testing.T) {
CleanupContainer(ctx, p.ID(), c.ID(), true)
}
sandboxDir := store.SandboxConfigurationRootPath(p.ID())
sandboxDir := filepath.Join(fs.RunStoragePath(), p.ID())
_, err = os.Stat(sandboxDir)
if err == nil {


@@ -21,13 +21,13 @@ import (
"time"
persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
chclient "github.com/kata-containers/runtime/virtcontainers/pkg/cloud-hypervisor/client"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/kata-containers/runtime/virtcontainers/device/config"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/types"
"github.com/kata-containers/runtime/virtcontainers/utils"
)
@@ -104,7 +104,6 @@ func (s *CloudHypervisorState) reset() {
type cloudHypervisor struct {
id string
state CloudHypervisorState
store *store.VCStore
config HypervisorConfig
ctx context.Context
APIClient clhClient
@@ -139,7 +138,7 @@ var clhDebugKernelParams = []Param{
// For cloudHypervisor this call only sets the internal structure up.
// The VM will be created and started through startSandbox().
func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore, stateful bool) error {
func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error {
clh.ctx = ctx
span, _ := clh.trace("createSandbox")
@@ -151,7 +150,6 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
}
clh.id = id
clh.store = vcStore
clh.config = *hypervisorConfig
clh.state.state = clhNotReady
@@ -187,12 +185,7 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
}
// No need to return an error from there since there might be nothing
// to fetch if this is the first time the hypervisor is created.
err = clh.store.Load(store.Hypervisor, &clh.state)
if err != nil {
clh.Logger().WithField("function", "createSandbox").WithError(err).Info("Sandbox not found creating ")
} else {
if clh.state.PID > 0 {
clh.Logger().WithField("function", "createSandbox").Info("Sandbox already exist, loading from state")
clh.virtiofsd = &virtiofsd{
PID: clh.state.VirtiofsdPID,
@@ -203,6 +196,10 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
return nil
}
// No need to return an error from there since there might be nothing
// to fetch if this is the first time the hypervisor is created.
clh.Logger().WithField("function", "createSandbox").WithError(err).Info("Sandbox not found creating ")
// Set initial memory size of the virtual machine
clh.vmconfig.Memory.Size = int64(clh.config.MemorySize) << utils.MibToBytesShift
clh.vmconfig.Memory.File = "/dev/shm"
@@ -306,8 +303,8 @@ func (clh *cloudHypervisor) startSandbox(timeout int) error {
clh.Logger().WithField("function", "startSandbox").Info("starting Sandbox")
vmPath := filepath.Join(store.RunVMStoragePath(), clh.id)
err := os.MkdirAll(vmPath, store.DirMode)
vmPath := filepath.Join(fs.RunVMStoragePath(), clh.id)
err := os.MkdirAll(vmPath, DirMode)
if err != nil {
return err
}
@@ -323,9 +320,6 @@ func (clh *cloudHypervisor) startSandbox(timeout int) error {
return err
}
clh.state.VirtiofsdPID = pid
if err = clh.storeState(); err != nil {
return err
}
} else {
return errors.New("cloud-hypervisor only supports virtio based file sharing")
}
@@ -350,10 +344,6 @@ func (clh *cloudHypervisor) startSandbox(timeout int) error {
}
clh.state.state = clhReady
if err = clh.storeState(); err != nil {
return err
}
return nil
}
@@ -431,7 +421,7 @@ func (clh *cloudHypervisor) stopSandbox() (err error) {
return clh.terminate()
}
func (clh *cloudHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error {
func (clh *cloudHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error {
return errors.New("cloudHypervisor is not supported by VM cache")
}
@@ -442,6 +432,7 @@ func (clh *cloudHypervisor) toGrpc() ([]byte, error) {
func (clh *cloudHypervisor) save() (s persistapi.HypervisorState) {
s.Pid = clh.state.PID
s.Type = string(ClhHypervisor)
s.VirtiofsdPid = clh.state.VirtiofsdPID
return
}
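The newly persisted VirtiofsdPid only matters if the restore path mirrors it. The diff shows only save(); a symmetric load counterpart would look roughly like this (sketch, assuming such a method exists on cloudHypervisor):

    func (clh *cloudHypervisor) load(s persistapi.HypervisorState) {
        clh.state.PID = s.Pid
        clh.state.VirtiofsdPID = s.VirtiofsdPid
    }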
@@ -589,7 +580,6 @@ func (clh *cloudHypervisor) terminate() (err error) {
func (clh *cloudHypervisor) reset() {
clh.state.reset()
clh.storeState()
}
func (clh *cloudHypervisor) generateSocket(id string, useVsock bool) (interface{}, error) {
@@ -614,36 +604,26 @@ func (clh *cloudHypervisor) generateSocket(id string, useVsock bool) (interface{
}
func (clh *cloudHypervisor) virtioFsSocketPath(id string) (string, error) {
return utils.BuildSocketPath(store.RunVMStoragePath(), id, virtioFsSocket)
return utils.BuildSocketPath(fs.RunVMStoragePath(), id, virtioFsSocket)
}
func (clh *cloudHypervisor) vsockSocketPath(id string) (string, error) {
return utils.BuildSocketPath(store.RunVMStoragePath(), id, clhSocket)
return utils.BuildSocketPath(fs.RunVMStoragePath(), id, clhSocket)
}
func (clh *cloudHypervisor) serialPath(id string) (string, error) {
return utils.BuildSocketPath(store.RunVMStoragePath(), id, clhSerial)
return utils.BuildSocketPath(fs.RunVMStoragePath(), id, clhSerial)
}
func (clh *cloudHypervisor) apiSocketPath(id string) (string, error) {
return utils.BuildSocketPath(store.RunVMStoragePath(), id, clhAPISocket)
return utils.BuildSocketPath(fs.RunVMStoragePath(), id, clhAPISocket)
}
func (clh *cloudHypervisor) logFilePath(id string) (string, error) {
return utils.BuildSocketPath(store.RunVMStoragePath(), id, clhLogFile)
}
func (clh *cloudHypervisor) storeState() error {
if clh.store != nil {
if err := clh.store.Store(store.Hypervisor, clh.state); err != nil {
return err
}
}
return nil
return utils.BuildSocketPath(fs.RunVMStoragePath(), id, clhLogFile)
}
func (clh *cloudHypervisor) waitVMM(timeout uint) error {
clhRunning, err := clh.isClhRunning(timeout)
if err != nil {
@@ -1018,7 +998,7 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
}
// cleanup vm path
dir := filepath.Join(store.RunVMStoragePath(), clh.id)
dir := filepath.Join(fs.RunVMStoragePath(), clh.id)
// If it's a symlink, remove both dir and the target.
link, err := filepath.EvalSymlinks(dir)
@@ -1047,14 +1027,7 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
}
if clh.config.VMid != "" {
dir = store.SandboxConfigurationRootPath(clh.config.VMid)
if err := os.RemoveAll(dir); err != nil {
if !force {
return err
}
clh.Logger().WithError(err).WithField("path", dir).Warnf("failed to remove vm path")
}
dir = store.SandboxRuntimeRootPath(clh.config.VMid)
dir = filepath.Join(fs.RunStoragePath(), clh.config.VMid)
if err := os.RemoveAll(dir); err != nil {
if !force {
return err


@@ -13,8 +13,8 @@ import (
"testing"
"github.com/kata-containers/runtime/virtcontainers/device/config"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
chclient "github.com/kata-containers/runtime/virtcontainers/pkg/cloud-hypervisor/client"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
@@ -184,7 +184,7 @@ func TestCloudHypervisorCleanupVM(t *testing.T) {
t.Errorf("cloudHypervisor.cleanupVM() expected error != %v", err)
}
dir := filepath.Join(store.RunVMStoragePath(), clh.id)
dir := filepath.Join(fs.RunVMStoragePath(), clh.id)
os.MkdirAll(dir, os.ModePerm)
if err := clh.cleanupVM(false); err != nil {
@@ -219,18 +219,8 @@ func TestClhCreateSandbox(t *testing.T) {
},
}
vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
err = clh.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
assert.NoError(err)
sandbox.store = vcStore
// Create parent dir path for hypervisor.json
parentDir := store.SandboxConfigurationRootPath(sandbox.id)
assert.NoError(os.MkdirAll(parentDir, store.DirMode))
err = clh.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false)
assert.NoError(err)
assert.NoError(os.RemoveAll(parentDir))
assert.Exactly(clhConfig, clh.config)
}
@@ -245,23 +235,6 @@ func TestClooudHypervisorStartSandbox(t *testing.T) {
virtiofsd: &virtiofsdMock{},
}
sandbox := &Sandbox{
ctx: context.Background(),
id: "testSandbox",
config: &SandboxConfig{
HypervisorConfig: clhConfig,
},
}
vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
assert.NoError(err)
sandbox.store = vcStore
// Create parent dir path for hypervisor.json
parentDir := store.SandboxConfigurationRootPath(sandbox.id)
assert.NoError(os.MkdirAll(parentDir, store.DirMode))
err = clh.startSandbox(10)
assert.NoError(err)
}


@@ -322,8 +322,6 @@ type Container struct {
sandbox *Sandbox
runPath string
configPath string
containerPath string
rootfsSuffix string
@@ -391,13 +389,6 @@ func (c *Container) GetPid() int {
func (c *Container) setStateFstype(fstype string) error {
c.state.Fstype = fstype
if !c.sandbox.supportNewStore() {
// experimental runtime use "persist.json" which doesn't need "state.json" anymore
if err := c.storeState(); err != nil {
return err
}
}
return nil
}
@@ -421,48 +412,10 @@ func (c *Container) GetPatchedOCISpec() *specs.Spec {
// storeContainer stores a container config.
func (c *Container) storeContainer() error {
if c.sandbox.supportNewStore() {
if err := c.sandbox.Save(); err != nil {
return err
}
return nil
if err := c.sandbox.Save(); err != nil {
return err
}
return c.store.Store(store.Configuration, *(c.config))
}
func (c *Container) storeProcess() error {
return c.store.Store(store.Process, c.process)
}
func (c *Container) storeMounts() error {
return c.store.Store(store.Mounts, c.mounts)
}
func (c *Container) storeDevices() error {
return c.store.Store(store.DeviceIDs, c.devices)
}
func (c *Container) storeState() error {
return c.store.Store(store.State, c.state)
}
func (c *Container) loadMounts() ([]Mount, error) {
var mounts []Mount
if err := c.store.Load(store.Mounts, &mounts); err != nil {
return []Mount{}, err
}
return mounts, nil
}
func (c *Container) loadDevices() ([]ContainerDevice, error) {
var devices []ContainerDevice
if err := c.store.Load(store.DeviceIDs, &devices); err != nil {
return []ContainerDevice{}, err
}
return devices, nil
return nil
}
// setContainerState sets both the in-memory and on-disk state of the
@@ -476,17 +429,17 @@ func (c *Container) setContainerState(state types.StateString) error {
// update in-memory state
c.state.State = state
if c.sandbox.supportNewStore() {
// flush data to storage
if err := c.sandbox.Save(); err != nil {
return err
}
} else {
if useOldStore(c.sandbox.ctx) {
// experimental runtime use "persist.json" which doesn't need "state.json" anymore
// update on-disk state
if err := c.store.Store(store.State, c.state); err != nil {
return err
}
} else {
// flush data to storage
if err := c.sandbox.Save(); err != nil {
return err
}
}
return nil
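In short, the per-field helpers removed above (storeProcess, storeMounts, storeDevices, storeState) collapse into a single sandbox-level save, while the useOldStore branch keeps writing the legacy state.json for sandboxes created by older runtimes. A typical call site after a container mutation is now just (sketch):

    // persist every container field (process, mounts, devices, state) in one pass
    if err := c.sandbox.Save(); err != nil {
        return err
    }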
@@ -571,13 +524,6 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, guestSharedDir string) (
if err := c.sandbox.devManager.AttachDevice(m.BlockDeviceID, c.sandbox); err != nil {
return nil, nil, err
}
if !c.sandbox.supportNewStore() {
if err := c.sandbox.storeSandboxDevices(); err != nil {
//TODO: roll back?
return nil, nil, err
}
}
continue
}
@@ -620,12 +566,6 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, guestSharedDir string) (
sharedDirMounts[sharedDirMount.Destination] = sharedDirMount
}
if !c.sandbox.supportNewStore() {
if err := c.storeMounts(); err != nil {
return nil, nil, err
}
}
return sharedDirMounts, ignoredMounts, nil
}
@@ -741,8 +681,6 @@ func newContainer(sandbox *Sandbox, contConfig *ContainerConfig) (*Container, er
rootFs: contConfig.RootFs,
config: contConfig,
sandbox: sandbox,
runPath: store.ContainerRuntimeRootPath(sandbox.id, contConfig.ID),
configPath: store.ContainerConfigurationRootPath(sandbox.id, contConfig.ID),
containerPath: filepath.Join(sandbox.id, contConfig.ID),
rootfsSuffix: "rootfs",
state: types.ContainerState{},
@@ -751,23 +689,23 @@ func newContainer(sandbox *Sandbox, contConfig *ContainerConfig) (*Container, er
ctx: sandbox.ctx,
}
storeAlreadyExists := store.VCContainerStoreExists(sandbox.ctx, c.sandboxID, c.id)
ctrStore, err := store.NewVCContainerStore(sandbox.ctx, c.sandboxID, c.id)
if err != nil {
return nil, err
}
defer func() {
if err != nil && !storeAlreadyExists {
if delerr := c.store.Delete(); delerr != nil {
c.Logger().WithError(delerr).WithField("cid", c.id).Error("delete store failed")
}
if useOldStore(sandbox.ctx) {
ctrStore, err := store.NewVCContainerStore(sandbox.ctx, c.sandboxID, c.id)
if err != nil {
return nil, err
}
c.store = ctrStore
state, err := c.store.LoadContainerState()
if err == nil {
c.state = state
}
}()
c.store = ctrStore
// experimental runtime use "persist.json" instead of legacy "state.json" as storage
if c.sandbox.supportNewStore() {
var process Process
if err := c.store.Load(store.Process, &process); err == nil {
c.process = process
}
} else {
// experimental runtime use "persist.json" instead of legacy "state.json" as storage
err := c.Restore()
if err == nil {
//container restored
@@ -778,34 +716,41 @@ func newContainer(sandbox *Sandbox, contConfig *ContainerConfig) (*Container, er
if !os.IsNotExist(err) && err != errContainerPersistNotExist {
return nil, err
}
// Go to next step for first created container
} else {
state, err := c.store.LoadContainerState()
if err == nil {
c.state = state
}
var process Process
if err := c.store.Load(store.Process, &process); err == nil {
c.process = process
}
}
if err = c.createMounts(); err != nil {
// Go to next step for first created container
if err := c.createMounts(); err != nil {
return nil, err
}
if err = c.createDevices(contConfig); err != nil {
if err := c.createDevices(contConfig); err != nil {
return nil, err
}
return c, nil
}
func (c *Container) loadMounts() ([]Mount, error) {
var mounts []Mount
if err := c.store.Load(store.Mounts, &mounts); err != nil {
return []Mount{}, err
}
return mounts, nil
}
func (c *Container) loadDevices() ([]ContainerDevice, error) {
var devices []ContainerDevice
if err := c.store.Load(store.DeviceIDs, &devices); err != nil {
return []ContainerDevice{}, err
}
return devices, nil
}
func (c *Container) createMounts() error {
// If sandbox supports "newstore", only newly created container can reach this function,
// so we don't call restore when `supportNewStore` is true
if !c.sandbox.supportNewStore() {
if useOldStore(c.sandbox.ctx) {
mounts, err := c.loadMounts()
if err == nil {
// restore mounts from disk
@@ -825,7 +770,7 @@ func (c *Container) createMounts() error {
func (c *Container) createDevices(contConfig *ContainerConfig) error {
// If sandbox supports "newstore", only newly created container can reach this function,
// so we don't call restore when `supportNewStore` is true
if !c.sandbox.supportNewStore() {
if useOldStore(c.sandbox.ctx) {
// Devices will be found in storage after create stage has completed.
// We load devices from storage at all other stages.
storedDevices, err := c.loadDevices()
@@ -914,12 +859,6 @@ func (c *Container) create() (err error) {
// inside the VM
c.getSystemMountInfo()
if !c.sandbox.supportNewStore() {
if err = c.storeDevices(); err != nil {
return
}
}
process, err := c.sandbox.agent.createContainer(c.sandbox, c)
if err != nil {
return err
@@ -932,13 +871,6 @@ func (c *Container) create() (err error) {
}
}
if !c.sandbox.supportNewStore() {
// Store the container process returned by the agent.
if err = c.storeProcess(); err != nil {
return
}
}
if err = c.setContainerState(types.StateReady); err != nil {
return
}
@@ -964,7 +896,7 @@ func (c *Container) delete() error {
}
}
return c.store.Delete()
return c.sandbox.storeSandbox()
}
// checkSandboxRunning validates the container state.
@@ -1099,10 +1031,8 @@ func (c *Container) stop(force bool) error {
defer func() {
// Save device and drive data.
// TODO: can we merge this saving with setContainerState()?
if c.sandbox.supportNewStore() {
if err := c.sandbox.Save(); err != nil {
c.Logger().WithError(err).Info("save container state failed")
}
if err := c.sandbox.Save(); err != nil {
c.Logger().WithError(err).Info("save container state failed")
}
}()
@@ -1383,12 +1313,6 @@ func (c *Container) plugDevice(devicePath string) error {
if err := c.sandbox.devManager.AttachDevice(b.DeviceID(), c.sandbox); err != nil {
return err
}
if !c.sandbox.supportNewStore() {
if err := c.sandbox.storeSandboxDevices(); err != nil {
return err
}
}
}
return nil
}
@@ -1419,12 +1343,6 @@ func (c *Container) removeDrive() (err error) {
return err
}
}
if !c.sandbox.supportNewStore() {
if err := c.sandbox.storeSandboxDevices(); err != nil {
return err
}
}
}
return nil
@@ -1439,12 +1357,6 @@ func (c *Container) attachDevices() error {
return err
}
}
if !c.sandbox.supportNewStore() {
if err := c.sandbox.storeSandboxDevices(); err != nil {
return err
}
}
return nil
}
@@ -1467,12 +1379,6 @@ func (c *Container) detachDevices() error {
}
}
}
if !c.sandbox.supportNewStore() {
if err := c.sandbox.storeSandboxDevices(); err != nil {
return err
}
}
return nil
}


@@ -21,7 +21,6 @@ import (
"github.com/kata-containers/runtime/virtcontainers/device/drivers"
"github.com/kata-containers/runtime/virtcontainers/device/manager"
"github.com/kata-containers/runtime/virtcontainers/persist"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/types"
"github.com/stretchr/testify/assert"
)
@@ -92,18 +91,13 @@ func TestContainerRemoveDrive(t *testing.T) {
config: &SandboxConfig{},
}
vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
assert.Nil(t, err)
sandbox.store = vcStore
container := Container{
sandbox: sandbox,
id: "testContainer",
}
container.state.Fstype = ""
err = container.removeDrive()
err := container.removeDrive()
// hotplugRemoveDevice for hypervisor should not be called.
// test should pass without a hypervisor created for the container's sandbox.
@@ -124,8 +118,6 @@ func TestContainerRemoveDrive(t *testing.T) {
assert.True(t, ok)
err = device.Attach(devReceiver)
assert.Nil(t, err)
err = sandbox.storeSandboxDevices()
assert.Nil(t, err)
container.state.Fstype = "xfs"
container.state.BlockDeviceID = device.DeviceID()
@@ -274,7 +266,7 @@ func testSetupFakeRootfs(t *testing.T) (testRawFile, loopDev, mntDir string, err
assert.NoError(err)
mntDir = filepath.Join(tmpDir, "rootfs")
err = os.Mkdir(mntDir, store.DirMode)
err = os.Mkdir(mntDir, DirMode)
assert.NoError(err)
err = syscall.Mount(loopDev, mntDir, "ext4", uintptr(0), "")
@@ -324,16 +316,12 @@ func TestContainerAddDriveDir(t *testing.T) {
},
}
defer store.DeleteAll()
sandboxStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
assert.Nil(err)
sandbox.store = sandboxStore
sandbox.newStore, err = persist.GetDriver("fs")
assert.NoError(err)
assert.NotNil(sandbox.newStore)
defer sandbox.newStore.Destroy(sandbox.id)
contID := "100"
container := Container{
sandbox: sandbox,
@@ -341,18 +329,6 @@ func TestContainerAddDriveDir(t *testing.T) {
rootFs: RootFs{Target: fakeRootfs, Mounted: true},
}
containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, container.id)
assert.Nil(err)
container.store = containerStore
// create state file
path := store.ContainerRuntimeRootPath(testSandboxID, container.ID())
stateFilePath := filepath.Join(path, store.StateFile)
os.Remove(stateFilePath)
_, err = os.Create(stateFilePath)
assert.NoError(err)
// Make the checkStorageDriver func variable point to a fake check function
savedFunc := checkStorageDriver
checkStorageDriver = func(major, minor int) (bool, error) {
@@ -396,20 +372,13 @@ func TestContainerRootfsPath(t *testing.T) {
},
},
}
vcstore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
sandbox.store = vcstore
assert.Nil(t, err)
container := Container{
id: "rootfstestcontainerid",
sandbox: sandbox,
rootFs: RootFs{Target: fakeRootfs, Mounted: true},
rootfsSuffix: "rootfs",
}
cvcstore, err := store.NewVCContainerStore(context.Background(),
sandbox.id,
container.id)
assert.Nil(t, err)
container.store = cvcstore
container.hotplugDrive()
assert.Empty(t, container.rootfsSuffix)


@@ -8,19 +8,30 @@ package cache
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/factory/direct"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
)
var rootPathSave = fs.StorageRootPath()
func TestTemplateFactory(t *testing.T) {
assert := assert.New(t)
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
fs.TestSetStorageRootPath(filepath.Join(testDir, "vc"))
defer func() {
os.RemoveAll(testDir)
fs.TestSetStorageRootPath(rootPathSave)
}()
hyperConfig := vc.HypervisorConfig{
KernelPath: testDir,
ImagePath: testDir,
@@ -34,10 +45,11 @@ func TestTemplateFactory(t *testing.T) {
ctx := context.Background()
var savedStorePath = store.VCStorePrefix
store.VCStorePrefix = testDir
runPathSave := fs.RunStoragePath()
fs.TestSetRunStoragePath(filepath.Join(testDir, "vc", "run"))
// allow the tests to run without affecting the host system.
defer func() {
store.VCStorePrefix = savedStorePath
fs.TestSetRunStoragePath(runPathSave)
}()
// New
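These factory and template tests repeat the same save/override/restore dance for the storage roots. If it ever needs deduplicating, it could be wrapped in a small helper like the sketch below (helper name hypothetical; it only uses the TestSet* setters shown above), used as defer withTestStorageRoots(t)():

    func withTestStorageRoots(t *testing.T) func() {
        testDir, err := ioutil.TempDir("", "vmfactory-tmp-")
        if err != nil {
            t.Fatal(err)
        }
        rootSave := fs.StorageRootPath()
        runSave := fs.RunStoragePath()
        fs.TestSetStorageRootPath(filepath.Join(testDir, "vc"))
        fs.TestSetRunStoragePath(filepath.Join(testDir, "vc", "run"))
        // the returned cleanup restores the package-level paths and removes the temp dir
        return func() {
            fs.TestSetStorageRootPath(rootSave)
            fs.TestSetRunStoragePath(runSave)
            os.RemoveAll(testDir)
        }
    }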


@@ -9,23 +9,36 @@ import (
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
)
var rootPathSave = fs.StorageRootPath()
func TestTemplateFactory(t *testing.T) {
assert := assert.New(t)
testDir, err := ioutil.TempDir("", "vmfactory-tmp-")
assert.Nil(err)
store.VCStorePrefix = testDir
fs.TestSetStorageRootPath(filepath.Join(testDir, "vc"))
defer func() {
os.RemoveAll(testDir)
store.VCStorePrefix = ""
fs.TestSetStorageRootPath(rootPathSave)
}()
assert.Nil(err)
runPathSave := fs.RunStoragePath()
fs.TestSetRunStoragePath(filepath.Join(testDir, "vc", "run"))
defer func() {
os.RemoveAll(testDir)
fs.TestSetRunStoragePath(runPathSave)
}()
hyperConfig := vc.HypervisorConfig{


@@ -9,10 +9,12 @@ import (
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/factory/base"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
"github.com/kata-containers/runtime/virtcontainers/utils"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
@@ -20,6 +22,8 @@ import (
const testDisabledAsNonRoot = "Test disabled as requires root privileges"
var rootPathSave = fs.StorageRootPath()
func TestNewFactory(t *testing.T) {
var config Config
@@ -41,6 +45,12 @@ func TestNewFactory(t *testing.T) {
assert.Error(err)
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
fs.TestSetStorageRootPath(filepath.Join(testDir, "vc"))
defer func() {
os.RemoveAll(testDir)
fs.TestSetStorageRootPath(rootPathSave)
}()
config.VMConfig.HypervisorConfig = vc.HypervisorConfig{
KernelPath: testDir,
@@ -110,6 +120,12 @@ func TestVMConfigValid(t *testing.T) {
assert := assert.New(t)
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
fs.TestSetStorageRootPath(filepath.Join(testDir, "vc"))
defer func() {
os.RemoveAll(testDir)
fs.TestSetStorageRootPath(rootPathSave)
}()
config := vc.VMConfig{
HypervisorType: vc.MockHypervisor,
@@ -159,6 +175,13 @@ func TestCheckVMConfig(t *testing.T) {
assert.Nil(err)
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
fs.TestSetStorageRootPath(filepath.Join(testDir, "vc"))
defer func() {
os.RemoveAll(testDir)
fs.TestSetStorageRootPath(rootPathSave)
}()
config1.HypervisorConfig = vc.HypervisorConfig{
KernelPath: testDir,
ImagePath: testDir,
@@ -178,6 +201,13 @@ func TestFactoryGetVM(t *testing.T) {
assert := assert.New(t)
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
fs.TestSetStorageRootPath(filepath.Join(testDir, "vc"))
defer func() {
os.RemoveAll(testDir)
fs.TestSetStorageRootPath(rootPathSave)
}()
hyperConfig := vc.HypervisorConfig{
KernelPath: testDir,
ImagePath: testDir,
@@ -337,6 +367,13 @@ func TestDeepCompare(t *testing.T) {
ProxyType: vc.NoopProxyType,
}
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
fs.TestSetStorageRootPath(filepath.Join(testDir, "vc"))
defer func() {
os.RemoveAll(testDir)
fs.TestSetStorageRootPath(rootPathSave)
}()
config.VMConfig.HypervisorConfig = vc.HypervisorConfig{
KernelPath: testDir,
ImagePath: testDir,

View File

@@ -9,16 +9,20 @@ import (
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
vc "github.com/kata-containers/runtime/virtcontainers"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
)
const testDisabledAsNonRoot = "Test disabled as requires root privileges"
var rootPathSave = fs.StorageRootPath()
func TestTemplateFactory(t *testing.T) {
if os.Geteuid() != 0 {
t.Skip(testDisabledAsNonRoot)
@@ -29,6 +33,13 @@ func TestTemplateFactory(t *testing.T) {
templateWaitForAgent = 1 * time.Microsecond
testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
fs.TestSetStorageRootPath(filepath.Join(testDir, "vc"))
defer func() {
os.RemoveAll(testDir)
fs.TestSetStorageRootPath(rootPathSave)
}()
hyperConfig := vc.HypervisorConfig{
KernelPath: testDir,
ImagePath: testDir,

View File

@@ -38,7 +38,6 @@ import (
"github.com/blang/semver"
"github.com/containerd/console"
"github.com/kata-containers/runtime/virtcontainers/device/config"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/types"
"github.com/kata-containers/runtime/virtcontainers/utils"
)
@@ -76,6 +75,8 @@ const (
fcMetricsFifo = "metrics.fifo"
defaultFcConfig = "fcConfig.json"
// storagePathSuffix mirrors persist/fs/fs.go:storagePathSuffix
storagePathSuffix = "vc"
)
// Specify the minimum version of firecracker supported
@@ -143,7 +144,6 @@ type firecracker struct {
firecrackerd *exec.Cmd //Tracks the firecracker process itself
connection *client.Firecracker //Tracks the current active connection
store *store.VCStore
ctx context.Context
config HypervisorConfig
pendingDevices []firecrackerDevice // Devices to be added before the FC VM ready
@@ -222,7 +222,7 @@ func (fc *firecracker) bindMount(ctx context.Context, source, destination string
// For firecracker this call only sets the internal structure up.
// The sandbox will be created and started through startSandbox().
func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore, stateful bool) error {
func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error {
fc.ctx = ctx
span, _ := fc.trace("createSandbox")
@@ -231,7 +231,6 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS N
//TODO: check validity of the hypervisor config provided
//https://github.com/kata-containers/runtime/issues/1065
fc.id = id
fc.store = vcStore
fc.state.set(notReady)
fc.config = *hypervisorConfig
fc.stateful = stateful
@@ -246,8 +245,8 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS N
// Also jailer based on the id implicitly sets up cgroups under
// <cgroups_base>/<exec_file_name>/<id>/
hypervisorName := filepath.Base(hypervisorConfig.HypervisorPath)
//store.ConfigStoragePath cannot be used as we need exec perms
fc.chrootBaseDir = filepath.Join("/var/lib/", store.StoragePathSuffix)
//fs.RunStoragePath cannot be used as we need exec perms
fc.chrootBaseDir = filepath.Join("/run", storagePathSuffix)
fc.vmPath = filepath.Join(fc.chrootBaseDir, hypervisorName, fc.id)
fc.jailerRoot = filepath.Join(fc.vmPath, "root") // auto created by jailer
@@ -263,15 +262,6 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS N
fc.fcConfig = &types.FcConfig{}
fc.fcConfigPath = filepath.Join(fc.vmPath, defaultFcConfig)
// No need to return an error from there since there might be nothing
// to fetch if this is the first time the hypervisor is created.
if fc.store != nil {
if err := fc.store.Load(store.Hypervisor, &fc.info); err != nil {
fc.Logger().WithField("function", "init").WithError(err).Info("No info could be fetched")
}
}
return nil
}
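The jailer chroot now hangs off the same /run tree as the persist/fs paths rather than the old /var/lib store location. A stand-alone sketch of the resulting layout, assuming the default "/run" base and the "vc" suffix; the hypervisor path and sandbox ID below are made up for illustration:

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    // Hypothetical inputs; only the path arithmetic mirrors createSandbox above.
    const storagePathSuffix = "vc"
    hypervisorPath := "/usr/bin/firecracker"
    sandboxID := "sbx-1"

    hypervisorName := filepath.Base(hypervisorPath)
    chrootBaseDir := filepath.Join("/run", storagePathSuffix)
    vmPath := filepath.Join(chrootBaseDir, hypervisorName, sandboxID)
    jailerRoot := filepath.Join(vmPath, "root")

    fmt.Println(vmPath)     // /run/vc/firecracker/sbx-1
    fmt.Println(jailerRoot) // /run/vc/firecracker/sbx-1/root
}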
@@ -382,17 +372,7 @@ func (fc *firecracker) fcInit(timeout int) error {
defer span.Finish()
// Fetch sandbox network to be able to access it from the sandbox structure.
var networkNS NetworkNamespace
if fc.store != nil {
if err := fc.store.Load(store.Network, &networkNS); err == nil {
if networkNS.NetNsPath == "" {
fc.Logger().WithField("NETWORK NAMESPACE NULL", networkNS).Warn()
}
fc.netNSPath = networkNS.NetNsPath
}
}
err := os.MkdirAll(fc.jailerRoot, store.DirMode)
err := os.MkdirAll(fc.jailerRoot, DirMode)
if err != nil {
return err
}
@@ -480,11 +460,6 @@ func (fc *firecracker) fcInit(timeout int) error {
fc.Logger().WithField("fcInit failed:", err).Debug()
return err
}
// Store VMM information
if fc.store != nil {
return fc.store.Store(store.Hypervisor, fc.info)
}
return nil
}
@@ -1164,7 +1139,7 @@ func (fc *firecracker) getPids() []int {
return []int{fc.info.PID}
}
func (fc *firecracker) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error {
func (fc *firecracker) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error {
return errors.New("firecracker is not supported by VM cache")
}

View File

@@ -17,7 +17,7 @@ import (
"github.com/kata-containers/runtime/virtcontainers/device/config"
persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
"github.com/kata-containers/runtime/virtcontainers/types"
"github.com/kata-containers/runtime/virtcontainers/utils"
)
@@ -720,7 +720,7 @@ func generateVMSocket(id string, useVsock bool) (interface{}, error) {
}, nil
}
path, err := utils.BuildSocketPath(filepath.Join(store.RunVMStoragePath(), id), defaultSocketName)
path, err := utils.BuildSocketPath(filepath.Join(fs.RunVMStoragePath(), id), defaultSocketName)
if err != nil {
return nil, err
}
@@ -736,7 +736,7 @@ func generateVMSocket(id string, useVsock bool) (interface{}, error) {
// hypervisor is the virtcontainers hypervisor interface.
// The default hypervisor implementation is Qemu.
type hypervisor interface {
createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, store *store.VCStore, stateful bool) error
createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error
startSandbox(timeout int) error
stopSandbox() error
pauseSandbox() error
@@ -756,7 +756,7 @@ type hypervisor interface {
// getPids returns a slice of hypervisor related process ids.
// The hypervisor pid must be put at index 0.
getPids() []int
fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error
fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error
toGrpc() ([]byte, error)
check() error

View File

@@ -26,6 +26,7 @@ import (
"github.com/kata-containers/runtime/pkg/rootless"
"github.com/kata-containers/runtime/virtcontainers/device/config"
persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
vcAnnotations "github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
ns "github.com/kata-containers/runtime/virtcontainers/pkg/nsenter"
vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
@@ -217,7 +218,7 @@ func (k *kataAgent) Logger() *logrus.Entry {
}
func (k *kataAgent) getVMPath(id string) string {
return filepath.Join(store.RunVMStoragePath(), id)
return filepath.Join(fs.RunVMStoragePath(), id)
}
func (k *kataAgent) getSharePath(id string) string {
@@ -318,12 +319,11 @@ func (k *kataAgent) init(ctx context.Context, sandbox *Sandbox, config interface
k.proxyBuiltIn = isProxyBuiltIn(sandbox.config.ProxyType)
// Fetch agent runtime info.
if !sandbox.supportNewStore() {
if useOldStore(sandbox.ctx) {
if err := sandbox.store.Load(store.Agent, &k.state); err != nil {
k.Logger().Debug("Could not retrieve anything from storage")
}
}
return disableVMShutdown, nil
}
@@ -409,7 +409,7 @@ func (k *kataAgent) configure(h hypervisor, id, sharePath string, builtin bool,
HostPath: sharePath,
}
if err = os.MkdirAll(sharedVolume.HostPath, store.DirMode); err != nil {
if err = os.MkdirAll(sharedVolume.HostPath, DirMode); err != nil {
return err
}
@@ -730,12 +730,6 @@ func (k *kataAgent) setProxy(sandbox *Sandbox, proxy proxy, pid int, url string)
k.proxy = proxy
k.state.ProxyPid = pid
k.state.URL = url
if sandbox != nil && !sandbox.supportNewStore() {
if err := sandbox.store.Store(store.Agent, k.state); err != nil {
return err
}
}
return nil
}
@@ -952,13 +946,6 @@ func (k *kataAgent) stopSandbox(sandbox *Sandbox) error {
// clean up agent state
k.state.ProxyPid = -1
k.state.URL = ""
if !sandbox.supportNewStore() {
if err := sandbox.store.Store(store.Agent, k.state); err != nil {
// ignore error
k.Logger().WithError(err).WithField("sandbox", sandbox.id).Error("failed to clean up agent state")
}
}
return nil
}
@@ -1444,13 +1431,6 @@ func (k *kataAgent) handleBlockVolumes(c *Container) ([]*grpc.Storage, error) {
// device is detached with detachDevices() for a container.
c.devices = append(c.devices, ContainerDevice{ID: id, ContainerPath: m.Destination})
if !c.sandbox.supportNewStore() {
if err := c.storeDevices(); err != nil {
k.Logger().WithField("device", id).WithError(err).Error("store device failed")
return nil, err
}
}
vol := &grpc.Storage{}
device := c.sandbox.devManager.GetDeviceByID(id)
@@ -2153,7 +2133,7 @@ func (k *kataAgent) copyFile(src, dst string) error {
cpReq := &grpc.CopyFileRequest{
Path: dst,
DirMode: uint32(store.DirMode),
DirMode: uint32(DirMode),
FileMode: st.Mode,
FileSize: fileSize,
Uid: int32(st.Uid),

View File

@@ -31,9 +31,9 @@ import (
"github.com/kata-containers/runtime/virtcontainers/device/config"
"github.com/kata-containers/runtime/virtcontainers/device/drivers"
"github.com/kata-containers/runtime/virtcontainers/device/manager"
"github.com/kata-containers/runtime/virtcontainers/persist"
"github.com/kata-containers/runtime/virtcontainers/pkg/mock"
vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/types"
)
@@ -714,10 +714,10 @@ func TestAgentCreateContainer(t *testing.T) {
hypervisor: &mockHypervisor{},
}
vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
assert.Nil(err)
sandbox.store = vcStore
newStore, err := persist.GetDriver("fs")
assert.NoError(err)
assert.NotNil(newStore)
sandbox.newStore = newStore
container := &Container{
ctx: sandbox.ctx,
@@ -815,12 +815,7 @@ func TestKataAgentSetProxy(t *testing.T) {
id: "foobar",
}
vcStore, err := store.NewVCSandboxStore(s.ctx, s.id)
assert.Nil(err)
s.store = vcStore
err = k.setProxy(s, p, 0, "")
err := k.setProxy(s, p, 0, "")
assert.Error(err)
}

View File

@@ -11,7 +11,6 @@ import (
"os"
persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/types"
)
@@ -27,7 +26,7 @@ func (m *mockHypervisor) hypervisorConfig() HypervisorConfig {
return HypervisorConfig{}
}
func (m *mockHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, store *store.VCStore, stateful bool) error {
func (m *mockHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error {
err := hypervisorConfig.valid()
if err != nil {
return err
@@ -108,7 +107,7 @@ func (m *mockHypervisor) getPids() []int {
return []int{m.mockPid}
}
func (m *mockHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error {
func (m *mockHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error {
return errors.New("mockHypervisor is not supported by VM cache")
}

View File

@@ -31,7 +31,7 @@ func TestMockHypervisorCreateSandbox(t *testing.T) {
ctx := context.Background()
// wrong config
err := m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig, nil, false)
err := m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
assert.Error(err)
sandbox.config.HypervisorConfig = HypervisorConfig{
@@ -40,7 +40,7 @@ func TestMockHypervisorCreateSandbox(t *testing.T) {
HypervisorPath: fmt.Sprintf("%s/%s", testDir, testHypervisor),
}
err = m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig, nil, false)
err = m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
assert.NoError(err)
}

View File

@@ -6,12 +6,14 @@
package virtcontainers
import (
"context"
"errors"
"github.com/kata-containers/runtime/virtcontainers/device/api"
exp "github.com/kata-containers/runtime/virtcontainers/experimental"
"github.com/kata-containers/runtime/virtcontainers/persist"
persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/types"
"github.com/mitchellh/mapstructure"
)
@@ -443,15 +445,6 @@ func (c *Container) Restore() error {
return nil
}
func (s *Sandbox) supportNewStore() bool {
for _, f := range s.config.Experimental {
if f == persist.NewStoreFeature && exp.Get("newstore") != nil {
return true
}
}
return false
}
func loadSandboxConfig(id string) (*SandboxConfig, error) {
store, err := persist.GetDriver("fs")
if err != nil || store == nil {
@@ -568,3 +561,25 @@ func loadSandboxConfig(id string) (*SandboxConfig, error) {
}
return sconfig, nil
}
var oldstoreKey = struct{}{}
func loadSandboxConfigFromOldStore(ctx context.Context, sid string) (*SandboxConfig, context.Context, error) {
var config SandboxConfig
// We're bootstrapping
vcStore, err := store.NewVCSandboxStore(ctx, sid)
if err != nil {
return nil, ctx, err
}
if err := vcStore.Load(store.Configuration, &config); err != nil {
return nil, ctx, err
}
return &config, context.WithValue(ctx, oldstoreKey, true), nil
}
func useOldStore(ctx context.Context) bool {
v := ctx.Value(oldstoreKey)
return v != nil
}
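A stand-alone sketch of the context-flag pattern used by loadSandboxConfigFromOldStore and useOldStore: the flag is attached to the context once the old store has been read successfully, and later code only checks for its presence. The key and helper mirror the code above; the main function is illustrative only.

package main

import (
    "context"
    "fmt"
)

var oldstoreKey = struct{}{}

func useOldStore(ctx context.Context) bool {
    return ctx.Value(oldstoreKey) != nil
}

func main() {
    ctx := context.Background()
    fmt.Println(useOldStore(ctx)) // false

    // Mark the context the same way loadSandboxConfigFromOldStore does.
    ctx = context.WithValue(ctx, oldstoreKey, true)
    fmt.Println(useOldStore(ctx)) // true
}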

View File

@@ -173,13 +173,6 @@ type KataAgentConfig struct {
UseVSock bool
}
// HyperstartConfig is a structure storing information needed for
// hyperstart agent initialization.
type HyperstartConfig struct {
SockCtlName string
SockTtyName string
}
// ProxyConfig is a structure storing information needed from any
// proxy in order to be properly initialized.
type ProxyConfig struct {

View File

@@ -13,5 +13,16 @@ type PersistDriver interface {
// We only support getting data for one whole sandbox
FromDisk(sid string) (SandboxState, map[string]ContainerState, error)
// Destroy will remove everything from storage
Destroy() error
Destroy(sid string) error
// Lock locks the persist driver; "exclusive" decides whether the lock is exclusive or shared.
// It returns the unlock function and an error.
Lock(sid string, exclusive bool) (func() error, error)
// GlobalWrite writes "data" to "StorageRootPath"/"relativePath";
// GlobalRead reads "data" from "StorageRootPath"/"relativePath".
// These functions are used for writing/reading some global data;
// so far they are specially designed for ACRN.
// Don't use them unless you have no other choice! @weizhang555
GlobalWrite(relativePath string, data []byte) error
GlobalRead(relativePath string) ([]byte, error)
}
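A minimal caller sketch (not part of this change) of the extended surface: take a shared lock through the closure-based Lock, read state back with FromDisk, and release the lock. It assumes the driver is obtained via persist.GetDriver("fs") as elsewhere in this series; the helper name is hypothetical and error handling is kept short.

package example

import (
    "github.com/kata-containers/runtime/virtcontainers/persist"
    persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
)

// loadSandboxState is a hypothetical helper, not part of the runtime.
func loadSandboxState(sid string) (persistapi.SandboxState, map[string]persistapi.ContainerState, error) {
    store, err := persist.GetDriver("fs")
    if err != nil {
        return persistapi.SandboxState{}, nil, err
    }

    // false selects a shared (read) lock; the returned closure releases it.
    unlock, err := store.Lock(sid, false)
    if err != nil {
        return persistapi.SandboxState{}, nil, err
    }
    defer unlock()

    return store.FromDisk(sid)
}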

View File

@@ -23,10 +23,10 @@ import (
const persistFile = "persist.json"
// dirMode is the permission bits used for creating a directory
const dirMode = os.FileMode(0700)
const dirMode = os.FileMode(0700) | os.ModeDir
// fileMode is the permission bits used for creating a file
const fileMode = os.FileMode(0640)
const fileMode = os.FileMode(0600)
// storagePathSuffix is the suffix used for all storage paths
//
@@ -37,22 +37,48 @@ const storagePathSuffix = "vc"
// sandboxPathSuffix is the suffix used for sandbox storage
const sandboxPathSuffix = "sbs"
// RunStoragePath is the sandbox runtime directory.
// It will contain one state.json and one lock file for each created sandbox.
var RunStoragePath = func() string {
path := filepath.Join("/run", storagePathSuffix, sandboxPathSuffix)
// vmPathSuffix is the suffix used for guest VMs.
const vmPathSuffix = "vm"
var StorageRootPath = func() string {
path := filepath.Join("/run", storagePathSuffix)
if rootless.IsRootless() {
return filepath.Join(rootless.GetRootlessDir(), path)
}
return path
}
// RunStoragePath is the sandbox runtime directory.
// It will contain one state.json and one lock file for each created sandbox.
var RunStoragePath = func() string {
return filepath.Join(StorageRootPath(), sandboxPathSuffix)
}
// RunVMStoragePath is the vm directory.
// It will contain all guest vm sockets and shared mountpoints.
// The function is declared this way for mocking in unit tests
var RunVMStoragePath = func() string {
return filepath.Join(StorageRootPath(), vmPathSuffix)
}
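A stand-alone sketch of where these helpers resolve by default (the rootless branch instead prefixes the rootless directory); the suffix values are taken from the constants above and the printed paths are the expected non-rootless defaults:

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    const (
        storagePathSuffix = "vc"
        sandboxPathSuffix = "sbs"
        vmPathSuffix      = "vm"
    )

    root := filepath.Join("/run", storagePathSuffix)
    fmt.Println(root)                                   // /run/vc
    fmt.Println(filepath.Join(root, sandboxPathSuffix)) // /run/vc/sbs (per-sandbox persist.json and lock)
    fmt.Println(filepath.Join(root, vmPathSuffix))      // /run/vc/vm  (guest sockets, shared mounts)
}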
// TestSetRunStoragePath sets RunStoragePath to path.
// This function is only used for testing purposes.
func TestSetRunStoragePath(path string) {
RunStoragePath = func() string {
return path
}
}
// TestSetStorageRootPath sets StorageRootPath to path.
// This function is only used for testing purposes.
func TestSetStorageRootPath(path string) {
StorageRootPath = func() string {
return path
}
}
// FS storage driver implementation
type FS struct {
sandboxState *persistapi.SandboxState
containerState map[string]persistapi.ContainerState
lockFile *os.File
}
var fsLog = logrus.WithField("source", "virtcontainers/persist/fs")
@@ -77,21 +103,21 @@ func Init() (persistapi.PersistDriver, error) {
}, nil
}
func (fs *FS) sandboxDir() (string, error) {
id := fs.sandboxState.SandboxContainer
if id == "" {
return "", fmt.Errorf("sandbox container id required")
}
return filepath.Join(RunStoragePath(), id), nil
func (fs *FS) sandboxDir(sandboxID string) (string, error) {
return filepath.Join(RunStoragePath(), sandboxID), nil
}
// ToDisk writes sandboxState and containerState to disk
func (fs *FS) ToDisk(ss persistapi.SandboxState, cs map[string]persistapi.ContainerState) (retErr error) {
id := ss.SandboxContainer
if id == "" {
return fmt.Errorf("sandbox container id required")
}
fs.sandboxState = &ss
fs.containerState = cs
sandboxDir, err := fs.sandboxDir()
sandboxDir, err := fs.sandboxDir(id)
if err != nil {
return err
}
@@ -100,15 +126,10 @@ func (fs *FS) ToDisk(ss persistapi.SandboxState, cs map[string]persistapi.Contai
return err
}
if err := fs.lock(); err != nil {
return err
}
defer fs.unlock()
// if error happened, destroy all dirs
defer func() {
if retErr != nil {
if err := fs.Destroy(); err != nil {
if err := fs.Destroy(id); err != nil {
fs.Logger().WithError(err).Errorf("failed to destroy dirs")
}
}
@@ -155,6 +176,27 @@ func (fs *FS) ToDisk(ss persistapi.SandboxState, cs map[string]persistapi.Contai
}
}
// Walk the sandbox dir and find containers.
files, err := ioutil.ReadDir(sandboxDir)
if err != nil {
return err
}
// Remove non-existing containers
for _, file := range files {
if !file.IsDir() {
continue
}
// Container dir exists.
cid := file.Name()
// Container should be removed when container id doesn't exist in cs.
if _, ok := cs[cid]; !ok {
if err := os.RemoveAll(filepath.Join(sandboxDir, cid)); err != nil {
return err
}
}
}
return nil
}
@@ -165,18 +207,11 @@ func (fs *FS) FromDisk(sid string) (persistapi.SandboxState, map[string]persista
return ss, nil, fmt.Errorf("restore requires sandbox id")
}
fs.sandboxState.SandboxContainer = sid
sandboxDir, err := fs.sandboxDir()
sandboxDir, err := fs.sandboxDir(sid)
if err != nil {
return ss, nil, err
}
if err := fs.lock(); err != nil {
return ss, nil, err
}
defer fs.unlock()
// get sandbox configuration from persist data
sandboxFile := filepath.Join(sandboxDir, persistFile)
f, err := os.OpenFile(sandboxFile, os.O_RDONLY, fileMode)
@@ -224,8 +259,12 @@ func (fs *FS) FromDisk(sid string) (persistapi.SandboxState, map[string]persista
}
// Destroy removes everything from disk
func (fs *FS) Destroy() error {
sandboxDir, err := fs.sandboxDir()
func (fs *FS) Destroy(sandboxID string) error {
if sandboxID == "" {
return fmt.Errorf("sandbox container id required")
}
sandboxDir, err := fs.sandboxDir(sandboxID)
if err != nil {
return err
}
@@ -236,45 +275,95 @@ func (fs *FS) Destroy() error {
return nil
}
func (fs *FS) lock() error {
sandboxDir, err := fs.sandboxDir()
func (fs *FS) Lock(sandboxID string, exclusive bool) (func() error, error) {
if sandboxID == "" {
return nil, fmt.Errorf("sandbox container id required")
}
sandboxDir, err := fs.sandboxDir(sandboxID)
if err != nil {
return err
return nil, err
}
f, err := os.Open(sandboxDir)
if err != nil {
return err
return nil, err
}
if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
var lockType int
if exclusive {
lockType = syscall.LOCK_EX
} else {
lockType = syscall.LOCK_SH
}
if err := syscall.Flock(int(f.Fd()), lockType); err != nil {
f.Close()
return err
return nil, err
}
fs.lockFile = f
return nil
}
unlockFunc := func() error {
defer f.Close()
if err := syscall.Flock(int(f.Fd()), syscall.LOCK_UN); err != nil {
return err
}
func (fs *FS) unlock() error {
if fs.lockFile == nil {
return nil
}
return unlockFunc, nil
}
lockFile := fs.lockFile
defer lockFile.Close()
fs.lockFile = nil
if err := syscall.Flock(int(lockFile.Fd()), syscall.LOCK_UN); err != nil {
func (fs *FS) GlobalWrite(relativePath string, data []byte) error {
path := filepath.Join(StorageRootPath(), relativePath)
path, err := filepath.Abs(filepath.Clean(path))
if err != nil {
return fmt.Errorf("failed to find abs path for %q: %v", relativePath, err)
}
dir := filepath.Dir(path)
_, err = os.Stat(dir)
if os.IsNotExist(err) {
if err := os.MkdirAll(dir, dirMode); err != nil {
fs.Logger().WithError(err).WithField("directory", dir).Error("failed to create dir")
return err
}
} else if err != nil {
return err
}
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, fileMode)
if err != nil {
fs.Logger().WithError(err).WithField("file", path).Error("failed to open file for writting")
return err
}
defer f.Close()
if _, err := f.Write(data); err != nil {
fs.Logger().WithError(err).WithField("file", path).Error("failed to write file")
return err
}
return nil
}
// TestSetRunStoragePath set RunStoragePath to path
// this function is only used for testing purpose
func TestSetRunStoragePath(path string) {
RunStoragePath = func() string {
return path
func (fs *FS) GlobalRead(relativePath string) ([]byte, error) {
path := filepath.Join(StorageRootPath(), relativePath)
path, err := filepath.Abs(filepath.Clean(path))
if err != nil {
return nil, fmt.Errorf("failed to find abs path for %q: %v", relativePath, err)
}
f, err := os.Open(path)
if err != nil {
fs.Logger().WithError(err).WithField("file", path).Error("failed to open file for reading")
return nil, err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
fs.Logger().WithError(err).WithField("file", path).Error("failed to read file")
return nil, err
}
return data, nil
}
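Lock is built on flock(2): any number of shared locks can coexist, while an exclusive lock excludes everything else, and each os.Open yields an independent open file description, so two handles in the same process contend as separate lockers. A stand-alone demonstration, using LOCK_NB only so the example never blocks (the driver's Lock above blocks instead); the temporary directory is illustrative:

package main

import (
    "fmt"
    "io/ioutil"
    "os"
    "syscall"
)

func main() {
    dir, err := ioutil.TempDir("", "flock-demo-")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir)

    f1, _ := os.Open(dir)
    f2, _ := os.Open(dir)
    defer f1.Close()
    defer f2.Close()

    // Two shared locks on the same directory both succeed.
    fmt.Println(syscall.Flock(int(f1.Fd()), syscall.LOCK_SH|syscall.LOCK_NB)) // <nil>
    fmt.Println(syscall.Flock(int(f2.Fd()), syscall.LOCK_SH|syscall.LOCK_NB)) // <nil>

    // Converting f2 to exclusive fails while f1 still holds its shared lock.
    fmt.Println(syscall.Flock(int(f2.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)) // resource temporarily unavailable

    // Once f1 releases, the exclusive lock can be taken.
    syscall.Flock(int(f1.Fd()), syscall.LOCK_UN)
    fmt.Println(syscall.Flock(int(f2.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)) // <nil>
}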

View File

@@ -9,6 +9,7 @@ import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
@@ -28,43 +29,81 @@ func getFsDriver() (*FS, error) {
return fs, nil
}
func TestFsLock(t *testing.T) {
func initTestDir() func() {
testDir, _ := ioutil.TempDir("", "vc-tmp-")
// allow the tests to run without affecting the host system.
rootSave := StorageRootPath()
TestSetStorageRootPath(filepath.Join(testDir, "vc"))
os.MkdirAll(testDir, dirMode)
return func() {
TestSetStorageRootPath(rootSave)
os.RemoveAll(testDir)
}
}
func TestFsLockShared(t *testing.T) {
defer initTestDir()()
fs, err := getFsDriver()
assert.Nil(t, err)
assert.NotNil(t, fs)
testDir, err := ioutil.TempDir("", "fs-tmp-")
assert.Nil(t, err)
TestSetRunStoragePath(testDir)
defer func() {
os.RemoveAll(testDir)
}()
fs.sandboxState.SandboxContainer = "test-fs-driver"
sandboxDir, err := fs.sandboxDir()
sid := "test-fs-driver"
fs.sandboxState.SandboxContainer = sid
sandboxDir, err := fs.sandboxDir(sid)
assert.Nil(t, err)
err = os.MkdirAll(sandboxDir, dirMode)
assert.Nil(t, err)
assert.Nil(t, fs.lock())
assert.NotNil(t, fs.lock())
// Take 2 shared locks
unlockFunc, err := fs.Lock(sid, false)
assert.Nil(t, err)
assert.Nil(t, fs.unlock())
assert.Nil(t, fs.unlock())
unlockFunc2, err := fs.Lock(sid, false)
assert.Nil(t, err)
assert.Nil(t, unlockFunc())
assert.Nil(t, unlockFunc2())
assert.NotNil(t, unlockFunc2())
}
func TestFsDriver(t *testing.T) {
func TestFsLockExclusive(t *testing.T) {
defer initTestDir()()
fs, err := getFsDriver()
assert.Nil(t, err)
assert.NotNil(t, fs)
testDir, err := ioutil.TempDir("", "fs-tmp-")
sid := "test-fs-driver"
fs.sandboxState.SandboxContainer = sid
sandboxDir, err := fs.sandboxDir(sid)
assert.Nil(t, err)
TestSetRunStoragePath(testDir)
defer func() {
os.RemoveAll(testDir)
}()
err = os.MkdirAll(sandboxDir, dirMode)
assert.Nil(t, err)
// Take 1 exclusive lock
unlockFunc, err := fs.Lock(sid, true)
assert.Nil(t, err)
assert.Nil(t, unlockFunc())
unlockFunc, err = fs.Lock(sid, true)
assert.Nil(t, err)
assert.Nil(t, unlockFunc())
assert.NotNil(t, unlockFunc())
}
func TestFsDriver(t *testing.T) {
defer initTestDir()()
fs, err := getFsDriver()
assert.Nil(t, err)
assert.NotNil(t, fs)
ss := persistapi.SandboxState{}
cs := make(map[string]persistapi.ContainerState)
@@ -88,7 +127,7 @@ func TestFsDriver(t *testing.T) {
assert.Equal(t, ss.SandboxContainer, id)
assert.Equal(t, ss.State, "")
// flush all to disk
// flush all to disk.
ss.State = "running"
assert.Nil(t, fs.ToDisk(ss, cs))
ss, cs, err = fs.FromDisk(id)
@@ -99,9 +138,31 @@ func TestFsDriver(t *testing.T) {
assert.Equal(t, ss.SandboxContainer, id)
assert.Equal(t, ss.State, "running")
assert.Nil(t, fs.Destroy())
// add new container state.
cs["test-container"] = persistapi.ContainerState{
State: "ready",
}
assert.Nil(t, fs.ToDisk(ss, cs))
ss, cs, err = fs.FromDisk(id)
assert.Nil(t, err)
assert.NotNil(t, ss)
assert.Equal(t, len(cs), 1)
c, ok := cs["test-container"]
assert.True(t, ok)
assert.Equal(t, c.State, "ready")
dir, err := fs.sandboxDir()
// clean up all containers.
cs = make(map[string]persistapi.ContainerState)
assert.Nil(t, fs.ToDisk(ss, cs))
ss, cs, err = fs.FromDisk(id)
assert.Nil(t, err)
assert.NotNil(t, ss)
assert.Equal(t, len(cs), 0)
// destroy whole sandbox dir.
assert.Nil(t, fs.Destroy(id))
dir, err := fs.sandboxDir(id)
assert.Nil(t, err)
assert.NotEqual(t, len(dir), 0)
@@ -109,3 +170,25 @@ func TestFsDriver(t *testing.T) {
assert.NotNil(t, err)
assert.True(t, os.IsNotExist(err))
}
func TestGlobalReadWrite(t *testing.T) {
defer initTestDir()()
relPath := "test/123/aaa.json"
data := "hello this is testing global read write"
fs, err := getFsDriver()
assert.Nil(t, err)
assert.NotNil(t, fs)
err = fs.GlobalWrite(relPath, []byte(data))
assert.Nil(t, err)
out, err := fs.GlobalRead(relPath)
assert.Nil(t, err)
assert.Equal(t, string(out), data)
out, err = fs.GlobalRead("nonexist")
assert.NotNil(t, err)
assert.Nil(t, out)
}

View File

@@ -7,7 +7,6 @@ package virtcontainers
import (
"context"
"fmt"
"os"
"testing"
@@ -19,47 +18,6 @@ import (
"github.com/kata-containers/runtime/virtcontainers/types"
)
func testCreateExpSandbox() (*Sandbox, error) {
sconfig := SandboxConfig{
ID: "test-exp",
HypervisorType: MockHypervisor,
HypervisorConfig: newHypervisorConfig(nil, nil),
AgentType: NoopAgentType,
NetworkConfig: NetworkConfig{},
Volumes: nil,
Containers: nil,
Experimental: []exp.Feature{persist.NewStoreFeature},
}
// support experimental
sandbox, err := createSandbox(context.Background(), sconfig, nil)
if err != nil {
return nil, fmt.Errorf("Could not create sandbox: %s", err)
}
if err := sandbox.agent.startSandbox(sandbox); err != nil {
return nil, err
}
return sandbox, nil
}
func TestSupportNewStore(t *testing.T) {
assert := assert.New(t)
hConfig := newHypervisorConfig(nil, nil)
sandbox, err := testCreateSandbox(t, testSandboxID, MockHypervisor, hConfig, NoopAgentType, NetworkConfig{}, nil, nil)
assert.NoError(err)
defer cleanUp()
// not support experimental
assert.False(sandbox.supportNewStore())
// support experimental
sandbox, err = testCreateExpSandbox()
assert.NoError(err)
assert.True(sandbox.supportNewStore())
}
func TestSandboxRestore(t *testing.T) {
var err error
assert := assert.New(t)

View File

@@ -29,11 +29,15 @@ import (
)
const (
tempBundlePath = "/tmp/virtc/ocibundle/"
containerID = "virtc-oci-test"
consolePath = "/tmp/virtc/console"
fileMode = os.FileMode(0640)
dirMode = os.FileMode(0750)
containerID = "virtc-oci-test"
fileMode = os.FileMode(0640)
dirMode = os.FileMode(0750)
)
var (
tempRoot = ""
tempBundlePath = ""
consolePath = ""
)
func createConfig(fileName string, fileData string) (string, error) {
@@ -633,16 +637,27 @@ func TestGetShmSizeBindMounted(t *testing.T) {
}
func TestMain(m *testing.M) {
var err error
tempRoot, err = ioutil.TempDir("", "virtc-")
if err != nil {
panic(err)
}
tempBundlePath = filepath.Join(tempRoot, "ocibundle")
consolePath = filepath.Join(tempRoot, "console")
/* Create temp bundle directory if necessary */
err := os.MkdirAll(tempBundlePath, dirMode)
err = os.MkdirAll(tempBundlePath, dirMode)
if err != nil {
fmt.Printf("Unable to create %s %v\n", tempBundlePath, err)
os.Exit(1)
}
defer os.RemoveAll(tempBundlePath)
ret := m.Run()
os.Exit(m.Run())
os.RemoveAll(tempRoot)
os.Exit(ret)
}
func TestAddAssetAnnotations(t *testing.T) {

View File

@@ -14,7 +14,7 @@ import (
"strings"
kataclient "github.com/kata-containers/agent/protocols/client"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
"github.com/sirupsen/logrus"
)
@@ -146,7 +146,7 @@ func validateProxyConfig(proxyConfig ProxyConfig) error {
func defaultProxyURL(id, socketType string) (string, error) {
switch socketType {
case SocketTypeUNIX:
socketPath := filepath.Join(store.SandboxRuntimeRootPath(id), "proxy.sock")
socketPath := filepath.Join(filepath.Join(fs.RunStoragePath(), id), "proxy.sock")
return fmt.Sprintf("unix://%s", socketPath), nil
case SocketTypeVSOCK:
// TODO Build the VSOCK default URL

View File

@@ -12,7 +12,7 @@ import (
"path/filepath"
"testing"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
)
@@ -173,7 +173,7 @@ func testDefaultProxyURL(expectedURL string, socketType string, sandboxID string
}
func TestDefaultProxyURLUnix(t *testing.T) {
path := filepath.Join(store.SandboxRuntimeRootPath(sandboxID), "proxy.sock")
path := filepath.Join(filepath.Join(fs.RunStoragePath(), sandboxID), "proxy.sock")
socketPath := fmt.Sprintf("unix://%s", path)
assert.NoError(t, testDefaultProxyURL(socketPath, SocketTypeUNIX, sandboxID))
}
@@ -183,7 +183,7 @@ func TestDefaultProxyURLVSock(t *testing.T) {
}
func TestDefaultProxyURLUnknown(t *testing.T) {
path := filepath.Join(store.SandboxRuntimeRootPath(sandboxID), "proxy.sock")
path := filepath.Join(filepath.Join(fs.RunStoragePath(), sandboxID), "proxy.sock")
socketPath := fmt.Sprintf("unix://%s", path)
assert.Error(t, testDefaultProxyURL(socketPath, "foobar", sandboxID))
}
@@ -204,7 +204,7 @@ func testProxyStart(t *testing.T, agent agent, proxy proxy) {
}
invalidPath := filepath.Join(tmpdir, "enoent")
expectedSocketPath := filepath.Join(store.SandboxRuntimeRootPath(testSandboxID), "proxy.sock")
expectedSocketPath := filepath.Join(filepath.Join(fs.RunStoragePath(), testSandboxID), "proxy.sock")
expectedURI := fmt.Sprintf("unix://%s", expectedSocketPath)
data := []testData{

View File

@@ -32,8 +32,8 @@ import (
"github.com/kata-containers/runtime/virtcontainers/device/config"
persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
"github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/types"
"github.com/kata-containers/runtime/virtcontainers/utils"
)
@@ -77,8 +77,6 @@ type QemuState struct {
type qemu struct {
id string
store *store.VCStore
config HypervisorConfig
qmpMonitorCh qmpChannel
@@ -226,7 +224,7 @@ func (q *qemu) trace(name string) (opentracing.Span, context.Context) {
}
// setup sets the Qemu structure up.
func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore) error {
func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig) error {
span, _ := q.trace("setup")
defer span.Finish()
@@ -236,7 +234,6 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto
}
q.id = id
q.store = vcStore
q.config = *hypervisorConfig
q.arch = newQemuArch(q.config)
@@ -255,12 +252,7 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto
}
var create bool
if q.store != nil { //use old store
if err := q.store.Load(store.Hypervisor, &q.state); err != nil {
// hypervisor doesn't exist, create new one
create = true
}
} else if q.state.UUID == "" { // new store
if q.state.UUID == "" {
create = true
}
@@ -277,11 +269,7 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto
// The path might already exist, but in case of VM templating,
// we have to create it since the sandbox has not created it yet.
if err = os.MkdirAll(store.SandboxRuntimeRootPath(id), store.DirMode); err != nil {
return err
}
if err = q.storeState(); err != nil {
if err = os.MkdirAll(filepath.Join(fs.RunStoragePath(), id), DirMode); err != nil {
return err
}
}
@@ -336,7 +324,7 @@ func (q *qemu) memoryTopology() (govmmQemu.Memory, error) {
}
func (q *qemu) qmpSocketPath(id string) (string, error) {
return utils.BuildSocketPath(store.RunVMStoragePath(), id, qmpSocket)
return utils.BuildSocketPath(fs.RunVMStoragePath(), id, qmpSocket)
}
func (q *qemu) getQemuMachine() (govmmQemu.Machine, error) {
@@ -463,14 +451,14 @@ func (q *qemu) setupFileBackedMem(knobs *govmmQemu.Knobs, memory *govmmQemu.Memo
}
// createSandbox is the Hypervisor sandbox creation implementation for govmmQemu.
func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore, stateful bool) error {
func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error {
// Save the tracing context
q.ctx = ctx
span, _ := q.trace("createSandbox")
defer span.Finish()
if err := q.setup(id, hypervisorConfig, vcStore); err != nil {
if err := q.setup(id, hypervisorConfig); err != nil {
return err
}
@@ -580,7 +568,7 @@ func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNa
VGA: "none",
GlobalParam: "kvm-pit.lost_tick_policy=discard",
Bios: firmwarePath,
PidFile: filepath.Join(store.RunVMStoragePath(), q.id, "pid"),
PidFile: filepath.Join(fs.RunVMStoragePath(), q.id, "pid"),
}
if ioThread != nil {
@@ -602,7 +590,7 @@ func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNa
}
func (q *qemu) vhostFSSocketPath(id string) (string, error) {
return utils.BuildSocketPath(store.RunVMStoragePath(), id, vhostFSSocket)
return utils.BuildSocketPath(fs.RunVMStoragePath(), id, vhostFSSocket)
}
func (q *qemu) virtiofsdArgs(fd uintptr) []string {
@@ -706,8 +694,8 @@ func (q *qemu) startSandbox(timeout int) error {
q.fds = []*os.File{}
}()
vmPath := filepath.Join(store.RunVMStoragePath(), q.id)
err := os.MkdirAll(vmPath, store.DirMode)
vmPath := filepath.Join(fs.RunVMStoragePath(), q.id)
err := os.MkdirAll(vmPath, DirMode)
if err != nil {
return err
}
@@ -729,9 +717,6 @@ func (q *qemu) startSandbox(timeout int) error {
if err != nil {
return err
}
if err = q.storeState(); err != nil {
return err
}
}
var strErr string
@@ -882,7 +867,7 @@ func (q *qemu) stopSandbox() error {
func (q *qemu) cleanupVM() error {
// cleanup vm path
dir := filepath.Join(store.RunVMStoragePath(), q.id)
dir := filepath.Join(fs.RunVMStoragePath(), q.id)
// If it's a symlink, remove both dir and the target.
// This can happen when vm template links a sandbox to a vm.
@@ -903,11 +888,7 @@ func (q *qemu) cleanupVM() error {
}
if q.config.VMid != "" {
dir = store.SandboxConfigurationRootPath(q.config.VMid)
if err := os.RemoveAll(dir); err != nil {
q.Logger().WithError(err).WithField("path", dir).Warnf("failed to remove vm path")
}
dir = store.SandboxRuntimeRootPath(q.config.VMid)
dir = filepath.Join(fs.RunStoragePath(), q.config.VMid)
if err := os.RemoveAll(dir); err != nil {
q.Logger().WithError(err).WithField("path", dir).Warnf("failed to remove vm path")
}
@@ -1289,7 +1270,7 @@ func (q *qemu) hotplugAddDevice(devInfo interface{}, devType deviceType) (interf
return data, err
}
return data, q.storeState()
return data, nil
}
func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
@@ -1301,7 +1282,7 @@ func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (int
return data, err
}
return data, q.storeState()
return data, nil
}
func (q *qemu) hotplugCPUs(vcpus uint32, op operation) (uint32, error) {
@@ -1383,15 +1364,10 @@ func (q *qemu) hotplugAddCPUs(amount uint32) (uint32, error) {
hotpluggedVCPUs++
if hotpluggedVCPUs == amount {
// All vCPUs were hotplugged
return amount, q.storeState()
return amount, nil
}
}
// All vCPUs were NOT hotplugged
if err := q.storeState(); err != nil {
q.Logger().Errorf("failed to save hypervisor state after hotplug %d vCPUs: %v", hotpluggedVCPUs, err)
}
return hotpluggedVCPUs, fmt.Errorf("failed to hot add vCPUs: only %d vCPUs of %d were added", hotpluggedVCPUs, amount)
}
@@ -1408,7 +1384,6 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) {
// get the last vCPU and try to remove it
cpu := q.state.HotpluggedVCPUs[len(q.state.HotpluggedVCPUs)-1]
if err := q.qmpMonitorCh.qmp.ExecuteDeviceDel(q.qmpMonitorCh.ctx, cpu.ID); err != nil {
q.storeState()
return i, fmt.Errorf("failed to hotunplug CPUs, only %d CPUs were hotunplugged: %v", i, err)
}
@@ -1416,7 +1391,7 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) {
q.state.HotpluggedVCPUs = q.state.HotpluggedVCPUs[:len(q.state.HotpluggedVCPUs)-1]
}
return amount, q.storeState()
return amount, nil
}
func (q *qemu) hotplugMemory(memDev *memoryDevice, op operation) (int, error) {
@@ -1522,7 +1497,7 @@ func (q *qemu) hotplugAddMemory(memDev *memoryDevice) (int, error) {
}
}
q.state.HotpluggedMemory += memDev.sizeMB
return memDev.sizeMB, q.storeState()
return memDev.sizeMB, nil
}
func (q *qemu) pauseSandbox() error {
@@ -1603,7 +1578,7 @@ func (q *qemu) getSandboxConsole(id string) (string, error) {
span, _ := q.trace("getSandboxConsole")
defer span.Finish()
return utils.BuildSocketPath(store.RunVMStoragePath(), id, consoleSocket)
return utils.BuildSocketPath(fs.RunVMStoragePath(), id, consoleSocket)
}
func (q *qemu) saveSandbox() error {
@@ -1938,7 +1913,7 @@ type qemuGrpc struct {
QemuSMP govmmQemu.SMP
}
func (q *qemu) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error {
func (q *qemu) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error {
var qp qemuGrpc
err := json.Unmarshal(j, &qp)
if err != nil {
@@ -1946,7 +1921,6 @@ func (q *qemu) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig,
}
q.id = qp.ID
q.store = store
q.config = *hypervisorConfig
q.qmpMonitorCh.ctx = ctx
q.qmpMonitorCh.path = qp.QmpChannelpath
@@ -1978,16 +1952,6 @@ func (q *qemu) toGrpc() ([]byte, error) {
return json.Marshal(&qp)
}
func (q *qemu) storeState() error {
if q.store != nil {
q.state.Bridges = q.arch.getBridges()
if err := q.store.Store(store.Hypervisor, q.state); err != nil {
return err
}
}
return nil
}
func (q *qemu) save() (s persistapi.HypervisorState) {
pids := q.getPids()
if len(pids) != 0 {

View File

@@ -16,7 +16,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/kata-containers/runtime/virtcontainers/device/config"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
"github.com/kata-containers/runtime/virtcontainers/types"
"github.com/pkg/errors"
)
@@ -259,7 +259,7 @@ func TestQemuArchBaseAppendConsoles(t *testing.T) {
assert := assert.New(t)
qemuArchBase := newQemuArchBase()
path := filepath.Join(store.SandboxRuntimeRootPath(sandboxID), consoleSocket)
path := filepath.Join(filepath.Join(fs.RunStoragePath(), sandboxID), consoleSocket)
expectedOut := []govmmQemu.Device{
govmmQemu.SerialDevice{

View File

@@ -16,7 +16,8 @@ import (
govmmQemu "github.com/intel/govmm/qemu"
"github.com/kata-containers/runtime/virtcontainers/device/config"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/persist"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
"github.com/kata-containers/runtime/virtcontainers/types"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
@@ -85,21 +86,16 @@ func TestQemuCreateSandbox(t *testing.T) {
},
}
vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
assert.NoError(err)
sandbox.store = vcStore
// Create the hypervisor fake binary
testQemuPath := filepath.Join(testDir, testHypervisor)
_, err = os.Create(testQemuPath)
_, err := os.Create(testQemuPath)
assert.NoError(err)
// Create parent dir path for hypervisor.json
parentDir := store.SandboxConfigurationRootPath(sandbox.id)
assert.NoError(os.MkdirAll(parentDir, store.DirMode))
parentDir := filepath.Join(fs.RunStoragePath(), sandbox.id)
assert.NoError(os.MkdirAll(parentDir, DirMode))
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false)
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
assert.NoError(err)
assert.NoError(os.RemoveAll(parentDir))
assert.Exactly(qemuConfig, q.config)
@@ -118,20 +114,16 @@ func TestQemuCreateSandboxMissingParentDirFail(t *testing.T) {
},
}
vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
assert.NoError(err)
sandbox.store = vcStore
// Create the hypervisor fake binary
testQemuPath := filepath.Join(testDir, testHypervisor)
_, err = os.Create(testQemuPath)
_, err := os.Create(testQemuPath)
assert.NoError(err)
// Ensure parent dir path for hypervisor.json does not exist.
parentDir := store.SandboxConfigurationRootPath(sandbox.id)
parentDir := filepath.Join(fs.RunStoragePath(), sandbox.id)
assert.NoError(os.RemoveAll(parentDir))
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false)
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
assert.NoError(err)
}
@@ -288,7 +280,7 @@ func TestQemuGetSandboxConsole(t *testing.T) {
ctx: context.Background(),
}
sandboxID := "testSandboxID"
expected := filepath.Join(store.RunVMStoragePath(), sandboxID, consoleSocket)
expected := filepath.Join(fs.RunVMStoragePath(), sandboxID, consoleSocket)
result, err := q.getSandboxConsole(sandboxID)
assert.NoError(err)
@@ -364,11 +356,7 @@ func TestHotplugUnsupportedDeviceType(t *testing.T) {
config: qemuConfig,
}
vcStore, err := store.NewVCSandboxStore(q.ctx, q.id)
assert.NoError(err)
q.store = vcStore
_, err = q.hotplugAddDevice(&memoryDevice{0, 128, uint64(0), false}, fsDev)
_, err := q.hotplugAddDevice(&memoryDevice{0, 128, uint64(0), false}, fsDev)
assert.Error(err)
_, err = q.hotplugRemoveDevice(&memoryDevice{0, 128, uint64(0), false}, fsDev)
assert.Error(err)
@@ -414,7 +402,7 @@ func TestQemuGrpc(t *testing.T) {
assert.Nil(err)
var q2 qemu
err = q2.fromGrpc(context.Background(), &config, nil, json)
err = q2.fromGrpc(context.Background(), &config, json)
assert.Nil(err)
assert.True(q.id == q2.id)
@@ -429,7 +417,7 @@ func TestQemuFileBackedMem(t *testing.T) {
q := &qemu{}
sandbox.config.HypervisorConfig.SharedFS = config.VirtioFS
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false)
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
assert.NoError(err)
assert.Equal(q.qemuConfig.Knobs.FileBackedMem, true)
@@ -445,7 +433,7 @@ func TestQemuFileBackedMem(t *testing.T) {
sandbox.config.HypervisorConfig.SharedFS = config.VirtioFS
sandbox.config.HypervisorConfig.MemoryPath = fallbackFileBackedMemDir
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false)
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
expectErr := errors.New("VM templating has been enabled with either virtio-fs or file backed memory and this configuration will not work")
assert.Equal(expectErr.Error(), err.Error())
@@ -456,7 +444,7 @@ func TestQemuFileBackedMem(t *testing.T) {
q = &qemu{}
sandbox.config.HypervisorConfig.FileBackedMemRootDir = "/tmp/xyzabc"
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false)
err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
assert.NoError(err)
assert.Equal(q.qemuConfig.Knobs.FileBackedMem, false)
assert.Equal(q.qemuConfig.Knobs.MemShared, false)
@@ -474,11 +462,11 @@ func createQemuSandboxConfig() (*Sandbox, error) {
},
}
vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
newStore, err := persist.GetDriver("fs")
if err != nil {
return &Sandbox{}, err
}
sandbox.store = vcStore
sandbox.newStore = newStore
return &sandbox, nil
}

View File

@@ -46,6 +46,9 @@ const (
// vmStartTimeout represents the time in seconds a sandbox can wait
// before considering the VM start operation failed.
vmStartTimeout = 10
// DirMode is the permission bits used for creating a directory
DirMode = os.FileMode(0750) | os.ModeDir
)
// SandboxStatus describes a sandbox status.
@@ -190,9 +193,6 @@ type Sandbox struct {
containers map[string]*Container
runPath string
configPath string
state types.SandboxState
networkNS NetworkNamespace
@@ -244,10 +244,6 @@ func (s *Sandbox) SetAnnotations(annotations map[string]string) error {
for k, v := range annotations {
s.config.Annotations[k] = v
}
if !s.supportNewStore() {
return s.store.Store(store.Configuration, *(s.config))
}
return nil
}
@@ -454,12 +450,6 @@ func (s *Sandbox) getAndStoreGuestDetails() error {
s.seccompSupported = guestDetailRes.AgentDetails.SupportsSeccomp
}
s.state.GuestMemoryHotplugProbe = guestDetailRes.SupportMemHotplugProbe
if !s.supportNewStore() {
if err = s.store.Store(store.State, s.state); err != nil {
return err
}
}
}
return nil
@@ -531,8 +521,6 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
config: &sandboxConfig,
volumes: sandboxConfig.Volumes,
containers: map[string]*Container{},
runPath: store.SandboxRuntimeRootPath(sandboxConfig.ID),
configPath: store.SandboxConfigurationRootPath(sandboxConfig.ID),
state: types.SandboxState{},
annotationsLock: &sync.RWMutex{},
wg: &sync.WaitGroup{},
@@ -543,15 +531,8 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
ctx: ctx,
}
vcStore, err := store.NewVCSandboxStore(ctx, s.id)
if err != nil {
return nil, err
}
s.store = vcStore
if s.newStore, err = persist.GetDriver("fs"); err != nil || s.newStore == nil {
return nil, fmt.Errorf("failed to get fs persist driver")
return nil, fmt.Errorf("failed to get fs persist driver: %v", err)
}
if err = globalSandboxList.addSandbox(s); err != nil {
@@ -562,21 +543,18 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
if err != nil {
s.Logger().WithError(err).WithField("sandboxid", s.id).Error("Create new sandbox failed")
globalSandboxList.removeSandbox(s.id)
s.store.Delete()
s.newStore.Destroy(s.id)
}
}()
if s.supportNewStore() {
s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, nil)
// Ignore the error. Restore can fail for a new sandbox
s.Restore()
// new store doesn't require hypervisor to be stored immediately
if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig, nil, s.stateful); err != nil {
if useOldStore(ctx) {
vcStore, err := store.NewVCSandboxStore(ctx, s.id)
if err != nil {
return nil, err
}
} else {
s.store = vcStore
// Fetch sandbox network to be able to access it from the sandbox structure.
var networkNS NetworkNamespace
if err = s.store.Load(store.Network, &networkNS); err == nil {
@@ -595,7 +573,19 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
s.state = state
}
if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig, s.store, s.stateful); err != nil {
if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig, s.stateful); err != nil {
return nil, err
}
} else {
s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, nil)
// Ignore the error. Restore can fail for a new sandbox
if err := s.Restore(); err != nil {
s.Logger().WithError(err).Debug("restore sandbox failed")
}
// new store doesn't require hypervisor to be stored immediately
if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig, s.stateful); err != nil {
return nil, err
}
}
@@ -612,84 +602,34 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
return s, nil
}
func (s *Sandbox) storeSandboxDevices() error {
return s.store.StoreDevices(s.devManager.GetAllDevices())
}
// storeSandbox stores a sandbox config.
func (s *Sandbox) storeSandbox() error {
span, _ := s.trace("storeSandbox")
defer span.Finish()
if s.supportNewStore() {
// flush data to storage
if err := s.Save(); err != nil {
return err
}
} else {
err := s.store.Store(store.Configuration, *(s.config))
if err != nil {
return err
}
for _, container := range s.containers {
err = container.store.Store(store.Configuration, *(container.config))
if err != nil {
return err
}
}
// flush data to storage
if err := s.Save(); err != nil {
return err
}
return nil
}
func rLockSandbox(ctx context.Context, sandboxID string) (string, error) {
store, err := store.NewVCSandboxStore(ctx, sandboxID)
func rLockSandbox(sandboxID string) (func() error, error) {
store, err := persist.GetDriver("fs")
if err != nil {
return "", err
return nil, fmt.Errorf("failed to get fs persist driver: %v", err)
}
return store.RLock()
return store.Lock(sandboxID, false)
}
func rwLockSandbox(ctx context.Context, sandboxID string) (string, error) {
store, err := store.NewVCSandboxStore(ctx, sandboxID)
func rwLockSandbox(sandboxID string) (func() error, error) {
store, err := persist.GetDriver("fs")
if err != nil {
return "", err
return nil, fmt.Errorf("failed to get fs persist driver: %v", err)
}
return store.Lock()
}
func unlockSandbox(ctx context.Context, sandboxID, token string) error {
// If the store no longer exists, we won't be able to unlock.
// Creating a new store for locking an item that does not even exist
// does not make sense.
if !store.VCSandboxStoreExists(ctx, sandboxID) {
return nil
}
store, err := store.NewVCSandboxStore(ctx, sandboxID)
if err != nil {
return err
}
return store.Unlock(token)
}
func supportNewStore(ctx context.Context) bool {
if exp.Get("newstore") == nil {
return false
}
// check if client context enabled "newstore" feature
exps := exp.ExpFromContext(ctx)
for _, v := range exps {
if v == "newstore" {
return true
}
}
return false
return store.Lock(sandboxID, true)
}
// fetchSandbox fetches a sandbox config from a sandbox ID and returns a sandbox.
@@ -706,24 +646,22 @@ func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err
var config SandboxConfig
if supportNewStore(ctx) {
c, err := loadSandboxConfig(sandboxID)
// Try to load the sandbox config from the old store first.
c, ctx, err := loadSandboxConfigFromOldStore(ctx, sandboxID)
if err != nil {
virtLog.Warningf("failed to get sandbox config from old store: %v", err)
// If we failed to load sandbox config from old store, try again with new store.
c, err = loadSandboxConfig(sandboxID)
if err != nil {
return nil, err
}
config = *c
} else {
// We're bootstrapping
vcStore, err := store.NewVCSandboxStore(ctx, sandboxID)
if err != nil {
return nil, err
}
if err := vcStore.Load(store.Configuration, &config); err != nil {
virtLog.Warningf("failed to get sandbox config from new store: %v", err)
return nil, err
}
}
config = *c
if useOldStore(ctx) {
virtLog.Infof("Warning: old store has been deprecated.")
}
// fetchSandbox is not supposed to create a new sandbox VM.
sandbox, err = createSandbox(ctx, config, nil)
if err != nil {
@@ -812,7 +750,7 @@ func (s *Sandbox) Delete() error {
s.agent.cleanup(s)
return s.store.Delete()
return s.newStore.Destroy(s.id)
}
func (s *Sandbox) startNetworkMonitor() error {
@@ -880,11 +818,6 @@ func (s *Sandbox) createNetwork() error {
}
}
}
// Store the network
if !s.supportNewStore() {
return s.store.Store(store.Network, s.networkNS)
}
return nil
}
@@ -958,14 +891,8 @@ func (s *Sandbox) AddInterface(inf *vcTypes.Interface) (*vcTypes.Interface, erro
// Update the sandbox storage
s.networkNS.Endpoints = append(s.networkNS.Endpoints, endpoint)
if s.supportNewStore() {
if err := s.Save(); err != nil {
return nil, err
}
} else {
if err := s.store.Store(store.Network, s.networkNS); err != nil {
return nil, err
}
if err := s.Save(); err != nil {
return nil, err
}
// Add network for vm
@@ -983,14 +910,8 @@ func (s *Sandbox) RemoveInterface(inf *vcTypes.Interface) (*vcTypes.Interface, e
}
s.networkNS.Endpoints = append(s.networkNS.Endpoints[:i], s.networkNS.Endpoints[i+1:]...)
if s.supportNewStore() {
if err := s.Save(); err != nil {
return inf, err
}
} else {
if err := s.store.Store(store.Network, s.networkNS); err != nil {
return inf, err
}
if err := s.Save(); err != nil {
return inf, err
}
break
@@ -1064,12 +985,6 @@ func (s *Sandbox) startVM() (err error) {
return err
}
}
if !s.supportNewStore() {
if err := s.store.Store(store.Network, s.networkNS); err != nil {
return err
}
}
}
s.Logger().Info("VM started")
@@ -1100,7 +1015,6 @@ func (s *Sandbox) stopVM() error {
if s.disableVMShutdown {
// Do not kill the VM - allow the agent to shut it down
// (only used to support static agent tracing).
s.Logger().Info("Not stopping VM")
return nil
}
@@ -1148,7 +1062,6 @@ func (s *Sandbox) fetchContainers() error {
// This should be called only when the sandbox is already created.
// It will add new container config to sandbox.config.Containers
func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, error) {
storeAlreadyExists := store.VCContainerStoreExists(s.ctx, s.id, contConfig.ID)
// Create the container.
c, err := newContainer(s, &contConfig)
if err != nil {
@@ -1164,11 +1077,6 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro
// delete container config
s.config.Containers = s.config.Containers[:len(s.config.Containers)-1]
}
if !storeAlreadyExists {
if delerr := c.store.Delete(); delerr != nil {
c.Logger().WithError(delerr).WithField("cid", c.id).Error("delete store failed")
}
}
}
}()
@@ -1182,6 +1090,13 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro
return nil, err
}
defer func() {
// Roll back if an error happens.
if err != nil {
s.removeContainer(c.id)
}
}()
// The sandbox is responsible for updating the VM resources needed by its containers.
// Update resources after having added containers to the sandbox, since
// container status is required to know whether more resources should be added.
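The deferred rollback added above follows a common Go idiom: the deferred closure reads the named error return, so any later failure in the function automatically undoes the partial work. A small illustrative sketch, not the actual CreateContainer code:

package main

import (
	"errors"
	"fmt"
)

type sandbox struct{ containers map[string]bool }

func (s *sandbox) addContainer(id string)    { s.containers[id] = true }
func (s *sandbox) removeContainer(id string) { delete(s.containers, id) }

// createContainer registers the container and rolls the registration back
// if any subsequent step fails.
func (s *sandbox) createContainer(id string, failLater bool) (err error) {
	s.addContainer(id)
	defer func() {
		// Roll back if an error happened after the container was added.
		if err != nil {
			s.removeContainer(id)
		}
	}()

	if failLater {
		return errors.New("resource update failed")
	}
	return nil
}

func main() {
	s := &sandbox{containers: map[string]bool{}}
	fmt.Println(s.createContainer("100", true), s.containers)  // rolled back
	fmt.Println(s.createContainer("101", false), s.containers) // kept
}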
@@ -1190,12 +1105,6 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro
return nil, err
}
// Store it.
err = c.storeContainer()
if err != nil {
return nil, err
}
if err = s.cgroupsUpdate(); err != nil {
return nil, err
}
@@ -1382,9 +1291,6 @@ func (s *Sandbox) UpdateContainer(containerID string, resources specs.LinuxResou
return err
}
if err := c.storeContainer(); err != nil {
return err
}
if err = s.storeSandbox(); err != nil {
return err
}
@@ -1617,8 +1523,7 @@ func (s *Sandbox) setSandboxState(state types.StateString) error {
// update in-memory state
s.state.State = state
// update on-disk state
if !s.supportNewStore() {
if useOldStore(s.ctx) {
return s.store.Store(store.State, s.state)
}
return nil
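Gating the on-disk write on useOldStore keeps the legacy state.json behaviour for sandboxes created before the switch, while new sandboxes rely on persist.json alone. Sketched roughly below; useOldStore and the legacy store type are placeholders, not the runtime's real definitions.

package main

import "fmt"

type legacyStore struct{}

func (legacyStore) StoreState(state string) error {
	fmt.Println("writing state.json:", state)
	return nil
}

type sandbox struct {
	state    string
	oldStore *legacyStore
}

// useOldStore stands in for the context-based check in the runtime.
func (s *sandbox) useOldStore() bool { return s.oldStore != nil }

func (s *sandbox) setState(state string) error {
	s.state = state // update in-memory state
	if s.useOldStore() {
		// Only legacy sandboxes still maintain the old on-disk state file.
		return s.oldStore.StoreState(state)
	}
	return nil
}

func main() {
	legacy := &sandbox{oldStore: &legacyStore{}}
	fresh := &sandbox{}
	fmt.Println(legacy.setState("running"), fresh.setState("running"))
}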
@@ -1639,14 +1544,6 @@ func (s *Sandbox) getAndSetSandboxBlockIndex() (int, error) {
// Increment so that container gets incremented block index
s.state.BlockIndex++
if !s.supportNewStore() {
// experimental runtime use "persist.json" which doesn't need "state.json" anymore
// update on-disk state
if err := s.store.Store(store.State, s.state); err != nil {
return -1, err
}
}
return currentIndex, nil
}
@@ -1662,14 +1559,6 @@ func (s *Sandbox) decrementSandboxBlockIndex() error {
}
}()
if !s.supportNewStore() {
// experimental runtime use "persist.json" which doesn't need "state.json" anymore
// update on-disk state
if err = s.store.Store(store.State, s.state); err != nil {
return err
}
}
return nil
}
@@ -1799,12 +1688,6 @@ func (s *Sandbox) AddDevice(info config.DeviceInfo) (api.Device, error) {
}
}()
if !s.supportNewStore() {
if err = s.storeSandboxDevices(); err != nil {
return nil, err
}
}
return b, nil
}

View File

@@ -7,7 +7,6 @@ package virtcontainers
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
@@ -22,8 +21,8 @@ import (
"github.com/kata-containers/runtime/virtcontainers/device/drivers"
"github.com/kata-containers/runtime/virtcontainers/device/manager"
exp "github.com/kata-containers/runtime/virtcontainers/experimental"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
"github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/kata-containers/runtime/virtcontainers/types"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/stretchr/testify/assert"
@@ -575,11 +574,6 @@ func TestSetAnnotations(t *testing.T) {
},
}
vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
assert.NoError(err)
sandbox.store = vcStore
keyAnnotation := "annotation2"
valueAnnotation := "xyz"
newAnnotations := map[string]string{
@@ -658,27 +652,12 @@ func TestContainerStateSetFstype(t *testing.T) {
assert.Nil(err)
defer cleanUp()
vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
assert.Nil(err)
sandbox.store = vcStore
c := sandbox.GetContainer("100")
assert.NotNil(c)
cImpl, ok := c.(*Container)
assert.True(ok)
containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, c.ID())
assert.NoError(err)
cImpl.store = containerStore
path := store.ContainerRuntimeRootPath(testSandboxID, c.ID())
stateFilePath := filepath.Join(path, store.StateFile)
f, err := os.Create(stateFilePath)
assert.NoError(err)
state := types.ContainerState{
State: "ready",
Fstype: "vfs",
@@ -686,34 +665,10 @@ func TestContainerStateSetFstype(t *testing.T) {
cImpl.state = state
stateData := `{
"state":"ready",
"fstype":"vfs",
}`
n, err := f.WriteString(stateData)
defer f.Close()
assert.NoError(err)
assert.Equal(n, len(stateData))
newFstype := "ext4"
err = cImpl.setStateFstype(newFstype)
assert.NoError(err)
assert.Equal(cImpl.state.Fstype, newFstype)
fileData, err := ioutil.ReadFile(stateFilePath)
assert.NoError(err)
// experimental features doesn't write state.json
if sandbox.supportNewStore() {
return
}
var res types.ContainerState
err = json.Unmarshal([]byte(string(fileData)), &res)
assert.NoError(err)
assert.Equal(res.Fstype, newFstype)
assert.Equal(res.State, state.State)
}
const vfioPath = "/dev/vfio/"
@@ -727,7 +682,7 @@ func TestSandboxAttachDevicesVFIO(t *testing.T) {
testDeviceBDFPath := "0000:00:1c.0"
devicesDir := filepath.Join(tmpDir, testFDIOGroup, "devices")
err = os.MkdirAll(devicesDir, store.DirMode)
err = os.MkdirAll(devicesDir, DirMode)
assert.Nil(t, err)
deviceFile := filepath.Join(devicesDir, testDeviceBDFPath)
@@ -773,14 +728,8 @@ func TestSandboxAttachDevicesVFIO(t *testing.T) {
config: &SandboxConfig{},
}
store, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
assert.Nil(t, err)
sandbox.store = store
containers[c.id].sandbox = &sandbox
err = sandbox.storeSandboxDevices()
assert.Nil(t, err, "Error while store sandbox devices %s", err)
err = containers[c.id].attachDevices()
assert.Nil(t, err, "Error while attaching devices %s", err)
@@ -916,8 +865,6 @@ func TestCreateContainer(t *testing.T) {
_, err = s.CreateContainer(contConfig)
assert.NotNil(t, err, "Should failed to create a duplicated container")
assert.Equal(t, len(s.config.Containers), 1, "Container config list length from sandbox structure should be 1")
ret := store.VCContainerStoreExists(s.ctx, testSandboxID, contID)
assert.True(t, ret, "Should not delete container store that already existed")
}
func TestDeleteContainer(t *testing.T) {
@@ -1023,8 +970,6 @@ func TestDeleteStoreWhenCreateContainerFail(t *testing.T) {
s.state.CgroupPath = filepath.Join(testDir, "bad-cgroup")
_, err = s.CreateContainer(contConfig)
assert.NotNil(t, err, "Should fail to create container due to wrong cgroup")
ret := store.VCContainerStoreExists(s.ctx, testSandboxID, contID)
assert.False(t, ret, "Should delete configuration root after failed to create a container")
}
func TestDeleteStoreWhenNewContainerFail(t *testing.T) {
@@ -1045,7 +990,7 @@ func TestDeleteStoreWhenNewContainerFail(t *testing.T) {
}
_, err = newContainer(p, &contConfig)
assert.NotNil(t, err, "New container with invalid device info should fail")
storePath := store.ContainerConfigurationRootPath(testSandboxID, contID)
storePath := filepath.Join(fs.RunStoragePath(), testSandboxID, contID)
_, err = os.Stat(storePath)
assert.NotNil(t, err, "Should delete configuration root after failed to create a container")
}
@@ -1208,10 +1153,6 @@ func TestAttachBlockDevice(t *testing.T) {
ctx: context.Background(),
}
vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
assert.Nil(t, err)
sandbox.store = vcStore
contID := "100"
container := Container{
sandbox: sandbox,
@@ -1219,19 +1160,12 @@ func TestAttachBlockDevice(t *testing.T) {
}
// create state file
path := store.ContainerRuntimeRootPath(testSandboxID, container.ID())
err = os.MkdirAll(path, store.DirMode)
path := filepath.Join(fs.RunStoragePath(), testSandboxID, container.ID())
err := os.MkdirAll(path, DirMode)
assert.NoError(t, err)
defer os.RemoveAll(path)
stateFilePath := filepath.Join(path, store.StateFile)
os.Remove(stateFilePath)
_, err = os.Create(stateFilePath)
assert.NoError(t, err)
defer os.Remove(stateFilePath)
path = "/dev/hda"
deviceInfo := config.DeviceInfo{
HostPath: path,
@@ -1295,10 +1229,6 @@ func TestPreAddDevice(t *testing.T) {
ctx: context.Background(),
}
vcStore, err := store.NewVCSandboxStore(sandbox.ctx, sandbox.id)
assert.Nil(t, err)
sandbox.store = vcStore
contID := "100"
container := Container{
sandbox: sandbox,
@@ -1307,24 +1237,13 @@ func TestPreAddDevice(t *testing.T) {
}
container.state.State = types.StateReady
containerStore, err := store.NewVCContainerStore(sandbox.ctx, sandbox.id, container.id)
assert.Nil(t, err)
container.store = containerStore
// create state file
path := store.ContainerRuntimeRootPath(testSandboxID, container.ID())
err = os.MkdirAll(path, store.DirMode)
path := filepath.Join(fs.RunStoragePath(), testSandboxID, container.ID())
err := os.MkdirAll(path, DirMode)
assert.NoError(t, err)
defer os.RemoveAll(path)
stateFilePath := filepath.Join(path, store.StateFile)
os.Remove(stateFilePath)
_, err = os.Create(stateFilePath)
assert.NoError(t, err)
defer os.Remove(stateFilePath)
path = "/dev/hda"
deviceInfo := config.DeviceInfo{
HostPath: path,
@@ -1417,9 +1336,6 @@ func checkDirNotExist(path string) error {
func checkSandboxRemains() error {
var err error
if err = checkDirNotExist(sandboxDirConfig); err != nil {
return fmt.Errorf("%s still exists", sandboxDirConfig)
}
if err = checkDirNotExist(sandboxDirState); err != nil {
return fmt.Errorf("%s still exists", sandboxDirState)
}

View File

@@ -334,9 +334,3 @@ func VCSandboxStoreExists(ctx context.Context, sandboxID string) bool {
s := stores.findStore(SandboxConfigurationRoot(sandboxID))
return s != nil
}
// VCContainerStoreExists returns true if a container store already exists.
func VCContainerStoreExists(ctx context.Context, sandboxID string, containerID string) bool {
s := stores.findStore(ContainerConfigurationRoot(sandboxID, containerID))
return s != nil
}

View File

@@ -35,12 +35,7 @@ const testDisabledAsNonRoot = "Test disabled as requires root privileges"
// package variables set in TestMain
var testDir = ""
var sandboxDirConfig = ""
var sandboxFileConfig = ""
var sandboxDirState = ""
var sandboxDirLock = ""
var sandboxFileState = ""
var sandboxFileLock = ""
var testQemuKernelPath = ""
var testQemuInitrdPath = ""
var testQemuImagePath = ""
@@ -57,28 +52,24 @@ var testVirtiofsdPath = ""
var testHyperstartCtlSocket = ""
var testHyperstartTtySocket = ""
var savedRunVMStoragePathFunc func() string
// cleanUp removes any stale sandbox/container state that could affect
// the next test to run.
func cleanUp() {
globalSandboxList.removeSandbox(testSandboxID)
store.DeleteAll()
os.RemoveAll(fs.RunStoragePath())
os.RemoveAll(fs.RunVMStoragePath())
os.RemoveAll(testDir)
os.MkdirAll(testDir, DirMode)
store.DeleteAll()
store.VCStorePrefix = ""
store.RunVMStoragePath = savedRunVMStoragePathFunc
setup()
}
func setup() {
store.VCStorePrefix = testDir
savedRunVMStoragePathFunc = store.RunVMStoragePath
store.RunVMStoragePath = func() string {
return filepath.Join("testDir", "vm")
}
os.MkdirAll(store.RunVMStoragePath(), store.DirMode)
os.MkdirAll(filepath.Join(testDir, testBundle), store.DirMode)
os.Mkdir(filepath.Join(testDir, testBundle), DirMode)
for _, filename := range []string{testQemuKernelPath, testQemuInitrdPath, testQemuImagePath, testQemuPath} {
_, err := os.Create(filename)
@@ -90,7 +81,7 @@ func setup() {
}
func setupAcrn() {
os.Mkdir(filepath.Join(testDir, testBundle), store.DirMode)
os.Mkdir(filepath.Join(testDir, testBundle), DirMode)
for _, filename := range []string{testAcrnKernelPath, testAcrnImagePath, testAcrnPath, testAcrnCtlPath} {
_, err := os.Create(filename)
@@ -102,7 +93,7 @@ func setupAcrn() {
}
func setupClh() {
os.Mkdir(filepath.Join(testDir, testBundle), store.DirMode)
os.Mkdir(filepath.Join(testDir, testBundle), DirMode)
for _, filename := range []string{testClhKernelPath, testClhImagePath, testClhPath, testVirtiofsdPath} {
_, err := os.Create(filename)
@@ -135,7 +126,7 @@ func TestMain(m *testing.M) {
}
fmt.Printf("INFO: Creating virtcontainers test directory %s\n", testDir)
err = os.MkdirAll(testDir, store.DirMode)
err = os.MkdirAll(testDir, DirMode)
if err != nil {
fmt.Println("Could not create test directories:", err)
os.Exit(1)
@@ -170,25 +161,19 @@ func TestMain(m *testing.M) {
setupClh()
ConfigStoragePathSaved := store.ConfigStoragePath
RunStoragePathSaved := store.RunStoragePath
// allow the tests to run without affecting the host system.
store.ConfigStoragePath = func() string { return filepath.Join(testDir, store.StoragePathSuffix, "config") }
store.RunStoragePath = func() string { return filepath.Join(testDir, store.StoragePathSuffix, "run") }
fs.TestSetRunStoragePath(filepath.Join(testDir, "vc", "sbs"))
runPathSave := fs.RunStoragePath()
rootPathSave := fs.StorageRootPath()
fs.TestSetRunStoragePath(filepath.Join(testDir, "vc", "run"))
fs.TestSetStorageRootPath(filepath.Join(testDir, "vc"))
defer func() {
store.ConfigStoragePath = ConfigStoragePathSaved
store.RunStoragePath = RunStoragePathSaved
fs.TestSetRunStoragePath(runPathSave)
fs.TestSetStorageRootPath(rootPathSave)
}()
// set now that configStoragePath has been overridden.
sandboxDirConfig = filepath.Join(store.ConfigStoragePath(), testSandboxID)
sandboxFileConfig = filepath.Join(store.ConfigStoragePath(), testSandboxID, store.ConfigurationFile)
sandboxDirState = filepath.Join(store.RunStoragePath(), testSandboxID)
sandboxDirLock = filepath.Join(store.RunStoragePath(), testSandboxID)
sandboxFileState = filepath.Join(store.RunStoragePath(), testSandboxID, store.StateFile)
sandboxFileLock = filepath.Join(store.RunStoragePath(), testSandboxID, store.LockFile)
sandboxDirState = filepath.Join(fs.RunStoragePath(), testSandboxID)
testHyperstartCtlSocket = filepath.Join(testDir, "test_hyper.sock")
testHyperstartTtySocket = filepath.Join(testDir, "test_tty.sock")

View File

@@ -14,8 +14,10 @@ import (
"time"
pb "github.com/kata-containers/runtime/protocols/cache"
"github.com/kata-containers/runtime/virtcontainers/persist"
persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
"github.com/kata-containers/runtime/virtcontainers/persist/fs"
"github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
"github.com/kata-containers/runtime/virtcontainers/store"
"github.com/sirupsen/logrus"
)
@@ -35,7 +37,7 @@ type VM struct {
cpuDelta uint32
store *store.VCStore
store persistapi.PersistDriver
}
// VMConfig is a collection of all info that a new blackbox VM needs.
@@ -157,9 +159,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
virtLog.WithField("vm", id).WithField("config", config).Info("create new vm")
vcStore, err := store.NewVCStore(ctx,
store.SandboxConfigurationRoot(id),
store.SandboxRuntimeRoot(id))
store, err := persist.GetDriver("fs")
if err != nil {
return nil, err
}
@@ -168,11 +168,11 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
if err != nil {
virtLog.WithField("vm", id).WithError(err).Error("failed to create new vm")
virtLog.WithField("vm", id).Errorf("Deleting store for %s", id)
vcStore.Delete()
store.Destroy(id)
}
}()
if err = hypervisor.createSandbox(ctx, id, NetworkNamespace{}, &config.HypervisorConfig, vcStore, false); err != nil {
if err = hypervisor.createSandbox(ctx, id, NetworkNamespace{}, &config.HypervisorConfig, false); err != nil {
return nil, err
}
@@ -230,7 +230,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
proxyURL: url,
cpu: config.HypervisorConfig.NumVCPUs,
memory: config.HypervisorConfig.MemorySize,
store: vcStore,
store: store,
}, nil
}
@@ -243,9 +243,7 @@ func NewVMFromGrpc(ctx context.Context, v *pb.GrpcVM, config VMConfig) (*VM, err
return nil, err
}
vcStore, err := store.NewVCStore(ctx,
store.SandboxConfigurationRoot(v.Id),
store.SandboxRuntimeRoot(v.Id))
store, err := persist.GetDriver("fs")
if err != nil {
return nil, err
}
@@ -254,11 +252,11 @@ func NewVMFromGrpc(ctx context.Context, v *pb.GrpcVM, config VMConfig) (*VM, err
if err != nil {
virtLog.WithField("vm", v.Id).WithError(err).Error("failed to create new vm from Grpc")
virtLog.WithField("vm", v.Id).Errorf("Deleting store for %s", v.Id)
vcStore.Delete()
store.Destroy(v.Id)
}
}()
err = hypervisor.fromGrpc(ctx, &config.HypervisorConfig, vcStore, v.Hypervisor)
err = hypervisor.fromGrpc(ctx, &config.HypervisorConfig, v.Hypervisor)
if err != nil {
return nil, err
}
@@ -282,11 +280,12 @@ func NewVMFromGrpc(ctx context.Context, v *pb.GrpcVM, config VMConfig) (*VM, err
cpu: v.Cpu,
memory: v.Memory,
cpuDelta: v.CpuDelta,
store: store,
}, nil
}
func buildVMSharePath(id string) string {
return filepath.Join(store.RunVMStoragePath(), id, "shared")
return filepath.Join(fs.RunVMStoragePath(), id, "shared")
}
func (v *VM) logger() logrus.FieldLogger {
@@ -339,7 +338,7 @@ func (v *VM) Stop() error {
return err
}
return v.store.Delete()
return v.store.Destroy(v.id)
}
// AddCPUs adds num of CPUs to the VM.
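Throughout the diff, the per-object VCStore is replaced by a persist driver obtained via persist.GetDriver("fs"), and teardown paths such as vm.Stop and Sandbox.Delete now call its Destroy(id) instead of store.Delete(). A rough, self-contained sketch of such a filesystem-backed driver follows; the interface, file names, and paths are simplified assumptions, not the exact persistapi definitions.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// PersistDriver is a simplified stand-in for persistapi.PersistDriver.
type PersistDriver interface {
	Save(id string, data []byte) error
	Destroy(id string) error
}

type fsDriver struct{ root string }

// Save writes the serialized state under <root>/<id>/persist.json.
func (d *fsDriver) Save(id string, data []byte) error {
	dir := filepath.Join(d.root, id)
	if err := os.MkdirAll(dir, 0700); err != nil {
		return err
	}
	return ioutil.WriteFile(filepath.Join(dir, "persist.json"), data, 0600)
}

// Destroy removes all on-disk state for the given sandbox or VM id, which is
// the role store.Destroy(id) plays in the hunks above.
func (d *fsDriver) Destroy(id string) error {
	return os.RemoveAll(filepath.Join(d.root, id))
}

func main() {
	var drv PersistDriver = &fsDriver{root: filepath.Join(os.TempDir(), "vc-sbs")}
	if err := drv.Save("vm-1", []byte(`{"state":"running"}`)); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(drv.Destroy("vm-1"))
}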