diff --git a/.github/workflows/PR-wip-checks.yaml b/.github/workflows/PR-wip-checks.yaml
index 97c35145a..98195b886 100644
--- a/.github/workflows/PR-wip-checks.yaml
+++ b/.github/workflows/PR-wip-checks.yaml
@@ -9,6 +9,10 @@ on:
- labeled
- unlabeled
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
jobs:
pr_wip_check:
runs-on: ubuntu-latest
diff --git a/.github/workflows/add-backport-label.yaml b/.github/workflows/add-backport-label.yaml
index 3df518b54..790ff1721 100644
--- a/.github/workflows/add-backport-label.yaml
+++ b/.github/workflows/add-backport-label.yaml
@@ -10,6 +10,10 @@ on:
- labeled
- unlabeled
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
jobs:
check-issues:
if: ${{ github.event.label.name != 'auto-backport' }}
diff --git a/.github/workflows/add-issues-to-project.yaml b/.github/workflows/add-issues-to-project.yaml
index 93c31e7a1..117e62600 100644
--- a/.github/workflows/add-issues-to-project.yaml
+++ b/.github/workflows/add-issues-to-project.yaml
@@ -11,6 +11,10 @@ on:
- opened
- reopened
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
jobs:
add-new-issues-to-backlog:
runs-on: ubuntu-latest
diff --git a/.github/workflows/add-pr-sizing-label.yaml b/.github/workflows/add-pr-sizing-label.yaml
index ffd9b06a9..313c9f285 100644
--- a/.github/workflows/add-pr-sizing-label.yaml
+++ b/.github/workflows/add-pr-sizing-label.yaml
@@ -12,6 +12,10 @@ on:
- reopened
- synchronize
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
jobs:
add-pr-size-label:
runs-on: ubuntu-latest
diff --git a/.github/workflows/auto-backport.yaml b/.github/workflows/auto-backport.yaml
index 6504dc488..e2be39022 100644
--- a/.github/workflows/auto-backport.yaml
+++ b/.github/workflows/auto-backport.yaml
@@ -2,6 +2,10 @@ on:
pull_request_target:
types: ["labeled", "closed"]
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
jobs:
backport:
name: Backport PR
diff --git a/.github/workflows/build-kata-static-tarball-amd64.yaml b/.github/workflows/build-kata-static-tarball-amd64.yaml
index f0f606850..869d49bc6 100644
--- a/.github/workflows/build-kata-static-tarball-amd64.yaml
+++ b/.github/workflows/build-kata-static-tarball-amd64.yaml
@@ -99,7 +99,7 @@ jobs:
path: kata-artifacts
- name: merge-artifacts
run: |
- ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
+ ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
- name: store-artifacts
uses: actions/upload-artifact@v3
with:
diff --git a/.github/workflows/build-kata-static-tarball-arm64.yaml b/.github/workflows/build-kata-static-tarball-arm64.yaml
index 2ad97a0ba..cafc6e020 100644
--- a/.github/workflows/build-kata-static-tarball-arm64.yaml
+++ b/.github/workflows/build-kata-static-tarball-arm64.yaml
@@ -2,6 +2,10 @@ name: CI | Build kata-static tarball for arm64
on:
workflow_call:
inputs:
+ stage:
+ required: false
+ type: string
+ default: test
tarball-suffix:
required: false
type: string
@@ -29,6 +33,8 @@ jobs:
- rootfs-initrd
- shim-v2
- virtiofsd
+ stage:
+ - ${{ inputs.stage }}
steps:
- name: Adjust a permission for repo
run: |
@@ -83,7 +89,7 @@ jobs:
path: kata-artifacts
- name: merge-artifacts
run: |
- ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
+ ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
- name: store-artifacts
uses: actions/upload-artifact@v3
with:
diff --git a/.github/workflows/build-kata-static-tarball-s390x.yaml b/.github/workflows/build-kata-static-tarball-s390x.yaml
index cf2831033..0fe7e9200 100644
--- a/.github/workflows/build-kata-static-tarball-s390x.yaml
+++ b/.github/workflows/build-kata-static-tarball-s390x.yaml
@@ -2,6 +2,10 @@ name: CI | Build kata-static tarball for s390x
on:
workflow_call:
inputs:
+ stage:
+ required: false
+ type: string
+ default: test
tarball-suffix:
required: false
type: string
@@ -25,6 +29,8 @@ jobs:
- rootfs-initrd
- shim-v2
- virtiofsd
+ stage:
+ - ${{ inputs.stage }}
steps:
- name: Adjust a permission for repo
run: |
@@ -80,7 +86,7 @@ jobs:
path: kata-artifacts
- name: merge-artifacts
run: |
- ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
+ ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
- name: store-artifacts
uses: actions/upload-artifact@v3
with:
diff --git a/.github/workflows/cargo-deny-runner.yaml b/.github/workflows/cargo-deny-runner.yaml
index 65237c7be..21d3d1f53 100644
--- a/.github/workflows/cargo-deny-runner.yaml
+++ b/.github/workflows/cargo-deny-runner.yaml
@@ -7,6 +7,11 @@ on:
- reopened
- synchronize
paths-ignore: [ '**.md', '**.png', '**.jpg', '**.jpeg', '**.svg', '/docs/**' ]
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
jobs:
cargo-deny-runner:
runs-on: ubuntu-latest
diff --git a/.github/workflows/cc-payload-amd64.yaml b/.github/workflows/cc-payload-amd64.yaml
index 56649657b..e3ad241af 100644
--- a/.github/workflows/cc-payload-amd64.yaml
+++ b/.github/workflows/cc-payload-amd64.yaml
@@ -23,7 +23,7 @@ jobs:
- ovmf
- qemu-snp-experimental
- qemu-tdx-experimental
- - cc-sev-rootfs-initrd
+ - rootfs-initrd-sev
- cc-tdx-td-shim
- tdvf
include:
@@ -34,7 +34,7 @@ jobs:
- measured_rootfs: yes
asset: cc-rootfs-image
- measured_rootfs: yes
- asset: cc-tdx-rootfs-image
+ asset: rootfs-image-tdx
steps:
- uses: actions/checkout@v3
- name: Build ${{ matrix.asset }}
diff --git a/.github/workflows/ci-nightly.yaml b/.github/workflows/ci-nightly.yaml
index 9a47ce0e4..5c7676710 100644
--- a/.github/workflows/ci-nightly.yaml
+++ b/.github/workflows/ci-nightly.yaml
@@ -4,6 +4,10 @@ on:
- cron: '0 0 * * *'
workflow_dispatch:
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
jobs:
kata-containers-ci-on-push:
uses: ./.github/workflows/ci.yaml
diff --git a/.github/workflows/ci-on-push.yaml b/.github/workflows/ci-on-push.yaml
index 6d4cc7fc0..99d483720 100644
--- a/.github/workflows/ci-on-push.yaml
+++ b/.github/workflows/ci-on-push.yaml
@@ -14,6 +14,11 @@ on:
- labeled
paths-ignore:
- 'docs/**'
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
jobs:
kata-containers-ci-on-push:
if: ${{ contains(github.event.pull_request.labels.*.name, 'ok-to-test') }}
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index faec7fca4..52a86b08d 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -74,3 +74,24 @@ jobs:
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
+
+ run-cri-containerd-tests:
+ needs: build-kata-static-tarball-amd64
+ uses: ./.github/workflows/run-cri-containerd-tests.yaml
+ with:
+ tarball-suffix: -${{ inputs.tag }}
+ commit-hash: ${{ inputs.commit-hash }}
+
+ run-nydus-tests:
+ needs: build-kata-static-tarball-amd64
+ uses: ./.github/workflows/run-nydus-tests.yaml
+ with:
+ tarball-suffix: -${{ inputs.tag }}
+ commit-hash: ${{ inputs.commit-hash }}
+
+ run-vfio-tests:
+ needs: build-kata-static-tarball-amd64
+ uses: ./.github/workflows/run-vfio-tests.yaml
+ with:
+ tarball-suffix: -${{ inputs.tag }}
+ commit-hash: ${{ inputs.commit-hash }}
diff --git a/.github/workflows/commit-message-check.yaml b/.github/workflows/commit-message-check.yaml
index 20be9f688..9a729be93 100644
--- a/.github/workflows/commit-message-check.yaml
+++ b/.github/workflows/commit-message-check.yaml
@@ -6,6 +6,10 @@ on:
- reopened
- synchronize
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
env:
error_msg: |+
See the document below for help on formatting commits for the project.
diff --git a/.github/workflows/darwin-tests.yaml b/.github/workflows/darwin-tests.yaml
index bf8813776..02bbb0e72 100644
--- a/.github/workflows/darwin-tests.yaml
+++ b/.github/workflows/darwin-tests.yaml
@@ -6,6 +6,11 @@ on:
- reopened
- synchronize
paths-ignore: [ '**.md', '**.png', '**.jpg', '**.jpeg', '**.svg', '/docs/**' ]
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
name: Darwin tests
jobs:
test:
diff --git a/.github/workflows/kata-runtime-classes-sync.yaml b/.github/workflows/kata-runtime-classes-sync.yaml
new file mode 100644
index 000000000..9cb995df1
--- /dev/null
+++ b/.github/workflows/kata-runtime-classes-sync.yaml
@@ -0,0 +1,36 @@
+on:
+ pull_request:
+ types:
+ - opened
+ - edited
+ - reopened
+ - synchronize
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ kata-deploy-runtime-classes-check:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v3
+ - name: Ensure the split out runtime classes match the all-in-one file
+ run: |
+ pushd tools/packaging/kata-deploy/runtimeclasses/
+ echo "::group::Combine runtime classes"
+ for runtimeClass in `find . -type f \( -name "*.yaml" -and -not -name "kata-runtimeClasses.yaml" \) | sort`; do
+ echo "Adding ${runtimeClass} to the resultingRuntimeClasses.yaml"
+ cat ${runtimeClass} >> resultingRuntimeClasses.yaml;
+ done
+ echo "::endgroup::"
+ echo "::group::Displaying the content of resultingRuntimeClasses.yaml"
+ cat resultingRuntimeClasses.yaml
+ echo "::endgroup::"
+ echo ""
+ echo "::group::Displaying the content of kata-runtimeClasses.yaml"
+ cat kata-runtimeClasses.yaml
+ echo "::endgroup::"
+ echo ""
+ diff resultingRuntimeClasses.yaml kata-runtimeClasses.yaml
diff --git a/.github/workflows/payload-after-push.yaml b/.github/workflows/payload-after-push.yaml
index 871d73388..46766c54b 100644
--- a/.github/workflows/payload-after-push.yaml
+++ b/.github/workflows/payload-after-push.yaml
@@ -5,6 +5,10 @@ on:
- main
- stable-*
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
jobs:
build-assets-amd64:
uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index a50313fd0..d732a6723 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -4,6 +4,10 @@ on:
tags:
- '[0-9]+.[0-9]+.[0-9]+*'
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
jobs:
build-and-push-assets-amd64:
uses: ./.github/workflows/release-amd64.yaml
@@ -117,6 +121,21 @@ jobs:
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}"
popd
+ upload-versions-yaml:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - name: upload versions.yaml
+ env:
+ GITHUB_TOKEN: ${{ secrets.GIT_UPLOAD_TOKEN }}
+ run: |
+ tag=$(echo $GITHUB_REF | cut -d/ -f3-)
+ pushd $GITHUB_WORKSPACE
+ versions_file="kata-containers-$tag-versions.yaml"
+ cp versions.yaml ${versions_file}
+ hub release edit -m "" -a "${versions_file}" "${tag}"
+ popd
+
upload-cargo-vendored-tarball:
needs: upload-multi-arch-static-tarball
runs-on: ubuntu-latest
diff --git a/.github/workflows/require-pr-porting-labels.yaml b/.github/workflows/require-pr-porting-labels.yaml
index 585e86bc4..b16e5c371 100644
--- a/.github/workflows/require-pr-porting-labels.yaml
+++ b/.github/workflows/require-pr-porting-labels.yaml
@@ -15,6 +15,10 @@ on:
branches:
- main
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
jobs:
check-pr-porting-labels:
runs-on: ubuntu-latest
diff --git a/.github/workflows/run-cri-containerd-tests.yaml b/.github/workflows/run-cri-containerd-tests.yaml
new file mode 100644
index 000000000..4b439733b
--- /dev/null
+++ b/.github/workflows/run-cri-containerd-tests.yaml
@@ -0,0 +1,42 @@
+name: CI | Run cri-containerd tests
+on:
+ workflow_call:
+ inputs:
+ tarball-suffix:
+ required: false
+ type: string
+ commit-hash:
+ required: false
+ type: string
+
+jobs:
+ run-cri-containerd:
+ strategy:
+ fail-fast: true
+ matrix:
+ containerd_version: ['lts', 'active']
+ vmm: ['clh', 'qemu']
+ runs-on: garm-ubuntu-2204
+ env:
+ CONTAINERD_VERSION: ${{ matrix.containerd_version }}
+ GOPATH: ${{ github.workspace }}
+ KATA_HYPERVISOR: ${{ matrix.vmm }}
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ inputs.commit-hash }}
+
+ - name: Install dependencies
+ run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
+
+ - name: get-kata-tarball
+ uses: actions/download-artifact@v3
+ with:
+ name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
+ path: kata-artifacts
+
+ - name: Install kata
+ run: bash tests/integration/cri-containerd/gha-run.sh install-kata kata-artifacts
+
+ - name: Run cri-containerd tests
+ run: bash tests/integration/cri-containerd/gha-run.sh run
diff --git a/.github/workflows/run-k8s-tests-on-aks.yaml b/.github/workflows/run-k8s-tests-on-aks.yaml
index d8658270a..130be1829 100644
--- a/.github/workflows/run-k8s-tests-on-aks.yaml
+++ b/.github/workflows/run-k8s-tests-on-aks.yaml
@@ -40,37 +40,43 @@ jobs:
GH_PR_NUMBER: ${{ inputs.pr-number }}
KATA_HOST_OS: ${{ matrix.host_os }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
+ USING_NFD: "false"
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
- name: Download Azure CLI
- run: bash tests/integration/gha-run.sh install-azure-cli
+ run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli
- name: Log into the Azure account
- run: bash tests/integration/gha-run.sh login-azure
+ run: bash tests/integration/kubernetes/gha-run.sh login-azure
env:
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
- name: Create AKS cluster
- run: bash tests/integration/gha-run.sh create-cluster
+ timeout-minutes: 10
+ run: bash tests/integration/kubernetes/gha-run.sh create-cluster
- name: Install `bats`
- run: bash tests/integration/gha-run.sh install-bats
+ run: bash tests/integration/kubernetes/gha-run.sh install-bats
- name: Install `kubectl`
- run: bash tests/integration/gha-run.sh install-kubectl
+ run: bash tests/integration/kubernetes/gha-run.sh install-kubectl
- name: Download credentials for the Kubernetes CLI to use them
- run: bash tests/integration/gha-run.sh get-cluster-credentials
+ run: bash tests/integration/kubernetes/gha-run.sh get-cluster-credentials
+ - name: Deploy Kata
+ timeout-minutes: 10
+ run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-aks
+
- name: Run tests
timeout-minutes: 60
- run: bash tests/integration/gha-run.sh run-tests-aks
+ run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Delete AKS cluster
if: always()
- run: bash tests/integration/gha-run.sh delete-cluster
+ run: bash tests/integration/kubernetes/gha-run.sh delete-cluster
diff --git a/.github/workflows/run-k8s-tests-on-sev.yaml b/.github/workflows/run-k8s-tests-on-sev.yaml
index 3fc4ca835..a48425e1f 100644
--- a/.github/workflows/run-k8s-tests-on-sev.yaml
+++ b/.github/workflows/run-k8s-tests-on-sev.yaml
@@ -29,15 +29,20 @@ jobs:
DOCKER_TAG: ${{ inputs.tag }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBECONFIG: /home/kata/.kube/config
+ USING_NFD: "false"
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
+ - name: Deploy Kata
+ timeout-minutes: 10
+ run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-sev
+
- name: Run tests
timeout-minutes: 30
- run: bash tests/integration/gha-run.sh run-tests-sev
+ run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Delete kata-deploy
if: always()
- run: bash tests/integration/gha-run.sh cleanup-sev
+ run: bash tests/integration/kubernetes/gha-run.sh cleanup-sev
diff --git a/.github/workflows/run-k8s-tests-on-snp.yaml b/.github/workflows/run-k8s-tests-on-snp.yaml
index 8aa1763d2..7196a9a1b 100644
--- a/.github/workflows/run-k8s-tests-on-snp.yaml
+++ b/.github/workflows/run-k8s-tests-on-snp.yaml
@@ -29,15 +29,20 @@ jobs:
DOCKER_TAG: ${{ inputs.tag }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBECONFIG: /home/kata/.kube/config
+ USING_NFD: "false"
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
+ - name: Deploy Kata
+ timeout-minutes: 10
+ run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-snp
+
- name: Run tests
timeout-minutes: 30
- run: bash tests/integration/gha-run.sh run-tests-snp
-
+ run: bash tests/integration/kubernetes/gha-run.sh run-tests
+
- name: Delete kata-deploy
if: always()
- run: bash tests/integration/gha-run.sh cleanup-snp
+ run: bash tests/integration/kubernetes/gha-run.sh cleanup-snp
diff --git a/.github/workflows/run-k8s-tests-on-tdx.yaml b/.github/workflows/run-k8s-tests-on-tdx.yaml
index ccbc16db7..a3899177c 100644
--- a/.github/workflows/run-k8s-tests-on-tdx.yaml
+++ b/.github/workflows/run-k8s-tests-on-tdx.yaml
@@ -28,16 +28,20 @@ jobs:
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
- KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+ USING_NFD: "true"
steps:
- uses: actions/checkout@v3
with:
ref: ${{ inputs.commit-hash }}
+ - name: Deploy Kata
+ timeout-minutes: 10
+ run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-tdx
+
- name: Run tests
timeout-minutes: 30
- run: bash tests/integration/gha-run.sh run-tests-tdx
-
+ run: bash tests/integration/kubernetes/gha-run.sh run-tests
+
- name: Delete kata-deploy
if: always()
- run: bash tests/integration/gha-run.sh cleanup-tdx
+ run: bash tests/integration/kubernetes/gha-run.sh cleanup-tdx
diff --git a/.github/workflows/run-metrics.yaml b/.github/workflows/run-metrics.yaml
index 92a5f8af9..f1ac9d61a 100644
--- a/.github/workflows/run-metrics.yaml
+++ b/.github/workflows/run-metrics.yaml
@@ -46,6 +46,9 @@ jobs:
- name: run blogbench test
run: bash tests/metrics/gha-run.sh run-test-blogbench
+ - name: run tensorflow test
+ run: bash tests/metrics/gha-run.sh run-test-tensorflow
+
- name: make metrics tarball ${{ matrix.vmm }}
run: bash tests/metrics/gha-run.sh make-tarball-results
diff --git a/.github/workflows/run-nydus-tests.yaml b/.github/workflows/run-nydus-tests.yaml
new file mode 100644
index 000000000..647582c08
--- /dev/null
+++ b/.github/workflows/run-nydus-tests.yaml
@@ -0,0 +1,42 @@
+name: CI | Run nydus tests
+on:
+ workflow_call:
+ inputs:
+ tarball-suffix:
+ required: false
+ type: string
+ commit-hash:
+ required: false
+ type: string
+
+jobs:
+ run-nydus:
+ strategy:
+ fail-fast: true
+ matrix:
+ containerd_version: ['lts', 'active']
+ vmm: ['clh', 'qemu', 'dragonball']
+ runs-on: garm-ubuntu-2204
+ env:
+ CONTAINERD_VERSION: ${{ matrix.containerd_version }}
+ GOPATH: ${{ github.workspace }}
+ KATA_HYPERVISOR: ${{ matrix.vmm }}
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ inputs.commit-hash }}
+
+ - name: Install dependencies
+ run: bash tests/integration/nydus/gha-run.sh install-dependencies
+
+ - name: get-kata-tarball
+ uses: actions/download-artifact@v3
+ with:
+ name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
+ path: kata-artifacts
+
+ - name: Install kata
+ run: bash tests/integration/nydus/gha-run.sh install-kata kata-artifacts
+
+ - name: Run nydus tests
+ run: bash tests/integration/nydus/gha-run.sh run
diff --git a/.github/workflows/run-vfio-tests.yaml b/.github/workflows/run-vfio-tests.yaml
new file mode 100644
index 000000000..ba34d2088
--- /dev/null
+++ b/.github/workflows/run-vfio-tests.yaml
@@ -0,0 +1,37 @@
+name: CI | Run vfio tests
+on:
+ workflow_call:
+ inputs:
+ tarball-suffix:
+ required: false
+ type: string
+ commit-hash:
+ required: false
+ type: string
+
+jobs:
+ run-vfio:
+ strategy:
+ fail-fast: false
+ matrix:
+ vmm: ['clh', 'qemu']
+ runs-on: garm-ubuntu-2204
+ env:
+ GOPATH: ${{ github.workspace }}
+ KATA_HYPERVISOR: ${{ matrix.vmm }}
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ inputs.commit-hash }}
+
+ - name: Install dependencies
+ run: bash tests/functional/vfio/gha-run.sh install-dependencies
+
+ - name: get-kata-tarball
+ uses: actions/download-artifact@v3
+ with:
+ name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
+ path: kata-artifacts
+
+ - name: Run vfio tests
+ run: bash tests/functional/vfio/gha-run.sh run
diff --git a/.github/workflows/static-checks-dragonball.yaml b/.github/workflows/static-checks-dragonball.yaml
index 61e3fe2c4..d47689e3a 100644
--- a/.github/workflows/static-checks-dragonball.yaml
+++ b/.github/workflows/static-checks-dragonball.yaml
@@ -7,10 +7,14 @@ on:
- synchronize
paths-ignore: [ '**.md', '**.png', '**.jpg', '**.jpeg', '**.svg', '/docs/**' ]
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
name: Static checks dragonball
jobs:
test-dragonball:
- runs-on: self-hosted
+ runs-on: dragonball
env:
RUST_BACKTRACE: "1"
steps:
diff --git a/.github/workflows/static-checks.yaml b/.github/workflows/static-checks.yaml
index 616e9f5ab..bd9f76027 100644
--- a/.github/workflows/static-checks.yaml
+++ b/.github/workflows/static-checks.yaml
@@ -6,6 +6,10 @@ on:
- reopened
- synchronize
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
name: Static checks
jobs:
static-checks:
diff --git a/Makefile b/Makefile
index e70af93e4..0765ae2b6 100644
--- a/Makefile
+++ b/Makefile
@@ -24,6 +24,10 @@ TOOLS += trace-forwarder
STANDARD_TARGETS = build check clean install static-checks-build test vendor
+# Variables for the build-and-publish-kata-debug target
+KATA_DEBUG_REGISTRY ?= ""
+KATA_DEBUG_TAG ?= ""
+
default: all
include utils.mk
@@ -44,6 +48,9 @@ static-checks: static-checks-build
docs-url-alive-check:
bash ci/docs-url-alive-check.sh
+build-and-publish-kata-debug:
+ bash tools/packaging/kata-debug/kata-debug-build-and-upload-payload.sh ${KATA_DEBUG_REGISTRY} ${KATA_DEBUG_TAG}
+
.PHONY: \
all \
kata-tarball \
diff --git a/README.md b/README.md
index 78a62179c..d34110056 100644
--- a/README.md
+++ b/README.md
@@ -134,6 +134,7 @@ The table below lists the remaining parts of the project:
| [packaging](tools/packaging) | infrastructure | Scripts and metadata for producing packaged binaries (components, hypervisors, kernel and rootfs). |
| [kernel](https://www.kernel.org) | kernel | Linux kernel used by the hypervisor to boot the guest image. Patches are stored [here](tools/packaging/kernel). |
| [osbuilder](tools/osbuilder) | infrastructure | Tool to create "mini O/S" rootfs and initrd images and kernel for the hypervisor. |
+| [kata-debug](tools/packaging/kata-debug/README.md) | infrastructure | Utility tool to gather Kata Containers debug information from Kubernetes clusters. |
| [`agent-ctl`](src/tools/agent-ctl) | utility | Tool that provides low-level access for testing the agent. |
| [`kata-ctl`](src/tools/kata-ctl) | utility | Tool that provides advanced commands and debug facilities. |
| [`log-parser-rs`](src/tools/log-parser-rs) | utility | Tool that aids in analyzing logs from the kata runtime. |
diff --git a/VERSION b/VERSION
index bb48c8b0a..ed590bd2a 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-3.2.0-alpha3
+3.2.0-alpha4
diff --git a/docs/design/architecture/kubernetes.md b/docs/design/architecture/kubernetes.md
index be8165d75..8d4d3144c 100644
--- a/docs/design/architecture/kubernetes.md
+++ b/docs/design/architecture/kubernetes.md
@@ -3,11 +3,11 @@
[Kubernetes](https://github.com/kubernetes/kubernetes/), or K8s, is a popular open source
container orchestration engine. In Kubernetes, a set of containers sharing resources
such as networking, storage, mount, PID, etc. is called a
-[pod](https://kubernetes.io/docs/user-guide/pods/).
+[pod](https://kubernetes.io/docs/concepts/workloads/pods/).
A node can have multiple pods, but at a minimum, a node within a Kubernetes cluster
only needs to run a container runtime and a container agent (called a
-[Kubelet](https://kubernetes.io/docs/admin/kubelet/)).
+[Kubelet](https://kubernetes.io/docs/concepts/overview/components/#kubelet)).
Kata Containers represents a Kubelet pod as a VM.
diff --git a/src/agent/Cargo.lock b/src/agent/Cargo.lock
index c984fbf47..9222389e2 100644
--- a/src/agent/Cargo.lock
+++ b/src/agent/Cargo.lock
@@ -2081,6 +2081,7 @@ dependencies = [
"slog",
"slog-scope",
"slog-stdlog",
+ "slog-term",
"tempfile",
"test-utils",
"thiserror",
@@ -2100,6 +2101,7 @@ dependencies = [
name = "kata-sys-util"
version = "0.1.0"
dependencies = [
+ "anyhow",
"byteorder",
"cgroups-rs",
"chrono",
diff --git a/src/agent/Cargo.toml b/src/agent/Cargo.toml
index 358412a85..cf8f97209 100644
--- a/src/agent/Cargo.toml
+++ b/src/agent/Cargo.toml
@@ -44,6 +44,7 @@ ipnetwork = "0.17.0"
logging = { path = "../libs/logging" }
slog = "2.5.2"
slog-scope = "4.1.2"
+slog-term = "2.9.0"
# Redirect ttrpc log calls
slog-stdlog = "4.0.0"
diff --git a/src/agent/Makefile b/src/agent/Makefile
index 69423edda..ba065b4d0 100644
--- a/src/agent/Makefile
+++ b/src/agent/Makefile
@@ -26,7 +26,7 @@ export VERSION_COMMIT := $(if $(COMMIT),$(VERSION)-$(COMMIT),$(VERSION))
EXTRA_RUSTFEATURES :=
##VAR SECCOMP=yes|no define if agent enables seccomp feature
-SECCOMP := yes
+SECCOMP ?= yes
# Enable seccomp feature of rust build
ifeq ($(SECCOMP),yes)
diff --git a/src/agent/rustjail/src/mount.rs b/src/agent/rustjail/src/mount.rs
index d9ba15041..b822736dc 100644
--- a/src/agent/rustjail/src/mount.rs
+++ b/src/agent/rustjail/src/mount.rs
@@ -1118,6 +1118,7 @@ mod tests {
use std::fs::create_dir;
use std::fs::create_dir_all;
use std::fs::remove_dir_all;
+ use std::fs::remove_file;
use std::io;
use std::os::unix::fs;
use std::os::unix::io::AsRawFd;
@@ -1333,14 +1334,9 @@ mod tests {
fn test_mknod_dev() {
skip_if_not_root!();
- let tempdir = tempdir().unwrap();
-
- let olddir = unistd::getcwd().unwrap();
- defer!(let _ = unistd::chdir(&olddir););
- let _ = unistd::chdir(tempdir.path());
-
+ let path = "/dev/fifo-test";
let dev = oci::LinuxDevice {
- path: "/fifo".to_string(),
+ path: path.to_string(),
r#type: "c".to_string(),
major: 0,
minor: 0,
@@ -1348,13 +1344,16 @@ mod tests {
uid: Some(unistd::getuid().as_raw()),
gid: Some(unistd::getgid().as_raw()),
};
- let path = Path::new("fifo");
- let ret = mknod_dev(&dev, path);
+ let ret = mknod_dev(&dev, Path::new(path));
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
let ret = stat::stat(path);
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
+
+ // clear test device node
+ let ret = remove_file(path);
+ assert!(ret.is_ok(), "Should pass, Got: {:?}", ret);
}
#[test]
diff --git a/src/agent/rustjail/src/process.rs b/src/agent/rustjail/src/process.rs
index 0e7fe73ef..cdecae130 100644
--- a/src/agent/rustjail/src/process.rs
+++ b/src/agent/rustjail/src/process.rs
@@ -161,7 +161,7 @@ impl Process {
pub fn notify_term_close(&mut self) {
let notify = self.term_exit_notifier.clone();
- notify.notify_one();
+ notify.notify_waiters();
}
pub fn close_stdin(&mut self) {
diff --git a/src/agent/src/linux_abi.rs b/src/agent/src/linux_abi.rs
index de131faf0..b87da3ceb 100644
--- a/src/agent/src/linux_abi.rs
+++ b/src/agent/src/linux_abi.rs
@@ -33,7 +33,7 @@ pub fn create_pci_root_bus_path() -> String {
// check if there is pci bus path for acpi
acpi_sysfs_dir.push_str(&acpi_root_bus_path);
- if let Ok(_) = fs::metadata(&acpi_sysfs_dir) {
+ if fs::metadata(&acpi_sysfs_dir).is_ok() {
return acpi_root_bus_path;
}
diff --git a/src/agent/src/mount.rs b/src/agent/src/mount.rs
index 5b0d95c19..d80aea4c6 100644
--- a/src/agent/src/mount.rs
+++ b/src/agent/src/mount.rs
@@ -36,6 +36,7 @@ use crate::Sandbox;
use crate::{ccw, device::get_virtio_blk_ccw_device_name};
use anyhow::{anyhow, Context, Result};
use slog::Logger;
+
use tracing::instrument;
pub const TYPE_ROOTFS: &str = "rootfs";
@@ -145,6 +146,11 @@ pub const STORAGE_HANDLER_LIST: &[&str] = &[
DRIVER_WATCHABLE_BIND_TYPE,
];
+#[instrument]
+pub fn get_mounts() -> std::io::Result<String> {
+ fs::read_to_string("/proc/mounts")
+}
+
#[instrument]
pub fn baremount(
source: &Path,
@@ -168,6 +174,31 @@ pub fn baremount(
return Err(anyhow!("need mount FS type"));
}
+ let destination_str = destination.to_string_lossy();
+ let mounts = get_mounts().unwrap_or_else(|_| String::new());
+ let already_mounted = mounts
+ .lines()
+ .map(|line| line.split_whitespace().collect::<Vec<&str>>())
+ .filter(|parts| parts.len() >= 3) // ensure we have at least source, destination, and fs_type
+ .any(|parts| {
+ // Check if source, destination and fs_type match any entry in /proc/mounts
+ // minimal check is for destination and fs_type since source can have different names like:
+ // udev /dev devtmpfs
+ // dev /dev devtmpfs
+ // depending on which entity is mounting the dev/fs/pseudo-fs
+ parts[1] == destination_str && parts[2] == fs_type
+ });
+
+ if already_mounted {
+ slog_info!(
+ logger,
+ "{:?} is already mounted at {:?}",
+ source,
+ destination
+ );
+ return Ok(());
+ }
+
info!(
logger,
"baremount source={:?}, dest={:?}, fs_type={:?}, options={:?}, flags={:?}",
@@ -725,6 +756,14 @@ pub fn recursive_ownership_change(
mask |= EXEC_MASK;
mask |= MODE_SETGID;
}
+
+ // We do not want to change the permission of the underlying file
+ // using symlink. Hence we skip symlinks from recursive ownership
+ // and permission changes.
+ if path.is_symlink() {
+ return Ok(());
+ }
+
nix::unistd::chown(path, uid, gid)?;
if gid.is_some() {
@@ -1102,6 +1141,7 @@ fn parse_options(option_list: Vec<String>) -> HashMap<String, String> {
mod tests {
use super::*;
use protocols::agent::FSGroup;
+ use slog::Drain;
use std::fs::File;
use std::fs::OpenOptions;
use std::io::Write;
@@ -1112,6 +1152,31 @@ mod tests {
skip_if_not_root, skip_loop_by_user, skip_loop_if_not_root, skip_loop_if_root,
};
+ #[test]
+ fn test_already_baremounted() {
+ let plain = slog_term::PlainSyncDecorator::new(std::io::stdout());
+ let logger = Logger::root(slog_term::FullFormat::new(plain).build().fuse(), o!());
+
+ let test_cases = [
+ ("dev", "/dev", "devtmpfs"),
+ ("udev", "/dev", "devtmpfs"),
+ ("proc", "/proc", "proc"),
+ ("sysfs", "/sys", "sysfs"),
+ ];
+
+ for &(source, destination, fs_type) in &test_cases {
+ let source = Path::new(source);
+ let destination = Path::new(destination);
+ let flags = MsFlags::MS_RDONLY;
+ let options = "mode=755";
+ println!(
+ "testing if already mounted baremount({:?} {:?} {:?})",
+ source, destination, fs_type
+ );
+ assert!(baremount(source, destination, fs_type, flags, options, &logger).is_ok());
+ }
+ }
+
#[test]
fn test_mount() {
#[derive(Debug)]
diff --git a/src/agent/src/rpc.rs b/src/agent/src/rpc.rs
index 4e7429e49..439521fae 100644
--- a/src/agent/src/rpc.rs
+++ b/src/agent/src/rpc.rs
@@ -665,15 +665,16 @@ impl AgentService {
let cid = req.container_id;
let eid = req.exec_id;
- let mut term_exit_notifier = Arc::new(tokio::sync::Notify::new());
+ let term_exit_notifier;
let reader = {
let s = self.sandbox.clone();
let mut sandbox = s.lock().await;
let p = sandbox.find_container_process(cid.as_str(), eid.as_str())?;
+ term_exit_notifier = p.term_exit_notifier.clone();
+
if p.term_master.is_some() {
- term_exit_notifier = p.term_exit_notifier.clone();
p.get_reader(StreamType::TermMaster)
} else if stdout {
if p.parent_stdout.is_some() {
@@ -693,9 +694,12 @@ impl AgentService {
let reader = reader.ok_or_else(|| anyhow!("cannot get stream reader"))?;
tokio::select! {
- _ = term_exit_notifier.notified() => {
- Err(anyhow!("eof"))
- }
+ // Poll the futures in the order they appear from top to bottom
+ // it is very important to avoid data loss. If there is still
+ // data in the buffer, the read_stream branch will return
+ // Poll::Ready, so the term_exit_notifier will never be polled
+ // before all the data has been read.
+ biased;
v = read_stream(reader, req.len as usize) => {
let vector = v?;
let mut resp = ReadStreamResponse::new();
@@ -703,6 +707,9 @@ impl AgentService {
Ok(resp)
}
+ _ = term_exit_notifier.notified() => {
+ Err(anyhow!("eof"))
+ }
}
}
diff --git a/src/agent/src/sandbox.rs b/src/agent/src/sandbox.rs
index 24b678747..b0caa0154 100644
--- a/src/agent/src/sandbox.rs
+++ b/src/agent/src/sandbox.rs
@@ -435,7 +435,7 @@ fn online_resources(logger: &Logger, path: &str, pattern: &str, num: i32) -> Res
}
// max wait for all CPUs to online will use 50 * 100 = 5 seconds.
-const ONLINE_CPUMEM_WATI_MILLIS: u64 = 50;
+const ONLINE_CPUMEM_WAIT_MILLIS: u64 = 50;
const ONLINE_CPUMEM_MAX_RETRIES: i32 = 100;
#[instrument]
@@ -465,7 +465,7 @@ fn online_cpus(logger: &Logger, num: i32) -> Result<i32> {
);
return Ok(num);
}
- thread::sleep(time::Duration::from_millis(ONLINE_CPUMEM_WATI_MILLIS));
+ thread::sleep(time::Duration::from_millis(ONLINE_CPUMEM_WAIT_MILLIS));
}
Err(anyhow!(
diff --git a/src/agent/src/signal.rs b/src/agent/src/signal.rs
index d67000b80..401ded953 100644
--- a/src/agent/src/signal.rs
+++ b/src/agent/src/signal.rs
@@ -57,7 +57,7 @@ async fn handle_sigchild(logger: Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
continue;
}
- let mut p = process.unwrap();
+ let p = process.unwrap();
let ret: i32 = match wait_status {
WaitStatus::Exited(_, c) => c,
diff --git a/src/dragonball/Cargo.lock b/src/dragonball/Cargo.lock
index 0ed990c0b..b71455729 100644
--- a/src/dragonball/Cargo.lock
+++ b/src/dragonball/Cargo.lock
@@ -210,8 +210,6 @@ dependencies = [
[[package]]
name = "dbs-address-space"
version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95e20d28a9cd13bf00d0ecd1bd073d242242b04f0acb663d7adfc659f8879322"
dependencies = [
"arc-swap",
"lazy_static",
@@ -225,8 +223,6 @@ dependencies = [
[[package]]
name = "dbs-allocator"
version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "543711b94b4bc1437d2ebb45f856452e96a45a67ab39f8dcf8c887c2a3701004"
dependencies = [
"thiserror",
]
@@ -234,8 +230,6 @@ dependencies = [
[[package]]
name = "dbs-arch"
version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "194c844946cd1d13f7a9eb29b84afbc5354578eee2b06fea96226bc3872e7424"
dependencies = [
"kvm-bindings",
"kvm-ioctls",
@@ -249,8 +243,6 @@ dependencies = [
[[package]]
name = "dbs-boot"
version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5466a92f75aa928a9103dcb2088f6d1638ef9da8945fad7389a73864dfa0182c"
dependencies = [
"dbs-arch",
"kvm-bindings",
@@ -265,8 +257,6 @@ dependencies = [
[[package]]
name = "dbs-device"
version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14ecea44b4bc861c0c2ccb51868bea781286dc70e40ae46b54d4511e690a654a"
dependencies = [
"thiserror",
]
@@ -274,8 +264,6 @@ dependencies = [
[[package]]
name = "dbs-interrupt"
version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1eb2c5bb9f8f123ace33b1b2e8d53dd2d87331ee770ad1f82e56c3382c6bed6d"
dependencies = [
"dbs-arch",
"dbs-device",
@@ -288,11 +276,10 @@ dependencies = [
[[package]]
name = "dbs-legacy-devices"
version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4d089ac1c4d186c8133be59de09462e9793f7add10017c5b040318a3a7f431f"
dependencies = [
"dbs-device",
"dbs-utils",
+ "libc",
"log",
"serde",
"vm-superio",
@@ -302,8 +289,6 @@ dependencies = [
[[package]]
name = "dbs-upcall"
version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ea3a78128fd0be8b8b10257675c262b378dc5d00b1e18157736a6c27e45ce4fb"
dependencies = [
"anyhow",
"dbs-utils",
@@ -316,8 +301,6 @@ dependencies = [
[[package]]
name = "dbs-utils"
version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0cb6ff873451b76e22789af7fbe1d0478c42c717f817e66908be7a3a2288068c"
dependencies = [
"anyhow",
"event-manager",
@@ -332,8 +315,6 @@ dependencies = [
[[package]]
name = "dbs-virtio-devices"
version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24d671cc3e5f98b84ef6b6bed007d28f72f16d3aea8eb38e2d42b00b2973c1d8"
dependencies = [
"byteorder",
"caps",
@@ -349,9 +330,10 @@ dependencies = [
"log",
"nix 0.24.3",
"nydus-api",
- "nydus-blobfs",
"nydus-rafs",
+ "nydus-storage",
"rlimit",
+ "sendfd",
"serde",
"serde_json",
"thiserror",
@@ -498,10 +480,25 @@ dependencies = [
]
[[package]]
-name = "fuse-backend-rs"
-version = "0.10.2"
+name = "foreign-types"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "08af89cb80a7c5693bd63a2b1ee7ac31a307670977c18fda036b3aa94be8c47f"
+checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
+dependencies = [
+ "foreign-types-shared",
+]
+
+[[package]]
+name = "foreign-types-shared"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
+
+[[package]]
+name = "fuse-backend-rs"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc24820b14267bec37fa87f5c2a32b5f1c5405b8c60cc3aa77afd481bd2628a6"
dependencies = [
"arc-swap",
"bitflags",
@@ -518,95 +515,6 @@ dependencies = [
"vmm-sys-util 0.10.0",
]
-[[package]]
-name = "futures"
-version = "0.3.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84"
-dependencies = [
- "futures-channel",
- "futures-core",
- "futures-executor",
- "futures-io",
- "futures-sink",
- "futures-task",
- "futures-util",
-]
-
-[[package]]
-name = "futures-channel"
-version = "0.3.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5"
-dependencies = [
- "futures-core",
- "futures-sink",
-]
-
-[[package]]
-name = "futures-core"
-version = "0.3.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608"
-
-[[package]]
-name = "futures-executor"
-version = "0.3.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e"
-dependencies = [
- "futures-core",
- "futures-task",
- "futures-util",
-]
-
-[[package]]
-name = "futures-io"
-version = "0.3.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531"
-
-[[package]]
-name = "futures-macro"
-version = "0.3.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "futures-sink"
-version = "0.3.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364"
-
-[[package]]
-name = "futures-task"
-version = "0.3.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366"
-
-[[package]]
-name = "futures-util"
-version = "0.3.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1"
-dependencies = [
- "futures-channel",
- "futures-core",
- "futures-io",
- "futures-macro",
- "futures-sink",
- "futures-task",
- "memchr",
- "pin-project-lite",
- "pin-utils",
- "slab",
-]
-
[[package]]
name = "generic-array"
version = "0.14.6"
@@ -891,82 +799,45 @@ dependencies = [
[[package]]
name = "nydus-api"
-version = "0.2.2"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1899def1a22ed32b1d60de4e444f525c4023a208ee0d1136a65399cff82837ce"
+checksum = "33a6ca41dd10813e3d29397550fbb0f15ad149381f312e04659d39e0adcf2002"
dependencies = [
+ "backtrace",
"libc",
"log",
- "nydus-error",
"serde",
"serde_json",
"toml",
]
-[[package]]
-name = "nydus-blobfs"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "784cf6e1319da7a94734987dcc71d2940f74231256922431a505c832fc778dd3"
-dependencies = [
- "fuse-backend-rs",
- "libc",
- "log",
- "nydus-api",
- "nydus-error",
- "nydus-rafs",
- "nydus-storage",
- "serde",
- "serde_json",
- "vm-memory",
-]
-
-[[package]]
-name = "nydus-error"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae2ec1efd1589377dbefca6b1047294c71b2fbab164d93319f97b20faae92001"
-dependencies = [
- "backtrace",
- "httpdate",
- "libc",
- "log",
- "serde",
- "serde_json",
-]
-
[[package]]
name = "nydus-rafs"
-version = "0.2.2"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0ace6945daa16842e72e9fe7647e2b8715856f50f07350cce82bd68db1ed02c"
+checksum = "ed21e44a99472850d2afc4fb07427ed46d4e6a8b1cce28b42bd689319e45076d"
dependencies = [
"anyhow",
"arc-swap",
"bitflags",
- "blake3",
"fuse-backend-rs",
- "futures",
"lazy_static",
"libc",
"log",
- "lz4-sys",
"nix 0.24.3",
"nydus-api",
- "nydus-error",
"nydus-storage",
"nydus-utils",
"serde",
"serde_json",
- "spmc",
"vm-memory",
]
[[package]]
name = "nydus-storage"
-version = "0.6.2"
+version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e08bc5ea9054fca2ec8b19dcce25ea600679b7fbf035aad86cfe4a659002c88b"
+checksum = "9591fbee1875895bf1f765656695d0be6887fe65372fbf4924b8b3959bd61375"
dependencies = [
"arc-swap",
"bitflags",
@@ -978,7 +849,6 @@ dependencies = [
"log",
"nix 0.24.3",
"nydus-api",
- "nydus-error",
"nydus-utils",
"serde",
"serde_json",
@@ -989,12 +859,13 @@ dependencies = [
[[package]]
name = "nydus-utils"
-version = "0.4.1"
+version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d1e681d7207a1ec500323d5ca39ebb7e381fc4f14db5ff0c532c18ff1226a81f"
+checksum = "fe8b9269e3a370682f272a1b2cac4bdaf6d6657f3f6966560c4fedab36548362"
dependencies = [
"blake3",
"flate2",
+ "httpdate",
"lazy_static",
"libc",
"libz-sys",
@@ -1002,7 +873,8 @@ dependencies = [
"lz4",
"lz4-sys",
"nix 0.24.3",
- "nydus-error",
+ "nydus-api",
+ "openssl",
"serde",
"serde_json",
"sha2",
@@ -1025,6 +897,54 @@ version = "1.17.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
+[[package]]
+name = "openssl"
+version = "0.10.55"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d"
+dependencies = [
+ "bitflags",
+ "cfg-if",
+ "foreign-types",
+ "libc",
+ "once_cell",
+ "openssl-macros",
+ "openssl-sys",
+]
+
+[[package]]
+name = "openssl-macros"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.26",
+]
+
+[[package]]
+name = "openssl-src"
+version = "111.26.0+1.1.1u"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "efc62c9f12b22b8f5208c23a7200a442b2e5999f8bdf80233852122b5a4f6f37"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "openssl-sys"
+version = "0.9.90"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6"
+dependencies = [
+ "cc",
+ "libc",
+ "openssl-src",
+ "pkg-config",
+ "vcpkg",
+]
+
[[package]]
name = "parking_lot"
version = "0.12.1"
@@ -1054,12 +974,6 @@ version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116"
-[[package]]
-name = "pin-utils"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
-
[[package]]
name = "pkg-config"
version = "0.3.26"
@@ -1068,18 +982,18 @@ checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160"
[[package]]
name = "proc-macro2"
-version = "1.0.51"
+version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6"
+checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
-version = "1.0.23"
+version = "1.0.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"
+checksum = "5fe8a65d69dd0808184ebb5f836ab526bb259db23c657efa38711b1072ee47f0"
dependencies = [
"proc-macro2",
]
@@ -1166,6 +1080,15 @@ dependencies = [
"libc",
]
+[[package]]
+name = "sendfd"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "604b71b8fc267e13bb3023a2c901126c8f349393666a6d98ac1ae5729b701798"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "serde"
version = "1.0.156"
@@ -1183,7 +1106,7 @@ checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.109",
]
[[package]]
@@ -1275,12 +1198,6 @@ dependencies = [
"winapi",
]
-[[package]]
-name = "spmc"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02a8428da277a8e3a15271d79943e80ccc2ef254e78813a166a08d65e4c3ece5"
-
[[package]]
name = "subtle"
version = "2.4.1"
@@ -1298,6 +1215,17 @@ dependencies = [
"unicode-ident",
]
+[[package]]
+name = "syn"
+version = "2.0.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "45c3457aacde3c65315de5031ec191ce46604304d2446e803d71ade03308d970"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
[[package]]
name = "take_mut"
version = "0.2.2"
@@ -1350,7 +1278,7 @@ checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.109",
]
[[package]]
@@ -1434,7 +1362,7 @@ checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.109",
]
[[package]]
@@ -1480,7 +1408,7 @@ checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.109",
]
[[package]]
diff --git a/src/dragonball/Cargo.toml b/src/dragonball/Cargo.toml
index b0e03047d..08bd34a8f 100644
--- a/src/dragonball/Cargo.toml
+++ b/src/dragonball/Cargo.toml
@@ -12,16 +12,16 @@ edition = "2018"
[dependencies]
arc-swap = "1.5.0"
bytes = "1.1.0"
-dbs-address-space = "0.3.0"
-dbs-allocator = "0.1.0"
-dbs-arch = "0.2.0"
-dbs-boot = "0.4.0"
-dbs-device = "0.2.0"
-dbs-interrupt = { version = "0.2.0", features = ["kvm-irq"] }
-dbs-legacy-devices = "0.1.0"
-dbs-upcall = { version = "0.3.0", optional = true }
-dbs-utils = "0.2.0"
-dbs-virtio-devices = { version = "0.3.1", optional = true, features = ["virtio-mmio"] }
+dbs-address-space = { path = "./src/dbs_address_space" }
+dbs-allocator = { path = "./src/dbs_allocator" }
+dbs-arch = { path = "./src/dbs_arch" }
+dbs-boot = { path = "./src/dbs_boot" }
+dbs-device = { path = "./src/dbs_device" }
+dbs-interrupt = { path = "./src/dbs_interrupt", features = ["kvm-irq"] }
+dbs-legacy-devices = { path = "./src/dbs_legacy_devices" }
+dbs-upcall = { path = "./src/dbs_upcall" , optional = true }
+dbs-utils = { path = "./src/dbs_utils" }
+dbs-virtio-devices = { path = "./src/dbs_virtio_devices", optional = true, features = ["virtio-mmio"] }
kvm-bindings = "0.6.0"
kvm-ioctls = "0.12.0"
lazy_static = "1.2"
diff --git a/src/dragonball/Makefile b/src/dragonball/Makefile
index ab8e5b694..68ee3bd46 100644
--- a/src/dragonball/Makefile
+++ b/src/dragonball/Makefile
@@ -39,12 +39,15 @@ clean:
test:
ifdef SUPPORT_VIRTUALIZATION
- cargo test --all-features --target $(TRIPLE) -- --nocapture
+ RUST_BACKTRACE=1 cargo test --all-features --target $(TRIPLE) -- --nocapture --test-threads=1
else
@echo "INFO: skip testing dragonball, it need virtualization support."
exit 0
endif
+coverage:
+ RUST_BACKTRACE=1 cargo llvm-cov --all-features --target $(TRIPLE) -- --nocapture --test-threads=1
+
endif # ifeq ($(ARCH), s390x)
.DEFAULT_GOAL := default
diff --git a/src/dragonball/README.md b/src/dragonball/README.md
index 3fde0782e..767b9af47 100644
--- a/src/dragonball/README.md
+++ b/src/dragonball/README.md
@@ -16,10 +16,22 @@ and configuration process.
# Documentation
-Device: [Device Document](docs/device.md)
-vCPU: [vCPU Document](docs/vcpu.md)
-API: [API Document](docs/api.md)
-`Upcall`: [`Upcall` Document](docs/upcall.md)
+- Device: [Device Document](docs/device.md)
+- vCPU: [vCPU Document](docs/vcpu.md)
+- API: [API Document](docs/api.md)
+- `Upcall`: [`Upcall` Document](docs/upcall.md)
+- `dbs_acpi`: [`dbs_acpi` Document](src/dbs_acpi/README.md)
+- `dbs_address_space`: [`dbs_address_space` Document](src/dbs_address_space/README.md)
+- `dbs_allocator`: [`dbs_allocator` Document](src/dbs_allocator/README.md)
+- `dbs_arch`: [`dbs_arch` Document](src/dbs_arch/README.md)
+- `dbs_boot`: [`dbs_boot` Document](src/dbs_boot/README.md)
+- `dbs_device`: [`dbs_device` Document](src/dbs_device/README.md)
+- `dbs_interrupt`: [`dbs_interrupt` Document](src/dbs_interrupt/README.md)
+- `dbs_legacy_devices`: [`dbs_legacy_devices` Document](src/dbs_legacy_devices/README.md)
+- `dbs_tdx`: [`dbs_tdx` Document](src/dbs_tdx/README.md)
+- `dbs_upcall`: [`dbs_upcall` Document](src/dbs_upcall/README.md)
+- `dbs_utils`: [`dbs_utils` Document](src/dbs_utils/README.md)
+- `dbs_virtio_devices`: [`dbs_virtio_devices` Document](src/dbs_virtio_devices/README.md)
Currently, the documents are still being actively added.
You could see the [official documentation](docs/) page for more details.
diff --git a/src/dragonball/src/dbs_acpi/Cargo.toml b/src/dragonball/src/dbs_acpi/Cargo.toml
new file mode 100644
index 000000000..df5e7867a
--- /dev/null
+++ b/src/dragonball/src/dbs_acpi/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "dbs-acpi"
+version = "0.1.0"
+authors = ["Alibaba Dragonball Team"]
+description = "acpi definitions for virtual machines."
+license = "Apache-2.0"
+edition = "2018"
+homepage = "https://github.com/openanolis/dragonball-sandbox"
+repository = "https://github.com/openanolis/dragonball-sandbox"
+keywords = ["dragonball", "acpi", "vmm", "secure-sandbox"]
+readme = "README.md"
+
+[dependencies]
+vm-memory = "0.9.0"
\ No newline at end of file
diff --git a/src/dragonball/src/dbs_acpi/README.md b/src/dragonball/src/dbs_acpi/README.md
new file mode 100644
index 000000000..cc2b49754
--- /dev/null
+++ b/src/dragonball/src/dbs_acpi/README.md
@@ -0,0 +1,11 @@
+# dbs-acpi
+
+`dbs-acpi` provides ACPI data structures for VMM to emulate ACPI behavior.
+
+## Acknowledgement
+
+Part of the code is derived from the [Cloud Hypervisor](https://github.com/cloud-hypervisor/cloud-hypervisor) project.
+
+## License
+
+This project is licensed under [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0).
diff --git a/src/dragonball/src/dbs_acpi/src/lib.rs b/src/dragonball/src/dbs_acpi/src/lib.rs
new file mode 100644
index 000000000..a3094e309
--- /dev/null
+++ b/src/dragonball/src/dbs_acpi/src/lib.rs
@@ -0,0 +1,29 @@
+// Copyright (c) 2019 Intel Corporation
+// Copyright (c) 2023 Alibaba Cloud
+//
+// SPDX-License-Identifier: Apache-2.0
+pub mod rsdp;
+pub mod sdt;
+
+fn generate_checksum(data: &[u8]) -> u8 {
+ (255 - data.iter().fold(0u8, |acc, x| acc.wrapping_add(*x))).wrapping_add(1)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ #[test]
+ fn test_generate_checksum() {
+ let mut buf = [0x00; 8];
+ let sum = generate_checksum(&buf);
+ assert_eq!(sum, 0);
+ buf[0] = 0xff;
+ let sum = generate_checksum(&buf);
+ assert_eq!(sum, 1);
+ buf[0] = 0xaa;
+ buf[1] = 0xcc;
+ buf[4] = generate_checksum(&buf);
+ let sum = buf.iter().fold(0u8, |s, v| s.wrapping_add(*v));
+ assert_eq!(sum, 0);
+ }
+}
diff --git a/src/dragonball/src/dbs_acpi/src/rsdp.rs b/src/dragonball/src/dbs_acpi/src/rsdp.rs
new file mode 100644
index 000000000..05c36f809
--- /dev/null
+++ b/src/dragonball/src/dbs_acpi/src/rsdp.rs
@@ -0,0 +1,60 @@
+// Copyright (c) 2019 Intel Corporation
+// Copyright (c) 2023 Alibaba Cloud
+//
+// SPDX-License-Identifier: Apache-2.0
+// RSDP (Root System Description Pointer) is a data structure used in the ACPI programming interface.
+use vm_memory::ByteValued;
+
+#[repr(packed)]
+#[derive(Clone, Copy, Default)]
+pub struct Rsdp {
+ pub signature: [u8; 8],
+ pub checksum: u8,
+ pub oem_id: [u8; 6],
+ pub revision: u8,
+ _rsdt_addr: u32,
+ pub length: u32,
+ pub xsdt_addr: u64,
+ pub extended_checksum: u8,
+ _reserved: [u8; 3],
+}
+
+// SAFETY: Rsdp only contains a series of integers
+unsafe impl ByteValued for Rsdp {}
+
+impl Rsdp {
+ pub fn new(xsdt_addr: u64) -> Self {
+ let mut rsdp = Rsdp {
+ signature: *b"RSD PTR ",
+ checksum: 0,
+ oem_id: *b"ALICLD",
+ revision: 1,
+ _rsdt_addr: 0,
+ length: std::mem::size_of::<Rsdp>() as u32,
+ xsdt_addr,
+ extended_checksum: 0,
+ _reserved: [0; 3],
+ };
+ rsdp.checksum = super::generate_checksum(&rsdp.as_slice()[0..19]);
+ rsdp.extended_checksum = super::generate_checksum(rsdp.as_slice());
+ rsdp
+ }
+
+ pub fn len() -> usize {
+ std::mem::size_of::<Rsdp>()
+ }
+}
+#[cfg(test)]
+mod tests {
+ use super::Rsdp;
+ use vm_memory::bytes::ByteValued;
+ #[test]
+ fn test_rsdp() {
+ let rsdp = Rsdp::new(0xa0000);
+ let sum = rsdp
+ .as_slice()
+ .iter()
+ .fold(0u8, |acc, x| acc.wrapping_add(*x));
+ assert_eq!(sum, 0);
+ }
+}
diff --git a/src/dragonball/src/dbs_acpi/src/sdt.rs b/src/dragonball/src/dbs_acpi/src/sdt.rs
new file mode 100644
index 000000000..f6a79f576
--- /dev/null
+++ b/src/dragonball/src/dbs_acpi/src/sdt.rs
@@ -0,0 +1,137 @@
+// Copyright (c) 2019 Intel Corporation
+// Copyright (c) 2023 Alibaba Cloud
+//
+// SPDX-License-Identifier: Apache-2.0
+#[repr(packed)]
+pub struct GenericAddress {
+ pub address_space_id: u8,
+ pub register_bit_width: u8,
+ pub register_bit_offset: u8,
+ pub access_size: u8,
+ pub address: u64,
+}
+
+impl GenericAddress {
+ pub fn io_port_address<T>(address: u16) -> Self {
+ GenericAddress {
+ address_space_id: 1,
+ register_bit_width: 8 * std::mem::size_of::<T>() as u8,
+ register_bit_offset: 0,
+ access_size: std::mem::size_of::<T>() as u8,
+ address: u64::from(address),
+ }
+ }
+
+ pub fn mmio_address<T>(address: u64) -> Self {
+ GenericAddress {
+ address_space_id: 0,
+ register_bit_width: 8 * std::mem::size_of::<T>() as u8,
+ register_bit_offset: 0,
+ access_size: std::mem::size_of::<T>() as u8,
+ address,
+ }
+ }
+}
+
+pub struct Sdt {
+ data: Vec<u8>,
+}
+
+#[allow(clippy::len_without_is_empty)]
+impl Sdt {
+ pub fn new(signature: [u8; 4], length: u32, revision: u8) -> Self {
+ assert!(length >= 36);
+ const OEM_ID: [u8; 6] = *b"ALICLD";
+ const OEM_TABLE: [u8; 8] = *b"RUND ";
+ const CREATOR_ID: [u8; 4] = *b"ALIC";
+ let mut data = Vec::with_capacity(length as usize);
+ data.extend_from_slice(&signature);
+ data.extend_from_slice(&length.to_le_bytes());
+ data.push(revision);
+ data.push(0); // checksum
+ data.extend_from_slice(&OEM_ID); // oem id u32
+ data.extend_from_slice(&OEM_TABLE); // oem table
+ data.extend_from_slice(&1u32.to_le_bytes()); // oem revision u32
+ data.extend_from_slice(&CREATOR_ID); // creator id u32
+ data.extend_from_slice(&1u32.to_le_bytes()); // creator revision u32
+ assert_eq!(data.len(), 36);
+ data.resize(length as usize, 0);
+ let mut sdt = Sdt { data };
+ sdt.update_checksum();
+ sdt
+ }
+
+ pub fn update_checksum(&mut self) {
+ self.data[9] = 0;
+ let checksum = super::generate_checksum(self.data.as_slice());
+ self.data[9] = checksum
+ }
+
+ pub fn as_slice(&self) -> &[u8] {
+ self.data.as_slice()
+ }
+
+ pub fn append<T>(&mut self, value: T) {
+ let orig_length = self.data.len();
+ let new_length = orig_length + std::mem::size_of::<T>();
+ self.data.resize(new_length, 0);
+ self.write_u32(4, new_length as u32);
+ self.write(orig_length, value);
+ }
+
+ pub fn append_slice(&mut self, data: &[u8]) {
+ let orig_length = self.data.len();
+ let new_length = orig_length + data.len();
+ self.write_u32(4, new_length as u32);
+ self.data.extend_from_slice(data);
+ self.update_checksum();
+ }
+
+ /// Write a value at the given offset
+ pub fn write<T>(&mut self, offset: usize, value: T) {
+ assert!((offset + (std::mem::size_of::<T>() - 1)) < self.data.len());
+ unsafe {
+ *(((self.data.as_mut_ptr() as usize) + offset) as *mut T) = value;
+ }
+ self.update_checksum();
+ }
+
+ pub fn write_u8(&mut self, offset: usize, val: u8) {
+ self.write(offset, val);
+ }
+
+ pub fn write_u16(&mut self, offset: usize, val: u16) {
+ self.write(offset, val);
+ }
+
+ pub fn write_u32(&mut self, offset: usize, val: u32) {
+ self.write(offset, val);
+ }
+
+ pub fn write_u64(&mut self, offset: usize, val: u64) {
+ self.write(offset, val);
+ }
+
+ pub fn len(&self) -> usize {
+ self.data.len()
+ }
+}
+#[cfg(test)]
+mod tests {
+ use super::Sdt;
+ #[test]
+ fn test_sdt() {
+ let mut sdt = Sdt::new(*b"TEST", 40, 1);
+ let sum: u8 = sdt
+ .as_slice()
+ .iter()
+ .fold(0u8, |acc, x| acc.wrapping_add(*x));
+ assert_eq!(sum, 0);
+ sdt.write_u32(36, 0x12345678);
+ let sum: u8 = sdt
+ .as_slice()
+ .iter()
+ .fold(0u8, |acc, x| acc.wrapping_add(*x));
+ assert_eq!(sum, 0);
+ }
+}
diff --git a/src/dragonball/src/dbs_address_space/Cargo.toml b/src/dragonball/src/dbs_address_space/Cargo.toml
new file mode 100644
index 000000000..f507fa4dc
--- /dev/null
+++ b/src/dragonball/src/dbs_address_space/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "dbs-address-space"
+version = "0.3.0"
+authors = ["Alibaba Dragonball Team"]
+description = "address space manager for virtual machines."
+license = "Apache-2.0"
+edition = "2018"
+homepage = "https://github.com/openanolis/dragonball-sandbox"
+repository = "https://github.com/openanolis/dragonball-sandbox"
+keywords = ["dragonball", "address", "vmm", "secure-sandbox"]
+readme = "README.md"
+
+[dependencies]
+arc-swap = ">=0.4.8"
+libc = "0.2.39"
+nix = "0.23.1"
+lazy_static = "1"
+thiserror = "1"
+vmm-sys-util = "0.11.0"
+vm-memory = { version = "0.9", features = ["backend-mmap", "backend-atomic"] }
diff --git a/src/dragonball/src/dbs_address_space/LICENSE b/src/dragonball/src/dbs_address_space/LICENSE
new file mode 120000
index 000000000..30cff7403
--- /dev/null
+++ b/src/dragonball/src/dbs_address_space/LICENSE
@@ -0,0 +1 @@
+../../LICENSE
\ No newline at end of file
diff --git a/src/dragonball/src/dbs_address_space/README.md b/src/dragonball/src/dbs_address_space/README.md
new file mode 100644
index 000000000..e3ea81d4c
--- /dev/null
+++ b/src/dragonball/src/dbs_address_space/README.md
@@ -0,0 +1,80 @@
+# dbs-address-space
+
+## Design
+
+The `dbs-address-space` crate is an address space manager for virtual machines, which manages memory and MMIO resources resident in the guest physical address space.
+
+Main components are:
+- `AddressSpaceRegion`: Struct to maintain configuration information about a guest address region.
+```rust
+#[derive(Debug, Clone)]
+pub struct AddressSpaceRegion {
+ /// Type of address space regions.
+ pub ty: AddressSpaceRegionType,
+ /// Base address of the region in virtual machine's physical address space.
+ pub base: GuestAddress,
+ /// Size of the address space region.
+ pub size: GuestUsize,
+ /// Host NUMA node ids assigned to this region.
+ pub host_numa_node_id: Option<u32>,
+
+ /// File/offset tuple to back the memory allocation.
+ file_offset: Option<FileOffset>,
+ /// Mmap permission flags.
+ perm_flags: i32,
+ /// Hugepage madvise hint.
+ ///
+ /// It needs 'advise' or 'always' policy in host shmem config.
+ is_hugepage: bool,
+ /// Hotplug hint.
+ is_hotplug: bool,
+ /// Anonymous memory hint.
+ ///
+ /// It should be true for regions with the MADV_DONTFORK flag enabled.
+ is_anon: bool,
+}
+```
+- `AddressSpaceBase`: Base implementation to manage guest physical address space, without support of region hotplug.
+```rust
+#[derive(Clone)]
+pub struct AddressSpaceBase {
+ regions: Vec<Arc<AddressSpaceRegion>>,
+ layout: AddressSpaceLayout,
+}
+```
+- `AddressSpace`: An address space implementation with region hotplug capability.
+```rust
+/// The `AddressSpace` is a wrapper over [AddressSpaceBase] to support hotplug of
+/// address space regions.
+#[derive(Clone)]
+pub struct AddressSpace {
+ state: Arc<ArcSwap<AddressSpaceBase>>,
+}
+```
+
+## Usage
+```rust
+// 1. create several memory regions
+let reg = Arc::new(
+ AddressSpaceRegion::create_default_memory_region(
+ GuestAddress(0x100000),
+ 0x100000,
+ None,
+ "shmem",
+ "",
+ false,
+ false,
+ false,
+ )
+ .unwrap()
+);
+let regions = vec![reg];
+// 2. create layout (depending on archs)
+let layout = AddressSpaceLayout::new(GUEST_PHYS_END, GUEST_MEM_START, GUEST_MEM_END);
+// 3. create address space from regions and layout
+let address_space = AddressSpace::from_regions(regions, layout.clone());
+```
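+
+A region can also be hot-added after the address space has been created. The sketch below reuses
+the layout constants from the example above and the `create_device_region` helper exercised by
+this crate's unit tests:
+```rust
+// 4. hotplug a device region above the guest memory range
+// (`insert_region` takes `&mut self`, so bind the address space mutably)
+let mut address_space = address_space;
+let dev_reg = Arc::new(
+ AddressSpaceRegion::create_device_region(GuestAddress(GUEST_MEM_END + 1), 0x1000).unwrap(),
+);
+address_space.insert_region(dev_reg).unwrap();
+```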
+
+## License
+
+This project is licensed under [Apache License](http://www.apache.org/licenses/LICENSE-2.0), Version 2.0.
diff --git a/src/dragonball/src/dbs_address_space/src/address_space.rs b/src/dragonball/src/dbs_address_space/src/address_space.rs
new file mode 100644
index 000000000..35bfab66d
--- /dev/null
+++ b/src/dragonball/src/dbs_address_space/src/address_space.rs
@@ -0,0 +1,830 @@
+// Copyright (C) 2021 Alibaba Cloud. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+//! Physical address space manager for virtual machines.
+
+use std::sync::Arc;
+
+use arc_swap::ArcSwap;
+use vm_memory::{GuestAddress, GuestMemoryMmap};
+
+use crate::{AddressSpaceError, AddressSpaceLayout, AddressSpaceRegion, AddressSpaceRegionType};
+
+/// Base implementation to manage guest physical address space, without support of region hotplug.
+#[derive(Clone)]
+pub struct AddressSpaceBase {
+ regions: Vec<Arc<AddressSpaceRegion>>,
+ layout: AddressSpaceLayout,
+}
+
+impl AddressSpaceBase {
+ /// Create an instance of `AddressSpaceBase` from an `AddressSpaceRegion` array.
+ ///
+ /// To enable binary search, the `regions` vector will be sorted by guest physical address.
+ ///
+ /// Note: panics if any regions intersect with each other.
+ ///
+ /// # Arguments
+ /// * `regions` - prepared regions to be managed by the address space instance.
+ /// * `layout` - prepared address space layout configuration.
+ pub fn from_regions(
+ mut regions: Vec<Arc<AddressSpaceRegion>>,
+ layout: AddressSpaceLayout,
+ ) -> Self {
+ regions.sort_unstable_by_key(|v| v.base);
+ for region in regions.iter() {
+ if !layout.is_region_valid(region) {
+ panic!(
+ "Invalid region {:?} for address space layout {:?}",
+ region, layout
+ );
+ }
+ }
+ for idx in 1..regions.len() {
+ if regions[idx].intersect_with(®ions[idx - 1]) {
+ panic!("address space regions intersect with each other");
+ }
+ }
+ AddressSpaceBase { regions, layout }
+ }
+
+ /// Insert a new address space region into the address space.
+ ///
+ /// # Arguments
+ /// * `region` - the new region to be inserted.
+ pub fn insert_region(
+ &mut self,
+ region: Arc<AddressSpaceRegion>,
+ ) -> Result<(), AddressSpaceError> {
+ if !self.layout.is_region_valid(®ion) {
+ return Err(AddressSpaceError::InvalidAddressRange(
+ region.start_addr().0,
+ region.len(),
+ ));
+ }
+ for idx in 0..self.regions.len() {
+ if self.regions[idx].intersect_with(®ion) {
+ return Err(AddressSpaceError::InvalidAddressRange(
+ region.start_addr().0,
+ region.len(),
+ ));
+ }
+ }
+ self.regions.push(region);
+ Ok(())
+ }
+
+ /// Enumerate all regions in the address space.
+ ///
+ /// # Arguments
+ /// * `cb` - the callback function to apply to each region.
+ pub fn walk_regions<F>(&self, mut cb: F) -> Result<(), AddressSpaceError>
+ where
+ F: FnMut(&Arc<AddressSpaceRegion>) -> Result<(), AddressSpaceError>,
+ {
+ for reg in self.regions.iter() {
+ cb(reg)?;
+ }
+
+ Ok(())
+ }
+
+ /// Get address space layout associated with the address space.
+ pub fn layout(&self) -> AddressSpaceLayout {
+ self.layout.clone()
+ }
+
+ /// Get maximum of guest physical address in the address space.
+ pub fn last_addr(&self) -> GuestAddress {
+ let mut last_addr = GuestAddress(self.layout.mem_start);
+ for reg in self.regions.iter() {
+ if reg.ty != AddressSpaceRegionType::DAXMemory && reg.last_addr() > last_addr {
+ last_addr = reg.last_addr();
+ }
+ }
+ last_addr
+ }
+
+ /// Check whether the guest physical address `guest_addr` belongs to a DAX memory region.
+ ///
+ /// # Arguments
+ /// * `guest_addr` - the guest physical address to inquire
+ pub fn is_dax_region(&self, guest_addr: GuestAddress) -> bool {
+ for reg in self.regions.iter() {
+ // Safe because the region was validated when the address space object was created.
+ if reg.region_type() == AddressSpaceRegionType::DAXMemory
+ && reg.start_addr() <= guest_addr
+ && reg.start_addr().0 + reg.len() > guest_addr.0
+ {
+ return true;
+ }
+ }
+ false
+ }
+
+ /// Get protection flags of memory region that guest physical address `guest_addr` belongs to.
+ ///
+ /// # Arguments
+ /// * `guest_addr` - the guest physical address to inquire
+ pub fn prot_flags(&self, guest_addr: GuestAddress) -> Result<i32, AddressSpaceError> {
+ for reg in self.regions.iter() {
+ if reg.start_addr() <= guest_addr && reg.start_addr().0 + reg.len() > guest_addr.0 {
+ return Ok(reg.prot_flags());
+ }
+ }
+
+ Err(AddressSpaceError::InvalidRegionType)
+ }
+
+ /// Get optional NUMA node id associated with guest physical address `gpa`.
+ ///
+ /// # Arguments
+ /// * `gpa` - guest physical address to query.
+ pub fn numa_node_id(&self, gpa: u64) -> Option<u32> {
+ for reg in self.regions.iter() {
+ if gpa >= reg.base.0 && gpa < (reg.base.0 + reg.size) {
+ return reg.host_numa_node_id;
+ }
+ }
+ None
+ }
+}
+
+/// An address space implementation with region hotplug capability.
+///
+/// The `AddressSpace` is a wrapper over [AddressSpaceBase] to support hotplug of
+/// address space regions.
+#[derive(Clone)]
+pub struct AddressSpace {
+ state: Arc<ArcSwap<AddressSpaceBase>>,
+}
+
+impl AddressSpace {
+ /// Convert a [GuestMemoryMmap] object into `GuestMemoryAtomic`.
+ pub fn convert_into_vm_as(
+ gm: GuestMemoryMmap,
+ ) -> vm_memory::atomic::GuestMemoryAtomic<GuestMemoryMmap> {
+ vm_memory::atomic::GuestMemoryAtomic::from(Arc::new(gm))
+ }
+
+ /// Create an instance of `AddressSpace` from an `AddressSpaceRegion` array.
+ ///
+ /// To enable binary search, the `regions` vector will be sorted by guest physical address.
+ ///
+ /// Note: panics if any regions intersect with each other.
+ ///
+ /// # Arguments
+ /// * `regions` - prepared regions to be managed by the address space instance.
+ /// * `layout` - prepared address space layout configuration.
+ pub fn from_regions(regions: Vec<Arc<AddressSpaceRegion>>, layout: AddressSpaceLayout) -> Self {
+ let base = AddressSpaceBase::from_regions(regions, layout);
+
+ AddressSpace {
+ state: Arc::new(ArcSwap::new(Arc::new(base))),
+ }
+ }
+
+ /// Insert a new address space region into the address space.
+ ///
+ /// # Arguments
+ /// * `region` - the new region to be inserted.
+ pub fn insert_region(
+ &mut self,
+ region: Arc<AddressSpaceRegion>,
+ ) -> Result<(), AddressSpaceError> {
+ let curr = self.state.load().regions.clone();
+ let layout = self.state.load().layout.clone();
+ let mut base = AddressSpaceBase::from_regions(curr, layout);
+ base.insert_region(region)?;
+ let _old = self.state.swap(Arc::new(base));
+
+ Ok(())
+ }
+
+ /// Enumerate all regions in the address space.
+ ///
+ /// # Arguments
+ /// * `cb` - the callback function to apply to each region.
+ pub fn walk_regions<F>(&self, cb: F) -> Result<(), AddressSpaceError>
+ where
+ F: FnMut(&Arc<AddressSpaceRegion>) -> Result<(), AddressSpaceError>,
+ {
+ self.state.load().walk_regions(cb)
+ }
+
+ /// Get address space layout associated with the address space.
+ pub fn layout(&self) -> AddressSpaceLayout {
+ self.state.load().layout()
+ }
+
+ /// Get maximum of guest physical address in the address space.
+ pub fn last_addr(&self) -> GuestAddress {
+ self.state.load().last_addr()
+ }
+
+ /// Check whether the guest physical address `guest_addr` belongs to a DAX memory region.
+ ///
+ /// # Arguments
+ /// * `guest_addr` - the guest physical address to inquire
+ pub fn is_dax_region(&self, guest_addr: GuestAddress) -> bool {
+ self.state.load().is_dax_region(guest_addr)
+ }
+
+ /// Get protection flags of memory region that guest physical address `guest_addr` belongs to.
+ ///
+ /// # Arguments
+ /// * `guest_addr` - the guest physical address to inquire
+ pub fn prot_flags(&self, guest_addr: GuestAddress) -> Result<i32, AddressSpaceError> {
+ self.state.load().prot_flags(guest_addr)
+ }
+
+ /// Get optional NUMA node id associated with guest physical address `gpa`.
+ ///
+ /// # Arguments
+ /// * `gpa` - guest physical address to query.
+ pub fn numa_node_id(&self, gpa: u64) -> Option<u32> {
+ self.state.load().numa_node_id(gpa)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::io::Write;
+ use vm_memory::GuestUsize;
+ use vmm_sys_util::tempfile::TempFile;
+
+ // Constants for unit tests
+ const GUEST_PHYS_END: u64 = (1 << 46) - 1;
+ const GUEST_MEM_START: u64 = 0;
+ const GUEST_MEM_END: u64 = GUEST_PHYS_END >> 1;
+ const GUEST_DEVICE_START: u64 = GUEST_MEM_END + 1;
+
+ #[test]
+ fn test_address_space_base_from_regions() {
+ let mut file = TempFile::new().unwrap().into_file();
+ let sample_buf = &[1, 2, 3, 4, 5];
+ assert!(file.write_all(sample_buf).is_ok());
+ file.set_len(0x10000).unwrap();
+
+ let reg = Arc::new(
+ AddressSpaceRegion::create_device_region(GuestAddress(GUEST_DEVICE_START), 0x1000)
+ .unwrap(),
+ );
+ let regions = vec![reg];
+ let layout = AddressSpaceLayout::new(GUEST_PHYS_END, GUEST_MEM_START, GUEST_MEM_END);
+ let address_space = AddressSpaceBase::from_regions(regions, layout.clone());
+ assert_eq!(address_space.layout(), layout);
+ }
+
+ #[test]
+ #[should_panic(expected = "Invalid region")]
+ fn test_address_space_base_from_regions_when_region_invalid() {
+ let reg = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x100),
+ 0x1000,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let regions = vec![reg];
+ let layout = AddressSpaceLayout::new(0x2000, 0x200, 0x1800);
+ let _address_space = AddressSpaceBase::from_regions(regions, layout);
+ }
+
+ #[test]
+ #[should_panic(expected = "address space regions intersect with each other")]
+ fn test_address_space_base_from_regions_when_region_intersected() {
+ let reg1 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x100),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let reg2 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x200),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let regions = vec![reg1, reg2];
+ let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
+ let _address_space = AddressSpaceBase::from_regions(regions, layout);
+ }
+
+ #[test]
+ fn test_address_space_base_insert_region() {
+ let reg1 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x100),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let reg2 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x300),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let regions = vec![reg1];
+ let layout = AddressSpaceLayout::new(0x2000, 0x100, 0x1800);
+ let mut address_space = AddressSpaceBase::from_regions(regions, layout);
+
+ // Normal case.
+ address_space.insert_region(reg2).unwrap();
+ assert!(!address_space.regions[1].intersect_with(&address_space.regions[0]));
+
+ // Error: invalid address range when the region itself is invalid.
+ let invalid_reg = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x0),
+ 0x100,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ assert_eq!(
+ format!(
+ "{:?}",
+ address_space.insert_region(invalid_reg).err().unwrap()
+ ),
+ format!("InvalidAddressRange({:?}, {:?})", 0x0, 0x100)
+ );
+
+ // Error: invalid address range when the region to be inserted intersects
+ // existing regions.
+ let intersected_reg = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x400),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ assert_eq!(
+ format!(
+ "{:?}",
+ address_space.insert_region(intersected_reg).err().unwrap()
+ ),
+ format!("InvalidAddressRange({:?}, {:?})", 0x400, 0x200)
+ );
+ }
+
+ #[test]
+ fn test_address_space_base_walk_regions() {
+ let reg1 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x100),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let reg2 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x300),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let regions = vec![reg1, reg2];
+ let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
+ let address_space = AddressSpaceBase::from_regions(regions, layout);
+
+ // The argument of walk_regions is a function which takes a &Arc<AddressSpaceRegion>
+ // and returns a Result. The function is applied to every region.
+ fn do_not_have_hotplug(region: &Arc<AddressSpaceRegion>) -> Result<(), AddressSpaceError> {
+ if region.is_hotplug() {
+ Err(AddressSpaceError::InvalidRegionType) // The error type must be AddressSpaceError.
+ } else {
+ Ok(())
+ }
+ }
+ assert!(matches!(
+ address_space.walk_regions(do_not_have_hotplug).unwrap(),
+ ()
+ ));
+ }
+
+ #[test]
+ fn test_address_space_base_last_addr() {
+ let reg1 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x100),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let reg2 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x300),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let regions = vec![reg1, reg2];
+ let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
+ let address_space = AddressSpaceBase::from_regions(regions, layout);
+
+ assert_eq!(address_space.last_addr(), GuestAddress(0x500 - 1));
+ }
+
+ #[test]
+ fn test_address_space_base_is_dax_region() {
+ let page_size = 4096;
+ let address_space_region = vec![
+ Arc::new(AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(page_size),
+ page_size as GuestUsize,
+ )),
+ Arc::new(AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(page_size * 2),
+ page_size as GuestUsize,
+ )),
+ Arc::new(AddressSpaceRegion::new(
+ AddressSpaceRegionType::DAXMemory,
+ GuestAddress(GUEST_DEVICE_START),
+ page_size as GuestUsize,
+ )),
+ ];
+ let layout = AddressSpaceLayout::new(GUEST_PHYS_END, GUEST_MEM_START, GUEST_MEM_END);
+ let address_space = AddressSpaceBase::from_regions(address_space_region, layout);
+
+ assert!(!address_space.is_dax_region(GuestAddress(page_size)));
+ assert!(!address_space.is_dax_region(GuestAddress(page_size * 2)));
+ assert!(address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START)));
+ assert!(address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START + 1)));
+ assert!(!address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START + page_size)));
+ assert!(address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START + page_size - 1)));
+ }
+
+ #[test]
+ fn test_address_space_base_prot_flags() {
+ let reg1 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x100),
+ 0x200,
+ Some(0),
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let reg2 = Arc::new(AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x300),
+ 0x300,
+ ));
+ let regions = vec![reg1, reg2];
+ let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
+ let address_space = AddressSpaceBase::from_regions(regions, layout);
+
+ // Normal case, reg1.
+ assert_eq!(address_space.prot_flags(GuestAddress(0x200)).unwrap(), 0);
+ // Normal case, reg2.
+ assert_eq!(
+ address_space.prot_flags(GuestAddress(0x500)).unwrap(),
+ libc::PROT_READ | libc::PROT_WRITE
+ );
+ // Inquire gpa where no region is set.
+ assert!(matches!(
+ address_space.prot_flags(GuestAddress(0x600)),
+ Err(AddressSpaceError::InvalidRegionType)
+ ));
+ }
+
+ #[test]
+ fn test_address_space_base_numa_node_id() {
+ let reg1 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x100),
+ 0x200,
+ Some(0),
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let reg2 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x300),
+ 0x300,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let regions = vec![reg1, reg2];
+ let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
+ let address_space = AddressSpaceBase::from_regions(regions, layout);
+
+ // Normal case.
+ assert_eq!(address_space.numa_node_id(0x200).unwrap(), 0);
+ // Inquire region with None as its numa node id.
+ assert_eq!(address_space.numa_node_id(0x400), None);
+ // Inquire gpa where no region is set.
+ assert_eq!(address_space.numa_node_id(0x600), None);
+ }
+
+ #[test]
+ fn test_address_space_convert_into_vm_as() {
+ // TODO: more detailed tests are needed here.
+ let gmm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x0), 0x400)]).unwrap();
+ let _vm = AddressSpace::convert_into_vm_as(gmm);
+ }
+
+ #[test]
+ fn test_address_space_insert_region() {
+ let reg1 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x100),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let reg2 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x300),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let regions = vec![reg1];
+ let layout = AddressSpaceLayout::new(0x2000, 0x100, 0x1800);
+ let mut address_space = AddressSpace::from_regions(regions, layout);
+
+ // Normal case.
+ assert!(matches!(address_space.insert_region(reg2).unwrap(), ()));
+
+ // Error: invalid address range when the region itself is invalid.
+ let invalid_reg = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x0),
+ 0x100,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ assert_eq!(
+ format!(
+ "{:?}",
+ address_space.insert_region(invalid_reg).err().unwrap()
+ ),
+ format!("InvalidAddressRange({:?}, {:?})", 0x0, 0x100)
+ );
+
+ // Error: invalid address range when the region to be inserted intersects
+ // existing regions.
+ let intersected_reg = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x400),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ assert_eq!(
+ format!(
+ "{:?}",
+ address_space.insert_region(intersected_reg).err().unwrap()
+ ),
+ format!("InvalidAddressRange({:?}, {:?})", 0x400, 0x200)
+ );
+ }
+
+ #[test]
+ fn test_address_space_walk_regions() {
+ let reg1 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x100),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let reg2 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x300),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let regions = vec![reg1, reg2];
+ let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
+ let address_space = AddressSpace::from_regions(regions, layout);
+
+ fn access_all_hotplug_flag(
+ region: &Arc<AddressSpaceRegion>,
+ ) -> Result<(), AddressSpaceError> {
+ region.is_hotplug();
+ Ok(())
+ }
+
+ assert!(matches!(
+ address_space.walk_regions(access_all_hotplug_flag).unwrap(),
+ ()
+ ));
+ }
+
+ #[test]
+ fn test_address_space_layout() {
+ let reg = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x100),
+ 0x1000,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let regions = vec![reg];
+ let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
+ let address_space = AddressSpace::from_regions(regions, layout.clone());
+
+ assert_eq!(layout, address_space.layout());
+ }
+
+ #[test]
+ fn test_address_space_last_addr() {
+ let reg1 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x100),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let reg2 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x300),
+ 0x200,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let regions = vec![reg1, reg2];
+ let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
+ let address_space = AddressSpace::from_regions(regions, layout);
+
+ assert_eq!(address_space.last_addr(), GuestAddress(0x500 - 1));
+ }
+
+ #[test]
+ fn test_address_space_is_dax_region() {
+ let page_size = 4096;
+ let address_space_region = vec![
+ Arc::new(AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(page_size),
+ page_size as GuestUsize,
+ )),
+ Arc::new(AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(page_size * 2),
+ page_size as GuestUsize,
+ )),
+ Arc::new(AddressSpaceRegion::new(
+ AddressSpaceRegionType::DAXMemory,
+ GuestAddress(GUEST_DEVICE_START),
+ page_size as GuestUsize,
+ )),
+ ];
+ let layout = AddressSpaceLayout::new(GUEST_PHYS_END, GUEST_MEM_START, GUEST_MEM_END);
+ let address_space = AddressSpace::from_regions(address_space_region, layout);
+
+ assert!(!address_space.is_dax_region(GuestAddress(page_size)));
+ assert!(!address_space.is_dax_region(GuestAddress(page_size * 2)));
+ assert!(address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START)));
+ assert!(address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START + 1)));
+ assert!(!address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START + page_size)));
+ assert!(address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START + page_size - 1)));
+ }
+
+ #[test]
+ fn test_address_space_prot_flags() {
+ let reg1 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x100),
+ 0x200,
+ Some(0),
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let reg2 = Arc::new(AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x300),
+ 0x300,
+ ));
+ let regions = vec![reg1, reg2];
+ let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
+ let address_space = AddressSpace::from_regions(regions, layout);
+
+ // Normal case, reg1.
+ assert_eq!(address_space.prot_flags(GuestAddress(0x200)).unwrap(), 0);
+ // Normal case, reg2.
+ assert_eq!(
+ address_space.prot_flags(GuestAddress(0x500)).unwrap(),
+ libc::PROT_READ | libc::PROT_WRITE
+ );
+ // Inquire gpa where no region is set.
+ assert!(matches!(
+ address_space.prot_flags(GuestAddress(0x600)),
+ Err(AddressSpaceError::InvalidRegionType)
+ ));
+ }
+
+ #[test]
+ fn test_address_space_numa_node_id() {
+ let reg1 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x100),
+ 0x200,
+ Some(0),
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let reg2 = Arc::new(AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x300),
+ 0x300,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ));
+ let regions = vec![reg1, reg2];
+ let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
+ let address_space = AddressSpace::from_regions(regions, layout);
+
+ // Normal case.
+ assert_eq!(address_space.numa_node_id(0x200).unwrap(), 0);
+ // Inquire region with None as its numa node id.
+ assert_eq!(address_space.numa_node_id(0x400), None);
+ // Inquire gpa where no region is set.
+ assert_eq!(address_space.numa_node_id(0x600), None);
+ }
+}
diff --git a/src/dragonball/src/dbs_address_space/src/layout.rs b/src/dragonball/src/dbs_address_space/src/layout.rs
new file mode 100644
index 000000000..cd6c6bfb0
--- /dev/null
+++ b/src/dragonball/src/dbs_address_space/src/layout.rs
@@ -0,0 +1,154 @@
+// Copyright (C) 2021 Alibaba Cloud. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+use lazy_static::lazy_static;
+
+use crate::{AddressSpaceRegion, AddressSpaceRegionType};
+
+// Max retry times for reading /proc
+const PROC_READ_RETRY: u64 = 5;
+
+lazy_static! {
+ /// Upper bound of host memory.
+ pub static ref USABLE_END: u64 = {
+ for _ in 0..PROC_READ_RETRY {
+ if let Ok(buf) = std::fs::read("/proc/meminfo") {
+ let content = String::from_utf8_lossy(&buf);
+ for line in content.lines() {
+ if line.starts_with("MemTotal:") {
+ if let Some(end) = line.find(" kB") {
+ if let Ok(size) = line[9..end].trim().parse::<u64>() {
+ return (size << 10) - 1;
+ }
+ }
+ }
+ }
+ }
+ }
+ panic!("Exceed max retry times. Cannot get total mem size from /proc/meminfo");
+ };
+}
+
+/// Address space layout configuration.
+///
+/// The layout configuration must guarantee that `mem_start` <= `mem_end` <= `phys_end`.
+/// Non-memory region should be arranged into the range [mem_end, phys_end).
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct AddressSpaceLayout {
+ /// end of guest physical address
+ pub phys_end: u64,
+ /// start of guest memory address
+ pub mem_start: u64,
+ /// end of guest memory address
+ pub mem_end: u64,
+ /// end of usable memory address
+ pub usable_end: u64,
+}
+
+impl AddressSpaceLayout {
+ /// Create a new instance of `AddressSpaceLayout`.
+ pub fn new(phys_end: u64, mem_start: u64, mem_end: u64) -> Self {
+ AddressSpaceLayout {
+ phys_end,
+ mem_start,
+ mem_end,
+ usable_end: *USABLE_END,
+ }
+ }
+
+ /// Check whether a region is valid under the constraints of the layout.
+ pub fn is_region_valid(&self, region: &AddressSpaceRegion) -> bool {
+ let region_end = match region.base.0.checked_add(region.size) {
+ None => return false,
+ Some(v) => v,
+ };
+
+ match region.ty {
+ AddressSpaceRegionType::DefaultMemory => {
+ if region.base.0 < self.mem_start || region_end > self.mem_end {
+ return false;
+ }
+ }
+ AddressSpaceRegionType::DeviceMemory | AddressSpaceRegionType::DAXMemory => {
+ if region.base.0 < self.mem_end || region_end > self.phys_end {
+ return false;
+ }
+ }
+ }
+
+ true
+ }
+}
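+
+// Example (informal sketch): `AddressSpaceLayout::new(0x2_0000_0000, 0x0, 0x1_0000_0000)`
+// describes a guest whose RAM lives below 4 GiB; `DefaultMemory` regions must then fall inside
+// [0x0, 0x1_0000_0000] while `DeviceMemory`/`DAXMemory` regions must fall inside
+// [0x1_0000_0000, 0x2_0000_0000].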
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use vm_memory::GuestAddress;
+
+ #[test]
+ fn test_is_region_valid() {
+ let layout = AddressSpaceLayout::new(0x1_0000_0000, 0x1000_0000, 0x2000_0000);
+
+ let region = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x0),
+ 0x1_0000,
+ );
+ assert!(!layout.is_region_valid(®ion));
+ let region = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x2000_0000),
+ 0x1_0000,
+ );
+ assert!(!layout.is_region_valid(®ion));
+ let region = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x1_0000),
+ 0x2000_0000,
+ );
+ assert!(!layout.is_region_valid(®ion));
+ let region = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(u64::MAX),
+ 0x1_0000_0000,
+ );
+ assert!(!layout.is_region_valid(®ion));
+ let region = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x1000_0000),
+ 0x1_0000,
+ );
+ assert!(layout.is_region_valid(®ion));
+
+ let region = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DeviceMemory,
+ GuestAddress(0x1000_0000),
+ 0x1_0000,
+ );
+ assert!(!layout.is_region_valid(®ion));
+ let region = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DeviceMemory,
+ GuestAddress(0x1_0000_0000),
+ 0x1_0000,
+ );
+ assert!(!layout.is_region_valid(®ion));
+ let region = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DeviceMemory,
+ GuestAddress(0x1_0000),
+ 0x1_0000_0000,
+ );
+ assert!(!layout.is_region_valid(®ion));
+ let region = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DeviceMemory,
+ GuestAddress(u64::MAX),
+ 0x1_0000_0000,
+ );
+ assert!(!layout.is_region_valid(®ion));
+ let region = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DeviceMemory,
+ GuestAddress(0x8000_0000),
+ 0x1_0000,
+ );
+ assert!(layout.is_region_valid(®ion));
+ }
+}
diff --git a/src/dragonball/src/dbs_address_space/src/lib.rs b/src/dragonball/src/dbs_address_space/src/lib.rs
new file mode 100644
index 000000000..7e38cbbdd
--- /dev/null
+++ b/src/dragonball/src/dbs_address_space/src/lib.rs
@@ -0,0 +1,87 @@
+// Copyright (C) 2021 Alibaba Cloud. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+#![deny(missing_docs)]
+
+//! Traits and Structs to manage guest physical address space for virtual machines.
+//!
+//! The [vm-memory](https://crates.io/crates/vm-memory) crate implements mechanisms to manage and
+//! access guest memory resident in the guest physical address space. In addition to guest memory,
+//! there may be other types of devices resident in the same guest physical address space.
+//!
+//! The `dbs-address-space` crate provides traits and structs to manage the guest physical address
+//! space for virtual machines, and mechanisms to coordinate all the devices resident in the
+//! guest physical address space.
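+//!
+//! A minimal usage sketch (assuming an architecture-specific layout and a prepared region list;
+//! see the crate README for a complete example):
+//!
+//! ```ignore
+//! let layout = AddressSpaceLayout::new(GUEST_PHYS_END, GUEST_MEM_START, GUEST_MEM_END);
+//! let space = AddressSpace::from_regions(regions, layout);
+//! assert!(space.last_addr().0 < GUEST_PHYS_END);
+//! ```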
+
+use vm_memory::GuestUsize;
+
+mod address_space;
+pub use self::address_space::{AddressSpace, AddressSpaceBase};
+
+mod layout;
+pub use layout::{AddressSpaceLayout, USABLE_END};
+
+mod memory;
+pub use memory::{GuestMemoryHybrid, GuestMemoryManager, GuestRegionHybrid, GuestRegionRaw};
+
+mod numa;
+pub use self::numa::{NumaIdTable, NumaNode, NumaNodeInfo, MPOL_MF_MOVE, MPOL_PREFERRED};
+
+mod region;
+pub use region::{AddressSpaceRegion, AddressSpaceRegionType};
+
+/// Errors associated with virtual machine address space management.
+#[derive(Debug, thiserror::Error)]
+pub enum AddressSpaceError {
+ /// Invalid address space region type.
+ #[error("invalid address space region type")]
+ InvalidRegionType,
+
+ /// Invalid address range.
+ #[error("invalid address space region (0x{0:x}, 0x{1:x})")]
+ InvalidAddressRange(u64, GuestUsize),
+
+ /// Invalid guest memory source type.
+ #[error("invalid memory source type {0}")]
+ InvalidMemorySourceType(String),
+
+ /// Failed to create memfd to map anonymous memory.
+ #[error("can not create memfd to map anonymous memory")]
+ CreateMemFd(#[source] nix::Error),
+
+ /// Failed to open memory file.
+ #[error("can not open memory file")]
+ OpenFile(#[source] std::io::Error),
+
+ /// Failed to create directory.
+ #[error("can not create directory")]
+ CreateDir(#[source] std::io::Error),
+
+ /// Failed to set size for memory file.
+ #[error("can not set size for memory file")]
+ SetFileSize(#[source] std::io::Error),
+
+ /// Failed to unlink memory file.
+ #[error("can not unlink memory file")]
+ UnlinkFile(#[source] nix::Error),
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_error_code() {
+ let e = AddressSpaceError::InvalidRegionType;
+
+ assert_eq!(format!("{e}"), "invalid address space region type");
+ assert_eq!(format!("{e:?}"), "InvalidRegionType");
+ assert_eq!(
+ format!(
+ "{}",
+ AddressSpaceError::InvalidMemorySourceType("test".to_string())
+ ),
+ "invalid memory source type test"
+ );
+ }
+}
diff --git a/src/dragonball/src/dbs_address_space/src/memory/hybrid.rs b/src/dragonball/src/dbs_address_space/src/memory/hybrid.rs
new file mode 100644
index 000000000..87a09749e
--- /dev/null
+++ b/src/dragonball/src/dbs_address_space/src/memory/hybrid.rs
@@ -0,0 +1,1105 @@
+// Copyright (C) 2022 Alibaba Cloud. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::io::{Read, Write};
+use std::sync::atomic::Ordering;
+use std::sync::Arc;
+
+use vm_memory::bitmap::{Bitmap, BS};
+use vm_memory::guest_memory::GuestMemoryIterator;
+use vm_memory::mmap::{Error, NewBitmap};
+use vm_memory::{
+ guest_memory, AtomicAccess, Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion,
+ GuestRegionMmap, GuestUsize, MemoryRegionAddress, VolatileSlice,
+};
+
+use crate::GuestRegionRaw;
+
+/// An adapter for different concrete implementations of `GuestMemoryRegion`.
+#[derive(Debug)]
+pub enum GuestRegionHybrid<B = ()> {
+ /// Region of type `GuestRegionMmap`.
+ Mmap(GuestRegionMmap<B>),
+ /// Region of type `GuestRegionRaw`.
+ Raw(GuestRegionRaw<B>),
+}
+
+impl<B: Bitmap> GuestRegionHybrid<B> {
+ /// Create a `GuestRegionHybrid` object from `GuestRegionMmap` object.
+ pub fn from_mmap_region(region: GuestRegionMmap<B>) -> Self {
+ GuestRegionHybrid::Mmap(region)
+ }
+
+ /// Create a `GuestRegionHybrid` object from `GuestRegionRaw` object.
+ pub fn from_raw_region(region: GuestRegionRaw<B>) -> Self {
+ GuestRegionHybrid::Raw(region)
+ }
+}
+
+impl<B: Bitmap> Bytes<MemoryRegionAddress> for GuestRegionHybrid<B> {
+ type E = guest_memory::Error;
+
+ fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.write(buf, addr),
+ GuestRegionHybrid::Raw(region) => region.write(buf, addr),
+ }
+ }
+
+ fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.read(buf, addr),
+ GuestRegionHybrid::Raw(region) => region.read(buf, addr),
+ }
+ }
+
+ fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.write_slice(buf, addr),
+ GuestRegionHybrid::Raw(region) => region.write_slice(buf, addr),
+ }
+ }
+
+ fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.read_slice(buf, addr),
+ GuestRegionHybrid::Raw(region) => region.read_slice(buf, addr),
+ }
+ }
+
+ fn read_from<F>(
+ &self,
+ addr: MemoryRegionAddress,
+ src: &mut F,
+ count: usize,
+ ) -> guest_memory::Result<usize>
+ where
+ F: Read,
+ {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.read_from(addr, src, count),
+ GuestRegionHybrid::Raw(region) => region.read_from(addr, src, count),
+ }
+ }
+
+ fn read_exact_from<F>(
+ &self,
+ addr: MemoryRegionAddress,
+ src: &mut F,
+ count: usize,
+ ) -> guest_memory::Result<()>
+ where
+ F: Read,
+ {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.read_exact_from(addr, src, count),
+ GuestRegionHybrid::Raw(region) => region.read_exact_from(addr, src, count),
+ }
+ }
+
+ fn write_to<F>(
+ &self,
+ addr: MemoryRegionAddress,
+ dst: &mut F,
+ count: usize,
+ ) -> guest_memory::Result<usize>
+ where
+ F: Write,
+ {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.write_to(addr, dst, count),
+ GuestRegionHybrid::Raw(region) => region.write_to(addr, dst, count),
+ }
+ }
+
+ fn write_all_to<F>(
+ &self,
+ addr: MemoryRegionAddress,
+ dst: &mut F,
+ count: usize,
+ ) -> guest_memory::Result<()>
+ where
+ F: Write,
+ {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.write_all_to(addr, dst, count),
+ GuestRegionHybrid::Raw(region) => region.write_all_to(addr, dst, count),
+ }
+ }
+
+ fn store<T: AtomicAccess>(
+ &self,
+ val: T,
+ addr: MemoryRegionAddress,
+ order: Ordering,
+ ) -> guest_memory::Result<()> {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.store(val, addr, order),
+ GuestRegionHybrid::Raw(region) => region.store(val, addr, order),
+ }
+ }
+
+ fn load<T: AtomicAccess>(
+ &self,
+ addr: MemoryRegionAddress,
+ order: Ordering,
+ ) -> guest_memory::Result<T> {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.load(addr, order),
+ GuestRegionHybrid::Raw(region) => region.load(addr, order),
+ }
+ }
+}
+
+impl<B: Bitmap> GuestMemoryRegion for GuestRegionHybrid<B> {
+ type B = B;
+
+ fn len(&self) -> GuestUsize {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.len(),
+ GuestRegionHybrid::Raw(region) => region.len(),
+ }
+ }
+
+ fn start_addr(&self) -> GuestAddress {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.start_addr(),
+ GuestRegionHybrid::Raw(region) => region.start_addr(),
+ }
+ }
+
+ fn bitmap(&self) -> &Self::B {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.bitmap(),
+ GuestRegionHybrid::Raw(region) => region.bitmap(),
+ }
+ }
+
+ fn get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8> {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.get_host_address(addr),
+ GuestRegionHybrid::Raw(region) => region.get_host_address(addr),
+ }
+ }
+
+ fn file_offset(&self) -> Option<&FileOffset> {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.file_offset(),
+ GuestRegionHybrid::Raw(region) => region.file_offset(),
+ }
+ }
+
+ unsafe fn as_slice(&self) -> Option<&[u8]> {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.as_slice(),
+ GuestRegionHybrid::Raw(region) => region.as_slice(),
+ }
+ }
+
+ unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.as_mut_slice(),
+ GuestRegionHybrid::Raw(region) => region.as_mut_slice(),
+ }
+ }
+
+ fn get_slice(
+ &self,
+ offset: MemoryRegionAddress,
+ count: usize,
+ ) -> guest_memory::Result<VolatileSlice<BS<Self::B>>> {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.get_slice(offset, count),
+ GuestRegionHybrid::Raw(region) => region.get_slice(offset, count),
+ }
+ }
+
+ #[cfg(target_os = "linux")]
+ fn is_hugetlbfs(&self) -> Option<bool> {
+ match self {
+ GuestRegionHybrid::Mmap(region) => region.is_hugetlbfs(),
+ GuestRegionHybrid::Raw(region) => region.is_hugetlbfs(),
+ }
+ }
+}
+
+/// [`GuestMemory`](trait.GuestMemory.html) implementation that manages hybrid types of guest memory
+/// regions.
+///
+/// Represents the entire physical memory of the guest by tracking all its memory regions.
+/// Each region is an instance of `GuestRegionHybrid`.
+#[derive(Clone, Debug, Default)]
+pub struct GuestMemoryHybrid<B = ()> {
+ pub(crate) regions: Vec<Arc<GuestRegionHybrid<B>>>,
+}
+
+impl<B: NewBitmap> GuestMemoryHybrid<B> {
+ /// Creates an empty `GuestMemoryHybrid` instance.
+ pub fn new() -> Self {
+ Self::default()
+ }
+}
+
+impl<B: Bitmap> GuestMemoryHybrid<B> {
+ /// Creates a new `GuestMemoryHybrid` from a vector of regions.
+ ///
+ /// # Arguments
+ ///
+ /// * `regions` - The vector of regions.
+ /// The regions shouldn't overlap and they should be sorted
+ /// by the starting address.
+ pub fn from_regions(mut regions: Vec<GuestRegionHybrid<B>>) -> Result<Self, Error> {
+ Self::from_arc_regions(regions.drain(..).map(Arc::new).collect())
+ }
+
+ /// Creates a new `GuestMemoryHybrid` from a vector of Arc regions.
+ ///
+ /// Similar to the constructor `from_regions()` as it returns a
+ /// `GuestMemoryHybrid`. The need for this constructor is to provide a way for
+ /// consumer of this API to create a new `GuestMemoryHybrid` based on existing
+ /// regions coming from an existing `GuestMemoryHybrid` instance.
+ ///
+ /// # Arguments
+ ///
+ /// * `regions` - The vector of `Arc` regions.
+ /// The regions shouldn't overlap and they should be sorted
+ /// by the starting address.
+ pub fn from_arc_regions(regions: Vec<Arc<GuestRegionHybrid<B>>>) -> Result<Self, Error> {
+ if regions.is_empty() {
+ return Err(Error::NoMemoryRegion);
+ }
+
+ for window in regions.windows(2) {
+ let prev = &window[0];
+ let next = &window[1];
+
+ if prev.start_addr() > next.start_addr() {
+ return Err(Error::UnsortedMemoryRegions);
+ }
+
+ if prev.last_addr() >= next.start_addr() {
+ return Err(Error::MemoryRegionOverlap);
+ }
+ }
+
+ Ok(Self { regions })
+ }
+
+ /// Insert a region into the `GuestMemoryHybrid` object and return a new `GuestMemoryHybrid`.
+ ///
+ /// # Arguments
+ /// * `region`: the memory region to insert into the guest memory object.
+ pub fn insert_region(
+ &self,
+ region: Arc<GuestRegionHybrid<B>>,
+ ) -> Result<GuestMemoryHybrid<B>, Error> {
+ let mut regions = self.regions.clone();
+ regions.push(region);
+ regions.sort_by_key(|x| x.start_addr());
+
+ Self::from_arc_regions(regions)
+ }
+
+ /// Remove a region from the `GuestMemoryHybrid` object and return a new `GuestMemoryHybrid`
+ /// on success, together with the removed region.
+ ///
+ /// # Arguments
+ /// * `base`: base address of the region to be removed
+ /// * `size`: size of the region to be removed
+ pub fn remove_region(
+ &self,
+ base: GuestAddress,
+ size: GuestUsize,
+ ) -> Result<(GuestMemoryHybrid<B>, Arc<GuestRegionHybrid<B>>), Error> {
+ if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) {
+ if self.regions.get(region_index).unwrap().len() as GuestUsize == size {
+ let mut regions = self.regions.clone();
+ let region = regions.remove(region_index);
+ return Ok((Self { regions }, region));
+ }
+ }
+
+ Err(Error::InvalidGuestRegion)
+ }
+}
+
+/// An iterator over the elements of `GuestMemoryHybrid`.
+///
+/// This struct is created by `GuestMemory::iter()`. See its documentation for more.
+pub struct Iter<'a, B>(std::slice::Iter<'a, Arc<GuestRegionHybrid<B>>>);
+
+impl<'a, B> Iterator for Iter<'a, B> {
+ type Item = &'a GuestRegionHybrid<B>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.0.next().map(AsRef::as_ref)
+ }
+}
+
+impl<'a, B: 'a> GuestMemoryIterator<'a, GuestRegionHybrid<B>> for GuestMemoryHybrid<B> {
+ type Iter = Iter<'a, B>;
+}
+
+impl<B: Bitmap> GuestMemory for GuestMemoryHybrid<B> {
+ type R = GuestRegionHybrid<B>;
+
+ type I = Self;
+
+ fn num_regions(&self) -> usize {
+ self.regions.len()
+ }
+
+ fn find_region(&self, addr: GuestAddress) -> Option<&GuestRegionHybrid<B>> {
+ let index = match self.regions.binary_search_by_key(&addr, |x| x.start_addr()) {
+ Ok(x) => Some(x),
+ // Within the closest region with starting address < addr
+ Err(x) if (x > 0 && addr <= self.regions[x - 1].last_addr()) => Some(x - 1),
+ _ => None,
+ };
+ index.map(|x| self.regions[x].as_ref())
+ }
+
+ fn iter(&self) -> Iter<B> {
+ Iter(self.regions.iter())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::io::Seek;
+ use vm_memory::{GuestMemoryError, MmapRegion};
+ use vmm_sys_util::tempfile::TempFile;
+
+ #[test]
+ fn test_region_new() {
+ let start_addr = GuestAddress(0x0);
+
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x400).unwrap(), start_addr).unwrap();
+ let guest_region = GuestRegionHybrid::from_mmap_region(mmap_reg);
+
+ assert_eq!(guest_region.start_addr(), start_addr);
+ assert_eq!(guest_region.len(), 0x400);
+
+ let mut buf = [0u8; 1024];
+ let raw_region =
+ unsafe { GuestRegionRaw::<()>::new(start_addr, &mut buf as *mut _, 0x800) };
+ let guest_region = GuestRegionHybrid::from_raw_region(raw_region);
+
+ assert_eq!(guest_region.start_addr(), start_addr);
+ assert_eq!(guest_region.len(), 0x800);
+ }
+
+ #[test]
+ fn test_write_and_read_on_mmap_region() {
+ let start_addr = GuestAddress(0x0);
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x800).unwrap(), start_addr).unwrap();
+ let guest_region = GuestRegionHybrid::from_mmap_region(mmap_reg);
+ let buf_to_write = [0xF0u8; 0x400];
+ let write_addr = MemoryRegionAddress(0x400);
+
+ // Normal case.
+ let number_of_bytes_write = guest_region.write(&buf_to_write, write_addr).unwrap();
+ assert_eq!(number_of_bytes_write, 0x400);
+ let mut buf_read = [0u8; 0x400];
+ let number_of_bytes_read = guest_region.read(&mut buf_read, write_addr).unwrap();
+ assert_eq!(number_of_bytes_read, 0x400);
+ assert_eq!(buf_read, [0xF0u8; 0x400]);
+
+ // Error invalid backend address case in write().
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_region
+ .write(&buf_to_write, invalid_addr)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+
+ // Error invalid backend address case in read().
+ assert!(matches!(
+ guest_region
+ .read(&mut buf_read, invalid_addr)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+ }
+
+ #[test]
+ fn test_write_and_read_on_raw_region() {
+ let start_addr = GuestAddress(0x0);
+ let mut buf_of_raw_region = [0u8; 0x800];
+ let raw_region = unsafe {
+ GuestRegionRaw::<()>::new(start_addr, &mut buf_of_raw_region as *mut _, 0x800)
+ };
+ let guest_region = GuestRegionHybrid::from_raw_region(raw_region);
+ let buf_to_write = [0xF0u8; 0x400];
+ let write_addr = MemoryRegionAddress(0x400);
+
+ // Normal case.
+ let number_of_bytes_write = guest_region.write(&buf_to_write, write_addr).unwrap();
+ assert_eq!(number_of_bytes_write, 0x400);
+ let mut buf_read = [0u8; 0x400];
+ let number_of_bytes_read = guest_region.read(&mut buf_read, write_addr).unwrap();
+ assert_eq!(number_of_bytes_read, 0x400);
+ assert_eq!(buf_read, [0xF0u8; 0x400]);
+
+ // Error invalid backend address case in write().
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_region
+ .write(&buf_to_write, invalid_addr)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+
+ // Error invalid backend address case in read().
+ assert!(matches!(
+ guest_region
+ .read(&mut buf_read, invalid_addr)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+ }
+
+ #[test]
+ fn test_write_slice_and_read_slice_on_mmap_region() {
+ let start_addr = GuestAddress(0x0);
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x800).unwrap(), start_addr).unwrap();
+ let guest_region = GuestRegionHybrid::from_mmap_region(mmap_reg);
+ let buf_to_write = [0xF0u8; 0x400];
+ let write_addr = MemoryRegionAddress(0x400);
+
+ // Normal case.
+ guest_region.write_slice(&buf_to_write, write_addr).unwrap();
+ let mut buf_read = [0x0u8; 0x400];
+ guest_region.read_slice(&mut buf_read, write_addr).unwrap();
+ assert_eq!(buf_read, [0xF0u8; 0x400]);
+
+ // Error invalid backend address case in write_slice().
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_region
+ .write_slice(&buf_to_write, invalid_addr)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+
+ // Error partial buffer case in write_slice().
+ let insufficient_addr = MemoryRegionAddress(0x600);
+ assert_eq!(
+ format!(
+ "{:?}",
+ guest_region
+ .write_slice(&buf_to_write, insufficient_addr)
+ .err()
+ .unwrap()
+ ),
+ format!(
+ "PartialBuffer {{ expected: {:?}, completed: {:?} }}",
+ buf_to_write.len(),
+ guest_region.len() as usize - 0x600_usize
+ )
+ );
+
+ // Error invalid backend address case in read_slice().
+ let invalid_addr = MemoryRegionAddress(0x900);
+ let mut buf_read = [0x0u8; 0x400];
+ assert!(matches!(
+ guest_region
+ .read_slice(&mut buf_read, invalid_addr)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+
+ // Error partial buffer case in read_slice().
+ let insufficient_addr = MemoryRegionAddress(0x600);
+ let mut buf_read = [0x0u8; 0x400];
+ assert_eq!(
+ format!(
+ "{:?}",
+ guest_region
+ .read_slice(&mut buf_read, insufficient_addr)
+ .err()
+ .unwrap()
+ ),
+ format!(
+ "PartialBuffer {{ expected: {:?}, completed: {:?} }}",
+ buf_to_write.len(),
+ guest_region.len() as usize - 0x600_usize
+ )
+ );
+ assert_eq!(
+ {
+ let mut buf = [0x0u8; 0x400];
+ for cell in buf.iter_mut().take(0x200) {
+ *cell = 0xF0;
+ }
+ buf
+ },
+ buf_read
+ );
+ }
+
+ #[test]
+ fn test_write_and_read_slice_on_raw_region() {
+ let start_addr = GuestAddress(0x0);
+ let mut buf_of_raw_region = [0u8; 0x800];
+ let raw_region = unsafe {
+ GuestRegionRaw::<()>::new(start_addr, &mut buf_of_raw_region as *mut _, 0x800)
+ };
+ let guest_region = GuestRegionHybrid::from_raw_region(raw_region);
+ let buf_to_write = [0xF0u8; 0x400];
+ let write_addr = MemoryRegionAddress(0x400);
+
+ // Normal case.
+ guest_region.write_slice(&buf_to_write, write_addr).unwrap();
+ let mut buf_read = [0x0u8; 0x400];
+ guest_region.read_slice(&mut buf_read, write_addr).unwrap();
+ assert_eq!(buf_read, [0xF0u8; 0x400]);
+
+ // Error invalid backend address case in write_slice().
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_region
+ .write_slice(&buf_to_write, invalid_addr)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+
+ // Error partial buffer case in write_slice().
+ let insufficient_addr = MemoryRegionAddress(0x600);
+ assert_eq!(
+ format!(
+ "{:?}",
+ guest_region
+ .write_slice(&buf_to_write, insufficient_addr)
+ .err()
+ .unwrap()
+ ),
+ format!(
+ "PartialBuffer {{ expected: {:?}, completed: {:?} }}",
+ buf_to_write.len(),
+ guest_region.len() as usize - 0x600_usize
+ )
+ );
+
+ // Error invalid backend address case in read_slice().
+ let invalid_addr = MemoryRegionAddress(0x900);
+ let mut buf_read = [0x0u8; 0x400];
+ assert!(matches!(
+ guest_region
+ .read_slice(&mut buf_read, invalid_addr)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+
+ // Error partial buffer case in read_slice().
+ let insufficient_addr = MemoryRegionAddress(0x600);
+ let mut buf_read = [0x0u8; 0x400];
+ assert_eq!(
+ format!(
+ "{:?}",
+ guest_region
+ .read_slice(&mut buf_read, insufficient_addr)
+ .err()
+ .unwrap()
+ ),
+ format!(
+ "PartialBuffer {{ expected: {:?}, completed: {:?} }}",
+ buf_to_write.len(),
+ guest_region.len() as usize - 0x600_usize
+ )
+ );
+ assert_eq!(
+ {
+ let mut buf = [0x0u8; 0x400];
+ for cell in buf.iter_mut().take(0x200) {
+ *cell = 0xF0;
+ }
+ buf
+ },
+ buf_read
+ );
+ }
+
+ #[test]
+ fn test_read_from_and_write_to_on_mmap_region() {
+ let start_addr = GuestAddress(0x0);
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x800).unwrap(), start_addr).unwrap();
+ let guest_region = GuestRegionHybrid::from_mmap_region(mmap_reg);
+ let write_addr = MemoryRegionAddress(0x400);
+ let original_content = b"hello world";
+ let size_of_file = original_content.len();
+
+ // Normal case.
+ let mut file_to_write_mmap_region = TempFile::new().unwrap().into_file();
+ file_to_write_mmap_region
+ .set_len(size_of_file as u64)
+ .unwrap();
+ file_to_write_mmap_region
+ .write_all(original_content)
+ .unwrap();
+ // Rewind file pointer after write operation.
+ file_to_write_mmap_region.rewind().unwrap();
+ guest_region
+ .read_from(write_addr, &mut file_to_write_mmap_region, size_of_file)
+ .unwrap();
+ let mut file_read_from_mmap_region = TempFile::new().unwrap().into_file();
+ file_read_from_mmap_region
+ .set_len(size_of_file as u64)
+ .unwrap();
+ guest_region
+ .write_all_to(write_addr, &mut file_read_from_mmap_region, size_of_file)
+ .unwrap();
+ // Rewind file pointer after write operation.
+ file_read_from_mmap_region.rewind().unwrap();
+ let mut content = String::new();
+ file_read_from_mmap_region
+ .read_to_string(&mut content)
+ .unwrap();
+ assert_eq!(content.as_bytes(), original_content);
+ assert_eq!(
+ file_read_from_mmap_region.metadata().unwrap().len(),
+ size_of_file as u64
+ );
+
+ // Error invalid backend address case in read_from() on mmap region.
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_region
+ .read_from(invalid_addr, &mut file_to_write_mmap_region, size_of_file)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+
+ // Error invalid backend address case in write_to() on mmap region.
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_region
+ .write_to(invalid_addr, &mut file_read_from_mmap_region, size_of_file)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+ }
+
+ #[test]
+ fn test_read_from_and_write_to_on_raw_region() {
+ let start_addr = GuestAddress(0x0);
+ let mut buf_of_raw_region = [0u8; 0x800];
+ let raw_region = unsafe {
+ GuestRegionRaw::<()>::new(start_addr, &mut buf_of_raw_region as *mut _, 0x800)
+ };
+ let guest_region = GuestRegionHybrid::from_raw_region(raw_region);
+ let write_addr = MemoryRegionAddress(0x400);
+ let original_content = b"hello world";
+ let size_of_file = original_content.len();
+
+ // Normal case.
+ let mut file_to_write_mmap_region = TempFile::new().unwrap().into_file();
+ file_to_write_mmap_region
+ .set_len(size_of_file as u64)
+ .unwrap();
+ file_to_write_mmap_region
+ .write_all(original_content)
+ .unwrap();
+ // Rewind file pointer after write operation.
+ file_to_write_mmap_region.rewind().unwrap();
+ guest_region
+ .read_from(write_addr, &mut file_to_write_mmap_region, size_of_file)
+ .unwrap();
+ let mut file_read_from_mmap_region = TempFile::new().unwrap().into_file();
+ file_read_from_mmap_region
+ .set_len(size_of_file as u64)
+ .unwrap();
+ guest_region
+ .write_all_to(write_addr, &mut file_read_from_mmap_region, size_of_file)
+ .unwrap();
+ // Rewind file pointer after write operation.
+ file_read_from_mmap_region.rewind().unwrap();
+ let mut content = String::new();
+ file_read_from_mmap_region
+ .read_to_string(&mut content)
+ .unwrap();
+ assert_eq!(content.as_bytes(), original_content);
+ assert_eq!(
+ file_read_from_mmap_region.metadata().unwrap().len(),
+ size_of_file as u64
+ );
+
+ // Error invalid backend address case in read_from() on raw region.
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_region
+ .read_from(invalid_addr, &mut file_to_write_mmap_region, size_of_file)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+
+ // Error invalid backend address case in write_to() on raw region.
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_region
+ .write_to(invalid_addr, &mut file_read_from_mmap_region, size_of_file)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+ }
+
+ #[test]
+ fn test_write_all_to_and_read_exact_from() {
+ let start_addr = GuestAddress(0x0);
+ let write_addr = MemoryRegionAddress(0x400);
+ let original_content = b"hello world";
+ let size_of_file = original_content.len();
+ // Preset a GuestRegionHybrid from a mmap region
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x800).unwrap(), start_addr).unwrap();
+ let guest_mmap_region = GuestRegionHybrid::from_mmap_region(mmap_reg);
+ // Preset a GuestRegionHybrid from a raw region
+ let mut buf_of_raw_region = [0u8; 0x800];
+ let raw_region = unsafe {
+ GuestRegionRaw::<()>::new(start_addr, &mut buf_of_raw_region as *mut _, 0x800)
+ };
+ let guest_raw_region = GuestRegionHybrid::from_raw_region(raw_region);
+
+ // Normal case on mmap region.
+ let mut file_to_write_mmap_region = TempFile::new().unwrap().into_file();
+ file_to_write_mmap_region
+ .set_len(size_of_file as u64)
+ .unwrap();
+ file_to_write_mmap_region
+ .write_all(original_content)
+ .unwrap();
+ file_to_write_mmap_region.rewind().unwrap();
+ guest_mmap_region
+ .read_exact_from(write_addr, &mut file_to_write_mmap_region, size_of_file)
+ .unwrap();
+ let mut file_read_from_mmap_region = TempFile::new().unwrap().into_file();
+ file_read_from_mmap_region
+ .set_len(size_of_file as u64)
+ .unwrap();
+ guest_mmap_region
+ .write_all_to(write_addr, &mut file_read_from_mmap_region, size_of_file)
+ .unwrap();
+ file_read_from_mmap_region.rewind().unwrap();
+ let mut content = String::new();
+ file_read_from_mmap_region
+ .read_to_string(&mut content)
+ .unwrap();
+ assert_eq!(content.as_bytes(), original_content);
+ assert_eq!(
+ file_read_from_mmap_region.metadata().unwrap().len(),
+ size_of_file as u64
+ );
+
+ // Normal case on raw region.
+ let mut file_to_write_raw_region = TempFile::new().unwrap().into_file();
+ file_to_write_raw_region
+ .set_len(size_of_file as u64)
+ .unwrap();
+ file_to_write_raw_region
+ .write_all(original_content)
+ .unwrap();
+ file_to_write_raw_region.rewind().unwrap();
+ guest_raw_region
+ .read_exact_from(write_addr, &mut file_to_write_raw_region, size_of_file)
+ .unwrap();
+ let mut file_read_from_raw_region = TempFile::new().unwrap().into_file();
+ file_read_from_raw_region
+ .set_len(size_of_file as u64)
+ .unwrap();
+ guest_raw_region
+ .write_all_to(write_addr, &mut file_read_from_raw_region, size_of_file)
+ .unwrap();
+ file_read_from_raw_region.rewind().unwrap();
+ let mut content = String::new();
+ file_read_from_raw_region
+ .read_to_string(&mut content)
+ .unwrap();
+ assert_eq!(content.as_bytes(), original_content);
+ assert_eq!(
+ file_read_from_raw_region.metadata().unwrap().len(),
+ size_of_file as u64
+ );
+
+ // Error invalid backend address case in read_exact_from() on mmap region.
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_mmap_region
+ .read_exact_from(invalid_addr, &mut file_to_write_mmap_region, size_of_file)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+
+ // Error invalid backend address case in write_all_to() on mmap region.
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_mmap_region
+ .write_all_to(invalid_addr, &mut file_read_from_mmap_region, size_of_file)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+
+ // Error invalid backend address case in read_exact_from() on raw region.
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_raw_region
+ .read_exact_from(invalid_addr, &mut file_to_write_raw_region, size_of_file)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+
+ // Error invalid backend address case in write_all_to() on raw region.
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_raw_region
+ .write_all_to(invalid_addr, &mut file_read_from_raw_region, size_of_file)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+ }
+
+ #[test]
+ fn test_store_and_load() {
+ let test_val = 0xFF;
+ let start_addr = GuestAddress(0x0);
+ let write_addr = MemoryRegionAddress(0x400);
+ // Preset a GuestRegionHybrid from a mmap region
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x800).unwrap(), start_addr).unwrap();
+ let guest_mmap_region = GuestRegionHybrid::from_mmap_region(mmap_reg);
+ // Preset a GuestRegionHybrid from a raw region
+ let mut buf_of_raw_region = [0u8; 0x800];
+ let raw_region = unsafe {
+ GuestRegionRaw::<()>::new(start_addr, &mut buf_of_raw_region as *mut _, 0x800)
+ };
+ let guest_raw_region = GuestRegionHybrid::from_raw_region(raw_region);
+
+ // Normal case.
+ guest_mmap_region
+ .store(test_val, write_addr, Ordering::Relaxed)
+ .unwrap();
+ let val_read_from_mmap_region: u64 = guest_mmap_region
+ .load(write_addr, Ordering::Relaxed)
+ .unwrap();
+ assert_eq!(val_read_from_mmap_region, test_val);
+ guest_raw_region
+ .store(test_val, write_addr, Ordering::Relaxed)
+ .unwrap();
+ let val_read_from_raw_region: u64 = guest_raw_region
+ .load(write_addr, Ordering::Relaxed)
+ .unwrap();
+ assert_eq!(val_read_from_raw_region, test_val);
+
+ // Error invalid backend address case in store() on mmap region.
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_mmap_region
+ .store(test_val, invalid_addr, Ordering::Relaxed)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+
+ // Error invalid backend address case in store() on raw region.
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_raw_region
+ .store(test_val, invalid_addr, Ordering::Relaxed)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+
+ // Error invalid backend address case in load() on mmap region.
+ assert!(matches!(
+ guest_mmap_region
+ .load::<u64>(invalid_addr, Ordering::Relaxed)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+
+ // Error invalid backend address case in load() on raw region.
+ assert!(matches!(
+ guest_raw_region
+ .load::<u64>(invalid_addr, Ordering::Relaxed)
+ .err()
+ .unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+ }
+
+ #[test]
+ fn test_bitmap() {
+ // TODO: #185 Need further and more detailed tests on the bitmap object.
+ let start_addr = GuestAddress(0x0);
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x800).unwrap(), start_addr).unwrap();
+ let guest_mmap_region = GuestRegionHybrid::from_mmap_region(mmap_reg);
+ let mut buf_of_raw_region = [0u8; 0x800];
+ let raw_region = unsafe {
+ GuestRegionRaw::<()>::new(start_addr, &mut buf_of_raw_region as *mut _, 0x800)
+ };
+ let guest_raw_region = GuestRegionHybrid::from_raw_region(raw_region);
+
+ assert_eq!(guest_mmap_region.bitmap(), guest_raw_region.bitmap());
+ }
+
+ #[test]
+ fn test_get_host_address_on_mmap_region() {
+ let start_addr = GuestAddress(0x0);
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x800).unwrap(), start_addr).unwrap();
+ let guest_region = GuestRegionHybrid::from_mmap_region(mmap_reg);
+
+ // Normal case.
+ let addr_1 = guest_region
+ .get_host_address(MemoryRegionAddress(0x0))
+ .unwrap();
+ let addr_2 = guest_region
+ .get_host_address(MemoryRegionAddress(0x400))
+ .unwrap();
+ assert_eq!(addr_1 as u64 + 0x400, addr_2 as u64);
+
+ // Error invalid backend address case.
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_region.get_host_address(invalid_addr).err().unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+ }
+
+ #[test]
+ fn test_get_host_address_on_raw_region() {
+ let start_addr = GuestAddress(0x0);
+ let mut buf_of_raw_region = [0u8; 0x800];
+ let raw_region = unsafe {
+ GuestRegionRaw::<()>::new(start_addr, &mut buf_of_raw_region as *mut _, 0x800)
+ };
+ let guest_region = GuestRegionHybrid::from_raw_region(raw_region);
+
+ // Normal case.
+ let addr_1 = guest_region
+ .get_host_address(MemoryRegionAddress(0x0))
+ .unwrap();
+ let addr_2 = guest_region
+ .get_host_address(MemoryRegionAddress(0x400))
+ .unwrap();
+ assert_eq!(addr_1 as u64 + 0x400, addr_2 as u64);
+
+ // Error invalid backend address case.
+ let invalid_addr = MemoryRegionAddress(0x900);
+ assert!(matches!(
+ guest_region.get_host_address(invalid_addr).err().unwrap(),
+ GuestMemoryError::InvalidBackendAddress
+ ));
+ }
+
+ // TODO: #186 The following functions are not yet implemented:
+ // - 'fn file_offset()'
+ // - 'unsafe fn as_slice()'
+ // - 'unsafe fn as_mut_slice()'
+ // Tests of these functions will be needed when they are implemented.
+
+ #[test]
+ fn test_guest_memory_mmap_get_slice() {
+ // Preset a GuestRegionHybrid from a mmap region
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x400).unwrap(), GuestAddress(0)).unwrap();
+ let guest_mmap_region = GuestRegionHybrid::from_mmap_region(mmap_reg);
+
+ // Normal case.
+ let slice_addr = MemoryRegionAddress(0x100);
+ let slice_size = 0x200;
+ let slice = guest_mmap_region.get_slice(slice_addr, slice_size).unwrap();
+ assert_eq!(slice.len(), slice_size);
+
+ // Empty slice.
+ let slice_addr = MemoryRegionAddress(0x200);
+ let slice_size = 0x0;
+ let slice = guest_mmap_region.get_slice(slice_addr, slice_size).unwrap();
+ assert!(slice.is_empty());
+
+ // Error case when slice_size is beyond the boundary.
+ let slice_addr = MemoryRegionAddress(0x300);
+ let slice_size = 0x200;
+ assert!(guest_mmap_region.get_slice(slice_addr, slice_size).is_err());
+ }
+
+ #[test]
+ fn test_from_regions_on_guest_memory_hybrid() {
+ // Normal case.
+ let mut regions = Vec::<GuestRegionHybrid<()>>::new();
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x100).unwrap(), GuestAddress(0x100))
+ .unwrap();
+ regions.push(GuestRegionHybrid::Mmap(mmap_reg));
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x100).unwrap(), GuestAddress(0x200))
+ .unwrap();
+ regions.push(GuestRegionHybrid::Mmap(mmap_reg));
+ let guest_region = GuestMemoryHybrid::<()>::from_regions(regions).unwrap();
+ assert_eq!(guest_region.regions[0].start_addr(), GuestAddress(0x100));
+ assert_eq!(guest_region.regions[1].start_addr(), GuestAddress(0x200));
+
+ // Error unsorted region case.
+ let mut regions = Vec::<GuestRegionHybrid<()>>::new();
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x400).unwrap(), GuestAddress(0x200))
+ .unwrap();
+ regions.push(GuestRegionHybrid::Mmap(mmap_reg));
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x400).unwrap(), GuestAddress(0x100))
+ .unwrap();
+ regions.push(GuestRegionHybrid::Mmap(mmap_reg));
+ let guest_region = GuestMemoryHybrid::<()>::from_regions(regions);
+ assert!(matches!(
+ guest_region.err().unwrap(),
+ Error::UnsortedMemoryRegions
+ ));
+
+ // Error no memory region case.
+ let regions = Vec::<GuestRegionHybrid<()>>::new();
+ let guest_region = GuestMemoryHybrid::<()>::from_regions(regions);
+ assert!(matches!(guest_region.err().unwrap(), Error::NoMemoryRegion));
+ }
+
+ #[test]
+ fn test_iterator_on_guest_region_hybrid() {
+ let mut regions = Vec::<GuestRegionHybrid<()>>::new();
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x100).unwrap(), GuestAddress(0x100))
+ .unwrap();
+ regions.push(GuestRegionHybrid::Mmap(mmap_reg));
+ let mmap_reg =
+ GuestRegionMmap::new(MmapRegion::<()>::new(0x100).unwrap(), GuestAddress(0x200))
+ .unwrap();
+ regions.push(GuestRegionHybrid::Mmap(mmap_reg));
+ let guest_region = GuestMemoryHybrid::<()>::from_regions(regions).unwrap();
+ let mut region = guest_region.iter();
+
+ assert_eq!(region.next().unwrap().start_addr(), GuestAddress(0x100));
+ assert_eq!(region.next().unwrap().start_addr(), GuestAddress(0x200));
+ }
+}
diff --git a/src/dragonball/src/dbs_address_space/src/memory/mod.rs b/src/dragonball/src/dbs_address_space/src/memory/mod.rs
new file mode 100644
index 000000000..371acda9d
--- /dev/null
+++ b/src/dragonball/src/dbs_address_space/src/memory/mod.rs
@@ -0,0 +1,193 @@
+// Copyright (C) 2022 Alibaba Cloud. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+//! Structs to manage guest memory for virtual machines.
+//!
+//! The `vm-memory` crate only provides traits and structs to access normal guest memory;
+//! it doesn't support special guest memory such as the virtio-fs/virtio-pmem DAX window.
+//! So this crate provides `GuestMemoryManager` on top of `vm-memory` as a uniform abstraction
+//! for all guest memory.
+//!
+//! It also provides interfaces to coordinate guest memory hotplug events.
+
+use std::str::FromStr;
+use std::sync::Arc;
+use vm_memory::{GuestAddressSpace, GuestMemoryAtomic, GuestMemoryLoadGuard, GuestMemoryMmap};
+
+mod raw_region;
+pub use raw_region::GuestRegionRaw;
+
+mod hybrid;
+pub use hybrid::{GuestMemoryHybrid, GuestRegionHybrid};
+
+/// Type of source to allocate memory for virtual machines.
+#[derive(Debug, Eq, PartialEq)]
+pub enum MemorySourceType {
+ /// File on HugeTlbFs.
+ FileOnHugeTlbFs,
+ /// mmap() without flag `MAP_HUGETLB`.
+ MmapAnonymous,
+ /// mmap() with flag `MAP_HUGETLB`.
+ MmapAnonymousHugeTlbFs,
+ /// memfd() without flag `MFD_HUGETLB`.
+ MemFdShared,
+ /// memfd() with flag `MFD_HUGETLB`.
+ MemFdOnHugeTlbFs,
+}
+
+impl MemorySourceType {
+ /// Check whether the memory source is backed by huge pages.
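+ ///
+ /// A minimal sketch of the intended usage (the strings follow the `FromStr` mapping
+ /// defined below):
+ ///
+ /// ```ignore
+ /// use std::str::FromStr;
+ ///
+ /// assert!(MemorySourceType::from_str("hugeshmem").unwrap().is_hugepage());
+ /// assert!(!MemorySourceType::from_str("shmem").unwrap().is_hugepage());
+ /// ```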
+ pub fn is_hugepage(&self) -> bool {
+ *self == Self::FileOnHugeTlbFs
+ || *self == Self::MmapAnonymousHugeTlbFs
+ || *self == Self::MemFdOnHugeTlbFs
+ }
+
+ /// Check whether the memory source is anonymous memory.
+ pub fn is_mmap_anonymous(&self) -> bool {
+ *self == Self::MmapAnonymous || *self == Self::MmapAnonymousHugeTlbFs
+ }
+}
+
+impl FromStr for MemorySourceType {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "hugetlbfs" => Ok(MemorySourceType::FileOnHugeTlbFs),
+ "memfd" => Ok(MemorySourceType::MemFdShared),
+ "shmem" => Ok(MemorySourceType::MemFdShared),
+ "hugememfd" => Ok(MemorySourceType::MemFdOnHugeTlbFs),
+ "hugeshmem" => Ok(MemorySourceType::MemFdOnHugeTlbFs),
+ "anon" => Ok(MemorySourceType::MmapAnonymous),
+ "mmap" => Ok(MemorySourceType::MmapAnonymous),
+ "hugeanon" => Ok(MemorySourceType::MmapAnonymousHugeTlbFs),
+ "hugemmap" => Ok(MemorySourceType::MmapAnonymousHugeTlbFs),
+ _ => Err(format!("unknown memory source type {s}")),
+ }
+ }
+}
+
+#[derive(Debug, Default)]
+struct GuestMemoryHotplugManager {}
+
+/// The `GuestMemoryManager` manages all guest memory for virtual machines.
+///
+/// The `GuestMemoryManager` fulfills several different responsibilities.
+/// - First, it manages different types of guest memory, such as normal guest memory and the
+/// virtio-fs/virtio-pmem DAX windows. Different clients may want to access different types of
+/// memory, so the manager maintains two GuestMemory objects: one contains all guest memory,
+/// the other contains only normal guest memory.
+/// - Second, it coordinates memory/DAX window hotplug events, so clients may register hooks
+/// to receive hotplug notifications.
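+///
+/// A minimal usage sketch (hotplug hooks are omitted; both views are still empty right
+/// after construction):
+///
+/// ```ignore
+/// use vm_memory::GuestAddressSpace;
+///
+/// let mgr = GuestMemoryManager::new();
+/// // All guest memory (normal RAM plus DAX windows) through the default hybrid view.
+/// let _all = mgr.memory();
+/// // Only normal guest RAM through the dedicated view.
+/// let _normal = mgr.get_normal_guest_memory().memory();
+/// ```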
+#[allow(unused)]
+#[derive(Debug, Clone)]
+pub struct GuestMemoryManager {
+ default: GuestMemoryAtomic<GuestMemoryHybrid>,
+ /// GuestMemory object hosting all guest memory.
+ hybrid: GuestMemoryAtomic<GuestMemoryHybrid>,
+ /// GuestMemory object for vIOMMU.
+ iommu: GuestMemoryAtomic<GuestMemoryHybrid>,
+ /// GuestMemory object hosting normal guest memory.
+ normal: GuestMemoryAtomic<GuestMemoryMmap>,
+ hotplug: Arc<GuestMemoryHotplugManager>,
+}
+
+impl GuestMemoryManager {
+ /// Create a new instance of `GuestMemoryManager`.
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ /// Get a reference to the normal `GuestMemory` object.
+ pub fn get_normal_guest_memory(&self) -> &GuestMemoryAtomic<GuestMemoryMmap> {
+ &self.normal
+ }
+
+ /// Try to downcast the `GuestAddressSpace` object to a `GuestMemoryManager` object.
+ pub fn to_manager<AS: GuestAddressSpace>(_m: &AS) -> Option<&Self> {
+ None
+ }
+}
+
+impl Default for GuestMemoryManager {
+ fn default() -> Self {
+ let hybrid = GuestMemoryAtomic::new(GuestMemoryHybrid::new());
+ let iommu = GuestMemoryAtomic::new(GuestMemoryHybrid::new());
+ let normal = GuestMemoryAtomic::new(GuestMemoryMmap::new());
+ // By default, expose the `GuestMemoryHybrid` object containing all guest memory.
+ let default = hybrid.clone();
+
+ GuestMemoryManager {
+ default,
+ hybrid,
+ iommu,
+ normal,
+ hotplug: Arc::new(GuestMemoryHotplugManager::default()),
+ }
+ }
+}
+
+impl GuestAddressSpace for GuestMemoryManager {
+ type M = GuestMemoryHybrid;
+ type T = GuestMemoryLoadGuard<GuestMemoryHybrid>;
+
+ fn memory(&self) -> Self::T {
+ self.default.memory()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_memory_source_type() {
+ assert_eq!(
+ MemorySourceType::from_str("hugetlbfs").unwrap(),
+ MemorySourceType::FileOnHugeTlbFs
+ );
+ assert_eq!(
+ MemorySourceType::from_str("memfd").unwrap(),
+ MemorySourceType::MemFdShared
+ );
+ assert_eq!(
+ MemorySourceType::from_str("shmem").unwrap(),
+ MemorySourceType::MemFdShared
+ );
+ assert_eq!(
+ MemorySourceType::from_str("hugememfd").unwrap(),
+ MemorySourceType::MemFdOnHugeTlbFs
+ );
+ assert_eq!(
+ MemorySourceType::from_str("hugeshmem").unwrap(),
+ MemorySourceType::MemFdOnHugeTlbFs
+ );
+ assert_eq!(
+ MemorySourceType::from_str("anon").unwrap(),
+ MemorySourceType::MmapAnonymous
+ );
+ assert_eq!(
+ MemorySourceType::from_str("mmap").unwrap(),
+ MemorySourceType::MmapAnonymous
+ );
+ assert_eq!(
+ MemorySourceType::from_str("hugeanon").unwrap(),
+ MemorySourceType::MmapAnonymousHugeTlbFs
+ );
+ assert_eq!(
+ MemorySourceType::from_str("hugemmap").unwrap(),
+ MemorySourceType::MmapAnonymousHugeTlbFs
+ );
+ assert!(MemorySourceType::from_str("test").is_err());
+ }
+
+ #[ignore]
+ #[test]
+ fn test_to_manager() {
+ let manager = GuestMemoryManager::new();
+ let mgr = GuestMemoryManager::to_manager(&manager).unwrap();
+
+ assert_eq!(&manager as *const _, mgr as *const _);
+ }
+}
diff --git a/src/dragonball/src/dbs_address_space/src/memory/raw_region.rs b/src/dragonball/src/dbs_address_space/src/memory/raw_region.rs
new file mode 100644
index 000000000..5af21ca3e
--- /dev/null
+++ b/src/dragonball/src/dbs_address_space/src/memory/raw_region.rs
@@ -0,0 +1,990 @@
+// Copyright (C) 2022 Alibaba Cloud. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::io::{Read, Write};
+use std::sync::atomic::Ordering;
+
+use vm_memory::bitmap::{Bitmap, BS};
+use vm_memory::mmap::NewBitmap;
+use vm_memory::volatile_memory::compute_offset;
+use vm_memory::{
+ guest_memory, volatile_memory, Address, AtomicAccess, Bytes, FileOffset, GuestAddress,
+ GuestMemoryRegion, GuestUsize, MemoryRegionAddress, VolatileSlice,
+};
+
+/// Guest memory region for virtio-fs DAX window.
+#[derive(Debug)]
+pub struct GuestRegionRaw<B = ()> {
+ guest_base: GuestAddress,
+ addr: *mut u8,
+ size: usize,
+ bitmap: B,
+}
+
+impl<B: NewBitmap> GuestRegionRaw<B> {
+ /// Create a `GuestRegionRaw` object from raw pointer.
+ ///
+ /// # Safety
+ /// Caller needs to ensure `addr` and `size` are valid with static lifetime.
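+ ///
+ /// # Example
+ ///
+ /// A minimal sketch; the backing buffer is leaked so it satisfies the `'static`
+ /// lifetime requirement stated above:
+ ///
+ /// ```ignore
+ /// use vm_memory::{GuestAddress, GuestMemoryRegion};
+ ///
+ /// let buf: &'static mut [u8] = Box::leak(vec![0u8; 0x1000].into_boxed_slice());
+ /// let region = unsafe {
+ ///     GuestRegionRaw::<()>::new(GuestAddress(0x10_0000), buf.as_mut_ptr(), buf.len())
+ /// };
+ /// assert_eq!(region.len(), 0x1000);
+ /// ```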
+ pub unsafe fn new(guest_base: GuestAddress, addr: *mut u8, size: usize) -> Self {
+ let bitmap = B::with_len(size);
+
+ GuestRegionRaw {
+ guest_base,
+ addr,
+ size,
+ bitmap,
+ }
+ }
+}
+
+impl<B: Bitmap> Bytes<MemoryRegionAddress> for GuestRegionRaw<B> {
+ type E = guest_memory::Error;
+
+ fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .write(buf, maddr)
+ .map_err(Into::into)
+ }
+
+ fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .read(buf, maddr)
+ .map_err(Into::into)
+ }
+
+ fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .write_slice(buf, maddr)
+ .map_err(Into::into)
+ }
+
+ fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .read_slice(buf, maddr)
+ .map_err(Into::into)
+ }
+
+ fn read_from<F>(
+ &self,
+ addr: MemoryRegionAddress,
+ src: &mut F,
+ count: usize,
+ ) -> guest_memory::Result<usize>
+ where
+ F: Read,
+ {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .read_from::<F>(maddr, src, count)
+ .map_err(Into::into)
+ }
+
+ fn read_exact_from<F>(
+ &self,
+ addr: MemoryRegionAddress,
+ src: &mut F,
+ count: usize,
+ ) -> guest_memory::Result<()>
+ where
+ F: Read,
+ {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .read_exact_from::<F>(maddr, src, count)
+ .map_err(Into::into)
+ }
+
+ fn write_to<F>(
+ &self,
+ addr: MemoryRegionAddress,
+ dst: &mut F,
+ count: usize,
+ ) -> guest_memory::Result<usize>
+ where
+ F: Write,
+ {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .write_to::<F>(maddr, dst, count)
+ .map_err(Into::into)
+ }
+
+ fn write_all_to<F>(
+ &self,
+ addr: MemoryRegionAddress,
+ dst: &mut F,
+ count: usize,
+ ) -> guest_memory::Result<()>
+ where
+ F: Write,
+ {
+ let maddr = addr.raw_value() as usize;
+ self.as_volatile_slice()
+ .unwrap()
+ .write_all_to::<F>(maddr, dst, count)
+ .map_err(Into::into)
+ }
+
+ fn store<T: AtomicAccess>(
+ &self,
+ val: T,
+ addr: MemoryRegionAddress,
+ order: Ordering,
+ ) -> guest_memory::Result<()> {
+ self.as_volatile_slice().and_then(|s| {
+ s.store(val, addr.raw_value() as usize, order)
+ .map_err(Into::into)
+ })
+ }
+
+ fn load<T: AtomicAccess>(
+ &self,
+ addr: MemoryRegionAddress,
+ order: Ordering,
+ ) -> guest_memory::Result<T> {
+ self.as_volatile_slice()
+ .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into))
+ }
+}
+
+impl<B: Bitmap> GuestMemoryRegion for GuestRegionRaw<B> {
+ type B = B;
+
+ fn len(&self) -> GuestUsize {
+ self.size as GuestUsize
+ }
+
+ fn start_addr(&self) -> GuestAddress {
+ self.guest_base
+ }
+
+ fn bitmap(&self) -> &Self::B {
+ &self.bitmap
+ }
+
+ fn get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8> {
+ // Not sure why wrapping_offset is not unsafe. Anyway this
+ // is safe because we've just range-checked addr using check_address.
+ self.check_address(addr)
+ .ok_or(guest_memory::Error::InvalidBackendAddress)
+ .map(|addr| self.addr.wrapping_offset(addr.raw_value() as isize))
+ }
+
+ fn file_offset(&self) -> Option<&FileOffset> {
+ None
+ }
+
+ unsafe fn as_slice(&self) -> Option<&[u8]> {
+ // This is safe because we mapped the area at addr ourselves, so this slice will not
+ // overflow. However, it is possible to alias.
+ Some(std::slice::from_raw_parts(self.addr, self.size))
+ }
+
+ unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
+ // This is safe because we mapped the area at addr ourselves, so this slice will not
+ // overflow. However, it is possible to alias.
+ Some(std::slice::from_raw_parts_mut(self.addr, self.size))
+ }
+
+ fn get_slice(
+ &self,
+ offset: MemoryRegionAddress,
+ count: usize,
+ ) -> guest_memory::Result<VolatileSlice<BS<B>>> {
+ let offset = offset.raw_value() as usize;
+ let end = compute_offset(offset, count)?;
+ if end > self.size {
+ return Err(volatile_memory::Error::OutOfBounds { addr: end }.into());
+ }
+
+ // Safe because we checked that offset + count was within our range and we only ever hand
+ // out volatile accessors.
+ Ok(unsafe {
+ VolatileSlice::with_bitmap(
+ (self.addr as usize + offset) as *mut _,
+ count,
+ self.bitmap.slice_at(offset),
+ )
+ })
+ }
+
+ #[cfg(target_os = "linux")]
+ fn is_hugetlbfs(&self) -> Option<bool> {
+ None
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ extern crate vmm_sys_util;
+
+ use super::*;
+ use crate::{GuestMemoryHybrid, GuestRegionHybrid};
+ use std::sync::Arc;
+ use vm_memory::{GuestAddressSpace, GuestMemory, VolatileMemory};
+
+ /*
+ use crate::bitmap::tests::test_guest_memory_and_region;
+ use crate::bitmap::AtomicBitmap;
+ use crate::GuestAddressSpace;
+
+ use std::fs::File;
+ use std::mem;
+ use std::path::Path;
+ use vmm_sys_util::tempfile::TempFile;
+
+ type GuestMemoryMmap = super::GuestMemoryMmap<()>;
+ type GuestRegionMmap = super::GuestRegionMmap<()>;
+ type MmapRegion = super::MmapRegion<()>;
+ */
+
+ #[test]
+ fn test_region_raw_new() {
+ let mut buf = [0u8; 1024];
+ let m =
+ unsafe { GuestRegionRaw::<()>::new(GuestAddress(0x10_0000), &mut buf as *mut _, 1024) };
+
+ assert_eq!(m.start_addr(), GuestAddress(0x10_0000));
+ assert_eq!(m.len(), 1024);
+ }
+
+ /*
+ fn check_guest_memory_mmap(
+ maybe_guest_mem: Result<GuestMemoryMmap, Error>,
+ expected_regions_summary: &[(GuestAddress, usize)],
+ ) {
+ assert!(maybe_guest_mem.is_ok());
+
+ let guest_mem = maybe_guest_mem.unwrap();
+ assert_eq!(guest_mem.num_regions(), expected_regions_summary.len());
+ let maybe_last_mem_reg = expected_regions_summary.last();
+ if let Some((region_addr, region_size)) = maybe_last_mem_reg {
+ let mut last_addr = region_addr.unchecked_add(*region_size as u64);
+ if last_addr.raw_value() != 0 {
+ last_addr = last_addr.unchecked_sub(1);
+ }
+ assert_eq!(guest_mem.last_addr(), last_addr);
+ }
+ for ((region_addr, region_size), mmap) in expected_regions_summary
+ .iter()
+ .zip(guest_mem.regions.iter())
+ {
+ assert_eq!(region_addr, &mmap.guest_base);
+ assert_eq!(region_size, &mmap.mapping.size());
+
+ assert!(guest_mem.find_region(*region_addr).is_some());
+ }
+ }
+
+ fn new_guest_memory_mmap(
+ regions_summary: &[(GuestAddress, usize)],
+ ) -> Result<GuestMemoryMmap, Error> {
+ GuestMemoryMmap::from_ranges(regions_summary)
+ }
+
+ fn new_guest_memory_mmap_from_regions(
+ regions_summary: &[(GuestAddress, usize)],
+ ) -> Result<GuestMemoryMmap, Error> {
+ GuestMemoryMmap::from_regions(
+ regions_summary
+ .iter()
+ .map(|(region_addr, region_size)| {
+ GuestRegionMmap::new(MmapRegion::new(*region_size).unwrap(), *region_addr)
+ .unwrap()
+ })
+ .collect(),
+ )
+ }
+
+ fn new_guest_memory_mmap_from_arc_regions(
+ regions_summary: &[(GuestAddress, usize)],
+ ) -> Result<GuestMemoryMmap, Error> {
+ GuestMemoryMmap::from_arc_regions(
+ regions_summary
+ .iter()
+ .map(|(region_addr, region_size)| {
+ Arc::new(
+ GuestRegionMmap::new(MmapRegion::new(*region_size).unwrap(), *region_addr)
+ .unwrap(),
+ )
+ })
+ .collect(),
+ )
+ }
+
+ fn new_guest_memory_mmap_with_files(
+ regions_summary: &[(GuestAddress, usize)],
+ ) -> Result<GuestMemoryMmap, Error> {
+ let regions: Vec<(GuestAddress, usize, Option<FileOffset>)> = regions_summary
+ .iter()
+ .map(|(region_addr, region_size)| {
+ let f = TempFile::new().unwrap().into_file();
+ f.set_len(*region_size as u64).unwrap();
+
+ (*region_addr, *region_size, Some(FileOffset::new(f, 0)))
+ })
+ .collect();
+
+ GuestMemoryMmap::from_ranges_with_files(®ions)
+ }
+ */
+
+ #[test]
+ fn slice_addr() {
+ let mut buf = [0u8; 1024];
+ let m =
+ unsafe { GuestRegionRaw::<()>::new(GuestAddress(0x10_0000), &mut buf as *mut _, 1024) };
+
+ let s = m.get_slice(MemoryRegionAddress(2), 3).unwrap();
+ assert_eq!(s.as_ptr(), &mut buf[2] as *mut _);
+ }
+
+ /*
+ #[test]
+ fn test_address_in_range() {
+ let f1 = TempFile::new().unwrap().into_file();
+ f1.set_len(0x400).unwrap();
+ let f2 = TempFile::new().unwrap().into_file();
+ f2.set_len(0x400).unwrap();
+
+ let start_addr1 = GuestAddress(0x0);
+ let start_addr2 = GuestAddress(0x800);
+ let guest_mem =
+ GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
+ let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
+ (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
+ (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
+ ])
+ .unwrap();
+
+ let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ for guest_mem in guest_mem_list.iter() {
+ assert!(guest_mem.address_in_range(GuestAddress(0x200)));
+ assert!(!guest_mem.address_in_range(GuestAddress(0x600)));
+ assert!(guest_mem.address_in_range(GuestAddress(0xa00)));
+ assert!(!guest_mem.address_in_range(GuestAddress(0xc00)));
+ }
+ }
+
+ #[test]
+ fn test_check_address() {
+ let f1 = TempFile::new().unwrap().into_file();
+ f1.set_len(0x400).unwrap();
+ let f2 = TempFile::new().unwrap().into_file();
+ f2.set_len(0x400).unwrap();
+
+ let start_addr1 = GuestAddress(0x0);
+ let start_addr2 = GuestAddress(0x800);
+ let guest_mem =
+ GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
+ let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
+ (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
+ (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
+ ])
+ .unwrap();
+
+ let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ for guest_mem in guest_mem_list.iter() {
+ assert_eq!(
+ guest_mem.check_address(GuestAddress(0x200)),
+ Some(GuestAddress(0x200))
+ );
+ assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None);
+ assert_eq!(
+ guest_mem.check_address(GuestAddress(0xa00)),
+ Some(GuestAddress(0xa00))
+ );
+ assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None);
+ }
+ }
+
+ #[test]
+ fn test_to_region_addr() {
+ let f1 = TempFile::new().unwrap().into_file();
+ f1.set_len(0x400).unwrap();
+ let f2 = TempFile::new().unwrap().into_file();
+ f2.set_len(0x400).unwrap();
+
+ let start_addr1 = GuestAddress(0x0);
+ let start_addr2 = GuestAddress(0x800);
+ let guest_mem =
+ GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
+ let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
+ (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
+ (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
+ ])
+ .unwrap();
+
+ let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ for guest_mem in guest_mem_list.iter() {
+ assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none());
+ let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap();
+ let (r1, addr1) = guest_mem.to_region_addr(GuestAddress(0xa00)).unwrap();
+ assert!(r0.as_ptr() == r1.as_ptr());
+ assert_eq!(addr0, MemoryRegionAddress(0));
+ assert_eq!(addr1, MemoryRegionAddress(0x200));
+ }
+ }
+
+ #[test]
+ fn test_get_host_address() {
+ let f1 = TempFile::new().unwrap().into_file();
+ f1.set_len(0x400).unwrap();
+ let f2 = TempFile::new().unwrap().into_file();
+ f2.set_len(0x400).unwrap();
+
+ let start_addr1 = GuestAddress(0x0);
+ let start_addr2 = GuestAddress(0x800);
+ let guest_mem =
+ GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
+ let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
+ (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
+ (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
+ ])
+ .unwrap();
+
+ let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ for guest_mem in guest_mem_list.iter() {
+ assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_err());
+ let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap();
+ let ptr1 = guest_mem.get_host_address(GuestAddress(0xa00)).unwrap();
+ assert_eq!(
+ ptr0,
+ guest_mem.find_region(GuestAddress(0x800)).unwrap().as_ptr()
+ );
+ assert_eq!(unsafe { ptr0.offset(0x200) }, ptr1);
+ }
+ }
+
+ #[test]
+ fn test_deref() {
+ let f = TempFile::new().unwrap().into_file();
+ f.set_len(0x400).unwrap();
+
+ let start_addr = GuestAddress(0x0);
+ let guest_mem = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
+ let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
+ start_addr,
+ 0x400,
+ Some(FileOffset::new(f, 0)),
+ )])
+ .unwrap();
+
+ let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ for guest_mem in guest_mem_list.iter() {
+ let sample_buf = &[1, 2, 3, 4, 5];
+
+ assert_eq!(guest_mem.write(sample_buf, start_addr).unwrap(), 5);
+ let slice = guest_mem
+ .find_region(GuestAddress(0))
+ .unwrap()
+ .as_volatile_slice()
+ .unwrap();
+
+ let buf = &mut [0, 0, 0, 0, 0];
+ assert_eq!(slice.read(buf, 0).unwrap(), 5);
+ assert_eq!(buf, sample_buf);
+ }
+ }
+
+ #[test]
+ fn test_read_u64() {
+ let f1 = TempFile::new().unwrap().into_file();
+ f1.set_len(0x1000).unwrap();
+ let f2 = TempFile::new().unwrap().into_file();
+ f2.set_len(0x1000).unwrap();
+
+ let start_addr1 = GuestAddress(0x0);
+ let start_addr2 = GuestAddress(0x1000);
+ let bad_addr = GuestAddress(0x2001);
+ let bad_addr2 = GuestAddress(0x1ffc);
+ let max_addr = GuestAddress(0x2000);
+
+ let gm =
+ GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
+ let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
+ (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))),
+ (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))),
+ ])
+ .unwrap();
+
+ let gm_list = vec![gm, gm_backed_by_file];
+ for gm in gm_list.iter() {
+ let val1: u64 = 0xaa55_aa55_aa55_aa55;
+ let val2: u64 = 0x55aa_55aa_55aa_55aa;
+ assert_eq!(
+ format!("{:?}", gm.write_obj(val1, bad_addr).err().unwrap()),
+ format!("InvalidGuestAddress({:?})", bad_addr,)
+ );
+ assert_eq!(
+ format!("{:?}", gm.write_obj(val1, bad_addr2).err().unwrap()),
+ format!(
+ "PartialBuffer {{ expected: {:?}, completed: {:?} }}",
+ mem::size_of::<u64>(),
+ max_addr.checked_offset_from(bad_addr2).unwrap()
+ )
+ );
+
+ gm.write_obj(val1, GuestAddress(0x500)).unwrap();
+ gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap();
+ let num1: u64 = gm.read_obj(GuestAddress(0x500)).unwrap();
+ let num2: u64 = gm.read_obj(GuestAddress(0x1000 + 32)).unwrap();
+ assert_eq!(val1, num1);
+ assert_eq!(val2, num2);
+ }
+ }
+
+ #[test]
+ fn write_and_read() {
+ let f = TempFile::new().unwrap().into_file();
+ f.set_len(0x400).unwrap();
+
+ let mut start_addr = GuestAddress(0x1000);
+ let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
+ let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
+ start_addr,
+ 0x400,
+ Some(FileOffset::new(f, 0)),
+ )])
+ .unwrap();
+
+ let gm_list = vec![gm, gm_backed_by_file];
+ for gm in gm_list.iter() {
+ let sample_buf = &[1, 2, 3, 4, 5];
+
+ assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 5);
+
+ let buf = &mut [0u8; 5];
+ assert_eq!(gm.read(buf, start_addr).unwrap(), 5);
+ assert_eq!(buf, sample_buf);
+
+ start_addr = GuestAddress(0x13ff);
+ assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 1);
+ assert_eq!(gm.read(buf, start_addr).unwrap(), 1);
+ assert_eq!(buf[0], sample_buf[0]);
+ start_addr = GuestAddress(0x1000);
+ }
+ }
+
+ #[test]
+ fn read_to_and_write_from_mem() {
+ let f = TempFile::new().unwrap().into_file();
+ f.set_len(0x400).unwrap();
+
+ let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
+ let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
+ GuestAddress(0x1000),
+ 0x400,
+ Some(FileOffset::new(f, 0)),
+ )])
+ .unwrap();
+
+ let gm_list = vec![gm, gm_backed_by_file];
+ for gm in gm_list.iter() {
+ let addr = GuestAddress(0x1010);
+ let mut file = if cfg!(unix) {
+ File::open(Path::new("/dev/zero")).unwrap()
+ } else {
+ File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
+ };
+ gm.write_obj(!0u32, addr).unwrap();
+ gm.read_exact_from(addr, &mut file, mem::size_of::<u32>())
+ .unwrap();
+ let value: u32 = gm.read_obj(addr).unwrap();
+ if cfg!(unix) {
+ assert_eq!(value, 0);
+ } else {
+ assert_eq!(value, 0x0090_5a4d);
+ }
+
+ let mut sink = Vec::new();
+ gm.write_all_to(addr, &mut sink, mem::size_of::<u32>())
+ .unwrap();
+ if cfg!(unix) {
+ assert_eq!(sink, vec![0; mem::size_of::<u32>()]);
+ } else {
+ assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
+ };
+ }
+ }
+
+ #[test]
+ fn create_vec_with_regions() {
+ let region_size = 0x400;
+ let regions = vec![
+ (GuestAddress(0x0), region_size),
+ (GuestAddress(0x1000), region_size),
+ ];
+ let mut iterated_regions = Vec::new();
+ let gm = GuestMemoryMmap::from_ranges(®ions).unwrap();
+
+ for region in gm.iter() {
+ assert_eq!(region.len(), region_size as GuestUsize);
+ }
+
+ for region in gm.iter() {
+ iterated_regions.push((region.start_addr(), region.len() as usize));
+ }
+ assert_eq!(regions, iterated_regions);
+
+ assert!(regions
+ .iter()
+ .map(|x| (x.0, x.1))
+ .eq(iterated_regions.iter().copied()));
+
+ assert_eq!(gm.regions[0].guest_base, regions[0].0);
+ assert_eq!(gm.regions[1].guest_base, regions[1].0);
+ }
+
+ #[test]
+ fn test_memory() {
+ let region_size = 0x400;
+ let regions = vec![
+ (GuestAddress(0x0), region_size),
+ (GuestAddress(0x1000), region_size),
+ ];
+ let mut iterated_regions = Vec::new();
+ let gm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap());
+ let mem = gm.memory();
+
+ for region in mem.iter() {
+ assert_eq!(region.len(), region_size as GuestUsize);
+ }
+
+ for region in mem.iter() {
+ iterated_regions.push((region.start_addr(), region.len() as usize));
+ }
+ assert_eq!(regions, iterated_regions);
+
+ assert!(regions
+ .iter()
+ .map(|x| (x.0, x.1))
+ .eq(iterated_regions.iter().copied()));
+
+ assert_eq!(gm.regions[0].guest_base, regions[0].0);
+ assert_eq!(gm.regions[1].guest_base, regions[1].0);
+ }
+
+ #[test]
+ fn test_access_cross_boundary() {
+ let f1 = TempFile::new().unwrap().into_file();
+ f1.set_len(0x1000).unwrap();
+ let f2 = TempFile::new().unwrap().into_file();
+ f2.set_len(0x1000).unwrap();
+
+ let start_addr1 = GuestAddress(0x0);
+ let start_addr2 = GuestAddress(0x1000);
+ let gm =
+ GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
+ let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
+ (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))),
+ (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))),
+ ])
+ .unwrap();
+
+ let gm_list = vec![gm, gm_backed_by_file];
+ for gm in gm_list.iter() {
+ let sample_buf = &[1, 2, 3, 4, 5];
+ assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5);
+ let buf = &mut [0u8; 5];
+ assert_eq!(gm.read(buf, GuestAddress(0xffc)).unwrap(), 5);
+ assert_eq!(buf, sample_buf);
+ }
+ }
+
+ #[test]
+ fn test_retrieve_fd_backing_memory_region() {
+ let f = TempFile::new().unwrap().into_file();
+ f.set_len(0x400).unwrap();
+
+ let start_addr = GuestAddress(0x0);
+ let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
+ assert!(gm.find_region(start_addr).is_some());
+ let region = gm.find_region(start_addr).unwrap();
+ assert!(region.file_offset().is_none());
+
+ let gm = GuestMemoryMmap::from_ranges_with_files(&[(
+ start_addr,
+ 0x400,
+ Some(FileOffset::new(f, 0)),
+ )])
+ .unwrap();
+ assert!(gm.find_region(start_addr).is_some());
+ let region = gm.find_region(start_addr).unwrap();
+ assert!(region.file_offset().is_some());
+ }
+
+ // Windows needs a dedicated test where it will retrieve the allocation
+ // granularity to determine a proper offset (other than 0) that can be
+ // used for the backing file. Refer to Microsoft docs here:
+ // https://docs.microsoft.com/en-us/windows/desktop/api/memoryapi/nf-memoryapi-mapviewoffile
+ #[test]
+ #[cfg(unix)]
+ fn test_retrieve_offset_from_fd_backing_memory_region() {
+ let f = TempFile::new().unwrap().into_file();
+ f.set_len(0x1400).unwrap();
+ // Needs to be aligned on 4k, otherwise mmap will fail.
+ let offset = 0x1000;
+
+ let start_addr = GuestAddress(0x0);
+ let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
+ assert!(gm.find_region(start_addr).is_some());
+ let region = gm.find_region(start_addr).unwrap();
+ assert!(region.file_offset().is_none());
+
+ let gm = GuestMemoryMmap::from_ranges_with_files(&[(
+ start_addr,
+ 0x400,
+ Some(FileOffset::new(f, offset)),
+ )])
+ .unwrap();
+ assert!(gm.find_region(start_addr).is_some());
+ let region = gm.find_region(start_addr).unwrap();
+ assert!(region.file_offset().is_some());
+ assert_eq!(region.file_offset().unwrap().start(), offset);
+ }
+ */
+
+ #[test]
+ fn test_mmap_insert_region() {
+ let start_addr1 = GuestAddress(0);
+ let start_addr2 = GuestAddress(0x10_0000);
+
+ let guest_mem = GuestMemoryHybrid::<()>::new();
+ let mut raw_buf = [0u8; 0x1000];
+ let raw_ptr = &mut raw_buf as *mut u8;
+ let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, raw_ptr, 0x1000) };
+ let guest_mem = guest_mem
+ .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
+ .unwrap();
+ let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, raw_ptr, 0x1000) };
+ let gm = &guest_mem
+ .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
+ .unwrap();
+ let mem_orig = gm.memory();
+ assert_eq!(mem_orig.num_regions(), 2);
+
+ let reg = unsafe { GuestRegionRaw::new(GuestAddress(0x8000), raw_ptr, 0x1000) };
+ let mmap = Arc::new(GuestRegionHybrid::from_raw_region(reg));
+ let gm = gm.insert_region(mmap).unwrap();
+ let reg = unsafe { GuestRegionRaw::new(GuestAddress(0x4000), raw_ptr, 0x1000) };
+ let mmap = Arc::new(GuestRegionHybrid::from_raw_region(reg));
+ let gm = gm.insert_region(mmap).unwrap();
+ let reg = unsafe { GuestRegionRaw::new(GuestAddress(0xc000), raw_ptr, 0x1000) };
+ let mmap = Arc::new(GuestRegionHybrid::from_raw_region(reg));
+ let gm = gm.insert_region(mmap).unwrap();
+ let reg = unsafe { GuestRegionRaw::new(GuestAddress(0xc000), raw_ptr, 0x1000) };
+ let mmap = Arc::new(GuestRegionHybrid::from_raw_region(reg));
+ gm.insert_region(mmap).unwrap_err();
+
+ assert_eq!(mem_orig.num_regions(), 2);
+ assert_eq!(gm.num_regions(), 5);
+
+ assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000));
+ assert_eq!(gm.regions[1].start_addr(), GuestAddress(0x4000));
+ assert_eq!(gm.regions[2].start_addr(), GuestAddress(0x8000));
+ assert_eq!(gm.regions[3].start_addr(), GuestAddress(0xc000));
+ assert_eq!(gm.regions[4].start_addr(), GuestAddress(0x10_0000));
+ }
+
+ #[test]
+ fn test_mmap_remove_region() {
+ let start_addr1 = GuestAddress(0);
+ let start_addr2 = GuestAddress(0x10_0000);
+
+ let guest_mem = GuestMemoryHybrid::<()>::new();
+ let mut raw_buf = [0u8; 0x1000];
+ let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x1000) };
+ let guest_mem = guest_mem
+ .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
+ .unwrap();
+ let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, &mut raw_buf as *mut _, 0x1000) };
+ let gm = &guest_mem
+ .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
+ .unwrap();
+ let mem_orig = gm.memory();
+ assert_eq!(mem_orig.num_regions(), 2);
+
+ gm.remove_region(GuestAddress(0), 128).unwrap_err();
+ gm.remove_region(GuestAddress(0x4000), 128).unwrap_err();
+ let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap();
+
+ assert_eq!(mem_orig.num_regions(), 2);
+ assert_eq!(gm.num_regions(), 1);
+
+ assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000));
+ assert_eq!(region.start_addr(), GuestAddress(0x10_0000));
+ }
+
+ #[test]
+ fn test_guest_memory_mmap_get_slice() {
+ let start_addr1 = GuestAddress(0);
+ let mut raw_buf = [0u8; 0x400];
+ let region =
+ unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };
+
+ // Normal case.
+ let slice_addr = MemoryRegionAddress(0x100);
+ let slice_size = 0x200;
+ let slice = region.get_slice(slice_addr, slice_size).unwrap();
+ assert_eq!(slice.len(), slice_size);
+
+ // Empty slice.
+ let slice_addr = MemoryRegionAddress(0x200);
+ let slice_size = 0x0;
+ let slice = region.get_slice(slice_addr, slice_size).unwrap();
+ assert!(slice.is_empty());
+
+ // Error case when slice_size is beyond the boundary.
+ let slice_addr = MemoryRegionAddress(0x300);
+ let slice_size = 0x200;
+ assert!(region.get_slice(slice_addr, slice_size).is_err());
+ }
+
+ #[test]
+ fn test_guest_memory_mmap_as_volatile_slice() {
+ let start_addr1 = GuestAddress(0);
+ let mut raw_buf = [0u8; 0x400];
+ let region =
+ unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };
+ let region_size = 0x400;
+
+ // Test slice length.
+ let slice = region.as_volatile_slice().unwrap();
+ assert_eq!(slice.len(), region_size);
+
+ // Test slice data.
+ let v = 0x1234_5678u32;
+ let r = slice.get_ref::<u32>(0x200).unwrap();
+ r.store(v);
+ assert_eq!(r.load(), v);
+ }
+
+ #[test]
+ fn test_guest_memory_get_slice() {
+ let start_addr1 = GuestAddress(0);
+ let start_addr2 = GuestAddress(0x800);
+
+ let guest_mem = GuestMemoryHybrid::<()>::new();
+ let mut raw_buf = [0u8; 0x400];
+ let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };
+ let guest_mem = guest_mem
+ .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
+ .unwrap();
+ let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, &mut raw_buf as *mut _, 0x400) };
+ let guest_mem = guest_mem
+ .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
+ .unwrap();
+
+ // Normal cases.
+ let slice_size = 0x200;
+ let slice = guest_mem
+ .get_slice(GuestAddress(0x100), slice_size)
+ .unwrap();
+ assert_eq!(slice.len(), slice_size);
+
+ let slice_size = 0x400;
+ let slice = guest_mem
+ .get_slice(GuestAddress(0x800), slice_size)
+ .unwrap();
+ assert_eq!(slice.len(), slice_size);
+
+ // Empty slice.
+ assert!(guest_mem
+ .get_slice(GuestAddress(0x900), 0)
+ .unwrap()
+ .is_empty());
+
+ // Error cases, wrong size or base address.
+ assert!(guest_mem.get_slice(GuestAddress(0), 0x500).is_err());
+ assert!(guest_mem.get_slice(GuestAddress(0x600), 0x100).is_err());
+ assert!(guest_mem.get_slice(GuestAddress(0xc00), 0x100).is_err());
+ }
+
+ #[test]
+ fn test_checked_offset() {
+ let start_addr1 = GuestAddress(0);
+ let start_addr2 = GuestAddress(0x800);
+ let start_addr3 = GuestAddress(0xc00);
+
+ let guest_mem = GuestMemoryHybrid::<()>::new();
+ let mut raw_buf = [0u8; 0x400];
+ let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };
+ let guest_mem = guest_mem
+ .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
+ .unwrap();
+ let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, &mut raw_buf as *mut _, 0x400) };
+ let guest_mem = guest_mem
+ .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
+ .unwrap();
+ let reg = unsafe { GuestRegionRaw::<()>::new(start_addr3, &mut raw_buf as *mut _, 0x400) };
+ let guest_mem = guest_mem
+ .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
+ .unwrap();
+
+ assert_eq!(
+ guest_mem.checked_offset(start_addr1, 0x200),
+ Some(GuestAddress(0x200))
+ );
+ assert_eq!(
+ guest_mem.checked_offset(start_addr1, 0xa00),
+ Some(GuestAddress(0xa00))
+ );
+ assert_eq!(
+ guest_mem.checked_offset(start_addr2, 0x7ff),
+ Some(GuestAddress(0xfff))
+ );
+ assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None);
+ assert_eq!(guest_mem.checked_offset(start_addr1, std::usize::MAX), None);
+
+ assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None);
+ assert_eq!(
+ guest_mem.checked_offset(start_addr1, 0x400 - 1),
+ Some(GuestAddress(0x400 - 1))
+ );
+ }
+
+ #[test]
+ fn test_check_range() {
+ let start_addr1 = GuestAddress(0);
+ let start_addr2 = GuestAddress(0x800);
+ let start_addr3 = GuestAddress(0xc00);
+
+ let guest_mem = GuestMemoryHybrid::<()>::new();
+ let mut raw_buf = [0u8; 0x400];
+ let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };
+ let guest_mem = guest_mem
+ .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
+ .unwrap();
+ let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, &mut raw_buf as *mut _, 0x400) };
+ let guest_mem = guest_mem
+ .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
+ .unwrap();
+ let reg = unsafe { GuestRegionRaw::<()>::new(start_addr3, &mut raw_buf as *mut _, 0x400) };
+ let guest_mem = guest_mem
+ .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
+ .unwrap();
+
+ assert!(guest_mem.check_range(start_addr1, 0x0));
+ assert!(guest_mem.check_range(start_addr1, 0x200));
+ assert!(guest_mem.check_range(start_addr1, 0x400));
+ assert!(!guest_mem.check_range(start_addr1, 0xa00));
+ assert!(guest_mem.check_range(start_addr2, 0x7ff));
+ assert!(guest_mem.check_range(start_addr2, 0x800));
+ assert!(!guest_mem.check_range(start_addr2, 0x801));
+ assert!(!guest_mem.check_range(start_addr2, 0xc00));
+ assert!(!guest_mem.check_range(start_addr1, usize::MAX));
+ }
+}
diff --git a/src/dragonball/src/dbs_address_space/src/numa.rs b/src/dragonball/src/dbs_address_space/src/numa.rs
new file mode 100644
index 000000000..71f2d748a
--- /dev/null
+++ b/src/dragonball/src/dbs_address_space/src/numa.rs
@@ -0,0 +1,85 @@
+// Copyright (C) 2021 Alibaba Cloud. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+//! Types for NUMA information.
+
+use vm_memory::{GuestAddress, GuestUsize};
+
+/// mbind() memory policy that prefers the specified node but does not lead to OOM.
+pub const MPOL_PREFERRED: u32 = 1;
+
+/// mbind() flag to attempt to move existing pages so that they follow the policy.
+pub const MPOL_MF_MOVE: u32 = 2;
+
+/// Type for recording numa ids of different devices
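+///
+/// A sketch of how a table might be populated (the concrete ids are illustrative only):
+///
+/// ```ignore
+/// let table = NumaIdTable {
+///     memory: vec![0, 0, 1],   // one numa id per memory region
+///     cpu: vec![0, 0, 1, 1],   // one numa id per vcpu
+/// };
+/// ```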
+pub struct NumaIdTable {
+ /// vector of numa ids, one for each memory region
+ pub memory: Vec<u32>,
+ /// vector of numa ids, one for each cpu
+ pub cpu: Vec<u32>,
+}
+
+/// Record numa node memory information.
+#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
+pub struct NumaNodeInfo {
+ /// Base address of the region in guest physical address space.
+ pub base: GuestAddress,
+ /// Size of the address region.
+ pub size: GuestUsize,
+}
+
+/// Record all region's info of a numa node.
+#[derive(Debug, Default, Clone, PartialEq, Eq)]
+pub struct NumaNode {
+ region_infos: Vec<NumaNodeInfo>,
+ vcpu_ids: Vec<u32>,
+}
+
+impl NumaNode {
+ /// get a reference to the region_infos of this numa node.
+ pub fn region_infos(&self) -> &Vec<NumaNodeInfo> {
+ &self.region_infos
+ }
+
+ /// get vcpu ids belonging to a numa node.
+ pub fn vcpu_ids(&self) -> &Vec<u32> {
+ &self.vcpu_ids
+ }
+
+ /// add a new numa region info into this numa node.
+ pub fn add_info(&mut self, info: &NumaNodeInfo) {
+ self.region_infos.push(*info);
+ }
+
+ /// add a group of vcpu ids belong to this numa node
+ pub fn add_vcpu_ids(&mut self, vcpu_ids: &[u32]) {
+ self.vcpu_ids.extend(vcpu_ids)
+ }
+
+ /// create a new numa node struct
+ pub fn new() -> NumaNode {
+ NumaNode {
+ region_infos: Vec::new(),
+ vcpu_ids: Vec::new(),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_create_numa_node() {
+ let mut numa_node = NumaNode::new();
+ let info = NumaNodeInfo {
+ base: GuestAddress(0),
+ size: 1024,
+ };
+ numa_node.add_info(&info);
+ assert_eq!(*numa_node.region_infos(), vec![info]);
+ let vcpu_ids = vec![0, 1, 2, 3];
+ numa_node.add_vcpu_ids(&vcpu_ids);
+ assert_eq!(*numa_node.vcpu_ids(), vcpu_ids);
+ }
+}
diff --git a/src/dragonball/src/dbs_address_space/src/region.rs b/src/dragonball/src/dbs_address_space/src/region.rs
new file mode 100644
index 000000000..a0a832404
--- /dev/null
+++ b/src/dragonball/src/dbs_address_space/src/region.rs
@@ -0,0 +1,564 @@
+// Copyright (C) 2021 Alibaba Cloud. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::ffi::CString;
+use std::fs::{File, OpenOptions};
+use std::os::unix::io::FromRawFd;
+use std::path::Path;
+use std::str::FromStr;
+
+use nix::sys::memfd;
+use vm_memory::{Address, FileOffset, GuestAddress, GuestUsize};
+
+use crate::memory::MemorySourceType;
+use crate::memory::MemorySourceType::MemFdShared;
+use crate::AddressSpaceError;
+
+/// Type of address space regions.
+///
+/// On physical machines, physical memory may have different properties, such as
+/// volatile vs non-volatile, read-only vs read-write, non-executable vs executable etc.
+/// On virtual machines, the concept of memory property may be extended to support better
+/// cooperation between the hypervisor and the guest kernel. Here address space region type means
+/// what the region will be used for by the guest OS, and different permissions and policies may
+/// be applied to different address space regions.
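+///
+/// For example (a sketch; the addresses are illustrative), an MMIO window is created through
+/// [`AddressSpaceRegion::create_device_region`] and tagged as `DeviceMemory`:
+///
+/// ```ignore
+/// use vm_memory::GuestAddress;
+///
+/// let mmio = AddressSpaceRegion::create_device_region(GuestAddress(0xd000_0000), 0x1000).unwrap();
+/// assert_eq!(mmio.region_type(), AddressSpaceRegionType::DeviceMemory);
+/// ```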
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum AddressSpaceRegionType {
+ /// Normal memory accessible by CPUs and IO devices.
+ DefaultMemory,
+ /// MMIO address region for Devices.
+ DeviceMemory,
+ /// DAX address region for virtio-fs/virtio-pmem.
+ DAXMemory,
+}
+
+/// Struct to maintain configuration information about a guest address region.
+#[derive(Debug, Clone)]
+pub struct AddressSpaceRegion {
+ /// Type of address space regions.
+ pub ty: AddressSpaceRegionType,
+ /// Base address of the region in virtual machine's physical address space.
+ pub base: GuestAddress,
+ /// Size of the address space region.
+ pub size: GuestUsize,
+ /// Host NUMA node ids assigned to this region.
+ pub host_numa_node_id: Option<u32>,
+
+ /// File/offset tuple to back the memory allocation.
+ file_offset: Option<FileOffset>,
+ /// Mmap permission flags.
+ perm_flags: i32,
+ /// Mmap protection flags.
+ prot_flags: i32,
+ /// Hugepage madvise hint.
+ ///
+ /// It requires the 'advise' or 'always' policy in the host shmem config.
+ is_hugepage: bool,
+ /// Hotplug hint.
+ is_hotplug: bool,
+ /// Anonymous memory hint.
+ ///
+ /// It should be true for regions with the MADV_DONTFORK flag enabled.
+ is_anon: bool,
+}
+
+#[allow(clippy::too_many_arguments)]
+impl AddressSpaceRegion {
+ /// Create an address space region with default configuration.
+ pub fn new(ty: AddressSpaceRegionType, base: GuestAddress, size: GuestUsize) -> Self {
+ AddressSpaceRegion {
+ ty,
+ base,
+ size,
+ host_numa_node_id: None,
+ file_offset: None,
+ perm_flags: libc::MAP_SHARED,
+ prot_flags: libc::PROT_READ | libc::PROT_WRITE,
+ is_hugepage: false,
+ is_hotplug: false,
+ is_anon: false,
+ }
+ }
+
+ /// Create an address space region with all configurable information.
+ ///
+ /// # Arguments
+ /// * `ty` - Type of the address region
+ /// * `base` - Base address in VM to map content
+ /// * `size` - Length of content to map
+ /// * `numa_node_id` - Optional NUMA node id to allocate memory from
+ /// * `file_offset` - Optional file descriptor and offset to map content from
+ /// * `perm_flags` - mmap permission flags
+ /// * `prot_flags` - mmap protection flags
+ /// * `is_hotplug` - Whether it's a region for hotplug.
+ pub fn build(
+ ty: AddressSpaceRegionType,
+ base: GuestAddress,
+ size: GuestUsize,
+ host_numa_node_id: Option<u32>,
+ file_offset: Option<FileOffset>,
+ perm_flags: i32,
+ prot_flags: i32,
+ is_hotplug: bool,
+ ) -> Self {
+ let mut region = Self::new(ty, base, size);
+
+ region.set_host_numa_node_id(host_numa_node_id);
+ region.set_file_offset(file_offset);
+ region.set_perm_flags(perm_flags);
+ region.set_prot_flags(prot_flags);
+ if is_hotplug {
+ region.set_hotplug();
+ }
+
+ region
+ }
+
+ /// Create an address space region to map memory into the virtual machine.
+ ///
+ /// # Arguments
+ /// * `base` - Base address in VM to map content
+ /// * `size` - Length of content to map
+ /// * `numa_node_id` - Optional NUMA node id to allocate memory from
+ /// * `mem_type` - Memory mapping from, 'shmem' or 'hugetlbfs'
+ /// * `mem_file_path` - Memory file path
+ /// * `mem_prealloc` - Whether to enable pre-allocation of guest memory
+ /// * `is_hotplug` - Whether it's a region for hotplug.
+ pub fn create_default_memory_region(
+ base: GuestAddress,
+ size: GuestUsize,
+ numa_node_id: Option<u32>,
+ mem_type: &str,
+ mem_file_path: &str,
+ mem_prealloc: bool,
+ is_hotplug: bool,
+ ) -> Result<AddressSpaceRegion, AddressSpaceError> {
+ Self::create_memory_region(
+ base,
+ size,
+ numa_node_id,
+ mem_type,
+ mem_file_path,
+ mem_prealloc,
+ libc::PROT_READ | libc::PROT_WRITE,
+ is_hotplug,
+ )
+ }
+
+ /// Create an address space region to map memory from memfd/hugetlbfs into the virtual machine.
+ ///
+ /// # Arguments
+ /// * `base` - Base address in VM to map content
+ /// * `size` - Length of content to map
+ /// * `numa_node_id` - Optional NUMA node id to allocate memory from
+ /// * `mem_type` - Memory mapping from, 'shmem' or 'hugetlbfs'
+ /// * `mem_file_path` - Memory file path
+ /// * `mem_prealloc` - Whether to enable pre-allocation of guest memory
+ /// * `is_hotplug` - Whether it's a region for hotplug.
+ /// * `prot_flags` - mmap protection flags
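+ ///
+ /// # Example
+ ///
+ /// A minimal sketch creating an anonymous-mmap backed region (the arguments are
+ /// illustrative; the file path is unused for the `anon` source type):
+ ///
+ /// ```ignore
+ /// use vm_memory::GuestAddress;
+ ///
+ /// let region = AddressSpaceRegion::create_memory_region(
+ ///     GuestAddress(0),
+ ///     0x10_0000,
+ ///     None,
+ ///     "anon",
+ ///     "",
+ ///     false,
+ ///     libc::PROT_READ | libc::PROT_WRITE,
+ ///     false,
+ /// )
+ /// .unwrap();
+ /// assert!(region.is_anonpage());
+ /// assert!(!region.has_file());
+ /// ```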
+ pub fn create_memory_region(
+ base: GuestAddress,
+ size: GuestUsize,
+ numa_node_id: Option<u32>,
+ mem_type: &str,
+ mem_file_path: &str,
+ mem_prealloc: bool,
+ prot_flags: i32,
+ is_hotplug: bool,
+ ) -> Result<AddressSpaceRegion, AddressSpaceError> {
+ let perm_flags = if mem_prealloc {
+ libc::MAP_SHARED | libc::MAP_POPULATE
+ } else {
+ libc::MAP_SHARED
+ };
+ let source_type = MemorySourceType::from_str(mem_type)
+ .map_err(|_e| AddressSpaceError::InvalidMemorySourceType(mem_type.to_string()))?;
+ let mut reg = match source_type {
+ MemorySourceType::MemFdShared | MemorySourceType::MemFdOnHugeTlbFs => {
+ let fn_str = if source_type == MemFdShared {
+ CString::new("shmem").expect("CString::new('shmem') failed")
+ } else {
+ CString::new("hugeshmem").expect("CString::new('hugeshmem') failed")
+ };
+ let filename = fn_str.as_c_str();
+ let fd = memfd::memfd_create(filename, memfd::MemFdCreateFlag::empty())
+ .map_err(AddressSpaceError::CreateMemFd)?;
+ // Safe because we have just created the fd.
+ let file: File = unsafe { File::from_raw_fd(fd) };
+ file.set_len(size).map_err(AddressSpaceError::SetFileSize)?;
+ Self::build(
+ AddressSpaceRegionType::DefaultMemory,
+ base,
+ size,
+ numa_node_id,
+ Some(FileOffset::new(file, 0)),
+ perm_flags,
+ prot_flags,
+ is_hotplug,
+ )
+ }
+ MemorySourceType::MmapAnonymous | MemorySourceType::MmapAnonymousHugeTlbFs => {
+ let mut perm_flags = libc::MAP_PRIVATE | libc::MAP_ANONYMOUS;
+ if mem_prealloc {
+ perm_flags |= libc::MAP_POPULATE
+ }
+ Self::build(
+ AddressSpaceRegionType::DefaultMemory,
+ base,
+ size,
+ numa_node_id,
+ None,
+ perm_flags,
+ prot_flags,
+ is_hotplug,
+ )
+ }
+ MemorySourceType::FileOnHugeTlbFs => {
+ let path = Path::new(mem_file_path);
+ if let Some(parent_dir) = path.parent() {
+ // Ensure that the parent directory of the mem file path exists.
+ std::fs::create_dir_all(parent_dir).map_err(AddressSpaceError::CreateDir)?;
+ }
+ let file = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .open(mem_file_path)
+ .map_err(AddressSpaceError::OpenFile)?;
+ nix::unistd::unlink(mem_file_path).map_err(AddressSpaceError::UnlinkFile)?;
+ file.set_len(size).map_err(AddressSpaceError::SetFileSize)?;
+ let file_offset = FileOffset::new(file, 0);
+ Self::build(
+ AddressSpaceRegionType::DefaultMemory,
+ base,
+ size,
+ numa_node_id,
+ Some(file_offset),
+ perm_flags,
+ prot_flags,
+ is_hotplug,
+ )
+ }
+ };
+
+ if source_type.is_hugepage() {
+ reg.set_hugepage();
+ }
+ if source_type.is_mmap_anonymous() {
+ reg.set_anonpage();
+ }
+
+ Ok(reg)
+ }
+
+ /// Create an address region for device MMIO.
+ ///
+ /// # Arguments
+ /// * `base` - Base address in VM to map content
+ /// * `size` - Length of content to map
+ pub fn create_device_region(
+ base: GuestAddress,
+ size: GuestUsize,
+ ) -> Result<AddressSpaceRegion> {
+ Ok(Self::build(
+ AddressSpaceRegionType::DeviceMemory,
+ base,
+ size,
+ None,
+ None,
+ 0,
+ 0,
+ false,
+ ))
+ }
+
+ /// Get type of the address space region.
+ pub fn region_type(&self) -> AddressSpaceRegionType {
+ self.ty
+ }
+
+ /// Get size of region.
+ pub fn len(&self) -> GuestUsize {
+ self.size
+ }
+
+ /// Get the inclusive start physical address of the region.
+ pub fn start_addr(&self) -> GuestAddress {
+ self.base
+ }
+
+ /// Get the inclusive end physical address of the region.
+ pub fn last_addr(&self) -> GuestAddress {
+ debug_assert!(self.size > 0 && self.base.checked_add(self.size).is_some());
+ GuestAddress(self.base.raw_value() + self.size - 1)
+ }
+
+ /// Get mmap permission flags of the address space region.
+ pub fn perm_flags(&self) -> i32 {
+ self.perm_flags
+ }
+
+ /// Set mmap permission flags for the address space region.
+ pub fn set_perm_flags(&mut self, perm_flags: i32) {
+ self.perm_flags = perm_flags;
+ }
+
+ /// Get mmap protection flags of the address space region.
+ pub fn prot_flags(&self) -> i32 {
+ self.prot_flags
+ }
+
+ /// Set mmap protection flags for the address space region.
+ pub fn set_prot_flags(&mut self, prot_flags: i32) {
+ self.prot_flags = prot_flags;
+ }
+
+ /// Get the associated NUMA node ID to allocate memory from for this region.
+ pub fn host_numa_node_id(&self) -> Option<u32> {
+ self.host_numa_node_id
+ }
+
+ /// Set associated NUMA node ID to allocate memory from for this region.
+ pub fn set_host_numa_node_id(&mut self, host_numa_node_id: Option<u32>) {
+ self.host_numa_node_id = host_numa_node_id;
+ }
+
+ /// Check whether the address space region is backed by a memory file.
+ pub fn has_file(&self) -> bool {
+ self.file_offset.is_some()
+ }
+
+ /// Get optional file associated with the region.
+ pub fn file_offset(&self) -> Option<&FileOffset> {
+ self.file_offset.as_ref()
+ }
+
+ /// Set associated file/offset pair for the region.
+ pub fn set_file_offset(&mut self, file_offset: Option<FileOffset>) {
+ self.file_offset = file_offset;
+ }
+
+ /// Set the hotplug hint.
+ pub fn set_hotplug(&mut self) {
+ self.is_hotplug = true
+ }
+
+ /// Get the hotplug hint.
+ pub fn is_hotplug(&self) -> bool {
+ self.is_hotplug
+ }
+
+ /// Set hugepage hint for `madvise()`, only takes effect when the memory type is `shmem`.
+ pub fn set_hugepage(&mut self) {
+ self.is_hugepage = true
+ }
+
+ /// Get the hugepage hint.
+ pub fn is_hugepage(&self) -> bool {
+ self.is_hugepage
+ }
+
+ /// Set the anonymous memory hint.
+ pub fn set_anonpage(&mut self) {
+ self.is_anon = true
+ }
+
+ /// Get the anonymous memory hint.
+ pub fn is_anonpage(&self) -> bool {
+ self.is_anon
+ }
+
+ /// Check whether the address space region is valid.
+ pub fn is_valid(&self) -> bool {
+ self.size > 0 && self.base.checked_add(self.size).is_some()
+ }
+
+ /// Check whether the address space region intersects with another one.
+ pub fn intersect_with(&self, other: &AddressSpaceRegion) -> bool {
+ // Treat invalid address region as intersecting always
+ let end1 = match self.base.checked_add(self.size) {
+ Some(addr) => addr,
+ None => return true,
+ };
+ let end2 = match other.base.checked_add(other.size) {
+ Some(addr) => addr,
+ None => return true,
+ };
+
+ !(end1 <= other.base || self.base >= end2)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::io::Write;
+ use vmm_sys_util::tempfile::TempFile;
+
+ #[test]
+ fn test_address_space_region_valid() {
+ let reg1 = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0xFFFFFFFFFFFFF000),
+ 0x2000,
+ );
+ assert!(!reg1.is_valid());
+ let reg1 = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0xFFFFFFFFFFFFF000),
+ 0x1000,
+ );
+ assert!(!reg1.is_valid());
+ let reg1 = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DeviceMemory,
+ GuestAddress(0xFFFFFFFFFFFFE000),
+ 0x1000,
+ );
+ assert!(reg1.is_valid());
+ assert_eq!(reg1.start_addr(), GuestAddress(0xFFFFFFFFFFFFE000));
+ assert_eq!(reg1.len(), 0x1000);
+ assert!(!reg1.has_file());
+ assert!(reg1.file_offset().is_none());
+ assert_eq!(reg1.perm_flags(), libc::MAP_SHARED);
+ assert_eq!(reg1.prot_flags(), libc::PROT_READ | libc::PROT_WRITE);
+ assert_eq!(reg1.region_type(), AddressSpaceRegionType::DeviceMemory);
+
+ let tmp_file = TempFile::new().unwrap();
+ let mut f = tmp_file.into_file();
+ let sample_buf = &[1, 2, 3, 4, 5];
+ assert!(f.write_all(sample_buf).is_ok());
+ let reg2 = AddressSpaceRegion::build(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x1000),
+ 0x1000,
+ None,
+ Some(FileOffset::new(f, 0x0)),
+ 0x5a,
+ 0x5a,
+ false,
+ );
+ assert_eq!(reg2.region_type(), AddressSpaceRegionType::DefaultMemory);
+ assert!(reg2.is_valid());
+ assert_eq!(reg2.start_addr(), GuestAddress(0x1000));
+ assert_eq!(reg2.len(), 0x1000);
+ assert!(reg2.has_file());
+ assert!(reg2.file_offset().is_some());
+ assert_eq!(reg2.perm_flags(), 0x5a);
+ assert_eq!(reg2.prot_flags(), 0x5a);
+ }
+
+ #[test]
+ fn test_address_space_region_intersect() {
+ let reg1 = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x1000),
+ 0x1000,
+ );
+ let reg2 = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x2000),
+ 0x1000,
+ );
+ let reg3 = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x1000),
+ 0x1001,
+ );
+ let reg4 = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0x1100),
+ 0x100,
+ );
+ let reg5 = AddressSpaceRegion::new(
+ AddressSpaceRegionType::DefaultMemory,
+ GuestAddress(0xFFFFFFFFFFFFF000),
+ 0x2000,
+ );
+
+ assert!(!reg1.intersect_with(&reg2));
+ assert!(!reg2.intersect_with(&reg1));
+
+ // intersect with self
+ assert!(reg1.intersect_with(&reg1));
+
+ // intersect with others
+ assert!(reg3.intersect_with(&reg2));
+ assert!(reg2.intersect_with(&reg3));
+ assert!(reg1.intersect_with(&reg4));
+ assert!(reg4.intersect_with(&reg1));
+ assert!(reg1.intersect_with(&reg5));
+ assert!(reg5.intersect_with(&reg1));
+ }
+
+ #[test]
+ fn test_create_device_region() {
+ let reg = AddressSpaceRegion::create_device_region(GuestAddress(0x10000), 0x1000).unwrap();
+ assert_eq!(reg.region_type(), AddressSpaceRegionType::DeviceMemory);
+ assert_eq!(reg.start_addr(), GuestAddress(0x10000));
+ assert_eq!(reg.len(), 0x1000);
+ }
+
+ #[test]
+ fn test_create_default_memory_region() {
+ AddressSpaceRegion::create_default_memory_region(
+ GuestAddress(0x100000),
+ 0x100000,
+ None,
+ "invalid",
+ "invalid",
+ false,
+ false,
+ )
+ .unwrap_err();
+
+ let reg = AddressSpaceRegion::create_default_memory_region(
+ GuestAddress(0x100000),
+ 0x100000,
+ None,
+ "shmem",
+ "",
+ false,
+ false,
+ )
+ .unwrap();
+ assert_eq!(reg.region_type(), AddressSpaceRegionType::DefaultMemory);
+ assert_eq!(reg.start_addr(), GuestAddress(0x100000));
+ assert_eq!(reg.last_addr(), GuestAddress(0x1fffff));
+ assert_eq!(reg.len(), 0x100000);
+ assert!(reg.file_offset().is_some());
+
+ let reg = AddressSpaceRegion::create_default_memory_region(
+ GuestAddress(0x100000),
+ 0x100000,
+ None,
+ "hugeshmem",
+ "",
+ true,
+ false,
+ )
+ .unwrap();
+ assert_eq!(reg.region_type(), AddressSpaceRegionType::DefaultMemory);
+ assert_eq!(reg.start_addr(), GuestAddress(0x100000));
+ assert_eq!(reg.last_addr(), GuestAddress(0x1fffff));
+ assert_eq!(reg.len(), 0x100000);
+ assert!(reg.file_offset().is_some());
+
+ let reg = AddressSpaceRegion::create_default_memory_region(
+ GuestAddress(0x100000),
+ 0x100000,
+ None,
+ "mmap",
+ "",
+ true,
+ false,
+ )
+ .unwrap();
+ assert_eq!(reg.region_type(), AddressSpaceRegionType::DefaultMemory);
+ assert_eq!(reg.start_addr(), GuestAddress(0x100000));
+ assert_eq!(reg.last_addr(), GuestAddress(0x1fffff));
+ assert_eq!(reg.len(), 0x100000);
+ assert!(reg.file_offset().is_none());
+
+ // TODO: test hugetlbfs
+ }
+}
diff --git a/src/dragonball/src/dbs_allocator/Cargo.toml b/src/dragonball/src/dbs_allocator/Cargo.toml
new file mode 100644
index 000000000..c3c0f3c10
--- /dev/null
+++ b/src/dragonball/src/dbs_allocator/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "dbs-allocator"
+version = "0.1.1"
+authors = ["Liu Jiang "]
+description = "a resource allocator for virtual machine manager"
+license = "Apache-2.0"
+edition = "2018"
+homepage = "https://github.com/openanolis/dragonball-sandbox"
+repository = "https://github.com/openanolis/dragonball-sandbox"
+keywords = ["dragonball"]
+readme = "README.md"
+
+[dependencies]
+thiserror = "1.0"
diff --git a/src/dragonball/src/dbs_allocator/LICENSE b/src/dragonball/src/dbs_allocator/LICENSE
new file mode 120000
index 000000000..30cff7403
--- /dev/null
+++ b/src/dragonball/src/dbs_allocator/LICENSE
@@ -0,0 +1 @@
+../../LICENSE
\ No newline at end of file
diff --git a/src/dragonball/src/dbs_allocator/README.md b/src/dragonball/src/dbs_allocator/README.md
new file mode 100644
index 000000000..2e4b07a8c
--- /dev/null
+++ b/src/dragonball/src/dbs_allocator/README.md
@@ -0,0 +1,106 @@
+# dbs-allocator
+
+## Design
+
+The resource manager in the `Dragonball Sandbox` needs to manage and allocate different kinds of resources for the
+sandbox (virtual machine), such as memory-mapped I/O address space, port I/O address space, legacy IRQ numbers,
+MSI/MSI-X vectors, device instance ids, etc. The `dbs-allocator` crate is designed to help the resource manager
+track and allocate these types of resources.
+
+Main components are:
+- *Constraints*: struct to declare constraints for resource allocation.
+```rust
+#[derive(Copy, Clone, Debug)]
+pub struct Constraint {
+ /// Size of resource to allocate.
+ pub size: u64,
+ /// Lower boundary for resource allocation.
+ pub min: u64,
+ /// Upper boundary for resource allocation.
+ pub max: u64,
+ /// Alignment for allocated resource.
+ pub align: u64,
+ /// Policy for resource allocation.
+ pub policy: AllocPolicy,
+}
+```
+- `IntervalTree`: An interval tree implementation specialized for VMM resource management.
+```rust
+pub struct IntervalTree<T> {
+ pub(crate) root: Option<Node<T>>,
+}
+
+pub fn allocate(&mut self, constraint: &Constraint) -> Option<Range>
+pub fn free(&mut self, key: &Range) -> Option<T>
+pub fn insert(&mut self, key: Range, data: Option<T>)
+pub fn update(&mut self, key: &Range, data: T) -> Option<T>
+pub fn delete(&mut self, key: &Range) -> Option<T>
+pub fn get(&self, key: &Range) -> Option<NodeState<&T>>
+```
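+
+As a quick sketch of how these pieces fit together (the pool name and the address range below are made up for illustration; `align()` is the same builder helper used in the examples further down):
+
+```rust
+use dbs_allocator::{Constraint, IntervalTree, Range};
+
+// Track a small MMIO window and carve a 4 KiB, 4 KiB-aligned range out of it.
+let mut mmio_pool = IntervalTree::<u32>::new();
+mmio_pool.insert(Range::new(0xd000_0000u64, 0xdfff_ffffu64), None);
+
+let constraint = Constraint::new(0x1000u64).align(0x1000u64);
+let mmio_range = mmio_pool.allocate(&constraint);
+assert_eq!(mmio_range, Some(Range::new(0xd000_0000u64, 0xd000_0fffu64)));
+```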
+
+## Usage
+The concept of an interval tree may seem complicated, but using dbs-allocator for resource allocation and release is simple and straightforward.
+You can follow these steps to allocate your VMM resources.
+```rust
+// 1. To start with, create an interval tree for the specific resource and insert the maximum address/id range as the root node. The range here could be an address range, an id range, etc.
+
+let mut resources_pool = IntervalTree::new();
+resources_pool.insert(Range::new(MIN_RANGE, MAX_RANGE), None);
+
+// 2. Next, create a constraint with the size of your resource; you can also assign the maximum, minimum and alignment for the constraint. Then use the constraint to allocate a resource from the range we previously inserted. The interval tree will return an appropriate range.
+let mut constraint = Constraint::new(SIZE);
+let mut resources_range = resources_pool.allocate(&constraint);
+
+// 3. Then the resource range can be handed to other crates like vm-pci / vm-device to create and maintain the device.
+let mut device = Device::create(resources_range, ..);
+```
+
+## Example
+Below are examples of allocating an unused PCI device ID from a PCI device ID pool and allocating a memory address range using dbs-allocator.
+```rust
+use dbs_allocator::{Constraint, IntervalTree, Range};
+
+// Init a dbs-allocator IntervalTree
+let mut pci_device_pool = IntervalTree::new();
+
+// Init the PCI device id pool with the range 0 to 255
+pci_device_pool.insert(Range::new(0x0u8, 0xffu8), None);
+
+// Construct a constraint with size 1 and alignment 1 to ask for an ID.
+let mut constraint = Constraint::new(1u64).align(1u64);
+
+// Get an ID from the pci_device_pool
+let mut id = pci_device_pool.allocate(&constraint).map(|e| e.min as u8);
+
+// Pass the ID generated by dbs-allocator to vm-pci functions to create PCI devices
+let mut pci_device = PciDevice::new(id.unwrap(), ..);
+
+```
+
+```rust
+use dbs_allocator::{Constraint, IntervalTree, Range};
+
+// Init a dbs-allocator IntervalTree
+let mut mem_pool = IntervalTree::new();
+
+// Init the memory address range from GUEST_MEM_START to GUEST_MEM_END
+mem_pool.insert(Range::new(GUEST_MEM_START, GUEST_MEM_END), None);
+
+// Construct a constraint with the size, maximum address and minimum address of the memory region to ask for a memory allocation range.
+let constraint = Constraint::new(region.len())
+ .min(region.start_addr().raw_value())
+ .max(region.last_addr().raw_value());
+
+// Get the memory allocation range from the mem_pool
+let mem_range = mem_pool.allocate(&constraint).unwrap();
+
+// Update the mem_range in the IntervalTree with the memory region info
+mem_pool.update(&mem_range, region);
+
+// After allocation, we can use the memory range to do mapping and other memory related work.
+...
+```
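+
+If the memory region is later removed (for example on hot-unplug), the range can be returned to the pool so later allocations may reuse it. A minimal sketch, reusing the `mem_pool` and `mem_range` from the example above:
+
+```rust
+// Release the range; the region data stored via update() is handed back to the caller.
+let _region = mem_pool.free(&mem_range);
+```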
+
+## License
+
+This project is licensed under [Apache License](http://www.apache.org/licenses/LICENSE-2.0), Version 2.0.
\ No newline at end of file
diff --git a/src/dragonball/src/dbs_allocator/src/interval_tree.rs b/src/dragonball/src/dbs_allocator/src/interval_tree.rs
new file mode 100644
index 000000000..c2a13c5c8
--- /dev/null
+++ b/src/dragonball/src/dbs_allocator/src/interval_tree.rs
@@ -0,0 +1,1297 @@
+// Copyright (C) 2019 Alibaba Cloud. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+//! An interval tree implementation specialized for VMM resource management.
+//!
+//! It's not designed as a generic interval tree, but specialized for VMM resource management.
+//! In addition to the normal get()/insert()/delete()/update() tree operations, it also implements
+//! allocate()/free() for resource allocation.
+//!
+//! # Examples
+//! ```rust
+//! extern crate dbs_allocator;
+//! use dbs_allocator::{Constraint, IntervalTree, NodeState, Range};
+//!
+//! // Create an interval tree and add available resources.
+//! let mut tree = IntervalTree::<u64>::new();
+//! tree.insert(Range::new(0x100u32, 0x100u32), None);
+//! tree.insert(Range::new(0x200u16, 0x2ffu16), None);
+//!
+//! // Allocate a range with constraints.
+//! let mut constraint = Constraint::new(8u64);
+//! constraint.min = 0x211;
+//! constraint.max = 0x21f;
+//! constraint.align = 0x8;
+//!
+//! let key = tree.allocate(&constraint);
+//! assert_eq!(key, Some(Range::new(0x218u64, 0x21fu64)));
+//! let val = tree.get(&Range::new(0x218u64, 0x21fu64));
+//! assert_eq!(val, Some(NodeState::Allocated));
+//!
+//! // Associate data with the allocated range and mark the range as occupied.
+//! // Note: caller needs to protect from concurrent access between allocate() and the first call
+//! // to update() to mark range as occupied.
+//! let old = tree.update(&Range::new(0x218u32, 0x21fu32), 2);
+//! assert_eq!(old, None);
+//! let old = tree.update(&Range::new(0x218u32, 0x21fu32), 3);
+//! assert_eq!(old, Some(2));
+//! let val = tree.get(&Range::new(0x218u32, 0x21fu32));
+//! assert_eq!(val, Some(NodeState::Valued(&3)));
+//!
+//! // Free allocated resource.
+//! let old = tree.free(key.as_ref().unwrap());
+//! assert_eq!(old, Some(3));
+//! ```
+
+use std::cmp::{max, min, Ordering};
+
+use crate::{AllocPolicy, Constraint};
+
+/// Represent a closed range `[min, max]`.
+#[allow(missing_docs)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub struct Range {
+ pub min: u64,
+ pub max: u64,
+}
+
+impl std::fmt::Debug for Range {
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ write!(f, "[ {:016x}, {:016x} ]", self.min, self.max)
+ }
+}
+
+impl Range {
+ /// Create an instance of [`Range`] with given `min` and `max`.
+ ///
+ /// ## Panic
+ /// - if min is bigger than max
+ /// - if min == 0 && max == u64::MAX
+ pub fn new<T>(min: T, max: T) -> Self
+ where
+ u64: From<T>,
+ {
+ let umin = u64::from(min);
+ let umax = u64::from(max);
+ if umin > umax || (umin == 0 && umax == u64::MAX) {
+ panic!("interval_tree: Range({}, {}) is invalid", umin, umax);
+ }
+ Range {
+ min: umin,
+ max: umax,
+ }
+ }
+
+ /// Create an instance of [`Range`] with given base and size.
+ ///
+ /// ## Panic
+ /// - if base + size wraps around
+ /// - if base == 0 && size == u64::MAX
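+ ///
+ /// Note that `size` is added to `base` as-is, so the resulting range is `[base, base + size]`.
+ ///
+ /// # Examples
+ /// ```rust
+ /// extern crate dbs_allocator;
+ /// use dbs_allocator::Range;
+ ///
+ /// // A minimal sketch of the semantics: base 0x1000 with size 0xfff covers [0x1000, 0x1fff].
+ /// let r = Range::with_size(0x1000u64, 0xfffu64);
+ /// assert_eq!(r, Range::new(0x1000u64, 0x1fffu64));
+ /// ```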
+ pub fn with_size<T>(base: T, size: T) -> Self
+ where
+ u64: From<T>,
+ {
+ let umin = u64::from(base);
+ let umax = u64::from(size).checked_add(umin).unwrap();
+ if umin > umax || (umin == 0 && umax == std::u64::MAX) {
+ panic!("interval_tree: Range({}, {}) is invalid", umin, umax);
+ }
+ Range {
+ min: umin,
+ max: umax,
+ }
+ }
+
+ /// Create an instance of [`Range`] containing only the point `value`.
+ pub fn new_point<T>(value: T) -> Self
+ where
+ u64: From<T>,
+ {
+ let val = u64::from(value);
+ Range { min: val, max: val }
+ }
+
+ /// Get size of the range.
+ pub fn len(&self) -> u64 {
+ self.max - self.min + 1
+ }
+
+ /// Check whether the range is empty.
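+ ///
+ /// A `Range` always holds at least one value (`min <= max`), so this always returns `false`.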
+ pub fn is_empty(&self) -> bool {
+ false
+ }
+
+ /// Check whether two Range objects intersect with each other.
+ pub fn intersect(&self, other: &Range) -> bool {
+ max(self.min, other.min) <= min(self.max, other.max)
+ }
+
+ /// Check whether another [Range] object is fully covered by this range.
+ pub fn contain(&self, other: &Range) -> bool {
+ self.min <= other.min && self.max >= other.max
+ }
+
+ /// Create a new instance of [Range] with `min` aligned to `align`.
+ ///
+ /// # Examples
+ /// ```rust
+ /// extern crate dbs_allocator;
+ /// use dbs_allocator::Range;
+ ///
+ /// let a = Range::new(2u32, 6u32);
+ /// assert_eq!(a.align_to(0), Some(Range::new(2u32, 6u32)));
+ /// assert_eq!(a.align_to(1), Some(Range::new(2u16, 6u16)));
+ /// assert_eq!(a.align_to(2), Some(Range::new(2u64, 6u64)));
+ /// assert_eq!(a.align_to(4), Some(Range::new(4u8, 6u8)));
+ /// assert_eq!(a.align_to(8), None);
+ /// assert_eq!(a.align_to(3), None);
+ /// let b = Range::new(2u8, 2u8);
+ /// assert_eq!(b.align_to(2), Some(Range::new(2u8, 2u8)));
+ /// ```
+ pub fn align_to(&self, align: u64) -> Option<Range> {
+ match align {
+ 0 | 1 => Some(*self),
+ _ => {
+ if align & (align - 1) != 0 {
+ return None;
+ }
+ if let Some(min) = self.min.checked_add(align - 1).map(|v| v & !(align - 1)) {
+ if min <= self.max {
+ return Some(Range::new(min, self.max));
+ }
+ }
+ None
+ }
+ }
+ }
+}
+
+impl PartialOrd for Range {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ match self.min.cmp(&other.min) {
+ Ordering::Equal => Some(self.max.cmp(&other.max)),
+ res => Some(res),
+ }
+ }
+}
+
+impl Ord for Range {
+ fn cmp(&self, other: &Self) -> Ordering {
+ match self.min.cmp(&other.min) {
+ Ordering::Equal => self.max.cmp(&other.max),
+ res => res,
+ }
+ }
+}
+
+/// State of interval tree node.
+///
+/// Valid state transitions:
+/// - None -> Free: [IntervalTree::insert()]
+/// - None -> Valued: [IntervalTree::insert()]
+/// - Free -> Allocated: [IntervalTree::allocate()]
+/// - Allocated -> Valued(T): [IntervalTree::update()]
+/// - Valued -> Valued(T): [IntervalTree::update()]
+/// - Allocated -> Free: [IntervalTree::free()]
+/// - Valued(T) -> Free: [IntervalTree::free()]
+/// - * -> None: [IntervalTree::delete()]
+#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord)]
+pub enum NodeState<T> {
+ /// Node is free
+ Free,
+ /// Node is allocated but without associated data
+ Allocated,
+ /// Node is allocated with associated data.
+ Valued(T),
+}
+
+impl<T> NodeState<T> {
+ fn take(&mut self) -> Self {
+ std::mem::replace(self, NodeState::<T>::Free)
+ }
+
+ fn replace(&mut self, value: NodeState<T>) -> Self {
+ std::mem::replace(self, value)
+ }
+
+ fn as_ref(&self) -> NodeState<&T> {
+ match self {
+ NodeState::<T>::Valued(ref x) => NodeState::<&T>::Valued(x),
+ NodeState::<T>::Allocated => NodeState::<&T>::Allocated,
+ NodeState::<T>::Free => NodeState::<&T>::Free,
+ }
+ }
+
+ fn as_mut(&mut self) -> NodeState<&mut T> {
+ match self {
+ NodeState::<T>::Valued(ref mut x) => NodeState::<&mut T>::Valued(x),
+ NodeState::<T>::Allocated => NodeState::<&mut T>::Allocated,
+ NodeState::<T>::Free => NodeState::<&mut T>::Free,
+ }
+ }
+
+ fn is_free(&self) -> bool {
+ matches!(self, NodeState::<T>::Free)
+ }
+}
+
+impl<T> From<NodeState<T>> for Option<T> {
+ fn from(n: NodeState<T>) -> Option<T> {
+ match n {
+ NodeState::<T>::Free | NodeState::<T>::Allocated => None,
+ NodeState::<T>::Valued(data) => Some(data),
+ }
+ }
+}
+
+/// Internal tree node to implement interval tree.
+#[derive(Debug, PartialEq, Eq)]
+struct InnerNode<T> {
+ /// Interval handled by this node.
+ key: Range,
+ /// Optional contained data, None if the node is free.
+ data: NodeState<T>,
+ /// Optional left child of current node.
+ left: Option<Node<T>>,
+ /// Optional right child of current node.
+ right: Option<Node<T>>,
+ /// Cached height of the node.
+ height: u32,
+ /// Cached maximum valued covered by this node.
+ max_key: u64,
+}
+
+impl<T> InnerNode<T> {
+ fn new(key: Range, data: NodeState<T>) -> Self {
+ InnerNode {
+ key,
+ data,
+ left: None,
+ right: None,
+ height: 1,
+ max_key: key.max,
+ }
+ }
+}
+
+/// Newtype for interval tree nodes.
+#[derive(Debug, PartialEq, Eq)]
+struct Node<T>(Box<InnerNode<T>>);
+
+impl<T> Node<T> {
+ fn new(key: Range, data: Option<T>) -> Self {
+ let value = if let Some(t) = data {
+ NodeState::Valued(t)
+ } else {
+ NodeState::Free
+ };
+ Node(Box::new(InnerNode::new(key, value)))
+ }
+
+ /// Returns a readonly reference to the node associated with the `key` or None if not found.
+ fn search(&self, key: &Range) -> Option<&Self> {
+ match self.0.key.cmp(key) {
+ Ordering::Equal => Some(self),
+ Ordering::Less => self.0.right.as_ref().and_then(|node| node.search(key)),
+ Ordering::Greater => self.0.left.as_ref().and_then(|node| node.search(key)),
+ }
+ }
+
+ /// Returns a shared reference to the node covers full range of the `key`.
+ fn search_superset(&self, key: &Range) -> Option<&Self> {
+ if self.0.key.contain(key) {
+ Some(self)
+ } else if key.max < self.0.key.min && self.0.left.is_some() {
+ // Safe to unwrap() because we have just checked it.
+ self.0.left.as_ref().unwrap().search_superset(key)
+ } else if key.min > self.0.key.max && self.0.right.is_some() {
+ // Safe to unwrap() because we have just checked it.
+ self.0.right.as_ref().unwrap().search_superset(key)
+ } else {
+ None
+ }
+ }
+
+ /// Returns a mutable reference to the node covers full range of the `key`.
+ fn search_superset_mut(&mut self, key: &Range) -> Option<&mut Self> {
+ if self.0.key.contain(key) {
+ Some(self)
+ } else if key.max < self.0.key.min && self.0.left.is_some() {
+ // Safe to unwrap() because we have just checked it.
+ self.0.left.as_mut().unwrap().search_superset_mut(key)
+ } else if key.min > self.0.key.max && self.0.right.is_some() {
+ // Safe to unwrap() because we have just checked it.
+ self.0.right.as_mut().unwrap().search_superset_mut(key)
+ } else {
+ None
+ }
+ }
+
+ /// Insert a new (key, data) pair into the subtree.
+ ///
+ /// Note: it will panic if the new key intersects with existing nodes.
+ fn insert(mut self, key: Range, data: Option<T>) -> Self {
+ match self.0.key.cmp(&key) {
+ Ordering::Equal => {
+ panic!("interval_tree: key {:?} exists", key);
+ }
+ Ordering::Less => {
+ if self.0.key.intersect(&key) {
+ panic!(
+ "interval_tree: key {:?} intersects with existing {:?}",
+ key, self.0.key
+ );
+ }
+ match self.0.right {
+ None => self.0.right = Some(Node::new(key, data)),
+ Some(_) => self.0.right = self.0.right.take().map(|n| n.insert(key, data)),
+ }
+ }
+ Ordering::Greater => {
+ if self.0.key.intersect(&key) {
+ panic!(
+ "interval_tree: key {:?} intersects with existing {:?}",
+ key, self.0.key
+ );
+ }
+ match self.0.left {
+ None => self.0.left = Some(Node::new(key, data)),
+ Some(_) => self.0.left = self.0.left.take().map(|n| n.insert(key, data)),
+ }
+ }
+ }
+ self.updated_node()
+ }
+
+ /// Update an existing entry and return the old value.
+ fn update(&mut self, key: &Range, data: NodeState<T>) -> Option<T> {
+ match self.0.key.cmp(key) {
+ Ordering::Equal => {
+ match (self.0.data.as_ref(), data.as_ref()) {
+ (NodeState::<&T>::Free, NodeState::<&T>::Free)
+ | (NodeState::<&T>::Free, NodeState::<&T>::Valued(_))
+ | (NodeState::<&T>::Allocated, NodeState::<&T>::Free)
+ | (NodeState::<&T>::Allocated, NodeState::<&T>::Allocated)
+ | (NodeState::<&T>::Valued(_), NodeState::<&T>::Free)
+ | (NodeState::<&T>::Valued(_), NodeState::<&T>::Allocated) => {
+ panic!("try to update unallocated interval tree node");
+ }
+ _ => {}
+ }
+ self.0.data.replace(data).into()
+ }
+ Ordering::Less => match self.0.right.as_mut() {
+ None => None,
+ Some(node) => node.update(key, data),
+ },
+ Ordering::Greater => match self.0.left.as_mut() {
+ None => None,
+ Some(node) => node.update(key, data),
+ },
+ }
+ }
+
+ /// Delete `key` from the subtree.
+ ///
+ /// Note: it doesn't return whether the key exists in the subtree, so the caller needs to
+ /// ensure the key is present.
+ fn delete(mut self, key: &Range) -> (Option<T>, Option<Self>) {
+ match self.0.key.cmp(key) {
+ Ordering::Equal => {
+ let data = self.0.data.take();
+ return (data.into(), self.delete_root());
+ }
+ Ordering::Less => {
+ if let Some(node) = self.0.right.take() {
+ let (data, right) = node.delete(key);
+ self.0.right = right;
+ return (data, Some(self.updated_node()));
+ }
+ }
+ Ordering::Greater => {
+ if let Some(node) = self.0.left.take() {
+ let (data, left) = node.delete(key);
+ self.0.left = left;
+ return (data, Some(self.updated_node()));
+ }
+ }
+ }
+ (None, Some(self))
+ }
+
+ /// Rotate the node if necessary to keep balance.
+ fn rotate(self) -> Self {
+ let l = height(&self.0.left);
+ let r = height(&self.0.right);
+ match (l as i32) - (r as i32) {
+ 1 | 0 | -1 => self,
+ 2 => self.rotate_left_successor(),
+ -2 => self.rotate_right_successor(),
+ _ => unreachable!(),
+ }
+ }
+
+ /// Perform a single left rotation on this node.
+ fn rotate_left(mut self) -> Self {
+ let mut new_root = self.0.right.take().expect("Node is broken");
+ self.0.right = new_root.0.left.take();
+ self.update_cached_info();
+ new_root.0.left = Some(self);
+ new_root.update_cached_info();
+ new_root
+ }
+
+ /// Perform a single right rotation on this node.
+ fn rotate_right(mut self) -> Self {
+ let mut new_root = self.0.left.take().expect("Node is broken");
+ self.0.left = new_root.0.right.take();
+ self.update_cached_info();
+ new_root.0.right = Some(self);
+ new_root.update_cached_info();
+ new_root
+ }
+
+ /// Performs a rotation when the left successor is too high.
+ fn rotate_left_successor(mut self) -> Self {
+ let left = self.0.left.take().expect("Node is broken");
+ if height(&left.0.left) < height(&left.0.right) {
+ let rotated = left.rotate_left();
+ self.0.left = Some(rotated);
+ self.update_cached_info();
+ } else {
+ self.0.left = Some(left);
+ }
+ self.rotate_right()
+ }
+
+ /// Performs a rotation when the right successor is too high.
+ fn rotate_right_successor(mut self) -> Self {
+ let right = self.0.right.take().expect("Node is broken");
+ if height(&right.0.left) > height(&right.0.right) {
+ let rotated = right.rotate_right();
+ self.0.right = Some(rotated);
+ self.update_cached_info();
+ } else {
+ self.0.right = Some(right);
+ }
+ self.rotate_left()
+ }
+
+ fn delete_root(mut self) -> Option<Self> {
+ match (self.0.left.take(), self.0.right.take()) {
+ (None, None) => None,
+ (Some(l), None) => Some(l),
+ (None, Some(r)) => Some(r),
+ (Some(l), Some(r)) => Some(Self::combine_subtrees(l, r)),
+ }
+ }
+
+ /// Find the node with the minimal key in the subtree, remove it, and return the removed
+ /// node together with the remaining subtree as a `(min_node, remaining)` tuple.
+ fn get_new_root(mut self) -> (Self, Option<Self>) {
+ match self.0.left.take() {
+ None => {
+ let remaining = self.0.right.take();
+ (self, remaining)
+ }
+ Some(left) => {
+ let (min_node, left) = left.get_new_root();
+ self.0.left = left;
+ (min_node, Some(self.updated_node()))
+ }
+ }
+ }
+
+ fn combine_subtrees(l: Self, r: Self) -> Self {
+ let (mut new_root, remaining) = r.get_new_root();
+ new_root.0.left = Some(l);
+ new_root.0.right = remaining;
+ new_root.updated_node()
+ }
+
+ fn find_candidate(&self, constraint: &Constraint) -> Option<&Self> {
+ match constraint.policy {
+ AllocPolicy::FirstMatch => self.first_match(constraint),
+ AllocPolicy::Default => self.first_match(constraint),
+ }
+ }
+
+ fn first_match(&self, constraint: &Constraint) -> Option<&Self> {
+ let mut candidate = if self.0.left.is_some() {
+ self.0.left.as_ref().unwrap().first_match(constraint)
+ } else {
+ None
+ };
+
+ if candidate.is_none() && self.check_constraint(constraint) {
+ candidate = Some(self);
+ }
+ if candidate.is_none() && self.0.right.is_some() {
+ candidate = self.0.right.as_ref().unwrap().first_match(constraint);
+ }
+ candidate
+ }
+
+ fn check_constraint(&self, constraint: &Constraint) -> bool {
+ if self.0.data.is_free() {
+ let min = std::cmp::max(self.0.key.min, constraint.min);
+ let max = std::cmp::min(self.0.key.max, constraint.max);
+ if min <= max {
+ let key = Range::new(min, max);
+ if constraint.align == 0 || constraint.align == 1 {
+ return key.len() >= constraint.size;
+ }
+ return match key.align_to(constraint.align) {
+ None => false,
+ Some(aligned_key) => aligned_key.len() >= constraint.size,
+ };
+ }
+ }
+ false
+ }
+
+ /// Update cached information of the node.
+ /// Please make sure that the cached values of both children are up to date.
+ fn update_cached_info(&mut self) {
+ self.0.height = max(height(&self.0.left), height(&self.0.right)) + 1;
+ self.0.max_key = max(
+ max_key(&self.0.left),
+ max(max_key(&self.0.right), self.0.key.max),
+ );
+ }
+
+ /// Update the sub-tree to keep balance.
+ fn updated_node(mut self) -> Self {
+ self.update_cached_info();
+ self.rotate()
+ }
+}
+
+/// Compute height of the optional sub-tree.
+fn height<T>(node: &Option<Node<T>>) -> u32 {
+ node.as_ref().map_or(0, |n| n.0.height)
+}
+
+/// Compute maximum key value covered by the optional sub-tree.
+fn max_key<T>(node: &Option<Node<T>>) -> u64 {
+ node.as_ref().map_or(0, |n| n.0.max_key)
+}
+
+/// An interval tree implementation specialized for VMM resource management.
+#[derive(Debug, Default, PartialEq, Eq)]
+pub struct IntervalTree<T> {
+ root: Option<Node<T>>,
+}
+
+impl<T> IntervalTree<T> {
+ /// Construct a default empty [IntervalTree] object.
+ ///
+ /// # Examples
+ /// ```rust
+ /// extern crate dbs_allocator;
+ ///
+ /// let tree = dbs_allocator::IntervalTree::<u64>::new();
+ /// ```
+ pub fn new() -> Self {
+ IntervalTree { root: None }
+ }
+
+ /// Check whether the interval tree is empty.
+ pub fn is_empty(&self) -> bool {
+ self.root.is_none()
+ }
+
+ /// Get the data item associated with the key, or return None if no match found.
+ ///
+ /// # Examples
+ /// ```rust
+ /// extern crate dbs_allocator;
+ /// use dbs_allocator::{IntervalTree, NodeState, Range};
+ ///
+ /// let mut tree = dbs_allocator::IntervalTree::<u64>::new();
+ /// assert!(tree.is_empty());
+ /// assert_eq!(tree.get(&Range::new(0x101u64, 0x101u64)), None);
+ /// tree.insert(Range::new(0x100u64, 0x100u64), Some(1));
+ /// tree.insert(Range::new(0x200u64, 0x2ffu64), None);
+ /// assert!(!tree.is_empty());
+ /// assert_eq!(
+ /// tree.get(&Range::new(0x100u64, 0x100u64)),
+ /// Some(NodeState::Valued(&1))
+ /// );
+ /// assert_eq!(
+ /// tree.get(&Range::new(0x200u64, 0x2ffu64)),
+ /// Some(NodeState::Free)
+ /// );
+ /// assert_eq!(tree.get(&Range::new(0x101u64, 0x101u64)), None);
+ /// assert_eq!(tree.get(&Range::new(0x100u64, 0x101u64)), None);
+ /// ```
+ pub fn get(&self, key: &Range) -> Option<NodeState<&T>> {
+ match self.root {
+ None => None,
+ Some(ref node) => node.search(key).map(|n| n.0.data.as_ref()),
+ }
+ }
+
+ /// Get a shared reference to the node fully covering the entire key range.
+ ///
+ /// # Examples
+ /// ```rust
+ /// extern crate dbs_allocator;
+ /// use dbs_allocator::{IntervalTree, NodeState, Range};
+ ///
+ /// let mut tree = IntervalTree::<u64>::new();
+ /// tree.insert(Range::new(0x100u32, 0x100u32), Some(1));
+ /// tree.insert(Range::new(0x200u32, 0x2ffu32), None);
+ /// assert_eq!(
+ /// tree.get_superset(&Range::new(0x100u32, 0x100u32)),
+ /// Some((&Range::new(0x100u32, 0x100u32), NodeState::Valued(&1)))
+ /// );
+ /// assert_eq!(
+ /// tree.get_superset(&Range::new(0x210u32, 0x210u32)),
+ /// Some((&Range::new(0x200u32, 0x2ffu32), NodeState::Free))
+ /// );
+ /// assert_eq!(
+ /// tree.get_superset(&Range::new(0x2ffu32, 0x2ffu32)),
+ /// Some((&Range::new(0x200u32, 0x2ffu32), NodeState::Free))
+ /// );
+ /// ```
+ pub fn get_superset(&self, key: &Range) -> Option<(&Range, NodeState<&T>)> {
+ match self.root {
+ None => None,
+ Some(ref node) => node
+ .search_superset(key)
+ .map(|n| (&n.0.key, n.0.data.as_ref())),
+ }
+ }
+
+ /// Get a mutable reference to the node fully covering the entire key range.
+ ///
+ /// # Examples
+ /// ```rust
+ /// extern crate dbs_allocator;
+ /// use dbs_allocator::{IntervalTree, NodeState, Range};
+ ///
+ /// let mut tree = IntervalTree::<u64>::new();
+ /// tree.insert(Range::new(0x100u32, 0x100u32), Some(1));
+ /// tree.insert(Range::new(0x200u32, 0x2ffu32), None);
+ /// assert_eq!(
+ /// tree.get_superset_mut(&Range::new(0x100u32, 0x100u32)),
+ /// Some((&Range::new(0x100u32, 0x100u32), NodeState::Valued(&mut 1)))
+ /// );
+ /// assert_eq!(
+ /// tree.get_superset_mut(&Range::new(0x210u32, 0x210u32)),
+ /// Some((&Range::new(0x200u32, 0x2ffu32), NodeState::Free))
+ /// );
+ /// assert_eq!(
+ /// tree.get_superset_mut(&Range::new(0x2ffu32, 0x2ffu32)),
+ /// Some((&Range::new(0x200u32, 0x2ffu32), NodeState::Free))
+ /// );
+ /// ```
+ pub fn get_superset_mut(&mut self, key: &Range) -> Option<(&Range, NodeState<&mut T>)> {
+ match self.root {
+ None => None,
+ Some(ref mut node) => node
+ .search_superset_mut(key)
+ .map(|n| (&n.0.key, n.0.data.as_mut())),
+ }
+ }
+
+ /// Get a shared reference to the value associated with the id.
+ ///
+ /// # Examples
+ /// ```rust
+ /// extern crate dbs_allocator;
+ /// use dbs_allocator::{IntervalTree, NodeState, Range};
+ ///
+ /// let mut tree = IntervalTree::<u64>::new();
+ /// tree.insert(Range::new(0x100u16, 0x100u16), Some(1));
+ /// tree.insert(Range::new(0x200u16, 0x2ffu16), None);
+ /// assert_eq!(tree.get_by_id(0x100u16), Some(&1));
+ /// assert_eq!(tree.get_by_id(0x210u32), None);
+ /// assert_eq!(tree.get_by_id(0x2ffu64), None);
+ /// ```
+ pub fn get_by_id<U>(&self, id: U) -> Option<&T>
+ where
+ u64: From<U>,
+ {
+ match self.root {
+ None => None,
+ Some(ref node) => {
+ let key = Range::new_point(id);
+ match node.search_superset(&key) {
+ Some(node) => node.0.data.as_ref().into(),
+ None => None,
+ }
+ }
+ }
+ }
+
+ /// Get a mutable reference to the value associated with the id.
+ ///
+ /// # Examples
+ /// ```rust
+ /// extern crate dbs_allocator;
+ /// use dbs_allocator::{IntervalTree, NodeState, Range};
+ ///
+ /// let mut tree = IntervalTree::<u64>::new();
+ /// tree.insert(Range::new(0x100u16, 0x100u16), Some(1));
+ /// tree.insert(Range::new(0x200u16, 0x2ffu16), None);
+ /// assert_eq!(tree.get_by_id_mut(0x100u16), Some(&mut 1));
+ /// assert_eq!(tree.get_by_id_mut(0x210u32), None);
+ /// assert_eq!(tree.get_by_id_mut(0x2ffu64), None);
+ /// ```
+ pub fn get_by_id_mut<U>(&mut self, id: U) -> Option<&mut T>
+ where
+ u64: From<U>,
+ {
+ match self.root {
+ None => None,
+ Some(ref mut node) => {
+ let key = Range::new_point(id);
+ match node.search_superset_mut(&key) {
+ Some(node) => node.0.data.as_mut().into(),
+ None => None,
+ }
+ }
+ }
+ }
+
+ /// Insert the (key, data) pair into the interval tree, panic if intersects with existing nodes.
+ ///
+ /// # Examples
+ /// ```rust
+ /// extern crate dbs_allocator;
+ /// use dbs_allocator::{IntervalTree, NodeState, Range};
+ ///
+ /// let mut tree = IntervalTree::<u64>::new();
+ /// tree.insert(Range::new(0x100u32, 0x100u32), Some(1));
+ /// tree.insert(Range::new(0x200u32, 0x2ffu32), None);
+ /// assert_eq!(
+ /// tree.get(&Range::new(0x100u64, 0x100u64)),
+ /// Some(NodeState::Valued(&1))
+ /// );
+ /// assert_eq!(
+ /// tree.get(&Range::new(0x200u64, 0x2ffu64)),
+ /// Some(NodeState::Free)
+ /// );
+ /// ```
+ pub fn insert(&mut self, key: Range, data: Option<T>) {
+ match self.root.take() {
+ None => self.root = Some(Node::new(key, data)),
+ Some(node) => self.root = Some(node.insert(key, data)),
+ }
+ }
+
+ /// Update an existing entry and return the old value.
+ ///
+ /// # Examples
+ /// ```rust
+ /// extern crate dbs_allocator;
+ /// use dbs_allocator::{Constraint, IntervalTree, Range};
+ ///
+ /// let mut tree = IntervalTree::<u64>::new();
+ /// tree.insert(Range::new(0x100u64, 0x100u64), None);
+ /// tree.insert(Range::new(0x200u64, 0x2ffu64), None);
+ ///
+ /// let constraint = Constraint::new(2u32);
+ /// let key = tree.allocate(&constraint);
+ /// assert_eq!(key, Some(Range::new(0x200u64, 0x201u64)));
+ /// let old = tree.update(&Range::new(0x200u64, 0x201u64), 2);
+ /// assert_eq!(old, None);
+ /// let old = tree.update(&Range::new(0x200u64, 0x201u64), 3);
+ /// assert_eq!(old, Some(2));
+ /// ```
+ pub fn update(&mut self, key: &Range, data: T) -> Option<T> {
+ match self.root.as_mut() {
+ None => None,
+ Some(node) => node.update(key, NodeState::<T>::Valued(data)),
+ }
+ }
+
+ /// Remove the `key` from the tree and return the associated data.
+ ///
+ /// # Examples
+ /// ```rust
+ /// extern crate dbs_allocator;
+ /// use dbs_allocator::{IntervalTree, Range};
+ ///
+ /// let mut tree = IntervalTree::<u64>::new();
+ /// tree.insert(Range::new(0x100u64, 0x100u64), Some(1));
+ /// tree.insert(Range::new(0x200u64, 0x2ffu64), None);
+ /// let old = tree.delete(&Range::new(0x100u64, 0x100u64));
+ /// assert_eq!(old, Some(1));
+ /// let old = tree.delete(&Range::new(0x200u64, 0x2ffu64));
+ /// assert_eq!(old, None);
+ /// ```
+ pub fn delete(&mut self, key: &Range) -> Option<T> {
+ match self.root.take() {
+ Some(node) => {
+ let (data, root) = node.delete(key);
+ self.root = root;
+ data
+ }
+ None => None,
+ }
+ }
+
+ /// Allocate a resource range according to the allocation constraints.
+ ///
+ /// # Examples
+ /// ```rust
+ /// extern crate dbs_allocator;
+ /// use dbs_allocator::{Constraint, IntervalTree, Range};
+ ///
+ /// let mut tree = IntervalTree::<u64>::new();
+ /// tree.insert(Range::new(0x100u64, 0x100u64), None);
+ /// tree.insert(Range::new(0x200u64, 0x2ffu64), None);
+ ///
+ /// let constraint = Constraint::new(2u8);
+ /// let key = tree.allocate(&constraint);
+ /// assert_eq!(key, Some(Range::new(0x200u64, 0x201u64)));
+ /// tree.update(&Range::new(0x200u64, 0x201u64), 2);
+ /// ```
+ pub fn allocate(&mut self, constraint: &Constraint) -> Option<Range> {
+ if constraint.size == 0 {
+ return None;
+ }
+ let candidate = match self.root.as_mut() {
+ None => None,
+ Some(node) => node.find_candidate(constraint),
+ };
+
+ match candidate {
+ None => None,
+ Some(node) => {
+ let node_key = node.0.key;
+ let range = Range::new(
+ max(node_key.min, constraint.min),
+ min(node_key.max, constraint.max),
+ );
+ // Safe to unwrap because the candidate satisfies the constraints.
+ let aligned_key = range.align_to(constraint.align).unwrap();
+ let result = Range::new(aligned_key.min, aligned_key.min + constraint.size - 1);
+
+ // Allocate a resource from the node, no need to split the candidate node.
+ if node_key.min == aligned_key.min && node_key.len() == constraint.size {
+ self.root
+ .as_mut()
+ .unwrap()
+ .update(&node_key, NodeState::<T>::Allocated);
+ return Some(node_key);
+ }
+
+ // Split the candidate node.
+ // TODO: the following algorithm favors simplicity over optimality.
+ self.delete(&node_key);
+ if aligned_key.min > node_key.min {
+ self.insert(Range::new(node_key.min, aligned_key.min - 1), None);
+ }
+ self.insert(result, None);
+ if result.max < node_key.max {
+ self.insert(Range::new(result.max + 1, node_key.max), None);
+ }
+
+ self.root
+ .as_mut()
+ .unwrap()
+ .update(&result, NodeState::<T>::Allocated);
+ Some(result)
+ }
+ }
+ }
+
+ /// Free an allocated range and return the associated data.
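+ ///
+ /// Adjacent free ranges are merged back together once the key has been released.
+ ///
+ /// # Examples
+ /// ```rust
+ /// extern crate dbs_allocator;
+ /// use dbs_allocator::{Constraint, IntervalTree, Range};
+ ///
+ /// let mut tree = IntervalTree::<u32>::new();
+ /// tree.insert(Range::new(0x200u64, 0x2ffu64), None);
+ ///
+ /// let constraint = Constraint::new(2u8);
+ /// let key = tree.allocate(&constraint).unwrap();
+ /// tree.update(&key, 2);
+ ///
+ /// // Freeing hands back the data previously stored for the range.
+ /// assert_eq!(tree.free(&key), Some(2));
+ /// ```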
+ pub fn free(&mut self, key: &Range) -> Option<T> {
+ let result = self.delete(key);
+ let mut range = *key;
+
+ // Try to merge with adjacent free nodes.
+ if range.min > 0 {
+ if let Some((r, v)) = self.get_superset(&Range::new(range.min - 1, range.min - 1)) {
+ if v.is_free() {
+ range.min = r.min;
+ }
+ }
+ }
+ if range.max < std::u64::MAX {
+ if let Some((r, v)) = self.get_superset(&Range::new(range.max + 1, range.max + 1)) {
+ if v.is_free() {
+ range.max = r.max;
+ }
+ }
+ }
+
+ if range.min < key.min {
+ self.delete(&Range::new(range.min, key.min - 1));
+ }
+ if range.max > key.max {
+ self.delete(&Range::new(key.max + 1, range.max));
+ }
+ self.insert(range, None);
+
+ result
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ #[should_panic]
+ fn test_new_range() {
+ let _ = Range::new(2u8, 1u8);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_new_range_overflow() {
+ let _ = Range::new(0u64, std::u64::MAX);
+ }
+
+ #[test]
+ fn test_range_intersect() {
+ let range_a = Range::new(1u8, 4u8);
+ let range_b = Range::new(4u16, 6u16);
+ let range_c = Range::new(2u32, 3u32);
+ let range_d = Range::new(4u64, 4u64);
+ let range_e = Range::new(5u32, 6u32);
+
+ assert!(range_a.intersect(&range_b));
+ assert!(range_b.intersect(&range_a));
+ assert!(range_a.intersect(&range_c));
+ assert!(range_c.intersect(&range_a));
+ assert!(range_a.intersect(&range_d));
+ assert!(range_d.intersect(&range_a));
+ assert!(!range_a.intersect(&range_e));
+ assert!(!range_e.intersect(&range_a));
+
+ assert_eq!(range_a.len(), 4);
+ assert_eq!(range_d.len(), 1);
+ }
+
+ #[test]
+ fn test_range_contain() {
+ let range_a = Range::new(2u8, 6u8);
+ assert!(range_a.contain(&Range::new(2u8, 3u8)));
+ assert!(range_a.contain(&Range::new(3u8, 4u8)));
+ assert!(range_a.contain(&Range::new(5u8, 5u8)));
+ assert!(range_a.contain(&Range::new(5u8, 6u8)));
+ assert!(range_a.contain(&Range::new(6u8, 6u8)));
+ assert!(!range_a.contain(&Range::new(1u8, 1u8)));
+ assert!(!range_a.contain(&Range::new(1u8, 2u8)));
+ assert!(!range_a.contain(&Range::new(1u8, 3u8)));
+ assert!(!range_a.contain(&Range::new(1u8, 7u8)));
+ assert!(!range_a.contain(&Range::new(7u8, 8u8)));
+ assert!(!range_a.contain(&Range::new(6u8, 7u8)));
+ assert!(!range_a.contain(&Range::new(7u8, 8u8)));
+ }
+
+ #[test]
+ fn test_range_align_to() {
+ let range_a = Range::new(2u32, 6);
+ assert_eq!(range_a.align_to(0), Some(Range::new(2u64, 6u64)));
+ assert_eq!(range_a.align_to(1), Some(Range::new(2u8, 6u8)));
+ assert_eq!(range_a.align_to(2), Some(Range::new(2u16, 6u16)));
+ assert_eq!(range_a.align_to(4), Some(Range::new(4u32, 6u32)));
+ assert_eq!(range_a.align_to(8), None);
+ assert_eq!(range_a.align_to(3), None);
+
+ let range_b = Range::new(0xFFFF_FFFF_FFFF_FFFDu64, 0xFFFF_FFFF_FFFF_FFFFu64);
+ assert_eq!(
+ range_b.align_to(2),
+ Some(Range::new(0xFFFF_FFFF_FFFF_FFFEu64, 0xFFFF_FFFF_FFFF_FFFF))
+ );
+ assert_eq!(range_b.align_to(4), None);
+ }
+
+ #[test]
+ fn test_range_ord() {
+ let range_a = Range::new(1u32, 4u32);
+ let range_b = Range::new(1u32, 4u32);
+ let range_c = Range::new(1u32, 3u32);
+ let range_d = Range::new(1u32, 5u32);
+ let range_e = Range::new(2u32, 2u32);
+
+ assert_eq!(range_a, range_b);
+ assert_eq!(range_b, range_a);
+ assert!(range_a > range_c);
+ assert!(range_c < range_a);
+ assert!(range_a < range_d);
+ assert!(range_d > range_a);
+ assert!(range_a < range_e);
+ assert!(range_e > range_a);
+ }
+
+ #[should_panic]
+ #[test]
+ fn test_tree_insert_equal() {
+ let mut tree = IntervalTree::<u64>::new();
+ tree.insert(Range::new(0x100u16, 0x200), Some(1));
+ tree.insert(Range::new(0x100u32, 0x200), None);
+ }
+
+ #[should_panic]
+ #[test]
+ fn test_tree_insert_intersect_on_right() {
+ let mut tree = IntervalTree::<u64>::new();
+ tree.insert(Range::new(0x100, 0x200u32), Some(1));
+ tree.insert(Range::new(0x200, 0x2ffu64), None);
+ }
+
+ #[should_panic]
+ #[test]
+ fn test_tree_insert_intersect_on_left() {
+ let mut tree = IntervalTree::<u64>::new();
+ tree.insert(Range::new(0x100, 0x200u32), Some(1));
+ tree.insert(Range::new(0x000, 0x100u64), None);
+ }
+
+ #[test]
+ fn test_tree_get_superset() {
+ let mut tree = IntervalTree::<u64>::new();
+ tree.insert(Range::new(0x100u32, 0x100u32), Some(1));
+ tree.insert(Range::new(0x001u16, 0x008u16), None);
+ tree.insert(Range::new(0x009u16, 0x00fu16), None);
+ tree.insert(Range::new(0x200u16, 0x2ffu16), None);
+ let mut constraint = Constraint::new(8u64);
+ constraint.min = 0x211;
+ constraint.max = 0x21f;
+ constraint.align = 0x8;
+ tree.allocate(&constraint);
+
+ // Valued case.
+ assert_eq!(
+ tree.get_superset(&Range::new(0x100u32, 0x100)),
+ Some((&Range::new(0x100, 0x100u32), NodeState::Valued(&1)))
+ );
+
+ // Free case.
+ assert_eq!(
+ tree.get_superset(&Range::new(0x200u16, 0x200)),
+ Some((&Range::new(0x200, 0x217u64), NodeState::Free))
+ );
+ assert_eq!(
+ tree.get_superset(&Range::new(0x2ffu32, 0x2ff)),
+ Some((&Range::new(0x220, 0x2ffu32), NodeState::Free))
+ );
+
+ // Allocated case.
+ assert_eq!(
+ tree.get_superset(&Range::new(0x218u16, 0x21f)),
+ Some((&Range::new(0x218, 0x21fu16), NodeState::Allocated))
+ );
+
+ // None case.
+ assert_eq!(tree.get_superset(&Range::new(0x2ffu32, 0x300)), None);
+ assert_eq!(tree.get_superset(&Range::new(0x300u32, 0x300)), None);
+ assert_eq!(tree.get_superset(&Range::new(0x1ffu32, 0x300)), None);
+ }
+
+ #[test]
+ fn test_tree_get_superset_mut() {
+ let mut tree = IntervalTree::<u64>::new();
+ tree.insert(Range::new(0x100u32, 0x100u32), Some(1));
+ tree.insert(Range::new(0x200u16, 0x2ffu16), None);
+ let mut constraint = Constraint::new(8u64);
+ constraint.min = 0x211;
+ constraint.max = 0x21f;
+ constraint.align = 0x8;
+ tree.allocate(&constraint);
+
+ // Valued case.
+ assert_eq!(
+ tree.get_superset_mut(&Range::new(0x100u32, 0x100u32)),
+ Some((&Range::new(0x100u32, 0x100u32), NodeState::Valued(&mut 1)))
+ );
+
+ // Allocated case.
+ assert_eq!(
+ tree.get_superset_mut(&Range::new(0x218u64, 0x21fu64)),
+ Some((&Range::new(0x218u64, 0x21fu64), NodeState::Allocated))
+ );
+
+ // Free case.
+ assert_eq!(
+ tree.get_superset_mut(&Range::new(0x2ffu32, 0x2ffu32)),
+ Some((&Range::new(0x220u32, 0x2ffu32), NodeState::Free))
+ );
+
+ // None case.
+ assert_eq!(tree.get_superset(&Range::new(0x2ffu32, 0x300)), None);
+ assert_eq!(tree.get_superset(&Range::new(0x300u32, 0x300)), None);
+ assert_eq!(tree.get_superset(&Range::new(0x1ffu32, 0x300)), None);
+ }
+
+ #[test]
+ fn test_tree_update() {
+ let mut tree = IntervalTree::