Mirror of https://github.com/aljazceru/kata-containers.git (synced 2025-12-18 23:04:20 +01:00)
CCv0: Merge main into CCv0 branch

Merge remote-tracking branch 'upstream/main' into CCv0

Fixes: #5054

Signed-off-by: Georgina Kinge <georgina.kinge@ibm.com>
.github/workflows/add-backport-label.yaml (new file, vendored, 100 lines)
@@ -0,0 +1,100 @@
name: Add backport label

on:
  pull_request:
    types:
      - opened
      - synchronize
      - reopened
      - edited
      - labeled
      - unlabeled

jobs:
  check-issues:
    if: ${{ github.event.label.name != 'auto-backport' }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code to allow hub to communicate with the project
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        uses: actions/checkout@v3

      - name: Install hub extension script
        run: |
          pushd $(mktemp -d) &>/dev/null
          git clone --single-branch --depth 1 "https://github.com/kata-containers/.github" && cd .github/scripts
          sudo install hub-util.sh /usr/local/bin
          popd &>/dev/null

      - name: Determine whether to add label
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          CONTAINS_AUTO_BACKPORT: ${{ contains(github.event.pull_request.labels.*.name, 'auto-backport') }}
        id: add_label
        run: |
          pr=${{ github.event.pull_request.number }}
          linked_issue_urls=$(hub-util.sh \
            list-issues-for-pr "$pr" |\
            grep -v "^\#" |\
            cut -d';' -f3 || true)
          [ -z "$linked_issue_urls" ] && {
            echo "::error::No linked issues for PR $pr"
            exit 1
          }
          has_bug=false
          for issue_url in $(echo "$linked_issue_urls")
          do
            issue=$(echo "$issue_url"| awk -F\/ '{print $NF}' || true)
            [ -z "$issue" ] && {
              echo "::error::Cannot determine issue number from $issue_url for PR $pr"
              exit 1
            }
            labels=$(hub-util.sh list-labels-for-issue "$issue")

            label_names=$(echo $labels | jq -r '.[].name' || true)
            if [[ "$label_names" =~ "bug" ]]; then
              has_bug=true
              break
            fi
          done

          has_backport_needed_label=${{ contains(github.event.pull_request.labels.*.name, 'needs-backport') }}
          has_no_backport_needed_label=${{ contains(github.event.pull_request.labels.*.name, 'no-backport-needed') }}

          echo "::set-output name=add_backport_label::false"
          if [ $has_backport_needed_label = true ] || [ $has_bug = true ]; then
            if [[ $has_no_backport_needed_label = false ]]; then
              echo "::set-output name=add_backport_label::true"
            fi
          fi

          # Do not spam comment, only if auto-backport label is going to be newly added.
          echo "::set-output name=auto_backport_added::$CONTAINS_AUTO_BACKPORT"

      - name: Add comment
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && steps.add_label.outputs.add_backport_label == 'true' && steps.add_label.outputs.auto_backport_added == 'false' }}
        uses: actions/github-script@v6
        with:
          script: |
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: 'This issue has been marked for auto-backporting. Add label(s) backport-to-BRANCHNAME to backport to them'
            })

      # Allow label to be removed by adding no-backport-needed label
      - name: Remove auto-backport label
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && steps.add_label.outputs.add_backport_label == 'false' }}
        uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90
        with:
          remove-labels: "auto-backport"
          repo-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Add auto-backport label
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && steps.add_label.outputs.add_backport_label == 'true' }}
        uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90
        with:
          add-labels: "auto-backport"
          repo-token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/auto-backport.yaml (new file, vendored, 29 lines)
@@ -0,0 +1,29 @@
on:
  pull_request_target:
    types: ["labeled", "closed"]

jobs:
  backport:
    name: Backport PR
    runs-on: ubuntu-latest
    if: |
      github.event.pull_request.merged == true
      && contains(github.event.pull_request.labels.*.name, 'auto-backport')
      && (
        (github.event.action == 'labeled' && github.event.label.name == 'auto-backport')
        || (github.event.action == 'closed')
      )
    steps:
      - name: Backport Action
        uses: sqren/backport-github-action@v8.9.2
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          auto_backport_label_prefix: backport-to-

      - name: Info log
        if: ${{ success() }}
        run: cat /home/runner/.backport/backport.info.log

      - name: Debug log
        if: ${{ failure() }}
        run: cat /home/runner/.backport/backport.debug.log
@@ -23,25 +23,27 @@ arch=${ARCH:-$(uname -m)}
workdir="$(mktemp -d --tmpdir build-libseccomp.XXXXX)"

# Variables for libseccomp
# Currently, specify the libseccomp version directly without using `versions.yaml`
# because the current Snap workflow is incomplete.
# After solving the issue, replace this code by using the `versions.yaml`.
# libseccomp_version=$(get_version "externals.libseccomp.version")
# libseccomp_url=$(get_version "externals.libseccomp.url")
libseccomp_version="2.5.4"
libseccomp_url="https://github.com/seccomp/libseccomp"
libseccomp_version="${LIBSECCOMP_VERSION:-""}"
if [ -z "${libseccomp_version}" ]; then
    libseccomp_version=$(get_version "externals.libseccomp.version")
fi
libseccomp_url="${LIBSECCOMP_URL:-""}"
if [ -z "${libseccomp_url}" ]; then
    libseccomp_url=$(get_version "externals.libseccomp.url")
fi
libseccomp_tarball="libseccomp-${libseccomp_version}.tar.gz"
libseccomp_tarball_url="${libseccomp_url}/releases/download/v${libseccomp_version}/${libseccomp_tarball}"
cflags="-O2"

# Variables for gperf
# Currently, specify the gperf version directly without using `versions.yaml`
# because the current Snap workflow is incomplete.
# After solving the issue, replace this code by using the `versions.yaml`.
# gperf_version=$(get_version "externals.gperf.version")
# gperf_url=$(get_version "externals.gperf.url")
gperf_version="3.1"
gperf_url="https://ftp.gnu.org/gnu/gperf"
gperf_version="${GPERF_VERSION:-""}"
if [ -z "${gperf_version}" ]; then
    gperf_version=$(get_version "externals.gperf.version")
fi
gperf_url="${GPERF_URL:-""}"
if [ -z "${gperf_url}" ]; then
    gperf_url=$(get_version "externals.gperf.url")
fi
gperf_tarball="gperf-${gperf_version}.tar.gz"
gperf_tarball_url="${gperf_url}/${gperf_tarball}"
src/agent/Cargo.lock (generated, 1 line changed)
@@ -1686,6 +1686,7 @@ dependencies = [
 "image-rs",
 "ipnetwork",
 "kata-sys-util",
 "kata-types",
 "lazy_static",
 "libc",
 "log",

@@ -18,9 +18,10 @@ serde_json = "1.0.39"
scan_fmt = "0.2.3"
scopeguard = "1.0.0"
thiserror = "1.0.26"
regex = "1.5.5"
regex = "1.5.6"
serial_test = "0.5.1"
kata-sys-util = { path = "../libs/kata-sys-util" }
kata-types = { path = "../libs/kata-types" }
sysinfo = "0.23.0"
url = "2.2.2"

@@ -20,7 +20,7 @@ protobuf = "2.27.0"
slog = "2.5.2"
slog-scope = "4.1.2"
scan_fmt = "0.2.6"
regex = "1.5.5"
regex = "1.5.6"
path-absolutize = "1.2.0"
anyhow = "1.0.32"
cgroups = { package = "cgroups-rs", version = "0.2.10" }
@@ -174,7 +174,7 @@ impl CgroupManager for Manager {
            freezer_controller.freeze()?;
        }
        _ => {
            return Err(anyhow!(nix::Error::EINVAL));
            return Err(anyhow!("Invalid FreezerState"));
        }
    }

@@ -106,6 +106,11 @@ impl Default for ContainerStatus {
    }
}

// We might want to change this to thiserror in the future
const MissingCGroupManager: &str = "failed to get container's cgroup Manager";
const MissingLinux: &str = "no linux config";
const InvalidNamespace: &str = "invalid namespace type";

pub type Config = CreateOpts;
type NamespaceType = String;

@@ -292,7 +297,7 @@ impl Container for LinuxContainer {
            self.status.transition(ContainerState::Paused);
            return Ok(());
        }
        Err(anyhow!("failed to get container's cgroup manager"))
        Err(anyhow!(MissingCGroupManager))
    }

    fn resume(&mut self) -> Result<()> {

@@ -310,7 +315,7 @@ impl Container for LinuxContainer {
            self.status.transition(ContainerState::Running);
            return Ok(());
        }
        Err(anyhow!("failed to get container's cgroup manager"))
        Err(anyhow!(MissingCGroupManager))
    }
}

@@ -397,7 +402,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
    };

    if spec.linux.is_none() {
        return Err(anyhow!("no linux config"));
        return Err(anyhow!(MissingLinux));
    }
    let linux = spec.linux.as_ref().unwrap();

@@ -411,7 +416,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
    for ns in &nses {
        let s = NAMESPACES.get(&ns.r#type.as_str());
        if s.is_none() {
            return Err(anyhow!("invalid ns type"));
            return Err(anyhow!(InvalidNamespace));
        }
        let s = s.unwrap();

@@ -1437,18 +1442,10 @@ impl LinuxContainer {
            Some(unistd::getuid()),
            Some(unistd::getgid()),
        )
        .context(format!("cannot change onwer of container {} root", id))?;

        if config.spec.is_none() {
            return Err(anyhow!(nix::Error::EINVAL));
        }
        .context(format!("Cannot change owner of container {} root", id))?;

        let spec = config.spec.as_ref().unwrap();

        if spec.linux.is_none() {
            return Err(anyhow!(nix::Error::EINVAL));
        }

        let linux = spec.linux.as_ref().unwrap();

        let cpath = if linux.cgroups_path.is_empty() {

@@ -1530,7 +1527,7 @@ pub async fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()
    let binary = PathBuf::from(h.path.as_str());
    let path = binary.canonicalize()?;
    if !path.exists() {
        return Err(anyhow!(nix::Error::EINVAL));
        return Err(anyhow!("Path {:?} does not exist", path));
    }

    let mut args = h.args.clone();

@@ -1020,9 +1020,7 @@ pub fn finish_rootfs(cfd_log: RawFd, spec: &Spec, process: &Process) -> Result<(
}

fn mask_path(path: &str) -> Result<()> {
    if !path.starts_with('/') || path.contains("..") {
        return Err(anyhow!(nix::Error::EINVAL));
    }
    check_paths(path)?;

    match mount(
        Some("/dev/null"),

@@ -1040,9 +1038,7 @@ fn mask_path(path: &str) -> Result<()> {
}

fn readonly_path(path: &str) -> Result<()> {
    if !path.starts_with('/') || path.contains("..") {
        return Err(anyhow!(nix::Error::EINVAL));
    }
    check_paths(path)?;

    if let Err(e) = mount(
        Some(&path[1..]),

@@ -1068,6 +1064,16 @@ fn readonly_path(path: &str) -> Result<()> {
    Ok(())
}

fn check_paths(path: &str) -> Result<()> {
    if !path.starts_with('/') || path.contains("..") {
        return Err(anyhow!(
            "Cannot mount {} (path does not start with '/' or contains '..').",
            path
        ));
    }
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

@@ -1420,6 +1426,55 @@ mod tests {
        }
    }

    #[test]
    fn test_check_paths() {
        #[derive(Debug)]
        struct TestData<'a> {
            name: &'a str,
            path: &'a str,
            result: Result<()>,
        }

        let tests = &[
            TestData {
                name: "valid path",
                path: "/foo/bar",
                result: Ok(()),
            },
            TestData {
                name: "does not starts with /",
                path: "foo/bar",
                result: Err(anyhow!(
                    "Cannot mount foo/bar (path does not start with '/' or contains '..')."
                )),
            },
            TestData {
                name: "contains ..",
                path: "../foo/bar",
                result: Err(anyhow!(
                    "Cannot mount ../foo/bar (path does not start with '/' or contains '..')."
                )),
            },
        ];

        for (i, d) in tests.iter().enumerate() {
            let msg = format!("test[{}]: {:?}", i, d.name);

            let result = check_paths(d.path);

            let msg = format!("{}: result: {:?}", msg, result);

            if d.result.is_ok() {
                assert!(result.is_ok());
                continue;
            }

            let expected_error = format!("{}", d.result.as_ref().unwrap_err());
            let actual_error = format!("{}", result.unwrap_err());
            assert!(actual_error == expected_error, "{}", msg);
        }
    }

    #[test]
    fn test_check_proc_mount() {
        let mount = oci::Mount {
@@ -4,17 +4,15 @@
//

use crate::container::Config;
use anyhow::{anyhow, Context, Error, Result};
use anyhow::{anyhow, Context, Result};
use oci::{Linux, LinuxIdMapping, LinuxNamespace, Spec};
use std::collections::HashMap;
use std::path::{Component, PathBuf};

fn einval() -> Error {
    anyhow!(nix::Error::EINVAL)
}

fn get_linux(oci: &Spec) -> Result<&Linux> {
    oci.linux.as_ref().ok_or_else(einval)
    oci.linux
        .as_ref()
        .ok_or_else(|| anyhow!("Unable to get Linux section from Spec"))
}

fn contain_namespace(nses: &[LinuxNamespace], key: &str) -> bool {

@@ -31,7 +29,10 @@ fn rootfs(root: &str) -> Result<()> {
    let path = PathBuf::from(root);
    // not absolute path or not exists
    if !path.exists() || !path.is_absolute() {
        return Err(einval());
        return Err(anyhow!(
            "Path from {:?} does not exist or is not absolute",
            root
        ));
    }

    // symbolic link? ..?

@@ -49,7 +50,7 @@ fn rootfs(root: &str) -> Result<()> {
        if let Some(v) = c.as_os_str().to_str() {
            stack.push(v.to_string());
        } else {
            return Err(einval());
            return Err(anyhow!("Invalid path component (unable to convert to str)"));
        }
    }

@@ -58,10 +59,13 @@ fn rootfs(root: &str) -> Result<()> {
        cleaned.push(e);
    }

    let canon = path.canonicalize().context("canonicalize")?;
    let canon = path.canonicalize().context("failed to canonicalize path")?;
    if cleaned != canon {
        // There is symbolic in path
        return Err(einval());
        return Err(anyhow!(
            "There may be illegal symbols in the path name. Cleaned ({:?}) and canonicalized ({:?}) paths do not match",
            cleaned,
            canon));
    }

    Ok(())

@@ -74,7 +78,7 @@ fn hostname(oci: &Spec) -> Result<()> {
    let linux = get_linux(oci)?;
    if !contain_namespace(&linux.namespaces, "uts") {
        return Err(einval());
        return Err(anyhow!("Linux namespace does not contain uts"));
    }

    Ok(())

@@ -88,7 +92,7 @@ fn security(oci: &Spec) -> Result<()> {
    }

    if !contain_namespace(&linux.namespaces, "mount") {
        return Err(einval());
        return Err(anyhow!("Linux namespace does not contain mount"));
    }

    // don't care about selinux at present

@@ -103,7 +107,7 @@ fn idmapping(maps: &[LinuxIdMapping]) -> Result<()> {
        }
    }

    Err(einval())
    Err(anyhow!("No idmap has size > 0"))
}

fn usernamespace(oci: &Spec) -> Result<()> {

@@ -121,7 +125,7 @@ fn usernamespace(oci: &Spec) -> Result<()> {
    } else {
        // no user namespace but idmap
        if !linux.uid_mappings.is_empty() || !linux.gid_mappings.is_empty() {
            return Err(einval());
            return Err(anyhow!("No user namespace, but uid or gid mapping exists"));
        }
    }

@@ -163,7 +167,7 @@ fn sysctl(oci: &Spec) -> Result<()> {
            if contain_namespace(&linux.namespaces, "ipc") {
                continue;
            } else {
                return Err(einval());
                return Err(anyhow!("Linux namespace does not contain ipc"));
            }
        }

@@ -178,11 +182,11 @@ fn sysctl(oci: &Spec) -> Result<()> {
            }

            if key == "kernel.hostname" {
                return Err(einval());
                return Err(anyhow!("Kernel hostname specfied in Spec"));
            }
        }

        return Err(einval());
        return Err(anyhow!("Sysctl config contains invalid settings"));
    }
    Ok(())
}

@@ -191,12 +195,13 @@ fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
    let linux = get_linux(oci)?;

    if !contain_namespace(&linux.namespaces, "user") {
        return Err(einval());
        return Err(anyhow!("Linux namespace is missing user"));
    }

    if linux.uid_mappings.is_empty() || linux.gid_mappings.is_empty() {
        // rootless containers requires at least one UID/GID mapping
        return Err(einval());
        return Err(anyhow!(
            "Rootless containers require at least one UID/GID mapping"
        ));
    }

    Ok(())

@@ -220,7 +225,7 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> {
            let fields: Vec<&str> = opt.split('=').collect();

            if fields.len() != 2 {
                return Err(einval());
                return Err(anyhow!("Options has invalid field: {:?}", fields));
            }

            let id = fields[1]

@@ -229,11 +234,11 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> {
                .context(format!("parse field {}", &fields[1]))?;

            if opt.starts_with("uid=") && !has_idmapping(&linux.uid_mappings, id) {
                return Err(einval());
                return Err(anyhow!("uid of {} does not have a valid mapping", id));
            }

            if opt.starts_with("gid=") && !has_idmapping(&linux.gid_mappings, id) {
                return Err(einval());
                return Err(anyhow!("gid of {} does not have a valid mapping", id));
            }
        }
    }

@@ -249,15 +254,18 @@ fn rootless_euid(oci: &Spec) -> Result<()> {
pub fn validate(conf: &Config) -> Result<()> {
    lazy_static::initialize(&SYSCTLS);
    let oci = conf.spec.as_ref().ok_or_else(einval)?;
    let oci = conf
        .spec
        .as_ref()
        .ok_or_else(|| anyhow!("Invalid config spec"))?;

    if oci.linux.is_none() {
        return Err(einval());
        return Err(anyhow!("oci Linux is none"));
    }

    let root = match oci.root.as_ref() {
        Some(v) => v.path.as_str(),
        None => return Err(einval()),
        None => return Err(anyhow!("oci root is none")),
    };

    rootfs(root).context("rootfs")?;
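The theme of the validator changes above is replacing the opaque anyhow!(nix::Error::EINVAL) / einval() helper with messages that say what actually failed. A minimal, self-contained sketch of that pattern (the Spec struct here is a made-up stand-in, not the real oci::Spec):

    // Sketch only: shows the error-message refactor applied across this commit.
    use anyhow::{anyhow, Result};

    struct Spec {
        root: Option<String>, // hypothetical field, stands in for oci::Spec::root
    }

    // Before: every failure collapsed into an opaque EINVAL.
    // fn root_path(spec: &Spec) -> Result<&str> {
    //     spec.root.as_deref().ok_or_else(|| anyhow!(nix::Error::EINVAL))
    // }

    // After: the error names the missing field.
    fn root_path(spec: &Spec) -> Result<&str> {
        spec.root
            .as_deref()
            .ok_or_else(|| anyhow!("oci root is none"))
    }

    fn main() {
        let spec = Spec { root: None };
        // Prints "oci root is none" instead of an opaque errno.
        println!("{}", root_path(&spec).unwrap_err());
    }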
@@ -13,6 +13,8 @@ use std::time;
use tracing::instrument;
use url::Url;

use kata_types::config::default::DEFAULT_AGENT_VSOCK_PORT;

const DEBUG_CONSOLE_FLAG: &str = "agent.debug_console";
const DEV_MODE_FLAG: &str = "agent.devmode";
const TRACE_MODE_OPTION: &str = "agent.trace";

@@ -33,7 +35,6 @@ const DEFAULT_LOG_LEVEL: slog::Level = slog::Level::Info;
const DEFAULT_HOTPLUG_TIMEOUT: time::Duration = time::Duration::from_secs(3);
const DEFAULT_CONTAINER_PIPE_SIZE: i32 = 0;
const VSOCK_ADDR: &str = "vsock://-1";
const VSOCK_PORT: u16 = 1024;

// Environment variables used for development and testing
const SERVER_ADDR_ENV_VAR: &str = "KATA_AGENT_SERVER_ADDR";

@@ -167,7 +168,7 @@ impl Default for AgentConfig {
            debug_console_vport: 0,
            log_vport: 0,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
            server_addr: format!("{}:{}", VSOCK_ADDR, VSOCK_PORT),
            server_addr: format!("{}:{}", VSOCK_ADDR, DEFAULT_AGENT_VSOCK_PORT),
            unified_cgroup_hierarchy: false,
            tracing: false,
            endpoints: Default::default(),

@@ -169,11 +169,12 @@ pub fn baremount(
    info!(
        logger,
        "mount source={:?}, dest={:?}, fs_type={:?}, options={:?}",
        "baremount source={:?}, dest={:?}, fs_type={:?}, options={:?}, flags={:?}",
        source,
        destination,
        fs_type,
        options
        options,
        flags
    );

    nix::mount::mount(

@@ -779,6 +780,14 @@ pub async fn add_storages(
        };

        // Todo need to rollback the mounted storage if err met.

        if res.is_err() {
            error!(
                logger,
                "add_storages failed, storage: {:?}, error: {:?} ", storage, res
            );
        }

        let mount_point = res?;

        if !mount_point.is_empty() {
@@ -522,7 +522,7 @@ impl Handle {
            .as_ref()
            .map(|to| to.address.as_str()) // Extract address field
            .and_then(|addr| if addr.is_empty() { None } else { Some(addr) }) // Make sure it's not empty
            .ok_or_else(|| anyhow!(nix::Error::EINVAL))?;
            .ok_or_else(|| anyhow!("Unable to determine ip address of ARP neighbor"))?;

        let ip = IpAddr::from_str(ip_address)
            .map_err(|e| anyhow!("Failed to parse IP {}: {:?}", ip_address, e))?;

@@ -566,7 +566,12 @@ fn parse_mac_address(addr: &str) -> Result<[u8; 6]> {
    // Parse single Mac address block
    let mut parse_next = || -> Result<u8> {
        let v = u8::from_str_radix(split.next().ok_or_else(|| anyhow!(nix::Error::EINVAL))?, 16)?;
        let v = u8::from_str_radix(
            split
                .next()
                .ok_or_else(|| anyhow!("Invalid MAC address {}", addr))?,
            16,
        )?;
        Ok(v)
    };
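For reference, a standalone sketch of the closure-based MAC parsing that the hunk above gives a descriptive error to. This is an illustration only; the real agent function may differ in details such as extra length checks:

    // Sketch: parse "aa:bb:cc:dd:ee:ff" into 6 bytes, failing with a readable message.
    use anyhow::{anyhow, Result};

    fn parse_mac_address(addr: &str) -> Result<[u8; 6]> {
        let mut split = addr.split(':');
        let mut parse_next = || -> Result<u8> {
            let block = split
                .next()
                .ok_or_else(|| anyhow!("Invalid MAC address {}", addr))?;
            Ok(u8::from_str_radix(block, 16)?)
        };
        let mut mac = [0u8; 6];
        for byte in mac.iter_mut() {
            *byte = parse_next()?;
        }
        Ok(mac)
    }

    fn main() -> Result<()> {
        assert_eq!(
            parse_mac_address("02:42:ac:11:00:02")?,
            [0x02, 0x42, 0xac, 0x11, 0x00, 0x02]
        );
        // Too few blocks: the error now names the offending address instead of EINVAL.
        assert!(parse_mac_address("02:42").is_err());
        Ok(())
    }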
@@ -420,7 +420,7 @@ impl AgentService {
        let mut process = req
            .process
            .into_option()
            .ok_or_else(|| anyhow!(nix::Error::EINVAL))?;
            .ok_or_else(|| anyhow!("Unable to parse process from ExecProcessRequest"))?;

        // Apply any necessary corrections for PCI addresses
        update_env_pci(&mut process.Env, &sandbox.pcimap)?;

@@ -669,7 +669,7 @@ impl AgentService {
        };

        if reader.is_none() {
            return Err(anyhow!(nix::Error::EINVAL));
            return Err(anyhow!("Unable to determine stream reader, is None"));
        }

        let reader = reader.ok_or_else(|| anyhow!("cannot get stream reader"))?;

@@ -1964,7 +1964,11 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
    let path = PathBuf::from(req.path.as_str());

    if !path.starts_with(CONTAINER_BASE) {
        return Err(anyhow!(nix::Error::EINVAL));
        return Err(anyhow!(
            "Path {:?} does not start with {}",
            path,
            CONTAINER_BASE
        ));
    }

    let parent = path.parent();
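The do_copy_file guard above is a simple prefix check that now reports which path was rejected. A standalone sketch of such a check (the CONTAINER_BASE value below is assumed for illustration, not necessarily the agent's real constant):

    // Sketch: reject copy targets outside an allowed base directory with a clear error.
    use anyhow::{anyhow, Result};
    use std::path::PathBuf;

    const CONTAINER_BASE: &str = "/run/kata-containers"; // assumption for this example

    fn check_copy_target(raw: &str) -> Result<PathBuf> {
        let path = PathBuf::from(raw);
        // Path::starts_with compares whole components, not raw string prefixes.
        if !path.starts_with(CONTAINER_BASE) {
            return Err(anyhow!(
                "Path {:?} does not start with {}",
                path,
                CONTAINER_BASE
            ));
        }
        Ok(path)
    }

    fn main() {
        assert!(check_copy_target("/run/kata-containers/shared/foo").is_ok());
        assert!(check_copy_target("/etc/passwd").is_err());
    }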
@@ -757,4 +757,67 @@ mod tests {
        assert_eq!(config5.content, config3.content);
        assert_eq!(configs.len(), 0);
    }

    #[test]
    fn test_rate_limiter_configs() {
        const SIZE: u64 = 1024 * 1024;
        const ONE_TIME_BURST: u64 = 1024;
        const REFILL_TIME: u64 = 1000;

        let c: TokenBucketConfigInfo = TokenBucketConfigInfo {
            size: SIZE,
            one_time_burst: ONE_TIME_BURST,
            refill_time: REFILL_TIME,
        };
        let b: TokenBucket = c.into();
        assert_eq!(b.capacity(), SIZE);
        assert_eq!(b.one_time_burst(), ONE_TIME_BURST);
        assert_eq!(b.refill_time_ms(), REFILL_TIME);

        let mut rlc = RateLimiterConfigInfo {
            bandwidth: TokenBucketConfigInfo {
                size: SIZE,
                one_time_burst: ONE_TIME_BURST,
                refill_time: REFILL_TIME,
            },
            ops: TokenBucketConfigInfo {
                size: SIZE * 2,
                one_time_burst: 0,
                refill_time: REFILL_TIME * 2,
            },
        };
        let rl: RateLimiter = (&rlc).try_into().unwrap();
        assert_eq!(rl.bandwidth().unwrap().capacity(), SIZE);
        assert_eq!(rl.bandwidth().unwrap().one_time_burst(), ONE_TIME_BURST);
        assert_eq!(rl.bandwidth().unwrap().refill_time_ms(), REFILL_TIME);
        assert_eq!(rl.ops().unwrap().capacity(), SIZE * 2);
        assert_eq!(rl.ops().unwrap().one_time_burst(), 0);
        assert_eq!(rl.ops().unwrap().refill_time_ms(), REFILL_TIME * 2);

        let bandwidth = TokenBucketConfigInfo {
            size: SIZE * 2,
            one_time_burst: ONE_TIME_BURST * 2,
            refill_time: REFILL_TIME * 2,
        };
        rlc.update_bandwidth(bandwidth);
        assert_eq!(rlc.bandwidth.size, SIZE * 2);
        assert_eq!(rlc.bandwidth.one_time_burst, ONE_TIME_BURST * 2);
        assert_eq!(rlc.bandwidth.refill_time, REFILL_TIME * 2);
        assert_eq!(rlc.ops.size, SIZE * 2);
        assert_eq!(rlc.ops.one_time_burst, 0);
        assert_eq!(rlc.ops.refill_time, REFILL_TIME * 2);

        let ops = TokenBucketConfigInfo {
            size: SIZE * 3,
            one_time_burst: ONE_TIME_BURST * 3,
            refill_time: REFILL_TIME * 3,
        };
        rlc.update_ops(ops);
        assert_eq!(rlc.bandwidth.size, SIZE * 2);
        assert_eq!(rlc.bandwidth.one_time_burst, ONE_TIME_BURST * 2);
        assert_eq!(rlc.bandwidth.refill_time, REFILL_TIME * 2);
        assert_eq!(rlc.ops.size, SIZE * 3);
        assert_eq!(rlc.ops.one_time_burst, ONE_TIME_BURST * 3);
        assert_eq!(rlc.ops.refill_time, REFILL_TIME * 3);
    }
}
@@ -15,7 +15,7 @@ byte-unit = "3.1.4"
glob = "0.3.0"
lazy_static = "1.4.0"
num_cpus = "1.13.1"
regex = "1.5.4"
regex = "1.5.6"
serde = { version = "1.0.100", features = ["derive"] }
slog = "2.5.2"
slog-scope = "4.4.0"
@@ -9,8 +9,10 @@ use crate::config::{ConfigOps, TomlConfig};
pub use vendor::AgentVendor;

use super::default::{DEFAULT_AGENT_LOG_PORT, DEFAULT_AGENT_VSOCK_PORT};

/// Kata agent configuration information.
#[derive(Debug, Default, Deserialize, Serialize)]
#[derive(Debug, Default, Deserialize, Serialize, Clone)]
pub struct Agent {
    /// If enabled, the agent will log additional debug messages to the system log.
    #[serde(default, rename = "enable_debug")]

@@ -34,11 +36,11 @@ pub struct Agent {
    pub debug_console_enabled: bool,

    /// Agent server port
    #[serde(default)]
    #[serde(default = "default_server_port")]
    pub server_port: u32,

    /// Agent log port
    #[serde(default)]
    #[serde(default = "default_log_port")]
    pub log_port: u32,

    /// Agent connection dialing timeout value in millisecond

@@ -75,23 +77,31 @@ pub struct Agent {
    pub container_pipe_size: u32,
}

fn default_server_port() -> u32 {
    DEFAULT_AGENT_VSOCK_PORT
}

fn default_log_port() -> u32 {
    DEFAULT_AGENT_LOG_PORT
}

fn default_dial_timeout() -> u32 {
    // 10ms
    // ms
    10
}

fn default_reconnect_timeout() -> u32 {
    // 3s
    // ms
    3_000
}

fn default_request_timeout() -> u32 {
    // 30s
    // ms
    30_000
}

fn default_health_check_timeout() -> u32 {
    // 90s
    // ms
    90_000
}
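A short sketch of why the #[serde(default = "...")] change above matters: with a bare #[serde(default)] a missing server_port deserializes to 0, while pointing the attribute at a function yields a usable default. The Agent struct below is a cut-down stand-in, and the example assumes the serde and toml crates are available:

    // Sketch: serde falls back to default_server_port() when the field is absent.
    use serde::Deserialize;

    const DEFAULT_AGENT_VSOCK_PORT: u32 = 1024;

    fn default_server_port() -> u32 {
        DEFAULT_AGENT_VSOCK_PORT
    }

    #[derive(Debug, Deserialize)]
    struct Agent {
        // A plain #[serde(default)] would have produced 0 here.
        #[serde(default = "default_server_port")]
        server_port: u32,
    }

    fn main() {
        // An empty TOML document has no server_port, so the default function is used.
        let agent: Agent = toml::from_str("").unwrap();
        assert_eq!(agent.server_port, 1024);
    }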
@@ -16,6 +16,8 @@ lazy_static! {
    ];
}
pub const DEFAULT_AGENT_NAME: &str = "kata-agent";
pub const DEFAULT_AGENT_VSOCK_PORT: u32 = 1024;
pub const DEFAULT_AGENT_LOG_PORT: u32 = 1025;

pub const DEFAULT_INTERNETWORKING_MODEL: &str = "tcfilter";
src/runtime-rs/Cargo.lock (generated, 499 lines changed)
@@ -100,51 +100,107 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
|
||||
|
||||
[[package]]
|
||||
name = "async-macros"
|
||||
version = "1.0.0"
|
||||
name = "async-channel"
|
||||
version = "1.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e421d59b24c1feea2496e409b3e0a8de23e5fc130a2ddc0b012e551f3b272bba"
|
||||
checksum = "e14485364214912d3b19cc3435dde4df66065127f05fa0d75c712f36f12c2f28"
|
||||
dependencies = [
|
||||
"futures-core-preview",
|
||||
"pin-utils",
|
||||
"concurrent-queue",
|
||||
"event-listener",
|
||||
"futures-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-std"
|
||||
version = "0.99.12"
|
||||
name = "async-executor"
|
||||
version = "1.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "44501a9f7961bb539b67be0c428b3694e26557046a52759ca7eaf790030a64cc"
|
||||
checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965"
|
||||
dependencies = [
|
||||
"async-macros",
|
||||
"async-task",
|
||||
"crossbeam-channel 0.3.9",
|
||||
"crossbeam-deque",
|
||||
"crossbeam-utils 0.6.6",
|
||||
"futures-core",
|
||||
"futures-io",
|
||||
"futures-timer 1.0.3",
|
||||
"kv-log-macro",
|
||||
"log",
|
||||
"memchr",
|
||||
"mio 0.6.23",
|
||||
"mio-uds",
|
||||
"num_cpus",
|
||||
"concurrent-queue",
|
||||
"fastrand",
|
||||
"futures-lite",
|
||||
"once_cell",
|
||||
"pin-project-lite 0.1.12",
|
||||
"pin-utils",
|
||||
"slab",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-task"
|
||||
version = "1.3.1"
|
||||
name = "async-global-executor"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0ac2c016b079e771204030951c366db398864f5026f84a44dafb0ff20f02085d"
|
||||
checksum = "5262ed948da60dd8956c6c5aca4d4163593dddb7b32d73267c93dab7b2e98940"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"winapi 0.3.9",
|
||||
"async-channel",
|
||||
"async-executor",
|
||||
"async-io",
|
||||
"async-lock",
|
||||
"blocking",
|
||||
"futures-lite",
|
||||
"num_cpus",
|
||||
"once_cell",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-io"
|
||||
version = "1.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0ab006897723d9352f63e2b13047177c3982d8d79709d713ce7747a8f19fd1b0"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"concurrent-queue",
|
||||
"futures-lite",
|
||||
"libc",
|
||||
"log",
|
||||
"once_cell",
|
||||
"parking",
|
||||
"polling",
|
||||
"slab",
|
||||
"socket2",
|
||||
"waker-fn",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-lock"
|
||||
version = "2.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e97a171d191782fba31bb902b14ad94e24a68145032b7eedf871ab0bc0d077b6"
|
||||
dependencies = [
|
||||
"event-listener",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-std"
|
||||
version = "1.12.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d"
|
||||
dependencies = [
|
||||
"async-channel",
|
||||
"async-global-executor",
|
||||
"async-io",
|
||||
"async-lock",
|
||||
"crossbeam-utils",
|
||||
"futures-channel",
|
||||
"futures-core",
|
||||
"futures-io",
|
||||
"futures-lite",
|
||||
"gloo-timers",
|
||||
"kv-log-macro",
|
||||
"log",
|
||||
"memchr",
|
||||
"once_cell",
|
||||
"pin-project-lite",
|
||||
"pin-utils",
|
||||
"slab",
|
||||
"wasm-bindgen-futures",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-task"
|
||||
version = "4.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524"
|
||||
|
||||
[[package]]
|
||||
name = "async-trait"
|
||||
version = "0.1.56"
|
||||
@@ -156,6 +212,12 @@ dependencies = [
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "atomic-waker"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a"
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.1.0"
|
||||
@@ -238,6 +300,20 @@ dependencies = [
|
||||
"generic-array",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "blocking"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c6ccb65d468978a086b69884437ded69a90faab3bbe6e67f242173ea728acccc"
|
||||
dependencies = [
|
||||
"async-channel",
|
||||
"async-task",
|
||||
"atomic-waker",
|
||||
"fastrand",
|
||||
"futures-lite",
|
||||
"once_cell",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bumpalo"
|
||||
version = "3.10.0"
|
||||
@@ -272,6 +348,12 @@ version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8"
|
||||
|
||||
[[package]]
|
||||
name = "cache-padded"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c"
|
||||
|
||||
[[package]]
|
||||
name = "caps"
|
||||
version = "0.5.3"
|
||||
@@ -326,7 +408,7 @@ dependencies = [
|
||||
"num-integer",
|
||||
"num-traits",
|
||||
"time 0.1.43",
|
||||
"winapi 0.3.9",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -359,6 +441,15 @@ version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2382f75942f4b3be3690fe4f86365e9c853c1587d6ee58212cebf6e2a9ccd101"
|
||||
|
||||
[[package]]
|
||||
name = "concurrent-queue"
|
||||
version = "1.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "af4780a44ab5696ea9e28294517f1fffb421a83a25af521333c838635509db9c"
|
||||
dependencies = [
|
||||
"cache-padded",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "constant_time_eq"
|
||||
version = "0.1.5"
|
||||
@@ -400,15 +491,6 @@ dependencies = [
|
||||
"cfg-if 1.0.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-channel"
|
||||
version = "0.3.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c8ec7fcd21571dc78f96cc96243cab8d8f035247c3efd16c687be154c3fa9efa"
|
||||
dependencies = [
|
||||
"crossbeam-utils 0.6.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-channel"
|
||||
version = "0.5.4"
|
||||
@@ -416,54 +498,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53"
|
||||
dependencies = [
|
||||
"cfg-if 1.0.0",
|
||||
"crossbeam-utils 0.8.8",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-deque"
|
||||
version = "0.7.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed"
|
||||
dependencies = [
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils 0.7.2",
|
||||
"maybe-uninit",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-epoch"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"cfg-if 0.1.10",
|
||||
"crossbeam-utils 0.7.2",
|
||||
"lazy_static",
|
||||
"maybe-uninit",
|
||||
"memoffset 0.5.6",
|
||||
"scopeguard",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-utils"
|
||||
version = "0.6.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.10",
|
||||
"lazy_static",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-utils"
|
||||
version = "0.7.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"cfg-if 0.1.10",
|
||||
"lazy_static",
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -573,7 +608,7 @@ dependencies = [
|
||||
"kvm-bindings",
|
||||
"kvm-ioctls",
|
||||
"libc",
|
||||
"memoffset 0.6.5",
|
||||
"memoffset",
|
||||
"vm-memory",
|
||||
"vmm-sys-util",
|
||||
]
|
||||
@@ -633,7 +668,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b773f7f1b9088438e9746890c7c0836b133b07935812867a33e06e81c92c0cdc"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"mio 0.8.3",
|
||||
"mio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -781,7 +816,7 @@ checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1"
|
||||
dependencies = [
|
||||
"errno-dragonfly",
|
||||
"libc",
|
||||
"winapi 0.3.9",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -794,6 +829,12 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "event-listener"
|
||||
version = "2.5.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
|
||||
|
||||
[[package]]
|
||||
name = "event-manager"
|
||||
version = "0.2.1"
|
||||
@@ -862,22 +903,6 @@ version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
|
||||
|
||||
[[package]]
|
||||
name = "fuchsia-zircon"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"fuchsia-zircon-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fuchsia-zircon-sys"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
|
||||
|
||||
[[package]]
|
||||
name = "fuse-backend-rs"
|
||||
version = "0.9.0"
|
||||
@@ -892,7 +917,7 @@ dependencies = [
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"log",
|
||||
"mio 0.8.3",
|
||||
"mio",
|
||||
"nix 0.23.1",
|
||||
"virtio-queue",
|
||||
"vm-memory",
|
||||
@@ -936,12 +961,6 @@ version = "0.3.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3"
|
||||
|
||||
[[package]]
|
||||
name = "futures-core-preview"
|
||||
version = "0.3.0-alpha.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b35b6263fb1ef523c3056565fa67b1d16f0a8604ff12b11b08c25f28a734c60a"
|
||||
|
||||
[[package]]
|
||||
name = "futures-executor"
|
||||
version = "0.3.21"
|
||||
@@ -959,6 +978,21 @@ version = "0.3.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b"
|
||||
|
||||
[[package]]
|
||||
name = "futures-lite"
|
||||
version = "1.12.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48"
|
||||
dependencies = [
|
||||
"fastrand",
|
||||
"futures-core",
|
||||
"futures-io",
|
||||
"memchr",
|
||||
"parking",
|
||||
"pin-project-lite",
|
||||
"waker-fn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "futures-macro"
|
||||
version = "0.3.21"
|
||||
@@ -982,16 +1016,6 @@ version = "0.3.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a"
|
||||
|
||||
[[package]]
|
||||
name = "futures-timer"
|
||||
version = "1.0.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7946248e9429ff093345d3e8fdf4eb0f9b2d79091611c9c14f744971a6f8be45"
|
||||
dependencies = [
|
||||
"futures-core-preview",
|
||||
"pin-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "futures-timer"
|
||||
version = "3.0.2"
|
||||
@@ -1011,7 +1035,7 @@ dependencies = [
|
||||
"futures-sink",
|
||||
"futures-task",
|
||||
"memchr",
|
||||
"pin-project-lite 0.2.9",
|
||||
"pin-project-lite",
|
||||
"pin-utils",
|
||||
"slab",
|
||||
]
|
||||
@@ -1060,6 +1084,18 @@ version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
|
||||
|
||||
[[package]]
|
||||
name = "gloo-timers"
|
||||
version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5fb7d06c1c8cc2a29bee7ec961009a0b2caa0793ee4900c2ffb348734ba1c8f9"
|
||||
dependencies = [
|
||||
"futures-channel",
|
||||
"futures-core",
|
||||
"js-sys",
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "go-flag"
|
||||
version = "0.1.0"
|
||||
@@ -1077,7 +1113,7 @@ checksum = "19775995ee20209163239355bc3ad2f33f83da35d9ef72dea26e5af753552c87"
|
||||
dependencies = [
|
||||
"dashmap",
|
||||
"futures 0.3.21",
|
||||
"futures-timer 3.0.2",
|
||||
"futures-timer",
|
||||
"no-std-compat",
|
||||
"nonzero_ext",
|
||||
"parking_lot 0.12.1",
|
||||
@@ -1234,9 +1270,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "js-sys"
|
||||
version = "0.3.57"
|
||||
version = "0.3.59"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397"
|
||||
checksum = "258451ab10b34f8af53416d1fdab72c22e805f0c92a1136d59470ec0b11138b2"
|
||||
dependencies = [
|
||||
"wasm-bindgen",
|
||||
]
|
||||
@@ -1282,16 +1318,6 @@ dependencies = [
|
||||
"toml 0.5.9",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "kernel32-sys"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
|
||||
dependencies = [
|
||||
"winapi 0.2.8",
|
||||
"winapi-build",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "kv-log-macro"
|
||||
version = "1.0.7"
|
||||
@@ -1415,27 +1441,12 @@ version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f"
|
||||
|
||||
[[package]]
|
||||
name = "maybe-uninit"
|
||||
version = "2.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
|
||||
|
||||
[[package]]
|
||||
name = "memoffset"
|
||||
version = "0.5.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "memoffset"
|
||||
version = "0.6.5"
|
||||
@@ -1454,25 +1465,6 @@ dependencies = [
|
||||
"adler",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "0.6.23"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.10",
|
||||
"fuchsia-zircon",
|
||||
"fuchsia-zircon-sys",
|
||||
"iovec",
|
||||
"kernel32-sys",
|
||||
"libc",
|
||||
"log",
|
||||
"miow",
|
||||
"net2",
|
||||
"slab",
|
||||
"winapi 0.2.8",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "0.8.3"
|
||||
@@ -1485,46 +1477,12 @@ dependencies = [
|
||||
"windows-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mio-uds"
|
||||
version = "0.6.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0"
|
||||
dependencies = [
|
||||
"iovec",
|
||||
"libc",
|
||||
"mio 0.6.23",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "miow"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d"
|
||||
dependencies = [
|
||||
"kernel32-sys",
|
||||
"net2",
|
||||
"winapi 0.2.8",
|
||||
"ws2_32-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "multimap"
|
||||
version = "0.8.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
|
||||
|
||||
[[package]]
|
||||
name = "net2"
|
||||
version = "0.2.37"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae"
|
||||
dependencies = [
|
||||
"cfg-if 0.1.10",
|
||||
"libc",
|
||||
"winapi 0.3.9",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "netlink-packet-core"
|
||||
version = "0.4.2"
|
||||
@@ -1601,7 +1559,7 @@ dependencies = [
|
||||
"cc",
|
||||
"cfg-if 1.0.0",
|
||||
"libc",
|
||||
"memoffset 0.6.5",
|
||||
"memoffset",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1613,7 +1571,7 @@ dependencies = [
|
||||
"bitflags",
|
||||
"cfg-if 1.0.0",
|
||||
"libc",
|
||||
"memoffset 0.6.5",
|
||||
"memoffset",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1729,6 +1687,12 @@ version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
|
||||
|
||||
[[package]]
|
||||
name = "parking"
|
||||
version = "2.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72"
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot"
|
||||
version = "0.11.2"
|
||||
@@ -1761,7 +1725,7 @@ dependencies = [
|
||||
"libc",
|
||||
"redox_syscall",
|
||||
"smallvec",
|
||||
"winapi 0.3.9",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1814,12 +1778,6 @@ dependencies = [
|
||||
"indexmap",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pin-project-lite"
|
||||
version = "0.1.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777"
|
||||
|
||||
[[package]]
|
||||
name = "pin-project-lite"
|
||||
version = "0.2.9"
|
||||
@@ -1832,6 +1790,20 @@ version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
||||
|
||||
[[package]]
|
||||
name = "polling"
|
||||
version = "2.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "899b00b9c8ab553c743b3e11e87c5c7d423b2a2de229ba95b24a756344748011"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"cfg-if 1.0.0",
|
||||
"libc",
|
||||
"log",
|
||||
"wepoll-ffi",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ppv-lite86"
|
||||
version = "0.2.16"
|
||||
@@ -1944,14 +1916,14 @@ version = "0.9.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8"
|
||||
dependencies = [
|
||||
"crossbeam-utils 0.8.8",
|
||||
"crossbeam-utils",
|
||||
"libc",
|
||||
"mach",
|
||||
"once_cell",
|
||||
"raw-cpuid",
|
||||
"wasi 0.10.2+wasi-snapshot-preview1",
|
||||
"web-sys",
|
||||
"winapi 0.3.9",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2011,7 +1983,7 @@ dependencies = [
|
||||
"libc",
|
||||
"rand_core 0.3.1",
|
||||
"rdrand",
|
||||
"winapi 0.3.9",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2150,7 +2122,7 @@ version = "0.5.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
|
||||
dependencies = [
|
||||
"winapi 0.3.9",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2251,7 +2223,7 @@ dependencies = [
|
||||
"io-lifetimes",
|
||||
"libc",
|
||||
"linux-raw-sys",
|
||||
"winapi 0.3.9",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2451,7 +2423,7 @@ version = "2.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "766c59b252e62a34651412870ff55d8c4e6d04df19b43eecb2703e417b097ffe"
|
||||
dependencies = [
|
||||
"crossbeam-channel 0.5.4",
|
||||
"crossbeam-channel",
|
||||
"slog",
|
||||
"take_mut",
|
||||
"thread_local",
|
||||
@@ -2504,7 +2476,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"winapi 0.3.9",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2576,7 +2548,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0c2e86926081dda636c546d8c5e641661049d7562a68f5488be4a1f7f66f6086"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"winapi 0.3.9",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2613,7 +2585,7 @@ dependencies = [
|
||||
"libc",
|
||||
"redox_syscall",
|
||||
"remove_dir_all",
|
||||
"winapi 0.3.9",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2668,7 +2640,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"winapi 0.3.9",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2715,15 +2687,15 @@ dependencies = [
|
||||
"bytes 1.1.0",
|
||||
"libc",
|
||||
"memchr",
|
||||
"mio 0.8.3",
|
||||
"mio",
|
||||
"num_cpus",
|
||||
"once_cell",
|
||||
"parking_lot 0.12.1",
|
||||
"pin-project-lite 0.2.9",
|
||||
"pin-project-lite",
|
||||
"signal-hook-registry",
|
||||
"socket2",
|
||||
"tokio-macros",
|
||||
"winapi 0.3.9",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2957,7 +2929,7 @@ checksum = "339d4349c126fdcd87e034631d7274370cf19eb0e87b33166bcd956589fc72c5"
|
||||
dependencies = [
|
||||
"arc-swap 1.5.0",
|
||||
"libc",
|
||||
"winapi 0.3.9",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2986,6 +2958,12 @@ dependencies = [
|
||||
"nix 0.23.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "waker-fn"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca"
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
version = "0.9.0+wasi-snapshot-preview1"
|
||||
@@ -3006,9 +2984,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen"
|
||||
version = "0.2.80"
|
||||
version = "0.2.82"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad"
|
||||
checksum = "fc7652e3f6c4706c8d9cd54832c4a4ccb9b5336e2c3bd154d5cccfbf1c1f5f7d"
|
||||
dependencies = [
|
||||
"cfg-if 1.0.0",
|
||||
"wasm-bindgen-macro",
|
||||
@@ -3016,13 +2994,13 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-backend"
|
||||
version = "0.2.80"
|
||||
version = "0.2.82"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4"
|
||||
checksum = "662cd44805586bd52971b9586b1df85cdbbd9112e4ef4d8f41559c334dc6ac3f"
|
||||
dependencies = [
|
||||
"bumpalo",
|
||||
"lazy_static",
|
||||
"log",
|
||||
"once_cell",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
@@ -3030,10 +3008,22 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro"
|
||||
version = "0.2.80"
|
||||
name = "wasm-bindgen-futures"
|
||||
version = "0.4.32"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5"
|
||||
checksum = "fa76fb221a1f8acddf5b54ace85912606980ad661ac7a503b4570ffd3a624dad"
|
||||
dependencies = [
|
||||
"cfg-if 1.0.0",
|
||||
"js-sys",
|
||||
"wasm-bindgen",
|
||||
"web-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro"
|
||||
version = "0.2.82"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b260f13d3012071dfb1512849c033b1925038373aea48ced3012c09df952c602"
|
||||
dependencies = [
|
||||
"quote",
|
||||
"wasm-bindgen-macro-support",
|
||||
@@ -3041,9 +3031,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro-support"
|
||||
version = "0.2.80"
|
||||
version = "0.2.82"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b"
|
||||
checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -3054,9 +3044,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-shared"
|
||||
version = "0.2.80"
|
||||
version = "0.2.82"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744"
|
||||
checksum = "6598dd0bd3c7d51095ff6531a5b23e02acdc81804e30d8f07afb77b7215a140a"
|
||||
|
||||
[[package]]
|
||||
name = "wasm_container"
|
||||
@@ -3079,6 +3069,15 @@ dependencies = [
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wepoll-ffi"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb"
|
||||
dependencies = [
|
||||
"cc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "which"
|
||||
version = "4.2.5"
|
||||
@@ -3090,12 +3089,6 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.2.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.3.9"
|
||||
@@ -3106,12 +3099,6 @@ dependencies = [
|
||||
"winapi-x86_64-pc-windows-gnu",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-build"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-i686-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
@@ -3167,16 +3154,6 @@ version = "0.36.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"
|
||||
|
||||
[[package]]
|
||||
name = "ws2_32-sys"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"
|
||||
dependencies = [
|
||||
"winapi 0.2.8",
|
||||
"winapi-build",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zstd"
|
||||
version = "0.11.2+zstd.1.5.2"
|
||||
|
||||
@@ -8,6 +8,8 @@ use anyhow::{Context, Result};
use async_trait::async_trait;
use ttrpc::context as ttrpc_ctx;

use kata_types::config::Agent as AgentConfig;

use crate::{kata::KataAgent, Agent, AgentManager, HealthService};

/// millisecond to nanosecond
@@ -37,6 +39,10 @@ impl AgentManager for KataAgent {
    async fn stop(&self) {
        self.stop_log_forwarder().await;
    }

    async fn agent_config(&self) -> AgentConfig {
        self.agent_config().await
    }
}

// implement for health service

@@ -126,4 +126,9 @@ impl KataAgent {
        let mut inner = self.inner.lock().await;
        inner.log_forwarder.stop();
    }

    pub(crate) async fn agent_config(&self) -> AgentConfig {
        let inner = self.inner.lock().await;
        inner.config.clone()
    }
}

@@ -29,10 +29,16 @@ pub use types::{
use anyhow::Result;
use async_trait::async_trait;

use kata_types::config::Agent as AgentConfig;

pub const AGENT_KATA: &str = "kata";

#[async_trait]
pub trait AgentManager: Send + Sync {
    async fn start(&self, address: &str) -> Result<()>;
    async fn stop(&self);

    async fn agent_config(&self) -> AgentConfig;
}

#[async_trait]

@@ -4,6 +4,9 @@
// SPDX-License-Identifier: Apache-2.0
//

use anyhow::{anyhow, Result};
use std::convert::TryFrom;

use serde::Deserialize;

#[derive(PartialEq, Clone, Default)]
@@ -390,6 +393,56 @@ pub struct KernelModule {
    pub parameters: Vec<String>,
}

impl KernelModule {
    pub fn set_kernel_modules(modules: Vec<String>) -> Result<Vec<Self>> {
        let mut kernel_modules = Vec::new();
        for module_string in modules {
            if module_string.is_empty() {
                continue;
            }
            let kernel_module = Self::try_from(module_string)?;
            kernel_modules.push(kernel_module);
        }
        Ok(kernel_modules)
    }
}

impl TryFrom<String> for KernelModule {
    type Error = anyhow::Error;
    // input string: " ModuleName Param1 Param2 ... "
    // NOTICE: " ModuleName Param1="spaces in here" " => KernelModule { name: ModuleName, parameters: Param1="spaces in here" }
    fn try_from(str: String) -> Result<Self> {
        let split: Vec<&str> = str.split(' ').collect();
        let mut name = String::new();
        let mut parameters = Vec::new();

        let mut flag = false;
        for (index, info) in split.iter().enumerate() {
            if index == 0 {
                name = info.to_string();
            } else if flag {
                // a former param's string contains \"
                if let Some(former_param) = parameters.pop() {
                    let cur_param = format!("{} {}", former_param, info);
                    parameters.push(cur_param);
                }
            } else {
                parameters.push(info.to_string());
            }

            if info.contains('\"') {
                flag = !flag;
            }
        }

        if flag {
            return Err(anyhow!("\" not match"));
        }

        Ok(KernelModule { name, parameters })
    }
}

#[derive(PartialEq, Clone, Default)]
pub struct CreateSandboxRequest {
    pub hostname: String,
@@ -486,3 +539,44 @@ pub struct VersionCheckResponse {
pub struct OomEventResponse {
    pub container_id: String,
}

#[cfg(test)]
mod test {
    use std::convert::TryFrom;

    use super::KernelModule;

    #[test]
    fn test_new_kernel_module() {
        let kernel_module_str1 = "ModuleName Param1 Param2";
        let kernel_module1 = KernelModule::try_from(kernel_module_str1.to_string()).unwrap();
        assert!(kernel_module1.name == "ModuleName");
        assert!(kernel_module1.parameters[0] == "Param1");
        assert!(kernel_module1.parameters[1] == "Param2");

        let kernel_module_str2 = "ModuleName Param1=\"spaces in here\"";
        let kernel_module2 = KernelModule::try_from(kernel_module_str2.to_string()).unwrap();
        assert!(kernel_module2.name == "ModuleName");
        assert!(kernel_module2.parameters[0] == "Param1=\"spaces in here\"");

        // exception case
        let kernel_module_str3 = "ModuleName \"Param1";
        let kernel_module3 = KernelModule::try_from(kernel_module_str3.to_string());
        assert!(kernel_module3.is_err());
    }

    #[test]
    fn test_kernel_modules() {
        let kernel_module_str1 = "ModuleName1 Param1 Param2".to_string();
        let kernel_module_str2 = "".to_string();
        let kernel_module_str3 = "ModuleName2".to_string();
        let kernel_modules_str = vec![kernel_module_str1, kernel_module_str2, kernel_module_str3];

        let kernel_modules = KernelModule::set_kernel_modules(kernel_modules_str).unwrap();
        assert!(kernel_modules.len() == 2);
        assert!(kernel_modules[0].name == "ModuleName1");
        assert!(kernel_modules[0].parameters.len() == 2);
        assert!(kernel_modules[1].name == "ModuleName2");
        assert!(kernel_modules[1].parameters.is_empty());
    }
}

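As a quick illustration of the quoted-parameter handling above, here is a minimal sketch (not part of this patch) of how a configured kernel_modules list flows through KernelModule::set_kernel_modules; the module names and parameter values are made up for the example, and the agent::types::KernelModule path assumes the types shown in this diff:

// Sketch only: the modules and parameters below are illustrative, not taken from the patch.
use agent::types::KernelModule;
use anyhow::Result;

fn parse_configured_modules() -> Result<()> {
    // e.g. from an agent config: kernel_modules = ["e1000e InterruptThrottleRate=3000,3000,3000", "i915 enable_guc=\"1 2\""]
    let configured = vec![
        "e1000e InterruptThrottleRate=3000,3000,3000".to_string(),
        "i915 enable_guc=\"1 2\"".to_string(),
    ];

    let modules = KernelModule::set_kernel_modules(configured)?;
    assert_eq!(modules[0].name, "e1000e");
    assert_eq!(modules[0].parameters, vec!["InterruptThrottleRate=3000,3000,3000"]);
    // The quoted parameter keeps its embedded space after parsing.
    assert_eq!(modules[1].parameters, vec!["enable_guc=\"1 2\""]);
    Ok(())
}
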
@@ -22,7 +22,7 @@ slog-scope = "4.4.0"
tokio = { version = "1.8.0" }
toml = "0.4.2"
url = "2.1.1"
async-std = "0.99.5"
async-std = "1.12.0"

agent = { path = "../../agent" }
common = { path = "../common" }

@@ -91,7 +91,7 @@ impl HealthCheck {
                    ::std::process::exit(1);
                }
            } else {
                info!(sl!(), "wait to exit exit {}", id);
                info!(sl!(), "wait to exit {}", id);
                break;
            }
        }

@@ -16,7 +16,7 @@ pub mod sandbox_persist;

use std::sync::Arc;

use agent::kata::KataAgent;
use agent::{kata::KataAgent, AGENT_KATA};
use anyhow::{anyhow, Context, Result};
use async_trait::async_trait;
use common::{message::Message, RuntimeHandler, RuntimeInstance};
@@ -55,20 +55,7 @@ impl RuntimeHandler for VirtContainer {
        let hypervisor = new_hypervisor(&config).await.context("new hypervisor")?;

        // get uds from hypervisor and get config from toml_config
        let agent = Arc::new(KataAgent::new(kata_types::config::Agent {
            debug: true,
            enable_tracing: false,
            server_port: 1024,
            log_port: 1025,
            dial_timeout_ms: 10,
            reconnect_timeout_ms: 3_000,
            request_timeout_ms: 30_000,
            health_check_request_timeout_ms: 90_000,
            kernel_modules: Default::default(),
            container_pipe_size: 0,
            debug_console_enabled: false,
        }));

        let agent = new_agent(&config).context("new agent")?;
        let resource_manager = Arc::new(ResourceManager::new(
            sid,
            agent.clone(),
@@ -121,3 +108,44 @@ async fn new_hypervisor(toml_config: &TomlConfig) -> Result<Arc<dyn Hypervisor>>
        _ => Err(anyhow!("Unsupported hypervisor {}", &hypervisor_name)),
    }
}

fn new_agent(toml_config: &TomlConfig) -> Result<Arc<KataAgent>> {
    let agent_name = &toml_config.runtime.agent_name;
    let agent_config = toml_config
        .agent
        .get(agent_name)
        .ok_or_else(|| anyhow!("failed to get agent for {}", &agent_name))
        .context("get agent")?;
    match agent_name.as_str() {
        AGENT_KATA => {
            let agent = KataAgent::new(agent_config.clone());
            Ok(Arc::new(agent))
        }
        _ => Err(anyhow!("Unsupported agent {}", &agent_name)),
    }
}

#[cfg(test)]
mod test {

    use super::*;

    fn default_toml_config_agent() -> Result<TomlConfig> {
        let config_content = r#"
[agent.kata]
container_pipe_size=1

[runtime]
agent_name="kata"
"#;
        TomlConfig::load(config_content).map_err(|e| anyhow!("can not load config toml: {}", e))
    }

    #[test]
    fn test_new_agent() {
        let toml_config = default_toml_config_agent().unwrap();

        let res = new_agent(&toml_config);
        assert!(res.is_ok());
    }
}

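The test above only covers the happy path. A sketch of the corresponding negative path (not part of this patch; it assumes TomlConfig::load accepts this snippet, just as it does for the positive test) would look like:

// Sketch only: an agent_name with no matching [agent.<name>] table makes new_agent() fail.
#[test]
fn test_new_agent_unknown_name() {
    let config_content = r#"
[agent.kata]
container_pipe_size=1

[runtime]
agent_name="not-a-real-agent"
"#;
    let toml_config = TomlConfig::load(config_content).unwrap();
    // There is no [agent.not-a-real-agent] section, so the lookup in new_agent() errors out.
    assert!(new_agent(&toml_config).is_err());
}
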
@@ -6,7 +6,7 @@

use std::sync::Arc;

use agent::{self, kata::KataAgent, Agent};
use agent::{self, kata::KataAgent, types::KernelModule, Agent};
use anyhow::{anyhow, Context, Result};
use async_trait::async_trait;
use common::{
@@ -159,6 +159,8 @@ impl Sandbox for VirtSandbox {
            .context("setup device after start vm")?;

        // create sandbox in vm
        let agent_config = self.agent.agent_config().await;
        let kernel_modules = KernelModule::set_kernel_modules(agent_config.kernel_modules)?;
        let req = agent::CreateSandboxRequest {
            hostname: "".to_string(),
            dns: vec![],
@@ -175,7 +177,7 @@ impl Sandbox for VirtSandbox {
                .await
                .security_info
                .guest_hook_path,
            kernel_modules: vec![],
            kernel_modules,
        };

        self.agent

@@ -15,7 +15,7 @@ use crate::Error;
/// from a shimv2 container manager such as containerd.
///
/// For detailed information, please refer to the
/// [shim spec](https://github.com/containerd/containerd/blob/main/runtime/v2/README.md).
/// [shim spec](https://github.com/containerd/containerd/blob/v1.6.8/runtime/v2/README.md).
#[derive(Debug, Default, Clone)]
pub struct Args {
    /// the id of the container
@@ -26,8 +26,6 @@ pub struct Args {
    pub address: String,
    /// the binary path to publish events back to containerd
    pub publish_binary: String,
    /// Abstract socket path to serve.
    pub socket: String,
    /// the path to the bundle to delete
    pub bundle: String,
    /// Whether or not to enable debug
@@ -123,7 +121,6 @@ mod tests {
        let default_namespace = "ns1".to_string();
        let default_address = bind_address.to_string();
        let default_publish_binary = "containerd".to_string();
        let default_socket = "socket".to_string();
        let default_bundle = path.to_string();
        let default_debug = false;

@@ -132,7 +129,6 @@ mod tests {
            namespace: default_namespace.clone(),
            address: default_address.clone(),
            publish_binary: default_publish_binary.clone(),
            socket: default_socket,
            bundle: default_bundle.clone(),
            debug: default_debug,
        };

@@ -43,7 +43,6 @@ fn parse_args(args: &[OsString]) -> Result<Action> {
        flags.add_flag("id", &mut shim_args.id);
        flags.add_flag("namespace", &mut shim_args.namespace);
        flags.add_flag("publish-binary", &mut shim_args.publish_binary);
        flags.add_flag("socket", &mut shim_args.socket);
        flags.add_flag("help", &mut help);
        flags.add_flag("version", &mut version);
    })
@@ -87,8 +86,6 @@ fn show_help(cmd: &OsStr) {
  namespace that owns the shim
-publish-binary string
  path to publish binary (used for publishing events) (default "containerd")
-socket string
  socket path to serve
--version
  show the runtime version detail and exit
"#,

src/runtime-rs/crates/shim/src/core_sched.rs (new file, 102 lines)
@@ -0,0 +1,102 @@
// Copyright (c) 2019-2022 Alibaba Cloud
// Copyright (c) 2019-2022 Ant Group
//
// SPDX-License-Identifier: Apache-2.0
//

//
// Core Scheduling landed in Linux 5.14. When it is enabled, ONLY processes that
// have the same cookie value can share an SMT core, for security reasons: SMT
// siblings share their CPU caches and many other things, so this can prevent
// malicious processes from stealing others' private information.
//
// This is enabled by containerd, see https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.toml.5.md#format
//
// This is done with the prctl() system call; for core scheduling it is defined as
// int prctl(PR_SCHED_CORE, int cs_command, pid_t pid, enum pid_type type,
//           unsigned long *cookie);
//
// You may go to https://lwn.net/Articles/861251/, https://lore.kernel.org/lkml/20210422123309.039845339@infradead.org/
// and kernel.org/doc/html/latest/admin-guide/hw-vuln/core-scheduling.html for more info.
//

use anyhow::Result;
use nix::{self, errno::Errno};

#[allow(dead_code)]
pub const PID_GROUP: usize = 0;
#[allow(dead_code)]
pub const THREAD_GROUP: usize = 1;
pub const PROCESS_GROUP: usize = 2;

#[allow(dead_code)]
pub const PR_SCHED_CORE: i32 = 62;
pub const PR_SCHED_CORE_CREATE: usize = 1;
pub const PR_SCHED_CORE_SHARE_FROM: usize = 3;

// create a new core sched domain; this will NOT succeed if the kernel version is < 5.14
pub fn core_sched_create(pidtype: usize) -> Result<(), Errno> {
    let errno = unsafe { nix::libc::prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0, pidtype, 0) };
    if errno != 0 {
        Err(nix::errno::Errno::from_i32(-errno))
    } else {
        Ok(())
    }
}

// shares the domain with *pid*
#[allow(dead_code)]
pub fn core_sched_share_from(pid: usize, pidtype: usize) -> Result<(), Errno> {
    let errno =
        unsafe { nix::libc::prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_FROM, pid, pidtype, 0) };
    if errno != 0 {
        Err(nix::errno::Errno::from_i32(-errno))
    } else {
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use nix::errno::Errno::{EINVAL, ENODEV, ENOMEM, EPERM, ESRCH};

    const RELEASE_MAJOR_VERSION: u8 = 5;
    const RELEASE_MINOR_VERSION: u8 = 14;

    // since this feature only landed in Linux 5.14, we only run the test on newer kernels
    fn core_sched_landed() -> bool {
        let vinfo = std::fs::read_to_string("/proc/sys/kernel/osrelease");
        if let Ok(info) = vinfo {
            let vnum: Vec<&str> = info.as_str().split('.').collect();
            if vnum.len() >= 2 {
                let major: u8 = vnum[0].parse().unwrap();
                let minor: u8 = vnum[1].parse().unwrap();
                return major >= RELEASE_MAJOR_VERSION && minor >= RELEASE_MINOR_VERSION;
            }
        }
        false
    }

    #[test]
    fn test_core_sched() {
        std::env::set_var("SCHED_CORE", "1");
        assert_eq!(std::env::var("SCHED_CORE").unwrap(), "1");
        if core_sched_landed() {
            // it is possible that the machine running this test does not support SMT,
            // therefore it does not make sense to assert a successful prctl call
            // but we can still make sure that the return value is a possible value
            let e = core_sched_create(PROCESS_GROUP);
            if let Err(errno) = e {
                if errno != EINVAL
                    && errno != ENODEV
                    && errno != ENOMEM
                    && errno != EPERM
                    && errno != ESRCH
                {
                    panic!("impossible return value {:?}", errno);
                }
            }
        }
    }
}
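For context, a minimal sketch (not part of this patch) of how an embedder might issue the same prctl() operation directly and degrade gracefully on kernels without core scheduling; the constants mirror the ones defined above, and the errno handling here is the sketch's own choice:

// Sketch only: opt the current process group into a core-scheduling domain,
// tolerating kernels that predate the feature (< 5.14) or machines without SMT.
use nix::errno::Errno;

const PR_SCHED_CORE: i32 = 62;
const PR_SCHED_CORE_CREATE: usize = 1;
const PROCESS_GROUP: usize = 2; // same pid_type value as used above

fn opt_in_to_core_scheduling() {
    // pid 0 targets the calling process; the cookie argument must be 0 (NULL) for CREATE.
    let ret = unsafe { nix::libc::prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0, PROCESS_GROUP, 0) };
    if ret != 0 {
        // EINVAL typically means an older kernel; ENODEV means core scheduling
        // (or SMT) is not available on this machine. Either way, just continue.
        eprintln!("core scheduling not enabled: {:?}", Errno::last());
    }
}
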
@@ -17,6 +17,7 @@ mod logger
mod panic_hook;
mod shim;
pub use shim::ShimExecutor;
mod core_sched;
#[rustfmt::skip]
pub mod config;
mod shim_delete;

@@ -68,7 +68,7 @@ impl ShimExecutor {
        let data = [&self.args.address, &self.args.namespace, id].join("/");
        let mut hasher = sha2::Sha256::new();
        hasher.update(data);
        // https://github.com/containerd/containerd/blob/main/runtime/v2/shim/util_unix.go#L68 to
        // https://github.com/containerd/containerd/blob/v1.6.8/runtime/v2/shim/util_unix.go#L68 to
        // generate a shim socket path.
        Ok(PathBuf::from(format!(
            "unix://{}/s/{:X}",
@@ -97,7 +97,6 @@ mod tests {
            namespace: "test_namespace".into(),
            address: "containerd_socket".into(),
            publish_binary: "containerd".into(),
            socket: "socket".into(),
            bundle: bundle_path.to_str().unwrap().into(),
            debug: false,
        };

@@ -10,7 +10,7 @@ use anyhow::{Context, Result};
use kata_sys_util::spec::get_bundle_path;

use crate::{
    logger,
    core_sched, logger,
    shim::{ShimExecutor, ENV_KATA_RUNTIME_BIND_FD},
    Error,
};
@@ -23,6 +23,12 @@ impl ShimExecutor {
        let path = bundle_path.join("log");
        let _logger_guard =
            logger::set_logger(path.to_str().unwrap(), &sid, self.args.debug).context("set logger");
        if try_core_sched().is_err() {
            warn!(
                sl!(),
                "Failed to enable core sched since prctl() returns non-zero value."
            );
        }

        self.do_run()
            .await
@@ -62,3 +68,14 @@ fn get_server_fd() -> Result<RawFd> {
        .map_err(|_| Error::ServerFd(env_fd))?;
    Ok(fd)
}

// TODO: currently we log a warning on fail (i.e. kernel version < 5.14), maybe just exit
// TODO: more test on higher version of kernel
fn try_core_sched() -> Result<()> {
    if let Ok(v) = std::env::var("SCHED_CORE") {
        if !v.is_empty() {
            core_sched::core_sched_create(core_sched::PROCESS_GROUP)?
        }
    }
    Ok(())
}

@@ -157,7 +157,6 @@ mod tests {
            namespace: "ns".into(),
            address: "address".into(),
            publish_binary: "containerd".into(),
            socket: "socket".into(),
            bundle: bundle_path.to_str().unwrap().into(),
            debug: false,
        };
@@ -189,7 +188,6 @@ mod tests {
            namespace: "ns1".into(),
            address: "containerd_socket".into(),
            publish_binary: "containerd".into(),
            socket: "socket".into(),
            bundle: bundle_path.to_str().unwrap().into(),
            debug: false,
        };
@@ -209,7 +207,6 @@ mod tests {
            namespace: "ns1".into(),
            address: "containerd_socket".into(),
            publish_binary: "containerd".into(),
            socket: "socket".into(),
            bundle: bundle_path2.to_str().unwrap().into(),
            debug: false,
        };

@@ -282,6 +282,7 @@ ifneq (,$(QEMUCMD))

# qemu-specific options (all should be suffixed by "_QEMU")
DEFBLOCKSTORAGEDRIVER_QEMU := virtio-scsi
DEFBLOCKDEVICEAIO_QEMU := io_uring
DEFNETWORKMODEL_QEMU := tcfilter
KERNELTYPE = uncompressed
KERNELNAME = $(call MAKE_KERNEL_NAME,$(KERNELTYPE))
@@ -516,6 +517,7 @@ USER_VARS += DEFDISABLEBLOCK
USER_VARS += DEFBLOCKSTORAGEDRIVER_ACRN
USER_VARS += DEFBLOCKSTORAGEDRIVER_FC
USER_VARS += DEFBLOCKSTORAGEDRIVER_QEMU
USER_VARS += DEFBLOCKDEVICEAIO_QEMU
USER_VARS += DEFSHAREDFS_CLH_VIRTIOFS
USER_VARS += DEFSHAREDFS_QEMU_VIRTIOFS
USER_VARS += DEFVIRTIOFSDAEMON

@@ -233,6 +233,20 @@ virtio_fs_cache = "@DEFVIRTIOFSCACHE@"
# or nvdimm.
block_device_driver = "@DEFBLOCKSTORAGEDRIVER_QEMU@"

# aio is the I/O mechanism used by qemu
# Options:
#
#   - threads
#     Pthread based disk I/O.
#
#   - native
#     Native Linux I/O.
#
#   - io_uring
#     Linux io_uring API. This provides the fastest I/O operations on Linux, requires kernel>5.1 and
#     qemu >=5.0.
block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"

# Specifies cache-related options will be set to block devices or not.
# Default false
#block_device_cache_set = true

@@ -4,56 +4,58 @@ go 1.14
|
||||
|
||||
require (
|
||||
code.cloudfoundry.org/bytefmt v0.0.0-20211005130812-5bb3c17173e5
|
||||
github.com/BurntSushi/toml v0.3.1
|
||||
github.com/BurntSushi/toml v1.2.0
|
||||
github.com/blang/semver v3.5.1+incompatible
|
||||
github.com/blang/semver/v4 v4.0.0
|
||||
github.com/containerd/cgroups v1.0.5-0.20220625035431-cf7417bca682
|
||||
github.com/containerd/console v1.0.3
|
||||
github.com/containerd/containerd v1.6.6
|
||||
github.com/containerd/cri-containerd v1.11.1-0.20190125013620-4dd6735020f5
|
||||
github.com/containerd/cri-containerd v1.19.0
|
||||
github.com/containerd/fifo v1.0.0
|
||||
github.com/containerd/ttrpc v1.1.0
|
||||
github.com/containerd/typeurl v1.0.2
|
||||
github.com/containernetworking/plugins v1.1.1
|
||||
github.com/containers/podman/v4 v4.2.0
|
||||
github.com/coreos/go-systemd/v22 v22.3.2
|
||||
github.com/cri-o/cri-o v1.0.0-rc2.0.20170928185954-3394b3b2d6af
|
||||
github.com/docker/go-units v0.4.0
|
||||
github.com/fsnotify/fsnotify v1.5.1
|
||||
github.com/frankban/quicktest v1.13.1 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.4
|
||||
github.com/go-ini/ini v1.28.2
|
||||
github.com/go-openapi/errors v0.20.2
|
||||
github.com/go-openapi/runtime v0.19.21
|
||||
github.com/go-openapi/strfmt v0.21.1
|
||||
github.com/go-openapi/swag v0.21.1
|
||||
github.com/go-openapi/validate v0.22.0
|
||||
github.com/godbus/dbus/v5 v5.0.6
|
||||
github.com/godbus/dbus/v5 v5.1.0
|
||||
github.com/gogo/protobuf v1.3.2
|
||||
github.com/hashicorp/go-multierror v1.1.1
|
||||
github.com/intel-go/cpuid v0.0.0-20210602155658-5747e5cec0d9
|
||||
github.com/mdlayher/vsock v1.1.0
|
||||
github.com/opencontainers/runc v1.1.2
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
|
||||
github.com/opencontainers/runc v1.1.3
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab
|
||||
github.com/opencontainers/selinux v1.10.1
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.11.1
|
||||
github.com/prometheus/client_golang v1.12.1
|
||||
github.com/prometheus/client_model v0.2.0
|
||||
github.com/prometheus/common v0.30.0
|
||||
github.com/prometheus/common v0.32.1
|
||||
github.com/prometheus/procfs v0.7.3
|
||||
github.com/rogpeppe/go-internal v1.8.1-0.20210923151022-86f73c517451 // indirect
|
||||
github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/urfave/cli v1.22.2
|
||||
github.com/vishvananda/netlink v1.1.1-0.20210924202909-187053b97868
|
||||
github.com/sirupsen/logrus v1.9.0
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/urfave/cli v1.22.4
|
||||
github.com/vishvananda/netlink v1.1.1-0.20220115184804-dd687eb2f2d4
|
||||
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f
|
||||
gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20220601114329-47893b162965
|
||||
go.opentelemetry.io/otel v1.3.0
|
||||
go.opentelemetry.io/otel/exporters/jaeger v1.0.0
|
||||
go.opentelemetry.io/otel/sdk v1.3.0
|
||||
go.opentelemetry.io/otel/trace v1.3.0
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
|
||||
google.golang.org/grpc v1.43.0
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b
|
||||
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f
|
||||
google.golang.org/grpc v1.47.0
|
||||
google.golang.org/protobuf v1.27.1
|
||||
k8s.io/apimachinery v0.22.5
|
||||
k8s.io/cri-api v0.23.1
|
||||
|
||||
File diff suppressed because it is too large
@@ -61,6 +61,17 @@ const (
	Nvdimm = "nvdimm"
)

const (
	// AIOThreads is the pthread asynchronous I/O implementation.
	AIOThreads = "threads"

	// AIONative is the native Linux AIO implementation
	AIONative = "native"

	// AIOIOUring is the Linux io_uring I/O implementation
	AIOIOUring = "io_uring"
)

const (
	// Virtio9P means use virtio-9p for the shared file system
	Virtio9P = "virtio-9p"

@@ -1166,8 +1166,11 @@ const (
	// Threads is the pthread asynchronous I/O implementation.
	Threads BlockDeviceAIO = "threads"

	// Native is the pthread asynchronous I/O implementation.
	// Native is the native Linux AIO implementation.
	Native BlockDeviceAIO = "native"

	// IOUring is the Linux io_uring I/O implementation.
	IOUring BlockDeviceAIO = "io_uring"
)

const (

@@ -787,30 +787,28 @@ func (q *QMP) ExecuteQuit(ctx context.Context) error {
|
||||
return q.executeCommand(ctx, "quit", nil, nil)
|
||||
}
|
||||
|
||||
func (q *QMP) blockdevAddBaseArgs(driver, device, blockdevID string, ro bool) (map[string]interface{}, map[string]interface{}) {
|
||||
var args map[string]interface{}
|
||||
|
||||
func (q *QMP) blockdevAddBaseArgs(driver string, blockDevice *BlockDevice) map[string]interface{} {
|
||||
blockdevArgs := map[string]interface{}{
|
||||
"driver": "raw",
|
||||
"read-only": ro,
|
||||
"read-only": blockDevice.ReadOnly,
|
||||
"file": map[string]interface{}{
|
||||
"driver": driver,
|
||||
"filename": device,
|
||||
"filename": blockDevice.File,
|
||||
"aio": string(blockDevice.AIO),
|
||||
},
|
||||
}
|
||||
|
||||
blockdevArgs["node-name"] = blockdevID
|
||||
args = blockdevArgs
|
||||
blockdevArgs["node-name"] = blockDevice.ID
|
||||
|
||||
return args, blockdevArgs
|
||||
return blockdevArgs
|
||||
}
|
||||
|
||||
// ExecuteBlockdevAdd sends a blockdev-add to the QEMU instance. device is the
|
||||
// path of the device to add, e.g., /dev/rdb0, and blockdevID is an identifier
|
||||
// used to name the device. As this identifier will be passed directly to QMP,
|
||||
// it must obey QMP's naming rules, e,g., it must start with a letter.
|
||||
func (q *QMP) ExecuteBlockdevAdd(ctx context.Context, device, blockdevID string, ro bool) error {
|
||||
args, _ := q.blockdevAddBaseArgs("host_device", device, blockdevID, ro)
|
||||
func (q *QMP) ExecuteBlockdevAdd(ctx context.Context, blockDevice *BlockDevice) error {
|
||||
args := q.blockdevAddBaseArgs("host_device", blockDevice)
|
||||
|
||||
return q.executeCommand(ctx, "blockdev-add", args, nil)
|
||||
}
|
||||
@@ -822,29 +820,29 @@ func (q *QMP) ExecuteBlockdevAdd(ctx context.Context, device, blockdevID string,
|
||||
// direct denotes whether use of O_DIRECT (bypass the host page cache)
|
||||
// is enabled. noFlush denotes whether flush requests for the device are
|
||||
// ignored.
|
||||
func (q *QMP) ExecuteBlockdevAddWithCache(ctx context.Context, device, blockdevID string, direct, noFlush, ro bool) error {
|
||||
args, blockdevArgs := q.blockdevAddBaseArgs("host_device", device, blockdevID, ro)
|
||||
func (q *QMP) ExecuteBlockdevAddWithCache(ctx context.Context, blockDevice *BlockDevice, direct, noFlush bool) error {
|
||||
blockdevArgs := q.blockdevAddBaseArgs("host_device", blockDevice)
|
||||
|
||||
blockdevArgs["cache"] = map[string]interface{}{
|
||||
"direct": direct,
|
||||
"no-flush": noFlush,
|
||||
}
|
||||
|
||||
return q.executeCommand(ctx, "blockdev-add", args, nil)
|
||||
return q.executeCommand(ctx, "blockdev-add", blockdevArgs, nil)
|
||||
}
|
||||
|
||||
// ExecuteBlockdevAddWithDriverCache has one more parameter, driver,
|
||||
// than ExecuteBlockdevAddWithCache.
|
||||
// Parameter driver can set the driver of block device.
|
||||
func (q *QMP) ExecuteBlockdevAddWithDriverCache(ctx context.Context, driver, device, blockdevID string, direct, noFlush, ro bool) error {
|
||||
args, blockdevArgs := q.blockdevAddBaseArgs(driver, device, blockdevID, ro)
|
||||
func (q *QMP) ExecuteBlockdevAddWithDriverCache(ctx context.Context, driver string, blockDevice *BlockDevice, direct, noFlush bool) error {
|
||||
blockdevArgs := q.blockdevAddBaseArgs(driver, blockDevice)
|
||||
|
||||
blockdevArgs["cache"] = map[string]interface{}{
|
||||
"direct": direct,
|
||||
"no-flush": noFlush,
|
||||
}
|
||||
|
||||
return q.executeCommand(ctx, "blockdev-add", args, nil)
|
||||
return q.executeCommand(ctx, "blockdev-add", blockdevArgs, nil)
|
||||
}
|
||||
|
||||
// ExecuteDeviceAdd adds the guest portion of a device to a QEMU instance
|
||||
|
||||
@@ -400,8 +400,13 @@ func TestQMPBlockdevAdd(t *testing.T) {
|
||||
cfg := QMPConfig{Logger: qmpTestLogger{}}
|
||||
q := startQMPLoop(buf, cfg, connectedCh, disconnectedCh)
|
||||
q.version = checkVersion(t, connectedCh)
|
||||
err := q.ExecuteBlockdevAdd(context.Background(), "/dev/rbd0",
|
||||
fmt.Sprintf("drive_%s", volumeUUID), false)
|
||||
dev := BlockDevice{
|
||||
ID: fmt.Sprintf("drive_%s", volumeUUID),
|
||||
File: "/dev/rbd0",
|
||||
ReadOnly: false,
|
||||
AIO: Native,
|
||||
}
|
||||
err := q.ExecuteBlockdevAdd(context.Background(), &dev)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error %v", err)
|
||||
}
|
||||
@@ -424,8 +429,13 @@ func TestQMPBlockdevAddWithCache(t *testing.T) {
|
||||
cfg := QMPConfig{Logger: qmpTestLogger{}}
|
||||
q := startQMPLoop(buf, cfg, connectedCh, disconnectedCh)
|
||||
q.version = checkVersion(t, connectedCh)
|
||||
err := q.ExecuteBlockdevAddWithCache(context.Background(), "/dev/rbd0",
|
||||
fmt.Sprintf("drive_%s", volumeUUID), true, true, false)
|
||||
dev := BlockDevice{
|
||||
ID: fmt.Sprintf("drive_%s", volumeUUID),
|
||||
File: "/dev/rbd0",
|
||||
ReadOnly: false,
|
||||
AIO: Native,
|
||||
}
|
||||
err := q.ExecuteBlockdevAddWithCache(context.Background(), &dev, true, true)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error %v", err)
|
||||
}
|
||||
|
||||
@@ -216,6 +216,7 @@ type RuntimeConfigOptions struct {
|
||||
ShimPath string
|
||||
LogPath string
|
||||
BlockDeviceDriver string
|
||||
BlockDeviceAIO string
|
||||
SharedFS string
|
||||
VirtioFSDaemon string
|
||||
JaegerEndpoint string
|
||||
@@ -305,6 +306,7 @@ func MakeRuntimeConfigFileData(config RuntimeConfigOptions) string {
|
||||
path = "` + config.HypervisorPath + `"
|
||||
kernel = "` + config.KernelPath + `"
|
||||
block_device_driver = "` + config.BlockDeviceDriver + `"
|
||||
block_device_aio = "` + config.BlockDeviceAIO + `"
|
||||
kernel_params = "` + config.KernelParams + `"
|
||||
image = "` + config.ImagePath + `"
|
||||
machine_type = "` + config.MachineType + `"
|
||||
|
||||
@@ -63,6 +63,7 @@ const defaultBridgesCount uint32 = 1
|
||||
const defaultInterNetworkingModel = "tcfilter"
|
||||
const defaultDisableBlockDeviceUse bool = false
|
||||
const defaultBlockDeviceDriver = "virtio-scsi"
|
||||
const defaultBlockDeviceAIO string = "io_uring"
|
||||
const defaultBlockDeviceCacheSet bool = false
|
||||
const defaultBlockDeviceCacheDirect bool = false
|
||||
const defaultBlockDeviceCacheNoflush bool = false
|
||||
|
||||
@@ -105,6 +105,7 @@ type hypervisor struct {
|
||||
GuestPreAttestationSecretGuid string `toml:"guest_pre_attestation_secret_guid"`
|
||||
GuestPreAttestationSecretType string `toml:"guest_pre_attestation_secret_type"`
|
||||
SEVCertChainPath string `toml:"sev_cert_chain"`
|
||||
BlockDeviceAIO string `toml:"block_device_aio"`
|
||||
HypervisorPathList []string `toml:"valid_hypervisor_paths"`
|
||||
JailerPathList []string `toml:"valid_jailer_paths"`
|
||||
CtlPathList []string `toml:"valid_ctlpaths"`
|
||||
@@ -477,6 +478,22 @@ func (h hypervisor) blockDeviceDriver() (string, error) {
	return "", fmt.Errorf("Invalid hypervisor block storage driver %v specified (supported drivers: %v)", h.BlockDeviceDriver, supportedBlockDrivers)
}

func (h hypervisor) blockDeviceAIO() (string, error) {
	supportedBlockAIO := []string{config.AIOIOUring, config.AIONative, config.AIOThreads}

	if h.BlockDeviceAIO == "" {
		return defaultBlockDeviceAIO, nil
	}

	for _, b := range supportedBlockAIO {
		if b == h.BlockDeviceAIO {
			return h.BlockDeviceAIO, nil
		}
	}

	return "", fmt.Errorf("Invalid hypervisor block storage I/O mechanism %v specified (supported AIO: %v)", h.BlockDeviceAIO, supportedBlockAIO)
}
|
||||
func (h hypervisor) sharedFS() (string, error) {
|
||||
supportedSharedFS := []string{config.Virtio9P, config.VirtioFS, config.VirtioFSNydus}
|
||||
|
||||
@@ -736,6 +753,11 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
|
||||
return vc.HypervisorConfig{}, err
|
||||
}
|
||||
|
||||
blockAIO, err := h.blockDeviceAIO()
|
||||
if err != nil {
|
||||
return vc.HypervisorConfig{}, err
|
||||
}
|
||||
|
||||
sharedFS, err := h.sharedFS()
|
||||
if err != nil {
|
||||
return vc.HypervisorConfig{}, err
|
||||
@@ -792,6 +814,7 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
|
||||
Debug: h.Debug,
|
||||
DisableNestingChecks: h.DisableNestingChecks,
|
||||
BlockDeviceDriver: blockDriver,
|
||||
BlockDeviceAIO: blockAIO,
|
||||
BlockDeviceCacheSet: h.BlockDeviceCacheSet,
|
||||
BlockDeviceCacheDirect: h.BlockDeviceCacheDirect,
|
||||
BlockDeviceCacheNoflush: h.BlockDeviceCacheNoflush,
|
||||
@@ -1197,6 +1220,7 @@ func GetDefaultHypervisorConfig() vc.HypervisorConfig {
|
||||
Debug: defaultEnableDebug,
|
||||
DisableNestingChecks: defaultDisableNestingChecks,
|
||||
BlockDeviceDriver: defaultBlockDeviceDriver,
|
||||
BlockDeviceAIO: defaultBlockDeviceAIO,
|
||||
BlockDeviceCacheSet: defaultBlockDeviceCacheSet,
|
||||
BlockDeviceCacheDirect: defaultBlockDeviceCacheDirect,
|
||||
BlockDeviceCacheNoflush: defaultBlockDeviceCacheNoflush,
|
||||
|
||||
@@ -79,6 +79,7 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf
|
||||
machineType := "machineType"
|
||||
disableBlockDevice := true
|
||||
blockDeviceDriver := "virtio-scsi"
|
||||
blockDeviceAIO := "io_uring"
|
||||
enableIOThreads := true
|
||||
hotplugVFIOOnRootBus := true
|
||||
pcieRootPort := uint32(2)
|
||||
@@ -99,6 +100,7 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf
|
||||
DefaultGuestHookPath: defaultGuestHookPath,
|
||||
DisableBlock: disableBlockDevice,
|
||||
BlockDeviceDriver: blockDeviceDriver,
|
||||
BlockDeviceAIO: blockDeviceAIO,
|
||||
EnableIOThreads: enableIOThreads,
|
||||
HotplugVFIOOnRootBus: hotplugVFIOOnRootBus,
|
||||
PCIeRootPort: pcieRootPort,
|
||||
@@ -159,6 +161,7 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (config testRuntimeConf
|
||||
DefaultMaxMemorySize: maxMemory,
|
||||
DisableBlockDeviceUse: disableBlockDevice,
|
||||
BlockDeviceDriver: defaultBlockDeviceDriver,
|
||||
BlockDeviceAIO: defaultBlockDeviceAIO,
|
||||
DefaultBridges: defaultBridgesCount,
|
||||
EnableIOThreads: enableIOThreads,
|
||||
HotplugVFIOOnRootBus: hotplugVFIOOnRootBus,
|
||||
@@ -550,6 +553,7 @@ func TestMinimalRuntimeConfig(t *testing.T) {
|
||||
GuestHookPath: defaultGuestHookPath,
|
||||
VhostUserStorePath: defaultVhostUserStorePath,
|
||||
VirtioFSCache: defaultVirtioFSCacheMode,
|
||||
BlockDeviceAIO: defaultBlockDeviceAIO,
|
||||
}
|
||||
|
||||
expectedAgentConfig := vc.KataAgentConfig{
|
||||
@@ -593,6 +597,7 @@ func TestNewQemuHypervisorConfig(t *testing.T) {
|
||||
hotplugVFIOOnRootBus := true
|
||||
pcieRootPort := uint32(2)
|
||||
orgVHostVSockDevicePath := utils.VHostVSockDevicePath
|
||||
blockDeviceAIO := "io_uring"
|
||||
defer func() {
|
||||
utils.VHostVSockDevicePath = orgVHostVSockDevicePath
|
||||
}()
|
||||
@@ -614,6 +619,7 @@ func TestNewQemuHypervisorConfig(t *testing.T) {
|
||||
TxRateLimiterMaxRate: txRateLimiterMaxRate,
|
||||
SharedFS: "virtio-fs",
|
||||
VirtioFSDaemon: filepath.Join(dir, "virtiofsd"),
|
||||
BlockDeviceAIO: blockDeviceAIO,
|
||||
}
|
||||
|
||||
files := []string{hypervisorPath, kernelPath, imagePath}
|
||||
@@ -674,6 +680,11 @@ func TestNewQemuHypervisorConfig(t *testing.T) {
|
||||
if config.TxRateLimiterMaxRate != txRateLimiterMaxRate {
|
||||
t.Errorf("Expected value for tx rate limiter %v, got %v", txRateLimiterMaxRate, config.TxRateLimiterMaxRate)
|
||||
}
|
||||
|
||||
if config.BlockDeviceAIO != blockDeviceAIO {
|
||||
t.Errorf("Expected value for BlockDeviceAIO %v, got %v", blockDeviceAIO, config.BlockDeviceAIO)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestNewFirecrackerHypervisorConfig(t *testing.T) {
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
"syscall"
|
||||
|
||||
ctrAnnotations "github.com/containerd/containerd/pkg/cri/annotations"
|
||||
crioAnnotations "github.com/cri-o/cri-o/pkg/annotations"
|
||||
podmanAnnotations "github.com/containers/podman/v4/pkg/annotations"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
@@ -46,17 +46,17 @@ var (
|
||||
|
||||
// CRIContainerTypeKeyList lists all the CRI keys that could define
|
||||
// the container type from annotations in the config.json.
|
||||
CRIContainerTypeKeyList = []string{ctrAnnotations.ContainerType, crioAnnotations.ContainerType, dockershimAnnotations.ContainerTypeLabelKey}
|
||||
CRIContainerTypeKeyList = []string{ctrAnnotations.ContainerType, podmanAnnotations.ContainerType, dockershimAnnotations.ContainerTypeLabelKey}
|
||||
|
||||
// CRISandboxNameKeyList lists all the CRI keys that could define
|
||||
// the sandbox ID (sandbox ID) from annotations in the config.json.
|
||||
CRISandboxNameKeyList = []string{ctrAnnotations.SandboxID, crioAnnotations.SandboxID, dockershimAnnotations.SandboxIDLabelKey}
|
||||
CRISandboxNameKeyList = []string{ctrAnnotations.SandboxID, podmanAnnotations.SandboxID, dockershimAnnotations.SandboxIDLabelKey}
|
||||
|
||||
// CRIContainerTypeList lists all the maps from CRI ContainerTypes annotations
|
||||
// to a virtcontainers ContainerType.
|
||||
CRIContainerTypeList = []annotationContainerType{
|
||||
{crioAnnotations.ContainerTypeSandbox, vc.PodSandbox},
|
||||
{crioAnnotations.ContainerTypeContainer, vc.PodContainer},
|
||||
{podmanAnnotations.ContainerTypeSandbox, vc.PodSandbox},
|
||||
{podmanAnnotations.ContainerTypeContainer, vc.PodContainer},
|
||||
{ctrAnnotations.ContainerTypeSandbox, vc.PodSandbox},
|
||||
{ctrAnnotations.ContainerTypeContainer, vc.PodContainer},
|
||||
{dockershimAnnotations.ContainerTypeLabelSandbox, vc.PodSandbox},
|
||||
@@ -686,6 +686,22 @@ func addHypervisorBlockOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConfig)
		}
	}

	if value, ok := ocispec.Annotations[vcAnnotations.BlockDeviceAIO]; ok {
		supportedAIO := []string{config.AIONative, config.AIOThreads, config.AIOIOUring}

		valid := false
		for _, b := range supportedAIO {
			if b == value {
				sbConfig.HypervisorConfig.BlockDeviceAIO = value
				valid = true
			}
		}

		if !valid {
			return fmt.Errorf("Invalid AIO mechanism %v specified in annotation (supported IO mechanism : %v)", value, supportedAIO)
		}
	}
|
||||
if err := newAnnotationConfiguration(ocispec, vcAnnotations.DisableBlockDeviceUse).setBool(func(disableBlockDeviceUse bool) {
|
||||
sbConfig.HypervisorConfig.DisableBlockDeviceUse = disableBlockDeviceUse
|
||||
}); err != nil {
|
||||
@@ -1052,8 +1068,8 @@ func getShmSize(c vc.ContainerConfig) (uint64, error) {
|
||||
|
||||
// IsCRIOContainerManager check if a Pod is created from CRI-O
|
||||
func IsCRIOContainerManager(spec *specs.Spec) bool {
|
||||
if val, ok := spec.Annotations[crioAnnotations.ContainerType]; ok {
|
||||
if val == crioAnnotations.ContainerTypeSandbox || val == crioAnnotations.ContainerTypeContainer {
|
||||
if val, ok := spec.Annotations[podmanAnnotations.ContainerType]; ok {
|
||||
if val == podmanAnnotations.ContainerTypeSandbox || val == podmanAnnotations.ContainerTypeContainer {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
"testing"
|
||||
|
||||
ctrAnnotations "github.com/containerd/containerd/pkg/cri/annotations"
|
||||
crioAnnotations "github.com/cri-o/cri-o/pkg/annotations"
|
||||
podmanAnnotations "github.com/containers/podman/v4/pkg/annotations"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/sys/unix"
|
||||
@@ -224,22 +224,22 @@ func TestContainerType(t *testing.T) {
|
||||
},
|
||||
{
|
||||
description: "crio unexpected annotation, expect error",
|
||||
annotationKey: crioAnnotations.ContainerType,
|
||||
annotationKey: podmanAnnotations.ContainerType,
|
||||
annotationValue: "foo",
|
||||
expectedType: vc.UnknownContainerType,
|
||||
expectedErr: true,
|
||||
},
|
||||
{
|
||||
description: "crio sandbox",
|
||||
annotationKey: crioAnnotations.ContainerType,
|
||||
annotationValue: string(crioAnnotations.ContainerTypeSandbox),
|
||||
annotationKey: podmanAnnotations.ContainerType,
|
||||
annotationValue: string(podmanAnnotations.ContainerTypeSandbox),
|
||||
expectedType: vc.PodSandbox,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
description: "crio container",
|
||||
annotationKey: crioAnnotations.ContainerType,
|
||||
annotationValue: string(crioAnnotations.ContainerTypeContainer),
|
||||
annotationKey: podmanAnnotations.ContainerType,
|
||||
annotationValue: string(podmanAnnotations.ContainerTypeContainer),
|
||||
expectedType: vc.PodContainer,
|
||||
expectedErr: false,
|
||||
},
|
||||
@@ -287,7 +287,7 @@ func TestSandboxIDSuccessful(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
ociSpec.Annotations = map[string]string{
|
||||
crioAnnotations.SandboxID: testSandboxID,
|
||||
podmanAnnotations.SandboxID: testSandboxID,
|
||||
}
|
||||
|
||||
sandboxID, err := SandboxID(ociSpec)
|
||||
@@ -642,6 +642,7 @@ func TestAddHypervisorAnnotations(t *testing.T) {
|
||||
ocispec.Annotations[vcAnnotations.HugePages] = "true"
|
||||
ocispec.Annotations[vcAnnotations.IOMMU] = "true"
|
||||
ocispec.Annotations[vcAnnotations.BlockDeviceDriver] = "virtio-scsi"
|
||||
ocispec.Annotations[vcAnnotations.BlockDeviceAIO] = "io_uring"
|
||||
ocispec.Annotations[vcAnnotations.DisableBlockDeviceUse] = "true"
|
||||
ocispec.Annotations[vcAnnotations.EnableIOThreads] = "true"
|
||||
ocispec.Annotations[vcAnnotations.BlockDeviceCacheSet] = "true"
|
||||
@@ -679,6 +680,7 @@ func TestAddHypervisorAnnotations(t *testing.T) {
|
||||
assert.Equal(config.HypervisorConfig.HugePages, true)
|
||||
assert.Equal(config.HypervisorConfig.IOMMU, true)
|
||||
assert.Equal(config.HypervisorConfig.BlockDeviceDriver, "virtio-scsi")
|
||||
assert.Equal(config.HypervisorConfig.BlockDeviceAIO, "io_uring")
|
||||
assert.Equal(config.HypervisorConfig.DisableBlockDeviceUse, true)
|
||||
assert.Equal(config.HypervisorConfig.EnableIOThreads, true)
|
||||
assert.Equal(config.HypervisorConfig.BlockDeviceCacheSet, true)
|
||||
@@ -883,15 +885,15 @@ func TestIsCRIOContainerManager(t *testing.T) {
|
||||
result bool
|
||||
}{
|
||||
{
|
||||
annotations: map[string]string{crioAnnotations.ContainerType: "abc"},
|
||||
annotations: map[string]string{podmanAnnotations.ContainerType: "abc"},
|
||||
result: false,
|
||||
},
|
||||
{
|
||||
annotations: map[string]string{crioAnnotations.ContainerType: crioAnnotations.ContainerTypeSandbox},
|
||||
annotations: map[string]string{podmanAnnotations.ContainerType: podmanAnnotations.ContainerTypeSandbox},
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
annotations: map[string]string{crioAnnotations.ContainerType: crioAnnotations.ContainerTypeContainer},
|
||||
annotations: map[string]string{podmanAnnotations.ContainerType: podmanAnnotations.ContainerTypeContainer},
|
||||
result: true,
|
||||
},
|
||||
}
|
||||
|
||||
src/runtime/vendor/github.com/BurntSushi/toml/.gitignore (generated, vendored; 7 lines changed)
@@ -1,5 +1,2 @@
|
||||
TAGS
|
||||
tags
|
||||
.*.swp
|
||||
tomlcheck/tomlcheck
|
||||
toml.test
|
||||
/toml.test
|
||||
/toml-test
|
||||
|
||||
src/runtime/vendor/github.com/BurntSushi/toml/.travis.yml (generated, vendored; 15 lines changed)
@@ -1,15 +0,0 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.1
|
||||
- 1.2
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- 1.6
|
||||
- tip
|
||||
install:
|
||||
- go install ./...
|
||||
- go get github.com/BurntSushi/toml-test
|
||||
script:
|
||||
- export PATH="$PATH:$HOME/gopath/bin"
|
||||
- make test
|
||||
src/runtime/vendor/github.com/BurntSushi/toml/COMPATIBLE (generated, vendored; 3 lines changed)
@@ -1,3 +0,0 @@
|
||||
Compatible with TOML version
|
||||
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
|
||||
|
||||
src/runtime/vendor/github.com/BurntSushi/toml/Makefile (generated, vendored; 19 lines changed)
@@ -1,19 +0,0 @@
|
||||
install:
|
||||
go install ./...
|
||||
|
||||
test: install
|
||||
go test -v
|
||||
toml-test toml-test-decoder
|
||||
toml-test -encoder toml-test-encoder
|
||||
|
||||
fmt:
|
||||
gofmt -w *.go */*.go
|
||||
colcheck *.go */*.go
|
||||
|
||||
tags:
|
||||
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
|
||||
|
||||
push:
|
||||
git push origin master
|
||||
git push github master
|
||||
|
||||
src/runtime/vendor/github.com/BurntSushi/toml/README.md (generated, vendored; 228 lines changed)
@@ -1,46 +1,26 @@
|
||||
## TOML parser and encoder for Go with reflection
|
||||
|
||||
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
|
||||
reflection interface similar to Go's standard library `json` and `xml`
|
||||
packages. This package also supports the `encoding.TextUnmarshaler` and
|
||||
`encoding.TextMarshaler` interfaces so that you can define custom data
|
||||
representations. (There is an example of this below.)
|
||||
reflection interface similar to Go's standard library `json` and `xml` packages.
|
||||
|
||||
Spec: https://github.com/toml-lang/toml
|
||||
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
|
||||
|
||||
Compatible with TOML version
|
||||
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
|
||||
Documentation: https://godocs.io/github.com/BurntSushi/toml
|
||||
|
||||
Documentation: https://godoc.org/github.com/BurntSushi/toml
|
||||
See the [releases page](https://github.com/BurntSushi/toml/releases) for a
|
||||
changelog; this information is also in the git tag annotations (e.g. `git show
|
||||
v0.4.0`).
|
||||
|
||||
Installation:
|
||||
This library requires Go 1.13 or newer; add it to your go.mod with:
|
||||
|
||||
```bash
|
||||
go get github.com/BurntSushi/toml
|
||||
```
|
||||
% go get github.com/BurntSushi/toml@latest
|
||||
|
||||
Try the toml validator:
|
||||
It also comes with a TOML validator CLI tool:
|
||||
|
||||
```bash
|
||||
go get github.com/BurntSushi/toml/cmd/tomlv
|
||||
tomlv some-toml-file.toml
|
||||
```
|
||||
|
||||
[](https://travis-ci.org/BurntSushi/toml) [](https://godoc.org/github.com/BurntSushi/toml)
|
||||
|
||||
### Testing
|
||||
|
||||
This package passes all tests in
|
||||
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
|
||||
and the encoder.
|
||||
% go install github.com/BurntSushi/toml/cmd/tomlv@latest
|
||||
% tomlv some-toml-file.toml
|
||||
|
||||
### Examples
|
||||
|
||||
This package works similarly to how the Go standard library handles `XML`
|
||||
and `JSON`. Namely, data is loaded into Go values via reflection.
|
||||
|
||||
For the simplest example, consider some TOML file as just a list of keys
|
||||
and values:
|
||||
For the simplest example, consider some TOML file as just a list of keys and
|
||||
values:
|
||||
|
||||
```toml
|
||||
Age = 25
|
||||
@@ -50,29 +30,23 @@ Perfection = [ 6, 28, 496, 8128 ]
|
||||
DOB = 1987-07-05T05:45:00Z
|
||||
```
|
||||
|
||||
Which could be defined in Go as:
|
||||
Which can be decoded with:
|
||||
|
||||
```go
|
||||
type Config struct {
|
||||
Age int
|
||||
Cats []string
|
||||
Pi float64
|
||||
Perfection []int
|
||||
DOB time.Time // requires `import time`
|
||||
Age int
|
||||
Cats []string
|
||||
Pi float64
|
||||
Perfection []int
|
||||
DOB time.Time
|
||||
}
|
||||
```
|
||||
|
||||
And then decoded with:
|
||||
|
||||
```go
|
||||
var conf Config
|
||||
if _, err := toml.Decode(tomlData, &conf); err != nil {
|
||||
// handle error
|
||||
}
|
||||
_, err := toml.Decode(tomlData, &conf)
|
||||
```
|
||||
|
||||
You can also use struct tags if your struct field name doesn't map to a TOML
|
||||
key value directly:
|
||||
You can also use struct tags if your struct field name doesn't map to a TOML key
|
||||
value directly:
|
||||
|
||||
```toml
|
||||
some_key_NAME = "wat"
|
||||
@@ -80,139 +54,67 @@ some_key_NAME = "wat"
|
||||
|
||||
```go
|
||||
type TOML struct {
|
||||
ObscureKey string `toml:"some_key_NAME"`
|
||||
ObscureKey string `toml:"some_key_NAME"`
|
||||
}
|
||||
```
|
||||
|
||||
### Using the `encoding.TextUnmarshaler` interface
|
||||
Beware that like other decoders **only exported fields** are considered when
|
||||
encoding and decoding; private fields are silently ignored.
|
||||
|
||||
Here's an example that automatically parses duration strings into
|
||||
`time.Duration` values:
|
||||
### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
|
||||
Here's an example that automatically parses values in a `mail.Address`:
|
||||
|
||||
```toml
|
||||
[[song]]
|
||||
name = "Thunder Road"
|
||||
duration = "4m49s"
|
||||
|
||||
[[song]]
|
||||
name = "Stairway to Heaven"
|
||||
duration = "8m03s"
|
||||
```
|
||||
|
||||
Which can be decoded with:
|
||||
|
||||
```go
|
||||
type song struct {
|
||||
Name string
|
||||
Duration duration
|
||||
}
|
||||
type songs struct {
|
||||
Song []song
|
||||
}
|
||||
var favorites songs
|
||||
if _, err := toml.Decode(blob, &favorites); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, s := range favorites.Song {
|
||||
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
|
||||
}
|
||||
```
|
||||
|
||||
And you'll also need a `duration` type that satisfies the
|
||||
`encoding.TextUnmarshaler` interface:
|
||||
|
||||
```go
|
||||
type duration struct {
|
||||
time.Duration
|
||||
}
|
||||
|
||||
func (d *duration) UnmarshalText(text []byte) error {
|
||||
var err error
|
||||
d.Duration, err = time.ParseDuration(string(text))
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
### More complex usage
|
||||
|
||||
Here's an example of how to load the example from the official spec page:
|
||||
|
||||
```toml
|
||||
# This is a TOML document. Boom.
|
||||
|
||||
title = "TOML Example"
|
||||
|
||||
[owner]
|
||||
name = "Tom Preston-Werner"
|
||||
organization = "GitHub"
|
||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
||||
|
||||
[database]
|
||||
server = "192.168.1.1"
|
||||
ports = [ 8001, 8001, 8002 ]
|
||||
connection_max = 5000
|
||||
enabled = true
|
||||
|
||||
[servers]
|
||||
|
||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
||||
[servers.alpha]
|
||||
ip = "10.0.0.1"
|
||||
dc = "eqdc10"
|
||||
|
||||
[servers.beta]
|
||||
ip = "10.0.0.2"
|
||||
dc = "eqdc10"
|
||||
|
||||
[clients]
|
||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
||||
|
||||
# Line breaks are OK when inside arrays
|
||||
hosts = [
|
||||
"alpha",
|
||||
"omega"
|
||||
contacts = [
|
||||
"Donald Duck <donald@duckburg.com>",
|
||||
"Scrooge McDuck <scrooge@duckburg.com>",
|
||||
]
|
||||
```
|
||||
|
||||
And the corresponding Go types are:
|
||||
Can be decoded with:
|
||||
|
||||
```go
|
||||
type tomlConfig struct {
|
||||
Title string
|
||||
Owner ownerInfo
|
||||
DB database `toml:"database"`
|
||||
Servers map[string]server
|
||||
Clients clients
|
||||
// Create address type which satisfies the encoding.TextUnmarshaler interface.
|
||||
type address struct {
|
||||
*mail.Address
|
||||
}
|
||||
|
||||
type ownerInfo struct {
|
||||
Name string
|
||||
Org string `toml:"organization"`
|
||||
Bio string
|
||||
DOB time.Time
|
||||
func (a *address) UnmarshalText(text []byte) error {
|
||||
var err error
|
||||
a.Address, err = mail.ParseAddress(string(text))
|
||||
return err
|
||||
}
|
||||
|
||||
type database struct {
|
||||
Server string
|
||||
Ports []int
|
||||
ConnMax int `toml:"connection_max"`
|
||||
Enabled bool
|
||||
}
|
||||
// Decode it.
|
||||
func decode() {
|
||||
blob := `
|
||||
contacts = [
|
||||
"Donald Duck <donald@duckburg.com>",
|
||||
"Scrooge McDuck <scrooge@duckburg.com>",
|
||||
]
|
||||
`
|
||||
|
||||
type server struct {
|
||||
IP string
|
||||
DC string
|
||||
}
|
||||
var contacts struct {
|
||||
Contacts []address
|
||||
}
|
||||
|
||||
type clients struct {
|
||||
Data [][]interface{}
|
||||
Hosts []string
|
||||
_, err := toml.Decode(blob, &contacts)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, c := range contacts.Contacts {
|
||||
fmt.Printf("%#v\n", c.Address)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"}
|
||||
// &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"}
|
||||
}
|
||||
```
|
||||
|
||||
Note that a case insensitive match will be tried if an exact match can't be
|
||||
found.
|
||||
To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
|
||||
a similar way.
|
||||
|
||||
A working example of the above can be found in `_examples/example.{go,toml}`.
|
||||
### More complex usage
|
||||
See the [`_example/`](/_example) directory for a more complex example.
|
||||
|
||||
src/runtime/vendor/github.com/BurntSushi/toml/decode.go (generated, vendored; 519 lines changed)
@@ -1,54 +1,171 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func e(format string, args ...interface{}) error {
|
||||
return fmt.Errorf("toml: "+format, args...)
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
||||
// TOML description of themselves.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalTOML(interface{}) error
|
||||
}
|
||||
|
||||
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
|
||||
func Unmarshal(p []byte, v interface{}) error {
|
||||
_, err := Decode(string(p), v)
|
||||
// Unmarshal decodes the contents of `data` in TOML format into a pointer `v`.
|
||||
func Unmarshal(data []byte, v interface{}) error {
|
||||
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode the TOML data in to the pointer v.
|
||||
//
|
||||
// See the documentation on Decoder for a description of the decoding process.
|
||||
func Decode(data string, v interface{}) (MetaData, error) {
|
||||
return NewDecoder(strings.NewReader(data)).Decode(v)
|
||||
}
|
||||
|
||||
// DecodeFile is just like Decode, except it will automatically read the
|
||||
// contents of the file at path and decode it for you.
|
||||
func DecodeFile(path string, v interface{}) (MetaData, error) {
|
||||
fp, err := os.Open(path)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
defer fp.Close()
|
||||
return NewDecoder(fp).Decode(v)
|
||||
}
|
||||
|
||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
||||
// When using the various `Decode*` functions, the type `Primitive` may
|
||||
// be given to any value, and its decoding will be delayed.
|
||||
//
|
||||
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
|
||||
// This type can be used for any value, which will cause decoding to be delayed.
|
||||
// You can use the PrimitiveDecode() function to "manually" decode these values.
|
||||
//
|
||||
// The underlying representation of a `Primitive` value is subject to change.
|
||||
// Do not rely on it.
|
||||
// NOTE: The underlying representation of a `Primitive` value is subject to
|
||||
// change. Do not rely on it.
|
||||
//
|
||||
// N.B. Primitive values are still parsed, so using them will only avoid
|
||||
// the overhead of reflection. They can be useful when you don't know the
|
||||
// exact type of TOML data until run time.
|
||||
// NOTE: Primitive values are still parsed, so using them will only avoid the
|
||||
// overhead of reflection. They can be useful when you don't know the exact type
|
||||
// of TOML data until runtime.
|
||||
type Primitive struct {
|
||||
undecoded interface{}
|
||||
context Key
|
||||
}
|
||||
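A rough sketch of delayed decoding with `Primitive` and `MetaData.PrimitiveDecode` (the `rawConfig` type and its fields are invented for illustration):

```go
package config

import "github.com/BurntSushi/toml"

// rawConfig delays decoding of the Server table until Kind is known.
type rawConfig struct {
	Kind   string
	Server toml.Primitive
}

func load(doc string) error {
	var rc rawConfig
	md, err := toml.Decode(doc, &rc)
	if err != nil {
		return err
	}

	// Now decode the delayed part with a concrete type.
	var srv struct{ Addr string }
	return md.PrimitiveDecode(rc.Server, &srv)
}
```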
|
||||
// DEPRECATED!
|
||||
// The significand precision for float32 and float64 is 24 and 53 bits; this is
|
||||
// the range a natural number can be stored in a float without loss of data.
|
||||
const (
|
||||
maxSafeFloat32Int = 16777215 // 2^24-1
|
||||
maxSafeFloat64Int = int64(9007199254740991) // 2^53-1
|
||||
)
|
||||
|
||||
// Decoder decodes TOML data.
|
||||
//
|
||||
// Use MetaData.PrimitiveDecode instead.
|
||||
func PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md := MetaData{decoded: make(map[string]bool)}
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
// TOML tables correspond to Go structs or maps (dealer's choice – they can be
|
||||
// used interchangeably).
|
||||
//
|
||||
// TOML table arrays correspond to either a slice of structs or a slice of maps.
|
||||
//
|
||||
// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
|
||||
// in the local timezone.
|
||||
//
|
||||
// time.Duration types are treated as nanoseconds if the TOML value is an
|
||||
// integer, or they're parsed with time.ParseDuration() if they're strings.
|
||||
//
|
||||
// All other TOML types (float, string, int, bool and array) correspond to the
|
||||
// obvious Go types.
|
||||
//
|
||||
// An exception to the above rules is if a type implements the TextUnmarshaler
|
||||
// interface, in which case any primitive TOML value (floats, strings, integers,
|
||||
// booleans, datetimes) will be converted to a []byte and given to the value's
|
||||
// UnmarshalText method. See the Unmarshaler example for a demonstration with
|
||||
// email addresses.
|
||||
//
|
||||
// Key mapping
|
||||
//
|
||||
// TOML keys can map to either keys in a Go map or field names in a Go struct.
|
||||
// The special `toml` struct tag can be used to map TOML keys to struct fields
|
||||
// that don't match the key name exactly (see the example). A case insensitive
|
||||
// match to struct names will be tried if an exact match can't be found.
|
||||
//
|
||||
// The mapping between TOML values and Go values is loose. That is, there may
|
||||
// exist TOML values that cannot be placed into your representation, and there
|
||||
// may be parts of your representation that do not correspond to TOML values.
|
||||
// This loose mapping can be made stricter by using the IsDefined and/or
|
||||
// Undecoded methods on the MetaData returned.
|
||||
//
|
||||
// This decoder does not handle cyclic types. Decode will not terminate if a
|
||||
// cyclic type is passed.
|
||||
type Decoder struct {
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
// NewDecoder creates a new Decoder.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{r: r}
|
||||
}
|
||||
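A short sketch of the decoding rules described above, assuming an invented `serverConf` type: the `toml` tag maps a key that doesn't match the field name, and `time.Duration` accepts either a string duration or an integer of nanoseconds.

```go
package config

import (
	"io"
	"time"

	"github.com/BurntSushi/toml"
)

type serverConf struct {
	ListenAddr string        `toml:"listen_addr"`
	Timeout    time.Duration `toml:"timeout"` // "30s" or an integer of nanoseconds
}

func decode(r io.Reader) (serverConf, error) {
	var c serverConf
	_, err := toml.NewDecoder(r).Decode(&c)
	return c, err
}
```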
|
||||
var (
|
||||
unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
|
||||
primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem()
|
||||
)
|
||||
|
||||
// Decode TOML data in to the pointer `v`.
|
||||
func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
s := "%q"
|
||||
if reflect.TypeOf(v) == nil {
|
||||
s = "%v"
|
||||
}
|
||||
|
||||
return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v))
|
||||
}
|
||||
if rv.IsNil() {
|
||||
return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
|
||||
}
|
||||
|
||||
// Check if this is a supported type: struct, map, interface{}, or something
|
||||
// that implements UnmarshalTOML or UnmarshalText.
|
||||
rv = indirect(rv)
|
||||
rt := rv.Type()
|
||||
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
|
||||
!(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) &&
|
||||
!rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) {
|
||||
return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt)
|
||||
}
|
||||
|
||||
// TODO: parser should read from io.Reader? Or at the very least, make it
|
||||
// read from []byte rather than string
|
||||
data, err := ioutil.ReadAll(dec.r)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
|
||||
p, err := parse(string(data))
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
|
||||
md := MetaData{
|
||||
mapping: p.mapping,
|
||||
keyInfo: p.keyInfo,
|
||||
keys: p.ordered,
|
||||
decoded: make(map[string]struct{}, len(p.ordered)),
|
||||
context: nil,
|
||||
data: data,
|
||||
}
|
||||
return md, md.unify(p.mapping, rv)
|
||||
}
|
||||
|
||||
// PrimitiveDecode is just like the other `Decode*` functions, except it
|
||||
@@ -68,90 +185,15 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// Decode will decode the contents of `data` in TOML format into a pointer
|
||||
// `v`.
|
||||
//
|
||||
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
|
||||
// used interchangeably.)
|
||||
//
|
||||
// TOML arrays of tables correspond to either a slice of structs or a slice
|
||||
// of maps.
|
||||
//
|
||||
// TOML datetimes correspond to Go `time.Time` values.
|
||||
//
|
||||
// All other TOML types (float, string, int, bool and array) correspond
|
||||
// to the obvious Go types.
|
||||
//
|
||||
// An exception to the above rules is if a type implements the
|
||||
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
|
||||
// (floats, strings, integers, booleans and datetimes) will be converted to
|
||||
// a byte string and given to the value's UnmarshalText method. See the
|
||||
// Unmarshaler example for a demonstration with time duration strings.
|
||||
//
|
||||
// Key mapping
|
||||
//
|
||||
// TOML keys can map to either keys in a Go map or field names in a Go
|
||||
// struct. The special `toml` struct tag may be used to map TOML keys to
|
||||
// struct fields that don't match the key name exactly. (See the example.)
|
||||
// A case insensitive match to struct names will be tried if an exact match
|
||||
// can't be found.
|
||||
//
|
||||
// The mapping between TOML values and Go values is loose. That is, there
|
||||
// may exist TOML values that cannot be placed into your representation, and
|
||||
// there may be parts of your representation that do not correspond to
|
||||
// TOML values. This loose mapping can be made stricter by using the IsDefined
|
||||
// and/or Undecoded methods on the MetaData returned.
|
||||
//
|
||||
// This decoder will not handle cyclic types. If a cyclic type is passed,
|
||||
// `Decode` will not terminate.
|
||||
func Decode(data string, v interface{}) (MetaData, error) {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
|
||||
}
|
||||
if rv.IsNil() {
|
||||
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
|
||||
}
|
||||
p, err := parse(data)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
md := MetaData{
|
||||
p.mapping, p.types, p.ordered,
|
||||
make(map[string]bool, len(p.ordered)), nil,
|
||||
}
|
||||
return md, md.unify(p.mapping, indirect(rv))
|
||||
}
|
||||
|
||||
// DecodeFile is just like Decode, except it will automatically read the
|
||||
// contents of the file at `fpath` and decode it for you.
|
||||
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
|
||||
bs, err := ioutil.ReadFile(fpath)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
return Decode(string(bs), v)
|
||||
}
|
||||
|
||||
// DecodeReader is just like Decode, except it will consume all bytes
|
||||
// from the reader and decode it for you.
|
||||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
|
||||
bs, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
return Decode(string(bs), v)
|
||||
}
|
||||
|
||||
// unify performs a sort of type unification based on the structure of `rv`,
|
||||
// which is the client representation.
|
||||
//
|
||||
// Any type mismatch produces an error. Finding a type that we don't know
|
||||
// how to handle produces an unsupported type error.
|
||||
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
|
||||
// Special case. Look for a `Primitive` value.
|
||||
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
|
||||
// TODO: #76 would make this superfluous after implemented.
|
||||
if rv.Type() == primitiveType {
|
||||
// Save the undecoded data and the key context into the primitive
|
||||
// value.
|
||||
context := make(Key, len(md.context))
|
||||
@@ -163,36 +205,24 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Special case. Unmarshaler Interface support.
|
||||
if rv.CanAddr() {
|
||||
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
|
||||
return v.UnmarshalTOML(data)
|
||||
}
|
||||
rvi := rv.Interface()
|
||||
if v, ok := rvi.(Unmarshaler); ok {
|
||||
return v.UnmarshalTOML(data)
|
||||
}
|
||||
|
||||
// Special case. Handle time.Time values specifically.
|
||||
// TODO: Remove this code when we decide to drop support for Go 1.1.
|
||||
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
|
||||
// interfaces.
|
||||
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
|
||||
return md.unifyDatetime(data, rv)
|
||||
}
|
||||
|
||||
// Special case. Look for a value satisfying the TextUnmarshaler interface.
|
||||
if v, ok := rv.Interface().(TextUnmarshaler); ok {
|
||||
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
|
||||
return md.unifyText(data, v)
|
||||
}
|
||||
// BUG(burntsushi)
|
||||
|
||||
// TODO:
|
||||
// The behavior here is incorrect whenever a Go type satisfies the
|
||||
// encoding.TextUnmarshaler interface but also corresponds to a TOML
|
||||
// hash or array. In particular, the unmarshaler should only be applied
|
||||
// to primitive TOML values. But at this point, it will be applied to
|
||||
// all kinds of values and produce an incorrect error whenever those values
|
||||
// are hashes or arrays (including arrays of tables).
|
||||
// encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
|
||||
// array. In particular, the unmarshaler should only be applied to primitive
|
||||
// TOML values. But at this point, it will be applied to all kinds of values
|
||||
// and produce an incorrect error whenever those values are hashes or arrays
|
||||
// (including arrays of tables).
|
||||
|
||||
k := rv.Kind()
|
||||
|
||||
// laziness
|
||||
if k >= reflect.Int && k <= reflect.Uint64 {
|
||||
return md.unifyInt(data, rv)
|
||||
}
|
||||
@@ -218,17 +248,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
case reflect.Bool:
|
||||
return md.unifyBool(data, rv)
|
||||
case reflect.Interface:
|
||||
// we only support empty interfaces.
|
||||
if rv.NumMethod() > 0 {
|
||||
return e("unsupported type %s", rv.Type())
|
||||
if rv.NumMethod() > 0 { // Only support empty interfaces are supported.
|
||||
return md.e("unsupported type %s", rv.Type())
|
||||
}
|
||||
return md.unifyAnything(data, rv)
|
||||
case reflect.Float32:
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return md.unifyFloat64(data, rv)
|
||||
}
|
||||
return e("unsupported type %s", rv.Kind())
|
||||
return md.e("unsupported type %s", rv.Kind())
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
@@ -237,7 +264,7 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
if mapping == nil {
|
||||
return nil
|
||||
}
|
||||
return e("type mismatch for %s: expected table but found %T",
|
||||
return md.e("type mismatch for %s: expected table but found %T",
|
||||
rv.Type().String(), mapping)
|
||||
}
|
||||
|
||||
@@ -259,17 +286,18 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
for _, i := range f.index {
|
||||
subv = indirect(subv.Field(i))
|
||||
}
|
||||
|
||||
if isUnifiable(subv) {
|
||||
md.decoded[md.context.add(key).String()] = true
|
||||
md.decoded[md.context.add(key).String()] = struct{}{}
|
||||
md.context = append(md.context, key)
|
||||
if err := md.unify(datum, subv); err != nil {
|
||||
|
||||
err := md.unify(datum, subv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
} else if f.name != "" {
|
||||
// Bad user! No soup for you!
|
||||
return e("cannot write unexported field %s.%s",
|
||||
rv.Type().String(), f.name)
|
||||
return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -277,28 +305,43 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
||||
keyType := rv.Type().Key().Kind()
|
||||
if keyType != reflect.String && keyType != reflect.Interface {
|
||||
return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
|
||||
keyType, rv.Type())
|
||||
}
|
||||
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
if !ok {
|
||||
if tmap == nil {
|
||||
return nil
|
||||
}
|
||||
return badtype("map", mapping)
|
||||
return md.badtype("map", mapping)
|
||||
}
|
||||
if rv.IsNil() {
|
||||
rv.Set(reflect.MakeMap(rv.Type()))
|
||||
}
|
||||
for k, v := range tmap {
|
||||
md.decoded[md.context.add(k).String()] = true
|
||||
md.decoded[md.context.add(k).String()] = struct{}{}
|
||||
md.context = append(md.context, k)
|
||||
|
||||
rvkey := indirect(reflect.New(rv.Type().Key()))
|
||||
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
|
||||
if err := md.unify(v, rvval); err != nil {
|
||||
|
||||
err := md.unify(v, indirect(rvval))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
|
||||
rvkey.SetString(k)
|
||||
rvkey := indirect(reflect.New(rv.Type().Key()))
|
||||
|
||||
switch keyType {
|
||||
case reflect.Interface:
|
||||
rvkey.Set(reflect.ValueOf(k))
|
||||
case reflect.String:
|
||||
rvkey.SetString(k)
|
||||
}
|
||||
|
||||
rv.SetMapIndex(rvkey, rvval)
|
||||
}
|
||||
return nil
|
||||
@@ -310,12 +353,10 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
||||
if !datav.IsValid() {
|
||||
return nil
|
||||
}
|
||||
return badtype("slice", data)
|
||||
return md.badtype("slice", data)
|
||||
}
|
||||
sliceLen := datav.Len()
|
||||
if sliceLen != rv.Len() {
|
||||
return e("expected array length %d; got TOML array of length %d",
|
||||
rv.Len(), sliceLen)
|
||||
if l := datav.Len(); l != rv.Len() {
|
||||
return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l)
|
||||
}
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
@@ -326,7 +367,7 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
||||
if !datav.IsValid() {
|
||||
return nil
|
||||
}
|
||||
return badtype("slice", data)
|
||||
return md.badtype("slice", data)
|
||||
}
|
||||
n := datav.Len()
|
||||
if rv.IsNil() || rv.Cap() < n {
|
||||
@@ -337,37 +378,45 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
|
||||
sliceLen := data.Len()
|
||||
for i := 0; i < sliceLen; i++ {
|
||||
v := data.Index(i).Interface()
|
||||
sliceval := indirect(rv.Index(i))
|
||||
if err := md.unify(v, sliceval); err != nil {
|
||||
l := data.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
|
||||
if _, ok := data.(time.Time); ok {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
||||
_, ok := rv.Interface().(json.Number)
|
||||
if ok {
|
||||
if i, ok := data.(int64); ok {
|
||||
rv.SetString(strconv.FormatInt(i, 10))
|
||||
} else if f, ok := data.(float64); ok {
|
||||
rv.SetString(strconv.FormatFloat(f, 'f', -1, 64))
|
||||
} else {
|
||||
return md.badtype("string", data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("time.Time", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
||||
if s, ok := data.(string); ok {
|
||||
rv.SetString(s)
|
||||
return nil
|
||||
}
|
||||
return badtype("string", data)
|
||||
return md.badtype("string", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
||||
rvk := rv.Kind()
|
||||
|
||||
if num, ok := data.(float64); ok {
|
||||
switch rv.Kind() {
|
||||
switch rvk {
|
||||
case reflect.Float32:
|
||||
if num < -math.MaxFloat32 || num > math.MaxFloat32 {
|
||||
return md.parseErr(errParseRange{i: num, size: rvk.String()})
|
||||
}
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
rv.SetFloat(num)
|
||||
@@ -376,54 +425,60 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("float", data)
|
||||
|
||||
if num, ok := data.(int64); ok {
|
||||
if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
|
||||
(rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
|
||||
return md.parseErr(errParseRange{i: num, size: rvk.String()})
|
||||
}
|
||||
rv.SetFloat(float64(num))
|
||||
return nil
|
||||
}
|
||||
|
||||
return md.badtype("float", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(int64); ok {
|
||||
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Int8:
|
||||
if num < math.MinInt8 || num > math.MaxInt8 {
|
||||
return e("value %d is out of range for int8", num)
|
||||
}
|
||||
case reflect.Int16:
|
||||
if num < math.MinInt16 || num > math.MaxInt16 {
|
||||
return e("value %d is out of range for int16", num)
|
||||
}
|
||||
case reflect.Int32:
|
||||
if num < math.MinInt32 || num > math.MaxInt32 {
|
||||
return e("value %d is out of range for int32", num)
|
||||
}
|
||||
_, ok := rv.Interface().(time.Duration)
|
||||
if ok {
|
||||
// Parse as string duration, and fall back to regular integer parsing
|
||||
// (as nanosecond) if this is not a string.
|
||||
if s, ok := data.(string); ok {
|
||||
dur, err := time.ParseDuration(s)
|
||||
if err != nil {
|
||||
return md.parseErr(errParseDuration{s})
|
||||
}
|
||||
rv.SetInt(num)
|
||||
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
|
||||
unum := uint64(num)
|
||||
switch rv.Kind() {
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Uint8:
|
||||
if num < 0 || unum > math.MaxUint8 {
|
||||
return e("value %d is out of range for uint8", num)
|
||||
}
|
||||
case reflect.Uint16:
|
||||
if num < 0 || unum > math.MaxUint16 {
|
||||
return e("value %d is out of range for uint16", num)
|
||||
}
|
||||
case reflect.Uint32:
|
||||
if num < 0 || unum > math.MaxUint32 {
|
||||
return e("value %d is out of range for uint32", num)
|
||||
}
|
||||
}
|
||||
rv.SetUint(unum)
|
||||
} else {
|
||||
panic("unreachable")
|
||||
rv.SetInt(int64(dur))
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("integer", data)
|
||||
|
||||
num, ok := data.(int64)
|
||||
if !ok {
|
||||
return md.badtype("integer", data)
|
||||
}
|
||||
|
||||
rvk := rv.Kind()
|
||||
switch {
|
||||
case rvk >= reflect.Int && rvk <= reflect.Int64:
|
||||
if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) ||
|
||||
(rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) ||
|
||||
(rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) {
|
||||
return md.parseErr(errParseRange{i: num, size: rvk.String()})
|
||||
}
|
||||
rv.SetInt(num)
|
||||
case rvk >= reflect.Uint && rvk <= reflect.Uint64:
|
||||
unum := uint64(num)
|
||||
if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) ||
|
||||
rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) ||
|
||||
rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) {
|
||||
return md.parseErr(errParseRange{i: num, size: rvk.String()})
|
||||
}
|
||||
rv.SetUint(unum)
|
||||
default:
|
||||
panic("unreachable")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
||||
@@ -431,7 +486,7 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
||||
rv.SetBool(b)
|
||||
return nil
|
||||
}
|
||||
return badtype("boolean", data)
|
||||
return md.badtype("boolean", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
|
||||
@@ -439,10 +494,16 @@ func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
|
||||
func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
|
||||
var s string
|
||||
switch sdata := data.(type) {
|
||||
case TextMarshaler:
|
||||
case Marshaler:
|
||||
text, err := sdata.MarshalTOML()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s = string(text)
|
||||
case encoding.TextMarshaler:
|
||||
text, err := sdata.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -459,7 +520,7 @@ func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
|
||||
case float64:
|
||||
s = fmt.Sprintf("%f", sdata)
|
||||
default:
|
||||
return badtype("primitive (string-like)", data)
|
||||
return md.badtype("primitive (string-like)", data)
|
||||
}
|
||||
if err := v.UnmarshalText([]byte(s)); err != nil {
|
||||
return err
|
||||
@@ -467,22 +528,54 @@ func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) badtype(dst string, data interface{}) error {
|
||||
return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst)
|
||||
}
|
||||
|
||||
func (md *MetaData) parseErr(err error) error {
|
||||
k := md.context.String()
|
||||
return ParseError{
|
||||
LastKey: k,
|
||||
Position: md.keyInfo[k].pos,
|
||||
Line: md.keyInfo[k].pos.Line,
|
||||
err: err,
|
||||
input: string(md.data),
|
||||
}
|
||||
}
|
||||
|
||||
func (md *MetaData) e(format string, args ...interface{}) error {
|
||||
f := "toml: "
|
||||
if len(md.context) > 0 {
|
||||
f = fmt.Sprintf("toml: (last key %q): ", md.context)
|
||||
p := md.keyInfo[md.context.String()].pos
|
||||
if p.Line > 0 {
|
||||
f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context)
|
||||
}
|
||||
}
|
||||
return fmt.Errorf(f+format, args...)
|
||||
}
|
||||
|
||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
|
||||
func rvalue(v interface{}) reflect.Value {
|
||||
return indirect(reflect.ValueOf(v))
|
||||
}
|
||||
|
||||
// indirect returns the value pointed to by a pointer.
|
||||
// Pointers are followed until the value is not a pointer.
|
||||
// New values are allocated for each nil pointer.
|
||||
//
|
||||
// An exception to this rule is if the value satisfies an interface of
|
||||
// interest to us (like encoding.TextUnmarshaler).
|
||||
// Pointers are followed until the value is not a pointer. New values are
|
||||
// allocated for each nil pointer.
|
||||
//
|
||||
// An exception to this rule is if the value satisfies an interface of interest
|
||||
// to us (like encoding.TextUnmarshaler).
|
||||
func indirect(v reflect.Value) reflect.Value {
|
||||
if v.Kind() != reflect.Ptr {
|
||||
if v.CanSet() {
|
||||
pv := v.Addr()
|
||||
if _, ok := pv.Interface().(TextUnmarshaler); ok {
|
||||
pvi := pv.Interface()
|
||||
if _, ok := pvi.(encoding.TextUnmarshaler); ok {
|
||||
return pv
|
||||
}
|
||||
if _, ok := pvi.(Unmarshaler); ok {
|
||||
return pv
|
||||
}
|
||||
}
|
||||
@@ -498,12 +591,12 @@ func isUnifiable(rv reflect.Value) bool {
|
||||
if rv.CanSet() {
|
||||
return true
|
||||
}
|
||||
if _, ok := rv.Interface().(TextUnmarshaler); ok {
|
||||
rvi := rv.Interface()
|
||||
if _, ok := rvi.(encoding.TextUnmarshaler); ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := rvi.(Unmarshaler); ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func badtype(expected string, data interface{}) error {
|
||||
return e("cannot load TOML value of type %T into a Go %s", data, expected)
|
||||
}
|
||||
|
||||
19
src/runtime/vendor/github.com/BurntSushi/toml/decode_go116.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
//go:build go1.16
// +build go1.16

package toml

import (
	"io/fs"
)

// DecodeFS is just like Decode, except it will automatically read the contents
// of the file at `path` from a fs.FS instance.
func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
	fp, err := fsys.Open(path)
	if err != nil {
		return MetaData{}, err
	}
	defer fp.Close()
	return NewDecoder(fp).Decode(v)
}
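A possible usage sketch for `DecodeFS`, reading from an `embed.FS` (the embedded file name is illustrative only):

```go
//go:build go1.16

package config

import (
	"embed"

	"github.com/BurntSushi/toml"
)

//go:embed default.toml
var defaults embed.FS

type Config struct{ Title string }

func loadDefaults() (Config, error) {
	var c Config
	_, err := toml.DecodeFS(defaults, "default.toml", &c)
	return c, err
}
```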
21
src/runtime/vendor/github.com/BurntSushi/toml/deprecated.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
package toml

import (
	"encoding"
	"io"
)

// Deprecated: use encoding.TextMarshaler
type TextMarshaler encoding.TextMarshaler

// Deprecated: use encoding.TextUnmarshaler
type TextUnmarshaler encoding.TextUnmarshaler

// Deprecated: use MetaData.PrimitiveDecode.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
	md := MetaData{decoded: make(map[string]struct{})}
	return md.unify(primValue.undecoded, rvalue(v))
}

// Deprecated: use NewDecoder(reader).Decode(&value).
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }
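Callers of the deprecated `DecodeReader` can migrate roughly as follows (a sketch; `Config` is a placeholder type):

```go
package config

import (
	"io"

	"github.com/BurntSushi/toml"
)

type Config struct{ Title string }

func load(r io.Reader, cfg *Config) error {
	// Previously: _, err := toml.DecodeReader(r, cfg)
	_, err := toml.NewDecoder(r).Decode(cfg)
	return err
}
```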
28
src/runtime/vendor/github.com/BurntSushi/toml/doc.go
generated
vendored
@@ -1,27 +1,13 @@
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
Package toml implements decoding and encoding of TOML files.

The specification implemented: https://github.com/toml-lang/toml
This package supports TOML v1.0.0, as listed on https://toml.io

The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.
There is also support for delaying decoding with the Primitive type, and
querying the set of keys in a TOML document with the MetaData type.

Testing

There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.

The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test

The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
and can be used to verify if TOML document is valid. It can also be used to
print the type of each key.
*/
package toml
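A small sketch of querying a document's keys through `MetaData`, as mentioned above (the document literal is invented):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	md, err := toml.Decode("a = 1\n[b]\nc = 2\n", &v)
	if err != nil {
		panic(err)
	}
	fmt.Println(md.Keys())              // every key, in order of appearance
	fmt.Println(md.IsDefined("b", "c")) // true
	fmt.Println(md.Undecoded())         // keys not mapped onto v; empty here
}
```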
618
src/runtime/vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
@@ -2,57 +2,127 @@ package toml
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/BurntSushi/toml/internal"
|
||||
)
|
||||
|
||||
type tomlEncodeError struct{ error }
|
||||
|
||||
var (
|
||||
errArrayMixedElementTypes = errors.New(
|
||||
"toml: cannot encode array with mixed element types")
|
||||
errArrayNilElement = errors.New(
|
||||
"toml: cannot encode array with nil element")
|
||||
errNonString = errors.New(
|
||||
"toml: cannot encode a map with non-string key type")
|
||||
errAnonNonStruct = errors.New(
|
||||
"toml: cannot encode an anonymous field that is not a struct")
|
||||
errArrayNoTable = errors.New(
|
||||
"toml: TOML array element cannot contain a table")
|
||||
errNoKey = errors.New(
|
||||
"toml: top-level values must be Go maps or structs")
|
||||
errAnything = errors.New("") // used in testing
|
||||
errArrayNilElement = errors.New("toml: cannot encode array with nil element")
|
||||
errNonString = errors.New("toml: cannot encode a map with non-string key type")
|
||||
errNoKey = errors.New("toml: top-level values must be Go maps or structs")
|
||||
errAnything = errors.New("") // used in testing
|
||||
)
|
||||
|
||||
var quotedReplacer = strings.NewReplacer(
|
||||
"\t", "\\t",
|
||||
"\n", "\\n",
|
||||
"\r", "\\r",
|
||||
var dblQuotedReplacer = strings.NewReplacer(
|
||||
"\"", "\\\"",
|
||||
"\\", "\\\\",
|
||||
"\x00", `\u0000`,
|
||||
"\x01", `\u0001`,
|
||||
"\x02", `\u0002`,
|
||||
"\x03", `\u0003`,
|
||||
"\x04", `\u0004`,
|
||||
"\x05", `\u0005`,
|
||||
"\x06", `\u0006`,
|
||||
"\x07", `\u0007`,
|
||||
"\b", `\b`,
|
||||
"\t", `\t`,
|
||||
"\n", `\n`,
|
||||
"\x0b", `\u000b`,
|
||||
"\f", `\f`,
|
||||
"\r", `\r`,
|
||||
"\x0e", `\u000e`,
|
||||
"\x0f", `\u000f`,
|
||||
"\x10", `\u0010`,
|
||||
"\x11", `\u0011`,
|
||||
"\x12", `\u0012`,
|
||||
"\x13", `\u0013`,
|
||||
"\x14", `\u0014`,
|
||||
"\x15", `\u0015`,
|
||||
"\x16", `\u0016`,
|
||||
"\x17", `\u0017`,
|
||||
"\x18", `\u0018`,
|
||||
"\x19", `\u0019`,
|
||||
"\x1a", `\u001a`,
|
||||
"\x1b", `\u001b`,
|
||||
"\x1c", `\u001c`,
|
||||
"\x1d", `\u001d`,
|
||||
"\x1e", `\u001e`,
|
||||
"\x1f", `\u001f`,
|
||||
"\x7f", `\u007f`,
|
||||
)
|
||||
|
||||
// Encoder controls the encoding of Go values to a TOML document to some
|
||||
// io.Writer.
|
||||
//
|
||||
// The indentation level can be controlled with the Indent field.
|
||||
type Encoder struct {
|
||||
// A single indentation level. By default it is two spaces.
|
||||
Indent string
|
||||
var (
|
||||
marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
|
||||
timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
|
||||
)
|
||||
|
||||
// hasWritten is whether we have written any output to w yet.
|
||||
hasWritten bool
|
||||
w *bufio.Writer
|
||||
// Marshaler is the interface implemented by types that can marshal themselves
|
||||
// into valid TOML.
|
||||
type Marshaler interface {
|
||||
MarshalTOML() ([]byte, error)
|
||||
}
|
||||
|
||||
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
|
||||
// given. By default, a single indentation level is 2 spaces.
|
||||
// Encoder encodes a Go to a TOML document.
|
||||
//
|
||||
// The mapping between Go values and TOML values should be precisely the same as
|
||||
// for the Decode* functions.
|
||||
//
|
||||
// time.Time is encoded as a RFC 3339 string, and time.Duration as its string
|
||||
// representation.
|
||||
//
|
||||
// The toml.Marshaler and encoder.TextMarshaler interfaces are supported to
|
||||
// encoding the value as custom TOML.
|
||||
//
|
||||
// If you want to write arbitrary binary data then you will need to use
|
||||
// something like base64 since TOML does not have any binary types.
|
||||
//
|
||||
// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
|
||||
// are encoded first.
|
||||
//
|
||||
// Go maps will be sorted alphabetically by key for deterministic output.
|
||||
//
|
||||
// The toml struct tag can be used to provide the key name; if omitted the
|
||||
// struct field name will be used. If the "omitempty" option is present the
|
||||
// following value will be skipped:
|
||||
//
|
||||
// - arrays, slices, maps, and string with len of 0
|
||||
// - struct with all zero values
|
||||
// - bool false
|
||||
//
|
||||
// If omitzero is given all int and float types with a value of 0 will be
|
||||
// skipped.
|
||||
//
|
||||
// Encoding Go values without a corresponding TOML representation will return an
|
||||
// error. Examples of this includes maps with non-string keys, slices with nil
|
||||
// elements, embedded non-struct types, and nested slices containing maps or
|
||||
// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
|
||||
// is okay, as is []map[string][]string).
|
||||
//
|
||||
// NOTE: only exported keys are encoded due to the use of reflection. Unexported
|
||||
// keys are silently discarded.
|
||||
type Encoder struct {
|
||||
// String to use for a single indentation level; default is two spaces.
|
||||
Indent string
|
||||
|
||||
w *bufio.Writer
|
||||
hasWritten bool // written any output to w yet?
|
||||
}
|
||||
|
||||
// NewEncoder create a new Encoder.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
w: bufio.NewWriter(w),
|
||||
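A sketch of the encoder behaviour documented above, using an invented `Settings` type to show the `omitzero`/`omitempty` tag options and the exported `Indent` field:

```go
package config

import (
	"io"

	"github.com/BurntSushi/toml"
)

// Settings is illustrative only; fields map to TOML keys via the toml tag.
type Settings struct {
	Name    string            `toml:"name"`
	Retries int               `toml:"retries,omitzero"`
	Labels  map[string]string `toml:"labels,omitempty"`
}

func write(w io.Writer, s Settings) error {
	enc := toml.NewEncoder(w)
	enc.Indent = "    " // override the default two-space indent
	return enc.Encode(s)
}
```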
@@ -60,29 +130,10 @@ func NewEncoder(w io.Writer) *Encoder {
|
||||
}
|
||||
}
|
||||
|
||||
// Encode writes a TOML representation of the Go value to the underlying
|
||||
// io.Writer. If the value given cannot be encoded to a valid TOML document,
|
||||
// then an error is returned.
|
||||
// Encode writes a TOML representation of the Go value to the Encoder's writer.
|
||||
//
|
||||
// The mapping between Go values and TOML values should be precisely the same
|
||||
// as for the Decode* functions. Similarly, the TextMarshaler interface is
|
||||
// supported by encoding the resulting bytes as strings. (If you want to write
|
||||
// arbitrary binary data then you will need to use something like base64 since
|
||||
// TOML does not have any binary types.)
|
||||
//
|
||||
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
|
||||
// sub-hashes are encoded first.
|
||||
//
|
||||
// If a Go map is encoded, then its keys are sorted alphabetically for
|
||||
// deterministic output. More control over this behavior may be provided if
|
||||
// there is demand for it.
|
||||
//
|
||||
// Encoding Go values without a corresponding TOML representation---like map
|
||||
// types with non-string keys---will cause an error to be returned. Similarly
|
||||
// for mixed arrays/slices, arrays/slices with nil elements, embedded
|
||||
// non-struct types and nested slices containing maps or structs.
|
||||
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
|
||||
// and so is []map[string][]string.)
|
||||
// An error is returned if the value given cannot be encoded to a valid TOML
|
||||
// document.
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
rv := eindirect(reflect.ValueOf(v))
|
||||
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
|
||||
@@ -106,13 +157,15 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
|
||||
}
|
||||
|
||||
func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
||||
// Special case. Time needs to be in ISO8601 format.
|
||||
// Special case. If we can marshal the type to text, then we used that.
|
||||
// Basically, this prevents the encoder for handling these types as
|
||||
// generic structs (or whatever the underlying type of a TextMarshaler is).
|
||||
switch rv.Interface().(type) {
|
||||
case time.Time, TextMarshaler:
|
||||
enc.keyEqElement(key, rv)
|
||||
// If we can marshal the type to text, then we use that. This prevents the
|
||||
// encoder for handling these types as generic structs (or whatever the
|
||||
// underlying type of a TextMarshaler is).
|
||||
switch {
|
||||
case isMarshaler(rv):
|
||||
enc.writeKeyValue(key, rv, false)
|
||||
return
|
||||
case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented.
|
||||
enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -123,12 +176,12 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
||||
reflect.Uint64,
|
||||
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
|
||||
enc.keyEqElement(key, rv)
|
||||
enc.writeKeyValue(key, rv, false)
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
|
||||
enc.eArrayOfTables(key, rv)
|
||||
} else {
|
||||
enc.keyEqElement(key, rv)
|
||||
enc.writeKeyValue(key, rv, false)
|
||||
}
|
||||
case reflect.Interface:
|
||||
if rv.IsNil() {
|
||||
@@ -148,55 +201,114 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
||||
case reflect.Struct:
|
||||
enc.eTable(key, rv)
|
||||
default:
|
||||
panic(e("unsupported type for key '%s': %s", key, k))
|
||||
encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
|
||||
}
|
||||
}
|
||||
|
||||
// eElement encodes any value that can be an array element (primitives and
|
||||
// arrays).
|
||||
// eElement encodes any value that can be an array element.
|
||||
func (enc *Encoder) eElement(rv reflect.Value) {
|
||||
switch v := rv.Interface().(type) {
|
||||
case time.Time:
|
||||
// Special case time.Time as a primitive. Has to come before
|
||||
// TextMarshaler below because time.Time implements
|
||||
// encoding.TextMarshaler, but we need to always use UTC.
|
||||
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
|
||||
return
|
||||
case TextMarshaler:
|
||||
// Special case. Use text marshaler if it's available for this value.
|
||||
if s, err := v.MarshalText(); err != nil {
|
||||
encPanic(err)
|
||||
} else {
|
||||
enc.writeQuoted(string(s))
|
||||
case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
|
||||
format := time.RFC3339Nano
|
||||
switch v.Location() {
|
||||
case internal.LocalDatetime:
|
||||
format = "2006-01-02T15:04:05.999999999"
|
||||
case internal.LocalDate:
|
||||
format = "2006-01-02"
|
||||
case internal.LocalTime:
|
||||
format = "15:04:05.999999999"
|
||||
}
|
||||
switch v.Location() {
|
||||
default:
|
||||
enc.wf(v.Format(format))
|
||||
case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
|
||||
enc.wf(v.In(time.UTC).Format(format))
|
||||
}
|
||||
return
|
||||
case Marshaler:
|
||||
s, err := v.MarshalTOML()
|
||||
if err != nil {
|
||||
encPanic(err)
|
||||
}
|
||||
if s == nil {
|
||||
encPanic(errors.New("MarshalTOML returned nil and no error"))
|
||||
}
|
||||
enc.w.Write(s)
|
||||
return
|
||||
case encoding.TextMarshaler:
|
||||
s, err := v.MarshalText()
|
||||
if err != nil {
|
||||
encPanic(err)
|
||||
}
|
||||
if s == nil {
|
||||
encPanic(errors.New("MarshalText returned nil and no error"))
|
||||
}
|
||||
enc.writeQuoted(string(s))
|
||||
return
|
||||
case time.Duration:
|
||||
enc.writeQuoted(v.String())
|
||||
return
|
||||
case json.Number:
|
||||
n, _ := rv.Interface().(json.Number)
|
||||
|
||||
if n == "" { /// Useful zero value.
|
||||
enc.w.WriteByte('0')
|
||||
return
|
||||
} else if v, err := n.Int64(); err == nil {
|
||||
enc.eElement(reflect.ValueOf(v))
|
||||
return
|
||||
} else if v, err := n.Float64(); err == nil {
|
||||
enc.eElement(reflect.ValueOf(v))
|
||||
return
|
||||
}
|
||||
encPanic(errors.New(fmt.Sprintf("Unable to convert \"%s\" to neither int64 nor float64", n)))
|
||||
}
|
||||
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
enc.wf(strconv.FormatBool(rv.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
||||
reflect.Int64:
|
||||
enc.wf(strconv.FormatInt(rv.Int(), 10))
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16,
|
||||
reflect.Uint32, reflect.Uint64:
|
||||
enc.wf(strconv.FormatUint(rv.Uint(), 10))
|
||||
case reflect.Float32:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
|
||||
case reflect.Float64:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
|
||||
case reflect.Array, reflect.Slice:
|
||||
enc.eArrayOrSliceElement(rv)
|
||||
case reflect.Interface:
|
||||
case reflect.Ptr:
|
||||
enc.eElement(rv.Elem())
|
||||
return
|
||||
case reflect.String:
|
||||
enc.writeQuoted(rv.String())
|
||||
case reflect.Bool:
|
||||
enc.wf(strconv.FormatBool(rv.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
enc.wf(strconv.FormatInt(rv.Int(), 10))
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
enc.wf(strconv.FormatUint(rv.Uint(), 10))
|
||||
case reflect.Float32:
|
||||
f := rv.Float()
|
||||
if math.IsNaN(f) {
|
||||
enc.wf("nan")
|
||||
} else if math.IsInf(f, 0) {
|
||||
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
|
||||
} else {
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
|
||||
}
|
||||
case reflect.Float64:
|
||||
f := rv.Float()
|
||||
if math.IsNaN(f) {
|
||||
enc.wf("nan")
|
||||
} else if math.IsInf(f, 0) {
|
||||
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
|
||||
} else {
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
|
||||
}
|
||||
case reflect.Array, reflect.Slice:
|
||||
enc.eArrayOrSliceElement(rv)
|
||||
case reflect.Struct:
|
||||
enc.eStruct(nil, rv, true)
|
||||
case reflect.Map:
|
||||
enc.eMap(nil, rv, true)
|
||||
case reflect.Interface:
|
||||
enc.eElement(rv.Elem())
|
||||
default:
|
||||
panic(e("unexpected primitive type: %s", rv.Kind()))
|
||||
encPanic(fmt.Errorf("unexpected type: %T", rv.Interface()))
|
||||
}
|
||||
}
|
||||
|
||||
// By the TOML spec, all floats must have a decimal with at least one
|
||||
// number on either side.
|
||||
// By the TOML spec, all floats must have a decimal with at least one number on
|
||||
// either side.
|
||||
func floatAddDecimal(fstr string) string {
|
||||
if !strings.Contains(fstr, ".") {
|
||||
return fstr + ".0"
|
||||
@@ -205,14 +317,14 @@ func floatAddDecimal(fstr string) string {
|
||||
}
|
||||
|
||||
func (enc *Encoder) writeQuoted(s string) {
|
||||
enc.wf("\"%s\"", quotedReplacer.Replace(s))
|
||||
enc.wf("\"%s\"", dblQuotedReplacer.Replace(s))
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
||||
length := rv.Len()
|
||||
enc.wf("[")
|
||||
for i := 0; i < length; i++ {
|
||||
elem := rv.Index(i)
|
||||
elem := eindirect(rv.Index(i))
|
||||
enc.eElement(elem)
|
||||
if i != length-1 {
|
||||
enc.wf(", ")
|
||||
@@ -226,44 +338,43 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
|
||||
encPanic(errNoKey)
|
||||
}
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
trv := rv.Index(i)
|
||||
trv := eindirect(rv.Index(i))
|
||||
if isNil(trv) {
|
||||
continue
|
||||
}
|
||||
panicIfInvalidKey(key)
|
||||
enc.newline()
|
||||
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
|
||||
enc.wf("%s[[%s]]", enc.indentStr(key), key)
|
||||
enc.newline()
|
||||
enc.eMapOrStruct(key, trv)
|
||||
enc.eMapOrStruct(key, trv, false)
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
|
||||
panicIfInvalidKey(key)
|
||||
if len(key) == 1 {
|
||||
// Output an extra newline between top-level tables.
|
||||
// (The newline isn't written if nothing else has been written though.)
|
||||
enc.newline()
|
||||
}
|
||||
if len(key) > 0 {
|
||||
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
|
||||
enc.wf("%s[%s]", enc.indentStr(key), key)
|
||||
enc.newline()
|
||||
}
|
||||
enc.eMapOrStruct(key, rv)
|
||||
enc.eMapOrStruct(key, rv, false)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
|
||||
switch rv := eindirect(rv); rv.Kind() {
|
||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
|
||||
switch rv.Kind() {
|
||||
case reflect.Map:
|
||||
enc.eMap(key, rv)
|
||||
enc.eMap(key, rv, inline)
|
||||
case reflect.Struct:
|
||||
enc.eStruct(key, rv)
|
||||
enc.eStruct(key, rv, inline)
|
||||
default:
|
||||
// Should never happen?
|
||||
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
|
||||
func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
||||
rt := rv.Type()
|
||||
if rt.Key().Kind() != reflect.String {
|
||||
encPanic(errNonString)
|
||||
@@ -274,118 +385,179 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value) {
|
||||
var mapKeysDirect, mapKeysSub []string
|
||||
for _, mapKey := range rv.MapKeys() {
|
||||
k := mapKey.String()
|
||||
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
|
||||
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
|
||||
mapKeysSub = append(mapKeysSub, k)
|
||||
} else {
|
||||
mapKeysDirect = append(mapKeysDirect, k)
|
||||
}
|
||||
}
|
||||
|
||||
var writeMapKeys = func(mapKeys []string) {
|
||||
var writeMapKeys = func(mapKeys []string, trailC bool) {
|
||||
sort.Strings(mapKeys)
|
||||
for _, mapKey := range mapKeys {
|
||||
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
|
||||
if isNil(mrv) {
|
||||
// Don't write anything for nil fields.
|
||||
for i, mapKey := range mapKeys {
|
||||
val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
|
||||
if isNil(val) {
|
||||
continue
|
||||
}
|
||||
enc.encode(key.add(mapKey), mrv)
|
||||
|
||||
if inline {
|
||||
enc.writeKeyValue(Key{mapKey}, val, true)
|
||||
if trailC || i != len(mapKeys)-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
} else {
|
||||
enc.encode(key.add(mapKey), val)
|
||||
}
|
||||
}
|
||||
}
|
||||
writeMapKeys(mapKeysDirect)
|
||||
writeMapKeys(mapKeysSub)
|
||||
|
||||
if inline {
|
||||
enc.wf("{")
|
||||
}
|
||||
writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
|
||||
writeMapKeys(mapKeysSub, false)
|
||||
if inline {
|
||||
enc.wf("}")
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
|
||||
const is32Bit = (32 << (^uint(0) >> 63)) == 32
|
||||
|
||||
func pointerTo(t reflect.Type) reflect.Type {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
return pointerTo(t.Elem())
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
// Write keys for fields directly under this key first, because if we write
|
||||
// a field that creates a new table, then all keys under it will be in that
|
||||
// a field that creates a new table then all keys under it will be in that
|
||||
// table (not the one we're writing here).
|
||||
rt := rv.Type()
|
||||
var fieldsDirect, fieldsSub [][]int
|
||||
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
|
||||
//
|
||||
// Fields is a [][]int: for fieldsDirect this always has one entry (the
|
||||
// struct index). For fieldsSub it contains two entries: the parent field
|
||||
// index from tv, and the field indexes for the fields of the sub.
|
||||
var (
|
||||
rt = rv.Type()
|
||||
fieldsDirect, fieldsSub [][]int
|
||||
addFields func(rt reflect.Type, rv reflect.Value, start []int)
|
||||
)
|
||||
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
f := rt.Field(i)
|
||||
// skip unexported fields
|
||||
if f.PkgPath != "" && !f.Anonymous {
|
||||
isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct
|
||||
if f.PkgPath != "" && !isEmbed { /// Skip unexported fields.
|
||||
continue
|
||||
}
|
||||
frv := rv.Field(i)
|
||||
if f.Anonymous {
|
||||
t := f.Type
|
||||
switch t.Kind() {
|
||||
case reflect.Struct:
|
||||
// Treat anonymous struct fields with
|
||||
// tag names as though they are not
|
||||
// anonymous, like encoding/json does.
|
||||
if getOptions(f.Tag).name == "" {
|
||||
addFields(t, frv, f.Index)
|
||||
continue
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if t.Elem().Kind() == reflect.Struct &&
|
||||
getOptions(f.Tag).name == "" {
|
||||
if !frv.IsNil() {
|
||||
addFields(t.Elem(), frv.Elem(), f.Index)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Fall through to the normal field encoding logic below
|
||||
// for non-struct anonymous fields.
|
||||
opts := getOptions(f.Tag)
|
||||
if opts.skip {
|
||||
continue
|
||||
}
|
||||
|
||||
frv := eindirect(rv.Field(i))
|
||||
|
||||
// Treat anonymous struct fields with tag names as though they are
|
||||
// not anonymous, like encoding/json does.
|
||||
//
|
||||
// Non-struct anonymous fields use the normal encoding logic.
|
||||
if isEmbed {
|
||||
if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct {
|
||||
addFields(frv.Type(), frv, append(start, f.Index...))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if typeIsHash(tomlTypeOfGo(frv)) {
|
||||
if typeIsTable(tomlTypeOfGo(frv)) {
|
||||
fieldsSub = append(fieldsSub, append(start, f.Index...))
|
||||
} else {
|
||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
||||
// Copy so it works correct on 32bit archs; not clear why this
|
||||
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
|
||||
// This also works fine on 64bit, but 32bit archs are somewhat
|
||||
// rare and this is a wee bit faster.
|
||||
if is32Bit {
|
||||
copyStart := make([]int, len(start))
|
||||
copy(copyStart, start)
|
||||
fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
|
||||
} else {
|
||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
addFields(rt, rv, nil)
|
||||
|
||||
var writeFields = func(fields [][]int) {
|
||||
writeFields := func(fields [][]int) {
|
||||
for _, fieldIndex := range fields {
|
||||
sft := rt.FieldByIndex(fieldIndex)
|
||||
sf := rv.FieldByIndex(fieldIndex)
|
||||
if isNil(sf) {
|
||||
// Don't write anything for nil fields.
|
||||
fieldType := rt.FieldByIndex(fieldIndex)
|
||||
fieldVal := eindirect(rv.FieldByIndex(fieldIndex))
|
||||
|
||||
if isNil(fieldVal) { /// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
|
||||
opts := getOptions(sft.Tag)
|
||||
opts := getOptions(fieldType.Tag)
|
||||
if opts.skip {
|
||||
continue
|
||||
}
|
||||
keyName := sft.Name
|
||||
keyName := fieldType.Name
|
||||
if opts.name != "" {
|
||||
keyName = opts.name
|
||||
}
|
||||
if opts.omitempty && isEmpty(sf) {
|
||||
if opts.omitempty && isEmpty(fieldVal) {
|
||||
continue
|
||||
}
|
||||
if opts.omitzero && isZero(sf) {
|
||||
if opts.omitzero && isZero(fieldVal) {
|
||||
continue
|
||||
}
|
||||
|
||||
enc.encode(key.add(keyName), sf)
|
||||
if inline {
|
||||
enc.writeKeyValue(Key{keyName}, fieldVal, true)
|
||||
if fieldIndex[0] != len(fields)-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
} else {
|
||||
enc.encode(key.add(keyName), fieldVal)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if inline {
|
||||
enc.wf("{")
|
||||
}
|
||||
writeFields(fieldsDirect)
|
||||
writeFields(fieldsSub)
|
||||
if inline {
|
||||
enc.wf("}")
|
||||
}
|
||||
}
|
||||
|
||||
// tomlTypeName returns the TOML type name of the Go value's type. It is
|
||||
// used to determine whether the types of array elements are mixed (which is
|
||||
// forbidden). If the Go value is nil, then it is illegal for it to be an array
|
||||
// element, and valueIsNil is returned as true.
|
||||
|
||||
// Returns the TOML type of a Go value. The type may be `nil`, which means
|
||||
// no concrete TOML type could be found.
|
||||
// tomlTypeOfGo returns the TOML type name of the Go value's type.
|
||||
//
|
||||
// It is used to determine whether the types of array elements are mixed (which
|
||||
// is forbidden). If the Go value is nil, then it is illegal for it to be an
|
||||
// array element, and valueIsNil is returned as true.
|
||||
//
|
||||
// The type may be `nil`, which means no concrete TOML type could be found.
|
||||
func tomlTypeOfGo(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() {
	return nil
}

if rv.Kind() == reflect.Struct {
	if rv.Type() == timeType {
		return tomlDatetime
	}
	if isMarshaler(rv) {
		return tomlString
	}
	return tomlHash
}

if isMarshaler(rv) {
	return tomlString
}

switch rv.Kind() {
case reflect.Bool:
	return tomlBool
@@ -397,7 +569,7 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
case reflect.Float32, reflect.Float64:
	return tomlFloat
case reflect.Array, reflect.Slice:
	if typeEqual(tomlHash, tomlArrayType(rv)) {
	if isTableArray(rv) {
		return tomlArrayHash
	}
	return tomlArray
@@ -407,54 +579,35 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
	return tomlString
case reflect.Map:
	return tomlHash
case reflect.Struct:
	switch rv.Interface().(type) {
	case time.Time:
		return tomlDatetime
	case TextMarshaler:
		return tomlString
	default:
		return tomlHash
	}
default:
	panic("unexpected reflect.Kind: " + rv.Kind().String())
	encPanic(errors.New("unsupported type: " + rv.Kind().String()))
	panic("unreachable")
}
}

// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
		return nil
	}
	firstType := tomlTypeOfGo(rv.Index(0))
	if firstType == nil {
		encPanic(errArrayNilElement)
func isMarshaler(rv reflect.Value) bool {
	return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml)
}

// isTableArray reports if all entries in the array or slice are a table.
func isTableArray(arr reflect.Value) bool {
	if isNil(arr) || !arr.IsValid() || arr.Len() == 0 {
		return false
	}

	rvlen := rv.Len()
	for i := 1; i < rvlen; i++ {
		elem := rv.Index(i)
		switch elemType := tomlTypeOfGo(elem); {
		case elemType == nil:
	ret := true
	for i := 0; i < arr.Len(); i++ {
		tt := tomlTypeOfGo(eindirect(arr.Index(i)))
		// Don't allow nil.
		if tt == nil {
			encPanic(errArrayNilElement)
		case !typeEqual(firstType, elemType):
			encPanic(errArrayMixedElementTypes)
		}

		if ret && !typeEqual(tomlHash, tt) {
			ret = false
		}
	}
	// If we have a nested array, then we must make sure that the nested
	// array contains ONLY primitives.
	// This checks arbitrarily nested arrays.
	if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
		nest := tomlArrayType(eindirect(rv.Index(0)))
		if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
			encPanic(errArrayNoTable)
		}
	}
	return firstType
	return ret
}

type tagOptions struct {
@@ -499,6 +652,8 @@ func isEmpty(rv reflect.Value) bool {
	switch rv.Kind() {
	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
		return rv.Len() == 0
	case reflect.Struct:
		return reflect.Zero(rv.Type()).Interface() == rv.Interface()
	case reflect.Bool:
		return !rv.Bool()
	}
@@ -511,18 +666,32 @@ func (enc *Encoder) newline() {
	}
}

func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
// Write a key/value pair:
//
//     key = <any value>
//
// This is also used for "k = v" in inline tables; so something like this will
// be written in three calls:
//
//     ┌────────────────────┐
//     │          ┌───┐  ┌─────┐│
//     v          v   v  v     vv
//     key = {k = v, k2 = v2}
//
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
	if len(key) == 0 {
		encPanic(errNoKey)
	}
	panicIfInvalidKey(key)
	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
	enc.eElement(val)
	enc.newline()
	if !inline {
		enc.newline()
	}
}

func (enc *Encoder) wf(format string, v ...interface{}) {
	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
	_, err := fmt.Fprintf(enc.w, format, v...)
	if err != nil {
		encPanic(err)
	}
	enc.hasWritten = true
@@ -536,13 +705,25 @@ func encPanic(err error) {
	panic(tomlEncodeError{err})
}

// Resolve any level of pointers to the actual value (e.g. **string → string).
func eindirect(v reflect.Value) reflect.Value {
	switch v.Kind() {
	case reflect.Ptr, reflect.Interface:
		return eindirect(v.Elem())
	default:
	if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
		if isMarshaler(v) {
			return v
		}
		if v.CanAddr() { /// Special case for marshalers; see #358.
			if pv := v.Addr(); isMarshaler(pv) {
				return pv
			}
		}
		return v
	}

	if v.IsNil() {
		return v
	}

	return eindirect(v.Elem())
}

func isNil(rv reflect.Value) bool {
@@ -553,16 +734,3 @@ func isNil(rv reflect.Value) bool {
		return false
	}
}

func panicIfInvalidKey(key Key) {
	for _, k := range key {
		if len(k) == 0 {
			encPanic(e("Key '%s' is not a valid table name. Key names "+
				"cannot be empty.", key.maybeQuotedAll()))
		}
	}
}

func isValidKeyName(s string) bool {
	return len(s) != 0
}
19 src/runtime/vendor/github.com/BurntSushi/toml/encoding_types.go (generated, vendored)
@@ -1,19 +0,0 @@
// +build go1.2

package toml

// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.

import (
	"encoding"
)

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler
18 src/runtime/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go (generated, vendored)
@@ -1,18 +0,0 @@
// +build !go1.2

package toml

// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
	MarshalText() (text []byte, err error)
}

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
	UnmarshalText(text []byte) error
}
276 src/runtime/vendor/github.com/BurntSushi/toml/error.go (generated, vendored, new file)
@@ -0,0 +1,276 @@
package toml

import (
	"fmt"
	"strings"
)

// ParseError is returned when there is an error parsing the TOML syntax.
//
// For example invalid syntax, duplicate keys, etc.
//
// In addition to the error message itself, you can also print detailed location
// information with context by using ErrorWithPosition():
//
//     toml: error: Key 'fruit' was already created and cannot be used as an array.
//
//     At line 4, column 2-7:
//
//           2 | fruit = []
//           3 |
//           4 | [[fruit]] # Not allowed
//               ^^^^^
//
// Furthermore, the ErrorWithUsage() can be used to print the above with some
// more detailed usage guidance:
//
//     toml: error: newlines not allowed within inline tables
//
//     At line 1, column 18:
//
//           1 | x = [{ key = 42 #
//                                ^
//
//     Error help:
//
//       Inline tables must always be on a single line:
//
//           table = {key = 42, second = 43}
//
//       It is invalid to split them over multiple lines like so:
//
//           # INVALID
//           table = {
//               key = 42,
//               second = 43
//           }
//
//       Use regular tables for this:
//
//           [table]
//           key = 42
//           second = 43
type ParseError struct {
	Message  string   // Short technical message.
	Usage    string   // Longer message with usage guidance; may be blank.
	Position Position // Position of the error
	LastKey  string   // Last parsed key, may be blank.
	Line     int      // Line the error occurred. Deprecated: use Position.

	err   error
	input string
}

// Position of an error.
type Position struct {
	Line  int // Line number, starting at 1.
	Start int // Start of error, as byte offset starting at 0.
	Len   int // Length in bytes.
}
|
||||
func (pe ParseError) Error() string {
|
||||
msg := pe.Message
|
||||
if msg == "" { // Error from errorf()
|
||||
msg = pe.err.Error()
|
||||
}
|
||||
|
||||
if pe.LastKey == "" {
|
||||
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
|
||||
}
|
||||
return fmt.Sprintf("toml: line %d (last key %q): %s",
|
||||
pe.Position.Line, pe.LastKey, msg)
|
||||
}
|
||||
|
||||
// ErrorWithPosition() returns the error with detailed location context.
//
// See the documentation on ParseError.
|
||||
func (pe ParseError) ErrorWithPosition() string {
|
||||
if pe.input == "" { // Should never happen, but just in case.
|
||||
return pe.Error()
|
||||
}
|
||||
|
||||
var (
|
||||
lines = strings.Split(pe.input, "\n")
|
||||
col = pe.column(lines)
|
||||
b = new(strings.Builder)
|
||||
)
|
||||
|
||||
msg := pe.Message
|
||||
if msg == "" {
|
||||
msg = pe.err.Error()
|
||||
}
|
||||
|
||||
// TODO: don't show control characters as literals? This may not show up
|
||||
// well everywhere.
|
||||
|
||||
if pe.Position.Len == 1 {
|
||||
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
|
||||
msg, pe.Position.Line, col+1)
|
||||
} else {
|
||||
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
|
||||
msg, pe.Position.Line, col, col+pe.Position.Len)
|
||||
}
|
||||
if pe.Position.Line > 2 {
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
|
||||
}
|
||||
if pe.Position.Line > 1 {
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
|
||||
}
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
|
||||
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// ErrorWithUsage() returns the error with detailed location context and usage
|
||||
// guidance.
|
||||
//
|
||||
// See the documentation on ParseError.
|
||||
func (pe ParseError) ErrorWithUsage() string {
|
||||
m := pe.ErrorWithPosition()
|
||||
if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
|
||||
lines := strings.Split(strings.TrimSpace(u.Usage()), "\n")
|
||||
for i := range lines {
|
||||
if lines[i] != "" {
|
||||
lines[i] = " " + lines[i]
|
||||
}
|
||||
}
|
||||
return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n"
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (pe ParseError) column(lines []string) int {
|
||||
var pos, col int
|
||||
for i := range lines {
|
||||
ll := len(lines[i]) + 1 // +1 for the removed newline
|
||||
if pos+ll >= pe.Position.Start {
|
||||
col = pe.Position.Start - pos
|
||||
if col < 0 { // Should never happen, but just in case.
|
||||
col = 0
|
||||
}
|
||||
break
|
||||
}
|
||||
pos += ll
|
||||
}
|
||||
|
||||
return col
|
||||
}
|
||||
|
||||
type (
|
||||
errLexControl struct{ r rune }
|
||||
errLexEscape struct{ r rune }
|
||||
errLexUTF8 struct{ b byte }
|
||||
errLexInvalidNum struct{ v string }
|
||||
errLexInvalidDate struct{ v string }
|
||||
errLexInlineTableNL struct{}
|
||||
errLexStringNL struct{}
|
||||
errParseRange struct {
|
||||
i interface{} // int or float
|
||||
size string // "int64", "uint16", etc.
|
||||
}
|
||||
errParseDuration struct{ d string }
|
||||
)
|
||||
|
||||
func (e errLexControl) Error() string {
|
||||
return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r)
|
||||
}
|
||||
func (e errLexControl) Usage() string { return "" }
|
||||
|
||||
func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) }
|
||||
func (e errLexEscape) Usage() string { return usageEscape }
|
||||
func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
|
||||
func (e errLexUTF8) Usage() string { return "" }
|
||||
func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) }
|
||||
func (e errLexInvalidNum) Usage() string { return "" }
|
||||
func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) }
|
||||
func (e errLexInvalidDate) Usage() string { return "" }
|
||||
func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
|
||||
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
|
||||
func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
|
||||
func (e errLexStringNL) Usage() string { return usageStringNewline }
|
||||
func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
|
||||
func (e errParseRange) Usage() string { return usageIntOverflow }
|
||||
func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
|
||||
func (e errParseDuration) Usage() string { return usageDuration }
|
||||
|
||||
const usageEscape = `
|
||||
A '\' inside a "-delimited string is interpreted as an escape character.
|
||||
|
||||
The following escape sequences are supported:
|
||||
\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
|
||||
|
||||
To prevent a '\' from being recognized as an escape character, use either:
|
||||
|
||||
- a ' or '''-delimited string; escape characters aren't processed in them; or
|
||||
- write two backslashes to get a single backslash: '\\'.
|
||||
|
||||
If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
|
||||
instead of '\' will usually also work: "C:/Users/martin".
|
||||
`
|
||||
|
||||
const usageInlineNewline = `
|
||||
Inline tables must always be on a single line:
|
||||
|
||||
table = {key = 42, second = 43}
|
||||
|
||||
It is invalid to split them over multiple lines like so:
|
||||
|
||||
# INVALID
|
||||
table = {
|
||||
key = 42,
|
||||
second = 43
|
||||
}
|
||||
|
||||
Use regular tables for this:
|
||||
|
||||
[table]
|
||||
key = 42
|
||||
second = 43
|
||||
`
|
||||
|
||||
const usageStringNewline = `
|
||||
Strings must always be on a single line, and cannot span more than one line:
|
||||
|
||||
# INVALID
|
||||
string = "Hello,
|
||||
world!"
|
||||
|
||||
Instead use """ or ''' to split strings over multiple lines:
|
||||
|
||||
string = """Hello,
|
||||
world!"""
|
||||
`
|
||||
|
||||
const usageIntOverflow = `
|
||||
This number is too large; this may be an error in the TOML, but it can also be a
|
||||
bug in the program that uses too small of an integer.
|
||||
|
||||
The maximum and minimum values are:
|
||||
|
||||
size │ lowest │ highest
|
||||
───────┼────────────────┼──────────
|
||||
int8 │ -128 │ 127
|
||||
int16 │ -32,768 │ 32,767
|
||||
int32 │ -2,147,483,648 │ 2,147,483,647
|
||||
int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷
|
||||
uint8 │ 0 │ 255
|
||||
uint16 │ 0 │ 65535
|
||||
uint32 │ 0 │ 4294967295
|
||||
uint64 │ 0 │ 1.8 × 10¹⁸
|
||||
|
||||
int refers to int32 on 32-bit systems and int64 on 64-bit systems.
|
||||
`
|
||||
|
||||
const usageDuration = `
|
||||
A duration must be given as "number<unit>", without any spaces. Valid units are:

ns nanoseconds (billionth of a second)
us, µs microseconds (millionth of a second)
ms milliseconds (thousandth of a second)
|
||||
s seconds
|
||||
m minutes
|
||||
h hours
|
||||
|
||||
You can combine multiple units; for example "5m10s" for 5 minutes and 10
|
||||
seconds.
|
||||
`
|
||||
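The ParseError type and its ErrorWithPosition()/ErrorWithUsage() helpers introduced above can be exercised directly from decoding code. The following is a minimal sketch of how a caller might surface the richer diagnostics; it is an illustrative example written for this document, not part of the vendored diff, and it assumes the standard github.com/BurntSushi/toml Decode entry point.

package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	// A newline inside an inline table is invalid TOML and should trigger
	// the errLexInlineTableNL error defined above.
	_, err := toml.Decode("x = [{ key = 42\n", &v)

	var perr toml.ParseError
	if errors.As(err, &perr) {
		// Prints the short message, the annotated source line, and the
		// usage guidance (usageInlineNewline) when one is attached.
		fmt.Println(perr.ErrorWithUsage())
	} else if err != nil {
		fmt.Println(err)
	}
}

If only the location context is wanted, perr.ErrorWithPosition() prints the annotated line without the usage help.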
3 src/runtime/vendor/github.com/BurntSushi/toml/go.mod (generated, vendored, new file)
@@ -0,0 +1,3 @@
module github.com/BurntSushi/toml

go 1.16
36 src/runtime/vendor/github.com/BurntSushi/toml/internal/tz.go (generated, vendored, new file)
@@ -0,0 +1,36 @@
package internal

import "time"

// Timezones used for local datetime, date, and time TOML types.
//
// The exact way times and dates without a timezone should be interpreted is not
// well-defined in the TOML specification and left to the implementation. These
// default to the current local timezone offset of the computer, but this can be
// changed by changing these variables before decoding.
//
// TODO:
// Ideally we'd like to offer people the ability to configure the used timezone
// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
// tricky: the reason we use three different variables for this is to support
// round-tripping – without these specific TZ names we wouldn't know which
// format to use.
//
// There isn't a good way to encode this right now though, and passing this sort
// of information also ties in to various related issues such as string format
// encoding, encoding of comments, etc.
//
// So, for the time being, just put this in internal until we can write a good
// comprehensive API for doing all of this.
//
// The reason they're exported is that they're referred to from e.g.
// internal/tag.
//
// Note that this behaviour is valid according to the TOML spec as the exact
// behaviour is left up to implementations.
var (
	localOffset   = func() int { _, o := time.Now().Zone(); return o }()
	LocalDatetime = time.FixedZone("datetime-local", localOffset)
	LocalDate     = time.FixedZone("date-local", localOffset)
	LocalTime     = time.FixedZone("time-local", localOffset)
)
|
||||
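To see how these synthetic zones show up in decoded values, here is a small sketch (an illustration added for this document, not vendored code) that decodes a TOML local date and inspects the location name attached by the variables above; the exact behaviour is assumed from the comments in internal/tz.go.

package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct {
		Released time.Time `toml:"released"`
	}
	// A date without a time or offset is a TOML "local date"; the decoder is
	// expected to attach the fixed "date-local" zone defined above.
	if _, err := toml.Decode("released = 2021-05-01", &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Released.Location()) // expected to print "date-local"
}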
788 src/runtime/vendor/github.com/BurntSushi/toml/lex.go (generated, vendored; file diff suppressed because it is too large)
@@ -1,33 +1,40 @@
|
||||
package toml
|
||||
|
||||
import "strings"
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MetaData allows access to meta information about TOML data that may not
|
||||
// be inferrable via reflection. In particular, whether a key has been defined
|
||||
// and the TOML type of a key.
|
||||
// MetaData allows access to meta information about TOML data that's not
|
||||
// accessible otherwise.
|
||||
//
|
||||
// It allows checking if a key is defined in the TOML data, whether any keys
|
||||
// were undecoded, and the TOML type of a key.
|
||||
type MetaData struct {
|
||||
mapping map[string]interface{}
|
||||
types map[string]tomlType
|
||||
keys []Key
|
||||
decoded map[string]bool
|
||||
context Key // Used only during decoding.
|
||||
|
||||
keyInfo map[string]keyInfo
|
||||
mapping map[string]interface{}
|
||||
keys []Key
|
||||
decoded map[string]struct{}
|
||||
data []byte // Input file; for errors.
|
||||
}
|
||||
|
||||
// IsDefined returns true if the key given exists in the TOML data. The key
|
||||
// should be specified hierarchially. e.g.,
|
||||
// IsDefined reports if the key exists in the TOML data.
|
||||
//
|
||||
// // access the TOML key 'a.b.c'
|
||||
// IsDefined("a", "b", "c")
|
||||
// The key should be specified hierarchically, for example to access the TOML
|
||||
// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive.
|
||||
//
|
||||
// IsDefined will return false if an empty key given. Keys are case sensitive.
|
||||
// Returns false for an empty key.
|
||||
func (md *MetaData) IsDefined(key ...string) bool {
|
||||
if len(key) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var hash map[string]interface{}
|
||||
var ok bool
|
||||
var hashOrVal interface{} = md.mapping
|
||||
var (
|
||||
hash map[string]interface{}
|
||||
ok bool
|
||||
hashOrVal interface{} = md.mapping
|
||||
)
|
||||
for _, k := range key {
|
||||
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
|
||||
return false
|
||||
@@ -41,58 +48,20 @@ func (md *MetaData) IsDefined(key ...string) bool {
|
||||
|
||||
// Type returns a string representation of the type of the key specified.
|
||||
//
|
||||
// Type will return the empty string if given an empty key or a key that
|
||||
// does not exist. Keys are case sensitive.
|
||||
// Type will return the empty string if given an empty key or a key that does
|
||||
// not exist. Keys are case sensitive.
|
||||
func (md *MetaData) Type(key ...string) string {
|
||||
fullkey := strings.Join(key, ".")
|
||||
if typ, ok := md.types[fullkey]; ok {
|
||||
return typ.typeString()
|
||||
if ki, ok := md.keyInfo[Key(key).String()]; ok {
|
||||
return ki.tomlType.typeString()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
|
||||
// to get values of this type.
|
||||
type Key []string
|
||||
|
||||
func (k Key) String() string {
|
||||
return strings.Join(k, ".")
|
||||
}
|
||||
|
||||
func (k Key) maybeQuotedAll() string {
|
||||
var ss []string
|
||||
for i := range k {
|
||||
ss = append(ss, k.maybeQuoted(i))
|
||||
}
|
||||
return strings.Join(ss, ".")
|
||||
}
|
||||
|
||||
func (k Key) maybeQuoted(i int) string {
|
||||
quote := false
|
||||
for _, c := range k[i] {
|
||||
if !isBareKeyChar(c) {
|
||||
quote = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if quote {
|
||||
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
|
||||
}
|
||||
return k[i]
|
||||
}
|
||||
|
||||
func (k Key) add(piece string) Key {
|
||||
newKey := make(Key, len(k)+1)
|
||||
copy(newKey, k)
|
||||
newKey[len(k)] = piece
|
||||
return newKey
|
||||
}
|
||||
|
||||
// Keys returns a slice of every key in the TOML data, including key groups.
|
||||
// Each key is itself a slice, where the first element is the top of the
|
||||
// hierarchy and the last is the most specific.
|
||||
//
|
||||
// The list will have the same order as the keys appeared in the TOML data.
|
||||
// Each key is itself a slice, where the first element is the top of the
|
||||
// hierarchy and the last is the most specific. The list will have the same
|
||||
// order as the keys appeared in the TOML data.
|
||||
//
|
||||
// All keys returned are non-empty.
|
||||
func (md *MetaData) Keys() []Key {
|
||||
@@ -113,9 +82,40 @@ func (md *MetaData) Keys() []Key {
|
||||
func (md *MetaData) Undecoded() []Key {
|
||||
undecoded := make([]Key, 0, len(md.keys))
|
||||
for _, key := range md.keys {
|
||||
if !md.decoded[key.String()] {
|
||||
if _, ok := md.decoded[key.String()]; !ok {
|
||||
undecoded = append(undecoded, key)
|
||||
}
|
||||
}
|
||||
return undecoded
|
||||
}
|
||||
|
||||
// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
|
||||
// values of this type.
|
||||
type Key []string
|
||||
|
||||
func (k Key) String() string {
|
||||
ss := make([]string, len(k))
|
||||
for i := range k {
|
||||
ss[i] = k.maybeQuoted(i)
|
||||
}
|
||||
return strings.Join(ss, ".")
|
||||
}
|
||||
|
||||
func (k Key) maybeQuoted(i int) string {
|
||||
if k[i] == "" {
|
||||
return `""`
|
||||
}
|
||||
for _, c := range k[i] {
|
||||
if !isBareKeyChar(c) {
|
||||
return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
|
||||
}
|
||||
}
|
||||
return k[i]
|
||||
}
|
||||
|
||||
func (k Key) add(piece string) Key {
|
||||
newKey := make(Key, len(k)+1)
|
||||
copy(newKey, k)
|
||||
newKey[len(k)] = piece
|
||||
return newKey
|
||||
}
|
||||
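The reworked MetaData API above (IsDefined, Type, Keys, Undecoded) is easiest to see in a short decode round trip. The sketch below is an illustrative example for this document rather than part of the vendored diff, and the printed values are what the documentation above suggests, not verified output.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct {
		Name string `toml:"name"`
	}
	doc := "name = \"kata\"\nextra = 1\n"

	md, err := toml.Decode(doc, &cfg)
	if err != nil {
		panic(err)
	}

	fmt.Println(md.IsDefined("name")) // true: the key exists in the TOML data
	fmt.Println(md.Type("extra"))     // "Integer": the TOML type of that key
	fmt.Println(md.Undecoded())       // [extra]: keys present in the input but not used by cfg
}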
695
src/runtime/vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
695
src/runtime/vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
@@ -5,54 +5,69 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/BurntSushi/toml/internal"
|
||||
)
|
||||
|
||||
type parser struct {
|
||||
mapping map[string]interface{}
|
||||
types map[string]tomlType
|
||||
lx *lexer
|
||||
lx *lexer
|
||||
context Key // Full key for the current hash in scope.
|
||||
currentKey string // Base key name for everything except hashes.
|
||||
pos Position // Current position in the TOML file.
|
||||
|
||||
// A list of keys in the order that they appear in the TOML data.
|
||||
ordered []Key
|
||||
ordered []Key // List of keys in the order that they appear in the TOML data.
|
||||
|
||||
// the full key for the current hash in scope
|
||||
context Key
|
||||
|
||||
// the base key name for everything except hashes
|
||||
currentKey string
|
||||
|
||||
// rough approximation of line number
|
||||
approxLine int
|
||||
|
||||
// A map of 'key.group.names' to whether they were created implicitly.
|
||||
implicits map[string]bool
|
||||
keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
|
||||
mapping map[string]interface{} // Map keyname → key value.
|
||||
implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
|
||||
}
|
||||
|
||||
type parseError string
|
||||
|
||||
func (pe parseError) Error() string {
|
||||
return string(pe)
|
||||
type keyInfo struct {
|
||||
pos Position
|
||||
tomlType tomlType
|
||||
}
|
||||
|
||||
func parse(data string) (p *parser, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
var ok bool
|
||||
if err, ok = r.(parseError); ok {
|
||||
if pErr, ok := r.(ParseError); ok {
|
||||
pErr.input = data
|
||||
err = pErr
|
||||
return
|
||||
}
|
||||
panic(r)
|
||||
}
|
||||
}()
|
||||
|
||||
// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
|
||||
// which mangles stuff.
|
||||
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
|
||||
data = data[2:]
|
||||
}
|
||||
|
||||
// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
|
||||
// file (second byte in surrogate pair being NULL). Again, do this here to
|
||||
// avoid having to deal with UTF-8/16 stuff in the lexer.
|
||||
ex := 6
|
||||
if len(data) < 6 {
|
||||
ex = len(data)
|
||||
}
|
||||
if i := strings.IndexRune(data[:ex], 0); i > -1 {
|
||||
return nil, ParseError{
|
||||
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
|
||||
Position: Position{Line: 1, Start: i, Len: 1},
|
||||
Line: 1,
|
||||
input: data,
|
||||
}
|
||||
}
|
||||
|
||||
p = &parser{
|
||||
keyInfo: make(map[string]keyInfo),
|
||||
mapping: make(map[string]interface{}),
|
||||
types: make(map[string]tomlType),
|
||||
lx: lex(data),
|
||||
ordered: make([]Key, 0),
|
||||
implicits: make(map[string]bool),
|
||||
implicits: make(map[string]struct{}),
|
||||
}
|
||||
for {
|
||||
item := p.next()
|
||||
@@ -65,20 +80,57 @@ func parse(data string) (p *parser, err error) {
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (p *parser) panicErr(it item, err error) {
|
||||
panic(ParseError{
|
||||
err: err,
|
||||
Position: it.pos,
|
||||
Line: it.pos.Len,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
}
|
||||
|
||||
func (p *parser) panicItemf(it item, format string, v ...interface{}) {
|
||||
panic(ParseError{
|
||||
Message: fmt.Sprintf(format, v...),
|
||||
Position: it.pos,
|
||||
Line: it.pos.Len,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
}
|
||||
|
||||
func (p *parser) panicf(format string, v ...interface{}) {
|
||||
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
|
||||
p.approxLine, p.current(), fmt.Sprintf(format, v...))
|
||||
panic(parseError(msg))
|
||||
panic(ParseError{
|
||||
Message: fmt.Sprintf(format, v...),
|
||||
Position: p.pos,
|
||||
Line: p.pos.Line,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
}
|
||||
|
||||
func (p *parser) next() item {
|
||||
it := p.lx.nextItem()
|
||||
//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val)
|
||||
if it.typ == itemError {
|
||||
p.panicf("%s", it.val)
|
||||
if it.err != nil {
|
||||
panic(ParseError{
|
||||
Position: it.pos,
|
||||
Line: it.pos.Line,
|
||||
LastKey: p.current(),
|
||||
err: it.err,
|
||||
})
|
||||
}
|
||||
|
||||
p.panicItemf(it, "%s", it.val)
|
||||
}
|
||||
return it
|
||||
}
|
||||
|
||||
func (p *parser) nextPos() item {
|
||||
it := p.next()
|
||||
p.pos = it.pos
|
||||
return it
|
||||
}
|
||||
|
||||
func (p *parser) bug(format string, v ...interface{}) {
|
||||
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
|
||||
}
|
||||
@@ -97,44 +149,60 @@ func (p *parser) assertEqual(expected, got itemType) {
|
||||
|
||||
func (p *parser) topLevel(item item) {
|
||||
switch item.typ {
|
||||
case itemCommentStart:
|
||||
p.approxLine = item.line
|
||||
case itemCommentStart: // # ..
|
||||
p.expect(itemText)
|
||||
case itemTableStart:
|
||||
kg := p.next()
|
||||
p.approxLine = kg.line
|
||||
case itemTableStart: // [ .. ]
|
||||
name := p.nextPos()
|
||||
|
||||
var key Key
|
||||
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
|
||||
key = append(key, p.keyString(kg))
|
||||
for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
|
||||
key = append(key, p.keyString(name))
|
||||
}
|
||||
p.assertEqual(itemTableEnd, kg.typ)
|
||||
p.assertEqual(itemTableEnd, name.typ)
|
||||
|
||||
p.establishContext(key, false)
|
||||
p.setType("", tomlHash)
|
||||
p.addContext(key, false)
|
||||
p.setType("", tomlHash, item.pos)
|
||||
p.ordered = append(p.ordered, key)
|
||||
case itemArrayTableStart:
|
||||
kg := p.next()
|
||||
p.approxLine = kg.line
|
||||
case itemArrayTableStart: // [[ .. ]]
|
||||
name := p.nextPos()
|
||||
|
||||
var key Key
|
||||
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
|
||||
key = append(key, p.keyString(kg))
|
||||
for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
|
||||
key = append(key, p.keyString(name))
|
||||
}
|
||||
p.assertEqual(itemArrayTableEnd, kg.typ)
|
||||
p.assertEqual(itemArrayTableEnd, name.typ)
|
||||
|
||||
p.establishContext(key, true)
|
||||
p.setType("", tomlArrayHash)
|
||||
p.addContext(key, true)
|
||||
p.setType("", tomlArrayHash, item.pos)
|
||||
p.ordered = append(p.ordered, key)
|
||||
case itemKeyStart:
|
||||
kname := p.next()
|
||||
p.approxLine = kname.line
|
||||
p.currentKey = p.keyString(kname)
|
||||
case itemKeyStart: // key = ..
|
||||
outerContext := p.context
|
||||
/// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
|
||||
k := p.nextPos()
|
||||
var key Key
|
||||
for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
|
||||
key = append(key, p.keyString(k))
|
||||
}
|
||||
p.assertEqual(itemKeyEnd, k.typ)
|
||||
|
||||
val, typ := p.value(p.next())
|
||||
p.setValue(p.currentKey, val)
|
||||
p.setType(p.currentKey, typ)
|
||||
/// The current key is the last part.
|
||||
p.currentKey = key[len(key)-1]
|
||||
|
||||
/// All the other parts (if any) are the context; need to set each part
|
||||
/// as implicit.
|
||||
context := key[:len(key)-1]
|
||||
for i := range context {
|
||||
p.addImplicitContext(append(p.context, context[i:i+1]...))
|
||||
}
|
||||
|
||||
/// Set value.
|
||||
vItem := p.next()
|
||||
val, typ := p.value(vItem, false)
|
||||
p.set(p.currentKey, val, typ, vItem.pos)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
|
||||
/// Remove the context we added (preserving any context from [tbl] lines).
|
||||
p.context = outerContext
|
||||
p.currentKey = ""
|
||||
default:
|
||||
p.bug("Unexpected type at top level: %s", item.typ)
|
||||
@@ -148,180 +216,261 @@ func (p *parser) keyString(it item) string {
|
||||
return it.val
|
||||
case itemString, itemMultilineString,
|
||||
itemRawString, itemRawMultilineString:
|
||||
s, _ := p.value(it)
|
||||
s, _ := p.value(it, false)
|
||||
return s.(string)
|
||||
default:
|
||||
p.bug("Unexpected key type: %s", it.typ)
|
||||
panic("unreachable")
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
var datetimeRepl = strings.NewReplacer(
|
||||
"z", "Z",
|
||||
"t", "T",
|
||||
" ", "T")
|
||||
|
||||
// value translates an expected value from the lexer into a Go value wrapped
|
||||
// as an empty interface.
|
||||
func (p *parser) value(it item) (interface{}, tomlType) {
|
||||
func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
|
||||
switch it.typ {
|
||||
case itemString:
|
||||
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
|
||||
return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
|
||||
case itemMultilineString:
|
||||
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
|
||||
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
|
||||
return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
|
||||
case itemRawString:
|
||||
return it.val, p.typeOfPrimitive(it)
|
||||
case itemRawMultilineString:
|
||||
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
|
||||
case itemInteger:
|
||||
return p.valueInteger(it)
|
||||
case itemFloat:
|
||||
return p.valueFloat(it)
|
||||
case itemBool:
|
||||
switch it.val {
|
||||
case "true":
|
||||
return true, p.typeOfPrimitive(it)
|
||||
case "false":
|
||||
return false, p.typeOfPrimitive(it)
|
||||
default:
|
||||
p.bug("Expected boolean value, but got '%s'.", it.val)
|
||||
}
|
||||
p.bug("Expected boolean value, but got '%s'.", it.val)
|
||||
case itemInteger:
|
||||
if !numUnderscoresOK(it.val) {
|
||||
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
|
||||
it.val)
|
||||
}
|
||||
val := strings.Replace(it.val, "_", "", -1)
|
||||
num, err := strconv.ParseInt(val, 10, 64)
|
||||
if err != nil {
|
||||
// Distinguish integer values. Normally, it'd be a bug if the lexer
|
||||
// provides an invalid integer, but it's possible that the number is
|
||||
// out of range of valid values (which the lexer cannot determine).
|
||||
// So mark the former as a bug but the latter as a legitimate user
|
||||
// error.
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
|
||||
p.panicf("Integer '%s' is out of the range of 64-bit "+
|
||||
"signed integers.", it.val)
|
||||
} else {
|
||||
p.bug("Expected integer value, but got '%s'.", it.val)
|
||||
}
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
case itemFloat:
|
||||
parts := strings.FieldsFunc(it.val, func(r rune) bool {
|
||||
switch r {
|
||||
case '.', 'e', 'E':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
for _, part := range parts {
|
||||
if !numUnderscoresOK(part) {
|
||||
p.panicf("Invalid float %q: underscores must be "+
|
||||
"surrounded by digits", it.val)
|
||||
}
|
||||
}
|
||||
if !numPeriodsOK(it.val) {
|
||||
// As a special case, numbers like '123.' or '1.e2',
|
||||
// which are valid as far as Go/strconv are concerned,
|
||||
// must be rejected because TOML says that a fractional
|
||||
// part consists of '.' followed by 1+ digits.
|
||||
p.panicf("Invalid float %q: '.' must be followed "+
|
||||
"by one or more digits", it.val)
|
||||
}
|
||||
val := strings.Replace(it.val, "_", "", -1)
|
||||
num, err := strconv.ParseFloat(val, 64)
|
||||
if err != nil {
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
|
||||
p.panicf("Float '%s' is out of the range of 64-bit "+
|
||||
"IEEE-754 floating-point numbers.", it.val)
|
||||
} else {
|
||||
p.panicf("Invalid float value: %q", it.val)
|
||||
}
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
case itemDatetime:
|
||||
var t time.Time
|
||||
var ok bool
|
||||
var err error
|
||||
for _, format := range []string{
|
||||
"2006-01-02T15:04:05Z07:00",
|
||||
"2006-01-02T15:04:05",
|
||||
"2006-01-02",
|
||||
} {
|
||||
t, err = time.ParseInLocation(format, it.val, time.Local)
|
||||
if err == nil {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
p.panicf("Invalid TOML Datetime: %q.", it.val)
|
||||
}
|
||||
return t, p.typeOfPrimitive(it)
|
||||
return p.valueDatetime(it)
|
||||
case itemArray:
|
||||
array := make([]interface{}, 0)
|
||||
types := make([]tomlType, 0)
|
||||
|
||||
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
|
||||
if it.typ == itemCommentStart {
|
||||
p.expect(itemText)
|
||||
continue
|
||||
}
|
||||
|
||||
val, typ := p.value(it)
|
||||
array = append(array, val)
|
||||
types = append(types, typ)
|
||||
}
|
||||
return array, p.typeOfArray(types)
|
||||
return p.valueArray(it)
|
||||
case itemInlineTableStart:
|
||||
var (
|
||||
hash = make(map[string]interface{})
|
||||
outerContext = p.context
|
||||
outerKey = p.currentKey
|
||||
)
|
||||
|
||||
p.context = append(p.context, p.currentKey)
|
||||
p.currentKey = ""
|
||||
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
|
||||
if it.typ != itemKeyStart {
|
||||
p.bug("Expected key start but instead found %q, around line %d",
|
||||
it.val, p.approxLine)
|
||||
}
|
||||
if it.typ == itemCommentStart {
|
||||
p.expect(itemText)
|
||||
continue
|
||||
}
|
||||
|
||||
// retrieve key
|
||||
k := p.next()
|
||||
p.approxLine = k.line
|
||||
kname := p.keyString(k)
|
||||
|
||||
// retrieve value
|
||||
p.currentKey = kname
|
||||
val, typ := p.value(p.next())
|
||||
// make sure we keep metadata up to date
|
||||
p.setType(kname, typ)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
hash[kname] = val
|
||||
}
|
||||
p.context = outerContext
|
||||
p.currentKey = outerKey
|
||||
return hash, tomlHash
|
||||
return p.valueInlineTable(it, parentIsArray)
|
||||
default:
|
||||
p.bug("Unexpected value type: %s", it.typ)
|
||||
}
|
||||
p.bug("Unexpected value type: %s", it.typ)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (p *parser) valueInteger(it item) (interface{}, tomlType) {
|
||||
if !numUnderscoresOK(it.val) {
|
||||
p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
|
||||
}
|
||||
if numHasLeadingZero(it.val) {
|
||||
p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val)
|
||||
}
|
||||
|
||||
num, err := strconv.ParseInt(it.val, 0, 64)
|
||||
if err != nil {
|
||||
// Distinguish integer values. Normally, it'd be a bug if the lexer
|
||||
// provides an invalid integer, but it's possible that the number is
|
||||
// out of range of valid values (which the lexer cannot determine).
|
||||
// So mark the former as a bug but the latter as a legitimate user
|
||||
// error.
|
||||
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
|
||||
p.panicErr(it, errParseRange{i: it.val, size: "int64"})
|
||||
} else {
|
||||
p.bug("Expected integer value, but got '%s'.", it.val)
|
||||
}
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
}
|
||||
|
||||
func (p *parser) valueFloat(it item) (interface{}, tomlType) {
|
||||
parts := strings.FieldsFunc(it.val, func(r rune) bool {
|
||||
switch r {
|
||||
case '.', 'e', 'E':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
for _, part := range parts {
|
||||
if !numUnderscoresOK(part) {
|
||||
p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val)
|
||||
}
|
||||
}
|
||||
if len(parts) > 0 && numHasLeadingZero(parts[0]) {
|
||||
p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val)
|
||||
}
|
||||
if !numPeriodsOK(it.val) {
|
||||
// As a special case, numbers like '123.' or '1.e2',
|
||||
// which are valid as far as Go/strconv are concerned,
|
||||
// must be rejected because TOML says that a fractional
|
||||
// part consists of '.' followed by 1+ digits.
|
||||
p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
|
||||
}
|
||||
val := strings.Replace(it.val, "_", "", -1)
|
||||
if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
|
||||
val = "nan"
|
||||
}
|
||||
num, err := strconv.ParseFloat(val, 64)
|
||||
if err != nil {
|
||||
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
|
||||
p.panicErr(it, errParseRange{i: it.val, size: "float64"})
|
||||
} else {
|
||||
p.panicItemf(it, "Invalid float value: %q", it.val)
|
||||
}
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
}
|
||||
|
||||
var dtTypes = []struct {
|
||||
fmt string
|
||||
zone *time.Location
|
||||
}{
|
||||
{time.RFC3339Nano, time.Local},
|
||||
{"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
|
||||
{"2006-01-02", internal.LocalDate},
|
||||
{"15:04:05.999999999", internal.LocalTime},
|
||||
}
|
||||
|
||||
func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
|
||||
it.val = datetimeRepl.Replace(it.val)
|
||||
var (
|
||||
t time.Time
|
||||
ok bool
|
||||
err error
|
||||
)
|
||||
for _, dt := range dtTypes {
|
||||
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
|
||||
if err == nil {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val)
|
||||
}
|
||||
return t, p.typeOfPrimitive(it)
|
||||
}
|
||||
|
||||
func (p *parser) valueArray(it item) (interface{}, tomlType) {
|
||||
p.setType(p.currentKey, tomlArray, it.pos)
|
||||
|
||||
var (
|
||||
types []tomlType
|
||||
|
||||
// Initialize to a non-nil empty slice. This makes it consistent with
|
||||
// how S = [] decodes into a non-nil slice inside something like struct
|
||||
// { S []string }. See #338
|
||||
array = []interface{}{}
|
||||
)
|
||||
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
|
||||
if it.typ == itemCommentStart {
|
||||
p.expect(itemText)
|
||||
continue
|
||||
}
|
||||
|
||||
val, typ := p.value(it, true)
|
||||
array = append(array, val)
|
||||
types = append(types, typ)
|
||||
|
||||
// XXX: types isn't used here, we need it to record the accurate type
|
||||
// information.
|
||||
//
|
||||
// Not entirely sure how to best store this; could use "key[0]",
|
||||
// "key[1]" notation, or maybe store it on the Array type?
|
||||
}
|
||||
return array, tomlArray
|
||||
}
|
||||
|
||||
func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
|
||||
var (
|
||||
hash = make(map[string]interface{})
|
||||
outerContext = p.context
|
||||
outerKey = p.currentKey
|
||||
)
|
||||
|
||||
p.context = append(p.context, p.currentKey)
|
||||
prevContext := p.context
|
||||
p.currentKey = ""
|
||||
|
||||
p.addImplicit(p.context)
|
||||
p.addContext(p.context, parentIsArray)
|
||||
|
||||
/// Loop over all table key/value pairs.
|
||||
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
|
||||
if it.typ == itemCommentStart {
|
||||
p.expect(itemText)
|
||||
continue
|
||||
}
|
||||
|
||||
/// Read all key parts.
|
||||
k := p.nextPos()
|
||||
var key Key
|
||||
for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
|
||||
key = append(key, p.keyString(k))
|
||||
}
|
||||
p.assertEqual(itemKeyEnd, k.typ)
|
||||
|
||||
/// The current key is the last part.
|
||||
p.currentKey = key[len(key)-1]
|
||||
|
||||
/// All the other parts (if any) are the context; need to set each part
|
||||
/// as implicit.
|
||||
context := key[:len(key)-1]
|
||||
for i := range context {
|
||||
p.addImplicitContext(append(p.context, context[i:i+1]...))
|
||||
}
|
||||
|
||||
/// Set the value.
|
||||
val, typ := p.value(p.next(), false)
|
||||
p.set(p.currentKey, val, typ, it.pos)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
hash[p.currentKey] = val
|
||||
|
||||
/// Restore context.
|
||||
p.context = prevContext
|
||||
}
|
||||
p.context = outerContext
|
||||
p.currentKey = outerKey
|
||||
return hash, tomlHash
|
||||
}
|
||||
|
||||
// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
|
||||
// +/- signs, and base prefixes.
|
||||
func numHasLeadingZero(s string) bool {
|
||||
if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x
|
||||
return true
|
||||
}
|
||||
if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// numUnderscoresOK checks whether each underscore in s is surrounded by
|
||||
// characters that are not underscores.
|
||||
func numUnderscoresOK(s string) bool {
|
||||
switch s {
|
||||
case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
|
||||
return true
|
||||
}
|
||||
accept := false
|
||||
for _, r := range s {
|
||||
if r == '_' {
|
||||
if !accept {
|
||||
return false
|
||||
}
|
||||
accept = false
|
||||
continue
|
||||
}
|
||||
accept = true
|
||||
|
||||
// isHexadecimal is a superset of all the permissible characters
|
||||
// surrounding an underscore.
|
||||
accept = isHexadecimal(r)
|
||||
}
|
||||
return accept
|
||||
}
|
||||
@@ -338,13 +487,12 @@ func numPeriodsOK(s string) bool {
|
||||
return !period
|
||||
}
|
||||
|
||||
// establishContext sets the current context of the parser,
|
||||
// where the context is either a hash or an array of hashes. Which one is
|
||||
// set depends on the value of the `array` parameter.
|
||||
// Set the current context of the parser, where the context is either a hash or
|
||||
// an array of hashes, depending on the value of the `array` parameter.
|
||||
//
|
||||
// Establishing the context also makes sure that the key isn't a duplicate, and
|
||||
// will create implicit hashes automatically.
|
||||
func (p *parser) establishContext(key Key, array bool) {
|
||||
func (p *parser) addContext(key Key, array bool) {
|
||||
var ok bool
|
||||
|
||||
// Always start at the top level and drill down for our context.
|
||||
@@ -383,7 +531,7 @@ func (p *parser) establishContext(key Key, array bool) {
|
||||
// list of tables for it.
|
||||
k := key[len(key)-1]
|
||||
if _, ok := hashContext[k]; !ok {
|
||||
hashContext[k] = make([]map[string]interface{}, 0, 5)
|
||||
hashContext[k] = make([]map[string]interface{}, 0, 4)
|
||||
}
|
||||
|
||||
// Add a new table. But make sure the key hasn't already been used
|
||||
@@ -391,8 +539,7 @@ func (p *parser) establishContext(key Key, array bool) {
|
||||
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
|
||||
hashContext[k] = append(hash, make(map[string]interface{}))
|
||||
} else {
|
||||
p.panicf("Key '%s' was already created and cannot be used as "+
|
||||
"an array.", keyContext)
|
||||
p.panicf("Key '%s' was already created and cannot be used as an array.", key)
|
||||
}
|
||||
} else {
|
||||
p.setValue(key[len(key)-1], make(map[string]interface{}))
|
||||
@@ -400,15 +547,23 @@ func (p *parser) establishContext(key Key, array bool) {
|
||||
p.context = append(p.context, key[len(key)-1])
|
||||
}
|
||||
|
||||
// set calls setValue and setType.
|
||||
func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
|
||||
p.setValue(key, val)
|
||||
p.setType(key, typ, pos)
|
||||
|
||||
}
|
||||
|
||||
// setValue sets the given key to the given value in the current context.
|
||||
// It will make sure that the key hasn't already been defined, account for
|
||||
// implicit key groups.
|
||||
func (p *parser) setValue(key string, value interface{}) {
|
||||
var tmpHash interface{}
|
||||
var ok bool
|
||||
|
||||
hash := p.mapping
|
||||
keyContext := make(Key, 0)
|
||||
var (
|
||||
tmpHash interface{}
|
||||
ok bool
|
||||
hash = p.mapping
|
||||
keyContext Key
|
||||
)
|
||||
for _, k := range p.context {
|
||||
keyContext = append(keyContext, k)
|
||||
if tmpHash, ok = hash[k]; !ok {
|
||||
@@ -422,24 +577,26 @@ func (p *parser) setValue(key string, value interface{}) {
|
||||
case map[string]interface{}:
|
||||
hash = t
|
||||
default:
|
||||
p.bug("Expected hash to have type 'map[string]interface{}', but "+
|
||||
"it has '%T' instead.", tmpHash)
|
||||
p.panicf("Key '%s' has already been defined.", keyContext)
|
||||
}
|
||||
}
|
||||
keyContext = append(keyContext, key)
|
||||
|
||||
if _, ok := hash[key]; ok {
|
||||
// Typically, if the given key has already been set, then we have
|
||||
// to raise an error since duplicate keys are disallowed. However,
|
||||
// it's possible that a key was previously defined implicitly. In this
|
||||
// case, it is allowed to be redefined concretely. (See the
|
||||
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
|
||||
// Normally redefining keys isn't allowed, but the key could have been
|
||||
// defined implicitly and it's allowed to be redefined concretely. (See
|
||||
// the `valid/implicit-and-explicit-after.toml` in toml-test)
|
||||
//
|
||||
// But we have to make sure to stop marking it as an implicit. (So that
|
||||
// another redefinition provokes an error.)
|
||||
//
|
||||
// Note that since it has already been defined (as a hash), we don't
|
||||
// want to overwrite it. So our business is done.
|
||||
if p.isArray(keyContext) {
|
||||
p.removeImplicit(keyContext)
|
||||
hash[key] = value
|
||||
return
|
||||
}
|
||||
if p.isImplicit(keyContext) {
|
||||
p.removeImplicit(keyContext)
|
||||
return
|
||||
@@ -449,40 +606,39 @@ func (p *parser) setValue(key string, value interface{}) {
|
||||
// key, which is *always* wrong.
|
||||
p.panicf("Key '%s' has already been defined.", keyContext)
|
||||
}
|
||||
|
||||
hash[key] = value
|
||||
}
|
||||
|
||||
// setType sets the type of a particular value at a given key.
|
||||
// It should be called immediately AFTER setValue.
|
||||
// setType sets the type of a particular value at a given key. It should be
|
||||
// called immediately AFTER setValue.
|
||||
//
|
||||
// Note that if `key` is empty, then the type given will be applied to the
|
||||
// current context (which is either a table or an array of tables).
|
||||
func (p *parser) setType(key string, typ tomlType) {
|
||||
func (p *parser) setType(key string, typ tomlType, pos Position) {
|
||||
keyContext := make(Key, 0, len(p.context)+1)
|
||||
for _, k := range p.context {
|
||||
keyContext = append(keyContext, k)
|
||||
}
|
||||
keyContext = append(keyContext, p.context...)
|
||||
if len(key) > 0 { // allow type setting for hashes
|
||||
keyContext = append(keyContext, key)
|
||||
}
|
||||
p.types[keyContext.String()] = typ
|
||||
// Special case to make empty keys ("" = 1) work.
|
||||
// Without it it will set "" rather than `""`.
|
||||
// TODO: why is this needed? And why is this only needed here?
|
||||
if len(keyContext) == 0 {
|
||||
keyContext = Key{""}
|
||||
}
|
||||
p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos}
|
||||
}
|
||||
|
||||
// addImplicit sets the given Key as having been created implicitly.
|
||||
func (p *parser) addImplicit(key Key) {
|
||||
p.implicits[key.String()] = true
|
||||
}
|
||||
|
||||
// removeImplicit stops tagging the given key as having been implicitly
|
||||
// created.
|
||||
func (p *parser) removeImplicit(key Key) {
|
||||
p.implicits[key.String()] = false
|
||||
}
|
||||
|
||||
// isImplicit returns true if the key group pointed to by the key was created
|
||||
// implicitly.
|
||||
func (p *parser) isImplicit(key Key) bool {
|
||||
return p.implicits[key.String()]
|
||||
// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
|
||||
// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
|
||||
func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
|
||||
func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
|
||||
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
|
||||
func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
|
||||
func (p *parser) addImplicitContext(key Key) {
|
||||
p.addImplicit(key)
|
||||
p.addContext(key, false)
|
||||
}
|
||||
|
||||
// current returns the full key name of the current context.
|
||||
@@ -497,24 +653,62 @@ func (p *parser) current() string {
|
||||
}
|
||||
|
||||
func stripFirstNewline(s string) string {
|
||||
if len(s) == 0 || s[0] != '\n' {
|
||||
if len(s) > 0 && s[0] == '\n' {
|
||||
return s[1:]
|
||||
}
|
||||
if len(s) > 1 && s[0] == '\r' && s[1] == '\n' {
|
||||
return s[2:]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Remove newlines inside triple-quoted strings if a line ends with "\".
|
||||
func (p *parser) stripEscapedNewlines(s string) string {
|
||||
split := strings.Split(s, "\n")
|
||||
if len(split) < 1 {
|
||||
return s
|
||||
}
|
||||
return s[1:]
|
||||
}
|
||||
|
||||
func stripEscapedWhitespace(s string) string {
|
||||
esc := strings.Split(s, "\\\n")
|
||||
if len(esc) > 1 {
|
||||
for i := 1; i < len(esc); i++ {
|
||||
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
|
||||
escNL := false // Keep track of the last non-blank line was escaped.
|
||||
for i, line := range split {
|
||||
line = strings.TrimRight(line, " \t\r")
|
||||
|
||||
if len(line) == 0 || line[len(line)-1] != '\\' {
|
||||
split[i] = strings.TrimRight(split[i], "\r")
|
||||
if !escNL && i != len(split)-1 {
|
||||
split[i] += "\n"
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
escBS := true
|
||||
for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
|
||||
escBS = !escBS
|
||||
}
|
||||
if escNL {
|
||||
line = strings.TrimLeft(line, " \t\r")
|
||||
}
|
||||
escNL = !escBS
|
||||
|
||||
if escBS {
|
||||
split[i] += "\n"
|
||||
continue
|
||||
}
|
||||
|
||||
if i == len(split)-1 {
|
||||
p.panicf("invalid escape: '\\ '")
|
||||
}
|
||||
|
||||
split[i] = line[:len(line)-1] // Remove \
|
||||
if len(split)-1 > i {
|
||||
split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
|
||||
}
|
||||
}
|
||||
return strings.Join(esc, "")
|
||||
return strings.Join(split, "")
|
||||
}
|
||||
|
||||
func (p *parser) replaceEscapes(str string) string {
|
||||
var replaced []rune
|
||||
func (p *parser) replaceEscapes(it item, str string) string {
|
||||
replaced := make([]rune, 0, len(str))
|
||||
s := []byte(str)
|
||||
r := 0
|
||||
for r < len(s) {
|
||||
@@ -532,7 +726,8 @@ func (p *parser) replaceEscapes(str string) string {
|
||||
switch s[r] {
|
||||
default:
|
||||
p.bug("Expected valid escape code after \\, but got %q.", s[r])
|
||||
return ""
|
||||
case ' ', '\t':
|
||||
p.panicItemf(it, "invalid escape: '\\%c'", s[r])
|
||||
case 'b':
|
||||
replaced = append(replaced, rune(0x0008))
|
||||
r += 1
|
||||
@@ -558,14 +753,14 @@ func (p *parser) replaceEscapes(str string) string {
|
||||
// At this point, we know we have a Unicode escape of the form
|
||||
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
|
||||
// for us.)
|
||||
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
|
||||
escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
|
||||
replaced = append(replaced, escaped)
|
||||
r += 5
|
||||
case 'U':
|
||||
// At this point, we know we have a Unicode escape of the form
|
||||
// `uXXXX` at [r, r+9). (Because the lexer guarantees this
|
||||
// for us.)
|
||||
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
|
||||
escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
|
||||
replaced = append(replaced, escaped)
|
||||
r += 9
|
||||
}
|
||||
@@ -573,20 +768,14 @@ func (p *parser) replaceEscapes(str string) string {
|
||||
return string(replaced)
|
||||
}
|
||||
|
||||
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
|
||||
func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
|
||||
s := string(bs)
|
||||
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
|
||||
if err != nil {
|
||||
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
|
||||
"lexer claims it's OK: %s", s, err)
|
||||
p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
|
||||
}
|
||||
if !utf8.ValidRune(rune(hex)) {
|
||||
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
|
||||
p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s)
|
||||
}
|
||||
return rune(hex)
|
||||
}
|
||||
|
||||
func isStringType(ty itemType) bool {
|
||||
return ty == itemString || ty == itemMultilineString ||
|
||||
ty == itemRawString || ty == itemRawMultilineString
|
||||
}
|
||||
|
||||
1 src/runtime/vendor/github.com/BurntSushi/toml/session.vim (generated, vendored)
@@ -1 +0,0 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1
4 src/runtime/vendor/github.com/BurntSushi/toml/type_fields.go (generated, vendored)
@@ -70,8 +70,8 @@ func typeFields(t reflect.Type) []field {
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}
	var count map[reflect.Type]int
	var nextCount map[reflect.Type]int

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

@@ -16,7 +16,7 @@ func typeEqual(t1, t2 tomlType) bool {
	return t1.typeString() == t2.typeString()
}

func typeIsHash(t tomlType) bool {
func typeIsTable(t tomlType) bool {
	return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}

@@ -68,24 +68,3 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType {
	p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
	panic("unreachable")
}

// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
	// Empty arrays are cool.
	if len(types) == 0 {
		return tomlArray
	}

	theType := types[0]
	for _, t := range types[1:] {
		if !typeEqual(theType, t) {
			p.panicf("Array contains values of type '%s' and '%s', but "+
				"arrays must be homogeneous.", theType, t)
		}
	}
	return tomlArray
}
6 src/runtime/vendor/github.com/Microsoft/go-winio/file.go (generated, vendored)
@@ -1,3 +1,4 @@
//go:build windows
// +build windows

package winio
@@ -143,6 +144,11 @@ func (f *win32File) Close() error {
	return nil
}

// IsClosed checks if the file has been closed
func (f *win32File) IsClosed() bool {
	return f.closing.isSet()
}

// prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) {
3
src/runtime/vendor/github.com/Microsoft/go-winio/go.mod
generated
vendored
3
src/runtime/vendor/github.com/Microsoft/go-winio/go.mod
generated
vendored
@@ -1,9 +1,8 @@
|
||||
module github.com/Microsoft/go-winio
|
||||
|
||||
go 1.12
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/sirupsen/logrus v1.7.0
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
|
||||
)
|
||||
|
||||
3
src/runtime/vendor/github.com/Microsoft/go-winio/go.sum
generated
vendored
3
src/runtime/vendor/github.com/Microsoft/go-winio/go.sum
generated
vendored
@@ -1,14 +1,11 @@
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
||||
17
src/runtime/vendor/github.com/Microsoft/go-winio/hvsock.go
generated
vendored
17
src/runtime/vendor/github.com/Microsoft/go-winio/hvsock.go
generated
vendored
@@ -1,3 +1,4 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
@@ -252,15 +253,23 @@ func (conn *HvsockConn) Close() error {
|
||||
return conn.sock.Close()
|
||||
}
|
||||
|
||||
func (conn *HvsockConn) IsClosed() bool {
|
||||
return conn.sock.IsClosed()
|
||||
}
|
||||
|
||||
func (conn *HvsockConn) shutdown(how int) error {
|
||||
err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD)
|
||||
if conn.IsClosed() {
|
||||
return ErrFileClosed
|
||||
}
|
||||
|
||||
err := syscall.Shutdown(conn.sock.handle, how)
|
||||
if err != nil {
|
||||
return os.NewSyscallError("shutdown", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CloseRead shuts down the read end of the socket.
|
||||
// CloseRead shuts down the read end of the socket, preventing future read operations.
|
||||
func (conn *HvsockConn) CloseRead() error {
|
||||
err := conn.shutdown(syscall.SHUT_RD)
|
||||
if err != nil {
|
||||
@@ -269,8 +278,8 @@ func (conn *HvsockConn) CloseRead() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
|
||||
// no more data will be written.
|
||||
// CloseWrite shuts down the write end of the socket, preventing future write operations and
|
||||
// notifying the other endpoint that no more data will be written.
|
||||
func (conn *HvsockConn) CloseWrite() error {
|
||||
err := conn.shutdown(syscall.SHUT_WR)
|
||||
if err != nil {
|
||||
|
||||
9
src/runtime/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
generated
vendored
9
src/runtime/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
generated
vendored
@@ -14,8 +14,6 @@ import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Variant specifies which GUID variant (or "type") of the GUID. It determines
|
||||
@@ -41,13 +39,6 @@ type Version uint8
|
||||
var _ = (encoding.TextMarshaler)(GUID{})
|
||||
var _ = (encoding.TextUnmarshaler)(&GUID{})
|
||||
|
||||
// GUID represents a GUID/UUID. It has the same structure as
|
||||
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
|
||||
// that type. It is defined as its own type so that stringification and
|
||||
// marshaling can be supported. The representation matches that used by native
|
||||
// Windows code.
|
||||
type GUID windows.GUID
|
||||
|
||||
// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
|
||||
func NewV4() (GUID, error) {
|
||||
var b [16]byte
|
||||
|
||||
15
src/runtime/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
generated
vendored
Normal file
15
src/runtime/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
// +build !windows
|
||||
|
||||
package guid
|
||||
|
||||
// GUID represents a GUID/UUID. It has the same structure as
|
||||
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
|
||||
// that type. It is defined as its own type as that is only available to builds
|
||||
// targeted at `windows`. The representation matches that used by native Windows
|
||||
// code.
|
||||
type GUID struct {
|
||||
Data1 uint32
|
||||
Data2 uint16
|
||||
Data3 uint16
|
||||
Data4 [8]byte
|
||||
}
|
||||
10
src/runtime/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
generated
vendored
Normal file
10
src/runtime/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
package guid
|
||||
|
||||
import "golang.org/x/sys/windows"
|
||||
|
||||
// GUID represents a GUID/UUID. It has the same structure as
|
||||
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
|
||||
// that type. It is defined as its own type so that stringification and
|
||||
// marshaling can be supported. The representation matches that used by native
|
||||
// Windows code.
|
||||
type GUID windows.GUID
|
||||
15
src/runtime/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
generated
vendored
15
src/runtime/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
generated
vendored
@@ -3,11 +3,10 @@
|
||||
package security
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type (
|
||||
@@ -72,7 +71,7 @@ func GrantVmGroupAccess(name string) error {
|
||||
// Stat (to determine if `name` is a directory).
|
||||
s, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "%s os.Stat %s", gvmga, name)
|
||||
return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err)
|
||||
}
|
||||
|
||||
// Get a handle to the file/directory. Must defer Close on success.
|
||||
@@ -88,7 +87,7 @@ func GrantVmGroupAccess(name string) error {
|
||||
sd := uintptr(0)
|
||||
origDACL := uintptr(0)
|
||||
if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
|
||||
return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name)
|
||||
return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err)
|
||||
}
|
||||
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))
|
||||
|
||||
@@ -102,7 +101,7 @@ func GrantVmGroupAccess(name string) error {
|
||||
|
||||
// And finally use SetSecurityInfo to apply the updated DACL.
|
||||
if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
|
||||
return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name)
|
||||
return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -120,7 +119,7 @@ func createFile(name string, isDir bool) (syscall.Handle, error) {
|
||||
}
|
||||
fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name)
|
||||
return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err)
|
||||
}
|
||||
return fd, nil
|
||||
}
|
||||
@@ -131,7 +130,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp
|
||||
// Generate pointers to the SIDs based on the string SIDs
|
||||
sid, err := syscall.StringToSid(sidVmGroup)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup)
|
||||
return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err)
|
||||
}
|
||||
|
||||
inheritance := inheritModeNoInheritance
|
||||
@@ -154,7 +153,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp
|
||||
|
||||
modifiedDACL := uintptr(0)
|
||||
if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil {
|
||||
return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name)
|
||||
return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err)
|
||||
}
|
||||
|
||||
return modifiedDACL, nil
|
||||
|
||||
59
src/runtime/vendor/github.com/Microsoft/go-winio/vhd/vhd.go
generated
vendored
59
src/runtime/vendor/github.com/Microsoft/go-winio/vhd/vhd.go
generated
vendored
@@ -1,3 +1,4 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package vhd
|
||||
@@ -7,14 +8,13 @@ import (
|
||||
"syscall"
|
||||
|
||||
"github.com/Microsoft/go-winio/pkg/guid"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go
|
||||
|
||||
//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk
|
||||
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
|
||||
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
|
||||
//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk
|
||||
//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk
|
||||
//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath
|
||||
@@ -62,13 +62,27 @@ type OpenVirtualDiskParameters struct {
|
||||
Version2 OpenVersion2
|
||||
}
|
||||
|
||||
// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However,
|
||||
// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating
|
||||
// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods.
|
||||
type openVersion2 struct {
|
||||
getInfoOnly int32
|
||||
readOnly int32
|
||||
resiliencyGUID guid.GUID
|
||||
}
|
||||
|
||||
type openVirtualDiskParameters struct {
|
||||
version uint32
|
||||
version2 openVersion2
|
||||
}
|
||||
|
||||
type AttachVersion2 struct {
|
||||
RestrictedOffset uint64
|
||||
RestrictedLength uint64
|
||||
}
|
||||
|
||||
type AttachVirtualDiskParameters struct {
|
||||
Version uint32 // Must always be set to 2
|
||||
Version uint32
|
||||
Version2 AttachVersion2
|
||||
}
|
||||
|
||||
@@ -146,16 +160,13 @@ func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := syscall.CloseHandle(handle); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return syscall.CloseHandle(handle)
|
||||
}
|
||||
|
||||
// DetachVirtualDisk detaches a virtual hard disk by handle.
|
||||
func DetachVirtualDisk(handle syscall.Handle) (err error) {
|
||||
if err := detachVirtualDisk(handle, 0, 0); err != nil {
|
||||
return errors.Wrap(err, "failed to detach virtual disk")
|
||||
return fmt.Errorf("failed to detach virtual disk: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -185,7 +196,7 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua
|
||||
parameters,
|
||||
nil,
|
||||
); err != nil {
|
||||
return errors.Wrap(err, "failed to attach virtual disk")
|
||||
return fmt.Errorf("failed to attach virtual disk: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -209,7 +220,7 @@ func AttachVhd(path string) (err error) {
|
||||
AttachVirtualDiskFlagNone,
|
||||
¶ms,
|
||||
); err != nil {
|
||||
return errors.Wrap(err, "failed to attach virtual disk")
|
||||
return fmt.Errorf("failed to attach virtual disk: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -234,19 +245,35 @@ func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask Virtual
|
||||
var (
|
||||
handle syscall.Handle
|
||||
defaultType VirtualStorageType
|
||||
getInfoOnly int32
|
||||
readOnly int32
|
||||
)
|
||||
if parameters.Version != 2 {
|
||||
return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version)
|
||||
}
|
||||
if parameters.Version2.GetInfoOnly {
|
||||
getInfoOnly = 1
|
||||
}
|
||||
if parameters.Version2.ReadOnly {
|
||||
readOnly = 1
|
||||
}
|
||||
params := &openVirtualDiskParameters{
|
||||
version: parameters.Version,
|
||||
version2: openVersion2{
|
||||
getInfoOnly,
|
||||
readOnly,
|
||||
parameters.Version2.ResiliencyGUID,
|
||||
},
|
||||
}
|
||||
if err := openVirtualDisk(
|
||||
&defaultType,
|
||||
vhdPath,
|
||||
uint32(virtualDiskAccessMask),
|
||||
uint32(openVirtualDiskFlags),
|
||||
parameters,
|
||||
params,
|
||||
&handle,
|
||||
); err != nil {
|
||||
return 0, errors.Wrap(err, "failed to open virtual disk")
|
||||
return 0, fmt.Errorf("failed to open virtual disk: %w", err)
|
||||
}
|
||||
return handle, nil
|
||||
}
|
||||
@@ -272,7 +299,7 @@ func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask,
|
||||
nil,
|
||||
&handle,
|
||||
); err != nil {
|
||||
return handle, errors.Wrap(err, "failed to create virtual disk")
|
||||
return handle, fmt.Errorf("failed to create virtual disk: %w", err)
|
||||
}
|
||||
return handle, nil
|
||||
}
|
||||
@@ -290,7 +317,7 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) {
|
||||
&diskPathSizeInBytes,
|
||||
&diskPhysicalPathBuf[0],
|
||||
); err != nil {
|
||||
return "", errors.Wrap(err, "failed to get disk physical path")
|
||||
return "", fmt.Errorf("failed to get disk physical path: %w", err)
|
||||
}
|
||||
return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil
|
||||
}
|
||||
@@ -314,10 +341,10 @@ func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error
|
||||
createParams,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create differencing vhd: %s", err)
|
||||
return fmt.Errorf("failed to create differencing vhd: %w", err)
|
||||
}
|
||||
if err := syscall.CloseHandle(vhdHandle); err != nil {
|
||||
return fmt.Errorf("failed to close differencing vhd handle: %s", err)
|
||||
return fmt.Errorf("failed to close differencing vhd handle: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
4
src/runtime/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
generated
vendored
4
src/runtime/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
generated
vendored
@@ -88,7 +88,7 @@ func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint
|
||||
return
|
||||
}
|
||||
|
||||
func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
|
||||
func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
|
||||
var _p0 *uint16
|
||||
_p0, win32err = syscall.UTF16PtrFromString(path)
|
||||
if win32err != nil {
|
||||
@@ -97,7 +97,7 @@ func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtua
|
||||
return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle)
|
||||
}
|
||||
|
||||
func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
|
||||
func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
|
||||
r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
|
||||
if r0 != 0 {
|
||||
win32err = syscall.Errno(r0)
|
||||
|
||||
@@ -1,17 +1,17 @@
|
||||
/*
|
||||
Copyright 2019 The containerd Authors.
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: api.proto
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
@@ -192,7 +192,7 @@
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
122
src/runtime/vendor/github.com/containers/podman/v4/pkg/annotations/annotations.go
generated
vendored
Normal file
122
src/runtime/vendor/github.com/containers/podman/v4/pkg/annotations/annotations.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
package annotations
|
||||
|
||||
const (
|
||||
// Annotations carries the received Kubelet annotations.
|
||||
Annotations = "io.kubernetes.cri-o.Annotations"
|
||||
|
||||
// ContainerID is the container ID annotation.
|
||||
ContainerID = "io.kubernetes.cri-o.ContainerID"
|
||||
|
||||
// ContainerName is the container name annotation.
|
||||
ContainerName = "io.kubernetes.cri-o.ContainerName"
|
||||
|
||||
// ContainerType is the container type (sandbox or container) annotation.
|
||||
ContainerType = "io.kubernetes.cri-o.ContainerType"
|
||||
|
||||
// Created is the container creation time annotation.
|
||||
Created = "io.kubernetes.cri-o.Created"
|
||||
|
||||
// HostName is the container host name annotation.
|
||||
HostName = "io.kubernetes.cri-o.HostName"
|
||||
|
||||
// CgroupParent is the sandbox cgroup parent.
|
||||
CgroupParent = "io.kubernetes.cri-o.CgroupParent"
|
||||
|
||||
// IP is the container ipv4 or ipv6 address.
|
||||
IP = "io.kubernetes.cri-o.IP"
|
||||
|
||||
// NamespaceOptions store the options for namespaces.
|
||||
NamespaceOptions = "io.kubernetes.cri-o.NamespaceOptions"
|
||||
|
||||
// SeccompProfilePath is the node seccomp profile path.
|
||||
SeccompProfilePath = "io.kubernetes.cri-o.SeccompProfilePath"
|
||||
|
||||
// Image is the container image ID annotation.
|
||||
Image = "io.kubernetes.cri-o.Image"
|
||||
|
||||
// ImageName is the container image name annotation.
|
||||
ImageName = "io.kubernetes.cri-o.ImageName"
|
||||
|
||||
// ImageRef is the container image ref annotation.
|
||||
ImageRef = "io.kubernetes.cri-o.ImageRef"
|
||||
|
||||
// KubeName is the kubernetes name annotation.
|
||||
KubeName = "io.kubernetes.cri-o.KubeName"
|
||||
|
||||
// PortMappings holds the port mappings for the sandbox.
|
||||
PortMappings = "io.kubernetes.cri-o.PortMappings"
|
||||
|
||||
// Labels are the kubernetes labels annotation.
|
||||
Labels = "io.kubernetes.cri-o.Labels"
|
||||
|
||||
// LogPath is the container logging path annotation.
|
||||
LogPath = "io.kubernetes.cri-o.LogPath"
|
||||
|
||||
// Metadata is the container metadata annotation.
|
||||
Metadata = "io.kubernetes.cri-o.Metadata"
|
||||
|
||||
// Name is the pod name annotation.
|
||||
Name = "io.kubernetes.cri-o.Name"
|
||||
|
||||
// Namespace is the pod namespace annotation.
|
||||
Namespace = "io.kubernetes.cri-o.Namespace"
|
||||
|
||||
// PrivilegedRuntime is the annotation for the privileged runtime path.
|
||||
PrivilegedRuntime = "io.kubernetes.cri-o.PrivilegedRuntime"
|
||||
|
||||
// ResolvPath is the resolver configuration path annotation.
|
||||
ResolvPath = "io.kubernetes.cri-o.ResolvPath"
|
||||
|
||||
// HostnamePath is the path to /etc/hostname to bind mount annotation.
|
||||
HostnamePath = "io.kubernetes.cri-o.HostnamePath"
|
||||
|
||||
// SandboxID is the sandbox ID annotation.
|
||||
SandboxID = "io.kubernetes.cri-o.SandboxID"
|
||||
|
||||
// SandboxName is the sandbox name annotation.
|
||||
SandboxName = "io.kubernetes.cri-o.SandboxName"
|
||||
|
||||
// ShmPath is the shared memory path annotation.
|
||||
ShmPath = "io.kubernetes.cri-o.ShmPath"
|
||||
|
||||
// MountPoint is the mount point of the container rootfs.
|
||||
MountPoint = "io.kubernetes.cri-o.MountPoint"
|
||||
|
||||
// RuntimeHandler is the annotation for runtime handler.
|
||||
RuntimeHandler = "io.kubernetes.cri-o.RuntimeHandler"
|
||||
|
||||
// TTY is the terminal path annotation.
|
||||
TTY = "io.kubernetes.cri-o.TTY"
|
||||
|
||||
// Stdin is the stdin annotation.
|
||||
Stdin = "io.kubernetes.cri-o.Stdin"
|
||||
|
||||
// StdinOnce is the stdin_once annotation.
|
||||
StdinOnce = "io.kubernetes.cri-o.StdinOnce"
|
||||
|
||||
// Volumes is the volumes annotation.
|
||||
Volumes = "io.kubernetes.cri-o.Volumes"
|
||||
|
||||
// HostNetwork indicates whether the host network namespace is used or not.
|
||||
HostNetwork = "io.kubernetes.cri-o.HostNetwork"
|
||||
|
||||
// CNIResult is the JSON string representation of the Result from CNI.
|
||||
CNIResult = "io.kubernetes.cri-o.CNIResult"
|
||||
|
||||
// ContainerManager is the annotation key for indicating the creator and
|
||||
// manager of the container.
|
||||
ContainerManager = "io.container.manager"
|
||||
)
|
||||
|
||||
// ContainerType values
|
||||
const (
|
||||
// ContainerTypeSandbox represents a pod sandbox container.
|
||||
ContainerTypeSandbox = "sandbox"
|
||||
|
||||
// ContainerTypeContainer represents a container running within a pod.
|
||||
ContainerTypeContainer = "container"
|
||||
)
|
||||
|
||||
// ContainerManagerLibpod indicates that libpod created and manages the
|
||||
// container.
|
||||
const ContainerManagerLibpod = "libpod"
|
||||
105
src/runtime/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
generated
vendored
105
src/runtime/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
generated
vendored
@@ -15,7 +15,7 @@ type roffRenderer struct {
|
||||
extensions blackfriday.Extensions
|
||||
listCounters []int
|
||||
firstHeader bool
|
||||
defineTerm bool
|
||||
firstDD bool
|
||||
listDepth int
|
||||
}
|
||||
|
||||
@@ -42,7 +42,8 @@ const (
|
||||
quoteCloseTag = "\n.RE\n"
|
||||
listTag = "\n.RS\n"
|
||||
listCloseTag = "\n.RE\n"
|
||||
arglistTag = "\n.TP\n"
|
||||
dtTag = "\n.TP\n"
|
||||
dd2Tag = "\n"
|
||||
tableStart = "\n.TS\nallbox;\n"
|
||||
tableEnd = ".TE\n"
|
||||
tableCellStart = "T{\n"
|
||||
@@ -90,7 +91,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
|
||||
|
||||
switch node.Type {
|
||||
case blackfriday.Text:
|
||||
r.handleText(w, node, entering)
|
||||
escapeSpecialChars(w, node.Literal)
|
||||
case blackfriday.Softbreak:
|
||||
out(w, crTag)
|
||||
case blackfriday.Hardbreak:
|
||||
@@ -150,40 +151,21 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
|
||||
out(w, codeCloseTag)
|
||||
case blackfriday.Table:
|
||||
r.handleTable(w, node, entering)
|
||||
case blackfriday.TableCell:
|
||||
r.handleTableCell(w, node, entering)
|
||||
case blackfriday.TableHead:
|
||||
case blackfriday.TableBody:
|
||||
case blackfriday.TableRow:
|
||||
// no action as cell entries do all the nroff formatting
|
||||
return blackfriday.GoToNext
|
||||
case blackfriday.TableCell:
|
||||
r.handleTableCell(w, node, entering)
|
||||
case blackfriday.HTMLSpan:
|
||||
// ignore other HTML tags
|
||||
default:
|
||||
fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
|
||||
}
|
||||
return walkAction
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
var (
|
||||
start, end string
|
||||
)
|
||||
// handle special roff table cell text encapsulation
|
||||
if node.Parent.Type == blackfriday.TableCell {
|
||||
if len(node.Literal) > 30 {
|
||||
start = tableCellStart
|
||||
end = tableCellEnd
|
||||
} else {
|
||||
// end rows that aren't terminated by "tableCellEnd" with a cr if end of row
|
||||
if node.Parent.Next == nil && !node.Parent.IsHeader {
|
||||
end = crTag
|
||||
}
|
||||
}
|
||||
}
|
||||
out(w, start)
|
||||
escapeSpecialChars(w, node.Literal)
|
||||
out(w, end)
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
if entering {
|
||||
switch node.Level {
|
||||
@@ -230,15 +212,20 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering
|
||||
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||||
out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
|
||||
r.listCounters[len(r.listCounters)-1]++
|
||||
} else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
|
||||
// DT (definition term): line just before DD (see below).
|
||||
out(w, dtTag)
|
||||
r.firstDD = true
|
||||
} else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
|
||||
// state machine for handling terms and following definitions
|
||||
// since blackfriday does not distinguish them properly, nor
|
||||
// does it seperate them into separate lists as it should
|
||||
if !r.defineTerm {
|
||||
out(w, arglistTag)
|
||||
r.defineTerm = true
|
||||
// DD (definition description): line that starts with ": ".
|
||||
//
|
||||
// We have to distinguish between the first DD and the
|
||||
// subsequent ones, as there should be no vertical
|
||||
// whitespace between the DT and the first DD.
|
||||
if r.firstDD {
|
||||
r.firstDD = false
|
||||
} else {
|
||||
r.defineTerm = false
|
||||
out(w, dd2Tag)
|
||||
}
|
||||
} else {
|
||||
out(w, ".IP \\(bu 2\n")
|
||||
@@ -251,7 +238,7 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering
|
||||
func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
if entering {
|
||||
out(w, tableStart)
|
||||
//call walker to count cells (and rows?) so format section can be produced
|
||||
// call walker to count cells (and rows?) so format section can be produced
|
||||
columns := countColumns(node)
|
||||
out(w, strings.Repeat("l ", columns)+"\n")
|
||||
out(w, strings.Repeat("l ", columns)+".\n")
|
||||
@@ -261,28 +248,41 @@ func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
var (
|
||||
start, end string
|
||||
)
|
||||
if node.IsHeader {
|
||||
start = codespanTag
|
||||
end = codespanCloseTag
|
||||
}
|
||||
if entering {
|
||||
var start string
|
||||
if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
|
||||
out(w, "\t"+start)
|
||||
} else {
|
||||
out(w, start)
|
||||
start = "\t"
|
||||
}
|
||||
if node.IsHeader {
|
||||
start += codespanTag
|
||||
} else if nodeLiteralSize(node) > 30 {
|
||||
start += tableCellStart
|
||||
}
|
||||
out(w, start)
|
||||
} else {
|
||||
// need to carriage return if we are at the end of the header row
|
||||
if node.IsHeader && node.Next == nil {
|
||||
end = end + crTag
|
||||
var end string
|
||||
if node.IsHeader {
|
||||
end = codespanCloseTag
|
||||
} else if nodeLiteralSize(node) > 30 {
|
||||
end = tableCellEnd
|
||||
}
|
||||
if node.Next == nil && end != tableCellEnd {
|
||||
// Last cell: need to carriage return if we are at the end of the
|
||||
// header row and content isn't wrapped in a "tablecell"
|
||||
end += crTag
|
||||
}
|
||||
out(w, end)
|
||||
}
|
||||
}
|
||||
|
||||
func nodeLiteralSize(node *blackfriday.Node) int {
|
||||
total := 0
|
||||
for n := node.FirstChild; n != nil; n = n.FirstChild {
|
||||
total += len(n.Literal)
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
// because roff format requires knowing the column count before outputting any table
|
||||
// data we need to walk a table tree and count the columns
|
||||
func countColumns(node *blackfriday.Node) int {
|
||||
@@ -309,15 +309,6 @@ func out(w io.Writer, output string) {
|
||||
io.WriteString(w, output) // nolint: errcheck
|
||||
}
|
||||
|
||||
func needsBackslash(c byte) bool {
|
||||
for _, r := range []byte("-_&\\~") {
|
||||
if c == r {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func escapeSpecialChars(w io.Writer, text []byte) {
|
||||
for i := 0; i < len(text); i++ {
|
||||
// escape initial apostrophe or period
|
||||
@@ -328,7 +319,7 @@ func escapeSpecialChars(w io.Writer, text []byte) {
|
||||
// directly copy normal characters
|
||||
org := i
|
||||
|
||||
for i < len(text) && !needsBackslash(text[i]) {
|
||||
for i < len(text) && text[i] != '\\' {
|
||||
i++
|
||||
}
|
||||
if i > org {
|
||||
|
||||
93
src/runtime/vendor/github.com/cri-o/cri-o/pkg/annotations/annotations.go
generated
vendored
93
src/runtime/vendor/github.com/cri-o/cri-o/pkg/annotations/annotations.go
generated
vendored
@@ -1,93 +0,0 @@
|
||||
package annotations
|
||||
|
||||
const (
|
||||
// Annotations carries the received Kubelet annotations
|
||||
Annotations = "io.kubernetes.cri-o.Annotations"
|
||||
|
||||
// ContainerID is the container ID annotation
|
||||
ContainerID = "io.kubernetes.cri-o.ContainerID"
|
||||
|
||||
// ContainerName is the container name annotation
|
||||
ContainerName = "io.kubernetes.cri-o.ContainerName"
|
||||
|
||||
// ContainerType is the container type (sandbox or container) annotation
|
||||
ContainerType = "io.kubernetes.cri-o.ContainerType"
|
||||
|
||||
// Created is the container creation time annotation
|
||||
Created = "io.kubernetes.cri-o.Created"
|
||||
|
||||
// HostName is the container host name annotation
|
||||
HostName = "io.kubernetes.cri-o.HostName"
|
||||
|
||||
// IP is the container ipv4 or ipv6 address
|
||||
IP = "io.kubernetes.cri-o.IP"
|
||||
|
||||
// Image is the container image ID annotation
|
||||
Image = "io.kubernetes.cri-o.Image"
|
||||
|
||||
// ImageName is the container image name annotation
|
||||
ImageName = "io.kubernetes.cri-o.ImageName"
|
||||
|
||||
// ImageRef is the container image ref annotation
|
||||
ImageRef = "io.kubernetes.cri-o.ImageRef"
|
||||
|
||||
// KubeName is the kubernetes name annotation
|
||||
KubeName = "io.kubernetes.cri-o.KubeName"
|
||||
|
||||
// Labels are the kubernetes labels annotation
|
||||
Labels = "io.kubernetes.cri-o.Labels"
|
||||
|
||||
// LogPath is the container logging path annotation
|
||||
LogPath = "io.kubernetes.cri-o.LogPath"
|
||||
|
||||
// Metadata is the container metadata annotation
|
||||
Metadata = "io.kubernetes.cri-o.Metadata"
|
||||
|
||||
// Name is the pod name annotation
|
||||
Name = "io.kubernetes.cri-o.Name"
|
||||
|
||||
// PrivilegedRuntime is the annotation for the privileged runtime path
|
||||
PrivilegedRuntime = "io.kubernetes.cri-o.PrivilegedRuntime"
|
||||
|
||||
// ResolvPath is the resolver configuration path annotation
|
||||
ResolvPath = "io.kubernetes.cri-o.ResolvPath"
|
||||
|
||||
// HostnamePath is the path to /etc/hostname to bind mount annotation
|
||||
HostnamePath = "io.kubernetes.cri-o.HostnamePath"
|
||||
|
||||
// SandboxID is the sandbox ID annotation
|
||||
SandboxID = "io.kubernetes.cri-o.SandboxID"
|
||||
|
||||
// SandboxName is the sandbox name annotation
|
||||
SandboxName = "io.kubernetes.cri-o.SandboxName"
|
||||
|
||||
// ShmPath is the shared memory path annotation
|
||||
ShmPath = "io.kubernetes.cri-o.ShmPath"
|
||||
|
||||
// MountPoint is the mount point of the container rootfs
|
||||
MountPoint = "io.kubernetes.cri-o.MountPoint"
|
||||
|
||||
// TrustedSandbox is the annotation for trusted sandboxes
|
||||
TrustedSandbox = "io.kubernetes.cri-o.TrustedSandbox"
|
||||
|
||||
// TTY is the terminal path annotation
|
||||
TTY = "io.kubernetes.cri-o.TTY"
|
||||
|
||||
// Stdin is the stdin annotation
|
||||
Stdin = "io.kubernetes.cri-o.Stdin"
|
||||
|
||||
// StdinOnce is the stdin_once annotation
|
||||
StdinOnce = "io.kubernetes.cri-o.StdinOnce"
|
||||
|
||||
// Volumes is the volumes annotatoin
|
||||
Volumes = "io.kubernetes.cri-o.Volumes"
|
||||
)
|
||||
|
||||
// ContainerType values
|
||||
const (
|
||||
// ContainerTypeSandbox represents a pod sandbox container
|
||||
ContainerTypeSandbox = "sandbox"
|
||||
|
||||
// ContainerTypeContainer represents a container running within a pod
|
||||
ContainerTypeContainer = "container"
|
||||
)
|
||||
8
src/runtime/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
generated
vendored
8
src/runtime/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
generated
vendored
@@ -4,10 +4,12 @@
|
||||
|
||||
language: go
|
||||
go:
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- 1.13.x
|
||||
- 1.16.x
|
||||
- tip
|
||||
|
||||
arch:
|
||||
- AMD64
|
||||
- ppc64le
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
|
||||
20
src/runtime/vendor/github.com/cyphar/filepath-securejoin/README.md
generated
vendored
20
src/runtime/vendor/github.com/cyphar/filepath-securejoin/README.md
generated
vendored
@@ -7,6 +7,19 @@ standard library][go#20126]. The purpose of this function is to be a "secure"
|
||||
alternative to `filepath.Join`, and in particular it provides certain
|
||||
guarantees that are not provided by `filepath.Join`.
|
||||
|
||||
> **NOTE**: This code is *only* safe if you are not at risk of other processes
|
||||
> modifying path components after you've used `SecureJoin`. If it is possible
|
||||
> for a malicious process to modify path components of the resolved path, then
|
||||
> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There
|
||||
> are some Linux kernel patches I'm working on which might allow for a better
|
||||
> solution.][lwn-obeneath]
|
||||
>
|
||||
> In addition, with a slightly modified API it might be possible to use
|
||||
> `O_PATH` and verify that the opened path is actually the resolved one -- but
|
||||
> I have not done that yet. I might add it in the future as a helper function
|
||||
> to help users verify the path (we can't just return `/proc/self/fd/<foo>`
|
||||
> because that doesn't always work transparently for all users).
|
||||
|
||||
This is the function prototype:
|
||||
|
||||
```go
|
||||
@@ -16,8 +29,8 @@ func SecureJoin(root, unsafePath string) (string, error)
|
||||
This library **guarantees** the following:
|
||||
|
||||
* If no error is set, the resulting string **must** be a child path of
|
||||
`SecureJoin` and will not contain any symlink path components (they will all
|
||||
be expanded).
|
||||
`root` and will not contain any symlink path components (they will all be
|
||||
expanded).
|
||||
|
||||
* When expanding symlinks, all symlink path components **must** be resolved
|
||||
relative to the provided root. In particular, this can be considered a
|
||||
@@ -25,7 +38,7 @@ This library **guarantees** the following:
|
||||
these symlinks will **not** be expanded lexically (`filepath.Clean` is not
|
||||
called on the input before processing).
|
||||
|
||||
* Non-existant path components are unaffected by `SecureJoin` (similar to
|
||||
* Non-existent path components are unaffected by `SecureJoin` (similar to
|
||||
`filepath.EvalSymlinks`'s semantics).
|
||||
|
||||
* The returned path will always be `filepath.Clean`ed and thus not contain any
|
||||
@@ -57,6 +70,7 @@ func SecureJoin(root, unsafePath string) (string, error) {
|
||||
}
|
||||
```
|
||||
|
||||
[lwn-obeneath]: https://lwn.net/Articles/767547/
|
||||
[go#20126]: https://github.com/golang/go/issues/20126
|
||||
|
||||
### License ###
|
||||
|
||||
2
src/runtime/vendor/github.com/cyphar/filepath-securejoin/VERSION
generated
vendored
2
src/runtime/vendor/github.com/cyphar/filepath-securejoin/VERSION
generated
vendored
@@ -1 +1 @@
|
||||
0.2.2
|
||||
0.2.3
|
||||
|
||||
3
src/runtime/vendor/github.com/cyphar/filepath-securejoin/go.mod
generated
vendored
Normal file
3
src/runtime/vendor/github.com/cyphar/filepath-securejoin/go.mod
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
module github.com/cyphar/filepath-securejoin
|
||||
|
||||
go 1.13
|
||||
25
src/runtime/vendor/github.com/cyphar/filepath-securejoin/join.go
generated
vendored
25
src/runtime/vendor/github.com/cyphar/filepath-securejoin/join.go
generated
vendored
@@ -12,39 +12,20 @@ package securejoin
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been
|
||||
// evaluated in attempting to securely join the two given paths.
|
||||
var ErrSymlinkLoop = errors.Wrap(syscall.ELOOP, "secure join")
|
||||
|
||||
// IsNotExist tells you if err is an error that implies that either the path
|
||||
// accessed does not exist (or path components don't exist). This is
|
||||
// effectively a more broad version of os.IsNotExist.
|
||||
func IsNotExist(err error) bool {
|
||||
// If it's a bone-fide ENOENT just bail.
|
||||
if os.IsNotExist(errors.Cause(err)) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check that it's not actually an ENOTDIR, which in some cases is a more
|
||||
// convoluted case of ENOENT (usually involving weird paths).
|
||||
var errno error
|
||||
switch err := errors.Cause(err).(type) {
|
||||
case *os.PathError:
|
||||
errno = err.Err
|
||||
case *os.LinkError:
|
||||
errno = err.Err
|
||||
case *os.SyscallError:
|
||||
errno = err.Err
|
||||
}
|
||||
return errno == syscall.ENOTDIR || errno == syscall.ENOENT
|
||||
return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT)
|
||||
}
|
||||
|
||||
// SecureJoinVFS joins the two given path components (similar to Join) except
|
||||
@@ -68,7 +49,7 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
|
||||
n := 0
|
||||
for unsafePath != "" {
|
||||
if n > 255 {
|
||||
return "", ErrSymlinkLoop
|
||||
return "", &os.PathError{Op: "SecureJoin", Path: root + "/" + unsafePath, Err: syscall.ELOOP}
|
||||
}
|
||||
|
||||
// Next path component, p.
|
||||
|
||||
1
src/runtime/vendor/github.com/cyphar/filepath-securejoin/vendor.conf
generated
vendored
1
src/runtime/vendor/github.com/cyphar/filepath-securejoin/vendor.conf
generated
vendored
@@ -1 +0,0 @@
|
||||
github.com/pkg/errors v0.8.0
|
||||
20
src/runtime/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
generated
vendored
20
src/runtime/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
generated
vendored
@@ -7,9 +7,27 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [1.5.4] - 2022-04-25
|
||||
|
||||
* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
|
||||
* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
|
||||
* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
|
||||
|
||||
## [1.5.3] - 2022-04-22
|
||||
|
||||
* This version is retracted. An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
|
||||
|
||||
## [1.5.2] - 2022-04-21
|
||||
|
||||
* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
|
||||
* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
|
||||
* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
|
||||
* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
|
||||
* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
|
||||
|
||||
## [1.5.1] - 2021-08-24
|
||||
|
||||
* Revert Add AddRaw to not follow symlinks
|
||||
* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
|
||||
|
||||
## [1.5.0] - 2021-08-20
|
||||
|
||||
|
||||
17
src/runtime/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
generated
vendored
17
src/runtime/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
generated
vendored
@@ -48,18 +48,6 @@ fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Win
|
||||
|
||||
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
|
||||
|
||||
To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
|
||||
|
||||
* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
|
||||
* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
|
||||
* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
|
||||
* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
|
||||
* When you're done, you will want to halt or destroy the Vagrant boxes.
|
||||
|
||||
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
|
||||
|
||||
Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
|
||||
|
||||
### Maintainers
|
||||
|
||||
Help maintaining fsnotify is welcome. To be a maintainer:
|
||||
@@ -67,11 +55,6 @@ Help maintaining fsnotify is welcome. To be a maintainer:
|
||||
* Submit a pull request and sign the CLA as above.
|
||||
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
|
||||
|
||||
To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
|
||||
|
||||
All code changes should be internal pull requests.
|
||||
|
||||
Releases are tagged using [Semantic Versioning](http://semver.org/).
|
||||
|
||||
[hub]: https://github.com/github/hub
|
||||
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
|
||||
|
||||
24
src/runtime/vendor/github.com/fsnotify/fsnotify/README.md
generated
vendored
24
src/runtime/vendor/github.com/fsnotify/fsnotify/README.md
generated
vendored
@@ -1,12 +1,8 @@
|
||||
# File system notifications for Go
|
||||
|
||||
[](https://godoc.org/github.com/fsnotify/fsnotify) [](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
|
||||
[](https://pkg.go.dev/github.com/fsnotify/fsnotify) [](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [](https://github.com/fsnotify/fsnotify/issues/413)
|
||||
|
||||
fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
|
||||
|
||||
```console
|
||||
go get -u golang.org/x/sys/...
|
||||
```
|
||||
fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library.
|
||||
|
||||
Cross platform: Windows, Linux, BSD and macOS.
|
||||
|
||||
@@ -16,22 +12,20 @@ Cross platform: Windows, Linux, BSD and macOS.
|
||||
| kqueue | BSD, macOS, iOS\* | Supported |
|
||||
| ReadDirectoryChangesW | Windows | Supported |
|
||||
| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
|
||||
| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) |
|
||||
| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) |
|
||||
| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
|
||||
| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
|
||||
| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
|
||||
| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
|
||||
|
||||
\* Android and iOS are untested.
|
||||
|
||||
Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
|
||||
Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
|
||||
|
||||
## API stability
|
||||
|
||||
fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
|
||||
fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
|
||||
|
||||
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
|
||||
|
||||
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
|
||||
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/).
|
||||
|
||||
## Usage
|
||||
|
||||
@@ -84,10 +78,6 @@ func main() {
|
||||
|
||||
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
|
||||
|
||||
## Example
|
||||
|
||||
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
|
||||
|
||||
## FAQ
|
||||
|
||||
**When a file is moved to another directory is it still being watched?**
|
||||
|
||||
36
src/runtime/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
generated
vendored
Normal file
36
src/runtime/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
|
||||
// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct{}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops watching the the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
return nil
|
||||
}
|
||||
9
src/runtime/vendor/github.com/fsnotify/fsnotify/go.mod
generated
vendored
9
src/runtime/vendor/github.com/fsnotify/fsnotify/go.mod
generated
vendored
@@ -1,7 +1,10 @@
|
||||
module github.com/fsnotify/fsnotify
|
||||
|
||||
go 1.13
|
||||
go 1.16
|
||||
|
||||
require golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
|
||||
require golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
|
||||
|
||||
retract v1.5.0
|
||||
retract (
|
||||
v1.5.3 // Published an incorrect branch accidentally https://github.com/fsnotify/fsnotify/issues/445
|
||||
v1.5.0 // Contains symlink regression https://github.com/fsnotify/fsnotify/pull/394
|
||||
)
|
||||
|
||||
4
src/runtime/vendor/github.com/fsnotify/fsnotify/go.sum
generated
vendored
4
src/runtime/vendor/github.com/fsnotify/fsnotify/go.sum
generated
vendored
@@ -1,2 +1,2 @@
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
||||
13
src/runtime/vendor/github.com/fsnotify/fsnotify/inotify.go
generated
vendored
13
src/runtime/vendor/github.com/fsnotify/fsnotify/inotify.go
generated
vendored
@@ -163,6 +163,19 @@ func (w *Watcher) Remove(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// WatchList returns the directories and files that are being monitered.
|
||||
func (w *Watcher) WatchList() []string {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for pathname := range w.watches {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
|
||||
1
src/runtime/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
generated
vendored
1
src/runtime/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
generated
vendored
@@ -38,7 +38,6 @@ func newFdPoller(fd int) (*fdPoller, error) {
|
||||
poller.close()
|
||||
}
|
||||
}()
|
||||
poller.fd = fd
|
||||
|
||||
// Create epoll fd
|
||||
poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
|
||||
|
||||
13
src/runtime/vendor/github.com/fsnotify/fsnotify/kqueue.go
generated
vendored
13
src/runtime/vendor/github.com/fsnotify/fsnotify/kqueue.go
generated
vendored
@@ -148,6 +148,19 @@ func (w *Watcher) Remove(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// WatchList returns the directories and files that are being monitered.
|
||||
func (w *Watcher) WatchList() []string {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for pathname := range w.watches {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||
|
||||
|
||||
28
src/runtime/vendor/github.com/fsnotify/fsnotify/windows.go
generated
vendored
28
src/runtime/vendor/github.com/fsnotify/fsnotify/windows.go
generated
vendored
@@ -12,6 +12,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
@@ -96,6 +97,21 @@ func (w *Watcher) Remove(name string) error {
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// WatchList returns the directories and files that are being monitered.
|
||||
func (w *Watcher) WatchList() []string {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for _, entry := range w.watches {
|
||||
for _, watchEntry := range entry {
|
||||
entries = append(entries, watchEntry.path)
|
||||
}
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
const (
|
||||
// Options for AddWatch
|
||||
sysFSONESHOT = 0x80000000
|
||||
@@ -452,8 +468,16 @@ func (w *Watcher) readEvents() {
|
||||
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
|
||||
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
|
||||
// TODO: Consider using unsafe.Slice that is available from go1.17
|
||||
// https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang
|
||||
// instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name
|
||||
size := int(raw.FileNameLength / 2)
|
||||
var buf []uint16
|
||||
sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
|
||||
sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
|
||||
sh.Len = size
|
||||
sh.Cap = size
|
||||
name := syscall.UTF16ToString(buf)
|
||||
fullname := filepath.Join(watch.path, name)
|
||||
|
||||
var mask uint64
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user