diff --git a/src/agent/Cargo.lock b/src/agent/Cargo.lock index c2c16f388..d8ad331f3 100644 --- a/src/agent/Cargo.lock +++ b/src/agent/Cargo.lock @@ -1174,7 +1174,8 @@ dependencies = [ [[package]] name = "ttrpc" version = "0.3.0" -source = "git+https://github.com/containerd/ttrpc-rust.git?branch=0.3.0#ba1efe3bbb8f8af4895b7623ed1d11561e70e566" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa9da24c351f0feef5e66c0b28c18373a7ef3e1bfdfd5852170de494f9bf870" dependencies = [ "byteorder", "libc", diff --git a/src/agent/Cargo.toml b/src/agent/Cargo.toml index 3013b2d10..505313c85 100644 --- a/src/agent/Cargo.toml +++ b/src/agent/Cargo.toml @@ -11,7 +11,7 @@ rustjail = { path = "rustjail" } protocols = { path = "protocols" } netlink = { path = "netlink", features = ["with-log", "with-agent-handler"] } lazy_static = "1.3.0" -ttrpc = { git = "https://github.com/containerd/ttrpc-rust.git", branch="0.3.0" } +ttrpc = "0.3.0" protobuf = "=2.14.0" libc = "0.2.58" nix = "0.17.0" diff --git a/src/agent/oci/src/lib.rs b/src/agent/oci/src/lib.rs index 0c4794f6e..b51d78436 100644 --- a/src/agent/oci/src/lib.rs +++ b/src/agent/oci/src/lib.rs @@ -142,7 +142,7 @@ pub struct User { pub gid: u32, #[serde( default, - rename = "addtionalGids", + rename = "additionalGids", skip_serializing_if = "Vec::is_empty" )] pub additional_gids: Vec, @@ -302,6 +302,7 @@ pub struct LinuxBlockIODevice { #[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)] pub struct LinuxWeightDevice { + #[serde(flatten)] pub blk: LinuxBlockIODevice, #[serde(default, skip_serializing_if = "Option::is_none")] pub weight: Option, @@ -315,6 +316,7 @@ pub struct LinuxWeightDevice { #[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)] pub struct LinuxThrottleDevice { + #[serde(flatten)] pub blk: LinuxBlockIODevice, #[serde(default)] pub rate: u64, @@ -375,7 +377,7 @@ pub struct LinuxMemory { #[serde(default, skip_serializing_if = "Option::is_none", rename = 
"kernelTCP")] pub kernel_tcp: Option, #[serde(default, skip_serializing_if = "Option::is_none")] - pub swapiness: Option, + pub swappiness: Option, #[serde( default, skip_serializing_if = "Option::is_none", @@ -833,7 +835,7 @@ mod tests { } #[test] - fn test_deserialize_sepc() { + fn test_deserialize_spec() { let data = r#"{ "ociVersion": "1.0.1", "process": { @@ -1118,36 +1120,28 @@ mod tests { "leafWeight": 10, "weightDevice": [ { - "blk": { - "major": 8, - "minor": 0 - }, + "major": 8, + "minor": 0, "weight": 500, "leafWeight": 300 }, { - "blk":{ - "major": 8, - "minor": 16 - }, + "major": 8, + "minor": 16, "weight": 500 } ], "throttleReadBpsDevice": [ { - "blk":{ - "major": 8, - "minor": 0 - }, + "major": 8, + "minor": 0, "rate": 600 } ], "throttleWriteIOPSDevice": [ { - "blk":{ - "major": 8, - "minor": 16 - }, + "major": 8, + "minor": 16, "rate": 300 } ] @@ -1223,8 +1217,7 @@ mod tests { uid: 1, gid: 1, // incompatible with oci - // additional_gids: vec![5, 6], - additional_gids: vec![], + additional_gids: vec![5, 6], username: "".to_string(), }, args: vec!["sh".to_string()], @@ -1437,8 +1430,7 @@ mod tests { swap: Some(536870912), kernel: Some(-1), kernel_tcp: Some(-1), - // incompatible with oci - swapiness: None, + swappiness: Some(0), disable_oom_killer: Some(false), }), cpu: Some(crate::LinuxCPU { @@ -1591,25 +1583,6 @@ mod tests { vm: None, }; - // warning : incompatible with oci : https://github.com/opencontainers/runtime-spec/blob/master/config.md - // 1. User use addtionalGids while oci use additionalGids - // 2. LinuxMemory use swapiness while oci use swappiness - // 3. 
LinuxWeightDevice with blk - // { - // "blk": { - // "major": 8, - // "minor": 0 - // }, - // "weight": 500, - // "leafWeight": 300 - // } - // oci without blk - // { - // "major": 8, - // "minor": 0, - // "weight": 500, - // "leafWeight": 300 - // } let current: crate::Spec = serde_json::from_str(data).unwrap(); assert_eq!(expected, current); } diff --git a/src/agent/protocols/Cargo.toml b/src/agent/protocols/Cargo.toml index 7b383a217..59ab72427 100644 --- a/src/agent/protocols/Cargo.toml +++ b/src/agent/protocols/Cargo.toml @@ -5,7 +5,7 @@ authors = ["The Kata Containers community "] edition = "2018" [dependencies] -ttrpc = { git = "https://github.com/containerd/ttrpc-rust.git", branch="0.3.0" } +ttrpc = "0.3.0" protobuf = "=2.14.0" futures = "0.1.27" diff --git a/src/agent/rustjail/src/cgroups/fs/mod.rs b/src/agent/rustjail/src/cgroups/fs/mod.rs index c7fcee8c8..03c95efd8 100644 --- a/src/agent/rustjail/src/cgroups/fs/mod.rs +++ b/src/agent/rustjail/src/cgroups/fs/mod.rs @@ -67,6 +67,15 @@ pub fn load<'a>(h: Box<&'a dyn cgroups::Hierarchy>, path: &str) -> Option { + match $cg.controller_of() { + Some(c) => c, + None => return SingularPtrField::none(), + } + }; +} + #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Manager { pub paths: HashMap, @@ -421,13 +430,13 @@ fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool } } - if let Some(swapiness) = memory.swapiness { - if swapiness >= 0 && swapiness <= 100 { - mem_controller.set_swappiness(swapiness as u64)?; + if let Some(swappiness) = memory.swappiness { + if swappiness >= 0 && swappiness <= 100 { + mem_controller.set_swappiness(swappiness as u64)?; } else { return Err(anyhow!( "invalid value:{}. valid memory swappiness range is 0-100", - swapiness + swappiness )); } } @@ -605,10 +614,8 @@ lazy_static! 
{ } fn get_cpu_stats(cg: &cgroups::Cgroup) -> SingularPtrField { - let cpu_controller: &CpuController = cg.controller_of().unwrap(); - + let cpu_controller: &CpuController = get_controller_or_return_singular_none!(cg); let stat = cpu_controller.cpu().stat; - let h = lines_to_map(&stat); SingularPtrField::some(ThrottlingData { @@ -621,27 +628,18 @@ fn get_cpu_stats(cg: &cgroups::Cgroup) -> SingularPtrField { } fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField { - let cpuacct_controller: Option<&CpuAcctController> = cg.controller_of(); - if cpuacct_controller.is_none() { - if cg.v2() { - return SingularPtrField::some(CpuUsage { - total_usage: 0, - percpu_usage: vec![], - usage_in_kernelmode: 0, - usage_in_usermode: 0, - unknown_fields: UnknownFields::default(), - cached_size: CachedSize::default(), - }); - } + if let Some(cpuacct_controller) = cg.controller_of::() { + let cpuacct = cpuacct_controller.cpuacct(); - // try to get from cpu controller - let cpu_controller: &CpuController = cg.controller_of().unwrap(); - let stat = cpu_controller.cpu().stat; - let h = lines_to_map(&stat); - let usage_in_usermode = *h.get("user_usec").unwrap(); - let usage_in_kernelmode = *h.get("system_usec").unwrap(); - let total_usage = *h.get("usage_usec").unwrap(); - let percpu_usage = vec![]; + let h = lines_to_map(&cpuacct.stat); + let usage_in_usermode = + (((*h.get("user").unwrap() * NANO_PER_SECOND) as f64) / *CLOCK_TICKS) as u64; + let usage_in_kernelmode = + (((*h.get("system").unwrap() * NANO_PER_SECOND) as f64) / *CLOCK_TICKS) as u64; + + let total_usage = cpuacct.usage; + + let percpu_usage = line_to_vec(&cpuacct.usage_percpu); return SingularPtrField::some(CpuUsage { total_usage, @@ -653,18 +651,25 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField { }); } - let cpuacct_controller = cpuacct_controller.unwrap(); - let cpuacct = cpuacct_controller.cpuacct(); + if cg.v2() { + return SingularPtrField::some(CpuUsage { + total_usage: 0, + percpu_usage: 
vec![], + usage_in_kernelmode: 0, + usage_in_usermode: 0, + unknown_fields: UnknownFields::default(), + cached_size: CachedSize::default(), + }); + } - let h = lines_to_map(&cpuacct.stat); - let usage_in_usermode = - (((*h.get("user").unwrap() * NANO_PER_SECOND) as f64) / *CLOCK_TICKS) as u64; - let usage_in_kernelmode = - (((*h.get("system").unwrap() * NANO_PER_SECOND) as f64) / *CLOCK_TICKS) as u64; - - let total_usage = cpuacct.usage; - - let percpu_usage = line_to_vec(&cpuacct.usage_percpu); + // try to get from cpu controller + let cpu_controller: &CpuController = get_controller_or_return_singular_none!(cg); + let stat = cpu_controller.cpu().stat; + let h = lines_to_map(&stat); + let usage_in_usermode = *h.get("user_usec").unwrap(); + let usage_in_kernelmode = *h.get("system_usec").unwrap(); + let total_usage = *h.get("usage_usec").unwrap(); + let percpu_usage = vec![]; SingularPtrField::some(CpuUsage { total_usage, @@ -677,7 +682,7 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField { } fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField { - let memory_controller: &MemController = cg.controller_of().unwrap(); + let memory_controller: &MemController = get_controller_or_return_singular_none!(cg); // cache from memory stat let memory = memory_controller.memory_stat(); @@ -734,7 +739,7 @@ fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField { } fn get_pids_stats(cg: &cgroups::Cgroup) -> SingularPtrField { - let pid_controller: &PidController = cg.controller_of().unwrap(); + let pid_controller: &PidController = get_controller_or_return_singular_none!(cg); let current = pid_controller.get_pid_current().unwrap_or(0); let max = pid_controller.get_pid_max(); @@ -841,7 +846,7 @@ fn build_blkio_stats_entry(major: i16, minor: i16, op: &str, value: u64) -> Blki } fn get_blkio_stats_v2(cg: &cgroups::Cgroup) -> SingularPtrField { - let blkio_controller: &BlkIoController = cg.controller_of().unwrap(); + let blkio_controller: &BlkIoController 
= get_controller_or_return_singular_none!(cg); let blkio = blkio_controller.blkio(); let mut resp = BlkioStats::new(); @@ -869,7 +874,7 @@ fn get_blkio_stats(cg: &cgroups::Cgroup) -> SingularPtrField { return get_blkio_stats_v2(&cg); } - let blkio_controller: &BlkIoController = cg.controller_of().unwrap(); + let blkio_controller: &BlkIoController = get_controller_or_return_singular_none!(cg); let blkio = blkio_controller.blkio(); let mut m = BlkioStats::new(); diff --git a/src/agent/rustjail/src/lib.rs b/src/agent/rustjail/src/lib.rs index 63dc77046..af9847b28 100644 --- a/src/agent/rustjail/src/lib.rs +++ b/src/agent/rustjail/src/lib.rs @@ -309,7 +309,7 @@ pub fn resources_grpc_to_oci(res: &grpcLinuxResources) -> ociLinuxResources { swap: Some(mem.Swap), kernel: Some(mem.Kernel), kernel_tcp: Some(mem.KernelTCP), - swapiness: Some(mem.Swappiness as i64), + swappiness: Some(mem.Swappiness as i64), disable_oom_killer: Some(mem.DisableOOMKiller), }) } else { diff --git a/src/agent/rustjail/src/mount.rs b/src/agent/rustjail/src/mount.rs index 1942fcc5b..5f4c9e26d 100644 --- a/src/agent/rustjail/src/mount.rs +++ b/src/agent/rustjail/src/mount.rs @@ -185,6 +185,7 @@ pub fn init_rootfs( None::<&str>, )?; + let mut bind_mount_dev = false; for m in &spec.mounts { let (mut flags, data) = parse_mount(&m); if !m.destination.starts_with("/") || m.destination.contains("..") { @@ -198,6 +199,9 @@ pub fn init_rootfs( mount_cgroups(cfd_log, &m, rootfs, flags, &data, cpath, mounts)?; } else { if m.destination == "/dev" { + if m.r#type == "bind" { + bind_mount_dev = true; + } flags &= !MsFlags::MS_RDONLY; } @@ -239,9 +243,14 @@ pub fn init_rootfs( let olddir = unistd::getcwd()?; unistd::chdir(rootfs)?; - default_symlinks()?; - create_devices(&linux.devices, bind_device)?; - ensure_ptmx()?; + // in case the /dev directory was binded mount from guest, + // then there's no need to create devices nodes and symlinks + // in /dev. 
+ if !bind_mount_dev { + default_symlinks()?; + create_devices(&linux.devices, bind_device)?; + ensure_ptmx()?; + } unistd::chdir(&olddir)?; diff --git a/src/agent/rustjail/src/sync.rs b/src/agent/rustjail/src/sync.rs index 9e98b0ad7..422827b94 100644 --- a/src/agent/rustjail/src/sync.rs +++ b/src/agent/rustjail/src/sync.rs @@ -72,7 +72,15 @@ fn read_count(fd: RawFd, count: usize) -> Result> { } } - Ok(v[0..len].to_vec()) + if len != count { + Err(anyhow::anyhow!( + "invalid read count expect {} get {}", + count, + len + )) + } else { + Ok(v[0..len].to_vec()) + } } pub fn read_sync(fd: RawFd) -> Result> { diff --git a/src/agent/rustjail/src/validator.rs b/src/agent/rustjail/src/validator.rs index 4e3ce4318..554ec40e7 100644 --- a/src/agent/rustjail/src/validator.rs +++ b/src/agent/rustjail/src/validator.rs @@ -4,7 +4,7 @@ // use crate::container::Config; -use anyhow::{anyhow, Result}; +use anyhow::{anyhow, Context, Result}; use lazy_static; use nix::errno::Errno; use oci::{LinuxIDMapping, LinuxNamespace, Spec}; @@ -52,7 +52,11 @@ fn rootfs(root: &str) -> Result<()> { continue; } - stack.push(c.as_os_str().to_str().unwrap().to_string()); + if let Some(v) = c.as_os_str().to_str() { + stack.push(v.to_string()); + } else { + return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); + } } let mut cleaned = PathBuf::from("/"); @@ -60,7 +64,7 @@ fn rootfs(root: &str) -> Result<()> { cleaned.push(e); } - let canon = path.canonicalize()?; + let canon = path.canonicalize().context("canonicalize")?; if cleaned != canon { // There is symbolic in path return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); @@ -78,10 +82,10 @@ fn hostname(oci: &Spec) -> Result<()> { return Ok(()); } - if oci.linux.is_none() { - return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); - } - let linux = oci.linux.as_ref().unwrap(); + let linux = oci + .linux + .as_ref() + .ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?; if !contain_namespace(&linux.namespaces, "uts") { return 
Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); } @@ -90,8 +94,11 @@ fn hostname(oci: &Spec) -> Result<()> { } fn security(oci: &Spec) -> Result<()> { - let linux = oci.linux.as_ref().unwrap(); - if linux.masked_paths.len() == 0 && linux.readonly_paths.len() == 0 { + let linux = oci + .linux + .as_ref() + .ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?; + if linux.masked_paths.is_empty() && linux.readonly_paths.is_empty() { return Ok(()); } @@ -115,7 +122,10 @@ fn idmapping(maps: &Vec) -> Result<()> { } fn usernamespace(oci: &Spec) -> Result<()> { - let linux = oci.linux.as_ref().unwrap(); + let linux = oci + .linux + .as_ref() + .ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?; if contain_namespace(&linux.namespaces, "user") { let user_ns = PathBuf::from("/proc/self/ns/user"); if !user_ns.exists() { @@ -123,8 +133,8 @@ fn usernamespace(oci: &Spec) -> Result<()> { } // check if idmappings is correct, at least I saw idmaps // with zero size was passed to agent - idmapping(&linux.uid_mappings)?; - idmapping(&linux.gid_mappings)?; + idmapping(&linux.uid_mappings).context("idmapping uid")?; + idmapping(&linux.gid_mappings).context("idmapping gid")?; } else { // no user namespace but idmap if linux.uid_mappings.len() != 0 || linux.gid_mappings.len() != 0 { @@ -136,7 +146,10 @@ fn usernamespace(oci: &Spec) -> Result<()> { } fn cgroupnamespace(oci: &Spec) -> Result<()> { - let linux = oci.linux.as_ref().unwrap(); + let linux = oci + .linux + .as_ref() + .ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?; if contain_namespace(&linux.namespaces, "cgroup") { let path = PathBuf::from("/proc/self/ns/cgroup"); if !path.exists() { @@ -165,14 +178,20 @@ fn check_host_ns(path: &str) -> Result<()> { let cpath = PathBuf::from(path); let hpath = PathBuf::from("/proc/self/ns/net"); - let real_hpath = hpath.read_link()?; - let meta = cpath.symlink_metadata()?; + let real_hpath = hpath + .read_link() + .context(format!("read link {:?}", hpath))?; + let meta = 
cpath + .symlink_metadata() + .context(format!("symlink metadata {:?}", cpath))?; let file_type = meta.file_type(); if !file_type.is_symlink() { return Ok(()); } - let real_cpath = cpath.read_link()?; + let real_cpath = cpath + .read_link() + .context(format!("read link {:?}", cpath))?; if real_cpath == real_hpath { return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); } @@ -181,7 +200,10 @@ fn check_host_ns(path: &str) -> Result<()> { } fn sysctl(oci: &Spec) -> Result<()> { - let linux = oci.linux.as_ref().unwrap(); + let linux = oci + .linux + .as_ref() + .ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?; for (key, _) in linux.sysctl.iter() { if SYSCTLS.contains_key(key.as_str()) || key.starts_with("fs.mqueue.") { if contain_namespace(&linux.namespaces, "ipc") { @@ -192,16 +214,8 @@ fn sysctl(oci: &Spec) -> Result<()> { } if key.starts_with("net.") { - if !contain_namespace(&linux.namespaces, "network") { - return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); - } - - let net = get_namespace_path(&linux.namespaces, "network")?; - if net.is_empty() || net == "".to_string() { - continue; - } - - check_host_ns(net.as_str())?; + // the network ns is shared with the guest, don't expect to find it in spec + continue; } if contain_namespace(&linux.namespaces, "uts") { @@ -220,7 +234,10 @@ fn sysctl(oci: &Spec) -> Result<()> { } fn rootless_euid_mapping(oci: &Spec) -> Result<()> { - let linux = oci.linux.as_ref().unwrap(); + let linux = oci + .linux + .as_ref() + .ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?; if !contain_namespace(&linux.namespaces, "user") { return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); } @@ -243,7 +260,10 @@ fn has_idmapping(maps: &Vec, id: u32) -> bool { } fn rootless_euid_mount(oci: &Spec) -> Result<()> { - let linux = oci.linux.as_ref().unwrap(); + let linux = oci + .linux + .as_ref() + .ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?; for mnt in oci.mounts.iter() { for opt in mnt.options.iter() { @@ 
-254,7 +274,10 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> { return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); } - let id = fields[1].trim().parse::()?; + let id = fields[1] + .trim() + .parse::() + .context(format!("parse field {}", &fields[1]))?; if opt.starts_with("uid=") { if !has_idmapping(&linux.uid_mappings, id) { @@ -274,34 +297,37 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> { } fn rootless_euid(oci: &Spec) -> Result<()> { - rootless_euid_mapping(oci)?; - rootless_euid_mount(oci)?; + rootless_euid_mapping(oci).context("rootless euid mapping")?; + rootless_euid_mount(oci).context("rootless euid mount")?; Ok(()) } pub fn validate(conf: &Config) -> Result<()> { lazy_static::initialize(&SYSCTLS); - let oci = conf.spec.as_ref().unwrap(); + let oci = conf + .spec + .as_ref() + .ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?; if oci.linux.is_none() { return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); } - if oci.root.is_none() { - return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); - } - let root = oci.root.as_ref().unwrap().path.as_str(); + let root = match oci.root.as_ref() { + Some(v) => v.path.as_str(), + None => return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))), + }; - rootfs(root)?; - network(oci)?; - hostname(oci)?; - security(oci)?; - usernamespace(oci)?; - cgroupnamespace(oci)?; - sysctl(&oci)?; + rootfs(root).context("rootfs")?; + network(oci).context("network")?; + hostname(oci).context("hostname")?; + security(oci).context("security")?; + usernamespace(oci).context("usernamespace")?; + cgroupnamespace(oci).context("cgroupnamespace")?; + sysctl(&oci).context("sysctl")?; if conf.rootless_euid { - rootless_euid(oci)?; + rootless_euid(oci).context("rootless euid")?; } Ok(()) diff --git a/src/agent/src/namespace.rs b/src/agent/src/namespace.rs index f5c6fa3b0..db0b2ffe3 100644 --- a/src/agent/src/namespace.rs +++ b/src/agent/src/namespace.rs @@ -16,7 +16,6 @@ use
crate::mount::{BareMount, FLAGS}; use slog::Logger; -//use container::Process; const PERSISTENT_NS_DIR: &str = "/var/run/sandbox-ns"; pub const NSTYPEIPC: &str = "ipc"; pub const NSTYPEUTS: &str = "uts"; @@ -81,7 +80,10 @@ impl Namespace { fs::create_dir_all(&self.persistent_ns_dir)?; let ns_path = PathBuf::from(&self.persistent_ns_dir); - let ns_type = self.ns_type.clone(); + let ns_type = self.ns_type; + if ns_type == NamespaceType::PID { + return Err(anyhow!("Cannot persist namespace of PID type")); + } let logger = self.logger.clone(); let new_ns_path = ns_path.join(&ns_type.get()); @@ -202,7 +204,7 @@ mod tests { assert!(remove_mounts(&vec![ns_ipc.unwrap().path]).is_ok()); let logger = slog::Logger::root(slog::Discard, o!()); - let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap(); + let tmpdir = Builder::new().prefix("uts").tempdir().unwrap(); let ns_uts = Namespace::new(&logger) .as_uts("test_hostname") @@ -211,6 +213,17 @@ mod tests { assert!(ns_uts.is_ok()); assert!(remove_mounts(&vec![ns_uts.unwrap().path]).is_ok()); + + // Check it cannot persist pid namespaces. 
+ let logger = slog::Logger::root(slog::Discard, o!()); + let tmpdir = Builder::new().prefix("pid").tempdir().unwrap(); + + let ns_pid = Namespace::new(&logger) + .as_pid() + .set_root_dir(tmpdir.path().to_str().unwrap()) + .setup(); + + assert!(ns_pid.is_err()); } #[test] diff --git a/src/agent/src/sandbox.rs b/src/agent/src/sandbox.rs index ee5ab08f9..5ba25218b 100644 --- a/src/agent/src/sandbox.rs +++ b/src/agent/src/sandbox.rs @@ -3,7 +3,6 @@ // SPDX-License-Identifier: Apache-2.0 // -//use crate::container::Container; use crate::linux_abi::*; use crate::mount::{get_mount_fs_type, remove_mounts, TYPEROOTFS}; use crate::namespace::Namespace; @@ -233,6 +232,10 @@ impl Sandbox { online_memory(&self.logger)?; } + if req.nb_cpus == 0 { + return Ok(()); + } + let cpuset = rustjail_cgroups::fs::get_guest_cpuset()?; for (_, ctr) in self.containers.iter() { @@ -393,7 +396,6 @@ fn online_memory(logger: &Logger) -> Result<()> { #[cfg(test)] mod tests { - //use rustjail::Error; use super::Sandbox; use crate::{mount::BareMount, skip_if_not_root}; use anyhow::Error; diff --git a/src/runtime/Makefile b/src/runtime/Makefile index 690755f2a..0c03b7065 100644 --- a/src/runtime/Makefile +++ b/src/runtime/Makefile @@ -68,7 +68,7 @@ NETMON_TARGET = $(PROJECT_TYPE)-netmon NETMON_TARGET_OUTPUT = $(CURDIR)/$(NETMON_TARGET) BINLIBEXECLIST += $(NETMON_TARGET) -DESTDIR := / +DESTDIR ?= / ifeq ($(PREFIX),) PREFIX := /usr diff --git a/src/runtime/containerd-shim-v2/delete.go b/src/runtime/containerd-shim-v2/delete.go index 70631c4fa..f1d7ebbc5 100644 --- a/src/runtime/containerd-shim-v2/delete.go +++ b/src/runtime/containerd-shim-v2/delete.go @@ -17,13 +17,12 @@ import ( func deleteContainer(ctx context.Context, s *service, c *container) error { if !c.cType.IsSandbox() { if c.status != task.StatusStopped { - _, err := s.sandbox.StopContainer(c.id, false) - if err != nil { + if _, err := s.sandbox.StopContainer(c.id, false); err != nil && !isNotFound(err) { return err } } - if _, err := 
s.sandbox.DeleteContainer(c.id); err != nil { + if _, err := s.sandbox.DeleteContainer(c.id); err != nil && !isNotFound(err) { return err } } diff --git a/src/runtime/virtcontainers/kata_agent.go b/src/runtime/virtcontainers/kata_agent.go index b88bec448..58ff1a4e8 100644 --- a/src/runtime/virtcontainers/kata_agent.go +++ b/src/runtime/virtcontainers/kata_agent.go @@ -635,10 +635,10 @@ func (k *kataAgent) listInterfaces() ([]*pbTypes.Interface, error) { return nil, err } resultInterfaces, ok := resultingInterfaces.(*grpc.Interfaces) - if ok { - return resultInterfaces.Interfaces, err + if !ok { + return nil, fmt.Errorf("Unexpected type %T for interfaces", resultingInterfaces) } - return nil, err + return resultInterfaces.Interfaces, nil } func (k *kataAgent) listRoutes() ([]*pbTypes.Route, error) { @@ -648,10 +648,10 @@ func (k *kataAgent) listRoutes() ([]*pbTypes.Route, error) { return nil, err } resultRoutes, ok := resultingRoutes.(*grpc.Routes) - if ok { - return resultRoutes.Routes, err + if !ok { + return nil, fmt.Errorf("Unexpected type %T for routes", resultingRoutes) } - return nil, err + return resultRoutes.Routes, nil } func (k *kataAgent) getAgentURL() (string, error) { diff --git a/src/runtime/virtcontainers/qemu.go b/src/runtime/virtcontainers/qemu.go index 5c64f1ad4..88e5ab71e 100644 --- a/src/runtime/virtcontainers/qemu.go +++ b/src/runtime/virtcontainers/qemu.go @@ -2203,6 +2203,12 @@ func (q *qemu) toGrpc() ([]byte, error) { } func (q *qemu) save() (s persistapi.HypervisorState) { + + // If QEMU isn't even running, there isn't any state to save + if q.stopped { + return + } + pids := q.getPids() if len(pids) != 0 { s.Pid = pids[0] diff --git a/src/trace-forwarder/Makefile b/src/trace-forwarder/Makefile index ae7332592..cfb46560e 100644 --- a/src/trace-forwarder/Makefile +++ b/src/trace-forwarder/Makefile @@ -13,10 +13,13 @@ clean: test: +install: + check: .PHONY: \ build \ test \ check \ + install \ clean diff --git a/tools/agent-ctl/Makefile 
b/tools/agent-ctl/Makefile index ae7332592..54c948a81 100644 --- a/tools/agent-ctl/Makefile +++ b/tools/agent-ctl/Makefile @@ -13,10 +13,13 @@ clean: test: +install: + check: .PHONY: \ build \ test \ check \ + install \ clean diff --git a/tools/osbuilder/rootfs-builder/gentoo/Dockerfile.in b/tools/osbuilder/rootfs-builder/gentoo/Dockerfile.in new file mode 100644 index 000000000..5c6e2fdce --- /dev/null +++ b/tools/osbuilder/rootfs-builder/gentoo/Dockerfile.in @@ -0,0 +1,13 @@ +# +# Copyright (c) 2020 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +from docker.io/gentoo/stage3-amd64:latest + +# This dockerfile needs to provide all the componets need to build a rootfs +# Install any package need to create a rootfs (package manager, extra tools) + +# This will install the proper golang to build Kata components +@INSTALL_GO@ +@INSTALL_RUST@ diff --git a/tools/osbuilder/rootfs-builder/gentoo/config.sh b/tools/osbuilder/rootfs-builder/gentoo/config.sh new file mode 100644 index 000000000..ec83a9d9d --- /dev/null +++ b/tools/osbuilder/rootfs-builder/gentoo/config.sh @@ -0,0 +1,22 @@ +# This is a configuration file add extra variables to +# +# Copyright (c) 2020 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# be used by build_rootfs() from rootfs_lib.sh the variables will be +# loaded just before call the function. For more information see the +# rootfs-builder/README.md file. 
+ +OS_VERSION=${OS_VERSION:-latest} +OS_NAME=${OS_NAME:-"gentoo"} + +# packages to be installed by default +PACKAGES="sys-apps/systemd net-firewall/iptables net-misc/chrony" + +# Init process must be one of {systemd,kata-agent} +INIT_PROCESS=systemd +# List of zero or more architectures to exclude from build, +# as reported by `uname -m` +ARCH_EXCLUDE_LIST=( aarch64 ppc64le s390x ) + +[ "$SECCOMP" = "yes" ] && PACKAGES+=" sys-libs/libseccomp" || true diff --git a/tools/osbuilder/rootfs-builder/gentoo/rootfs_lib.sh b/tools/osbuilder/rootfs-builder/gentoo/rootfs_lib.sh new file mode 100644 index 000000000..0226bb250 --- /dev/null +++ b/tools/osbuilder/rootfs-builder/gentoo/rootfs_lib.sh @@ -0,0 +1,210 @@ +# Copyright (c) 2020 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +# - Arguments +# rootfs_dir=$1 +# +# - Optional environment variables +# +# EXTRA_PKGS: Variable to add extra PKGS provided by the user +# +# BIN_AGENT: Name of the Kata-Agent binary +# +# REPO_URL: URL to distribution repository ( should be configured in +# config.sh file) +# +# Any other configuration variable for a specific distro must be added +# and documented on its own config.sh +# +# - Expected result +# +# rootfs_dir populated with rootfs pkgs +# It must provide a binary in /sbin/init +# +gentoo_portage_container=gentoo_portage +gentoo_local_portage_dir="${HOME}/gentoo-$(date +%s)" + +build_rootfs() { + # Mandatory + local ROOTFS_DIR=$1 + + # In case of support EXTRA packages, use it to allow + # users to add more packages to the base rootfs + local EXTRA_PKGS=${EXTRA_PKGS:-} + + # Populate ROOTFS_DIR + # Must provide /sbin/init and /bin/${BIN_AGENT} + check_root + mkdir -p "${ROOTFS_DIR}" + + # trim whitespace + PACKAGES=$(echo $PACKAGES |xargs ) + EXTRA_PKGS=$(echo $EXTRA_PKGS |xargs) + + # extra packages are added to packages and finally passed to debootstrap + if [ "${EXTRA_PKGS}" = "" ]; then + echo "no extra packages" + else + PACKAGES="${PACKAGES} ${EXTRA_PKGS}" + fi 
+ + local packageuseconf="/etc/portage/package.use/user" + local makeconf="/etc/portage/make.conf" + local systemd_optimizations=( + acl + -apparmor + -audit + cgroup-hybrid + -cryptsetup + -curl + -dns-over-tls + -gcrypt + -gnuefi + -homed + -http + -hwdb + -idn + -importd + kmod + -lz4 + -lzma + -nat + -pkcs11 + -policykit + -pwquality + -qrcode + -repart + -resolvconf + sysv-utils + -test + -xkb + -zstd + ) + + local packages_optimizations=( + -abi_x86_32 + -abi_x86_x32 + -debug + -doc + -examples + multicall + -ncurses + -nls + -selinux + systemd + -udev + -unicode + -X + ) + + local compiler_optimizations=( + -O3 + -fassociative-math + -fasynchronous-unwind-tables + -feliminate-unused-debug-types + -fexceptions + -ffat-lto-objects + -fno-semantic-interposition + -fno-signed-zeros + -fno-trapping-math + -fstack-protector + -ftree-loop-distribute-patterns + -m64 + -mtune=skylake + --param=ssp-buffer-size=32 + -pipe + -Wl,--copy-dt-needed-entries + -Wp,-D_REENTRANT + -Wl,--enable-new-dtags + -Wl,-sort-common + -Wl,-z -Wl,now + -Wl,-z -Wl,relro + ) + + local build_dependencies=( + dev-vcs/git + ) + + local conflicting_packages=( + net-misc/netifrc sys-apps/sysvinit + sys-fs/eudev sys-apps/openrc + virtual/service-manager + ) + + # systemd optimizations + echo "sys-apps/systemd ${systemd_optimizations[*]}" >> ${packageuseconf} + echo "MAKEOPTS=\"-j$(nproc)\"" >> ${makeconf} + + # Packages optimizations + echo "USE=\"${packages_optimizations[*]}\"" >> ${makeconf} + + # compiler optimizations + echo "CFLAGS=\"${compiler_optimizations[*]}\"" >> ${makeconf} + echo 'CXXFLAGS="${CFLAGS}"' >> ${makeconf} + + # remove conflicting packages + emerge -Cv $(echo "${conflicting_packages[*]}") + + # Get the latest systemd portage profile and set it + systemd_profile=$(profile-config list | grep stable | grep -E "[[:digit:]]/systemd" | xargs | cut -d' ' -f2) + profile-config set "${systemd_profile}" + + # Install build dependencies + emerge --newuse $(echo 
"${build_dependencies[*]}") + + quickpkg --include-unmodified-config=y "*/*" + + # Install needed packages excluding conflicting packages + ROOT=${ROOTFS_DIR} emerge --exclude "$(echo "${conflicting_packages[*]}")" --newuse -k ${PACKAGES} + + pushd ${ROOTFS_DIR} + + # systemd will need this library + cp /usr/lib/gcc/x86_64-pc-linux-gnu/*/libgcc_s.so* lib64/ + + # Clean up the rootfs. there are things that we don't need + rm -rf etc/{udev,X11,kernel,runlevels,terminfo,init.d} + rm -rf var/lib/{gentoo,portage} + rm -rf var/{db,cache} + rm -rf usr/share/* + rm -rf usr/lib/{udev,gconv,kernel} + rm -rf usr/{include,local} + rm -rf usr/lib64/gconv + rm -rf lib/{udev,gentoo} + + # Make sure important directories exist in the rootfs + ln -s ../run var/run + mkdir -p proc opt sys dev home root + + popd +} + +before_starting_container() { + gentoo_portage_image="gentoo/portage" + + if [ "${OS_VERSION}" = "latest" ];then + ${container_engine} pull "${gentoo_portage_image}:latest" + OS_VERSION=$(docker image inspect -f {{.Created}} ${gentoo_portage_image} | cut -dT -f1 | sed 's|-||g') + else + ${container_engine} pull "${gentoo_portage_image}:${OS_VERSION}" + fi + + # create portage volume and container + ${container_engine} create -v /usr/portage --name "${gentoo_portage_container}" "${gentoo_portage_image}" /bin/true +} + +after_stopping_container() { + # Get the list of volumes + volumes="" + for i in $(seq $(${container_engine} inspect -f "{{len .Mounts}}" "${gentoo_portage_container}")); do + volumes+="$(${container_engine} inspect -f "{{(index .Mounts $((i-1))).Name}}" "${gentoo_portage_container}") " + done + + # remove portage container + ${container_engine} rm -f "${gentoo_portage_container}" + sudo rm -rf "${gentoo_local_portage_dir}" + + # remove portage volumes + ${container_engine} volume rm -f ${volumes} +} diff --git a/tools/osbuilder/rootfs-builder/rootfs.sh b/tools/osbuilder/rootfs-builder/rootfs.sh index 92c2e5582..4d69e89d9 100755 --- 
a/tools/osbuilder/rootfs-builder/rootfs.sh +++ b/tools/osbuilder/rootfs-builder/rootfs.sh @@ -181,19 +181,38 @@ docker_extra_args() { local args="" + # Required to mount inside a container + args+=" --cap-add SYS_ADMIN" + # Requred to chroot + args+=" --cap-add SYS_CHROOT" + # debootstrap needs to create device nodes to properly function + args+=" --cap-add MKNOD" + case "$1" in - ubuntu | debian) - # Requred to chroot - args+=" --cap-add SYS_CHROOT" - # debootstrap needs to create device nodes to properly function - args+=" --cap-add MKNOD" - ;& - suse) - # Required to mount inside a container - args+=" --cap-add SYS_ADMIN" - # When AppArmor is enabled, mounting inside a container is blocked with docker-default profile. - # See https://github.com/moby/moby/issues/16429 - args+=" --security-opt apparmor=unconfined" + gentoo) + # Required to build glibc + args+=" --cap-add SYS_PTRACE" + # mount portage volume + args+=" -v ${gentoo_local_portage_dir}:/usr/portage/packages" + args+=" --volumes-from ${gentoo_portage_container}" + ;; + debian | ubuntu | suse) + source /etc/os-release + + case "$ID" in + fedora | centos | rhel) + # Depending on the podman version, we'll face issues when passing + # `--security-opt apparmor=unconfined` on a system where not apparmor is not installed. + # Because of this, let's just avoid adding this option when the host OS comes from Red Hat. + + # A explict check for podman, at least for now, can be avoided. + ;; + *) + # When AppArmor is enabled, mounting inside a container is blocked with docker-default profile. 
+ # See https://github.com/moby/moby/issues/16429 + args+=" --security-opt apparmor=unconfined" + ;; + esac ;; *) ;; @@ -400,6 +419,9 @@ build_rootfs_distro() done fi + before_starting_container + trap after_stopping_container EXIT + #Make sure we use a compatible runtime to build rootfs # In case Clear Containers Runtime is installed we dont want to hit issue: #https://github.com/clearcontainers/runtime/issues/828 @@ -503,6 +525,10 @@ EOT mkdir -p "${ROOTFS_DIR}/etc" case "${distro}" in + "gentoo") + chrony_conf_file="${ROOTFS_DIR}/etc/chrony/chrony.conf" + chrony_systemd_service="${ROOTFS_DIR}/lib/systemd/system/chronyd.service" + ;; "ubuntu" | "debian") echo "I am ubuntu or debian" chrony_conf_file="${ROOTFS_DIR}/etc/chrony/chrony.conf" @@ -527,7 +553,9 @@ EOT sed -i 's/^\(server \|pool \|peer \)/# &/g' ${chrony_conf_file} if [ -f "$chrony_systemd_service" ]; then - sed -i '/^\[Unit\]/a ConditionPathExists=\/dev\/ptp0' ${chrony_systemd_service} + # Remove the user option, as the user may not exist in the rootfs + sed -i -e 's/^\(ExecStart=.*\)-u [[:alnum:]]*/\1/g' \ + -e '/^\[Unit\]/a ConditionPathExists=\/dev\/ptp0' ${chrony_systemd_service} fi # The CC on s390x for fedora needs to be manually set to gcc when the golang is downloaded from the main page. 
diff --git a/tools/osbuilder/rootfs-builder/template/rootfs_lib_template.sh b/tools/osbuilder/rootfs-builder/template/rootfs_lib_template.sh index 238b6f702..978a89bc8 100644 --- a/tools/osbuilder/rootfs-builder/template/rootfs_lib_template.sh +++ b/tools/osbuilder/rootfs-builder/template/rootfs_lib_template.sh @@ -12,18 +12,19 @@ # # BIN_AGENT: Name of the Kata-Agent binary # -# REPO_URL: URL to distribution repository ( should be configured in +# REPO_URL: URL to distribution repository ( should be configured in # config.sh file) # -# Any other configuration variable for a specific distro must be added +# Any other configuration variable for a specific distro must be added # and documented on its own config.sh -# +# # - Expected result # # rootfs_dir populated with rootfs pkgs # It must provide a binary in /sbin/init # -# Note: For some distros, the build_rootfs() function provided in scripts/lib.sh +# Note: For some distros, the build_rootfs(), before_starting_container() +# and after_stopping_container() functions provided in scripts/lib.sh # will suffice. If a new distro is introduced with a special requirement, # then, a rootfs_builder//rootfs_lib.sh file should be created # using this template. @@ -52,3 +53,19 @@ build_rootfs() { # Populate ROOTFS_DIR # Must provide /sbin/init and /bin/${BIN_AGENT} } + +before_starting_container() { + # Run the following tasks before starting the container that builds the rootfs. + # For example: + # * Create a container + # * Create a volume + return 0 +} + +after_stopping_container() { + # Run the following tasks after stopping the container that builds the rootfs. 
+ + # For example: + # * Delete a container + # * Delete a volume + return 0 +} diff --git a/tools/osbuilder/rootfs-builder/ubuntu/rootfs_lib.sh b/tools/osbuilder/rootfs-builder/ubuntu/rootfs_lib.sh index a012a5cc4..e773c62a8 100644 --- a/tools/osbuilder/rootfs-builder/ubuntu/rootfs_lib.sh +++ b/tools/osbuilder/rootfs-builder/ubuntu/rootfs_lib.sh @@ -80,5 +80,8 @@ build_rootfs() { ${ROOTFS_DIR} chroot $ROOTFS_DIR ln -s /lib/systemd/systemd /usr/lib/systemd/systemd -} + # Reduce image size and memory footprint + # by removing unneeded files and directories. + chroot $ROOTFS_DIR rm -rf /usr/share/{bash-completion,bug,doc,info,lintian,locale,man,menu,misc,pixmaps,terminfo,zoneinfo,zsh} +} diff --git a/tools/osbuilder/scripts/lib.sh b/tools/osbuilder/scripts/lib.sh index e7a39d889..e43f78143 100644 --- a/tools/osbuilder/scripts/lib.sh +++ b/tools/osbuilder/scripts/lib.sh @@ -421,3 +421,11 @@ detect_musl_version() [ "$?" == "0" ] && [ "$MUSL_VERSION" != "null" ] } + +before_starting_container() { + return 0 +} + +after_stopping_container() { + return 0 +} diff --git a/tools/packaging/scripts/apply_patches.sh b/tools/packaging/scripts/apply_patches.sh new file mode 100755 index 000000000..aa6b19b60 --- /dev/null +++ b/tools/packaging/scripts/apply_patches.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# +# Copyright (c) 2020 Red Hat, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +# This script applies patches. +# +set -e + +script_dir="$(realpath $(dirname $0))" +patches_dir="$1" + +if [ -z "$patches_dir" ]; then + cat <<-EOT + Apply patches to the sources at the current directory. 
+ + Patches are expected to be named in the standard git-format-patch(1) format where + the first part of the filename represents the patch ordering (lowest numbers + apply first): + 'NUMBER-DASHED_DESCRIPTION.patch' + + For example, + + 0001-fix-the-bad-thing.patch + 0002-improve-the-fix-the-bad-thing-fix.patch + 0003-correct-compiler-warnings.patch + + Usage: + $0 PATCHES_DIR + Where: + PATCHES_DIR is the directory containing the patches + EOT + exit 1 +fi + +echo "INFO: Apply patches from $patches_dir" +if [ -d "$patches_dir" ]; then + patches=($(find "$patches_dir" -name '*.patch'|sort -t- -k1,1n)) + echo "INFO: Found ${#patches[@]} patches" + for patch in ${patches[@]}; do + echo "INFO: Apply $patch" + git apply "$patch" || \ + { echo >&2 "ERROR: Not applied. Exiting..."; exit 1; } + done +else + echo "INFO: Patches directory does not exist" +fi diff --git a/tools/packaging/static-build/qemu-virtiofs/Dockerfile b/tools/packaging/static-build/qemu-virtiofs/Dockerfile index c16049d3e..0caa06451 100644 --- a/tools/packaging/static-build/qemu-virtiofs/Dockerfile +++ b/tools/packaging/static-build/qemu-virtiofs/Dockerfile @@ -4,6 +4,7 @@ # SPDX-License-Identifier: Apache-2.0 from ubuntu:20.04 +ARG QEMU_DESTDIR ARG QEMU_VIRTIOFS_REPO # commit/tag/branch ARG QEMU_VIRTIOFS_TAG @@ -54,23 +55,24 @@ RUN git checkout "${QEMU_VIRTIOFS_TAG}" ADD scripts/configure-hypervisor.sh /root/configure-hypervisor.sh ADD qemu /root/kata_qemu +ADD scripts/apply_patches.sh /root/apply_patches.sh +ADD static-build /root/static-build # Apply experimental specific patches # Patches to quick fix virtiofs fork ENV VIRTIOFS_PATCHES_DIR=/root/kata_qemu/patches/${QEMU_VIRTIOFS_TAG}/ -RUN if [ -d ${VIRTIOFS_PATCHES_DIR} ]; then \ - echo "Patches to apply for virtiofs fixes:"; \ - for patch in $(find "${VIRTIOFS_PATCHES_DIR}" -name '*.patch' -type f |sort -t- -k1,1n); do \ - git apply $patch; \ - done;fi -RUN /root/kata_qemu/apply_patches.sh +RUN /root/apply_patches.sh ${VIRTIOFS_PATCHES_DIR} +# 
Apply the stable branch patches +RUN stable_branch=$(cat VERSION | awk 'BEGIN{FS=OFS="."}{print $1 "." $2 ".x"}') && \ + /root/apply_patches.sh "/root/kata_qemu/patches/${stable_branch}" RUN PREFIX="${PREFIX}" /root/configure-hypervisor.sh -s kata-qemu | sed -e 's|--disable-seccomp||g' | xargs ./configure \ --with-pkgversion=kata-static RUN make -j$(nproc) RUN make -j$(nproc) virtiofsd -RUN make install DESTDIR=/tmp/qemu-virtiofs-static -RUN mv /tmp/qemu-virtiofs-static/"${PREFIX}"/bin/qemu-system-x86_64 /tmp/qemu-virtiofs-static/"${PREFIX}"/bin/qemu-virtiofs-system-x86_64 -RUN mv /tmp/qemu-virtiofs-static/"${PREFIX}"/libexec/kata-qemu/virtiofsd /tmp/qemu-virtiofs-static/opt/kata/bin/virtiofsd-dax -RUN cd /tmp/qemu-virtiofs-static && tar -czvf "${QEMU_TARBALL}" * +RUN make install DESTDIR="${QEMU_DESTDIR}" +RUN cd "${QEMU_DESTDIR}/${PREFIX}" && \ + mv bin/qemu-system-x86_64 bin/qemu-virtiofs-system-x86_64 && \ + mv libexec/kata-qemu/virtiofsd bin/virtiofsd-dax +RUN /root/static-build/scripts/qemu-build-post.sh diff --git a/tools/packaging/static-build/qemu-virtiofs/build-static-qemu-virtiofs.sh b/tools/packaging/static-build/qemu-virtiofs/build-static-qemu-virtiofs.sh index 482339317..1178dafd5 100755 --- a/tools/packaging/static-build/qemu-virtiofs/build-static-qemu-virtiofs.sh +++ b/tools/packaging/static-build/qemu-virtiofs/build-static-qemu-virtiofs.sh @@ -26,6 +26,7 @@ qemu_virtiofs_repo=$(get_from_kata_deps "assets.hypervisor.qemu-experimental.url qemu_virtiofs_tag=$(get_from_kata_deps "assets.hypervisor.qemu-experimental.tag" "${kata_version}") qemu_virtiofs_tar="kata-static-qemu-virtiofsd.tar.gz" qemu_tmp_tar="kata-static-qemu-virtiofsd-tmp.tar.gz" +qemu_destdir="/tmp/qemu-virtiofs-static" info "Build ${qemu_virtiofs_repo} tag: ${qemu_virtiofs_tag}" @@ -37,6 +38,7 @@ sudo "${DOCKER_CLI}" build \ --no-cache \ --build-arg http_proxy="${http_proxy}" \ --build-arg https_proxy="${https_proxy}" \ + --build-arg QEMU_DESTDIR="${qemu_destdir}" \ --build-arg 
QEMU_VIRTIOFS_REPO="${qemu_virtiofs_repo}" \ --build-arg QEMU_VIRTIOFS_TAG="${qemu_virtiofs_tag}" \ --build-arg QEMU_TARBALL="${qemu_virtiofs_tar}" \ @@ -46,12 +48,9 @@ sudo "${DOCKER_CLI}" build \ -t qemu-virtiofs-static sudo "${DOCKER_CLI}" run \ + --rm \ -i \ -v "${PWD}":/share qemu-virtiofs-static \ - mv "/tmp/qemu-virtiofs-static/${qemu_virtiofs_tar}" /share/ + mv "${qemu_destdir}/${qemu_virtiofs_tar}" /share/ sudo chown ${USER}:${USER} "${PWD}/${qemu_virtiofs_tar}" - -# Remove blacklisted binaries -gzip -d < "${qemu_virtiofs_tar}" | tar --delete --wildcards -f - ${qemu_black_list[*]} | gzip > "${qemu_tmp_tar}" -mv -f "${qemu_tmp_tar}" "${qemu_virtiofs_tar}" diff --git a/tools/packaging/static-build/qemu.blacklist b/tools/packaging/static-build/qemu.blacklist index e52c54dc9..77ce3a28a 100644 --- a/tools/packaging/static-build/qemu.blacklist +++ b/tools/packaging/static-build/qemu.blacklist @@ -6,7 +6,7 @@ qemu_black_list=( */bin/qemu-pr-helper */bin/virtfs-proxy-helper */libexec/kata-qemu/qemu* -*/share/*/applications/ +*/share/*/applications */share/*/*.dtb */share/*/efi-e1000e.rom */share/*/efi-e1000.rom @@ -15,9 +15,9 @@ qemu_black_list=( */share/*/efi-pcnet.rom */share/*/efi-rtl8139.rom */share/*/efi-vmxnet3.rom -*/share/*/icons/ +*/share/*/icons */share/*/*.img -*/share/*/keymaps/ +*/share/*/keymaps */share/*/multiboot.bin */share/*/openbios-ppc */share/*/openbios-sparc32 diff --git a/tools/packaging/static-build/qemu/Dockerfile b/tools/packaging/static-build/qemu/Dockerfile index 74d479c9f..502b9018a 100644 --- a/tools/packaging/static-build/qemu/Dockerfile +++ b/tools/packaging/static-build/qemu/Dockerfile @@ -4,6 +4,7 @@ # SPDX-License-Identifier: Apache-2.0 from ubuntu:20.04 +ARG QEMU_DESTDIR ARG QEMU_REPO # commit/tag/branch ARG QEMU_VERSION @@ -54,13 +55,16 @@ RUN git clone https://github.com/qemu/keycodemapdb.git ui/keycodemapdb ADD scripts/configure-hypervisor.sh /root/configure-hypervisor.sh ADD qemu /root/kata_qemu +ADD scripts/apply_patches.sh 
/root/apply_patches.sh +ADD static-build /root/static-build -RUN /root/kata_qemu/apply_patches.sh +RUN stable_branch=$(cat VERSION | awk 'BEGIN{FS=OFS="."}{print $1 "." $2 ".x"}') && \ + /root/apply_patches.sh "/root/kata_qemu/patches/${stable_branch}" RUN PREFIX="${PREFIX}" /root/configure-hypervisor.sh -s kata-qemu | xargs ./configure \ --with-pkgversion=kata-static RUN make -j$(nproc) RUN make -j$(nproc) virtiofsd -RUN make install DESTDIR=/tmp/qemu-static -RUN cd /tmp/qemu-static && tar -czvf "${QEMU_TARBALL}" * +RUN make install DESTDIR="${QEMU_DESTDIR}" +RUN /root/static-build/scripts/qemu-build-post.sh diff --git a/tools/packaging/static-build/qemu/build-static-qemu.sh b/tools/packaging/static-build/qemu/build-static-qemu.sh index fd6f52439..22cb8d0c0 100755 --- a/tools/packaging/static-build/qemu/build-static-qemu.sh +++ b/tools/packaging/static-build/qemu/build-static-qemu.sh @@ -16,6 +16,7 @@ source "${script_dir}/../qemu.blacklist" packaging_dir="${script_dir}/../.." qemu_tar="kata-static-qemu.tar.gz" qemu_tmp_tar="kata-static-qemu-tmp.tar.gz" +qemu_destdir="/tmp/qemu-static/" qemu_repo="${qemu_repo:-}" qemu_version="${qemu_version:-}" @@ -45,6 +46,7 @@ sudo docker build \ --no-cache \ --build-arg http_proxy="${http_proxy}" \ --build-arg https_proxy="${https_proxy}" \ + --build-arg QEMU_DESTDIR="${qemu_destdir}" \ --build-arg QEMU_REPO="${qemu_repo}" \ --build-arg QEMU_VERSION="${qemu_version}" \ --build-arg QEMU_TARBALL="${qemu_tar}" \ @@ -54,12 +56,9 @@ sudo docker build \ -t qemu-static sudo docker run \ + --rm \ -i \ -v "${PWD}":/share qemu-static \ - mv "/tmp/qemu-static/${qemu_tar}" /share/ + mv "${qemu_destdir}/${qemu_tar}" /share/ sudo chown ${USER}:${USER} "${PWD}/${qemu_tar}" - -# Remove blacklisted binaries -gzip -d < "${qemu_tar}" | tar --delete --wildcards -f - ${qemu_black_list[*]} | gzip > "${qemu_tmp_tar}" -mv -f "${qemu_tmp_tar}" "${qemu_tar}" diff --git a/tools/packaging/static-build/scripts/qemu-build-post.sh 
b/tools/packaging/static-build/scripts/qemu-build-post.sh new file mode 100755 index 000000000..fbb8f931c --- /dev/null +++ b/tools/packaging/static-build/scripts/qemu-build-post.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# +# Copyright (c) 2020 Red Hat, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +# This script performs QEMU post-build processing. +# +set -e + +script_dir="$(realpath $(dirname $0))" +source "${script_dir}/../qemu.blacklist" + +if [[ -z "${QEMU_TARBALL}" || -z "${QEMU_DESTDIR}" ]]; then + echo "$0: needs QEMU_TARBALL and QEMU_DESTDIR exported" + exit 1 +fi + +pushd "${QEMU_DESTDIR}"
# Remove files to reduce the surface.
echo "INFO: remove uneeded files"
for pattern in ${qemu_black_list[@]}; do
 find . -path "$pattern" | xargs rm -rfv
done

echo "INFO: create the tarball"
tar -czvf "${QEMU_TARBALL}" *
popd