vendor: move ttrpc proto code out of vendor

We no longer really depend on the generated Go agent gRPC code.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
Peng Tao
2020-05-25 08:48:00 -07:00
parent f7b941b6bf
commit 6bc69760c0
62 changed files with 1539 additions and 8919 deletions


@@ -1,943 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
mksyscall_windows generates windows system call bodies
It parses all files specified on command line containing function
prototypes (like syscall_windows.go) and prints system call bodies
to standard output.
The prototypes are marked by lines beginning with "//sys" and read
like func declarations if //sys is replaced by func, but:
* The parameter lists must give a name for each argument. This
includes return parameters.
* The parameter lists must give a type for each argument:
the (x, y, z int) shorthand is not allowed.
* If the return parameter is an error number, it must be named err.
* If the go func name needs to be different from its winapi dll name,
the winapi name could be specified at the end, after "=" sign, like
//sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
* Each function that returns err needs to supply a condition that the
winapi return value will be tested against to detect failure.
On failure, err is set to the windows "last-error"; otherwise it will be nil.
The value can be provided at end of //sys declaration, like
//sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
and is [failretval==0] by default.
Usage:
mksyscall_windows [flags] [path ...]
The flags are:
-output
Specify output file name (outputs to console if blank).
-trace
Generate print statement after every syscall.
*/
package main
import (
"bufio"
"bytes"
"errors"
"flag"
"fmt"
"go/format"
"go/parser"
"go/token"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"text/template"
)
var (
filename = flag.String("output", "", "output file name (standard output if omitted)")
printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall")
systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory")
winio = flag.Bool("winio", false, "import go-winio")
)
func trim(s string) string {
return strings.Trim(s, " \t")
}
var packageName string
func packagename() string {
return packageName
}
func syscalldot() string {
if packageName == "syscall" {
return ""
}
return "syscall."
}
// Param is function parameter
type Param struct {
Name string
Type string
fn *Fn
tmpVarIdx int
}
// tmpVar returns temp variable name that will be used to represent p during syscall.
func (p *Param) tmpVar() string {
if p.tmpVarIdx < 0 {
p.tmpVarIdx = p.fn.curTmpVarIdx
p.fn.curTmpVarIdx++
}
return fmt.Sprintf("_p%d", p.tmpVarIdx)
}
// BoolTmpVarCode returns source code for bool temp variable.
func (p *Param) BoolTmpVarCode() string {
const code = `var %s uint32
if %s {
%s = 1
} else {
%s = 0
}`
tmp := p.tmpVar()
return fmt.Sprintf(code, tmp, p.Name, tmp, tmp)
}
// SliceTmpVarCode returns source code for slice temp variable.
func (p *Param) SliceTmpVarCode() string {
const code = `var %s *%s
if len(%s) > 0 {
%s = &%s[0]
}`
tmp := p.tmpVar()
return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name)
}
// StringTmpVarCode returns source code for string temp variable.
func (p *Param) StringTmpVarCode() string {
errvar := p.fn.Rets.ErrorVarName()
if errvar == "" {
errvar = "_"
}
tmp := p.tmpVar()
const code = `var %s %s
%s, %s = %s(%s)`
s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name)
if errvar == "-" {
return s
}
const morecode = `
if %s != nil {
return
}`
return s + fmt.Sprintf(morecode, errvar)
}
// TmpVarCode returns source code for temp variable.
func (p *Param) TmpVarCode() string {
switch {
case p.Type == "bool":
return p.BoolTmpVarCode()
case strings.HasPrefix(p.Type, "[]"):
return p.SliceTmpVarCode()
default:
return ""
}
}
// TmpVarHelperCode returns source code for helper's temp variable.
func (p *Param) TmpVarHelperCode() string {
if p.Type != "string" {
return ""
}
return p.StringTmpVarCode()
}
// SyscallArgList returns source code fragments representing p parameter
// in syscall. Slices are translated into 2 syscall parameters: pointer to
// the first element and length.
func (p *Param) SyscallArgList() []string {
t := p.HelperType()
var s string
switch {
case t[0] == '*':
s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name)
case t == "bool":
s = p.tmpVar()
case strings.HasPrefix(t, "[]"):
return []string{
fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()),
fmt.Sprintf("uintptr(len(%s))", p.Name),
}
default:
s = p.Name
}
return []string{fmt.Sprintf("uintptr(%s)", s)}
}
// IsError determines if p parameter is used to return error.
func (p *Param) IsError() bool {
return p.Name == "err" && p.Type == "error"
}
// HelperType returns type of parameter p used in helper function.
func (p *Param) HelperType() string {
if p.Type == "string" {
return p.fn.StrconvType()
}
return p.Type
}
// join concatenates parameters ps into a string with sep separator.
// Each parameter is converted into a string by applying fn to it.
func join(ps []*Param, fn func(*Param) string, sep string) string {
if len(ps) == 0 {
return ""
}
a := make([]string, 0)
for _, p := range ps {
a = append(a, fn(p))
}
return strings.Join(a, sep)
}
// Rets describes function return parameters.
type Rets struct {
Name string
Type string
ReturnsError bool
FailCond string
}
// ErrorVarName returns error variable name for r.
func (r *Rets) ErrorVarName() string {
if r.ReturnsError {
return "err"
}
if r.Type == "error" {
return r.Name
}
return ""
}
// ToParams converts r into slice of *Param.
func (r *Rets) ToParams() []*Param {
ps := make([]*Param, 0)
if len(r.Name) > 0 {
ps = append(ps, &Param{Name: r.Name, Type: r.Type})
}
if r.ReturnsError {
ps = append(ps, &Param{Name: "err", Type: "error"})
}
return ps
}
// List returns source code of syscall return parameters.
func (r *Rets) List() string {
s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ")
if len(s) > 0 {
s = "(" + s + ")"
}
return s
}
// PrintList returns source code of the trace printing part corresponding
// to syscall return values.
func (r *Rets) PrintList() string {
return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
}
// SetReturnValuesCode returns source code that accepts syscall return values.
func (r *Rets) SetReturnValuesCode() string {
if r.Name == "" && !r.ReturnsError {
return ""
}
retvar := "r0"
if r.Name == "" {
retvar = "r1"
}
errvar := "_"
if r.ReturnsError {
errvar = "e1"
}
return fmt.Sprintf("%s, _, %s := ", retvar, errvar)
}
func (r *Rets) useLongHandleErrorCode(retvar string) string {
const code = `if %s {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = %sEINVAL
}
}`
cond := retvar + " == 0"
if r.FailCond != "" {
cond = strings.Replace(r.FailCond, "failretval", retvar, 1)
}
return fmt.Sprintf(code, cond, syscalldot())
}
// SetErrorCode returns source code that sets return parameters.
func (r *Rets) SetErrorCode() string {
const code = `if r0 != 0 {
%s = %sErrno(r0)
}`
const hrCode = `if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
%s = %sErrno(r0)
}`
if r.Name == "" && !r.ReturnsError {
return ""
}
if r.Name == "" {
return r.useLongHandleErrorCode("r1")
}
if r.Type == "error" {
if r.Name == "hr" {
return fmt.Sprintf(hrCode, r.Name, syscalldot())
} else {
return fmt.Sprintf(code, r.Name, syscalldot())
}
}
s := ""
switch {
case r.Type[0] == '*':
s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type)
case r.Type == "bool":
s = fmt.Sprintf("%s = r0 != 0", r.Name)
default:
s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type)
}
if !r.ReturnsError {
return s
}
return s + "\n\t" + r.useLongHandleErrorCode(r.Name)
}
// Fn describes syscall function.
type Fn struct {
Name string
Params []*Param
Rets *Rets
PrintTrace bool
confirmproc bool
dllname string
dllfuncname string
src string
// TODO: get rid of this field and just use parameter index instead
curTmpVarIdx int // ensure tmp variables have unique names
}
// extractParams parses s to extract function parameters.
func extractParams(s string, f *Fn) ([]*Param, error) {
s = trim(s)
if s == "" {
return nil, nil
}
a := strings.Split(s, ",")
ps := make([]*Param, len(a))
for i := range ps {
s2 := trim(a[i])
b := strings.Split(s2, " ")
if len(b) != 2 {
b = strings.Split(s2, "\t")
if len(b) != 2 {
return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"")
}
}
ps[i] = &Param{
Name: trim(b[0]),
Type: trim(b[1]),
fn: f,
tmpVarIdx: -1,
}
}
return ps, nil
}
// extractSection extracts text out of string s starting after start
// and ending just before end. found return value will indicate success,
// and prefix, body and suffix will contain corresponding parts of string s.
func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) {
s = trim(s)
if strings.HasPrefix(s, string(start)) {
// no prefix
body = s[1:]
} else {
a := strings.SplitN(s, string(start), 2)
if len(a) != 2 {
return "", "", s, false
}
prefix = a[0]
body = a[1]
}
a := strings.SplitN(body, string(end), 2)
if len(a) != 2 {
return "", "", "", false
}
return prefix, a[0], a[1], true
}
// newFn parses string s and returns the created function Fn.
func newFn(s string) (*Fn, error) {
s = trim(s)
f := &Fn{
Rets: &Rets{},
src: s,
PrintTrace: *printTraceFlag,
}
// function name and args
prefix, body, s, found := extractSection(s, '(', ')')
if !found || prefix == "" {
return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"")
}
f.Name = prefix
var err error
f.Params, err = extractParams(body, f)
if err != nil {
return nil, err
}
// return values
_, body, s, found = extractSection(s, '(', ')')
if found {
r, err := extractParams(body, f)
if err != nil {
return nil, err
}
switch len(r) {
case 0:
case 1:
if r[0].IsError() {
f.Rets.ReturnsError = true
} else {
f.Rets.Name = r[0].Name
f.Rets.Type = r[0].Type
}
case 2:
if !r[1].IsError() {
return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"")
}
f.Rets.ReturnsError = true
f.Rets.Name = r[0].Name
f.Rets.Type = r[0].Type
default:
return nil, errors.New("Too many return values in \"" + f.src + "\"")
}
}
// fail condition
_, body, s, found = extractSection(s, '[', ']')
if found {
f.Rets.FailCond = body
}
// dll and dll function names
s = trim(s)
if s == "" {
return f, nil
}
if !strings.HasPrefix(s, "=") {
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
}
s = trim(s[1:])
a := strings.Split(s, ".")
switch len(a) {
case 1:
f.dllfuncname = a[0]
case 2:
f.dllname = a[0]
f.dllfuncname = a[1]
default:
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
}
if f.dllfuncname[len(f.dllfuncname)-1] == '?' {
f.confirmproc = true
f.dllfuncname = f.dllfuncname[0 : len(f.dllfuncname)-1]
}
return f, nil
}
// DLLName returns DLL name for function f.
func (f *Fn) DLLName() string {
if f.dllname == "" {
return "kernel32"
}
return f.dllname
}
// DLLFuncName returns the DLL function name for function f.
func (f *Fn) DLLFuncName() string {
if f.dllfuncname == "" {
return f.Name
}
return f.dllfuncname
}
func (f *Fn) ConfirmProc() bool {
return f.confirmproc
}
// ParamList returns source code for function f parameters.
func (f *Fn) ParamList() string {
return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ")
}
// HelperParamList returns source code for helper function f parameters.
func (f *Fn) HelperParamList() string {
return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ")
}
// ParamPrintList returns source code of the trace printing part corresponding
// to syscall input parameters.
func (f *Fn) ParamPrintList() string {
return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
}
// ParamCount returns the number of syscall parameters for function f.
func (f *Fn) ParamCount() int {
n := 0
for _, p := range f.Params {
n += len(p.SyscallArgList())
}
return n
}
// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/...
// to use. It returns the parameter count for the corresponding SyscallX function.
func (f *Fn) SyscallParamCount() int {
n := f.ParamCount()
switch {
case n <= 3:
return 3
case n <= 6:
return 6
case n <= 9:
return 9
case n <= 12:
return 12
case n <= 15:
return 15
default:
panic("too many arguments to system call")
}
}
// Syscall determines which SyscallX function to use for function f.
func (f *Fn) Syscall() string {
c := f.SyscallParamCount()
if c == 3 {
return syscalldot() + "Syscall"
}
return syscalldot() + "Syscall" + strconv.Itoa(c)
}
// SyscallParamList returns source code for SyscallX parameters for function f.
func (f *Fn) SyscallParamList() string {
a := make([]string, 0)
for _, p := range f.Params {
a = append(a, p.SyscallArgList()...)
}
for len(a) < f.SyscallParamCount() {
a = append(a, "0")
}
return strings.Join(a, ", ")
}
// HelperCallParamList returns source code of call into function f helper.
func (f *Fn) HelperCallParamList() string {
a := make([]string, 0, len(f.Params))
for _, p := range f.Params {
s := p.Name
if p.Type == "string" {
s = p.tmpVar()
}
a = append(a, s)
}
return strings.Join(a, ", ")
}
// IsUTF16 is true if f is a W (utf16) function. It is false
// for all A (ascii) functions.
func (_ *Fn) IsUTF16() bool {
return true
}
// StrconvFunc returns name of Go string to OS string function for f.
func (f *Fn) StrconvFunc() string {
if f.IsUTF16() {
return syscalldot() + "UTF16PtrFromString"
}
return syscalldot() + "BytePtrFromString"
}
// StrconvType returns Go type name used for OS string for f.
func (f *Fn) StrconvType() string {
if f.IsUTF16() {
return "*uint16"
}
return "*byte"
}
// HasStringParam is true if f has at least one string parameter.
// Otherwise it is false.
func (f *Fn) HasStringParam() bool {
for _, p := range f.Params {
if p.Type == "string" {
return true
}
}
return false
}
var uniqDllFuncName = make(map[string]bool)
// IsNotDuplicate is true if f is not a duplicated function
func (f *Fn) IsNotDuplicate() bool {
funcName := f.DLLFuncName()
if uniqDllFuncName[funcName] == false {
uniqDllFuncName[funcName] = true
return true
}
return false
}
// HelperName returns name of function f helper.
func (f *Fn) HelperName() string {
if !f.HasStringParam() {
return f.Name
}
return "_" + f.Name
}
// Source files and functions.
type Source struct {
Funcs []*Fn
Files []string
StdLibImports []string
ExternalImports []string
}
func (src *Source) Import(pkg string) {
src.StdLibImports = append(src.StdLibImports, pkg)
sort.Strings(src.StdLibImports)
}
func (src *Source) ExternalImport(pkg string) {
src.ExternalImports = append(src.ExternalImports, pkg)
sort.Strings(src.ExternalImports)
}
// ParseFiles parses files listed in fs and extracts all syscall
// functions listed in sys comments. It returns source files
// and functions collection *Source if successful.
func ParseFiles(fs []string) (*Source, error) {
src := &Source{
Funcs: make([]*Fn, 0),
Files: make([]string, 0),
StdLibImports: []string{
"unsafe",
},
ExternalImports: make([]string, 0),
}
for _, file := range fs {
if err := src.ParseFile(file); err != nil {
return nil, err
}
}
return src, nil
}
// DLLs returns dll names for a source set src.
func (src *Source) DLLs() []string {
uniq := make(map[string]bool)
r := make([]string, 0)
for _, f := range src.Funcs {
name := f.DLLName()
if _, found := uniq[name]; !found {
uniq[name] = true
r = append(r, name)
}
}
return r
}
// ParseFile adds additional file path to a source set src.
func (src *Source) ParseFile(path string) error {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
s := bufio.NewScanner(file)
for s.Scan() {
t := trim(s.Text())
if len(t) < 7 {
continue
}
if !strings.HasPrefix(t, "//sys") {
continue
}
t = t[5:]
if !(t[0] == ' ' || t[0] == '\t') {
continue
}
f, err := newFn(t[1:])
if err != nil {
return err
}
src.Funcs = append(src.Funcs, f)
}
if err := s.Err(); err != nil {
return err
}
src.Files = append(src.Files, path)
// get package name
fset := token.NewFileSet()
_, err = file.Seek(0, 0)
if err != nil {
return err
}
pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly)
if err != nil {
return err
}
packageName = pkg.Name.Name
return nil
}
// IsStdRepo returns true if src is part of standard library.
func (src *Source) IsStdRepo() (bool, error) {
if len(src.Files) == 0 {
return false, errors.New("no input files provided")
}
abspath, err := filepath.Abs(src.Files[0])
if err != nil {
return false, err
}
goroot := runtime.GOROOT()
if runtime.GOOS == "windows" {
abspath = strings.ToLower(abspath)
goroot = strings.ToLower(goroot)
}
sep := string(os.PathSeparator)
if !strings.HasSuffix(goroot, sep) {
goroot += sep
}
return strings.HasPrefix(abspath, goroot), nil
}
// Generate output source file from a source set src.
func (src *Source) Generate(w io.Writer) error {
const (
pkgStd = iota // any package in std library
pkgXSysWindows // x/sys/windows package
pkgOther
)
isStdRepo, err := src.IsStdRepo()
if err != nil {
return err
}
var pkgtype int
switch {
case isStdRepo:
pkgtype = pkgStd
case packageName == "windows":
// TODO: this needs better logic than just using package name
pkgtype = pkgXSysWindows
default:
pkgtype = pkgOther
}
if *systemDLL {
switch pkgtype {
case pkgStd:
src.Import("internal/syscall/windows/sysdll")
case pkgXSysWindows:
default:
src.ExternalImport("golang.org/x/sys/windows")
}
}
if *winio {
src.ExternalImport("github.com/Microsoft/go-winio")
}
if packageName != "syscall" {
src.Import("syscall")
}
funcMap := template.FuncMap{
"packagename": packagename,
"syscalldot": syscalldot,
"newlazydll": func(dll string) string {
arg := "\"" + dll + ".dll\""
if !*systemDLL {
return syscalldot() + "NewLazyDLL(" + arg + ")"
}
if strings.HasPrefix(dll, "api_") || strings.HasPrefix(dll, "ext_") {
arg = strings.Replace(arg, "_", "-", -1)
}
switch pkgtype {
case pkgStd:
return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))"
case pkgXSysWindows:
return "NewLazySystemDLL(" + arg + ")"
default:
return "windows.NewLazySystemDLL(" + arg + ")"
}
},
}
t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate))
err = t.Execute(w, src)
if err != nil {
return errors.New("Failed to execute template: " + err.Error())
}
return nil
}
func usage() {
fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n")
flag.PrintDefaults()
os.Exit(1)
}
func main() {
flag.Usage = usage
flag.Parse()
if len(flag.Args()) <= 0 {
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
usage()
}
src, err := ParseFiles(flag.Args())
if err != nil {
log.Fatal(err)
}
var buf bytes.Buffer
if err := src.Generate(&buf); err != nil {
log.Fatal(err)
}
data, err := format.Source(buf.Bytes())
if err != nil {
log.Fatal(err)
}
if *filename == "" {
_, err = os.Stdout.Write(data)
} else {
err = ioutil.WriteFile(*filename, data, 0644)
}
if err != nil {
log.Fatal(err)
}
}
// TODO: use println instead to print in the following template
const srcTemplate = `
{{define "main"}}// Code generated mksyscall_windows.exe DO NOT EDIT
package {{packagename}}
import (
{{range .StdLibImports}}"{{.}}"
{{end}}
{{range .ExternalImports}}"{{.}}"
{{end}}
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e {{syscalldot}}Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
{{template "dlls" .}}
{{template "funcnames" .}})
{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}}
{{end}}
{{/* help functions */}}
{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}}
{{end}}{{end}}
{{define "funcnames"}}{{range .Funcs}}{{if .IsNotDuplicate}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}"){{end}}
{{end}}{{end}}
{{define "helperbody"}}
func {{.Name}}({{.ParamList}}) {{template "results" .}}{
{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}})
}
{{end}}
{{define "funcbody"}}
func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{
{{template "tmpvars" .}} {{template "syscallcheck" .}}{{template "syscall" .}}
{{template "seterror" .}}{{template "printtrace" .}} return
}
{{end}}
{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}}
{{end}}{{end}}{{end}}
{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}}
{{end}}{{end}}{{end}}
{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}}
{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}}
{{define "syscallcheck"}}{{if .ConfirmProc}}if {{.Rets.ErrorVarName}} = proc{{.DLLFuncName}}.Find(); {{.Rets.ErrorVarName}} != nil {
return
}
{{end}}{{end}}
{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}}
{{end}}{{end}}
{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n")
{{end}}{{end}}
`
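
To make the input/output relationship concrete, here is a hedged sketch: the `//sys` prototype is a hypothetical example (MessageBoxW from user32.dll), and the wrapper below is reconstructed by hand from the srcTemplate above rather than taken from real generator output, so formatting details may differ. `errnoErr` and the imports come from the generated file's preamble, which the template also defines.

```go
// Hypothetical input line in a syscall_windows.go-style file:
//sys	MessageBoxW(hwnd uintptr, text *uint16, caption *uint16, flags uint32) (ret int32, err error) = user32.MessageBoxW

// Approximate generated output (default failure condition [failretval==0]):
var (
	moduser32       = windows.NewLazySystemDLL("user32.dll")
	procMessageBoxW = moduser32.NewProc("MessageBoxW")
)

func MessageBoxW(hwnd uintptr, text *uint16, caption *uint16, flags uint32) (ret int32, err error) {
	r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(flags), 0, 0)
	ret = int32(r0)
	if ret == 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
```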


@@ -1,294 +0,0 @@
// +build ignore
package main
import (
"bytes"
"fmt"
"go/format"
"html/template"
"io/ioutil"
"log"
"path/filepath"
"strings"
"github.com/globalsign/mgo/internal/json"
)
func main() {
log.SetFlags(0)
log.SetPrefix(name + ": ")
var g Generator
fmt.Fprintf(&g, "// Code generated by \"%s.go\"; DO NOT EDIT\n\n", name)
src := g.generate()
err := ioutil.WriteFile(fmt.Sprintf("%s.go", strings.TrimSuffix(name, "_generator")), src, 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
}
// Generator holds the state of the analysis. Primarily used to buffer
// the output for format.Source.
type Generator struct {
bytes.Buffer // Accumulated output.
}
// format returns the gofmt-ed contents of the Generator's buffer.
func (g *Generator) format() []byte {
src, err := format.Source(g.Bytes())
if err != nil {
// Should never happen, but can arise when developing this code.
// The user can compile the output to see the error.
log.Printf("warning: internal error: invalid Go generated: %s", err)
log.Printf("warning: compile the package to analyze the error")
return g.Bytes()
}
return src
}
// EVERYTHING ABOVE IS CONSTANT BETWEEN THE GENERATORS
const name = "bson_corpus_spec_test_generator"
func (g *Generator) generate() []byte {
testFiles, err := filepath.Glob("./specdata/specifications/source/bson-corpus/tests/*.json")
if err != nil {
log.Fatalf("error reading bson-corpus files: %s", err)
}
tests, err := g.loadTests(testFiles)
if err != nil {
log.Fatalf("error loading tests: %s", err)
}
tmpl, err := g.getTemplate()
if err != nil {
log.Fatalf("error loading template: %s", err)
}
if err := tmpl.Execute(&g.Buffer, tests); err != nil {
log.Fatalf("error executing template: %s", err)
}
return g.format()
}
func (g *Generator) loadTests(filenames []string) ([]*testDef, error) {
var tests []*testDef
for _, filename := range filenames {
test, err := g.loadTest(filename)
if err != nil {
return nil, err
}
tests = append(tests, test)
}
return tests, nil
}
func (g *Generator) loadTest(filename string) (*testDef, error) {
content, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
var testDef testDef
err = json.Unmarshal(content, &testDef)
if err != nil {
return nil, err
}
names := make(map[string]struct{})
for i := len(testDef.Valid) - 1; i >= 0; i-- {
if testDef.BsonType == "0x05" && testDef.Valid[i].Description == "subtype 0x02" {
testDef.Valid = append(testDef.Valid[:i], testDef.Valid[i+1:]...)
continue
}
name := cleanupFuncName(testDef.Description + "_" + testDef.Valid[i].Description)
nameIdx := name
j := 1
for {
if _, ok := names[nameIdx]; !ok {
break
}
nameIdx = fmt.Sprintf("%s_%d", name, j)
j++ // advance the suffix so repeated collisions cannot loop forever
}
names[nameIdx] = struct{}{}
testDef.Valid[i].TestDef = &testDef
testDef.Valid[i].Name = nameIdx
testDef.Valid[i].StructTest = testDef.TestKey != "" &&
(testDef.BsonType != "0x05" || strings.Contains(testDef.Valid[i].Description, "0x00")) &&
!testDef.Deprecated
}
for i := len(testDef.DecodeErrors) - 1; i >= 0; i-- {
if strings.Contains(testDef.DecodeErrors[i].Description, "UTF-8") {
testDef.DecodeErrors = append(testDef.DecodeErrors[:i], testDef.DecodeErrors[i+1:]...)
continue
}
name := cleanupFuncName(testDef.Description + "_" + testDef.DecodeErrors[i].Description)
nameIdx := name
j := 1
for {
if _, ok := names[nameIdx]; !ok {
break
}
nameIdx = fmt.Sprintf("%s_%d", name, j)
j++ // advance the suffix so repeated collisions cannot loop forever
}
names[nameIdx] = struct{}{}
testDef.DecodeErrors[i].Name = nameIdx
}
return &testDef, nil
}
func (g *Generator) getTemplate() (*template.Template, error) {
content := `package bson_test
import (
"encoding/hex"
"time"
. "gopkg.in/check.v1"
"github.com/globalsign/mgo/bson"
)
func testValid(c *C, in []byte, expected []byte, result interface{}) {
err := bson.Unmarshal(in, result)
c.Assert(err, IsNil)
out, err := bson.Marshal(result)
c.Assert(err, IsNil)
c.Assert(string(expected), Equals, string(out), Commentf("roundtrip failed for %T, expected '%x' but got '%x'", result, expected, out))
}
func testDecodeSkip(c *C, in []byte) {
err := bson.Unmarshal(in, &struct{}{})
c.Assert(err, IsNil)
}
func testDecodeError(c *C, in []byte, result interface{}) {
err := bson.Unmarshal(in, result)
c.Assert(err, Not(IsNil))
}
{{range .}}
{{range .Valid}}
func (s *S) Test{{.Name}}(c *C) {
b, err := hex.DecodeString("{{.Bson}}")
c.Assert(err, IsNil)
{{if .CanonicalBson}}
cb, err := hex.DecodeString("{{.CanonicalBson}}")
c.Assert(err, IsNil)
{{else}}
cb := b
{{end}}
var resultD bson.D
testValid(c, b, cb, &resultD)
{{if .StructTest}}var resultS struct {
Element {{.TestDef.GoType}} ` + "`bson:\"{{.TestDef.TestKey}}\"`" + `
}
testValid(c, b, cb, &resultS){{end}}
testDecodeSkip(c, b)
}
{{end}}
{{range .DecodeErrors}}
func (s *S) Test{{.Name}}(c *C) {
b, err := hex.DecodeString("{{.Bson}}")
c.Assert(err, IsNil)
var resultD bson.D
testDecodeError(c, b, &resultD)
}
{{end}}
{{end}}
`
tmpl, err := template.New("").Parse(content)
if err != nil {
return nil, err
}
return tmpl, nil
}
func cleanupFuncName(name string) string {
return strings.Map(func(r rune) rune {
if (r >= 48 && r <= 57) || (r >= 65 && r <= 90) || (r >= 97 && r <= 122) {
return r
}
return '_'
}, name)
}
type testDef struct {
Description string `json:"description"`
BsonType string `json:"bson_type"`
TestKey string `json:"test_key"`
Valid []*valid `json:"valid"`
DecodeErrors []*decodeError `json:"decodeErrors"`
Deprecated bool `json:"deprecated"`
}
func (t *testDef) GoType() string {
switch t.BsonType {
case "0x01":
return "float64"
case "0x02":
return "string"
case "0x03":
return "bson.D"
case "0x04":
return "[]interface{}"
case "0x05":
return "[]byte"
case "0x07":
return "bson.ObjectId"
case "0x08":
return "bool"
case "0x09":
return "time.Time"
case "0x0E":
return "string"
case "0x10":
return "int32"
case "0x12":
return "int64"
case "0x13":
return "bson.Decimal"
default:
return "interface{}"
}
}
type valid struct {
Description string `json:"description"`
Bson string `json:"bson"`
CanonicalBson string `json:"canonical_bson"`
Name string
StructTest bool
TestDef *testDef
}
type decodeError struct {
Description string `json:"description"`
Bson string `json:"bson"`
Name string
}

File diff suppressed because it is too large


@@ -1,27 +0,0 @@
Copyright (c) 2016, gRPC Ecosystem
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of grpc-opentracing nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,23 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the GRPC project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of GRPC, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of GRPC. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of GRPC or any code incorporated within this
implementation of GRPC constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of GRPC
shall terminate as of the date such litigation is filed.


@@ -1,57 +0,0 @@
# OpenTracing support for gRPC in Go
The `otgrpc` package makes it easy to add OpenTracing support to gRPC-based
systems in Go.
## Installation
```
go get github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc
```
## Documentation
See the basic usage examples below and the [package documentation on
godoc.org](https://godoc.org/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc).
## Client-side usage example
Wherever you call `grpc.Dial`:
```go
// You must have some sort of OpenTracing Tracer instance on hand.
var tracer opentracing.Tracer = ...
...
// Set up a connection to the server peer.
conn, err := grpc.Dial(
address,
... // other options
grpc.WithUnaryInterceptor(
otgrpc.OpenTracingClientInterceptor(tracer)),
grpc.WithStreamInterceptor(
otgrpc.OpenTracingStreamClientInterceptor(tracer)))
// All future RPC activity involving `conn` will be automatically traced.
```
## Server-side usage example
Wherever you call `grpc.NewServer`:
```go
// You must have some sort of OpenTracing Tracer instance on hand.
var tracer opentracing.Tracer = ...
...
// Initialize the gRPC server.
s := grpc.NewServer(
... // other options
grpc.UnaryInterceptor(
otgrpc.OpenTracingServerInterceptor(tracer)),
grpc.StreamInterceptor(
otgrpc.OpenTracingStreamServerInterceptor(tracer)))
// All future RPC activity involving `s` will be automatically traced.
```
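
Putting the two snippets together, here is a minimal, self-contained client sketch; the address and the use of `opentracing.GlobalTracer()` (a no-op unless a real tracer has been registered) are assumptions for illustration:

```go
package main

import (
	"log"

	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
	opentracing "github.com/opentracing/opentracing-go"
	"google.golang.org/grpc"
)

func main() {
	// No-op unless a real tracer (Jaeger, Zipkin, ...) was registered elsewhere.
	tracer := opentracing.GlobalTracer()

	conn, err := grpc.Dial(
		"localhost:50051", // assumed server address
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer)),
		grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer)),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Unary and streaming calls made through conn are now traced.
}
```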


@@ -1,239 +0,0 @@
package otgrpc
import (
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/opentracing/opentracing-go/log"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"io"
"runtime"
"sync/atomic"
)
// OpenTracingClientInterceptor returns a grpc.UnaryClientInterceptor suitable
// for use in a grpc.Dial call.
//
// For example:
//
// conn, err := grpc.Dial(
// address,
// ..., // (existing DialOptions)
// grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer)))
//
// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC
// metadata; they will also look in the context.Context for an active
// in-process parent Span and establish a ChildOf reference if such a parent
// Span could be found.
func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryClientInterceptor {
otgrpcOpts := newOptions()
otgrpcOpts.apply(optFuncs...)
return func(
ctx context.Context,
method string,
req, resp interface{},
cc *grpc.ClientConn,
invoker grpc.UnaryInvoker,
opts ...grpc.CallOption,
) error {
var err error
var parentCtx opentracing.SpanContext
if parent := opentracing.SpanFromContext(ctx); parent != nil {
parentCtx = parent.Context()
}
if otgrpcOpts.inclusionFunc != nil &&
!otgrpcOpts.inclusionFunc(parentCtx, method, req, resp) {
return invoker(ctx, method, req, resp, cc, opts...)
}
clientSpan := tracer.StartSpan(
method,
opentracing.ChildOf(parentCtx),
ext.SpanKindRPCClient,
gRPCComponentTag,
)
defer clientSpan.Finish()
ctx = injectSpanContext(ctx, tracer, clientSpan)
if otgrpcOpts.logPayloads {
clientSpan.LogFields(log.Object("gRPC request", req))
}
err = invoker(ctx, method, req, resp, cc, opts...)
if err == nil {
if otgrpcOpts.logPayloads {
clientSpan.LogFields(log.Object("gRPC response", resp))
}
} else {
SetSpanTags(clientSpan, err, true)
clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
}
if otgrpcOpts.decorator != nil {
otgrpcOpts.decorator(clientSpan, method, req, resp, err)
}
return err
}
}
// OpenTracingStreamClientInterceptor returns a grpc.StreamClientInterceptor suitable
// for use in a grpc.Dial call. The interceptor instruments streaming RPCs by creating
// a single span to correspond to the lifetime of the RPC's stream.
//
// For example:
//
// conn, err := grpc.Dial(
// address,
// ..., // (existing DialOptions)
// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer)))
//
// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC
// metadata; they will also look in the context.Context for an active
// in-process parent Span and establish a ChildOf reference if such a parent
// Span could be found.
func OpenTracingStreamClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamClientInterceptor {
otgrpcOpts := newOptions()
otgrpcOpts.apply(optFuncs...)
return func(
ctx context.Context,
desc *grpc.StreamDesc,
cc *grpc.ClientConn,
method string,
streamer grpc.Streamer,
opts ...grpc.CallOption,
) (grpc.ClientStream, error) {
var err error
var parentCtx opentracing.SpanContext
if parent := opentracing.SpanFromContext(ctx); parent != nil {
parentCtx = parent.Context()
}
if otgrpcOpts.inclusionFunc != nil &&
!otgrpcOpts.inclusionFunc(parentCtx, method, nil, nil) {
return streamer(ctx, desc, cc, method, opts...)
}
clientSpan := tracer.StartSpan(
method,
opentracing.ChildOf(parentCtx),
ext.SpanKindRPCClient,
gRPCComponentTag,
)
ctx = injectSpanContext(ctx, tracer, clientSpan)
cs, err := streamer(ctx, desc, cc, method, opts...)
if err != nil {
clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
SetSpanTags(clientSpan, err, true)
clientSpan.Finish()
return cs, err
}
return newOpenTracingClientStream(cs, method, desc, clientSpan, otgrpcOpts), nil
}
}
func newOpenTracingClientStream(cs grpc.ClientStream, method string, desc *grpc.StreamDesc, clientSpan opentracing.Span, otgrpcOpts *options) grpc.ClientStream {
finishChan := make(chan struct{})
isFinished := new(int32)
*isFinished = 0
finishFunc := func(err error) {
// The current OpenTracing specification forbids finishing a span more than
// once. Since we have multiple code paths that could concurrently call
// `finishFunc`, we need to add some sort of synchronization to guard against
// multiple finishing.
if !atomic.CompareAndSwapInt32(isFinished, 0, 1) {
return
}
close(finishChan)
defer clientSpan.Finish()
if err != nil {
clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
SetSpanTags(clientSpan, err, true)
}
if otgrpcOpts.decorator != nil {
otgrpcOpts.decorator(clientSpan, method, nil, nil, err)
}
}
go func() {
select {
case <-finishChan:
// The client span is being finished by another code path; hence, no
// action is necessary.
case <-cs.Context().Done():
finishFunc(cs.Context().Err())
}
}()
otcs := &openTracingClientStream{
ClientStream: cs,
desc: desc,
finishFunc: finishFunc,
}
// The `ClientStream` interface allows one to omit calling `Recv` if it's
// known that the result will be `io.EOF`. See
// http://stackoverflow.com/q/42915337
// In such cases, there's nothing that triggers the span to finish. We,
// therefore, set a finalizer so that the span and the context goroutine will
// at least be cleaned up when the garbage collector is run.
runtime.SetFinalizer(otcs, func(otcs *openTracingClientStream) {
otcs.finishFunc(nil)
})
return otcs
}
type openTracingClientStream struct {
grpc.ClientStream
desc *grpc.StreamDesc
finishFunc func(error)
}
func (cs *openTracingClientStream) Header() (metadata.MD, error) {
md, err := cs.ClientStream.Header()
if err != nil {
cs.finishFunc(err)
}
return md, err
}
func (cs *openTracingClientStream) SendMsg(m interface{}) error {
err := cs.ClientStream.SendMsg(m)
if err != nil {
cs.finishFunc(err)
}
return err
}
func (cs *openTracingClientStream) RecvMsg(m interface{}) error {
err := cs.ClientStream.RecvMsg(m)
if err == io.EOF {
cs.finishFunc(nil)
return err
} else if err != nil {
cs.finishFunc(err)
return err
}
if !cs.desc.ServerStreams {
cs.finishFunc(nil)
}
return err
}
func (cs *openTracingClientStream) CloseSend() error {
err := cs.ClientStream.CloseSend()
if err != nil {
cs.finishFunc(err)
}
return err
}
func injectSpanContext(ctx context.Context, tracer opentracing.Tracer, clientSpan opentracing.Span) context.Context {
md, ok := metadata.FromOutgoingContext(ctx)
if !ok {
md = metadata.New(nil)
} else {
md = md.Copy()
}
mdWriter := metadataReaderWriter{md}
err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, mdWriter)
// We have no better place to record an error than the Span itself :-/
if err != nil {
clientSpan.LogFields(log.String("event", "Tracer.Inject() failed"), log.Error(err))
}
return metadata.NewOutgoingContext(ctx, md)
}


@@ -1,69 +0,0 @@
package otgrpc
import (
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// A Class is a set of types of outcomes (including errors) that will often
// be handled in the same way.
type Class string
const (
// Unknown represents outcomes whose class could not be determined.
Unknown Class = "0xx"
// Success represents outcomes that achieved the desired results.
Success Class = "2xx"
// ClientError represents errors that were the client's fault.
ClientError Class = "4xx"
// ServerError represents errors that were the server's fault.
ServerError Class = "5xx"
)
// ErrorClass returns the class of the given error
func ErrorClass(err error) Class {
if s, ok := status.FromError(err); ok {
switch s.Code() {
// Success or "success"
case codes.OK, codes.Canceled:
return Success
// Client errors
case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists,
codes.PermissionDenied, codes.Unauthenticated, codes.FailedPrecondition,
codes.OutOfRange:
return ClientError
// Server errors
case codes.DeadlineExceeded, codes.ResourceExhausted, codes.Aborted,
codes.Unimplemented, codes.Internal, codes.Unavailable, codes.DataLoss:
return ServerError
// Not sure
case codes.Unknown:
fallthrough
default:
return Unknown
}
}
return Unknown
}
// SetSpanTags sets one or more tags on the given span according to the
// error.
func SetSpanTags(span opentracing.Span, err error, client bool) {
c := ErrorClass(err)
code := codes.Unknown
if s, ok := status.FromError(err); ok {
code = s.Code()
}
span.SetTag("response_code", code)
span.SetTag("response_class", c)
if err == nil {
return
}
if client || c == ServerError {
ext.Error.Set(span, true)
}
}
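
A hedged usage sketch for `ErrorClass`; the status values below are constructed purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Error(codes.NotFound, "no such key")
	fmt.Println(otgrpc.ErrorClass(err)) // "4xx": the client's fault
	fmt.Println(otgrpc.ErrorClass(nil)) // nil maps to codes.OK, so "2xx"
}
```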


@@ -1,76 +0,0 @@
package otgrpc
import "github.com/opentracing/opentracing-go"
// Option instances may be used in OpenTracing(Server|Client)Interceptor
// initialization.
//
// See this post about the "functional options" pattern:
// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
type Option func(o *options)
// LogPayloads returns an Option that tells the OpenTracing instrumentation to
// try to log application payloads in both directions.
func LogPayloads() Option {
return func(o *options) {
o.logPayloads = true
}
}
// SpanInclusionFunc provides an optional mechanism to decide whether or not
// to trace a given gRPC call. Return true to create a Span and initiate
// tracing, false to not create a Span and not trace.
//
// parentSpanCtx may be nil if no parent could be extracted from either the Go
// context.Context (on the client) or the RPC (on the server).
type SpanInclusionFunc func(
parentSpanCtx opentracing.SpanContext,
method string,
req, resp interface{}) bool
// IncludingSpans binds a SpanInclusionFunc to the options
func IncludingSpans(inclusionFunc SpanInclusionFunc) Option {
return func(o *options) {
o.inclusionFunc = inclusionFunc
}
}
// SpanDecoratorFunc provides an (optional) mechanism for otgrpc users to add
// arbitrary tags/logs/etc to the opentracing.Span associated with client
// and/or server RPCs.
type SpanDecoratorFunc func(
span opentracing.Span,
method string,
req, resp interface{},
grpcError error)
// SpanDecorator binds a function that decorates gRPC Spans.
func SpanDecorator(decorator SpanDecoratorFunc) Option {
return func(o *options) {
o.decorator = decorator
}
}
// The internal-only options struct. Obviously overkill at the moment; but will
// scale well as production use dictates other configuration and tuning
// parameters.
type options struct {
logPayloads bool
decorator SpanDecoratorFunc
// May be nil.
inclusionFunc SpanInclusionFunc
}
// newOptions returns the default options.
func newOptions() *options {
return &options{
logPayloads: false,
inclusionFunc: nil,
}
}
func (o *options) apply(opts ...Option) {
for _, opt := range opts {
opt(o)
}
}
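
A hedged, self-contained sketch of this functional-options pattern in use; the health-check method name and the `grpc.error` tag are invented for illustration, and `GlobalTracer()` is a no-op unless a real tracer has been registered:

```go
package main

import (
	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
	opentracing "github.com/opentracing/opentracing-go"
	"google.golang.org/grpc"
)

func main() {
	tracer := opentracing.GlobalTracer() // assumed: no-op unless a real tracer is registered

	// Skip tracing for a (hypothetical) health-check method; log payloads and
	// tag spans with the gRPC error string, if any.
	interceptor := otgrpc.OpenTracingServerInterceptor(
		tracer,
		otgrpc.LogPayloads(),
		otgrpc.IncludingSpans(func(parentSpanCtx opentracing.SpanContext,
			method string, req, resp interface{}) bool {
			return method != "/grpc.health.v1.Health/Check"
		}),
		otgrpc.SpanDecorator(func(span opentracing.Span, method string,
			req, resp interface{}, grpcError error) {
			if grpcError != nil {
				span.SetTag("grpc.error", grpcError.Error())
			}
		}),
	)

	s := grpc.NewServer(grpc.UnaryInterceptor(interceptor))
	_ = s
}
```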


@@ -1,5 +0,0 @@
// Package otgrpc provides OpenTracing support for any gRPC client or server.
//
// See the README for simple usage examples:
// https://github.com/grpc-ecosystem/grpc-opentracing/blob/master/go/otgrpc/README.md
package otgrpc


@@ -1,141 +0,0 @@
package otgrpc
import (
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/opentracing/opentracing-go/log"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
// OpenTracingServerInterceptor returns a grpc.UnaryServerInterceptor suitable
// for use in a grpc.NewServer call.
//
// For example:
//
// s := grpc.NewServer(
// ..., // (existing ServerOptions)
// grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer)))
//
// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC
// metadata; if found, the server span will act as the ChildOf that RPC
// SpanContext.
//
// Root or not, the server Span will be embedded in the context.Context for the
// application-specific gRPC handler(s) to access.
func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryServerInterceptor {
otgrpcOpts := newOptions()
otgrpcOpts.apply(optFuncs...)
return func(
ctx context.Context,
req interface{},
info *grpc.UnaryServerInfo,
handler grpc.UnaryHandler,
) (resp interface{}, err error) {
spanContext, err := extractSpanContext(ctx, tracer)
if err != nil && err != opentracing.ErrSpanContextNotFound {
// TODO: establish some sort of error reporting mechanism here. We
// don't know where to put such an error and must rely on Tracer
// implementations to do something appropriate for the time being.
}
if otgrpcOpts.inclusionFunc != nil &&
!otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, req, nil) {
return handler(ctx, req)
}
serverSpan := tracer.StartSpan(
info.FullMethod,
ext.RPCServerOption(spanContext),
gRPCComponentTag,
)
defer serverSpan.Finish()
ctx = opentracing.ContextWithSpan(ctx, serverSpan)
if otgrpcOpts.logPayloads {
serverSpan.LogFields(log.Object("gRPC request", req))
}
resp, err = handler(ctx, req)
if err == nil {
if otgrpcOpts.logPayloads {
serverSpan.LogFields(log.Object("gRPC response", resp))
}
} else {
SetSpanTags(serverSpan, err, false)
serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
}
if otgrpcOpts.decorator != nil {
otgrpcOpts.decorator(serverSpan, info.FullMethod, req, resp, err)
}
return resp, err
}
}
// OpenTracingStreamServerInterceptor returns a grpc.StreamServerInterceptor suitable
// for use in a grpc.NewServer call. The interceptor instruments streaming RPCs by
// creating a single span to correspond to the lifetime of the RPC's stream.
//
// For example:
//
// s := grpc.NewServer(
// ..., // (existing ServerOptions)
// grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer)))
//
// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC
// metadata; if found, the server span will act as the ChildOf that RPC
// SpanContext.
//
// Root or not, the server Span will be embedded in the context.Context for the
// application-specific gRPC handler(s) to access.
func OpenTracingStreamServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamServerInterceptor {
otgrpcOpts := newOptions()
otgrpcOpts.apply(optFuncs...)
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
spanContext, err := extractSpanContext(ss.Context(), tracer)
if err != nil && err != opentracing.ErrSpanContextNotFound {
// TODO: establish some sort of error reporting mechanism here. We
// don't know where to put such an error and must rely on Tracer
// implementations to do something appropriate for the time being.
}
if otgrpcOpts.inclusionFunc != nil &&
!otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, nil, nil) {
return handler(srv, ss)
}
serverSpan := tracer.StartSpan(
info.FullMethod,
ext.RPCServerOption(spanContext),
gRPCComponentTag,
)
defer serverSpan.Finish()
ss = &openTracingServerStream{
ServerStream: ss,
ctx: opentracing.ContextWithSpan(ss.Context(), serverSpan),
}
err = handler(srv, ss)
if err != nil {
SetSpanTags(serverSpan, err, false)
serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
}
if otgrpcOpts.decorator != nil {
otgrpcOpts.decorator(serverSpan, info.FullMethod, nil, nil, err)
}
return err
}
}
type openTracingServerStream struct {
grpc.ServerStream
ctx context.Context
}
func (ss *openTracingServerStream) Context() context.Context {
return ss.ctx
}
func extractSpanContext(ctx context.Context, tracer opentracing.Tracer) (opentracing.SpanContext, error) {
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
md = metadata.New(nil)
}
return tracer.Extract(opentracing.HTTPHeaders, metadataReaderWriter{md})
}


@@ -1,42 +0,0 @@
package otgrpc
import (
"strings"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"google.golang.org/grpc/metadata"
)
var (
// Morally a const:
gRPCComponentTag = opentracing.Tag{string(ext.Component), "gRPC"}
)
// metadataReaderWriter satisfies both the opentracing.TextMapReader and
// opentracing.TextMapWriter interfaces.
type metadataReaderWriter struct {
metadata.MD
}
func (w metadataReaderWriter) Set(key, val string) {
// The GRPC HPACK implementation rejects any uppercase keys here.
//
// As such, since the HTTP_HEADERS format is case-insensitive anyway, we
// blindly lowercase the key (which is guaranteed to work in the
// Inject/Extract sense per the OpenTracing spec).
key = strings.ToLower(key)
w.MD[key] = append(w.MD[key], val)
}
func (w metadataReaderWriter) ForeachKey(handler func(key, val string) error) error {
for k, vals := range w.MD {
for _, v := range vals {
if err := handler(k, v); err != nil {
return err
}
}
}
return nil
}


@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

File diff suppressed because it is too large


@@ -1,48 +0,0 @@
//
// Copyright 2018 Intel Corporation.
//
// SPDX-License-Identifier: Apache-2.0
//
syntax = "proto3";
package types;
enum IPFamily {
v4 = 0;
v6 = 1;
}
message IPAddress {
IPFamily family = 1;
string address = 2;
string mask = 3;
}
message Interface {
string device = 1;
string name = 2;
repeated IPAddress IPAddresses = 3;
uint64 mtu = 4;
string hwAddr = 5;
// pciAddr is the PCI address in the format "bridgeAddr/deviceAddr".
// Here, bridgeAddr is the address at which the bridge is attached on the root bus,
// while deviceAddr is the address at which the network device is attached on the bridge.
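// For example, a value like "01/02" (hypothetical) would place the bridge in
// slot 01 of the root bus and the network device in slot 02 on that bridge.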
string pciAddr = 6;
// Type defines the type of interface described by this structure.
// The expected values are the ones defined by the netlink
// library for each type of link. Here is a non-exhaustive
// list: "veth", "macvtap", "vlan", "macvlan", "tap", ...
string type = 7;
uint32 raw_flags = 8;
}
message Route {
string dest = 1;
string gateway = 2;
string device = 3;
string source = 4;
uint32 scope = 5;
}


@@ -1,473 +0,0 @@
// Copyright 2017 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
// gRPC client wrapper
package client
import (
"bufio"
"context"
"errors"
"fmt"
"net"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/hashicorp/yamux"
"github.com/mdlayher/vsock"
// opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
// "google.golang.org/grpc"
"google.golang.org/grpc/codes"
grpcStatus "google.golang.org/grpc/status"
agentgrpc "github.com/kata-containers/agent/protocols/grpc"
"github.com/containerd/ttrpc"
)
const (
UnixSocketScheme = "unix"
VSockSocketScheme = "vsock"
HybridVSockScheme = "hvsock"
)
var defaultDialTimeout = 15 * time.Second
var defaultCloseTimeout = 5 * time.Second
var hybridVSockPort uint32
var agentClientFields = logrus.Fields{
"name": "agent-client",
"pid": os.Getpid(),
"source": "agent-client",
}
var agentClientLog = logrus.WithFields(agentClientFields)
// AgentClient is an agent gRPC client connection wrapper for agentgrpc.AgentServiceClient
type AgentClient struct {
AgentServiceClient agentgrpc.AgentServiceService
HealthClient agentgrpc.HealthService
conn *ttrpc.Client
}
type yamuxSessionStream struct {
net.Conn
session *yamux.Session
}
func (y *yamuxSessionStream) Close() error {
waitCh := y.session.CloseChan()
timeout := time.NewTimer(defaultCloseTimeout)
if err := y.Conn.Close(); err != nil {
return err
}
if err := y.session.Close(); err != nil {
return err
}
// block until session is really closed
select {
case <-waitCh:
timeout.Stop()
case <-timeout.C:
return fmt.Errorf("timeout waiting for session close")
}
return nil
}
type dialer func(string, time.Duration) (net.Conn, error)
// NewAgentClient creates a new agent gRPC client and handles both unix and vsock addresses.
//
// Supported sock address formats are:
// - unix://<unix socket path>
// - vsock://<cid>:<port>
// - <unix socket path>
// - hvsock://<path>:<port>. Firecracker implements the virtio-vsock device
// model, and mediates communication between AF_UNIX sockets (on the host end)
// and AF_VSOCK sockets (on the guest end).
func NewAgentClient(ctx context.Context, sock string, enableYamux bool) (*AgentClient, error) {
grpcAddr, parsedAddr, err := parse(sock)
if err != nil {
return nil, err
}
var conn net.Conn
var d dialer
d = agentDialer(parsedAddr, enableYamux)
conn, err = d(grpcAddr, defaultDialTimeout)
if err != nil {
return nil, err
}
/*
dialOpts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithBlock()}
dialOpts = append(dialOpts, grpc.WithDialer(agentDialer(parsedAddr, enableYamux)))
var tracer opentracing.Tracer
span := opentracing.SpanFromContext(ctx)
// If the context contains a trace span, trace all client comms
if span != nil {
tracer = span.Tracer()
dialOpts = append(dialOpts,
grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer)))
dialOpts = append(dialOpts,
grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer)))
}
ctx, cancel := context.WithTimeout(ctx, defaultDialTimeout)
defer cancel()
conn, err := grpc.DialContext(ctx, grpcAddr, dialOpts...)
if err != nil {
return nil, err
}
*/
client := ttrpc.NewClient(conn)
return &AgentClient{
AgentServiceClient: agentgrpc.NewAgentServiceClient(client),
HealthClient: agentgrpc.NewHealthClient(client),
conn: client,
}, nil
}
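// A minimal usage sketch (illustrative; it assumes the ttrpc-generated
// HealthService exposes Check(ctx, *CheckRequest)):
//
//	cli, err := NewAgentClient(ctx, "vsock://3:1024", false)
//	if err != nil {
//		return err
//	}
//	defer cli.Close()
//	if _, err := cli.HealthClient.Check(ctx, &agentgrpc.CheckRequest{}); err != nil {
//		return err
//	}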
// Close an existing connection to the agent gRPC server.
func (c *AgentClient) Close() error {
return c.conn.Close()
}
// The vsock scheme is self-defined so that it is not parsed by grpc.
// Any address of the form "scheme://" would be parsed by grpc and we would lose
// all address information, because the vsock scheme is not supported by grpc.
// Therefore we use the format vsock:<cid>:<port> for vsock addresses.
//
// See https://github.com/grpc/grpc/blob/master/doc/naming.md
//
// In the long term, we should patch grpc to support vsock scheme and also
// upstream the timed vsock dialer.
func parse(sock string) (string, *url.URL, error) {
addr, err := url.Parse(sock)
if err != nil {
return "", nil, err
}
var grpcAddr string
// validate more
switch addr.Scheme {
case VSockSocketScheme:
if addr.Hostname() == "" || addr.Port() == "" || addr.Path != "" {
return "", nil, grpcStatus.Errorf(codes.InvalidArgument, "Invalid vsock scheme: %s", sock)
}
if _, err := strconv.ParseUint(addr.Hostname(), 10, 32); err != nil {
return "", nil, grpcStatus.Errorf(codes.InvalidArgument, "Invalid vsock cid: %s", sock)
}
if _, err := strconv.ParseUint(addr.Port(), 10, 32); err != nil {
return "", nil, grpcStatus.Errorf(codes.InvalidArgument, "Invalid vsock port: %s", sock)
}
grpcAddr = VSockSocketScheme + ":" + addr.Host
case UnixSocketScheme:
fallthrough
case "":
if (addr.Host == "" && addr.Path == "") || addr.Port() != "" {
return "", nil, grpcStatus.Errorf(codes.InvalidArgument, "Invalid unix scheme: %s", sock)
}
if addr.Host == "" {
grpcAddr = UnixSocketScheme + ":///" + addr.Path
} else {
grpcAddr = UnixSocketScheme + ":///" + addr.Host + "/" + addr.Path
}
case HybridVSockScheme:
if addr.Path == "" {
return "", nil, grpcStatus.Errorf(codes.InvalidArgument, "Invalid hybrid vsock scheme: %s", sock)
}
hvsocket := strings.Split(addr.Path, ":")
if len(hvsocket) != 2 {
return "", nil, grpcStatus.Errorf(codes.InvalidArgument, "Invalid hybrid vsock scheme: %s", sock)
}
// Save the port since the agent dialer does not pass the port to the hybridVSock dialer
var port uint64
if port, err = strconv.ParseUint(hvsocket[1], 10, 32); err != nil {
return "", nil, grpcStatus.Errorf(codes.InvalidArgument, "Invalid hybrid vsock port %s: %v", sock, err)
}
hybridVSockPort = uint32(port)
grpcAddr = HybridVSockScheme + ":" + hvsocket[0]
default:
return "", nil, grpcStatus.Errorf(codes.InvalidArgument, "Invalid scheme: %s", sock)
}
return grpcAddr, addr, nil
}
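// Illustrative mappings performed by parse() (paths here are made up):
//	"vsock://3:1024"                          -> "vsock:3:1024"
//	"hvsock:///run/vc/vm/sb/kata.hvsock:1024" -> "hvsock:/run/vc/vm/sb/kata.hvsock" (port 1024 kept in hybridVSockPort)
//	"/run/kata.sock" or "unix:///run/kata.sock" -> a "unix:///..." address consumed by unixDialer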
// This function is meant to run in a go routine since it will send ping
// commands every second. It behaves as a heartbeat to maintain a proper
// communication state with the Yamux server in the agent.
func heartBeat(session *yamux.Session) {
if session == nil {
return
}
for {
if session.IsClosed() {
break
}
session.Ping()
// 1 Hz heartbeat
time.Sleep(time.Second)
}
}
func agentDialer(addr *url.URL, enableYamux bool) dialer {
var d dialer
switch addr.Scheme {
case VSockSocketScheme:
d = vsockDialer
case HybridVSockScheme:
d = HybridVSockDialer
case UnixSocketScheme:
fallthrough
default:
d = unixDialer
}
if !enableYamux {
return d
}
// yamux dialer
return func(sock string, timeout time.Duration) (net.Conn, error) {
conn, err := d(sock, timeout)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
conn.Close()
}
}()
var session *yamux.Session
sessionConfig := yamux.DefaultConfig()
// Disable keepAlive since we don't know how long a container can stay paused
sessionConfig.EnableKeepAlive = false
sessionConfig.ConnectionWriteTimeout = time.Second
session, err = yamux.Client(conn, sessionConfig)
if err != nil {
return nil, err
}
// Start the heartbeat in a separate go routine
go heartBeat(session)
var stream net.Conn
stream, err = session.Open()
if err != nil {
return nil, err
}
y := &yamuxSessionStream{
Conn: stream.(net.Conn),
session: session,
}
return y, nil
}
}
func unixDialer(sock string, timeout time.Duration) (net.Conn, error) {
if strings.HasPrefix(sock, "unix:") {
// Strip only the scheme prefix; strings.Trim would also remove any
// leading/trailing characters from the set "unix:" in the socket path.
sock = strings.TrimPrefix(sock, "unix:")
}
dialFunc := func() (net.Conn, error) {
return net.DialTimeout("unix", sock, timeout)
}
timeoutErr := grpcStatus.Errorf(codes.DeadlineExceeded, "timed out connecting to unix socket %s", sock)
return commonDialer(timeout, dialFunc, timeoutErr)
}
func parseGrpcVsockAddr(sock string) (uint32, uint32, error) {
sp := strings.Split(sock, ":")
if len(sp) != 3 {
return 0, 0, grpcStatus.Errorf(codes.InvalidArgument, "Invalid vsock address: %s", sock)
}
if sp[0] != VSockSocketScheme {
return 0, 0, grpcStatus.Errorf(codes.InvalidArgument, "Invalid vsock URL scheme: %s", sp[0])
}
cid, err := strconv.ParseUint(sp[1], 10, 32)
if err != nil {
return 0, 0, grpcStatus.Errorf(codes.InvalidArgument, "Invalid vsock cid: %s", sp[1])
}
port, err := strconv.ParseUint(sp[2], 10, 32)
if err != nil {
return 0, 0, grpcStatus.Errorf(codes.InvalidArgument, "Invalid vsock port: %s", sp[2])
}
return uint32(cid), uint32(port), nil
}
func parseGrpcHybridVSockAddr(sock string) (string, uint32, error) {
sp := strings.Split(sock, ":")
// scheme and host are required
if len(sp) < 2 {
return "", 0, grpcStatus.Errorf(codes.InvalidArgument, "Invalid hybrid vsock address: %s", sock)
}
if sp[0] != HybridVSockScheme {
return "", 0, grpcStatus.Errorf(codes.InvalidArgument, "Invalid hybrid vsock URL scheme: %s", sock)
}
port := uint32(0)
// the third is the port
if len(sp) == 3 {
p, err := strconv.ParseUint(sp[2], 10, 32)
if err == nil {
port = uint32(p)
}
}
return sp[1], port, nil
}
// This bypasses the grpc dialer backoff strategy and handles the dial timeout
// internally. Because we do not have a large number of concurrent dialers,
// there is no need for such aggressive backoffs, which would hurt kata
// containers' boot-up speed. For more information, see
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md
func commonDialer(timeout time.Duration, dialFunc func() (net.Conn, error), timeoutErrMsg error) (net.Conn, error) {
t := time.NewTimer(timeout)
cancel := make(chan bool)
ch := make(chan net.Conn)
go func() {
for {
select {
case <-cancel:
// canceled or channel closed
return
default:
}
conn, err := dialFunc()
if err == nil {
// Send conn back only if the timer has not fired.
// Otherwise there might be no one left reading it.
if t.Stop() {
ch <- conn
} else {
conn.Close()
}
return
}
}
}()
var conn net.Conn
var ok bool
select {
case conn, ok = <-ch:
if !ok {
return nil, timeoutErrMsg
}
case <-t.C:
cancel <- true
return nil, timeoutErrMsg
}
return conn, nil
}
func vsockDialer(sock string, timeout time.Duration) (net.Conn, error) {
cid, port, err := parseGrpcVsockAddr(sock)
if err != nil {
return nil, err
}
dialFunc := func() (net.Conn, error) {
return vsock.Dial(cid, port)
}
timeoutErr := grpcStatus.Errorf(codes.DeadlineExceeded, "timed out connecting to vsock %d:%d", cid, port)
return commonDialer(timeout, dialFunc, timeoutErr)
}
// HybridVSockDialer dials to a hybrid virtio socket
func HybridVSockDialer(sock string, timeout time.Duration) (net.Conn, error) {
udsPath, port, err := parseGrpcHybridVSockAddr(sock)
if err != nil {
return nil, err
}
dialFunc := func() (net.Conn, error) {
handshakeTimeout := 10 * time.Second
conn, err := net.DialTimeout("unix", udsPath, timeout)
if err != nil {
return nil, err
}
if port == 0 {
// use the port read at parse()
port = hybridVSockPort
}
// Once the connection is opened, the following command MUST be sent;
// the hypervisor needs to know the port number on which the agent is listening
// in order to establish the connection.
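// For example (illustrative): for agent port 1024 the dialer writes "CONNECT 1024\n"
// and expects a reply containing "OK" (e.g. "OK 1073741824\n") from the hypervisor
// before the stream is handed back to the caller.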
if _, err = conn.Write([]byte(fmt.Sprintf("CONNECT %d\n", port))); err != nil {
conn.Close()
return nil, err
}
errChan := make(chan error)
go func() {
reader := bufio.NewReader(conn)
response, err := reader.ReadString('\n')
if err != nil {
errChan <- err
return
}
agentClientLog.WithField("response", response).Debug("HybridVsock trivial handshake")
if strings.Contains(response, "OK") {
errChan <- nil
} else {
errChan <- errors.New("HybridVsock trivial handshake failed with malformed response code")
}
}()
select {
case err = <-errChan:
if err != nil {
conn.Close()
agentClientLog.WithField("Error", err).Debug("HybridVsock trivial handshake failed")
return nil, err
}
return conn, nil
case <-time.After(handshakeTimeout):
// Timeout: the kernel vsock implementation has a race condition where no response is given.
// Instead of waiting forever for a response, time out after a fair amount of time.
// See: https://lore.kernel.org/netdev/668b0eda8823564cd604b1663dc53fbaece0cd4e.camel@intel.com/
conn.Close()
return nil, errors.New("timeout waiting for hybrid vsocket handshake")
}
}
timeoutErr := grpcStatus.Errorf(codes.DeadlineExceeded, "timed out connecting to hybrid vsocket %s", sock)
return commonDialer(timeout, dialFunc, timeoutErr)
}

File diff suppressed because it is too large


@@ -1,496 +0,0 @@
//
// Copyright 2017 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
syntax = "proto3";
package grpc;
import "github.com/kata-containers/agent/protocols/grpc/oci.proto";
import "github.com/kata-containers/agent/pkg/types/types.proto";
import "google/protobuf/empty.proto";
// unstable
service AgentService {
// execution
rpc CreateContainer(CreateContainerRequest) returns (google.protobuf.Empty);
rpc StartContainer(StartContainerRequest) returns (google.protobuf.Empty);
// RemoveContainer will tear down an existing container by forcibly terminating
// all processes running inside that container and releasing all internal
// resources associated with it.
// RemoveContainer will wait for all processes to terminate before returning.
// If any process cannot be killed, or cannot be killed within
// the RemoveContainerRequest timeout, RemoveContainer will return an error.
rpc RemoveContainer(RemoveContainerRequest) returns (google.protobuf.Empty);
rpc ExecProcess(ExecProcessRequest) returns (google.protobuf.Empty);
rpc SignalProcess(SignalProcessRequest) returns (google.protobuf.Empty);
rpc WaitProcess(WaitProcessRequest) returns (WaitProcessResponse); // wait & reap like waitpid(2)
rpc ListProcesses(ListProcessesRequest) returns (ListProcessesResponse);
rpc UpdateContainer(UpdateContainerRequest) returns (google.protobuf.Empty);
rpc StatsContainer(StatsContainerRequest) returns (StatsContainerResponse);
rpc PauseContainer(PauseContainerRequest) returns (google.protobuf.Empty);
rpc ResumeContainer(ResumeContainerRequest) returns (google.protobuf.Empty);
// stdio
rpc WriteStdin(WriteStreamRequest) returns (WriteStreamResponse);
rpc ReadStdout(ReadStreamRequest) returns (ReadStreamResponse);
rpc ReadStderr(ReadStreamRequest) returns (ReadStreamResponse);
rpc CloseStdin(CloseStdinRequest) returns (google.protobuf.Empty);
rpc TtyWinResize(TtyWinResizeRequest) returns (google.protobuf.Empty);
// networking
rpc UpdateInterface(UpdateInterfaceRequest) returns (types.Interface);
rpc UpdateRoutes(UpdateRoutesRequest) returns (Routes);
rpc ListInterfaces(ListInterfacesRequest) returns(Interfaces);
rpc ListRoutes(ListRoutesRequest) returns (Routes);
// tracing
rpc StartTracing(StartTracingRequest) returns (google.protobuf.Empty);
rpc StopTracing(StopTracingRequest) returns (google.protobuf.Empty);
// misc (TODO: some rpcs can be replaced by hyperstart-exec)
rpc CreateSandbox(CreateSandboxRequest) returns (google.protobuf.Empty);
rpc DestroySandbox(DestroySandboxRequest) returns (google.protobuf.Empty);
rpc OnlineCPUMem(OnlineCPUMemRequest) returns (google.protobuf.Empty);
rpc ReseedRandomDev(ReseedRandomDevRequest) returns (google.protobuf.Empty);
rpc GetGuestDetails(GuestDetailsRequest) returns (GuestDetailsResponse);
rpc MemHotplugByProbe(MemHotplugByProbeRequest) returns (google.protobuf.Empty);
rpc SetGuestDateTime(SetGuestDateTimeRequest) returns (google.protobuf.Empty);
rpc CopyFile(CopyFileRequest) returns (google.protobuf.Empty);
}
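// A typical (illustrative, not normative) call order over a sandbox lifetime is:
// CreateSandbox -> CreateContainer -> StartContainer -> WaitProcess ->
// RemoveContainer -> DestroySandbox, with the stdio, networking and misc RPCs
// used while the sandbox is running.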
message CreateContainerRequest {
string container_id = 1;
string exec_id = 2;
StringUser string_user = 3;
repeated Device devices = 4;
repeated Storage storages = 5;
Spec OCI = 6;
// This field is used to indicate if the container needs to join
// sandbox shared pid ns or create a new namespace. This field is
// meant to override the NEWPID config settings in the OCI spec.
// The agent would receive an OCI spec with PID namespace cleared
// out altogether and not just the pid ns path.
bool sandbox_pidns = 7;
}
message StartContainerRequest {
string container_id = 1;
}
message RemoveContainerRequest {
string container_id = 1;
// RemoveContainer will return an error if
// it cannot kill some container processes
// within timeout seconds.
// Setting timeout to 0 means RemoveContainer will
// wait forever.
uint32 timeout = 2;
}
message ExecProcessRequest {
string container_id = 1;
string exec_id = 2;
StringUser string_user = 3;
Process process = 4;
}
message SignalProcessRequest {
string container_id = 1;
// Special case for SignalProcess(): exec_id can be empty(""),
// which means to send the signal to all the processes including their descendants.
// Other APIs with exec_id should treat empty exec_id as an invalid request.
string exec_id = 2;
uint32 signal = 3;
}
message WaitProcessRequest {
string container_id = 1;
string exec_id = 2;
}
message WaitProcessResponse {
int32 status = 1;
}
// ListProcessesRequest contains the options used to list running processes inside the container
message ListProcessesRequest {
string container_id = 1;
string format = 2;
repeated string args = 3;
}
// ListProcessesResponse represents the list of running processes inside the container
message ListProcessesResponse {
bytes process_list = 1;
}
message UpdateContainerRequest {
string container_id = 1;
LinuxResources resources = 2;
}
message StatsContainerRequest {
string container_id = 1;
}
message PauseContainerRequest {
string container_id = 1;
}
message ResumeContainerRequest {
string container_id = 1;
}
message CpuUsage {
uint64 total_usage = 1;
repeated uint64 percpu_usage = 2;
uint64 usage_in_kernelmode = 3;
uint64 usage_in_usermode = 4;
}
message ThrottlingData {
uint64 periods = 1;
uint64 throttled_periods = 2;
uint64 throttled_time = 3;
}
message CpuStats {
CpuUsage cpu_usage = 1;
ThrottlingData throttling_data = 2;
}
message PidsStats {
uint64 current = 1;
uint64 limit = 2;
}
message MemoryData {
uint64 usage = 1;
uint64 max_usage = 2;
uint64 failcnt = 3;
uint64 limit = 4;
}
message MemoryStats {
uint64 cache = 1;
MemoryData usage = 2;
MemoryData swap_usage = 3;
MemoryData kernel_usage = 4;
bool use_hierarchy = 5;
map<string, uint64> stats = 6;
}
message BlkioStatsEntry {
uint64 major = 1;
uint64 minor = 2;
string op = 3;
uint64 value = 4;
}
message BlkioStats {
repeated BlkioStatsEntry io_service_bytes_recursive = 1; // number of bytes transferred to and from the block device
repeated BlkioStatsEntry io_serviced_recursive = 2;
repeated BlkioStatsEntry io_queued_recursive = 3;
repeated BlkioStatsEntry io_service_time_recursive = 4;
repeated BlkioStatsEntry io_wait_time_recursive = 5;
repeated BlkioStatsEntry io_merged_recursive = 6;
repeated BlkioStatsEntry io_time_recursive = 7;
repeated BlkioStatsEntry sectors_recursive = 8;
}
message HugetlbStats {
uint64 usage = 1;
uint64 max_usage = 2;
uint64 failcnt = 3;
}
message CgroupStats {
CpuStats cpu_stats = 1;
MemoryStats memory_stats = 2;
PidsStats pids_stats = 3;
BlkioStats blkio_stats = 4;
map<string, HugetlbStats> hugetlb_stats = 5; // the map is in the format "size of hugepage: stats of the hugepage"
}
message NetworkStats {
string name = 1;
uint64 rx_bytes = 2;
uint64 rx_packets = 3;
uint64 rx_errors = 4;
uint64 rx_dropped = 5;
uint64 tx_bytes = 6;
uint64 tx_packets = 7;
uint64 tx_errors = 8;
uint64 tx_dropped = 9;
}
message StatsContainerResponse {
CgroupStats cgroup_stats = 1;
repeated NetworkStats network_stats = 2;
}
message WriteStreamRequest {
string container_id = 1;
string exec_id = 2;
bytes data = 3;
}
message WriteStreamResponse {
uint32 len = 1;
}
message ReadStreamRequest {
string container_id = 1;
string exec_id = 2;
uint32 len = 3;
}
message ReadStreamResponse {
bytes data = 1;
}
message CloseStdinRequest {
string container_id = 1;
string exec_id = 2;
}
message TtyWinResizeRequest {
string container_id = 1;
string exec_id = 2;
uint32 row = 3;
uint32 column = 4;
}
message KernelModule {
// This field is the name of the kernel module.
string name = 1;
// This field holds the parameters for the kernel module:
// whitespace-delimited key=value pairs passed to modprobe(8).
repeated string parameters = 2;
}
message CreateSandboxRequest {
string hostname = 1;
repeated string dns = 2;
repeated Storage storages = 3;
// This field means that a pause process needs to be created by the
// agent. The pid namespace of the pause process will be treated as
// a shared pid namespace. All containers created will join this shared
// pid namespace.
bool sandbox_pidns = 4;
// SandboxId identifies which sandbox is using the agent. We allow only
// one sandbox per agent and implicitly require that CreateSandbox is
// called before other sandbox/network calls.
string sandbox_id = 5;
// This field, if non-empty, designates an absolute path to a directory
// that the agent will search for OCI hooks to run within the guest.
string guest_hook_path = 6;
// This field is the list of kernel modules to be loaded in the guest kernel.
repeated KernelModule kernel_modules = 7;
}
message DestroySandboxRequest {
}
message Interfaces {
repeated types.Interface Interfaces = 1;
}
message Routes {
repeated types.Route Routes = 1;
}
message UpdateInterfaceRequest {
types.Interface interface = 1;
}
message UpdateRoutesRequest {
Routes routes = 1;
}
message ListInterfacesRequest {
}
message ListRoutesRequest {
}
message OnlineCPUMemRequest {
// Wait specifies if the caller waits for the agent to online all resources.
// If true the agent returns once all resources have been connected, otherwise all
// resources are connected asynchronously and the agent returns immediately.
bool wait = 1;
// NbCpus specifies the number of CPUs that were added and the agent has to online.
uint32 nb_cpus = 2;
// CpuOnly specifies whether only CPUs (and not memory) should be onlined.
bool cpu_only = 3;
}
message ReseedRandomDevRequest {
// Data specifies the random data used to reseed the guest crng.
bytes data = 2;
}
// AgentDetails provides information to the client about the running agent.
message AgentDetails {
// Semantic version of agent (see https://semver.org).
string version = 1;
// Set if the agent is running as PID 1.
bool init_daemon = 2;
// List of available device handlers.
repeated string device_handlers = 3;
// List of available storage handlers.
repeated string storage_handlers = 4;
// Set only if the agent is built with seccomp support and the guest
// environment supports seccomp.
bool supports_seccomp = 5;
}
message GuestDetailsRequest {
// MemBlockSize asks server to return the system memory block size that can be used
// for memory hotplug alignment. Typically the server returns what's in
// /sys/devices/system/memory/block_size_bytes.
bool mem_block_size = 1;
// MemoryHotplugProbe asks the server to return whether the guest kernel supports memory hotplug
// via the probe interface. Typically the server will check if the path
// /sys/devices/system/memory/probe exists.
bool mem_hotplug_probe = 2;
}
message GuestDetailsResponse {
// MemBlockSizeBytes returns the system memory block size in bytes.
uint64 mem_block_size_bytes = 1;
AgentDetails agent_details = 2;
bool support_mem_hotplug_probe = 3;
}
message MemHotplugByProbeRequest {
// The server needs to write the value of memHotplugProbeAddr into the file /sys/devices/system/memory/probe,
// in order to notify the guest kernel about the hot-add memory event.
repeated uint64 memHotplugProbeAddr = 1;
}
message SetGuestDateTimeRequest {
// Sec the second since the Epoch.
int64 Sec = 1;
// Usec the microseconds portion of time since the Epoch.
int64 Usec = 2;
}
// Storage represents both the rootfs of the container, and any volume that
// could have been defined through the Mount list of the OCI specification.
message Storage {
// Driver is used to define the way the storage is passed through the
// virtual machine. It can be "9p", "blk", or something else, but for
// all cases, this will define if some extra steps are required before
// this storage gets mounted into the container.
string driver = 1;
// DriverOptions allows the caller to define a list of options such
// as block sizes, numbers of luns, ... which are very specific to
// every device and cannot be generalized through extra fields.
repeated string driver_options = 2;
// Source can be anything representing the source of the storage. This
// will be handled by the proper handler based on the Driver used.
// For instance, it can be a very simple path if the caller knows the
// name of device inside the VM, or it can be some sort of identifier
// to let the agent find the device inside the VM.
string source = 3;
// Fstype represents the filesystem that needs to be used to mount the
// storage inside the VM. For instance, it could be "xfs" for block
// device, "9p" for shared filesystem, or "tmpfs" for shared /dev/shm.
string fstype = 4;
// Options describes the additional options that might be needed to
// properly mount the storage filesystem.
repeated string options = 5;
// MountPoint refers to the path where the storage should be mounted
// inside the VM.
string mount_point = 6;
}
// Device represents only the devices that could have been defined through the
// Linux Device list of the OCI specification.
message Device {
// Id can be used to identify the device inside the VM. Some devices
// might not need it to be identified on the VM, and will rely on the
// provided VmPath instead.
string id = 1;
// Type defines the type of device described. This can be "blk",
// "scsi", "vfio", ...
// Particularly, this should be used to trigger the use of the
// appropriate device handler.
string type = 2;
// VmPath can be used by the caller to provide directly the path of
// the device as it will appear inside the VM. For some devices, the
// device id or the list of options passed might not be enough to find
// the device. In those cases, the caller should predict and provide
// this vm_path.
string vm_path = 3;
// ContainerPath defines the path where the device should be found inside
// the container. This path should match the path of the device from
// the device list listed inside the OCI spec. This is used in order
// to identify the right device in the spec and update it with the
// right options such as major/minor numbers as they appear inside
// the VM for instance. Note that an empty ctr_path should be used
// to make sure the device handler inside the agent is called, but
// no spec update needs to be performed. This has to happen for the
// case of rootfs, when a device has to be waited for after it has
// been hotplugged. An equivalent Storage entry should be defined if
// any mount needs to be performed afterwards.
string container_path = 4;
// Options allows the caller to define a list of options such as block
// sizes, numbers of luns, ... which are very specific to every device
// and cannot be generalized through extra fields.
repeated string options = 5;
}
message StringUser {
string uid = 1;
string gid = 2;
repeated string additionalGids = 3;
}
message CopyFileRequest {
// Path is the destination file in the guest. It must be absolute,
// canonical and below /run.
string path = 1;
// FileSize is the expected file size. For security reasons, write operations
// are made to a temporary file; once it reaches the expected size, it is moved
// to the destination path.
int64 file_size = 2;
// FileMode is the file mode.
uint32 file_mode = 3;
// DirMode is the mode for the parent directories of destination path.
uint32 dir_mode = 4;
// Uid is the numeric user id.
int32 uid = 5;
// Gid is the numeric group id.
int32 gid = 6;
// Offset for the next write operation.
int64 offset = 7;
// Data to write in the destination file.
bytes data = 8;
}
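// Illustrative use (assumed from the field comments above): a large file is sent as
// several CopyFile calls sharing the same path and file_size, each carrying the next
// chunk in data with offset advanced by the previous chunk's length; the agent moves
// the temporary file to path once file_size bytes have been received.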
message StartTracingRequest {
}
message StopTracingRequest {
}


@@ -1,586 +0,0 @@
{
"ociVersion": "1.0.0",
"process": {
"user": {
"uid": 0,
"gid": 0
},
"args": [
"sh"
],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm"
],
"cwd": "/",
"capabilities": {
"bounding": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"effective": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"inheritable": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"permitted": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"ambient": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
]
},
"rlimits": [
{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}
]
},
"root": {
"path": "rootfs"
},
"hostname": "mrsdalloway",
"mounts": [
{
"destination": "/proc",
"type": "proc",
"source": "proc"
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/dev/pts",
"type": "devpts",
"source": "devpts",
"options": [
"nosuid",
"noexec",
"newinstance",
"ptmxmode=0666",
"mode=0620",
"gid=5"
]
},
{
"destination": "/dev/shm",
"type": "tmpfs",
"source": "shm",
"options": [
"nosuid",
"noexec",
"nodev",
"mode=1777",
"size=65536k"
]
},
{
"destination": "/dev/mqueue",
"type": "mqueue",
"source": "mqueue",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev",
"ro"
]
}
],
"linux": {
"resources": {
"devices": [
{
"allow": false,
"access": "rwm"
}
],
"blockIO": {
"weight": 32,
"leafWeight": 33,
"weightDevice": [
{
"major": 10,
"minor": 11,
"weight": 64,
"leafWeight": 128
}
]
}
},
"namespaces": [
{
"type": "pid"
},
{
"type": "network"
},
{
"type": "ipc"
},
{
"type": "uts"
},
{
"type": "mount"
}
],
"seccomp": {
"defaultAction": "SCMP_ACT_ERRNO",
"architectures": [
"SCMP_ARCH_X86_64",
"SCMP_ARCH_X86",
"SCMP_ARCH_X32"
],
"syscalls": [
{
"names": [
"accept",
"accept4",
"access",
"alarm",
"bind",
"brk",
"capget",
"capset",
"chdir",
"chmod",
"chown",
"chown32",
"clock_getres",
"clock_gettime",
"clock_nanosleep",
"close",
"connect",
"copy_file_range",
"creat",
"dup",
"dup2",
"dup3",
"epoll_create",
"epoll_create1",
"epoll_ctl",
"epoll_ctl_old",
"epoll_pwait",
"epoll_wait",
"epoll_wait_old",
"eventfd",
"eventfd2",
"execve",
"execveat",
"exit",
"exit_group",
"faccessat",
"fadvise64",
"fadvise64_64",
"fallocate",
"fanotify_mark",
"fchdir",
"fchmod",
"fchmodat",
"fchown",
"fchown32",
"fchownat",
"fcntl",
"fcntl64",
"fdatasync",
"fgetxattr",
"flistxattr",
"flock",
"fork",
"fremovexattr",
"fsetxattr",
"fstat",
"fstat64",
"fstatat64",
"fstatfs",
"fstatfs64",
"fsync",
"ftruncate",
"ftruncate64",
"futex",
"futimesat",
"getcpu",
"getcwd",
"getdents",
"getdents64",
"getegid",
"getegid32",
"geteuid",
"geteuid32",
"getgid",
"getgid32",
"getgroups",
"getgroups32",
"getitimer",
"getpeername",
"getpgid",
"getpgrp",
"getpid",
"getppid",
"getpriority",
"getrandom",
"getresgid",
"getresgid32",
"getresuid",
"getresuid32",
"getrlimit",
"get_robust_list",
"getrusage",
"getsid",
"getsockname",
"getsockopt",
"get_thread_area",
"gettid",
"gettimeofday",
"getuid",
"getuid32",
"getxattr",
"inotify_add_watch",
"inotify_init",
"inotify_init1",
"inotify_rm_watch",
"io_cancel",
"ioctl",
"io_destroy",
"io_getevents",
"ioprio_get",
"ioprio_set",
"io_setup",
"io_submit",
"ipc",
"kill",
"lchown",
"lchown32",
"lgetxattr",
"link",
"linkat",
"listen",
"listxattr",
"llistxattr",
"_llseek",
"lremovexattr",
"lseek",
"lsetxattr",
"lstat",
"lstat64",
"madvise",
"memfd_create",
"mincore",
"mkdir",
"mkdirat",
"mknod",
"mknodat",
"mlock",
"mlock2",
"mlockall",
"mmap",
"mmap2",
"mprotect",
"mq_getsetattr",
"mq_notify",
"mq_open",
"mq_timedreceive",
"mq_timedsend",
"mq_unlink",
"mremap",
"msgctl",
"msgget",
"msgrcv",
"msgsnd",
"msync",
"munlock",
"munlockall",
"munmap",
"nanosleep",
"newfstatat",
"_newselect",
"open",
"openat",
"pause",
"pipe",
"pipe2",
"poll",
"ppoll",
"prctl",
"pread64",
"preadv",
"prlimit64",
"pselect6",
"pwrite64",
"pwritev",
"read",
"readahead",
"readlink",
"readlinkat",
"readv",
"recv",
"recvfrom",
"recvmmsg",
"recvmsg",
"remap_file_pages",
"removexattr",
"rename",
"renameat",
"renameat2",
"restart_syscall",
"rmdir",
"rt_sigaction",
"rt_sigpending",
"rt_sigprocmask",
"rt_sigqueueinfo",
"rt_sigreturn",
"rt_sigsuspend",
"rt_sigtimedwait",
"rt_tgsigqueueinfo",
"sched_getaffinity",
"sched_getattr",
"sched_getparam",
"sched_get_priority_max",
"sched_get_priority_min",
"sched_getscheduler",
"sched_rr_get_interval",
"sched_setaffinity",
"sched_setattr",
"sched_setparam",
"sched_setscheduler",
"sched_yield",
"seccomp",
"select",
"semctl",
"semget",
"semop",
"semtimedop",
"send",
"sendfile",
"sendfile64",
"sendmmsg",
"sendmsg",
"sendto",
"setfsgid",
"setfsgid32",
"setfsuid",
"setfsuid32",
"setgid",
"setgid32",
"setgroups",
"setgroups32",
"setitimer",
"setpgid",
"setpriority",
"setregid",
"setregid32",
"setresgid",
"setresgid32",
"setresuid",
"setresuid32",
"setreuid",
"setreuid32",
"setrlimit",
"set_robust_list",
"setsid",
"setsockopt",
"set_thread_area",
"set_tid_address",
"setuid",
"setuid32",
"setxattr",
"shmat",
"shmctl",
"shmdt",
"shmget",
"shutdown",
"sigaltstack",
"signalfd",
"signalfd4",
"sigreturn",
"socket",
"socketcall",
"socketpair",
"splice",
"stat",
"stat64",
"statfs",
"statfs64",
"symlink",
"symlinkat",
"sync",
"sync_file_range",
"syncfs",
"sysinfo",
"syslog",
"tee",
"tgkill",
"time",
"timer_create",
"timer_delete",
"timerfd_create",
"timerfd_gettime",
"timerfd_settime",
"timer_getoverrun",
"timer_gettime",
"timer_settime",
"times",
"tkill",
"truncate",
"truncate64",
"ugetrlimit",
"umask",
"uname",
"unlink",
"unlinkat",
"utime",
"utimensat",
"utimes",
"vfork",
"vmsplice",
"wait4",
"waitid",
"waitpid",
"write",
"writev"
],
"action": "SCMP_ACT_ALLOW"
},
{
"names": [
"personality"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 0,
"op": "SCMP_CMP_EQ"
},
{
"index": 0,
"value": 8,
"op": "SCMP_CMP_EQ"
},
{
"index": 0,
"value": 4294967295,
"op": "SCMP_CMP_EQ"
}
]
},
{
"names": [
"chroot"
],
"action": "SCMP_ACT_ALLOW"
},
{
"names": [
"clone"
],
"action": "SCMP_ACT_ALLOW",
"args": [
{
"index": 0,
"value": 2080505856,
"op": "SCMP_CMP_MASKED_EQ"
}
]
},
{
"names": [
"arch_prctl"
],
"action": "SCMP_ACT_ALLOW"
},
{
"names": [
"modify_ldt"
],
"action": "SCMP_ACT_ALLOW"
}
]
}
}
}

File diff suppressed because it is too large


@@ -1,39 +0,0 @@
//
// Copyright 2017 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
syntax = "proto3";
package grpc;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.equal_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.testgen_all) = true;
option (gogoproto.benchgen_all) = true;
message CheckRequest {
string service = 1;
}
message HealthCheckResponse {
enum ServingStatus {
UNKNOWN = 0;
SERVING = 1;
NOT_SERVING = 2;
}
ServingStatus status = 1;
}
message VersionCheckResponse {
string grpc_version = 1;
string agent_version = 2;
}
service Health {
rpc Check(CheckRequest) returns (HealthCheckResponse);
rpc Version(CheckRequest) returns (VersionCheckResponse);
}


@@ -1,585 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/kata-containers/agent/protocols/grpc/health.proto
package grpc
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb"
github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
proto "github.com/gogo/protobuf/proto"
math "math"
math_rand "math/rand"
testing "testing"
time "time"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
func TestCheckRequestProto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedCheckRequest(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &CheckRequest{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
littlefuzz := make([]byte, len(dAtA))
copy(littlefuzz, dAtA)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
if len(littlefuzz) > 0 {
fuzzamount := 100
for i := 0; i < fuzzamount; i++ {
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
}
// shouldn't panic
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
}
}
func TestCheckRequestMarshalTo(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedCheckRequest(popr, false)
size := p.Size()
dAtA := make([]byte, size)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
_, err := p.MarshalTo(dAtA)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &CheckRequest{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func BenchmarkCheckRequestProtoMarshal(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
pops := make([]*CheckRequest, 10000)
for i := 0; i < 10000; i++ {
pops[i] = NewPopulatedCheckRequest(popr, false)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
dAtA, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000])
if err != nil {
panic(err)
}
total += len(dAtA)
}
b.SetBytes(int64(total / b.N))
}
func BenchmarkCheckRequestProtoUnmarshal(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
datas := make([][]byte, 10000)
for i := 0; i < 10000; i++ {
dAtA, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedCheckRequest(popr, false))
if err != nil {
panic(err)
}
datas[i] = dAtA
}
msg := &CheckRequest{}
b.ResetTimer()
for i := 0; i < b.N; i++ {
total += len(datas[i%10000])
if err := github_com_gogo_protobuf_proto.Unmarshal(datas[i%10000], msg); err != nil {
panic(err)
}
}
b.SetBytes(int64(total / b.N))
}
func TestHealthCheckResponseProto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedHealthCheckResponse(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &HealthCheckResponse{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
littlefuzz := make([]byte, len(dAtA))
copy(littlefuzz, dAtA)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
if len(littlefuzz) > 0 {
fuzzamount := 100
for i := 0; i < fuzzamount; i++ {
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
}
// shouldn't panic
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
}
}
func TestHealthCheckResponseMarshalTo(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedHealthCheckResponse(popr, false)
size := p.Size()
dAtA := make([]byte, size)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
_, err := p.MarshalTo(dAtA)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &HealthCheckResponse{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func BenchmarkHealthCheckResponseProtoMarshal(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
pops := make([]*HealthCheckResponse, 10000)
for i := 0; i < 10000; i++ {
pops[i] = NewPopulatedHealthCheckResponse(popr, false)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
dAtA, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000])
if err != nil {
panic(err)
}
total += len(dAtA)
}
b.SetBytes(int64(total / b.N))
}
func BenchmarkHealthCheckResponseProtoUnmarshal(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
datas := make([][]byte, 10000)
for i := 0; i < 10000; i++ {
dAtA, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedHealthCheckResponse(popr, false))
if err != nil {
panic(err)
}
datas[i] = dAtA
}
msg := &HealthCheckResponse{}
b.ResetTimer()
for i := 0; i < b.N; i++ {
total += len(datas[i%10000])
if err := github_com_gogo_protobuf_proto.Unmarshal(datas[i%10000], msg); err != nil {
panic(err)
}
}
b.SetBytes(int64(total / b.N))
}
func TestVersionCheckResponseProto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedVersionCheckResponse(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &VersionCheckResponse{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
littlefuzz := make([]byte, len(dAtA))
copy(littlefuzz, dAtA)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
if len(littlefuzz) > 0 {
fuzzamount := 100
for i := 0; i < fuzzamount; i++ {
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
}
// shouldn't panic
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
}
}
func TestVersionCheckResponseMarshalTo(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedVersionCheckResponse(popr, false)
size := p.Size()
dAtA := make([]byte, size)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
_, err := p.MarshalTo(dAtA)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &VersionCheckResponse{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func BenchmarkVersionCheckResponseProtoMarshal(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
pops := make([]*VersionCheckResponse, 10000)
for i := 0; i < 10000; i++ {
pops[i] = NewPopulatedVersionCheckResponse(popr, false)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
dAtA, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000])
if err != nil {
panic(err)
}
total += len(dAtA)
}
b.SetBytes(int64(total / b.N))
}
func BenchmarkVersionCheckResponseProtoUnmarshal(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
datas := make([][]byte, 10000)
for i := 0; i < 10000; i++ {
dAtA, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedVersionCheckResponse(popr, false))
if err != nil {
panic(err)
}
datas[i] = dAtA
}
msg := &VersionCheckResponse{}
b.ResetTimer()
for i := 0; i < b.N; i++ {
total += len(datas[i%10000])
if err := github_com_gogo_protobuf_proto.Unmarshal(datas[i%10000], msg); err != nil {
panic(err)
}
}
b.SetBytes(int64(total / b.N))
}
func TestCheckRequestJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedCheckRequest(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &CheckRequest{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestHealthCheckResponseJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedHealthCheckResponse(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &HealthCheckResponse{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestVersionCheckResponseJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedVersionCheckResponse(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &VersionCheckResponse{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestCheckRequestProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedCheckRequest(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &CheckRequest{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestCheckRequestProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedCheckRequest(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &CheckRequest{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestHealthCheckResponseProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedHealthCheckResponse(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &HealthCheckResponse{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestHealthCheckResponseProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedHealthCheckResponse(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &HealthCheckResponse{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestVersionCheckResponseProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedVersionCheckResponse(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &VersionCheckResponse{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestVersionCheckResponseProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedVersionCheckResponse(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &VersionCheckResponse{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestCheckRequestSize(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedCheckRequest(popr, true)
size2 := github_com_gogo_protobuf_proto.Size(p)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
size := p.Size()
if len(dAtA) != size {
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
}
if size2 != size {
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
}
size3 := github_com_gogo_protobuf_proto.Size(p)
if size3 != size {
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
}
}
func BenchmarkCheckRequestSize(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
pops := make([]*CheckRequest, 1000)
for i := 0; i < 1000; i++ {
pops[i] = NewPopulatedCheckRequest(popr, false)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
total += pops[i%1000].Size()
}
b.SetBytes(int64(total / b.N))
}
func TestHealthCheckResponseSize(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedHealthCheckResponse(popr, true)
size2 := github_com_gogo_protobuf_proto.Size(p)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
size := p.Size()
if len(dAtA) != size {
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
}
if size2 != size {
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
}
size3 := github_com_gogo_protobuf_proto.Size(p)
if size3 != size {
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
}
}
func BenchmarkHealthCheckResponseSize(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
pops := make([]*HealthCheckResponse, 1000)
for i := 0; i < 1000; i++ {
pops[i] = NewPopulatedHealthCheckResponse(popr, false)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
total += pops[i%1000].Size()
}
b.SetBytes(int64(total / b.N))
}
func TestVersionCheckResponseSize(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedVersionCheckResponse(popr, true)
size2 := github_com_gogo_protobuf_proto.Size(p)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
size := p.Size()
if len(dAtA) != size {
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
}
if size2 != size {
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
}
size3 := github_com_gogo_protobuf_proto.Size(p)
if size3 != size {
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
}
}
func BenchmarkVersionCheckResponseSize(b *testing.B) {
popr := math_rand.New(math_rand.NewSource(616))
total := 0
pops := make([]*VersionCheckResponse, 1000)
for i := 0; i < 1000; i++ {
pops[i] = NewPopulatedVersionCheckResponse(popr, false)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
total += pops[i%1000].Size()
}
b.SetBytes(int64(total / b.N))
}
func TestCheckRequestStringer(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedCheckRequest(popr, false)
s1 := p.String()
s2 := fmt.Sprintf("%v", p)
if s1 != s2 {
t.Fatalf("String want %v got %v", s1, s2)
}
}
func TestHealthCheckResponseStringer(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedHealthCheckResponse(popr, false)
s1 := p.String()
s2 := fmt.Sprintf("%v", p)
if s1 != s2 {
t.Fatalf("String want %v got %v", s1, s2)
}
}
func TestVersionCheckResponseStringer(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedVersionCheckResponse(popr, false)
s1 := p.String()
s2 := fmt.Sprintf("%v", p)
if s1 != s2 {
t.Fatalf("String want %v got %v", s1, s2)
}
}
//These tests are generated by github.com/gogo/protobuf/plugin/testgen

File diff suppressed because it is too large


@@ -1,462 +0,0 @@
//
// Copyright (c) 2017 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
syntax = "proto3";
package grpc;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/wrappers.proto";
option (gogoproto.equal_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.testgen_all) = true;
option (gogoproto.benchgen_all) = true;
message Spec {
// Version of the Open Container Initiative Runtime Specification with which the bundle complies.
string Version = 1;
// Process configures the container process.
Process Process = 2;
// Root configures the container's root filesystem.
Root Root = 3;
// Hostname configures the container's hostname.
string Hostname = 4;
// Mounts configures additional mounts (on top of Root).
repeated Mount Mounts = 5 [(gogoproto.nullable) = false];
// Hooks configures callbacks for container lifecycle events.
Hooks Hooks = 6;
// Annotations contains arbitrary metadata for the container.
map<string, string> Annotations = 7;
// Linux is platform-specific configuration for Linux based containers.
Linux Linux = 8;
// Solaris is platform-specific configuration for Solaris based containers.
Solaris Solaris = 9;
// Windows is platform-specific configuration for Windows based containers.
Windows Windows = 10;
}
message Process {
// Terminal creates an interactive terminal for the container.
bool Terminal = 1;
// ConsoleSize specifies the size of the console.
Box ConsoleSize = 2;
// User specifies user information for the process.
User User = 3 [(gogoproto.nullable) = false];
// Args specifies the binary and arguments for the application to execute.
repeated string Args = 4;
// Env populates the process environment for the process.
repeated string Env = 5;
// Cwd is the current working directory for the process and must be
// relative to the container's root.
string Cwd = 6;
// Capabilities are Linux capabilities that are kept for the process.
LinuxCapabilities Capabilities = 7;
// Rlimits specifies rlimit options to apply to the process.
repeated POSIXRlimit Rlimits = 8 [(gogoproto.nullable) = false];
// NoNewPrivileges controls whether additional privileges could be gained by processes in the container.
bool NoNewPrivileges = 9;
// ApparmorProfile specifies the apparmor profile for the container.
string ApparmorProfile = 10;
// OOMScoreAdj specifies an oom_score_adj for the container.
int64 OOMScoreAdj = 11;
// SelinuxLabel specifies the selinux context that the container process is run as.
string SelinuxLabel = 12;
}
message Box {
// Height is the vertical dimension of a box.
uint32 Height = 1;
// Width is the horizontal dimension of a box.
uint32 Width = 2;
}
message User {
// UID is the user id.
uint32 UID = 1;
// GID is the group id.
uint32 GID = 2;
// AdditionalGids are additional group ids set for the container's process.
repeated uint32 AdditionalGids = 3;
// Username is the user name.
string Username = 4;
}
message LinuxCapabilities {
// Bounding is the set of capabilities checked by the kernel.
repeated string Bounding = 1;
// Effective is the set of capabilities checked by the kernel.
repeated string Effective = 2;
// Inheritable is the capabilities preserved across execve.
repeated string Inheritable = 3;
// Permitted is the limiting superset for effective capabilities.
repeated string Permitted = 4;
// Ambient is the ambient set of capabilities that are kept.
repeated string Ambient = 5;
}
message POSIXRlimit {
// Type of the rlimit to set
string Type = 1;
// Hard is the hard limit for the specified type
uint64 Hard = 2;
// Soft is the soft limit for the specified type
uint64 Soft = 3;
}
message Mount {
// destination is the path inside the container, except when it starts with "tmp:/".
string destination = 1;
// source is the path inside the container, except when it starts with "vm:/dev/" or "tmp:/".
// A path which starts with "vm:/dev/" refers to the guest VM's "/dev";
// in particular, "vm:/dev/hostfs/" refers to the shared filesystem.
// "tmp:/" is a temporary directory which is used for temporary mounts.
string source = 2;
string type = 3;
repeated string options = 4;
}
message Root {
// Path is the absolute path to the container's root filesystem.
string Path = 1;
// Readonly makes the root filesystem for the container readonly before the process is executed.
bool Readonly = 2;
}
message Hooks {
// Prestart is a list of hooks to be run before the container process is executed.
repeated Hook Prestart = 1 [(gogoproto.nullable) = false];
// Poststart is a list of hooks to be run after the container process is started.
repeated Hook Poststart = 2 [(gogoproto.nullable) = false];
// Poststop is a list of hooks to be run after the container process exits.
repeated Hook Poststop = 3 [(gogoproto.nullable) = false];
}
message Hook {
string Path = 1;
repeated string Args = 2;
repeated string Env = 3;
int64 Timeout = 4;
}
message Linux {
// UIDMapping specifies user mappings for supporting user namespaces.
repeated LinuxIDMapping UIDMappings = 1 [(gogoproto.nullable) = false];
// GIDMapping specifies group mappings for supporting user namespaces.
repeated LinuxIDMapping GIDMappings = 2 [(gogoproto.nullable) = false];
// Sysctl is a set of key-value pairs that are set for the container on start.
map<string, string> Sysctl = 3;
// Resources contain cgroup information for handling resource constraints
// for the container
LinuxResources Resources = 4;
// CgroupsPath specifies the path to cgroups that are created and/or joined by the container.
// The path is expected to be relative to the cgroups mountpoint.
// If resources are specified, the cgroups at CgroupsPath will be updated based on resources.
string CgroupsPath = 5;
// Namespaces contains the namespaces that are created and/or joined by the container
repeated LinuxNamespace Namespaces = 6 [(gogoproto.nullable) = false];
// Devices are a list of device nodes that are created for the container
repeated LinuxDevice Devices = 7 [(gogoproto.nullable) = false];
// Seccomp specifies the seccomp security settings for the container.
LinuxSeccomp Seccomp = 8;
// RootfsPropagation is the rootfs mount propagation mode for the container.
string RootfsPropagation = 9;
// MaskedPaths masks over the provided paths inside the container.
repeated string MaskedPaths = 10;
// ReadonlyPaths sets the provided paths as RO inside the container.
repeated string ReadonlyPaths = 11;
// MountLabel specifies the selinux context for the mounts in the container.
string MountLabel = 12;
// IntelRdt contains Intel Resource Director Technology (RDT) information
// for handling resource constraints (e.g., L3 cache) for the container
LinuxIntelRdt IntelRdt = 13;
}
message Windows {
// Dummy string, never used.
string dummy = 1;
}
message Solaris {
// Dummy string, never used.
string dummy = 1;
}
message LinuxIDMapping {
// HostID is the starting UID/GID on the host to be mapped to 'ContainerID'
uint32 HostID = 1;
// ContainerID is the starting UID/GID in the container
uint32 ContainerID = 2;
// Size is the number of IDs to be mapped
uint32 Size = 3;
}
message LinuxNamespace {
// Type is the type of namespace
string Type = 1;
// Path is a path to an existing namespace persisted on disk that can be joined
// and is of the same type
string Path = 2;
}
message LinuxDevice {
// Path to the device.
string Path = 1;
// Device type, block, char, etc.
string Type = 2;
// Major is the device's major number.
int64 Major = 3;
// Minor is the device's minor number.
int64 Minor = 4;
// FileMode permission bits for the device.
uint32 FileMode = 5;
// UID of the device.
uint32 UID = 6;
// GID of the device.
uint32 GID = 7;
}
message LinuxResources {
// Devices configures the device whitelist.
repeated LinuxDeviceCgroup Devices = 1 [(gogoproto.nullable) = false];
// Memory restriction configuration
LinuxMemory Memory = 2;
// CPU resource restriction configuration
LinuxCPU CPU = 3;
// Task resource restriction configuration.
LinuxPids Pids = 4;
// BlockIO restriction configuration
LinuxBlockIO BlockIO = 5;
// Hugetlb limit (in bytes)
repeated LinuxHugepageLimit HugepageLimits = 6 [(gogoproto.nullable) = false];
// Network restriction configuration
LinuxNetwork Network = 7;
}
message LinuxMemory {
// Memory limit (in bytes).
int64 Limit = 1;
// Memory reservation or soft_limit (in bytes).
int64 Reservation = 2;
// Total memory limit (memory + swap).
int64 Swap = 3;
// Kernel memory limit (in bytes).
int64 Kernel = 4;
// Kernel memory limit for tcp (in bytes)
int64 KernelTCP = 5;
// How aggressive the kernel will swap memory pages.
uint64 Swappiness = 6;
// DisableOOMKiller disables the OOM killer for out of memory conditions
bool DisableOOMKiller = 7;
}
message LinuxCPU {
// CPU shares (relative weight (ratio) vs. other cgroups with cpu shares).
uint64 Shares = 1;
// CPU hardcap limit (in usecs). Allowed cpu time in a given period.
int64 Quota = 2;
// CPU period to be used for hardcapping (in usecs).
uint64 Period = 3;
// How much time realtime scheduling may use (in usecs).
int64 RealtimeRuntime = 4;
// CPU period to be used for realtime scheduling (in usecs).
uint64 RealtimePeriod = 5;
// CPUs to use within the cpuset. Default is to use any CPU available.
string Cpus = 6;
// List of memory nodes in the cpuset. Default is to use any available memory node.
string Mems = 7;
}
message LinuxWeightDevice {
// Major is the device's major number.
int64 Major = 1;
// Minor is the device's minor number.
int64 Minor = 2;
// Weight is the bandwidth rate for the device.
uint32 Weight = 3;
// LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only
uint32 LeafWeight = 4;
}
message LinuxThrottleDevice {
// Major is the device's major number.
int64 Major = 1;
// Minor is the device's minor number.
int64 Minor = 2;
// Rate is the IO rate limit per cgroup per device
uint64 Rate = 3;
}
message LinuxBlockIO {
// Specifies per cgroup weight
uint32 Weight = 1;
// Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, CFQ scheduler only
uint32 LeafWeight = 2;
// Weight per cgroup per device, can override BlkioWeight
repeated LinuxWeightDevice WeightDevice = 3 [(gogoproto.nullable) = false];
// IO read rate limit per cgroup per device, bytes per second
repeated LinuxThrottleDevice ThrottleReadBpsDevice = 4 [(gogoproto.nullable) = false];
// IO write rate limit per cgroup per device, bytes per second
repeated LinuxThrottleDevice ThrottleWriteBpsDevice = 5 [(gogoproto.nullable) = false];
// IO read rate limit per cgroup per device, IO per second
repeated LinuxThrottleDevice ThrottleReadIOPSDevice = 6 [(gogoproto.nullable) = false];
// IO write rate limit per cgroup per device, IO per second
repeated LinuxThrottleDevice ThrottleWriteIOPSDevice = 7 [(gogoproto.nullable) = false];
}
message LinuxPids {
// Maximum number of PIDs. Default is "no limit".
int64 Limit = 1;
}
message LinuxDeviceCgroup {
// Allow or deny
bool Allow = 1;
// Device type, block, char, etc.
string Type = 2;
// Major is the device's major number.
int64 Major = 3;
// Minor is the device's minor number.
int64 Minor = 4;
// Cgroup access permissions format, rwm.
string Access = 5;
}
message LinuxNetwork {
// Set class identifier for container's network packets
uint32 ClassID = 1;
// Set priority of network traffic for container
repeated LinuxInterfacePriority Priorities = 2 [(gogoproto.nullable) = false];
}
message LinuxHugepageLimit {
// Pagesize is the hugepage size
string Pagesize = 1;
// Limit is the limit of "hugepagesize" hugetlb usage
uint64 Limit = 2;
}
message LinuxInterfacePriority {
// Name is the name of the network interface
string Name = 1;
// Priority for the interface
uint32 Priority = 2;
}
message LinuxSeccomp {
string DefaultAction = 1;
repeated string Architectures = 2;
repeated LinuxSyscall Syscalls = 3 [(gogoproto.nullable) = false];
}
message LinuxSeccompArg {
uint64 Index = 1;
uint64 Value = 2;
uint64 ValueTwo = 3;
string Op = 4;
}
message LinuxSyscall {
repeated string Names = 1;
string Action = 2;
repeated LinuxSeccompArg Args = 3 [(gogoproto.nullable) = false];
}
message LinuxIntelRdt {
// The schema for L3 cache id and capacity bitmask (CBM)
// Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
string L3CacheSchema = 1;
}
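As an illustration only (not part of this change), here is a minimal sketch of how the gogo-generated Go types for these messages might be populated; the import path and the capitalized field names are assumptions based on the default gogo code generation.
package main
import (
	grpc "github.com/kata-containers/agent/protocols/grpc" // assumed vendor path, for illustration only
)
// exampleSpec builds a small Spec using the generated types; the "vm:/dev/hostfs/"
// source prefix follows the convention described on the Mount message above.
func exampleSpec() *grpc.Spec {
	return &grpc.Spec{
		Version:  "1.0.1",
		Hostname: "kata-example",
		Process: &grpc.Process{
			Terminal: true,
			User:     grpc.User{UID: 0, GID: 0},
			Args:     []string{"/bin/sh"},
			Env:      []string{"PATH=/usr/sbin:/usr/bin:/sbin:/bin"},
			Cwd:      "/",
		},
		Mounts: []grpc.Mount{
			{Destination: "/rootfs", Source: "vm:/dev/hostfs/rootfs", Type: "bind"},
		},
		Annotations: map[string]string{"example.annotation": "value"},
	}
}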

File diff suppressed because it is too large

View File

@@ -1,289 +0,0 @@
//
// Copyright (c) 2017 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package grpc
import (
"reflect"
"google.golang.org/grpc/codes"
grpcStatus "google.golang.org/grpc/status"
"github.com/opencontainers/runtime-spec/specs-go"
)
func copyValue(to, from reflect.Value) error {
toKind := to.Kind()
fromKind := from.Kind()
if !from.IsValid() {
return nil
}
if toKind == reflect.Ptr {
// Handle the case of nil pointers.
if fromKind == reflect.Ptr && from.IsNil() {
return nil
}
// The destination is a pointer and the origin is not a nil
// pointer, so we need to allocate a new value for the
// destination to point to.
to.Set(reflect.New(to.Type().Elem()))
if fromKind == reflect.Ptr {
return copyValue(to.Elem(), from.Elem())
}
return copyValue(to.Elem(), from)
}
// Here the destination is not a pointer.
// Let's check what the origin is.
if fromKind == reflect.Ptr {
return copyValue(to, from.Elem())
}
switch toKind {
case reflect.Struct:
return copyStructValue(to, from)
case reflect.Slice:
return copySliceValue(to, from)
case reflect.Map:
return copyMapValue(to, from)
default:
// We are now copying a non-pointer scalar value.
// This is the leaf of the recursion.
if from.Type() != to.Type() {
if from.Type().ConvertibleTo(to.Type()) {
to.Set(from.Convert(to.Type()))
return nil
}
return grpcStatus.Errorf(codes.InvalidArgument, "Can not convert %v to %v", from.Type(), to.Type())
}
to.Set(from)
return nil
}
}
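// copyMapValue copies every key of the origin map into a freshly allocated
// destination map, running each value through copyValue.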
func copyMapValue(to, from reflect.Value) error {
if to.Kind() != reflect.Map && from.Kind() != reflect.Map {
return grpcStatus.Errorf(codes.InvalidArgument, "Can only copy maps into maps")
}
to.Set(reflect.MakeMap(to.Type()))
keys := from.MapKeys()
for _, k := range keys {
newValue := reflect.New(to.Type().Elem())
v := from.MapIndex(k)
if err := copyValue(newValue.Elem(), v); err != nil {
return err
}
to.SetMapIndex(k, newValue.Elem())
}
return nil
}
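// copySliceValue allocates a destination slice with the same length as the
// origin and copies each element through copyValue.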
func copySliceValue(to, from reflect.Value) error {
if to.Kind() != reflect.Slice && from.Kind() != reflect.Slice {
return grpcStatus.Errorf(codes.InvalidArgument, "Can only copy slices into slices")
}
sliceLen := from.Len()
to.Set(reflect.MakeSlice(to.Type(), sliceLen, sliceLen))
for j := 0; j < sliceLen; j++ {
if err := copyValue(to.Index(j), from.Index(j)); err != nil {
return err
}
}
return nil
}
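// copyStructSkipField reports whether the destination type is one of the gRPC
// or OCI Solaris/Windows structures; those are never copied (the origin value
// is ignored).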
func copyStructSkipField(to, from reflect.Value) bool {
var grpcSolaris Solaris
var ociSolaris specs.Solaris
var grpcWindows Windows
var ociWindows specs.Windows
toType := to.Type()
grpcSolarisType := reflect.TypeOf(grpcSolaris)
ociSolarisType := reflect.TypeOf(ociSolaris)
grpcWindowsType := reflect.TypeOf(grpcWindows)
ociWindowsType := reflect.TypeOf(ociWindows)
// We skip all Windows and Solaris types
if toType == grpcSolarisType || toType == grpcWindowsType || toType == ociSolarisType || toType == ociWindowsType {
return true
}
return false
}
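// structFieldName returns the name of the struct field at the given index.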
func structFieldName(v reflect.Value, index int) (string, error) {
if v.Kind() != reflect.Struct {
return "", grpcStatus.Errorf(codes.InvalidArgument, "Can only infer field name from structs")
}
return v.Type().Field(index).Name, nil
}
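// isEmbeddedStruct reports whether the struct field at the given index is an
// embedded (anonymous) field; it returns false if the index is out of range.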
func isEmbeddedStruct(v reflect.Value, index int) bool {
if v.Kind() != reflect.Struct || index > v.Type().NumField()-1 {
return false
}
return v.Type().Field(index).Anonymous
}
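// findStructField returns the value of the struct field with the given name,
// or an error if no such field exists.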
func findStructField(v reflect.Value, name string) (reflect.Value, error) {
if v.Kind() != reflect.Struct {
return reflect.Value{}, grpcStatus.Errorf(codes.InvalidArgument, "Can only infer field name from structs")
}
for i := 0; i < v.NumField(); i++ {
if v.Type().Field(i).Name == name {
return v.Field(i), nil
}
}
return reflect.Value{}, grpcStatus.Errorf(codes.InvalidArgument, "Could not find field %s", name)
}
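// copyStructValue copies a structure field by field, matching destination and
// origin fields by name and recursing into embedded fields on either side;
// destination fields with no counterpart in the origin are skipped.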
func copyStructValue(to, from reflect.Value) error {
if to.Kind() != reflect.Struct && from.Kind() != reflect.Struct {
return grpcStatus.Errorf(codes.InvalidArgument, "Can only copy structs into structs")
}
if copyStructSkipField(to, from) {
return nil
}
for i := 0; i < to.NumField(); i++ {
// If one of the fields is embedded, we copy between the embedded field
// and the structure itself. The fields in the embedded field should
// be found in the parent structure.
if isEmbeddedStruct(to, i) {
if err := copyStructValue(to.Field(i), from); err != nil {
return err
}
continue
}
if isEmbeddedStruct(from, i) {
if err := copyStructValue(to, from.Field(i)); err != nil {
return err
}
continue
}
// Find the destination structure field name.
fieldName, err := structFieldName(to, i)
if err != nil {
return err
}
// Try to find the same field name in the origin structure.
// This can fail as we support copying between structures
// that optionally have embedded fields.
v, err := findStructField(from, fieldName)
if err != nil {
continue
}
if err := copyValue(to.Field(i), v); err != nil {
return err
}
}
return nil
}
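// copyStruct copies between two structures passed as pointers, using
// reflection; any panic raised during the copy is recovered and returned
// as an error.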
func copyStruct(to interface{}, from interface{}) (err error) {
defer func() {
if r := recover(); r != nil {
err = r.(error)
}
}()
toVal := reflect.ValueOf(to)
fromVal := reflect.ValueOf(from)
if toVal.Kind() != reflect.Ptr || toVal.Elem().Kind() != reflect.Struct ||
fromVal.Kind() != reflect.Ptr || fromVal.Elem().Kind() != reflect.Struct {
return grpcStatus.Errorf(codes.InvalidArgument, "Arguments must be pointers to structures")
}
toVal = toVal.Elem()
fromVal = fromVal.Elem()
return copyStructValue(toVal, fromVal)
}
// OCItoGRPC converts an OCI specification to its gRPC representation
func OCItoGRPC(ociSpec *specs.Spec) (*Spec, error) {
s := &Spec{}
err := copyStruct(s, ociSpec)
return s, err
}
// GRPCtoOCI converts a gRPC specification back into an OCI representation
func GRPCtoOCI(grpcSpec *Spec) (*specs.Spec, error) {
s := &specs.Spec{}
err := copyStruct(s, grpcSpec)
return s, err
}
// ProcessOCItoGRPC converts an OCI process specification into its gRPC
// representation
func ProcessOCItoGRPC(ociProcess *specs.Process) (*Process, error) {
s := &Process{}
err := copyStruct(s, ociProcess)
return s, err
}
// ProcessGRPCtoOCI converts a gRPC specification back into an OCI
// representation
func ProcessGRPCtoOCI(grpcProcess *Process) (*specs.Process, error) {
s := &specs.Process{}
err := copyStruct(s, grpcProcess)
return s, err
}
// ResourcesOCItoGRPC converts an OCI LinuxResources specification into its gRPC
// representation
func ResourcesOCItoGRPC(ociResources *specs.LinuxResources) (*LinuxResources, error) {
s := &LinuxResources{}
err := copyStruct(s, ociResources)
return s, err
}
// ResourcesGRPCtoOCI converts a gRPC LinuxResources specification into its OCI
// representation
func ResourcesGRPCtoOCI(grpcResources *LinuxResources) (*specs.LinuxResources, error) {
s := &specs.LinuxResources{}
err := copyStruct(s, grpcResources)
return s, err
}
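A minimal usage sketch of these conversion helpers (illustration only, not part of this change); the import path is an assumption.
package main
import (
	"fmt"

	grpc "github.com/kata-containers/agent/protocols/grpc" // assumed vendor path
	specs "github.com/opencontainers/runtime-spec/specs-go"
)
func main() {
	ociSpec := &specs.Spec{Version: "1.0.1", Hostname: "kata-example"}
	// Convert the OCI spec into its gRPC representation...
	grpcSpec, err := grpc.OCItoGRPC(ociSpec)
	if err != nil {
		panic(err)
	}
	// ...and back again; scalar fields survive the round trip unchanged.
	back, err := grpc.GRPCtoOCI(grpcSpec)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Hostname == ociSpec.Hostname) // true
}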

View File

@@ -1,196 +0,0 @@
//
// Copyright (c) 2017 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package grpc
import (
"encoding/json"
"io/ioutil"
"reflect"
"testing"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/stretchr/testify/assert"
)
const ociConfigFile = "config.json"
func assertProcessIsEqual(t *testing.T, ociProcess *specs.Process, grpcProcess *Process) {
assert := assert.New(t)
// Process checks: User
assert.Equal(grpcProcess.User.UID, ociProcess.User.UID)
assert.Equal(grpcProcess.User.GID, ociProcess.User.GID)
// Process checks: Capabilities
assert.Equal(grpcProcess.Capabilities.Bounding, ociProcess.Capabilities.Bounding)
assert.Equal(grpcProcess.Capabilities.Effective, ociProcess.Capabilities.Effective)
assert.Equal(grpcProcess.Capabilities.Inheritable, ociProcess.Capabilities.Inheritable)
assert.Equal(grpcProcess.Capabilities.Permitted, ociProcess.Capabilities.Permitted)
assert.Equal(grpcProcess.Capabilities.Ambient, ociProcess.Capabilities.Ambient)
}
func assertIsEqual(t *testing.T, ociSpec *specs.Spec, grpcSpec *Spec) {
assert := assert.New(t)
// Version check
assert.Equal(grpcSpec.Version, ociSpec.Version)
// Process checks:
assertProcessIsEqual(t, ociSpec.Process, grpcSpec.Process)
// Annotations checks: Annotations
assert.Equal(len(grpcSpec.Annotations), len(ociSpec.Annotations))
for k := range grpcSpec.Annotations {
assert.Equal(grpcSpec.Annotations[k], ociSpec.Annotations[k])
}
// Linux checks: Devices
assert.Equal(len(grpcSpec.Linux.Resources.Devices), len(ociSpec.Linux.Resources.Devices))
assert.Equal(len(grpcSpec.Linux.Resources.Devices), 1)
assert.Equal(grpcSpec.Linux.Resources.Devices[0].Access, "rwm")
// Linux checks: Block IO, for checking embedded structures copy
assert.NotNil(ociSpec.Linux.Resources.BlockIO.LeafWeight)
assert.NotNil(ociSpec.Linux.Resources.BlockIO.Weight)
assert.EqualValues(grpcSpec.Linux.Resources.BlockIO.Weight, *ociSpec.Linux.Resources.BlockIO.Weight)
assert.EqualValues(grpcSpec.Linux.Resources.BlockIO.LeafWeight, *ociSpec.Linux.Resources.BlockIO.LeafWeight)
assert.NotEqual(len(grpcSpec.Linux.Resources.BlockIO.WeightDevice), 0)
assert.Equal(len(grpcSpec.Linux.Resources.BlockIO.WeightDevice), len(ociSpec.Linux.Resources.BlockIO.WeightDevice))
assert.EqualValues(grpcSpec.Linux.Resources.BlockIO.WeightDevice[0].Major, ociSpec.Linux.Resources.BlockIO.WeightDevice[0].Major)
assert.EqualValues(grpcSpec.Linux.Resources.BlockIO.WeightDevice[0].Minor, ociSpec.Linux.Resources.BlockIO.WeightDevice[0].Minor)
assert.NotNil(ociSpec.Linux.Resources.BlockIO.WeightDevice[0].LeafWeight)
assert.NotNil(ociSpec.Linux.Resources.BlockIO.WeightDevice[0].Weight)
assert.EqualValues(grpcSpec.Linux.Resources.BlockIO.WeightDevice[0].Weight, *ociSpec.Linux.Resources.BlockIO.WeightDevice[0].Weight)
assert.EqualValues(grpcSpec.Linux.Resources.BlockIO.WeightDevice[0].LeafWeight, *ociSpec.Linux.Resources.BlockIO.WeightDevice[0].LeafWeight)
// Linux checks: Namespaces
assert.Equal(len(grpcSpec.Linux.Namespaces), len(ociSpec.Linux.Namespaces))
assert.Equal(len(grpcSpec.Linux.Namespaces), 5)
for i := range grpcSpec.Linux.Namespaces {
assert.Equal(grpcSpec.Linux.Namespaces[i].Type, (string)(ociSpec.Linux.Namespaces[i].Type)) //nolint:unconvert
assert.Equal(grpcSpec.Linux.Namespaces[i].Path, (string)(ociSpec.Linux.Namespaces[i].Path)) //nolint:unconvert
}
}
func TestOCItoGRPC(t *testing.T) {
assert := assert.New(t)
var ociSpec specs.Spec
configJSONBytes, err := ioutil.ReadFile(ociConfigFile)
assert.NoError(err, "Could not open OCI config file")
err = json.Unmarshal(configJSONBytes, &ociSpec)
assert.NoError(err, "Could not unmarshall OCI config file")
spec, err := OCItoGRPC(&ociSpec)
assert.NoError(err, "Could not convert OCI config file")
assertIsEqual(t, &ociSpec, spec)
}
func TestGRPCtoOCI(t *testing.T) {
assert := assert.New(t)
var ociSpec specs.Spec
configJSONBytes, err := ioutil.ReadFile(ociConfigFile)
assert.NoError(err, "Could not open OCI config file")
err = json.Unmarshal(configJSONBytes, &ociSpec)
assert.NoError(err, "Could not unmarshall OCI config file")
grpcSpec, err := OCItoGRPC(&ociSpec)
assert.NoError(err, "Could not convert OCI config file")
newOciSpec, err := GRPCtoOCI(grpcSpec)
assert.NoError(err, "Could not convert gRPC structure")
assertIsEqual(t, newOciSpec, grpcSpec)
}
func TestProcessOCItoGRPC(t *testing.T) {
assert := assert.New(t)
var ociSpec specs.Spec
configJSONBytes, err := ioutil.ReadFile(ociConfigFile)
assert.NoError(err, "Could not open OCI config file")
err = json.Unmarshal(configJSONBytes, &ociSpec)
assert.NoError(err, "Could not unmarshall OCI config file")
process, err := ProcessOCItoGRPC(ociSpec.Process)
assert.NoError(err, "Could not convert OCI config file")
assertProcessIsEqual(t, ociSpec.Process, process)
}
func TestProcessGRPCtoOCI(t *testing.T) {
assert := assert.New(t)
var ociSpec specs.Spec
configJSONBytes, err := ioutil.ReadFile(ociConfigFile)
assert.NoError(err, "Could not open OCI config file")
err = json.Unmarshal(configJSONBytes, &ociSpec)
assert.NoError(err, "Could not unmarshall OCI config file")
grpcProcess, err := ProcessOCItoGRPC(ociSpec.Process)
assert.NoError(err, "Could not convert OCI config file")
newOciProcess, err := ProcessGRPCtoOCI(grpcProcess)
assert.NoError(err, "Could not convert gRPC structure")
assertProcessIsEqual(t, newOciProcess, grpcProcess)
}
func testCopyValue(t *testing.T, to, from interface{}) {
assert := assert.New(t)
err := copyValue(reflect.ValueOf(to).Elem(), reflect.ValueOf(from))
assert.NoError(err, "Could not copy to %v", reflect.ValueOf(from).Kind())
assert.Equal(reflect.ValueOf(to).Elem().Interface(), reflect.ValueOf(from).Interface())
}
func TestCopyValueString(t *testing.T) {
from := "foobar"
to := new(string)
testCopyValue(t, to, from)
}
func TestCopyValueSlice(t *testing.T) {
from := []string{"foobar", "barfoo"}
to := new([]string)
testCopyValue(t, to, from)
}
func TestCopyValueStruct(t *testing.T) {
type dummyStruct struct {
S string
I int
}
from := dummyStruct{
S: "foobar",
I: 18,
}
to := new(dummyStruct)
testCopyValue(t, to, from)
}
func TestCopyValueMap(t *testing.T) {
from := map[string]string{
"key1": "value1",
"key2": "value2",
}
to := new(map[string]string)
testCopyValue(t, to, from)
}

View File

@@ -1,11 +0,0 @@
//
// Copyright 2017 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
package grpc
// APIVersion specifies the version of the gRPC communications protocol used
// by Kata Containers.
const APIVersion = "0.0.1"
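Illustration only: a standalone sketch of how a caller might compare this constant against the protocol version reported by the agent. The helper name and the GrpcVersion field on VersionCheckResponse are assumptions, not part of this package.
package grpc
import "fmt"
// checkAPIVersion is a hypothetical helper: it compares the protocol version
// reported by the agent against APIVersion. The GrpcVersion field name on
// VersionCheckResponse is assumed here.
func checkAPIVersion(resp *VersionCheckResponse) error {
	if resp.GrpcVersion != APIVersion {
		return fmt.Errorf("unsupported agent API version %q, expected %q",
			resp.GrpcVersion, APIVersion)
	}
	return nil
}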