Compare commits


210 Commits

Author SHA1 Message Date
Bin Liu
aa295c91f2 Merge pull request #992 from bergwolf/2.0.0-branch-bump
# Kata Containers 2.0.0
2020-10-19 16:02:09 +08:00
Ubuntu
6648c8c7fc release: Kata Containers 2.0.0
- backport 2.0-dev commits to stable-2.0.0

dbfe85e snap: install libseccomp-dev
0c3b6a9 package: drop qemu-virtiofs shim
f751c98 packaging: install virtiofsd for normal qemu build as well
08361c5 runtime: enable virtiofs by default
da9bfb2 runtime: Pass `--thread-pool-size=1` to virtiofsd
7347d43 packaging: Apply virtiofs performance related fixes to 5.x
c7bb1e2 tools: Improve agent-ctl README
e6f7ddd tools: Make agent-ctl support more APIs
46cfed5 tools: Remove commented out code in agent-ctl
81fb2c9 tools: Log request in agent-ctl tool if debug enabled
0c43215 tools: Rename agent-ctl command to GetGuestDetails
6511ffe tools: Fix comment in agent-ctl
ee59378 kernel: update to 5.4.71
ef11213 config: make virtio-fs part of standard kernel
1fb6730 agent: remove `unwrap()` for `e.as_errno()`
05e9fe0 agent: Use `?` instead of `match` when the error returns directly
d658129 kata-monitor: use regexp to check if runtime is kata containers
ae2d89e agent: use anyhow `context` to attach context to `Error` instead of `match`
095d4ad agent: remove useless match
bd816df agent: Use `ok_or_else` instead of match for Option -> Result
d413bf7 agent: Fix crasher if AddARPNeighbors request empty
76408c0 agent: Fix crasher if UpdateRoutes request empty
6e4da19 agent: Fix crasher if UpdateInterface request empty
8f8061d agent: replace `match Result` with `or_else`
64e4b2f agent: replace unnecessary `match Result` with `map_err`
7c0d68f agent: replace check! with map_err for readability
82ed34a agent: remove `check!` in child process because we can't see logs.
9def624 agent: replace `if let Err` with `or_else`
6926914 agent: refactor namespace::setup to optimize error handling
e733c13 agent: replace `if let Err` with `map_err`
ba069f9 rustjail: add length check for uid_mappings in rootless euid mapping
cc8ec7b versions: Update Kubernetes, containerd, cri-o and cri-tools
8a364d2 annotations: Correct unit tests to validate new protections
0cc6297 annotations: Split addHypervisorOverrides to reduce complexity
b6059f3 annotations: Add unit test for checkPathIsInGlobs
c6afad2 annotations: Add unit test for regexpContains function
451608f makefile: Add missing generated vars to `USER_VARS`
8328136 makefile: Improve names of config entries for annotation checks
a92a630 annotations: Give better names to local variables in search functions
997f7c4 annotations: Rename checkPathIsInGlobList to checkPathIsInGlobs
74d4065 config: Add better comments in the template files
73bb3fd config: Whitelist hypervisor annotations by name
5a587ba config: Use glob instead of regexp to match paths in annotations
29f5dec annotations: Fix typo in comment
d71f9e1 config: Add makefile variables for path lists
28c386c config: Protect file_mem_backend against annotation attacks
c2a186b config: Protect vhost_user_store_path against annotation attacks
8cd094c config: Add security warning on configuration examples
b5f2a1e config: Protect ctlpath from annotation attack
2d65b3b config: Protect jailer_path annotation
fe5e1cf config: Add examples for path_list configuration
3f7bcf5 annotations: Simplify negative logic
80144fc config: Add hypervisor path override through annotations
2f5f356 config: Fix typo in function name
2faafbd config: Protect virtio_fs_daemon annotation
9e5ed41 config: Add 'List' alternates for hypervisor configuration paths
b33d4fe agent: fix panic on malformed device resource in container update
1838233 cpuset: add cpuset pkg
bfbbe8b cpuset: don't set cpuset.mems in the guest
5c21ec2 sandbox: consider cpusets if quota is not enforced
9bb0d48 cpuset: support setting mems for sandbox
64a2ef6 virtcontainers: add method for calculating cpuset for sandbox
a441f21 cpuset: add cpuset pkg
ce54090 docs: Update upgrading guide
e884fef docs: update the build kata containers kernel document
9c16643 agent/device: Check type as well as major:minor when looking up devices
4978c90 agent/device: Index all devices in spec before updating them
a7ba362 agent/device: Forward port update_spec_device_list() unit test
230a983 agent/device: update_spec_device_list() should error if dev not found
a6d9fd4 sandbox: don't constrain cpus, mem only cpuset, devices
8f0cb2f cgroups: add ability to update CPUSet
cbdae44 agent: fix erroneous parsing for guest block size
97acaa8 docs: Add containerd install guide
2324666 agent: use ok_or/map_err instead of match
ebe5ad1 rustjail: use Iterator to manipulate vector elements
c9497c8 rustjail: delete codes commented out
d5d9928 rustjail: delete unused test code
f70892a agent: use chain of Result to avoid early return
ab64780 agent: update inaccurate comments
9e064ba agent: use macro to simplify parse_cmdline function in config.rs
42c48f5 agent: add blank lines between methods
d3a36fa agent: delete unused field in agentService
fa54660 agent: use no-named closure to reduce codes
efddcb4 agent: use a local fn to reduce duplicated codes
7bb3e56 packaging: fix cloud-hypervisor binary path
7b53041 packaging: fix missing cloud_hypervisor_repo
38212ba packaging: apply qemu v5.1 stable fixes
fb7e9b4 agent: fix aarch64 build
0cfcbf7 docs: add namespace key to pod/container config files
997f1f6 docs: Add crictl example json files
f60f43a runtime: Clear the VCMock 1.x API Methods from 2.0
1789527 ci: snap: add event filtering
999f67d agent: do not follow link when mounting container proc and sysfs
cb2255f agent: set init process non-dumpable
2a6c9ee agent-ctl: include cargo lock updates
eaff5de versions: add plugins section
4f1d23b virtiofs: Disable DAX
6d80df9 snap: specify python version
a116ce0 osbuilder: Create target directory for agent
4dc3bc0 rust-agent: Treat warnings as error
8f7a484 rust-agent: Identify unused results in tests
ce54e5d rust-agent: Log returned errors rather than ignore them
9adb7b7 rust-agent: Remove unused imports
73ab9b1 rust-agent: Report errors to caller if possible
4db3f9e rust-agent: Ignore write errors while writing to the logs
19cb657 rust-agent: Remove unused code that has undefined behavior
86bc151 rust-agent: Remove 'mut' where not needed
8d8adb6 rust-agent: Remove uses of deprecated functions
76298c1 rust-agent: Remove or rename unused parameters
7d303ec rust-agent: Remove or rename unused variables
e0b79eb rust-agent: Remove unused functions
8ed61b1 rust-agent: Remove useless braces
cc4f02e rust-agent: Remove unused macros
ace6f1e clh: Support VFIO device unplug
47cfeaa clh: Remove unnecessary VmmPing
63c4757 versions: cloud-hypervisor: Bump to version 6d30fe05
059b89c docs: Change kata_tap0 to tap0_kata
4ff3ed5 docs: update networking description
de8dcb1 dev-guide: update kata-agent install details
c488cc4 docs: Update docs for enabling agent debug console
e5acb12 docs: update dev guide for agent build
1bddde7 ci: add github action to test the snap
9517b0a versions: cloud-hypervisor: bump version
f5a7175 runtime: cloud-hypervisor: tag openapi-generator-cli container

Signed-off-by: Ubuntu <ubuntu@ip-172-31-19-197.ap-southeast-1.compute.internal>
2020-10-19 06:18:08 +00:00
Xu Wang
49776f76bf Merge pull request #984 from bergwolf/prepare-release
backport 2.0-dev commits to stable-2.0.0
2020-10-18 13:46:16 +08:00
Peng Tao
dbfe85e705 snap: install libseccomp-dev
To build qemu with virtio-fs support.

Depends-on: github.com/kata-containers/tests#2979
Fixes: #982
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-18 00:43:15 +08:00
Peng Tao
0c3b6a94b3 package: drop qemu-virtiofs shim
We have enabled qemu-virtiofs by default.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-18 00:43:15 +08:00
Peng Tao
f751c98da3 packaging: install virtiofsd for normal qemu build as well
For experimental-virtiofs, we use it to test virtiofs with DAX. Let's
rename its virtiofsd to virtiofsd-dax.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-18 00:43:15 +08:00
Peng Tao
08361c5948 runtime: enable virtiofs by default
We've been shipping it for a long time. It's time to make it the default,
replacing the old, obsolete 9pfs.

Fixes: #935
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-18 00:43:15 +08:00
Fabiano Fidêncio
da9bfb27ed runtime: Pass --thread-pool-size=1 to virtiofsd
Dave Gilbert brought up that passing --thread-pool-size=1 to virtiofsd
may result in a performance improvement, especially when using
`cache=none`. While our current default is `cache=auto`, Dave mentioned
that he sees no harm in having it set, and also that it may
use a lot less stack space on aarch/arm.

Fixes: #943

Signed-off-by: Fabiano Fidêncio <fidencio@redhat.com>
2020-10-18 00:43:15 +08:00
Fabiano Fidêncio
7347d43cf9 packaging: Apply virtiofs performance related fixes to 5.x
Vivek Goyal found out that using the "shared" thread pool instead of
"exclusive" results in better performance.

Knowing that, and with the plan to have virtio-fs as the default fs for
2.0, let's bring this patch in for both 5.0 and 5.1.

Fixes: #944

Signed-off-by: Fabiano Fidêncio <fidencio@redhat.com>
2020-10-18 00:43:15 +08:00
James O. D. Hunt
c7bb1e2790 tools: Improve agent-ctl README
Add a summary to help understand how to use the `agent-ctl` tool.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-18 00:43:15 +08:00
James O. D. Hunt
e6f7ddd9a2 tools: Make agent-ctl support more APIs
Added new `agent-ctl` commands to allow the following agent API calls to
be made:

- `AddARPNeighborsRequest`
- `CloseStdinRequest`
- `CopyFileRequest`
- `GetMetricsRequest`
- `GetOOMEventRequest`
- `MemHotplugByProbeRequest`
- `OnlineCPUMemRequest`
- `ReadStreamRequest`
- `ReseedRandomDevRequest`
- `SetGuestDateTimeRequest`
- `TtyWinResizeRequest`
- `UpdateContainerRequest`
- `WriteStreamRequest`

Fixes: #969.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-18 00:43:15 +08:00
James O. D. Hunt
46cfed5025 tools: Remove commented out code in agent-ctl
Remove a few lines of commented out code.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-18 00:43:15 +08:00
James O. D. Hunt
81fb2c9980 tools: Log request in agent-ctl tool if debug enabled
Display the API request before making the call so users can see what is
sent to the agent.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-18 00:43:15 +08:00
James O. D. Hunt
0c432153df tools: Rename agent-ctl command to GetGuestDetails
Rename the `GuestDetails` command to `GetGuestDetails` to match the
actual agent API name.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-18 00:43:15 +08:00
James O. D. Hunt
6511ffe89d tools: Fix comment in agent-ctl
Correct a comment in the agent control tool.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-18 00:43:15 +08:00
Eric Ernst
ee59378232 kernel: update to 5.4.71
vsock fix was backported to 5.4 stable, so we can drop this patch.

Fixes: #973

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-18 00:43:15 +08:00
Eric Ernst
ef11213a4e config: make virtio-fs part of standard kernel
Basic virtio-fs support has made it upstream in the Linux kernel, as
well as in QEMU and Cloud Hypervisor. Let's go ahead and add it to the
standard configuration.

Since the device driver / DAX handling is still in progress
upstream, we will still want to build a separate experimental kernel for
those who are comfortable trading off bleeding-edge stability/kernel
updates for improved FIO numbers.

Fixes: #963

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-18 00:43:15 +08:00
Tim Zhang
1fb6730984 agent: remove unwrap() for e.as_errno()
Use `{:?}` to print `e.as_errno()` instead of using `{}`
to print `e.as_errno().unwrap().desc()`.

This avoids a panic caused purely by the error's content.

Signed-off-by: Tim Zhang <tim@hyper.sh>
2020-10-18 00:43:15 +08:00
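A minimal sketch of the pattern described above (assuming a nix version that exposes `Error::as_errno()`, as the commit message indicates):

    use nix::Error;

    fn log_errno(e: &Error) {
        // Debug-print the Option itself: `None` is rendered instead of the
        // panic that `e.as_errno().unwrap().desc()` would cause when the
        // error carries no errno.
        println!("errno: {:?}", e.as_errno());
    }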
Tim Zhang
05e9fe0591 agent: Use ? instead of match when the error returns directly
It's clearer and more readable.

Signed-off-by: Tim Zhang <tim@hyper.sh>
2020-10-18 00:43:15 +08:00
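A minimal sketch of the idiom, using standard-library calls rather than the agent's own functions:

    use std::{fs, io};

    // Before: an explicit match that only propagates the error.
    fn read_config_with_match(path: &str) -> io::Result<String> {
        match fs::read_to_string(path) {
            Ok(s) => Ok(s.trim().to_string()),
            Err(e) => Err(e),
        }
    }

    // After: `?` propagates the error with far less noise.
    fn read_config(path: &str) -> io::Result<String> {
        let contents = fs::read_to_string(path)?;
        Ok(contents.trim().to_string())
    }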
bin liu
d658129695 kata-monitor: use regexp to check if runtime is kata containers
To support a few common configurations for Kata, including:

- `io.containerd.kata.v2`
- `io.containerd.kata-qemu.v2`
- `io.containerd.kata-clh.v2`

`kata-monitor` now uses a regexp instead of a direct string comparison.

Fixes: #957

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:43:15 +08:00
Tim Zhang
ae2d89e95e agent: use anyhow context to attach context to Error instead of match
Context is clearer than match for these situations.

Signed-off-by: Tim Zhang <tim@hyper.sh>
2020-10-18 00:43:15 +08:00
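A minimal sketch of the anyhow `context` idiom (the file path is illustrative only):

    use anyhow::{Context, Result};
    use std::fs;

    // Attach context to a failure instead of matching on the error just to
    // rebuild the message by hand.
    fn read_hostname() -> Result<String> {
        fs::read_to_string("/etc/hostname").context("failed to read /etc/hostname")
    }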
Tim Zhang
095d4ad08d agent: remove useless match
Remove useless match.

Signed-off-by: Tim Zhang <tim@hyper.sh>
2020-10-18 00:43:15 +08:00
Tim Zhang
bd816dfcec agent: Use ok_or_else instead of match for Option -> Result
Using `ok_or_else` is clearer than `match`.

Signed-off-by: Tim Zhang <tim@hyper.sh>
2020-10-18 00:43:15 +08:00
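A minimal sketch of the `Option` -> `Result` conversion (the map lookup is a stand-in for the agent's own lookups):

    use anyhow::{anyhow, Result};
    use std::collections::HashMap;

    // `ok_or_else` replaces a match that only turns a missing value into an
    // error, and it builds the error lazily, only when the value is absent.
    fn lookup(map: &HashMap<String, String>, key: &str) -> Result<String> {
        map.get(key)
            .cloned()
            .ok_or_else(|| anyhow!("no entry for key {}", key))
    }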
James O. D. Hunt
d413bf7d44 agent: Fix crasher if AddARPNeighbors request empty
Check that the ARP neighbours specified in the `AddARPNeighbors` API
request are set before using them, to avoid crashing the agent.

Fixes: #955.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-18 00:43:15 +08:00
James O. D. Hunt
76408c0f13 agent: Fix crasher if UpdateRoutes request empty
Check that the routes specified in the `UpdateRoutes` API request are set
before using them, to avoid crashing the agent.

Fixes: #949.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-18 00:43:15 +08:00
James O. D. Hunt
6e4da19fa5 agent: Fix crasher if UpdateInterface request empty
Check that the interface specified in the `UpdateInterface` API request is
set before using it, to avoid crashing the agent.

Fixes: #950.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-18 00:43:15 +08:00
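The three crasher fixes above share the same shape; a hedged sketch with a hypothetical request type (the field name is illustrative, not the agent's actual protobuf message):

    use anyhow::{anyhow, Result};

    struct UpdateInterfaceRequest {
        interface: Option<String>, // stand-in for the real message field
    }

    fn update_interface(req: &UpdateInterfaceRequest) -> Result<()> {
        // Reject an empty request instead of unwrapping and crashing the agent.
        let iface = req
            .interface
            .as_ref()
            .ok_or_else(|| anyhow!("empty UpdateInterface request"))?;
        println!("updating interface {}", iface);
        Ok(())
    }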
Tim Zhang
8f8061da08 agent: replace match Result with or_else
`or_else` is suitable for more complicated situations.
We can use it to return Ok in Err handling.

Signed-off-by: Tim Zhang <tim@hyper.sh>
2020-10-18 00:43:15 +08:00
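A minimal sketch of recovering with `Ok` inside the `Err` branch via `or_else`:

    use std::{fs, io};

    // A missing file falls back to an empty string instead of requiring an
    // explicit match on the Result.
    fn read_or_empty(path: &str) -> io::Result<String> {
        fs::read_to_string(path).or_else(|_| Ok(String::new()))
    }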
Tim Zhang
64e4b2fa83 agent: replace unnecessary match Result with map_err
Replace `match Result` where the Ok arm is useless.

Signed-off-by: Tim Zhang <tim@hyper.sh>
2020-10-18 00:43:15 +08:00
Tim Zhang
7c0d68f7f7 agent: replace check! with map_err for readability
Calling a method through a macro is ambiguous and hard to read.

Signed-off-by: Tim Zhang <tim@hyper.sh>
2020-10-18 00:43:15 +08:00
Tim Zhang
82ed34aee1 agent: remove `check!` in child process because we can't see logs.
The `check!` macro logs errors, but logs from the child process can't
be seen, so just ignore the error instead.

Signed-off-by: Tim Zhang <tim@hyper.sh>
2020-10-18 00:43:15 +08:00
Tim Zhang
9def624c05 agent: replace if let Err with or_else
Fixes #934

Signed-off-by: Tim Zhang <tim@hyper.sh>
2020-10-18 00:43:15 +08:00
Tim Zhang
6926914683 agent: refactor namespace::setup to optimize error handling
- Replace the return value with anyhow::Result.
- Remove if let Err.
- Remove match.

Signed-off-by: Tim Zhang <tim@hyper.sh>
2020-10-18 00:43:15 +08:00
Tim Zhang
e733c13cf7 agent: replace if let Err with map_err
Fixes #934

Signed-off-by: Tim Zhang <tim@hyper.sh>
2020-10-18 00:43:15 +08:00
bin liu
ba069f9baa rustjail: add length check for uid_mappings in rootless euid mapping
This looks like a copy-paste mistake: gid_mappings is checked twice, and
one of the checks should be for uid_mappings.

Fixes: #952

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:43:15 +08:00
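A hedged sketch of the corrected validation; the struct and messages are illustrative, not rustjail's exact types:

    struct LinuxIdMapping; // stand-in for the OCI ID-mapping type

    fn validate_rootless_mappings(
        uid_mappings: &[LinuxIdMapping],
        gid_mappings: &[LinuxIdMapping],
    ) -> Result<(), String> {
        // Each list is checked on its own, rather than checking gid_mappings twice.
        if uid_mappings.is_empty() {
            return Err("rootless container requires uid mappings".into());
        }
        if gid_mappings.is_empty() {
            return Err("rootless container requires gid mappings".into());
        }
        Ok(())
    }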
Salvador Fuentes
cc8ec7b0e9 versions: Update Kubernetes, containerd, cri-o and cri-tools
Kubernetes: from 1.17.3 to 1.18.9
CRI-O: from 0eec454168e381e460b3d6de07bf50bfd9b0d082 (1.17) to 1.18.3
Containerd: from 3a4acfbc99aa976849f51a8edd4af20ead51d8d7 (1.3.3) to 1.3.7
cri-tools: from 1.17.0 to 1.18.0

Fixes: #960.
Signed-off-by: Salvador Fuentes <salvador.fuentes@intel.com>
2020-10-18 00:43:15 +08:00
Christophe de Dinechin
8a364d2145 annotations: Correct unit tests to validate new protections
Add the verification of some basic protections, namely that:
- EnableAnnotations is honored
- Dangerous paths cannot be modified if there is no match
- Errors are returned when expected

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:43:15 +08:00
Christophe de Dinechin
0cc6297716 annotations: Split addHypervisorOverrides to reduce complexity
Warning from gocyclo during make check:
 virtcontainers/pkg/oci/utils.go:404:1: cyclomatic complexity 37 of func `addHypervisorConfigOverrides` is high (> 30) (gocyclo)
 func addHypervisorConfigOverrides(ocispec specs.Spec, config *vc.SandboxConfig, runtime RuntimeConfig) error {
^

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:43:15 +08:00
Christophe de Dinechin
b6059f3566 annotations: Add unit test for checkPathIsInGlobs
There are a few interesting corner cases to consider for this
function.

Fixes: #901

Suggested-by: James O.D. Hunt <james.o.hunt@intel.com>
Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:43:15 +08:00
Christophe de Dinechin
c6afad2a06 annotations: Add unit test for regexpContains function
James O.D Hunt: "But also, regexpContains() and
checkPathIsInGlobList() seem like good candidates for some unit
tests. The "look" obvious, but a few boundary condition tests would be
useful I think (filenames with spaces, backslashes, special
characters, and relative & absolute paths are also an interesting
thought here)."

There aren't that many boundary conditions on a list of regexps,
if you assume the regexp match function itself works. However, the
test is useful in documenting expectations.

Fixes: #901

Suggested-by: James O.D. Hunt <james.o.hunt@intel.com>
Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:43:15 +08:00
Christophe de Dinechin
451608fb28 makefile: Add missing generated vars to USER_VARS
This was discovered while checking a massive change in variables.
The root cause of the error is a very long list of manual
replacements, which is best replaced with a $(foreach).

All individual variables in the output configuration files were
checked against the old build using diff.

This is a forward port of a makefile fix included in
PR https://github.com/kata-containers/runtime/issues/3004
for issue https://github.com/kata-containers/runtime/issues/2943

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:43:15 +08:00
Christophe de Dinechin
8328136575 makefile: Improve names of config entries for annotation checks
The entries used to have names like PATH_LIST, which are too generic.
Replace them with more precise names containing a distinguishing keyword,
namely VALID, for example valid_hypervisor_paths.

Fixes: #901

Suggested-by: James O.D. Hunt <james.o.hunt@intel.com>
Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:43:15 +08:00
Christophe de Dinechin
a92a63031d annotations: Give better names to local variables in search functions
Use more meaningful variable names for clarity.

Fixes: #901

Suggested-by: James O.D. Hunt <james.o.hunt@intel.com>
Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:43:15 +08:00
Christophe de Dinechin
997f7c4433 annotations: Rename checkPathIsInGlobList to checkPathIsInGlobs
The name is shorter and more specific.

Fixes: #901

Suggested-by: James O.D. Hunt <james.o.hunt@intel.com>
Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:43:15 +08:00
Christophe de Dinechin
74d4065197 config: Add better comments in the template files
When there is a default value from the code (usually empty) that
differs from a possible suggested value from the distro, then the
wording "default: empty" is confusing.

Fixes: #901

Suggested-by: Julio Montes <julio.montes@intel.com>
Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:43:15 +08:00
Christophe de Dinechin
73bb3fdbee config: Whitelist hypervisor annotations by name
Add a field "enable_annotations" to the runtime configuration that can
be used to whitelist annotations using a list of regular expressions,
which are used to match any part of the base annotation name, i.e. the
part after "io.katacontainers.config.hypervisor."

For example, the following configuration will match "virtio_fs_daemon",
"initrd" and "jailer_path", but not "path" nor "firmware":

  enable_annotations = [ "virtio.*", "initrd", "_path" ]

The default is an empty list of enabled annotations, which disables
annotations entirely.

If an annotation is rejected, the message is something like:

  annotation io.katacontainers.config.hypervisor.virtio_fs_daemon is not enabled

Fixes: #901

Suggested-by: Peng Tao <tao.peng@linux.alibaba.com>
Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:43:10 +08:00
Christophe de Dinechin
5a587ba506 config: Use glob instead of regexp to match paths in annotations
When filtering annotations that correspond to paths,
e.g. hypervisor.path, it is better to use a glob syntax than a regexp
syntax, as it is more usual for paths, and prevents classes of matches
that are undesirable in our case, such as matching .. against .*

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
29f5dec38f annotations: Fix typo in comment
A comment talking about runtime related annotations describes them as
being related to the agent. A similar comment for the agent
annotations is missing.

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
d71f9e1155 config: Add makefile variables for path lists
Add variables to override defaults at build time for the various lists
used to control path annotations.

Fixes: #901

Suggested-by: Fabiano Fidencio <fidencio@redhat.com>
Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
28c386c51f config: Protect file_mem_backend against annotation attacks
This one could theoretically be used to overwrite data on the host.
It seems somewhat less risky than the earlier ones for a number
of reasons, but worth protecting a little anyway.

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
c2a186b18c config: Protect vhost_user_store_path against annotation attacks
This path could be used to overwrite data on the host.

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
8cd094cf06 config: Add security warning on configuration examples
Add the following text explaining the risk of using regular
expressions in path lists:

Each member of the list can be a regular expression, but prefer names.
Otherwise, please read and understand the following carefully.
SECURITY WARNING: If you use regular expressions, be mindful that
an attacker could craft an annotation that uses .. to escape the paths
you gave. For example, if your regexp is /bin/qemu.* then if there is
a directory named /bin/qemu.d/, then an attacker can pass an annotation
containing /bin/qemu.d/../put-any-binary-name-here and attack your host.

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
b5f2a1e8c4 config: Protect ctlpath from annotation attack
This also adds annotations for ctlpath, which were not present
before. It's better to implement the code consistently right now to make
sure that we don't end up with a leaky implementation tacked on later.

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
2d65b3bfd8 config: Protect jailer_path annotation
The jailer_path annotation can be used to execute arbitrary code on
the host. Add a jailer_path_list configuration entry providing a list
of regular expressions that can be used to filter annotations that
represent valid file names.

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
fe5e1cf2e1 config: Add examples for path_list configuration
The path_list configuration gives a series of regular expressions that
limit which values are acceptable through annotations in order to
avoid kata launching arbitrary binaries on the host when receiving an
annotation.

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
3f7bcf54f0 annotations: Simplify negative logic
Replace strange negative logic (!ok -> continue) with positive
logic (ok -> do it).

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
80144fc415 config: Add hypervisor path override through annotations
The annotation is provided, so it should be respected.
Furthermore, it is important to implement it with the appropriate
protections, similar to what was done for virtiofsd.

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
2f5f35608a config: Fix typo in function name
There was an extra 'p' in addHypervisorVirtioFsOverrides.

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
2faafbdd3a config: Protect virtio_fs_daemon annotation
Sending the virtio_fs_daemon annotation can be used to execute
arbitrary code on the host. In order to prevent this, restrict the
values of the annotation to a list provided by the configuration
file.

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
9e5ed41511 config: Add 'List' alternates for hypervisor configuration paths
Paths mentioned in the hypervisor configuration can be overridden
using annotations, which is potentially dangerous. For each path,
add a 'List' variant that specifies the list of acceptable values
from annotations.

Bug: https://bugs.launchpad.net/katacontainers.io/+bug/1878234

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Peng Tao
b33d4fe708 agent: fix panic on malformed device resource in container update
Somehow containerd is sending a malformed device in the update API. While
this should not happen, we should not panic either.

Fixes: #946
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-18 00:40:16 +08:00
Eric Ernst
183823398d cpuset: add cpuset pkg
Pulled from 1.18.4 Kubernetes, adding the cpuset pkg for managing
CPUSet calculations on the host. Go mod'ing the original code from
k8s.io/kubernetes was very painful, and this is very static, so let's
just pull in what we need.

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-18 00:40:16 +08:00
Eric Ernst
bfbbe8ba6b cpuset: don't set cpuset.mems in the guest
Kata doesn't map any numa topologies in the guest. Let's make sure we
clear the Cpuset fields before passing container updates to the
guest.

Note, in the future we may want to have a vCPU to guest CPU mapping and
still include the cpuset.Cpus. Until we have this support, clear this as
well.

Fixes: #932

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-18 00:40:16 +08:00
Eric Ernst
5c21ec278c sandbox: consider cpusets if quota is not enforced
CPUSet cgroup allows for pinning the memory associated with a cpuset to
a given numa node. Similar to cpuset.cpus, we should take cpuset.mems
into account for the sandbox-cgroup that Kata creates.

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-18 00:40:16 +08:00
Eric Ernst
9bb0d48d56 cpuset: support setting mems for sandbox
CPUSet cgroup allows for pinning the memory associated with a cpuset to
a given numa node. Similar to cpuset.cpus, we should take cpuset.mems
into account for the sandbox-cgroup that Kata creates.

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-18 00:40:16 +08:00
Eric Ernst
64a2ef62e0 virtcontainers: add method for calculating cpuset for sandbox
Calculate sandbox's CPUSet as the union of each of the container's
CPUSets.

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-18 00:40:16 +08:00
Eric Ernst
a441f21c40 cpuset: add cpuset pkg
Pulled from 1.18.4 Kubernetes, adding the cpuset pkg for managing
CPUSet calculations on the host. Go mod'ing the original code from
k8s.io/kubernetes was very painful, and this is very static, so let's
just pull in what we need.

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-18 00:40:16 +08:00
James O. D. Hunt
ce54090f25 docs: Update upgrading guide
Update the upgrading guide for 2.0.

Fixes: #928.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-18 00:40:16 +08:00
Ychau Wang
e884fef483 docs: update the build kata containers kernel document
Update the "build Kata Containers kernel" document for the 2.0 release. Fix
the 1.x-release project paths and URLs, using the kata-containers
project file paths and URLs.

Fixes: #929

Signed-off-by: Ychau Wang <wangyongchao.bj@inspur.com>
2020-10-18 00:40:16 +08:00
David Gibson
9c16643c12 agent/device: Check type as well as major:minor when looking up devices
To update device resource entries from host to guest, we search for
the right entry by host major:minor numbers, then later update it.
However block and character devices exist in separate major:minor
namespaces so we could have one block and one character device with
matching major:minor and thus incorrectly update both with the details
for whichever device is processed second.

Add a check on device type to prevent this.

Port from the Kata 1 Go agent
https://github.com/kata-containers/agent/commit/27ebdc9d2761

Fixes: #703

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-10-18 00:40:16 +08:00
David Gibson
4978c9092c agent/device: Index all devices in spec before updating them
The agent needs to update device entries in the OCI spec so that it
has the correct major:minor numbers for the guest, which may differ
from the host.

Entries in the main device list are looked up by device path, but
entries in the device resources list are looked up by (host)
major:minor.  This is done one device at a time, updating as we go in
update_spec_device_list().

But since the host and guest have different namespaces, one device
might have the same major:minor as a different device on the host.  In
that case we could update one resource entry to the correct guest
values, then mistakenly update it again because it now matches a
different host device.

To avoid this, rather than looking up and updating one by one, we make
all the lookups in advance, creating a map from (host) device path to
the indices in the spec where the device and resource entries can be
found.

Port from the Go agent in Kata 1,
https://github.com/kata-containers/agent/commit/d88d46849130

Fixes: #703

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-10-18 00:40:16 +08:00
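A hedged sketch of the index-first approach; the struct is a stand-in for the OCI spec types:

    use std::collections::HashMap;

    struct SpecDevice {
        path: String, // host device path, e.g. /dev/vda
    }

    // Build the complete path -> index map before any update, so an entry
    // already rewritten to guest values can never be matched a second time
    // against a different host device.
    fn index_devices(devices: &[SpecDevice]) -> HashMap<String, usize> {
        devices
            .iter()
            .enumerate()
            .map(|(i, d)| (d.path.clone(), i))
            .collect()
    }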
David Gibson
a7ba362f92 agent/device: Forward port update_spec_device_list() unit test
The Kata 1 Go agent included a unit test for updateSpecDeviceList, but no
such unit test exists for the Rust agent's equivalent
update_spec_device_list().  Port the Kata1 test to Rust.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-10-18 00:40:16 +08:00
David Gibson
230a9833f8 agent/device: update_spec_device_list() should error if dev not found
If update_spec_device_list() is given a device that can't be found in the
OCI spec, it currently does nothing, and returns Ok(()).  That doesn't
seem like what we'd expect and is not what the Go agent in Kata 1 does.

Change it to return an error in that case, like Kata 1.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-10-18 00:40:16 +08:00
Eric Ernst
a6d9fd4118 sandbox: don't constrain cpus, mem only cpuset, devices
Allow for constraining the cpuset as well as the devices whitelist. Revert
sandbox constraints for cpu/memory, as they break the K8s use case. They can
be re-added behind a non-default flag in the future.

The sandbox CPUSet should be updated every time a container is created,
updated, or removed.

To facilitate this without rewriting the 'non constrained cgroup'
handling, let's add to the Sandbox's cgroupsUpdate function.

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-18 00:40:16 +08:00
Eric Ernst
8f0cb2f1ea cgroups: add ability to update CPUSet
Add a function for applying a cpuset change to a cgroup.

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-18 00:40:16 +08:00
Eric Ernst
cbdae44992 agent: fix erroneous parsing for guest block size
We were assuming a base-10 string before, when the block size from sysfs
is actually a hex string. Let's fix that.

Fixes: #908

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-18 00:40:16 +08:00
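A minimal sketch of the fix described above (the helper name is illustrative):

    use std::num::ParseIntError;

    // The sysfs value is a hex string, so parse with radix 16 rather than the
    // default base-10 `str::parse`.
    fn parse_block_size(raw: &str) -> Result<u64, ParseIntError> {
        u64::from_str_radix(raw.trim().trim_start_matches("0x"), 16)
    }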
James O. D. Hunt
97acaa8124 docs: Add containerd install guide
Create a containerd installation guide and a new `kata-manager` script
for 2.0 that automates the steps outlined in the guide.

Also cleaned up and improved the installation documentation in various
ways, the most significant being:

- Added legacy install link for 1.x installs.
- Official packages section:
  - Removed "Contact" column (since it was empty!)
  - Reworded "Versions" column to clarify the versions are a minimum
    (to reduce maintenance burden).
  - Add a column to show which installation methods receive automatic updates.
  - Modified order of installation options in table and document to
    de-emphasise automatic installation and promote official packages
    and snap more.
- Removed sections no longer relevant for 2.0.

Fixes: #738.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-18 00:40:16 +08:00
bin liu
23246662b2 agent: use ok_or/map_err instead of match
Sometimes `Option.ok_or` and `Result.map_err` are simpler
than a match statement. Especially in rpc.rs, there are
many `ctr.get_process` and `sandbox.get_container` calls
that use `match`.

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:40:16 +08:00
bin liu
ebe5ad1386 rustjail: use Iterator to manipulate vector elements
Using an Iterator saves code and makes it more readable.

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:40:16 +08:00
bin liu
c9497c88e4 rustjail: delete codes commented out
Some uses, code, and struct fields are commented out and are unlikely
ever to be uncommented, so delete these comments.

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:40:16 +08:00
bin liu
d5d9928f97 rustjail: delete unused test code
The auto-generated test code is meaningless; delete it.

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:40:16 +08:00
bin liu
f70892a5bb agent: use chain of Result to avoid early return
Using Rust `Result`'s `or_else`/`and_then` produces cleaner code,
and avoids early returns that check whether the `Result`
is `Ok` or `Err`.

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:40:16 +08:00
bin liu
ab64780a0b agent: update inaccurate comments
This commit includes:
- updating comments that did not match the function name
- fixing a file path with a doubled slash

Fixes: #922

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:40:16 +08:00
bin liu
9e064ba192 agent: use macro to simplify parse_cmdline function in config.rs
In the parse_cmdline function there is a lot of similar code; if we want
to add more command-line arguments, the code will grow too long.
A macro can factor out code with the same logic/processing.

Fixes: #914

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:40:16 +08:00
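A hedged sketch of the kind of macro this describes; the macro name, key, and field are illustrative, not the agent's exact code:

    // Expand the repetitive "if the parameter matches this key, store its
    // value" pattern once per command-line flag.
    macro_rules! parse_cmdline_param {
        ($param:expr, $key:expr, $field:expr) => {
            if let Some(value) = $param.strip_prefix($key) {
                $field = value.to_string();
            }
        };
    }

    fn parse_cmdline(cmdline: &str) -> String {
        let mut log_level = String::new();
        for param in cmdline.split_whitespace() {
            parse_cmdline_param!(param, "agent.log=", log_level);
        }
        log_level
    }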
bin liu
42c48f54ed agent: add blank lines between methods
In rpc.rs there are no blank lines between methods; this commit
adds blank lines between them.

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:40:16 +08:00
bin liu
d3a36fa06f agent: delete unused field in agentService
The field was only used for tests and is not needed now.

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:40:16 +08:00
bin liu
fa546600ff agent: use no-named closure to reduce codes
For simple cases, inline unnamed closures save code.

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:40:16 +08:00
bin liu
efddcb4ab8 agent: use a local fn to reduce duplicated codes
The same code was used twice; aggregating it into a function
reduces duplication.

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:40:16 +08:00
Peng Tao
7bb3e562bc packaging: fix cloud-hypervisor binary path
1. ensure build-static-clh.sh puts cloud-hypervisor under the ./cloud-hypervisor directory
2. install the cloud-hypervisor/cloud-hypervisor binary

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-18 00:40:16 +08:00
Peng Tao
7b53041bad packaging: fix missing cloud_hypervisor_repo
It is needed in order to build from source.

Fixes: #916
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-18 00:40:16 +08:00
Peng Tao
38212ba6d8 packaging: apply qemu v5.1 stable fixes
Qemu v5.1 was released with an offending commit 9b3a35ec82
(virtio: verify that legacy support is not accidentally on).
As a result, it breaks command-line compatibility for old qemu
users. Upstream qemu has fixed it but no release has been put out yet.
Let's apply these fixes by hand for now.

Refs: https://www.mail-archive.com/qemu-devel@nongnu.org/msg729556.html

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-18 00:40:16 +08:00
Jianyong Wu
fb7e9b4f32 agent: fix aarch64 build
aarch64 needs libgcc to resolve some non-builtin symbols.

Fixes: #909
Signed-off-by: Jianyong Wu <jianyong.wu@arm.com>
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-18 00:40:16 +08:00
bin liu
0cfcbf79b8 docs: add namespace key to pod/container config files
If there is no namespace field in the config files, CRI-O will fail with:
 setting pod sandbox name and id: cannot generate pod name without namespace

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:40:16 +08:00
bin liu
997f1f6cd0 docs: Add crictl example json files
Add basic sample pod/container config files to show
how to use `crictl` with Kata Containers.

Fixes: #881

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-18 00:40:16 +08:00
Ychau Wang
f60f43af6b runtime: Clear the VCMock 1.x API Methods from 2.0
Remove the 1.x-branch API methods from 2.0. Keep the same methods on
the VC interface, like the VCImpl struct.

Fixes: #751

Signed-off-by: Ychau Wang <wangyongchao.bj@inspur.com>
2020-10-18 00:40:16 +08:00
Julio Montes
1789527d61 ci: snap: add event filtering
Running the snap CI on every PR is not needed. Don't run the snap CI
on PRs that don't change the source code (*.go/*.rs), a configuration
file, or a Makefile.

fixes #896

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-18 00:40:16 +08:00
Peng Tao
999f67d573 agent: do not follow link when mounting container proc and sysfs
Attackers might use it to explore other containers in the same pod.
While it is still safe to allow it, we can just close the race window
like runc does.

Fixes: #885
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-18 00:40:16 +08:00
Peng Tao
cb2255f199 agent: set init process non-dumpable
On old kernels (like v4.9), the kernel applies CLOEXEC in the wrong order
w.r.t. the dumpable task flag. As a result, we might leak guest file
descriptors to containers. This was runc CVE-2016-9962 and it still
applies to the kata agent. Although Kata Containers still protects the
host, we should not leak extra resources to user containers.

This sets the init processes that join and set up the container's
namespaces as non-dumpable before they setns to the container's pid (or
any other) namespace.

This setting is automatically reset to the default after the Exec in
the container, so that it does not change functionality for the
applications that are running inside, just our init processes.

This prevents the parent process, pid 1 of the container, from ptracing
the init process before it drops caps and sets up other LSMs.

The order during the exec syscall is that the process is set back to
dumpable before O_CLOEXEC flags are processed.

Refs:
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=613cc2b6f272c1a8ad33aefa21cad77af23139f7
https://github.com/torvalds/linux/blob/v4.9/fs/exec.c#L1290-L1318
opencontainers/runc@50a19c6
https://nvd.nist.gov/vuln/detail/CVE-2016-9962

Fixes: #890
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-18 00:40:16 +08:00
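A hedged sketch of marking a process non-dumpable before joining namespaces; the real agent code path differs, this only shows the underlying prctl call:

    fn set_non_dumpable() -> std::io::Result<()> {
        // SAFETY: prctl(PR_SET_DUMPABLE, 0) only changes a per-process flag.
        let ret = unsafe { libc::prctl(libc::PR_SET_DUMPABLE, 0 as libc::c_ulong) };
        if ret != 0 {
            return Err(std::io::Error::last_os_error());
        }
        Ok(())
    }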
Peng Tao
2a6c9eec74 agent-ctl: include cargo lock updates
Simply running `make` would generate some cargo lock updates for
agent-ctl. Let's include them so that we have fixed dependencies.

Fixes: #883
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-18 00:40:16 +08:00
Julio Montes
eaff5de37a versions: add plugins section
The plugins section contains the details of plugins required by
the components or for testing.

Add sriov-network-device-plugin url and version that are consumed
by the VFIO test in the tests repository.

fixes #879

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-18 00:40:16 +08:00
Jose Carlos Venegas Munoz
4f1d23b651 virtiofs: Disable DAX
virtiofs DAX support is not stable today; there are
a few corner cases to resolve before making it the default.

Fixes: #862
Fixes: #875

Signed-off-by: Jose Carlos Venegas Munoz <jose.carlos.venegas.munoz@intel.com>
2020-10-18 00:40:16 +08:00
Julio Montes
6d80df9831 snap: specify python version
In order to avoid an `unmet dependencies` error in the CI,
the python version must be specified in the yaml.

fixes #877

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-18 00:40:16 +08:00
Ralf Haferkamp
a116ce0b75 osbuilder: Create target directory for agent
When building with AGENT_SOURCE_BIN pointing to an already built
kata-agent binary, the target directory needs to be created in the
rootfs tree.

Fixes #873

Signed-off-by: Ralf Haferkamp <rhafer@suse.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
4dc3bc0020 rust-agent: Treat warnings as error
Avoid the accumulation of warnings we had, as reported in #750.

Fixes: #750

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
8f7a4842c2 rust-agent: Identify unused results in tests
Assign unused results to _ in order to silence warnings.

This addresses the following warnings:

    warning: unused `std::result::Result` that must be used
        --> rustjail/src/mount.rs:1182:16
         |
    1182 |         defer!(unistd::chdir(&olddir););
         |                ^^^^^^^^^^^^^^^^^^^^^^^
         |
         = note: `#[warn(unused_must_use)]` on by default
         = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
        --> rustjail/src/mount.rs:1183:9
         |
    1183 |         unistd::chdir(tempdir.path());
         |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
         |
         = note: this `Result` may be an `Err` variant, which should be handled

While in regular code we want to log possible errors, in test code
it's OK to simply ignore the returned value.

Fixes: #750

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
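A minimal sketch of the pattern, shown with std calls rather than the nix wrappers named in the warnings:

    #[test]
    fn chdir_round_trip() {
        let olddir = std::env::current_dir().unwrap();
        // Binding the Result to `_` silences unused_must_use without changing
        // behavior, which is acceptable in test code.
        let _ = std::env::set_current_dir("/tmp");
        let _ = std::env::set_current_dir(&olddir);
    }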
Christophe de Dinechin
ce54e5dd57 rust-agent: Log returned errors rather than ignore them
In a number of cases, we have functions that return a Result<...>
and where the possible error case is simply ignored. This is a bit
unhealthy.

Add a `check!` macro that allows us to not ignore error values
that we want to log, while not interrupting the flow by returning
them. This is useful for low-level functions such as `signal::kill` or
`unistd::close`, where an error is probably significant but should not
necessarily interrupt the flow of the program (i.e. using `call()?` is
not the right answer).

The check! macro is then used on low-level calls. This addresses the
following warnings from #750:

    warning: unused `std::result::Result` that must be used
       --> /home/ddd/go/src/github.com/kata-containers-2.0/src/agent/rustjail/src/container.rs:903:17
        |
    903 |                 signal::kill(Pid::from_raw(p.pid), Some(Signal::SIGKILL));
        |                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: `#[warn(unused_must_use)]` on by default
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> /home/ddd/go/src/github.com/kata-containers-2.0/src/agent/rustjail/src/container.rs:916:17
        |
    916 |                 signal::kill(Pid::from_raw(child.id() as i32), Some(Signal::SIGKILL));
        |                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:340:13
        |
    340 |             write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str());
        |             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: `#[warn(unused_must_use)]` on by default
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:554:13
        |
    554 | /             write_sync(
    555 | |                 cwfd,
    556 | |                 SYNC_FAILED,
    557 | |                 format!("setgroups failed: {:?}", e).as_str(),
    558 | |             );
        | |______________^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:340:13
        |
    340 |             write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str());
        |             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:340:13
        |
    340 |             write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str());
        |             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: `#[warn(unused_must_use)]` on by default
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:554:13
        |
    554 | /             write_sync(
    555 | |                 cwfd,
    556 | |                 SYNC_FAILED,
    557 | |                 format!("setgroups failed: {:?}", e).as_str(),
    558 | |             );
        | |______________^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:626:5
        |
    626 |     unistd::close(cfd_log);
        |     ^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: `#[warn(unused_must_use)]` on by default
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:627:5
        |
    627 |     unistd::close(crfd);
        |     ^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:628:5
        |
    628 |     unistd::close(cwfd);
        |     ^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:770:9
        |
    770 |         fcntl::fcntl(pfd_log, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC));
        |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: `#[warn(unused_must_use)]` on by default
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:799:9
        |
    799 |         fcntl::fcntl(prfd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC));
        |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:800:9
        |
    800 |         fcntl::fcntl(pwfd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC));
        |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:803:13
        |
    803 |             unistd::close(prfd);
        |             ^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:930:9
        |
    930 |         log_handler.join();
        |         ^^^^^^^^^^^^^^^^^^^
        |
        = note: `#[warn(unused_must_use)]` on by default
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:803:13
        |
    803 |             unistd::close(prfd);
        |             ^^^^^^^^^^^^^^^^^^^^
        |
        = note: `#[warn(unused_must_use)]` on by default
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:804:13
        |
    804 |             unistd::close(pwfd);
        |             ^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:842:13
        |
    842 |             sched::setns(old_pid_ns, CloneFlags::CLONE_NEWPID);
        |             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/container.rs:843:13
        |
    843 |             unistd::close(old_pid_ns);
        |             ^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

Fixes: #844
Fixes: #750

Suggested-by: Tim Zhang <tim@hyper.sh>
Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
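A hedged sketch of what a log-and-continue `check!`-style macro can look like; the body below is illustrative (the real agent logs through slog rather than eprintln!):

    // Report the error, but do not propagate it up the call chain.
    macro_rules! check {
        ($call:expr, $msg:expr) => {
            if let Err(e) = $call {
                eprintln!("{}: {:?}", $msg, e);
            }
        };
    }

    fn main() {
        check!(std::fs::remove_file("/tmp/does-not-exist"), "remove_file failed");
    }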
Christophe de Dinechin
9adb7b7c28 rust-agent: Remove unused imports
This addresses the following warnings (and similar ones)::

    Compiling rustjail v0.1.0 (/home/ddd/go/src/github.com/kata-containers-2.0/src/agent/rustjail)
    warning: unused import: `debug`
      --> rustjail/src/container.rs:57:12
       |
    57 | use slog::{debug, info, o, Logger};
       |            ^^^^^

    warning: unused imports: `AddressFamily`, `SockFlag`, `SockType`, `self`
      --> rustjail/src/process.rs:18:24
       |
    18 | use nix::sys::socket::{self, AddressFamily, SockFlag, SockType};
       |                        ^^^^  ^^^^^^^^^^^^^  ^^^^^^^^  ^^^^^^^^

    warning: unused import: `nix::Error`
      --> rustjail/src/process.rs:23:5
       |
    23 | use nix::Error;
       |     ^^^^^^^^^^

    warning: unused import: `protobuf::RepeatedField`
      --> rustjail/src/validator.rs:11:5
       |
    11 | use protobuf::RepeatedField;
       |     ^^^^^^^^^^^^^^^^^^^^^^^

Fixes: #750

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
73ab9b1d6d rust-agent: Report errors to caller if possible
Various recently added calls can produce errors that were being ignored;
report them to the caller where possible.

This addresses the following warning:

    warning: unused `std::result::Result` that must be used
      --> rustjail/src/cgroups/fs/mod.rs:93:9
       |
    93 |         cg.add_task(CgroupPid::from(pid as u64));
       |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
       |
       = note: `#[warn(unused_must_use)]` on by default
       = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/cgroups/fs/mod.rs:196:17
        |
    196 |                 freezer_controller.thaw();
        |                 ^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/cgroups/fs/mod.rs:199:17
        |
    199 |                 freezer_controller.freeze();
        |                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/cgroups/fs/mod.rs:365:9
        |
    365 |         cpuset_controller.set_cpus(&cpu.cpus);
        |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/cgroups/fs/mod.rs:369:9
        |
    369 |         cpuset_controller.set_mems(&cpu.mems);
        |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/cgroups/fs/mod.rs:381:13
        |
    381 |             cpu_controller.set_shares(shares);
        |             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/cgroups/fs/mod.rs:385:5
        |
    385 |     cpu_controller.set_cfs_quota_and_period(cpu.quota, cpu.period);
        |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
        = note: this `Result` may be an `Err` variant, which should be handled

    warning: unused `std::result::Result` that must be used
        --> rustjail/src/cgroups/fs/mod.rs:1061:13
         |
    1061 |             cpuset_controller.set_cpus(cpuset_cpus);
         |             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
         |
         = note: this `Result` may be an `Err` variant, which should be handled

The specific case of cpu_controller.set_cfs_quota_and_period is
addressed in a way that changes the logic following a suggestion by
Liu Bin, who had just added the code.

Fixes: #750

Suggested-by: Liu Bin <bin@hyper.sh>
Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
4db3f9e226 rust-agent: Ignore write errors while writing to the logs
When we are writing to the logs and there is an error doing so, there
is not much we can do. Chances are that a panic would make things
worse. So let it go through.

    warning: unused `std::result::Result` that must be used
       --> rustjail/src/sync.rs:26:9
        |
    26  |         write_count(lfd, log_str.as_bytes(), log_str.len());
        |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |
       ::: rustjail/src/container.rs:339:13
        |
    339 |             log_child!(cfd_log, "child exit: {:?}", e);
        |             ------------------------------------------- in this macro invocation
        |
        = note: this `Result` may be an `Err` variant, which should be handled
        = note: this warning originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info)

Fixes: #750

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
19cb657299 rust-agent: Remove unused code that has undefined behavior
Some functions have undefined behavior and are not actually used.

This addresses the following warning:
    warning: the type `oci::User` does not permit zero-initialization
      --> rustjail/src/lib.rs:99:18
       |
    99 |         unsafe { MaybeUninit::zeroed().assume_init() }
       |                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
       |                  |
       |                  this code causes undefined behavior when executed
       |                  help: use `MaybeUninit<T>` instead, and only call `assume_init` after initialization is done
       |
       = note: `#[warn(invalid_value)]` on by default
    note: `std::ptr::Unique<u32>` must be non-null (in this struct field)

    warning: the type `protocols::oci::Process` does not permit zero-initialization
       --> rustjail/src/lib.rs:146:14
        |
    146 |     unsafe { MaybeUninit::zeroed().assume_init() }
        |              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        |              |
        |              this code causes undefined behavior when executed
        |              help: use `MaybeUninit<T>` instead, and only call `assume_init` after initialization is done
        |
    note: `std::ptr::Unique<std::string::String>` must be non-null (in this struct field)
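
For context (the commit itself simply removes the offending code):
zero-initializing a type that holds a `String` or `Vec` creates a null
internal pointer, which is undefined behavior. If such a constructor were
needed, a safe alternative is to build a properly initialized value, for
example via `Default`. The struct below is illustrative, not the real
`oci::Process`.

```rust
#[derive(Default, Debug)]
struct Process {
    args: Vec<String>,
    cwd: String,
}

fn main() {
    // UB (what the removed code did):
    // let p: Process = unsafe { std::mem::MaybeUninit::zeroed().assume_init() };

    // Safe: every field gets a valid, initialized value.
    let p = Process::default();
    println!("{:?}", p);
}
```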

Fixes: #750

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
86bc151787 rust-agent: Remove 'mut' where not needed
Addresses the following warning (and a few similar ones):
    warning: variable does not need to be mutable
       --> rustjail/src/container.rs:369:9
        |
    369 |     let mut oci_process: oci::Process = serde_json::from_str(process_str)?;
        |         ----^^^^^^^^^^^
        |         |
        |         help: remove this `mut`
        |
        = note: `#[warn(unused_mut)]` on by default

Fixes: #750

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
8d8adb6887 rust-agent: Remove uses of deprecated functions
This addresses the following:

    warning: use of deprecated item 'std::error::Error::description': use the Display impl or to_string()
        --> rustjail/src/container.rs:1598:31
         |
    1598 | ...                   e.description(),
         |                         ^^^^^^^^^^^
         |
         = note: `#[warn(deprecated)]` on by default
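
A hedged sketch of the replacement pattern: rely on the error's `Display`
impl (or `to_string()`) instead of the deprecated `Error::description()`.

```rust
use std::fmt::Display;
use std::num::ParseIntError;

fn describe<E: Display>(e: &E) -> String {
    // Previously: e.description().to_string()
    format!("{}", e)
}

fn main() {
    let err: ParseIntError = "oops".parse::<i32>().unwrap_err();
    println!("{}", describe(&err));
    println!("{}", err.to_string()); // equivalent one-liner
}
```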

Fixes: #750

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
76298c12b7 rust-agent: Remove or rename unused parameters
Parameters that are never used were removed.
Parameters that are unused, but necessary because of some common
interface, were renamed with a _ prefix.
In one case, the parameter is consumed by adding an info! call, and a
minor typo in a message in the same function is fixed.

This addresses the following warning:

    warning: unused variable: `child`
        --> rustjail/src/container.rs:1128:5
         |
    1128 |     child: &mut Child,
         |     ^^^^^ help: if this is intentional, prefix it with an underscore: `_child`

    warning: unused variable: `logger`
        --> rustjail/src/container.rs:1049:22
         |
    1049 | fn update_namespaces(logger: &Logger, spec: &mut Spec, init_pid: RawFd) -> Result<()> {
         |                      ^^^^^^ help: if this is intentional, prefix it with an underscore: `_logger`

Fixes: #750

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
7d303ec2d0 rust-agent: Remove or rename unused variables
Remove variables that are simply not used.
Rename to _ the variables where only the initialization matters.

This addresses the following warnings:

    warning: unused variable: `writer`
       --> src/main.rs:130:9
        |
    130 |     let writer = unsafe { File::from_raw_fd(wfd) };
        |         ^^^^^^ help: if this is intentional, prefix it with an underscore: `_writer`
        |
        = note: `#[warn(unused_variables)]` on by default

    warning: unused variable: `ctx`
       --> src/rpc.rs:782:9
        |
    782 |         ctx: &ttrpc::TtrpcContext,
        |         ^^^ help: if this is intentional, prefix it with an underscore: `_ctx`

    warning: unused variable: `ctx`
       --> src/rpc.rs:808:9
        |
    808 |         ctx: &ttrpc::TtrpcContext,
        |         ^^^ help: if this is intentional, prefix it with an underscore: `_ctx`

    warning: unused variable: `dns_list`
        --> src/rpc.rs:1152:16
         |
    1152 |             Ok(dns_list) => {
         |                ^^^^^^^^ help: if this is intentional, prefix it with an underscore: `_dns_list`

    warning: value assigned to `child_stdin` is never read
       --> rustjail/src/container.rs:807:13
        |
    807 |         let mut child_stdin = std::process::Stdio::null();
        |             ^^^^^^^^^^^^^^^
        |
        = note: `#[warn(unused_assignments)]` on by default
        = help: maybe it is overwritten before being read?

    warning: value assigned to `child_stdout` is never read
       --> rustjail/src/container.rs:808:13
        |
    808 |         let mut child_stdout = std::process::Stdio::null();
        |             ^^^^^^^^^^^^^^^^
        |
        = help: maybe it is overwritten before being read?

    warning: value assigned to `child_stderr` is never read
       --> rustjail/src/container.rs:809:13
        |
    809 |         let mut child_stderr = std::process::Stdio::null();
        |             ^^^^^^^^^^^^^^^^
        |
        = help: maybe it is overwritten before being read?

    warning: value assigned to `stdin` is never read
       --> rustjail/src/container.rs:810:13
        |
    810 |         let mut stdin = -1;
        |             ^^^^^^^^^
        |
        = help: maybe it is overwritten before being read?

    warning: value assigned to `stdout` is never read
       --> rustjail/src/container.rs:811:13
        |
    811 |         let mut stdout = -1;
        |             ^^^^^^^^^^
        |
        = help: maybe it is overwritten before being read?

    warning: value assigned to `stderr` is never read
       --> rustjail/src/container.rs:812:13
        |
    812 |         let mut stderr = -1;
        |             ^^^^^^^^^^
        |
        = help: maybe it is overwritten before being read?

Fixes: #750

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
e0b79eb57f rust-agent: Remove unused functions
Fixes the following warning:

   Compiling logging v0.1.0 (/home/ddd/go/src/github.com/kata-containers-2.0/pkg/logging)
   warning: associated function is never used: `set_level`
      --> /home/ddd/go/src/github.com/kata-containers-2.0/pkg/logging/src/lib.rs:186:8
       |
   186 |     fn set_level(&self, level: slog::Level) {
       |        ^^^^^^^^^
       |
       = note: `#[warn(dead_code)]` on by default

Fixes: #750

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
8ed61b1bb9 rust-agent: Remove useless braces
This addresses the following warning:

    warning: unnecessary braces around assigned value
        --> src/rpc.rs:1411:26
         |
    1411 |     detail.init_daemon = { unistd::getpid() == Pid::from_raw(1) };
         |                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: remove these braces
         |
         = note: `#[warn(unused_braces)]` on by default

Fixes: #750

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Christophe de Dinechin
cc4f02e2b6 rust-agent: Remove unused macros
This addresses the following warnings:

   Compiling rustjail v0.1.0 (/home/ddd/go/src/github.com/kata-containers-2.0/src/agent/rustjail)
   warning: unused `#[macro_use]` import
     --> rustjail/src/lib.rs:15:1
      |
   15 | #[macro_use]
      | ^^^^^^^^^^^^
      |
      = note: `#[warn(unused_imports)]` on by default

   warning: unused macro definition
     --> rustjail/src/lib.rs:38:1
      |
   38 | / macro_rules! sl {
   39 | |     () => {
   40 | |         slog_scope::logger().new(o!("subsystem" => "rustjail"))
   41 | |     };
   42 | | }
      | |_^
      |
      = note: `#[warn(unused_macros)]` on by default

Fixes: #750

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-18 00:40:16 +08:00
Bo Chen
ace6f1e66e clh: Support VFIO device unplug
This patch adds the support of VFIO device unplug when using
cloud-hypervisor.

Fixes: #860

Signed-off-by: Bo Chen <chen.bo@intel.com>
2020-10-18 00:40:16 +08:00
Bo Chen
47cfeaaf18 clh: Remove unnecessary VmmPing
We can rely on the error handling of the actual HTTP API calls to catch
errors, and don't need to call VmmPing explicitly in advance.

Signed-off-by: Bo Chen <chen.bo@intel.com>
2020-10-18 00:40:16 +08:00
Bo Chen
63c475786f versions: cloud-hypervisor: Bump to version 6d30fe05
The cloud-hypervisor commit `6d30fe05` introduced a fix on its API for
VFIO device hotplug (`VmAddDevice`), which is required for supporting
VFIO unplug through openAPI calls in kata.

Signed-off-by: Bo Chen <chen.bo@intel.com>
2020-10-18 00:40:16 +08:00
Chelsea Mafrica
059b89cd03 docs: Change kata_tap0 to tap0_kata
The tap device name should be tap0_kata in architecture.md.

Fixes #797

Signed-off-by: duanquanfeng <duanquanfeng_yewu@cmss.chinamobile.com>
Signed-off-by: Chelsea Mafrica <chelsea.e.mafrica@intel.com>
2020-10-18 00:40:16 +08:00
Chelsea Mafrica
4ff3ed5101 docs: update networking description
First, most people don't care about CNM. Move that out of main doc.

Second, tc-filter is the default. Let's add a bit more background on
our usage of tc-filter (and clarify why we use this instead of macvtap).

Fixes #797

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
Signed-off-by: Chelsea Mafrica <chelsea.e.mafrica@intel.com>
2020-10-18 00:40:16 +08:00
Eric Ernst
de8dcb1549 dev-guide: update kata-agent install details
Install paths were wrong. Updated based on new agent...

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-18 00:40:16 +08:00
Archana Shinde
c488cc48a2 docs: Update docs for enabling agent debug console
The systemd method of adding a debug console is not really
user friendly. Since we have added a much more straightforward
method to enable the agent debug console, update the developer guide
to reflect this.

Fixes #834

Signed-off-by: Archana Shinde <archana.m.shinde@intel.com>
2020-10-18 00:40:16 +08:00
Eric Ernst
e5acb1257f docs: update dev guide for agent build
Include details on setting up rust.

Fixes: #851

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-18 00:40:16 +08:00
Julio Montes
1bddde729b ci: add github action to test the snap
Add a GitHub action to test that the snap package is generated
correctly. This CI does not test the snap itself, it only builds it.

fixes #838

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-18 00:40:16 +08:00
Julio Montes
9517b0a933 versions: cloud-hypervisor: bump version
Use commit c54452c08a467a3e35d8d72f2a91d424e9718c57 as
the version for cloud-hypervisor.
Bring in the openapi fix cloud-hypervisor/cloud-hypervisor#1760 to
support SGX.

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-18 00:40:16 +08:00
Julio Montes
f5a7175f92 runtime: cloud-hypervisor: tag openapi-generator-cli container
Tag the openapi-generator-cli container to v4.3.1, which is the latest
stable release; this way we get reproducible builds and the same
generated code on all systems.

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-18 00:40:16 +08:00
Eric Ernst
9b969bb7da packaging: fix image build script
Relative paths are error prone. Fix error.

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-06 17:57:28 -07:00
Eric Ernst
fb2f3cfce2 release: Kata Containers 2.0.0-rc1
ae6ccbe8 rust-agent: Update README
3faef791 docs: drop docker installation guide
f3466b87 docs: fix static check errors in docs/install/README.md
89ec614d docs: update architecture.md
1ed73179 qemu: upgrade qemu version to 5.1.0 for arm64.
cb79dddf agent: Fix OCI Windows network shared container name typo
c50aee9d github: Remove issue template and use central one
2a4c3e6a docs: fix broken links
9e2a314e Packaging: release notes script using error kernel path urls
aed20f43 rust-agent: Replaces improper use of match for non-constant patterns
868d0248 devices: fix go test warning in manager_test.go
14164392 action: Allow long lines if non-alphabetic
2ece152c agent: remove unreachable code
033925f9 agent: Change do_exec return type to ! because it will never return
c90fff82 agent: propagate the internal detail errors to users
c0ea9102 packaging: Stop providing OBS packages
ca54edef install: Add contacts to the distribution packages
b5ece037 install: Update information about Community Packages
378e429d install: Update SUSE information
567f8587 install: Update openSUSE information
18f32d13 install: Update RHEL information
8280523c install: Update Fedora information
578db2fc install: Update CentOS information
781d6eca ci: fix clone_tests_repo function
c18c5e2c agent: Set LIBC=gnu for ppc64le arch by default
a378ba53 fc: integrate Firecracker's metrics
9991f4b5 static-build/qemu-virtiofs: Refactor apply virtiofs patches
4a0fd6c2 packaging/qemu: Add common code to apply patches
37acc030 static-build/qemu-virtiofs: Fix to apply QEMU patches
6c275c92 runtime: fix TestNewConsole UT failure
0479a4cb travis: skip static checker for ppc64
b3e52844 runtime: fix golint errors
d36d3486 agent: fix cargo fmt
e1094d7f ci: always checkout 2.0-dev of test repository
c8ba30f9 docs: fix static check errors
eaa5c433 runtime: fix make check
07caa2f2 gitignore: ignore agent service file
f34e2e66 agent: fix UT failures due to chdir
442e5906 agent: Only allow proc mount if it is procfs
f2850668 rustjail: make the mount error info much more clear
73414554 runtime: add enable_debug_console configuration item for agent
0b62f5a9 runtime: add debug console service
c23a401e runtime: Call s.newStore.Destroy if globalSandboxList.addSandbox
80879197 shimv2: add a comment in checkAndMount()
b6066cbc osbuilder: specify default toolchain verion in rust-init.
1290d007 runtime: Update cloud-hypervisor client pkg to version v0.10.0
afeece42 agent/oci: Don't use deprecated Error::description() method
a4075f0f runtime: Fix linter errors in release files
01df3c1d packaging: Build from source if the clh release binary is missing
bacd41bb runtime: add podman configuration to data collection script
d9746f31 ci: use export command to export envs instead of env config item
ca2a1176 ci: use Travis cache to reduce build time
67af593a agent: update cgroups crate
cabc60f3 docs: Update the reference path of kata-deploy in the packaging
a5859197 runtime: make kata-check check for newer release
08d194b8 how-to: add privileged_without_host_devices to containerd guide
89ade8f3 travis: enable RUST_BACKTRACE
4b30001d agent/rustjail: add more unit tests
232c8213 agent/rustjail: remove makedev function
74bcd510 agent/rustjail: add unit tests for ms_move_rootfs and mask_path
a36f93c9 agent/rustjail: implement functions to chroot
fe0f2198 agent/rustjail: add unit test for pivot_rootfs
5770c2a2 agent/rustjail: implement functions to pivot_root
838b1794 agent/rustjail: add unit test for mount_cgroups
1a60c1de agent/rustjail: add unit test for init_rootfs
77ecfed2 agent/rustjail/mount: don't use unwrap
fa7079bc agent/rustjail: add tempfile crate as depedency
c23bac5c rustjail: implement functions to mount and umount files
e99f3e79 docs: Fix the kata-pkgsync tool's docs script path
d05a7cda docs: fix k8s containerd howto links
f6877fa4 docs: fix up developer guide for 2.0
6d326f21 gitignore: ignore agent version.rs
407cb9a3 agent: fix agent panic running as init
38eb1df4 packaging: use local version file for kata 2.0 in Makefile
313dfee3 docs: fix release process doc
0c4e7b21 packaging: fix release notes

Signed-off-by: Eric Ernst <eric_ernst@apple.com>
2020-10-06 17:54:13 -07:00
Eric Ernst
f32a741c76 actions: add kata deploy test
Pull over kata-deploy-test from the 1.x packaging repository. This is
intended to be used for testing any changes to the kata-deploy
scripting, and does not exercise any new source code changes.

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-06 17:54:13 -07:00
Eric Ernst
512e79f61a packaging: cleaning, updating based on new filepaths
Update scripts to take into account some files being moved, and some
general cleanup.

Fixes: #866

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-06 17:54:13 -07:00
Eric Ernst
aa70080423 packaging: remove obs-packaging
No longer required -- let's remove them.

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-06 17:54:13 -07:00
Eric Ernst
34015bae12 packaging: pull versions, build-image out from obs dir
These are still required; let's pull them out.

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-06 17:54:13 -07:00
Eric Ernst
93b60a8327 packaging: Revert "packaging: Stop providing OBS packages"
This reverts commit c0ea910273.

Two scripts are still required for release and testing, which should
have never been under obs-packaging dir in the first place.  Let's
revert, move the scripts / update references to it, and then we can
remove the remaining obs-packaging/ tooling.

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-10-06 17:54:13 -07:00
Yang Bo
aa9951f2cd rust-agent: Update README
rust agent does not use grpc as submodule for a while, update README
to reflect the change.

Fixes: #196
Signed-off-by: Yang Bo <bo@hyper.sh>
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
9d8c72998b docs: drop docker installation guide
We have removed cli support, which means docker support is dropped
for now. It also doesn't make sense to have so much duplication for each
distribution when we can simply refer to the official docker guide on how
to install docker.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
033ed13202 docs: fix static check errors in docs/install/README.md
It was merged in while the static checker was disabled.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
c058d04b94 docs: update architecture.md
To match the current architecture of Kata Containers 2.0.

Fixes: #831
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Edmond AK Dantes
9d2bb0c452 qemu: upgrade qemu version to 5.1.0 for arm64.
The qemu version currently used on arm is quite old. Some new features have
been merged into recent qemu, so it is time to upgrade. As obs-packaging has
been removed, I put the qemu patch under qemu/patch/5.1.x.
Since vxfs has been deprecated in qemu-5.1, it no longer exists in
configuration-hyperversior.sh when the qemu version is larger than 5.0.

Fixes: #816
Signed-off-by: Edmond AK Dantes <edmond.dantes.ak47@outlook.com>
2020-10-06 17:54:13 -07:00
James O. D. Hunt
627d062fb2 agent: Fix OCI Windows network shared container name typo
Correct the typo which would break the Windows-specific OCI network
shared container name feature.

See:

- https://github.com/opencontainers/runtime-spec/blob/master/config-windows.md#network

Fixes: #685.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-06 17:54:13 -07:00
James O. D. Hunt
96afe62576 github: Remove issue template and use central one
Remove the GitHub issue template from this repository. We already have a
central set of templates [1] that are being used so the template in this
repository is redundant.

[1] - https://github.com/kata-containers/.github/tree/master/.github/ISSUE_TEMPLATE/

Fixes: #728.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-06 17:54:13 -07:00
Julio Montes
d946016eb7 docs: fix broken links
Some sections and files were removed in a previous commit,
remove all reference to such sections and files to fix the
check-markdown test.

fixes #826

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-06 17:54:13 -07:00
Ychau Wang
37f1a77a6a Packaging: release notes script using error kernel path urls
The 2.0 packaging runtime-release-notes.sh script is using the 1.x packaging
kernel URLs. Fix these URLs to point to the 2.0 branch packaging URLs.

Fixes: #829

Signed-off-by: Ychau Wang <wangyongchao.bj@inspur.com>
2020-10-06 17:54:13 -07:00
Christophe de Dinechin
450a81cc54 rust-agent: Replaces improper use of match for non-constant patterns
The code used `match` as a switch with variable patterns `ev_fd` and
`cg_fd`, but the way Rust interprets the code is that the first
pattern matches all values, so the code does not perform as expected.

This addresses the following warning:

   warning: unreachable pattern
      --> rustjail/src/cgroups/notifier.rs:114:21
       |
   107 |                     ev_fd => {
       |                     ----- matches any value
   ...
   114 |                     cg_fd => {
       |                     ^^^^^ unreachable pattern
       |
       = note: `#[warn(unreachable_patterns)]` on by default
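
A minimal, self-contained illustration of the pitfall (the fd values are made
up): in a `match` arm, a bare identifier is a fresh binding that matches
anything, so the second arm is unreachable. One way to express the intended
comparison is with match guards.

```rust
fn main() {
    let ev_fd = 3;
    let cg_fd = 4;
    let fd = 4;

    // Broken: `ev_fd` here would be a new binding, not a comparison,
    // and the `cg_fd` arm could never be reached:
    // match fd {
    //     ev_fd => println!("event fd"),
    //     cg_fd => println!("cgroup fd"), // unreachable pattern
    // }

    // Intended behavior, using guards to compare against the variables:
    match fd {
        x if x == ev_fd => println!("event fd"),
        x if x == cg_fd => println!("cgroup fd"),
        _ => println!("unknown fd"),
    }
}
```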

Fixes: #750
Fixes: #793

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
2020-10-06 17:54:13 -07:00
zhanghj
c09f02e6f6 devices: fix go test warning in manager_test.go
Create "class" and "config" file in temporary device BDF dir,
and remove dir created  by ioutil.TempDir() when test finished.

fixes: #746

Signed-off-by: zhanghj <zhanghj.lc@inspur.com>
2020-10-06 17:54:13 -07:00
James O. D. Hunt
58c7469110 action: Allow long lines if non-alphabetic
Overly long commit lines are annoying. But sometimes,
we need to be able to force the use of long lines
(for example to reference a URL).

Ironically, I can't refer to the URL that explains this
because of ... the long line check! Hence:

```sh
$ cat <<EOT | tr -d '\n'; echo
See: https://github.com/kata-containers/tests/tree/master/
cmd/checkcommits#handling-long-lines
EOT
```

Maximum body length updated to 150 bytes for parity with:

https://github.com/kata-containers/tests/pull/2848

Fixes: #687.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-06 17:54:13 -07:00
Tim Zhang
c36ea0968d agent: remove unreachable code
The code at the end of init_child is unreachable and needs to be removed.
The code after do_exec is unreachable and needs to be removed.

Signed-off-by: Tim Zhang <tim@hyper.sh>
2020-10-06 17:54:13 -07:00
Tim Zhang
ba197302e2 agent: Change do_exec return type to ! because it will never return
A return type of `!` indicates that the function never returns, so the
compiler can flag any code placed after the call as unreachable.
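
A rough sketch of the idea (not the agent's actual do_exec): a function that
always ends the process can be typed as returning `!`.

```rust
use std::process::exit;

fn do_exec(cmd: &str) -> ! {
    eprintln!("pretending to exec: {}", cmd);
    // The real agent would call exec here; this sketch just exits.
    exit(1);
}

fn main() {
    do_exec("/bin/true");
    // Any statement placed here would be reported as unreachable code.
}
```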

Fixes #819

Signed-off-by: Tim Zhang <tim@hyper.sh>
2020-10-06 17:54:13 -07:00
fupan.lfp
725ad067c1 agent: propagate the internal detail errors to users
We should propagate the detailed errors to users when
the rpc call fails.

Fixes: #824

Signed-off-by: fupan.lfp <fupan.lfp@antfin.com>
2020-10-06 17:54:13 -07:00
Fabiano Fidêncio
9858c23c59 packaging: Stop providing OBS packages
The community has discussed and taken the decision in favour of promoting
kata-deploy as the way of distributing and using kata for distros that
don't officially maintain the project.

Fixes: #623
Fixes: https://github.com/kata-containers/packaging/issues/1120

Signed-off-by: Fabiano Fidêncio <fidencio@redhat.com>
2020-10-06 17:54:13 -07:00
Fabiano Fidêncio
fc8f1ff03c install: Add contacts to the distribution packages
Let's add a new column to the Official packages table, and let the
maintainers of the official distro packages jump in and add their
names there.

This will help us ping the right people and redirect to them any
issues reported against the official packages.

Fixes: #623

Signed-off-by: Fabiano Fidêncio <fidencio@redhat.com>
2020-10-06 17:54:13 -07:00
Fabiano Fidêncio
f7b4f76082 install: Update information about Community Packages
Kata Containers will stop distributing the community packages in favour
of kata-deploy.

Fixes: #623

Signed-off-by: Fabiano Fidêncio <fidencio@redhat.com>
2020-10-06 17:54:13 -07:00
Fabiano Fidêncio
4fd66fa689 install: Update SUSE information
Following up on a conversation with Ralf Haferkamp, we can safely drop the
instructions for using Kata Containers on SLES 12 SP3 in favour of using
the official builds provided for SLE 15 SP1, and SLE 15 SP2.

Fixes: #623

Signed-off-by: Fabiano Fidêncio <fidencio@redhat.com>
2020-10-06 17:54:13 -07:00
Fabiano Fidêncio
e6ff42b8ad install: Update openSUSE information
Let's update the openSUSE Installation Guide to reflect the current
information on how to install kata packages provided by the distro
itself.

The official packages are present on Leap 15.2 and Tumbleweed, and can
simply be installed. Leap 15.1 is slightly different, as the .repo file
has to be added before the packages can be installed.

Leap 15.0 has been removed as it already reached its EOL.

Fixes: #623

Signed-off-by: Fabiano Fidêncio <fidencio@redhat.com>
2020-10-06 17:54:13 -07:00
Fabiano Fidêncio
6710d87c6a install: Update RHEL information
Although the community packages are present for RHEL, everything about
them is extremely unsupported on the Red Hat side.

Knowing this, we'd be better off simply not mentioning those; if users
really want to try kata-containers on RHEL, they can simply follow the
CentOS installation guide.

In the future, if the Fedora packages make their way to RHEL, we can add
the information here. However, if we're recommending something
unsupported, we'd be better off recommending kata-deploy instead.

Fixes: #623

Signed-off-by: Fabiano Fidêncio <fidencio@redhat.com>
2020-10-06 17:54:13 -07:00
Fabiano Fidêncio
178b79f122 install: Update Fedora information
Let's update the Fedora Installation Guide to reflect the current
information on how to install kata packages provided by the distro
itself.

These are official packages and we, as Fedora members, recommend using
kata-containers on Fedora 32 and onwards, as from this version
everything works out-of-the-box. Also, Fedora 31 will reach its EOL as
soon as Fedora 33 is out, which should happen in October.

Fixes: #623

Signed-off-by: Fabiano Fidêncio <fidencio@redhat.com>
2020-10-06 17:54:13 -07:00
Fabiano Fidêncio
bc545c6549 install: Update CentOS information
Let's update the CentOS Installation Guide to reflect the current
information on how to install kata packages provided by the
Virtualization Special Interest Group.

These are not official CentOS packages, as those are not coming from Red
Hat Enterprise Linux. These are the same packages we have on Fedora and
we have decided to keep them up-to-date and sync'ed on both Fedora and
CentOS, so people can give Kata Containers a try also on CentOS.

The nature of these packages makes me think that those are "as official
as they can be", so that's the reason I've decided to add the
instructions to the "official" table.

Together with the change in the Installation Guide, let's also update
the README and reflect the fact we **strongly recommend** using CentOS
8, with the packages provided by the Virtualization Special Interest
Group, instead of using the CentOS 7 with packages built on OBS.

Fixes: #623

Signed-off-by: Fabiano Fidêncio <fidencio@redhat.com>
2020-10-06 17:54:13 -07:00
Salvador Fuentes
585481990a ci: fix clone_tests_repo function
We should not check out the 2.0-dev branch in the clone_tests_repo
function when running in Jenkins CI, as it discards changes from the
tests repo.

Fixes: #818.

Signed-off-by: Salvador Fuentes <salvador.fuentes@intel.com>
2020-10-06 17:54:13 -07:00
Pradipta Kr. Banerjee
0057f86cfa agent: Set LIBC=gnu for ppc64le arch by default
Fixes: #812

Signed-off-by: Pradipta Kr. Banerjee <pradipta.banerjee@gmail.com>
2020-10-06 17:54:13 -07:00
bin liu
fa0401793f fc: integrate Firecracker's metrics
Firecracker exposes metrics through a FIFO file
using a JSON format. This PR parses the
Firecracker metrics and converts them to Prometheus metrics.

Fixes: #472

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-06 17:54:13 -07:00
Wainer dos Santos Moschetta
60b7265961 static-build/qemu-virtiofs: Refactor apply virtiofs patches
In static-build/qemu-virtiofs/Dockerfile the code which
applies the virtiofs-specific patches is spread across several
RUN instructions. Refactor this code so that it runs in a
single RUN and produces a single overlay image.

Signed-off-by: Wainer dos Santos Moschetta <wainersm@redhat.com>
2020-10-06 17:54:13 -07:00
Wainer dos Santos Moschetta
57b53dbae8 packaging/qemu: Add common code to apply patches
The qemu and qemu-virtiofs Dockerfiles repeat the code to apply
patches based on the QEMU stable branch being built. Instead, this adds
a common script (qemu/apply_patches.sh) and has the respective
Dockerfiles call it.

Signed-off-by: Wainer dos Santos Moschetta <wainersm@redhat.com>
2020-10-06 17:54:13 -07:00
Wainer dos Santos Moschetta
ddf1a545d1 static-build/qemu-virtiofs: Fix to apply QEMU patches
Fix a bug in the qemu-virtiofs Dockerfile which ended up not applying
the QEMU patches.

Fixes #786

Signed-off-by: Wainer dos Santos Moschetta <wainersm@redhat.com>
2020-10-06 17:54:13 -07:00
Peng Tao
cbdf6400ae runtime: fix TestNewConsole UT failure
It needs root.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
ceeecf9c66 travis: skip static checker for ppc64
As we have already run it on x64.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
7c53baea8a runtime: fix golint errors
Need to run gofmt -s on them.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
b549d354bf agent: fix cargo fmt
Otherwise travis fails.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
9f3113e1f6 ci: always checkout 2.0-dev of test repository
We use 2.0-dev in the tests repository now. Always make sure
we use the right branch.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
ef94742320 docs: fix static check errors
Somehow we have not been running static checks for a long time,
and that ended up with a lot of errors.

* Ensure debug options are valid is dropped
* fix snap links
* drop extra CONTRIBUTING.md
* reference kata-pkgsync
* move CODEOWNERS to proper place
* remove extra CODE_OF_CONDUCT.md.
* fix spell checker error on Developer-Guide.md

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
d71764985d runtime: fix make check
Need to use the correct script path.

Fixes: #802
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
0fc04a269d gitignore: ignore agent service file
As it is auto-generated.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
8d7ac5f01c agent: fix UT failures due to chdir
The current working directory is a process-level resource. We cannot call
chdir in parallel from multiple threads, which would cause cwd confusion
and result in UT failures.

The agent code itself is correct, in that chdir is only called from the
spawned child init process. There is one exception: it is also called
in do_create_container(), but it is safe to assume that containers are
never created in parallel (at least for now).
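
A tiny illustration of why parallel chdir breaks tests (stand-alone, not
agent code): the working directory is shared by every thread in the process,
so the final value depends on scheduling.

```rust
use std::env;
use std::thread;

fn main() {
    let a = thread::spawn(|| env::set_current_dir("/tmp"));
    let b = thread::spawn(|| env::set_current_dir("/"));
    a.join().unwrap().unwrap();
    b.join().unwrap().unwrap();
    // Which directory we end up in depends on thread scheduling.
    println!("cwd is now {:?}", env::current_dir().unwrap());
}
```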

Fixes: #782
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
fupan.lfp
612acbe319 agent: Only allow proc mount if it is procfs
This only allows a whitelist of files to be bind mounted under proc
and prevents other malicious mounts on procfs.

Fixes: #807

Signed-off-by: fupan.lfp <fupan.lfp@antfin.com>
2020-10-06 17:54:13 -07:00
fupan.lfp
f3a487cd41 rustjail: make the mount error info much more clear
Make the error info for an invalid mount destination much
clearer.

Signed-off-by: fupan.lfp <fupan.lfp@antfin.com>
2020-10-06 17:54:13 -07:00
bin liu
3a559521d1 runtime: add enable_debug_console configuration item for agent
When enable_debug_console=true is set in Kata's configuration file,
the runtime will pass `agent.debug_console`
and `agent.debug_console_vport=1026` to the agent.

Fixes: #245

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-06 17:54:13 -07:00
bin liu
567daf5a42 runtime: add debug console service
Add `kata-runtime exec` to enter the guest OS
through a shell started by the agent.

Fixes: #245

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-06 17:54:13 -07:00
Shukui Yang
c7d913f436 runtime: Call s.newStore.Destroy if globalSandboxList.addSandbox
Fixes: #696

Signed-off-by: Shukui Yang <keloyangsk@gmail.com>
2020-10-06 17:54:13 -07:00
Qian Cai
7bd410c725 shimv2: add a comment in checkAndMount()
In checkAndMount(), it is not clear why we check IsBlockDevice() and if
DisableBlockDeviceUse == false and then only return "false, nil" instead
of "false, err". Adding a comment to make it a bit more readable.

Fixes: #732
Signed-off-by: Qian Cai <cai@redhat.com>
2020-10-06 17:54:13 -07:00
zhanghj
7fbc789855 osbuilder: specify default toolchain verion in rust-init.
Specify default toolchain version in rust-init.

Fixes: #799

Signed-off-by: zhanghj <zhanghj.lc@inspur.com>
2020-10-06 17:54:13 -07:00
Bo Chen
7fc41a771a runtime: Update cloud-hypervisor client pkg to version v0.10.0
The latest release of cloud-hypervisor v0.10.0 contains the following
updates: 1) `virtio-block` Support for Multiple Descriptors; 2) Memory
Zones; 3) `Seccomp` Sandbox Improvements; 4) Preliminary KVM HyperV
Emulation Control; 5) various bug fixes and refactoring.

Note that this patch updates the client code of clh's HTTP API in kata,
while the 'versions.yaml' file was updated in an earlier PR.

Fixes: #789

Signed-off-by: Bo Chen <chen.bo@intel.com>
2020-10-06 17:54:13 -07:00
David Gibson
a31d82fec2 agent/oci: Don't use deprecated Error::description() method
We shouldn't use it, and we don't need to implement it.

fixes #791

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2020-10-06 17:54:13 -07:00
James O. D. Hunt
9ef4c80340 runtime: Fix linter errors in release files
Fix the linter errors caught in the `runtime` repos `master` branch [1],
but not in the `2.0-dev` branch [2]. See [3] for further details.

[1] - https://github.com/kata-containers/runtime/pull/2976
[2] - https://github.com/kata-containers/kata-containers/pull/735
[3] - https://github.com/kata-containers/tests/issues/2870

Fixes: #783.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-06 17:54:13 -07:00
Bo Chen
6a4e413758 packaging: Build from source if the clh release binary is missing
This patch adds a fall-back code path that builds the cloud-hypervisor static
binary from source when downloading the cloud-hypervisor binary
fails. This is useful when we experience network issues, and also
useful for upgrading clh to a non-released version.

Together with the changes in the tests repo
(https://github.com/kata-containers/tests/pull/2862), the Jenkins config
file is also updated with new Execute shell script for the clh CI in the
kata-containers repo. Those two changes fix the regression on clh CI
here. Please check details in the issue below.

Fixes: #781
Fixes: https://github.com/kata-containers/tests/issues/2858

Signed-off-by: Bo Chen <chen.bo@intel.com>
2020-10-06 17:54:13 -07:00
Francesco Giudici
678d4d189d runtime: add podman configuration to data collection script
Be more verbose about podman configuration in the output of the data
collection script: get the system configuration as seen by podman and
dump the configuration files when present.

Fixes: #243
Signed-off-by: Francesco Giudici <fgiudici@redhat.com>
2020-10-06 17:54:13 -07:00
bin liu
718f718764 ci: use export command to export envs instead of env config item
The config item env is used as a Matrix Expansion key, so these envs
will be exported to build jobs individually.

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-06 17:54:13 -07:00
bin liu
d860ded3f0 ci: use Travis cache to reduce build time
This PR includes these changes:
- use Rust installed by Travis
- install x86_64-unknown-linux-musl
- install rustfmt
- use Travis cache
- delete ci/install_vc.sh

Fixes: #748

Signed-off-by: bin liu <bin@hyper.sh>
2020-10-06 17:54:13 -07:00
fupan.lfp
a141da8a20 agent: update cgroups crate
Update cgroups crate to fix the building issue
on Aarch64.

Fixes: #770

Signed-off-by: fupan.lfp <fupan.lfp@antfin.com>
2020-10-06 17:54:13 -07:00
Ychau Wang
aaaaee7a4b docs: Update the reference path of kata-deploy in the packaging
Use the relative path of kata-deploy to replace the 1.x packaging URL in
the kata-deploy/README.md file. This fixes the path issue produced by
creating the new branch.

Fixes: #777

Signed-off-by: Ychau Wang <wangyongchao.bj@inspur.com>
2020-10-06 17:54:13 -07:00
James O. D. Hunt
21efaf1fca runtime: make kata-check check for newer release
Update `kata-check` to see if there is a newer version available for
download. Useful for users installing static packages (without a package
manager).

Fixes: #734.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
2020-10-06 17:54:13 -07:00
Peng Tao
2056623e13 how-to: add privileged_without_host_devices to containerd guide
It should be set by default for Kata containers working with containerd.

Fixes: #775
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Julio Montes
34126ee704 travis: enable RUST_BACKTRACE
RUST_BACKTRACE=1 will help us a lot in debugging unit tests when
a test is failing.

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-06 17:54:13 -07:00
Julio Montes
980a338454 agent/rustjail: add more unit tests
Add unit tests for finish_root, read_only_path and mknod_dev
increasing code coverage of mount.rs

fixes #284

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-06 17:54:13 -07:00
Julio Montes
e14f766895 agent/rustjail: remove makedev function
remove `makedev` function, use `nix`'s implementation instead

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-06 17:54:13 -07:00
Julio Montes
2e0731f479 agent/rustjail: add unit tests for ms_move_rootfs and mask_path
Increase code coverage of mount.rs

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-06 17:54:13 -07:00
Julio Montes
addf62087c agent/rustjail: implement functions to chroot
Use conditional compilation (#[cfg]) to change the chroot behaviour
at compilation time. For example, the function will just return
`Ok(())` when the unit tests are being compiled; otherwise the real
chroot operation is performed.
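
A hedged sketch of this conditional-compilation pattern (names are
illustrative, not the actual rustjail code): the real operation is only
compiled in non-test builds, while test builds get a stub that simply
succeeds.

```rust
use std::io::Result;

#[cfg(not(test))]
fn do_chroot(path: &str) -> Result<()> {
    // The real implementation would issue the chroot(2) syscall here.
    println!("would chroot to {}", path);
    Ok(())
}

#[cfg(test)]
fn do_chroot(_path: &str) -> Result<()> {
    // Stubbed out so unit tests can run without privileges.
    Ok(())
}

fn main() -> Result<()> {
    do_chroot("/tmp")
}
```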

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-06 17:54:13 -07:00
Julio Montes
c24b68dc4f agent/rustjail: add unit test for pivot_rootfs
Add unit test for pivot_rootfs increasing the code coverage of
mount.rs

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-06 17:54:13 -07:00
Julio Montes
24677d7484 agent/rustjail: implement functions to pivot_root
Use conditional compilation (#[cfg]) to change the pivot_root behaviour
at compilation time. For example, the function will just return
`Ok(())` when the unit tests are being compiled; otherwise the real
pivot_root operation is performed.

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-06 17:54:13 -07:00
Julio Montes
9e74c28158 agent/rustjail: add unit test for mount_cgroups
Add a unit test for `mount_cgroups` increasing the code coverage
of mount.rs from 44% to 52%

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-06 17:54:13 -07:00
Julio Montes
b7aae33cc1 agent/rustjail: add unit test for init_rootfs
Add a unit test for `init_rootfs` increasing the code coverage
of mount.rs from 0% to 44%.

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-06 17:54:13 -07:00
Julio Montes
6d9d58278e agent/rustjail/mount: don't use unwrap
Don't use unwrap in `init_rootfs`; instead return an Error. This way
we can write unit tests that don't panic.
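
A small illustration of the pattern with stand-in types (not the real
init_rootfs): the `unwrap()` is replaced by constructing an error, so a bad
input becomes a testable `Err` rather than a panic.

```rust
use std::io::{Error, ErrorKind, Result};

fn rootfs_path(spec_root: Option<&str>) -> Result<&str> {
    // Previously something like: spec_root.unwrap()
    spec_root.ok_or_else(|| Error::new(ErrorKind::InvalidInput, "no root in spec"))
}

fn main() {
    assert!(rootfs_path(None).is_err()); // an Err, not a panic
    assert_eq!(rootfs_path(Some("/rootfs")).unwrap(), "/rootfs");
}
```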

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-06 17:54:13 -07:00
Julio Montes
1bc6fbda8c agent/rustjail: add tempfile crate as depedency
Add the tempfile crate as a dependency; it will be used in the following
commits to create temporary directories for unit testing.
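
A hedged sketch of how tempfile is typically used in such unit tests
(illustrative, not the actual rustjail tests; assumes `tempfile = "3"` as a
dev-dependency):

```rust
use std::fs;
use tempfile::tempdir;

#[test]
fn creates_rootfs_dir() {
    let dir = tempdir().unwrap(); // removed automatically when dropped
    let rootfs = dir.path().join("rootfs");
    fs::create_dir(&rootfs).unwrap();
    assert!(rootfs.is_dir());
}
```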

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-06 17:54:13 -07:00
Julio Montes
d39f5a85e6 rustjail: implement functions to mount and umount files
Use conditional compilation (#[cfg]) to change the mount and umount
behaviours at compilation time. For example, such functions will just
return `Ok(())` when the unit tests are being compiled; otherwise the real
mount and umount operations are performed.

Signed-off-by: Julio Montes <julio.montes@intel.com>
2020-10-06 17:54:13 -07:00
Ychau Wang
d90a0eefbe docs: Fix the kata-pkgsync tool's docs script path
Fix the kata-pkgsync tool's docs, change the download path of the
packaging tool in 2.0 release.

Fixes: #773

Signed-off-by: Ychau Wang <wangyongchao.bj@inspur.com>
2020-10-06 17:54:13 -07:00
Peng Tao
2618c014a0 docs: fix k8s containerd howto links
It should point to the internal versions.yaml file.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
5c4878f37e docs: fix up developer guide for 2.0
1. Until we restore docker/moby support, we should use crictl as
developer example.
2. Most of the hyperlinks should point to kata-containers repository.
3. There is no more standalone mode.

Fixes: #767
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
bd6b169e98 gitignore: ignore agent version.rs
It is auto-generated.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
5770336572 agent: fix agent panic running as init
We should mount procfs before trying to parse kernel command lines.

Fixes: #771
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
zhanghj
45daec7b37 packaging: use local version file for kata 2.0 in Makefile
Use local version file instead of downloading from upstream repo.

Fixes: #756

Signed-off-by: zhanghj <zhanghj.lc@inspur.com>
2020-10-06 17:54:13 -07:00
Peng Tao
ed5a7dc022 docs: fix release process doc
We no longer build OBS packages. And we use
kata-containers/tools/packaging/release to do release.

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
Peng Tao
6fc7c77721 packaging: fix release notes
Should mention the 2.0 branch docs.

Fixes: #763
Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-10-06 17:54:13 -07:00
268 changed files with 34353 additions and 22464 deletions

View File

@@ -10,7 +10,7 @@ env:
error_msg: |+
See the document below for help on formatting commits for the project.
https://github.com/kata-containers/community/blob/master/CONTRIBUTING.md#patch-format
https://github.com/kata-containers/community/blob/master/CONTRIBUTING.md#patch-forma
jobs:
commit-message-check:

View File

@@ -38,12 +38,12 @@ jobs:
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
@@ -66,12 +66,12 @@ jobs:
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
@@ -92,12 +92,12 @@ jobs:
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
@@ -118,12 +118,12 @@ jobs:
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
@@ -145,12 +145,12 @@ jobs:
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
@@ -172,12 +172,12 @@ jobs:
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
@@ -199,12 +199,12 @@ jobs:
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
@@ -226,12 +226,12 @@ jobs:
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
@@ -253,12 +253,12 @@ jobs:
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
@@ -303,7 +303,9 @@ jobs:
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha ./packaging/kata-deploy
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
docker push katadocker/kata-deploy-ci:$pkg_sha
echo "::set-output name=PKG_SHA::${pkg_sha}"
echo "##[set-output name=PKG_SHA;]${pkg_sha}"
echo ::set-env name=TAG::$tag
- name: test-kata-deploy-ci-in-aks
uses: ./packaging/kata-deploy/action
with:

View File

@@ -39,12 +39,12 @@ jobs:
run: |
if grep -q $buildstr artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@v2
with:
name: kata-artifacts
@@ -67,12 +67,12 @@ jobs:
run: |
if grep -q $buildstr artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@v2
with:
name: kata-artifacts
@@ -93,12 +93,12 @@ jobs:
run: |
if grep -q $buildstr artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@v2
with:
name: kata-artifacts
@@ -119,12 +119,12 @@ jobs:
run: |
if grep -q $buildstr artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@v2
with:
name: kata-artifacts
@@ -145,12 +145,12 @@ jobs:
run: |
if grep -q $buildstr artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@v2
with:
name: kata-artifacts
@@ -171,12 +171,12 @@ jobs:
run: |
if grep -q $buildstr artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@v2
with:
name: kata-artifacts
@@ -198,12 +198,12 @@ jobs:
run: |
if grep -q $buildstr artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@v2
with:
name: kata-artifacts
@@ -224,12 +224,12 @@ jobs:
run: |
if grep -q $buildstr artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
echo ::set-env name=artifact-built::true
else
echo "artifact-built=false" >> $GITHUB_ENV
echo ::set-env name=artifact-built::false
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
if: env.artifact-built == 'true'
uses: actions/upload-artifact@v2
with:
name: kata-artifacts
@@ -275,9 +275,11 @@ jobs:
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha $GITHUB_WORKSPACE/tools/packaging/kata-deploy
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
docker push katadocker/kata-deploy-ci:$pkg_sha
echo "##[set-output name=PKG_SHA;]${pkg_sha}"
echo ::set-env name=TAG::$tag
mkdir -p packaging/kata-deploy
ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action packaging/kata-deploy/action
echo "::set-output name=PKG_SHA::${pkg_sha}"
- name: test-kata-deploy-ci-in-aks
uses: ./packaging/kata-deploy/action
with:

View File

@@ -1,37 +0,0 @@
name: Release Kata 2.x in snapcraft store
on:
push:
tags:
- '2.*'
jobs:
release-snap:
runs-on: ubuntu-20.04
steps:
- name: Check out Git repository
uses: actions/checkout@v2
- name: Install Snapcraft
uses: samuelmeuli/action-snapcraft@v1
with:
snapcraft_token: ${{ secrets.snapcraft_token }}
- name: Build snap
run: |
sudo apt-get install -y git git-extras
kata_url="https://github.com/kata-containers/kata-containers"
latest_version=$(git ls-remote --tags ${kata_url} | egrep -o "refs.*" | egrep -v "\-alpha|\-rc|{}" | egrep -o "[[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+" | sort -V -r | head -1)
current_version="$(echo ${GITHUB_REF} | cut -d/ -f3)"
# Check if the current tag is the latest tag
if echo -e "$latest_version\n$current_version" | sort -C -V; then
# Current version is the latest version, build it
snapcraft -d snap --destructive-mode
fi
- name: Upload snap
run: |
snap_version="$(echo ${GITHUB_REF} | cut -d/ -f3)"
snap_file="kata-containers_${snap_version}_amd64.snap"
# Upload the snap if it exists
if [ -f ${snap_file} ]; then
snapcraft upload --release=candidate ${snap_file}
fi

View File

@@ -1,68 +0,0 @@
on: ["pull_request"]
name: Static checks
jobs:
test:
strategy:
matrix:
go-version: [1.13.x, 1.14.x, 1.15.x]
os: [ubuntu-18.04]
runs-on: ${{ matrix.os }}
env:
GO111MODULE: off
TRAVIS: "true"
TRAVIS_BRANCH: ${{ github.base_ref }}
TRAVIS_PULL_REQUEST_BRANCH: ${{ github.head_ref }}
TRAVIS_PULL_REQUEST_SHA : ${{ github.event.pull_request.head.sha }}
RUST_BACKTRACE: "1"
RUST_AGENT: "yes"
target_branch: ${TRAVIS_BRANCH}
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
env:
GOPATH: ${{ runner.workspace }}/kata-containers
- name: Setup GOPATH
run: |
gopath_org=$(go env GOPATH)/src/github.com/kata-containers/
mkdir -p ${gopath_org}
ln -s ${PWD} ${gopath_org}
echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
echo "TRAVIS: ${TRAVIS}"
- name: Set env
run: |
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
path: ./src/github.com/${{ github.repository }}
- name: Setup travis references
run: |
echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
target_branch=${TRAVIS_BRANCH}
- name: Setup
run: |
cd ${GOPATH}/src/github.com/kata-containers/kata-containers && ./ci/setup.sh
env:
GOPATH: ${{ runner.workspace }}/kata-containers
- name: Building rust
run: |
cd ${GOPATH}/src/github.com/kata-containers/kata-containers && ./ci/install_rust.sh
PATH=$PATH:"$HOME/.cargo/bin"
- name: Make clippy
run: |
cd ${GOPATH}/src/github.com/kata-containers/kata-containers/src/agent && rustup target add x86_64-unknown-linux-musl && rustup component add rustfmt && rustup component add clippy && make clippy
- name: Static checks
run: |
cd ${GOPATH}/src/github.com/kata-containers/kata-containers && ./ci/static-checks.sh
- name: Build agent
run: |
cd ${GOPATH}/src/github.com/kata-containers/kata-containers/src/agent && make
- name: Run agent unit tests
run: |
cd ${GOPATH}/src/github.com/kata-containers/kata-containers/src/agent && make check

4
.gitignore vendored
View File

@@ -3,9 +3,5 @@
**/*.rej
**/target
**/.vscode
pkg/logging/Cargo.lock
src/agent/src/version.rs
src/agent/kata-agent.service
src/agent/protocols/src/*.rs
!src/agent/protocols/src/lib.rs

62
.travis.yml Normal file
View File

@@ -0,0 +1,62 @@
# Copyright (c) 2019 Ant Financial
#
# SPDX-License-Identifier: Apache-2.0
#
dist: bionic
os: linux
# set cache directories manually, because
# we are using a non-standard directory struct
# cargo root is in srs/agent
#
# If needed, caches can be cleared
# by ways documented in
# https://docs.travis-ci.com/user/caching#clearing-caches
language: rust
rust:
- 1.44.1
cache:
cargo: true
directories:
- src/agent/target
before_install:
- git remote set-branches --add origin "${TRAVIS_BRANCH}"
- git fetch
- export RUST_BACKTRACE=1
- export target_branch=$TRAVIS_BRANCH
- "ci/setup.sh"
# we use install to run check agent
# so that it is easy to skip for non-amd64 platform
install:
- export PATH=$PATH:"$HOME/.cargo/bin"
- export RUST_AGENT=yes
- rustup target add x86_64-unknown-linux-musl
- sudo ln -sf /usr/bin/g++ /bin/musl-g++
- rustup component add rustfmt
- make -C ${TRAVIS_BUILD_DIR}/src/agent
- make -C ${TRAVIS_BUILD_DIR}/src/agent check
- sudo -E PATH=$PATH make -C ${TRAVIS_BUILD_DIR}/src/agent check
before_script:
- "ci/install_go.sh"
- make -C ${TRAVIS_BUILD_DIR}/src/runtime
- make -C ${TRAVIS_BUILD_DIR}/src/runtime test
- sudo -E PATH=$PATH GOPATH=$GOPATH make -C ${TRAVIS_BUILD_DIR}/src/runtime test
script:
- "ci/static-checks.sh"
jobs:
include:
- name: x86_64 test
os: linux
- name: ppc64le test
os: linux-ppc64le
install: skip
script: skip
allow_failures:
- name: ppc64le test
fast_finish: true

207
README.md
View File

@@ -2,143 +2,136 @@
# Kata Containers
* [Kata Containers](#kata-containers)
* [Introduction](#introduction)
* [Getting started](#getting-started)
* [Documentation](#documentation)
* [Raising issues](#raising-issues)
* [Kata Containers repositories](#kata-containers-repositories)
* [Code Repositories](#code-repositories)
* [Kata Containers-developed components](#kata-containers-developed-components)
* [Agent](#agent)
* [KSM throttler](#ksm-throttler)
* [Runtime](#runtime)
* [Trace forwarder](#trace-forwarder)
* [Additional](#additional)
* [Hypervisor](#hypervisor)
* [Kernel](#kernel)
* [CI](#ci)
* [Community](#community)
* [Getting help](#getting-help)
* [Raising issues](#raising-issues)
* [Kata Containers 1.x versions](#kata-containers-1x-versions)
* [Developers](#developers)
* [Components](#components)
* [Kata Containers 1.x components](#kata-containers-1x-components)
* [Common repositories](#common-repositories)
* [Packaging and releases](#packaging-and-releases)
* [Documentation](#documentation)
* [Packaging](#packaging)
* [Test code](#test-code)
* [Utilities](#utilities)
* [OS builder](#os-builder)
* [Web content](#web-content)
---
Welcome to Kata Containers!
This repository is the home of the Kata Containers code for the 2.0 and newer
releases.
The purpose of this repository is to act as a "top level" site for the project. Specifically it is used:
If you want to learn about Kata Containers, visit the main
[Kata Containers website](https://katacontainers.io).
- To provide a list of the various *other* [Kata Containers repositories](#kata-containers-repositories),
along with a brief explanation of their purpose.
For further details on the older (first generation) Kata Containers 1.x
versions, see the
[Kata Containers 1.x components](#kata-containers-1x-components)
section.
- To provide a general area for [Raising Issues](#raising-issues).
## Introduction
## Raising issues
Kata Containers is an open source project and community working to build a
standard implementation of lightweight Virtual Machines (VMs) that feel and
perform like containers, but provide the workload isolation and security
advantages of VMs.
This repository is used for [raising
issues](https://github.com/kata-containers/kata-containers/issues/new):
## Getting started
- That might affect multiple code repositories.
See the [installation documentation](docs/install).
## Documentation
See the [official documentation](docs)
(including [installation guides](docs/install),
[the developer guide](docs/Developer-Guide.md),
[design documents](docs/design) and more).
## Community
To learn more about the project, its community and governance, see the
[community repository](https://github.com/kata-containers/community). This is
the first place to go if you wish to contribute to the project.
## Getting help
See the [community](#community) section for ways to contact us.
### Raising issues
Please raise an issue
[in this repository](https://github.com/kata-containers/kata-containers/issues).
- Where the raiser is unsure which repositories are affected.
> **Note:**
> If you are reporting a security issue, please follow the [vulnerability reporting process](https://github.com/kata-containers/community#vulnerability-handling)
>
> - If an issue affects only a single component, it should be raised in that
> component's repository.
#### Kata Containers 1.x versions
## Kata Containers repositories
For older Kata Containers 1.x releases, please raise an issue in the
[Kata Containers 1.x component repository](#kata-containers-1x-components)
that seems most appropriate.
### CI
If in doubt, raise an issue
[in the Kata Containers 1.x runtime repository](https://github.com/kata-containers/runtime/issues).
The [CI](https://github.com/kata-containers/ci) repository stores the Continuous
Integration (CI) system configuration information.
## Developers
### Community
### Components
The [Community](https://github.com/kata-containers/community) repository is
the first place to go if you want to use or contribute to the project.
| Component | Type | Description |
|-|-|-|
| [agent-ctl](tools/agent-ctl) | utility | Tool that provides low-level access for testing the agent. |
| [agent](src/agent) | core | Management process running inside the virtual machine / POD that sets up the container environment. |
| [documentation](docs) | documentation | Documentation common to all components (such as design and install documentation). |
| [osbuilder](tools/osbuilder) | infrastructure | Tool to create "mini O/S" rootfs and initrd images for the hypervisor. |
| [packaging](tools/packaging) | infrastructure | Scripts and metadata for producing packaged binaries<br/>(components, hypervisors, kernel and rootfs). |
| [runtime](src/runtime) | core | Main component run by a container manager and providing a containerd shimv2 runtime implementation. |
| [trace-forwarder](src/trace-forwarder) | utility | Agent tracing helper. |
### Code Repositories
#### Kata Containers 1.x components
#### Kata Containers-developed components
For the first generation of Kata Containers (1.x versions), each component was
kept in a separate repository.
##### Agent
For information on the Kata Containers 1.x releases, see the
[Kata Containers 1.x releases page](https://github.com/kata-containers/runtime/releases).
The [`kata-agent`](src/agent/README.md) runs inside the
virtual machine and sets up the container environment.
For further information on particular Kata Containers 1.x components, see the
individual component repositories:
##### KSM throttler
| Component | Type | Description |
|-|-|-|
| [agent](https://github.com/kata-containers/agent) | core | See [components](#components). |
| [documentation](https://github.com/kata-containers/documentation) | documentation | |
| [KSM throttler](https://github.com/kata-containers/ksm-throttler) | optional core | Daemon that monitors containers and deduplicates memory to maximize container density on the host. |
| [osbuilder](https://github.com/kata-containers/osbuilder) | infrastructure | See [components](#components). |
| [packaging](https://github.com/kata-containers/packaging) | infrastructure | See [components](#components). |
| [proxy](https://github.com/kata-containers/proxy) | core | Multiplexes communications between the shims, agent and runtime. |
| [runtime](https://github.com/kata-containers/runtime) | core | See [components](#components). |
| [shim](https://github.com/kata-containers/shim) | core | Handles standard I/O and signals on behalf of the container process. |
The [`kata-ksm-throttler`](https://github.com/kata-containers/ksm-throttler)
is an optional utility that monitors containers and deduplicates memory to
maximize container density on a host.
> **Note:**
>
> - There are more components for the original Kata Containers 1.x implementation.
> - The current implementation simplifies the design significantly:
> compare the [current](docs/design/architecture.md) and
> [previous generation](https://github.com/kata-containers/documentation/blob/master/design/architecture.md)
> designs.
##### Runtime
### Common repositories
The [`kata-runtime`](src/runtime/README.md) is usually
invoked by a container manager and provides high-level verbs to manage
containers.
The following repositories are used by both the current and first generation Kata Containers implementations:
##### Trace forwarder
| Component | Description | Current | First generation | Notes |
|-|-|-|-|-|
| CI | Continuous Integration configuration files and scripts. | [Kata 2.x](https://github.com/kata-containers/ci/tree/2.0-dev) | [Kata 1.x](https://github.com/kata-containers/ci/tree/master) | |
| kernel | The Linux kernel used by the hypervisor to boot the guest image. | [Kata 2.x][kernel] | [Kata 1.x][kernel] | Patches are stored in the packaging component. |
| tests | Test code. | [Kata 2.x](https://github.com/kata-containers/tests/tree/2.0-dev) | [Kata 1.x](https://github.com/kata-containers/tests/tree/master) | Excludes unit tests which live with the main code. |
| www.katacontainers.io | Contains the source for the [main web site](https://www.katacontainers.io). | [Kata 2.x][github-katacontainers.io] | [Kata 1.x][github-katacontainers.io] | |
The [`kata-trace-forwarder`](src/trace-forwarder) is a component only used
when tracing the [agent](#agent) process.
### Packaging and releases
#### Additional
Kata Containers is now
[available natively for most distributions](docs/install/README.md#packaged-installation-methods).
However, packaging scripts and metadata are still used to generate snap and GitHub releases. See
the [components](#components) section for further details.
##### Hypervisor
---
The [`qemu`](https://github.com/kata-containers/qemu) hypervisor is used to
create virtual machines for hosting the containers.
[kernel]: https://www.kernel.org
[github-katacontainers.io]: https://github.com/kata-containers/www.katacontainers.io
##### Kernel
The hypervisor uses a [Linux\* kernel](https://github.com/kata-containers/linux) to boot the guest image.
### Documentation
The [docs](docs/README.md) directory holds documentation common to all code components.
### Packaging
We use the [packaging](tools/packaging/README.md) tooling to create packages for the [system
components](#kata-containers-developed-components) including
[rootfs](#os-builder) and [kernel](#kernel) images.
### Test code
The [tests](https://github.com/kata-containers/tests) repository hosts all
test code except the unit testing code (which is kept in the same repository
as the component it tests).
### Utilities
#### OS builder
The [osbuilder](tools/osbuilder/README.md) tool can create
a rootfs and a "mini O/S" image. This image is used by the hypervisor to set up
the environment before switching to the workload.
#### `kata-agent-ctl`
[`kata-agent-ctl`](tools/agent-ctl) is a low-level test tool for
interacting with the agent.
### Web content
The
[www.katacontainers.io](https://github.com/kata-containers/www.katacontainers.io)
repository contains all sources for the https://www.katacontainers.io site.
## Credits
Kata Containers uses [packagecloud](https://packagecloud.io) for package
hosting.

View File

@@ -1 +1 @@
2.1-alpha0
2.0.0

View File

@@ -104,7 +104,7 @@ The build will create the following:
You can check if your system is capable of creating a Kata Container by running the following:
```
$ sudo kata-runtime check
$ sudo kata-runtime kata-check
```
If your system is *not* able to run Kata Containers, the previous command will error out and explain why.
@@ -354,12 +354,9 @@ You MUST choose one of `alpine`, `centos`, `clearlinux`, `euleros`, and `fedora`
>
> - Check the [compatibility matrix](../tools/osbuilder/README.md#platform-distro-compatibility-matrix) before creating rootfs.
Optionally, add your custom agent binary to the rootfs with the following commands. `LIBC` defaults to `musl`; if `ARCH` is `ppc64le`, set `LIBC=gnu` and `ARCH=powerpc64le`:
Optionally, add your custom agent binary to the rootfs with the following:
```
$ export ARCH=$(uname -m)
$ [ ${ARCH} == "ppc64le" ] && export LIBC=gnu || export LIBC=musl
$ [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le
$ sudo install -o root -g root -m 0550 -T ../../../src/agent/target/${ARCH}-unknown-linux-${LIBC}/release/kata-agent ${ROOTFS_DIR}/sbin/init
$ sudo install -o root -g root -m 0550 -T ../../agent/kata-agent ${ROOTFS_DIR}/sbin/init
```
### Build an initrd image

View File

@@ -25,7 +25,7 @@ All documents must:
- Have a `.md` file extension.
- Include a TOC (table of contents) at the top of the document with links to
all heading sections. We recommend using the
[`check-markdown`](https://github.com/kata-containers/tests/tree/master/cmd/check-markdown)
[`kata-check-markdown`](https://github.com/kata-containers/tests/tree/master/cmd/check-markdown)
tool to generate the TOC.
- Be linked to from another document in the same repository.

View File

@@ -40,7 +40,6 @@ See the [howto documentation](how-to).
* [Intel QAT with Kata](./use-cases/using-Intel-QAT-and-kata.md)
* [VPP with Kata](./use-cases/using-vpp-and-kata.md)
* [SPDK vhost-user with Kata](./use-cases/using-SPDK-vhostuser-and-kata.md)
* [Intel SGX with Kata](./use-cases/using-Intel-SGX-and-kata.md)
## Developer Guide

View File

@@ -48,10 +48,10 @@ Alternatively, if you are using Kata Containers version 1.12.0 or newer, you
can check for newer releases using the command line:
```bash
$ kata-runtime check --check-version-only
$ kata-runtime kata-check --check-version-only
```
There are various other related options. Run `kata-runtime check --help`
There are various other related options. Run `kata-runtime kata-check --help`
for further details.
# Configuration changes

View File

@@ -58,7 +58,7 @@ to go through the VSOCK interface exported by QEMU.
The container workload, that is, the actual OCI bundle rootfs, is exported from the
host to the virtual machine. In the case where a block-based graph driver is
configured, `virtio-scsi` will be used. In all other cases a `virtio-fs` VIRTIO mount point
configured, `virtio-scsi` will be used. In all other cases a 9pfs VIRTIO mount point
will be used. `kata-agent` uses this mount point as the root filesystem for the
container processes.

View File

@@ -3,6 +3,7 @@ To fulfill the [Kata design requirements](kata-design-requirements.md), and base
- Sandbox based top API
- Storage and network hotplug API
- Plugin frameworks for external proprietary Kata runtime extensions
- Built-in shim and proxy types and capabilities
## Sandbox Based API
### Sandbox Management API
@@ -22,17 +23,17 @@ To fulfill the [Kata design requirements](kata-design-requirements.md), and base
|`sandbox.Stats()`| Get the stats of a running sandbox, return a `SandboxStats` structure.|
|`sandbox.Status()`| Get the status of the sandbox and containers, return a `SandboxStatus` structure.|
|`sandbox.Stop(force)`| Stop a sandbox and Destroy the containers in the sandbox. When force is true, ignore guest related stop failures.|
|`sandbox.CreateContainer(contConfig)`| Create new container in the sandbox with the `ContainerConfig` parameter. It will add new container config to `sandbox.config.Containers`.|
|`sandbox.DeleteContainer(containerID)`| Delete a container from the sandbox by `containerID`, return a `Container` structure.|
|`sandbox.CreateContainer(contConfig)`| Create new container in the sandbox with the `ContainerConfig` param. It will add new container config to `sandbox.config.Containers`.|
|`sandbox.DeleteContainer(containerID)`| Delete a container from the sandbox by containerID, return a `Container` structure.|
|`sandbox.EnterContainer(containerID, cmd)`| Run a new process in a container, executing customer's `types.Cmd` command.|
|`sandbox.KillContainer(containerID, signal, all)`| Signal a container in the sandbox by the `containerID`.|
|`sandbox.PauseContainer(containerID)`| Pause a running container in the sandbox by the `containerID`.|
|`sandbox.KillContainer(containerID, signal, all)`| Signal a container in the sandbox by the containerID.|
|`sandbox.PauseContainer(containerID)`| Pause a running container in the sandbox by the containerID.|
|`sandbox.ProcessListContainer(containerID, options)`| List every process running inside a specific container in the sandbox, return a `ProcessList` structure.|
|`sandbox.ResumeContainer(containerID)`| Resume a paused container in the sandbox by the `containerID`.|
|`sandbox.StartContainer(containerID)`| Start a container in the sandbox by the `containerID`.|
|`sandbox.ResumeContainer(containerID)`| Resume a paused container in the sandbox by the containerID.|
|`sandbox.StartContainer(containerID)`| Start a container in the sandbox by the containerID.|
|`sandbox.StatsContainer(containerID)`| Get the stats of a running container, return a `ContainerStats` structure.|
|`sandbox.StatusContainer(containerID)`| Get the status of a container in the sandbox, return a `ContainerStatus` structure.|
|`sandbox.StopContainer(containerID, force)`| Stop a container in the sandbox by the `containerID`.|
|`sandbox.StopContainer(containerID, force)`| Stop a container in the sandbox by the containerID.|
|`sandbox.UpdateContainer(containerID, resources)`| Update a running container in the sandbox.|
|`sandbox.WaitProcess(containerID, processID)`| Wait on a process to terminate.|
### Sandbox Hotplug API
@@ -56,7 +57,7 @@ To fulfill the [Kata design requirements](kata-design-requirements.md), and base
|Name|Description|
|---|---|
|`sandbox.GetOOMEvent()`| Monitor the OOM events that occur in the sandbox.|
|`sandbox.UpdateRuntimeMetrics()`| Update the `shim/hypervisor` metrics of the running sandbox.|
|`sandbox.UpdateRuntimeMetrics()`| Update the shim/hypervisor's metrics of the running sandbox.|
|`sandbox.GetAgentMetrics()`| Get metrics of the agent and the guest in the running sandbox.|
## Plugin framework for external proprietary Kata runtime extensions
@@ -98,3 +99,32 @@ Built-in implementations include:
### Sandbox Connection Plugin Workflow
![Sandbox Connection Plugin Workflow](https://raw.githubusercontent.com/bergwolf/raw-contents/master/kata/Sandbox-Connection.png "Sandbox Connection Plugin Workflow")
## Built-in Shim and Proxy Types and Capabilities
### Built-in shim/proxy sandbox configurations
- Supported shim configurations:
|Name|Description|
|---|---|
|`noopshim`|Do not start any shim process.|
|`ccshim`| Start the cc-shim binary.|
|`katashim`| Start the `kata-shim` binary.|
|`katashimbuiltin`|No standalone shim process but shim functionality APIs are exported.|
- Supported proxy configurations:
|Name|Description|
|---|---|
|`noopProxy`| A dummy implementation of the proxy interface, only used for testing purposes.|
|`noProxy`| A generic implementation for any case where no actual proxy is needed.|
|`ccProxy`| Run `ccProxy` to proxy between the runtime and the agent.|
|`kataProxy`| Run `kata-proxy` to translate Yamux connections between the runtime and the Kata agent.|
|`kataProxyBuiltin`| No standalone proxy process; connect to the Kata agent with internal Yamux translation.|
### Built-in Shim Capability
The built-in shim capability is implemented by removing the standalone shim process and
supporting the shim-related APIs directly.
### Built-in Proxy Capability
The built-in proxy capability is achieved by removing the standalone proxy process and
connecting to the Kata agent with a custom gRPC dialer that performs the Yamux translation internally.
This behavior is enabled when the proxy is configured as `kataProxyBuiltin`.

View File

@@ -22,10 +22,10 @@ the multiple hypervisors and virtual machine monitors that Kata supports.
## Mapping container concepts to virtual machine technologies
A typical deployment of Kata Containers will be in Kubernetes by way of a Container Runtime Interface (CRI) implementation. On every node,
Kubelet will interact with a CRI implementer (such as containerd or CRI-O), which will in turn interface with Kata Containers (an OCI based runtime).
Kubelet will interact with a CRI implementor (such as containerd or CRI-O), which will in turn interface with Kata Containers (an OCI based runtime).
The CRI API, as defined at the [Kubernetes CRI-API repo](https://github.com/kubernetes/cri-api/), implies a few constructs being supported by the
CRI implementation, and ultimately in Kata Containers. In order to support the full [API](https://github.com/kubernetes/cri-api/blob/a6f63f369f6d50e9d0886f2eda63d585fbd1ab6a/pkg/apis/runtime/v1alpha2/api.proto#L34-L110) with the CRI-implementer, Kata must provide the following constructs:
CRI implementation, and ultimately in Kata Containers. In order to support the full [API](https://github.com/kubernetes/cri-api/blob/a6f63f369f6d50e9d0886f2eda63d585fbd1ab6a/pkg/apis/runtime/v1alpha2/api.proto#L34-L110) with the CRI-implementor, Kata must provide the following constructs:
![API to construct](./arch-images/api-to-construct.png)
@@ -41,9 +41,14 @@ Each hypervisor or VMM varies on how or if it handles each of these.
## Kata Containers Hypervisor and VMM support
Kata Containers [supports multiple hypervisors](../hypervisors.md).
Kata Containers is designed to support multiple virtual machine monitors (VMMs) and hypervisors.
Kata Containers supports:
- [ACRN hypervisor](https://projectacrn.org/)
- [Cloud Hypervisor](https://github.com/cloud-hypervisor/cloud-hypervisor)/[KVM](https://www.linux-kvm.org/page/Main_Page)
- [Firecracker](https://github.com/firecracker-microvm/firecracker)/KVM
- [QEMU](http://www.qemu-project.org/)/KVM
Details of each solution and a summary are provided below.
Which configuration to use will depend on the end user's requirements. Details of each solution and a summary are provided below.
### QEMU/KVM
@@ -57,7 +62,7 @@ be changed by editing the runtime [`configuration`](./architecture.md/#configura
Devices and features used:
- virtio VSOCK or virtio serial
- virtio block or virtio SCSI
- [virtio net](https://www.redhat.com/en/virtio-networking-series)
- virtio net
- virtio fs or virtio 9p (recommend: virtio fs)
- VFIO
- hotplug
@@ -100,34 +105,25 @@ Devices used:
### Cloud Hypervisor/KVM
[Cloud Hypervisor](https://github.com/cloud-hypervisor/cloud-hypervisor), based
on [rust-vmm](https://github.com/rust-vmm), is designed to have a
lighter footprint and smaller attack surface for running modern cloud
workloads. Kata Containers with Cloud
Hypervisor provides mostly complete compatibility with Kubernetes
comparable to the QEMU configuration. As of the 1.12 and 2.0.0 release
of Kata Containers, the Cloud Hypervisor configuration supports both CPU
and memory resize, device hotplug (disk and VFIO), file-system sharing through virtio-fs,
block-based volumes, booting from VM images backed by pmem device, and
fine-grained seccomp filters for each VMM thread (e.g. all virtio
device worker threads). Please check [this GitHub Project](https://github.com/orgs/kata-containers/projects/21)
for details of ongoing integration efforts.
Cloud Hypervisor, based on [rust-VMM](https://github.com/rust-vmm), is designed to have a lighter footprint and attack surface. For Kata Containers,
relative to Firecracker, the Cloud Hypervisor configuration provides better compatibility at the expense of exposing additional devices: file system
sharing and direct device assignment. As of the 1.10 release of Kata Containers, Cloud Hypervisor does not support device hotplug, and as a result
does not support updating container resources after boot, or utilizing block based volumes. While Cloud Hypervisor does support VFIO, Kata is still adding
this support. As of 1.10, Kata does not support block based volumes or direct device assignment. See [Cloud Hypervisor device support documentation](https://github.com/cloud-hypervisor/cloud-hypervisor/blob/master/docs/device_model.md)
for more details on Cloud Hypervisor.
Devices and features used:
- virtio VSOCK or virtio serial
Devices used:
- virtio VSOCK
- virtio block
- virtio net
- virtio fs
- virtio pmem
- VFIO
- hotplug
- seccomp filters
- [HTTP OpenAPI](https://github.com/cloud-hypervisor/cloud-hypervisor/blob/master/vmm/src/api/openapi/cloud-hypervisor.yaml)
### Summary
| Solution | release introduced | brief summary |
|-|-|-|
| Cloud Hypervisor | 1.10 | upstream Cloud Hypervisor with rich feature support, e.g. hotplug, VFIO and FS sharing|
| Firecracker | 1.5 | upstream Firecracker, rust-VMM based, no VFIO, no FS sharing, no memory/CPU hotplug |
| QEMU | 1.0 | upstream QEMU, with support for hotplug and filesystem sharing |
| NEMU | 1.4 | Deprecated, removed as of 1.10 release. Slimmed down fork of QEMU, with experimental support of virtio-fs |
| Firecracker | 1.5 | upstream Firecracker, rust-VMM based, no VFIO, no FS sharing, no memory/CPU hotplug |
| QEMU-virtio-fs | 1.7 | upstream QEMU with support for virtio-fs. Will be removed once virtio-fs lands in upstream QEMU |
| Cloud Hypervisor | 1.10 | rust-VMM based, includes VFIO and FS sharing through virtio-fs, no hotplug |

View File

@@ -14,17 +14,8 @@
- [How to import Kata Containers logs into Fluentd](how-to-import-kata-logs-with-fluentd.md)
## Hypervisors Integration
Currently supported hypervisors with Kata Containers include:
- `qemu`
- `cloud-hypervisor`
- `firecracker`
- `ACRN`
While `qemu` and `cloud-hypervisor` work out of the box with installation of Kata,
some additional configuration is needed in case of `firecracker` and `ACRN`.
Refer to the following guides for additional configuration steps:
- [Kata Containers with Firecracker](https://github.com/kata-containers/documentation/wiki/Initial-release-of-Kata-Containers-with-Firecracker-support)
- [Kata Containers with NEMU](how-to-use-kata-containers-with-nemu.md)
- [Kata Containers with ACRN Hypervisor](how-to-use-kata-containers-with-acrn.md)
## Advanced Topics

View File

@@ -56,9 +56,8 @@ There are some limitations with this approach:
As was mentioned above, not all containers need the same modules, therefore using
the configuration file for specifying the list of kernel modules per [POD][3] can
be a pain.
Unlike the configuration file, [annotations](how-to-set-sandbox-config-kata.md)
provide a way to specify custom configurations per POD.
be a pain. Unlike the configuration file, annotations provide a way to specify
custom configurations per POD.
The list of kernel modules and parameters can be set using the annotation
`io.katacontainers.config.agent.kernel_modules` as a semicolon separated
@@ -102,7 +101,7 @@ spec:
tty: true
```
> **Note**: To pass annotations to Kata containers, [CRI-O must be configured correctly](how-to-set-sandbox-config-kata.md#cri-o-configuration)
> **Note**: To pass annotations to Kata containers, [cri must to be configurated correctly](how-to-set-sandbox-config-kata.md#cri-configuration)
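As a hedged illustration of the `io.katacontainers.config.agent.kernel_modules` annotation described above, a pod spec might look like the following sketch; the pod name, runtime class and module parameters are illustrative assumptions only:

```bash
# Illustrative only: names are assumptions, the module list reuses the example
# value documented for the kernel_modules annotation.
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: kata-modules-example
  annotations:
    io.katacontainers.config.agent.kernel_modules: "e1000e InterruptThrottleRate=3000,3000,3000 EEE=1; i915 enable_ppgtt=0"
spec:
  runtimeClassName: kata
  containers:
  - name: c1
    image: busybox
    command: ["sh"]
    stdin: true
    tty: true
EOF
```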
[1]: ../../src/runtime
[2]: ../../src/agent

View File

@@ -3,11 +3,6 @@
Kata Containers gives users freedom to customize at per-pod level, by setting
a wide range of Kata specific annotations in the pod specification.
Some annotations may be [restricted](#restricted-annotations) by the
configuration file for security reasons, notably annotations that could lead the
runtime to execute programs on the host. Such annotations are marked with _(R)_ in
the tables below.
# Kata Configuration Annotations
There are several kinds of Kata configurations and they are listed below.
@@ -31,7 +26,6 @@ There are several kinds of Kata configurations and they are listed below.
| Key | Value Type | Comments |
|-------| ----- | ----- |
| `io.katacontainers.config.agent.enable_tracing` | `boolean` | enable tracing for the agent |
| `io.katacontainers.config.agent.container_pipe_size` | uint32 | specify the size of the std(in/out) pipes created for containers |
| `io.katacontainers.config.agent.kernel_modules` | string | the list of kernel modules and their parameters that will be loaded in the guest kernel. Semicolon separated list of kernel modules and their parameters. These modules will be loaded in the guest kernel using `modprobe`(8). E.g., `e1000e InterruptThrottleRate=3000,3000,3000 EEE=1; i915 enable_ppgtt=0` |
| `io.katacontainers.config.agent.trace_mode` | string | the trace mode for the agent |
| `io.katacontainers.config.agent.trace_type` | string | the trace type for the agent |
@@ -44,24 +38,17 @@ There are several kinds of Kata configurations and they are listed below.
| `io.katacontainers.config.hypervisor.block_device_cache_noflush` | `boolean` | Denotes whether flush requests for the device are ignored |
| `io.katacontainers.config.hypervisor.block_device_cache_set` | `boolean` | cache-related options will be set to block devices or not |
| `io.katacontainers.config.hypervisor.block_device_driver` | string | the driver to be used for block device, valid values are `virtio-blk`, `virtio-scsi`, `nvdimm`|
| `io.katacontainers.config.hypervisor.cpu_features` | `string` | Comma-separated list of CPU features to pass to the CPU (QEMU) |
| `io.katacontainers.config.hypervisor.ctlpath` (R) | `string` | Path to the `acrnctl` binary for the ACRN hypervisor |
| `io.katacontainers.config.hypervisor.default_max_vcpus` | uint32| the maximum number of vCPUs allocated for the VM by the hypervisor |
| `io.katacontainers.config.hypervisor.default_memory` | uint32| the memory assigned for a VM by the hypervisor in `MiB` |
| `io.katacontainers.config.hypervisor.default_vcpus` | uint32| the default vCPUs assigned for a VM by the hypervisor |
| `io.katacontainers.config.hypervisor.disable_block_device_use` | `boolean` | disallow a block device from being used |
| `io.katacontainers.config.hypervisor.disable_image_nvdimm` | `boolean` | specify if a `nvdimm` device should be used as rootfs for the guest (QEMU) |
| `io.katacontainers.config.hypervisor.disable_vhost_net` | `boolean` | specify if `vhost-net` is not available on the host |
| `io.katacontainers.config.hypervisor.enable_hugepages` | `boolean` | if the memory should be `pre-allocated` from huge pages |
| `io.katacontainers.config.hypervisor.enable_iommu_platform` | `boolean` | enable `iommu` on CCW devices (QEMU s390x) |
| `io.katacontainers.config.hypervisor.enable_iommu` | `boolean` | enable `iommu` on Q35 (QEMU x86_64) |
| `io.katacontainers.config.hypervisor.enable_iothreads` | `boolean`| enable IO to be processed in a separate thread. Supported currently for virtio-`scsi` driver |
| `io.katacontainers.config.hypervisor.enable_mem_prealloc` | `boolean` | the memory space used for `nvdimm` device by the hypervisor |
| `io.katacontainers.config.hypervisor.enable_swap` | `boolean` | enable swap of VM memory |
| `io.katacontainers.config.hypervisor.enable_vhost_user_store` | `boolean` | enable vhost-user storage device (QEMU) |
| `io.katacontainers.config.hypervisor.enable_virtio_mem` | `boolean` | enable virtio-mem (QEMU) |
| `io.katacontainers.config.hypervisor.entropy_source` | string| the path to a host source of entropy (`/dev/random`, `/dev/urandom` or real hardware RNG device) |
| `io.katacontainers.config.hypervisor.file_mem_backend` (R) | string | file based memory backend root directory |
| `io.katacontainers.config.hypervisor.file_mem_backend` | string | file based memory backend root directory |
| `io.katacontainers.config.hypervisor.firmware_hash` | string | container firmware SHA-512 hash value |
| `io.katacontainers.config.hypervisor.firmware` | string | the guest firmware that will run the container VM |
| `io.katacontainers.config.hypervisor.guest_hook_path` | string | the path within the VM that will be used for drop in hooks |
@@ -72,7 +59,7 @@ There are several kinds of Kata configurations and they are listed below.
| `io.katacontainers.config.hypervisor.initrd_hash` | string | container guest initrd SHA-512 hash value |
| `io.katacontainers.config.hypervisor.initrd` | string | the guest initrd image that will run in the container VM |
| `io.katacontainers.config.hypervisor.jailer_hash` | string | container jailer SHA-512 hash value |
| `io.katacontainers.config.hypervisor.jailer_path` (R) | string | the jailer that will constrain the container VM |
| `io.katacontainers.config.hypervisor.jailer_path` | string | the jailer that will constrain the container VM |
| `io.katacontainers.config.hypervisor.kernel_hash` | string | container kernel image SHA-512 hash value |
| `io.katacontainers.config.hypervisor.kernel_params` | string | additional guest kernel parameters |
| `io.katacontainers.config.hypervisor.kernel` | string | the kernel used to boot the container VM |
@@ -82,16 +69,14 @@ There are several kinds of Kata configurations and they are listed below.
| `io.katacontainers.config.hypervisor.memory_slots` | uint32| the memory slots assigned to the VM by the hypervisor |
| `io.katacontainers.config.hypervisor.msize_9p` | uint32 | the `msize` for 9p shares |
| `io.katacontainers.config.hypervisor.path` | string | the hypervisor that will run the container VM |
| `io.katacontainers.config.hypervisor.pcie_root_port` | uint32 | specify the number of PCIe Root Port devices. The PCIe Root Port device is used to hot-plug a PCIe device (QEMU) |
| `io.katacontainers.config.hypervisor.shared_fs` | string | the shared file system type, either `virtio-9p` or `virtio-fs` |
| `io.katacontainers.config.hypervisor.use_vsock` | `boolean` | specify use of `vsock` for agent communication |
| `io.katacontainers.config.hypervisor.vhost_user_store_path` (R) | `string` | specify the directory path where vhost-user devices related folders, sockets and device nodes should be (QEMU) |
| `io.katacontainers.config.hypervisor.virtio_fs_cache_size` | uint32 | virtio-fs DAX cache size in `MiB` |
| `io.katacontainers.config.hypervisor.virtio_fs_cache` | string | the cache mode for virtio-fs, valid values are `always`, `auto` and `none` |
| `io.katacontainers.config.hypervisor.virtio_fs_daemon` | string | virtio-fs `vhost-user` daemon path |
| `io.katacontainers.config.hypervisor.virtio_fs_extra_args` | string | extra options passed to `virtiofs` daemon |
# CRI-O Configuration
# CRI Configuration
In case of CRI-O, all annotations specified in the pod spec are passed down to Kata.
@@ -116,7 +101,7 @@ $ cat /etc/containerd/config
```
Additional documentation on the above configuration can be found in the
Additional documentation on the above configuration can be found in the
[containerd docs](https://github.com/containerd/cri/blob/8d5a8355d07783ba2f8f451209f6bdcc7c412346/docs/config.md).
# Example - Using annotations
@@ -174,31 +159,3 @@ spec:
stdin: true
tty: true
```
# Restricted annotations
Some annotations are _restricted_, meaning that the configuration file specifies
the acceptable values. Currently, only hypervisor annotations are restricted,
for security reasons, with the intent to control which binaries the Kata
Containers runtime will launch on your behalf.
The configuration file validates the annotation _name_ as well as the annotation
_value_.
The acceptable annotation names are defined by the `enable_annotations` entry in
the configuration file.
For restricted annotations, an additional configuration entry provides a list of
acceptable values. Since most restricted annotations are intended to control
which binaries the runtime can execute, the valid value is generally provided by
a shell pattern, as defined by `glob(3)`. The table below provides the name of
the configuration entry:
| Key | Config file entry | Comments |
|-------| ----- | ----- |
| `ctlpath` | `valid_ctlpaths` | Valid paths for `acrnctl` binary |
| `file_mem_backend` | `valid_file_mem_backends` | Valid locations for the file-based memory backend root directory |
| `jailer_path` | `valid_jailer_paths`| Valid paths for the jailer constraining the container VM (Firecracker) |
| `path` | `valid_hypervisor_paths` | Valid hypervisors to run the container VM |
| `vhost_user_store_path` | `valid_vhost_user_store_paths` | Valid paths for vhost-user related files|
| `virtio_fs_daemon` | `valid_virtio_fs_daemon_paths` | Valid paths for the `virtiofsd` daemon |
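As a hedged illustration, the entries above might be combined in the runtime configuration file as follows; the hypervisor section name and the paths are assumptions, not the project's defaults:

```bash
# Hypothetical fragment: allow the virtio_fs_daemon annotation and restrict the
# values it may take to the listed glob patterns.
sudo tee -a /etc/kata-containers/configuration.toml <<'EOF'
[hypervisor.qemu]
enable_annotations = ["virtio_fs_daemon"]
valid_virtio_fs_daemon_paths = ["/usr/libexec/virtiofsd*", "/opt/kata/bin/virtiofsd*"]
EOF
```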

View File

@@ -0,0 +1,115 @@
# Kata Containers with NEMU
* [Introduction](#introduction)
* [Pre-requisites](#pre-requisites)
* [NEMU](#nemu)
* [Download and build](#download-and-build)
* [x86_64](#x86_64)
* [aarch64](#aarch64)
* [Configure Kata Containers](#configure-kata-containers)
Kata Containers relies by default on the QEMU hypervisor in order to spawn the virtual machines running containers. [NEMU](https://github.com/intel/nemu) is a fork of QEMU that:
- Reduces the number of lines of code.
- Removes all legacy devices.
- Reduces the emulation as far as possible.
## Introduction
This document describes how to run Kata Containers with NEMU, first by explaining how to download, build and install it. Then it walks through the steps needed to update your Kata Containers configuration in order to run with NEMU.
## Pre-requisites
This document requires Kata Containers to be [installed](../install/README.md) on your system.
Also, it's worth noting that NEMU only supports the `x86_64` and `aarch64` architectures.
## NEMU
### Download and build
```bash
$ git clone https://github.com/intel/nemu.git
$ cd nemu
$ git fetch origin
$ git checkout origin/experiment/automatic-removal
```
#### x86_64
```
$ SRCDIR=$PWD ./tools/build_x86_64_virt.sh
```
#### aarch64
```
$ SRCDIR=$PWD ./tools/build_aarch64.sh
```
> **Note:** The branch `experiment/automatic-removal` is a branch published by Jenkins after it has applied the automatic removal script to the `topic/virt-x86` branch. The purpose of this code removal being to reduce the source tree by removing files not being used by NEMU.
After those commands have successfully returned, you will find the NEMU binary at `$HOME/build-x86_64_virt/x86_64_virt-softmmu/qemu-system-x86_64_virt` (__x86__), or `$HOME/build-aarch64/aarch64-softmmu/qemu-system-aarch64` (__ARM__).
You also need the `OVMF` firmware in order to boot the virtual machine's kernel. It can currently be found at this [location](https://github.com/intel/ovmf-virt/releases).
```bash
$ sudo mkdir -p /usr/share/nemu
$ OVMF_URL=$(curl -sL https://api.github.com/repos/intel/ovmf-virt/releases/latest | jq -S '.assets[0].browser_download_url')
$ curl -o OVMF.fd -L $(sed -e 's/^"//' -e 's/"$//' <<<"$OVMF_URL")
$ sudo install -o root -g root -m 0640 OVMF.fd /usr/share/nemu/
```
> **Note:** The OVMF firmware will be located at this temporary location until the changes can be pushed upstream.
## Configure Kata Containers
All you need from this section is to modify the configuration file `/usr/share/defaults/kata-containers/configuration.toml` to specify the options related to the hypervisor.
```diff
[hypervisor.qemu]
-path = "/usr/bin/qemu-lite-system-x86_64"
+path = "/home/foo/build-x86_64_virt/x86_64_virt-softmmu/qemu-system-x86_64_virt"
kernel = "/usr/share/kata-containers/vmlinuz.container"
initrd = "/usr/share/kata-containers/kata-containers-initrd.img"
image = "/usr/share/kata-containers/kata-containers.img"
-machine_type = "pc"
+machine_type = "virt"
# Optional space-separated list of options to pass to the guest kernel.
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
@@ -31,7 +31,7 @@
# Path to the firmware.
# If you want that qemu uses the default firmware leave this option empty
-firmware = ""
+firmware = "/usr/share/nemu/OVMF.fd"
# Machine accelerators
# comma-separated list of machine accelerators to pass to the hypervisor.
```
As you can see from this snippet above, all you need to change is:
- The path to the hypervisor binary, `/home/foo/build-x86_64_virt/x86_64_virt-softmmu/qemu-system-x86_64_virt` in this example.
- The machine type from `pc` to `virt`.
- The path to the firmware binary, `/usr/share/nemu/OVMF.fd` in this example.
Once you have saved those modifications, you can start a new container:
```bash
$ docker run --runtime=kata-runtime -it busybox
```
And you will be able to verify this new container is running with the NEMU hypervisor by looking for the hypervisor path and the machine type from the `qemu` process running on your system:
```bash
$ ps -aux | grep qemu
root ... /home/foo/build-x86_64_virt/x86_64_virt-softmmu/qemu-system-x86_64_virt
... -machine virt,accel=kvm,kernel_irqchip,nvdimm ...
```
Also relying on `kata-runtime kata-env` is a reliable way to validate you are using the expected hypervisor:
```bash
$ kata-runtime kata-env | awk -v RS= '/\[Hypervisor\]/'
[Hypervisor]
MachineType = "virt"
Version = "NEMU (like QEMU) version 3.0.0 (v3.0.0-179-gaf9a791)\nCopyright (c) 2003-2017 Fabrice Bellard and the QEMU Project developers"
Path = "/home/foo/build-x86_64_virt/x86_64_virt-softmmu/qemu-system-x86_64_virt"
BlockDeviceDriver = "virtio-scsi"
EntropySource = "/dev/urandom"
Msize9p = 8192
MemorySlots = 10
Debug = true
UseVSock = false
```

View File

@@ -46,7 +46,6 @@ overridden by `/etc/kata-containers/configuration.toml` if provided) such that:
- `enable_template = true`
- `initrd =` is set
- `image =` option is commented out or removed
- `shared_fs` should not be `virtio-fs`
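A minimal sketch of those settings, assuming they live in the `[factory]` and `[hypervisor.qemu]` sections of `/etc/kata-containers/configuration.toml` (paths are illustrative):

```bash
# Hedged sketch only: section names and the initrd/image paths are assumptions.
sudo tee -a /etc/kata-containers/configuration.toml <<'EOF'
[factory]
enable_template = true

[hypervisor.qemu]
initrd = "/usr/share/kata-containers/kata-containers-initrd.img"
# image = "/usr/share/kata-containers/kata-containers.img"
EOF
```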
Then you can create a VM templating for later usage by calling
```

View File

@@ -1,68 +0,0 @@
# Hypervisors
* [Hypervisors](#hypervisors)
* [Introduction](#introduction)
* [Types](#types)
* [Determine currently configured hypervisor](#determine-currently-configured-hypervisor)
* [Choose a Hypervisor](#choose-a-hypervisor)
## Introduction
Kata Containers supports multiple hypervisors. This document provides a very
high level overview of the available hypervisors, giving suggestions as to
which hypervisors you may wish to investigate further.
> **Note:**
>
> This document is not prescriptive or authoritative:
>
> - It is up to you to decide which hypervisors may be most appropriate for
> your use-case.
> - Refer to the official documentation for each hypervisor for further details.
## Types
Since each hypervisor offers different features and options, Kata Containers
provides a separate
[configuration file](/src/runtime/README.md#configuration)
for each. The configuration files contain comments explaining which options
are available, their default values and how each setting can be used.
> **Note:**
>
> The simplest way to switch between hypervisors is to create a symbolic link
> to the appropriate hypervisor-specific configuration file.
| Hypervisor | Written in | Architectures | Type | Configuration file |
|-|-|-|-|-|
[ACRN] | C | `x86_64` | Type 1 (bare metal) | `configuration-acrn.toml` |
[Cloud Hypervisor] | rust | `aarch64`, `x86_64` | Type 2 ([KVM]) | `configuration-clh.toml` |
[Firecracker] | rust | `aarch64`, `x86_64` | Type 2 ([KVM]) | `configuration-fc.toml` |
[QEMU] | C | all | Type 2 ([KVM]) | `configuration-qemu.toml` |
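As a hedged example, switching the default configuration to one of the files listed above can be done with a symbolic link; the install paths below are assumptions and vary between packages and distributions:

```bash
# Illustrative only: point the default configuration at the Cloud Hypervisor file.
sudo ln -sf /usr/share/defaults/kata-containers/configuration-clh.toml \
  /usr/share/defaults/kata-containers/configuration.toml
```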
## Determine currently configured hypervisor
```bash
$ kata-runtime kata-env | awk -v RS= '/\[Hypervisor\]/' | grep Path
```
## Choose a Hypervisor
The table below provides a brief summary of some of the differences between
the hypervisors:
| Hypervisor | Summary | Features | Limitations | Container Creation speed | Memory density | Use cases | Comment |
|-|-|-|-|-|-|-|-|
[ACRN] | Safety critical and real-time workloads | | | excellent | excellent | Embedded and IOT systems | For advanced users |
[Cloud Hypervisor] | Low latency, small memory footprint, small attack surface | Minimal | | excellent | excellent | High performance modern cloud workloads | |
[Firecracker] | Very slimline | Extremely minimal | Doesn't support all device types | excellent | excellent | Serverless / FaaS | |
[QEMU] | Lots of features | Lots | | good | good | Good option for most users | All users |
For further details, see the [Virtualization in Kata Containers](design/virtualization.md) document and the official documentation for each hypervisor.
[ACRN]: https://projectacrn.org
[Cloud Hypervisor]: https://github.com/cloud-hypervisor/cloud-hypervisor
[Firecracker]: https://github.com/firecracker-microvm/firecracker
[KVM]: https://en.wikipedia.org/wiki/Kernel-based_Virtual_Machine
[QEMU]: http://www.qemu-project.org

View File

@@ -18,7 +18,7 @@
>
> - If you decide to proceed and install a Kata Containers release, you can
> still check for the latest version of Kata Containers by running
> `kata-runtime check --only-list-releases`.
> `kata-runtime kata-check --only-list-releases`.
>
> - These instructions will not work for Fedora 31 and higher since those
> distribution versions only support cgroups version 2 by default. However,

View File

@@ -6,7 +6,7 @@
* [Install Kata](#install-kata)
* [Create a Kata-enabled Image](#create-a-kata-enabled-image)
Kata Containers on Google Compute Engine (GCE) makes use of [nested virtualization](https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances). Most of the installation procedure is identical to that for Kata on your preferred distribution, but enabling nested virtualization currently requires extra steps on GCE. This guide walks you through creating an image and instance with nested virtualization enabled. Note that `kata-runtime check` checks for nested virtualization, but does not fail if support is not found.
Kata Containers on Google Compute Engine (GCE) makes use of [nested virtualization](https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances). Most of the installation procedure is identical to that for Kata on your preferred distribution, but enabling nested virtualization currently requires extra steps on GCE. This guide walks you through creating an image and instance with nested virtualization enabled. Note that `kata-runtime kata-check` checks for nested virtualization, but does not fail if support is not found.
As a pre-requisite this guide assumes an installed and configured instance of the [Google Cloud SDK](https://cloud.google.com/sdk/downloads). For a zero-configuration option, all of the commands below have been tested under [Google Cloud Shell](https://cloud.google.com/shell/) (as of Jun 2018). Verify your `gcloud` installation and configuration:

View File

@@ -54,7 +54,7 @@ to enable nested virtualization can be found on the
[KVM Nested Guests page](https://www.linux-kvm.org/page/Nested_Guests)
Alternatively, and for other architectures, the Kata Containers built in
[`check`](../../src/runtime/README.md#hardware-requirements)
[`kata-check`](../../src/runtime/README.md#hardware-requirements)
command can be used *inside Minikube* once Kata has been installed, to check for compatibility.
## Setting up Minikube

View File

@@ -1,123 +1,13 @@
# Kata Containers snap package
* [Install Kata Containers](#install-kata-containers)
* [Configure Kata Containers](#configure-kata-containers)
* [Integration with non-compatible shim v2 Container Engines](#integration-with-non-compatible-shim-v2-container-engines)
* [Integration with Docker](#integration-with-docker)
* [Integration with Podman](#integration-with-podman)
* [Integration with shim v2 Container Engines](#integration-with-shim-v2-container-engines)
* [Remove Kata Containers snap package](#remove-kata-containers-snap-package)
## Install Kata Containers
# Install Kata Containers from `snapcraft.io`
Kata Containers can be installed in any Linux distribution that supports
[snapd](https://docs.snapcraft.io/installing-snapd).
> NOTE: From Kata Containers 2.x, only the [Containerd Runtime V2 (Shim API)](https://github.com/containerd/containerd/tree/master/runtime/v2)
> is supported, note that some container engines (`docker`, `podman`, etc) may not
> be able to run Kata Containers 2.x.
Run the following command to install Kata Containers:
Kata Containers 1.x is released through the *stable* channel while Kata Containers
2.x is available in the *candidate* channel.
```bash
$ sudo snap install kata-containers --classic
```
Run the following command to install **Kata Containers 1.x**:
```sh
$ sudo snap install kata-containers --classic
```
Run the following command to install **Kata Containers 2.x**:
```sh
$ sudo snap install kata-containers --candidate --classic
```
## Configure Kata Containers
By default, the Kata Containers snap image is mounted at `/snap/kata-containers` as a
read-only file system, so the default configuration file cannot be edited.
Fortunately, Kata Containers supports loading a configuration file from a
path other than the default.
```sh
$ sudo mkdir -p /etc/kata-containers
$ sudo cp /snap/kata-containers/current/usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers/
$ $EDITOR /etc/kata-containers/configuration.toml
```
## Integration with non-compatible shim v2 Container Engines
At the time of writing this document, `docker` and `podman` **do not support Kata
Containers 2.x, therefore Kata Containers 1.x must be used instead.**
The path to the runtime provided by the Kata Containers 1.x snap package is
`/snap/bin/kata-containers.runtime`; use this path to run Kata Containers 1.x.
### Integration with Docker
`/etc/docker/daemon.json` is the configuration file for `docker`, use the
following configuration to add a new runtime (`kata`) to `docker`.
```json
{
"runtimes": {
"kata": {
"path": "/snap/bin/kata-containers.runtime"
}
}
}
```
Once the above configuration has been applied, use the
following commands to restart `docker` and run Kata Containers 1.x.
```sh
$ sudo systemctl restart docker
$ docker run -ti --runtime kata busybox sh
```
### Integration with Podman
`/usr/share/containers/containers.conf` is the configuration file for `podman`,
add the following configuration in the `[engine.runtimes]` section.
```toml
kata = [
"/snap/bin/kata-containers.runtime"
]
```
Once the above configuration has been applied, use the following command to run
Kata Containers 1.x with `podman`:
```sh
$ sudo podman run -ti --runtime kata docker.io/library/busybox sh
```
## Integration with shim v2 Container Engines
The Container engine daemon (`cri-o`, `containerd`, etc) needs to be able to find the
`containerd-shim-kata-v2` binary to allow Kata Containers to be created.
Run the following command to create a symbolic link to the shim v2 binary.
```sh
$ sudo ln -sf /snap/kata-containers/current/usr/bin/containerd-shim-kata-v2 /usr/local/bin/containerd-shim-kata-v2
```
Once the symbolic link has been created and the engine daemon configured, `io.containerd.kata.v2`
can be used as runtime.
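As a hedged sketch, registering that runtime with `containerd` might look like the following; the plugin section path is an assumption and differs between containerd versions:

```bash
# Assumes the containerd 1.x CRI plugin layout; adjust the section path if needed.
sudo tee -a /etc/containerd/config.toml <<'EOF'
[plugins.cri.containerd.runtimes.kata]
  runtime_type = "io.containerd.kata.v2"
EOF
sudo systemctl restart containerd
```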
Read the following documents to know how to run Kata Containers 2.x with `containerd`.
* [How to use Kata Containers and Containerd](https://github.com/kata-containers/kata-containers/blob/2.0-dev/docs/how-to/containerd-kata.md)
* [Install Kata Containers with containerd](https://github.com/kata-containers/kata-containers/blob/2.0-dev/docs/install/container-manager/containerd/containerd-install.md)
## Remove Kata Containers snap package
Run the following command to remove the Kata Containers snap:
```sh
$ sudo snap remove kata-containers
```
For further information on integrating and configuring the `snap` Kata Containers install,
refer to the [Kata Containers packaging `snap` documentation](https://github.com/kata-containers/packaging/blob/master/snap/README.md#configure-kata-containers).

View File

@@ -1,112 +0,0 @@
# Kata Containers with SGX
- [Check if SGX is enabled](#check-if-sgx-is-enabled)
- [Install Host kernel with SGX support](#install-host-kernel-with-sgx-support)
- [Install Guest kernel with SGX support](#install-guest-kernel-with-sgx-support)
- [Run Kata Containers with SGX enabled](#run-kata-containers-with-sgx-enabled)
Intel® Software Guard Extensions (SGX) is a set of instructions that increases the security
of application code and data, giving them more protection from disclosure or modification.
> **Note:** At the time of writing this document, SGX patches have not landed on the Linux kernel
> project, so specific versions for guest and host kernels must be installed to enable SGX.
## Check if SGX is enabled
Run the following command to check if your host supports SGX.
```sh
$ grep -o sgx /proc/cpuinfo
```
Continue to the following section if the output of the above command is empty,
otherwise continue to section [Install Guest kernel with SGX support](#install-guest-kernel-with-sgx-support)
## Install Host kernel with SGX support
The following commands were tested on Fedora 32, they might work on other distros too.
```sh
$ git clone --depth=1 https://github.com/intel/kvm-sgx
$ pushd kvm-sgx
$ cp /boot/config-$(uname -r) .config
$ yes "" | make oldconfig
$ # In the following step, enable: INTEL_SGX and INTEL_SGX_VIRTUALIZATION
$ make menuconfig
$ make -j$(($(nproc)-1)) bzImage
$ make -j$(($(nproc)-1)) modules
$ sudo make modules_install
$ sudo make install
$ popd
$ sudo reboot
```
> **Notes:**
> * Run: `mokutil --sb-state` to check whether secure boot is enabled, if so, you will need to sign the kernel.
> * You'll lose SGX support when a new distro kernel is installed and the system rebooted.
Once you have restarted your system with the brand new Linux kernel with SGX support, run
the following command to make sure it's enabled. If the output is empty, go to the BIOS
setup and enable SGX manually.
```sh
$ grep -o sgx /proc/cpuinfo
```
## Install Guest kernel with SGX support
Install the guest kernel in the Kata Containers directory so that it can be used to run
Kata Containers.
```sh
$ curl -LOk https://github.com/devimc/kvm-sgx/releases/download/v0.0.1/kata-virtiofs-sgx.tar.gz
$ sudo tar -xf kata-virtiofs-sgx.tar.gz -C /usr/share/kata-containers/
$ sudo sed -i 's|kernel =|kernel = "/usr/share/kata-containers/vmlinux-virtiofs-sgx.container"|g' \
/usr/share/defaults/kata-containers/configuration.toml
```
## Run Kata Containers with SGX enabled
Before running a Kata Container make sure that your version of `crio` or `containerd`
supports annotations.
For `containerd`, check in `/etc/containerd/config.toml` that the list of `pod_annotations` passed
to the sandbox is `["io.katacontainers.*", "sgx.intel.com/epc"]`.
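A hedged sketch of that setting; the plugin section path is an assumption and varies between containerd versions:

```bash
# Illustrative fragment only: make the runtime pass these annotations to the sandbox.
sudo tee -a /etc/containerd/config.toml <<'EOF'
[plugins.cri.containerd.runtimes.kata]
  runtime_type = "io.containerd.kata.v2"
  pod_annotations = ["io.katacontainers.*", "sgx.intel.com/epc"]
EOF
```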
> `sgx.yaml`
```yaml
apiVersion: v1
kind: Pod
metadata:
name: sgx
annotations:
sgx.intel.com/epc: "32Mi"
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: c1
image: busybox
command:
- sh
stdin: true
tty: true
volumeMounts:
- mountPath: /dev/sgx/
name: test-volume
volumes:
- name: test-volume
hostPath:
path: /dev/sgx/
type: Directory
```
```sh
$ kubectl apply -f sgx.yaml
$ kubectl exec -ti sgx ls /dev/sgx/
enclave provision
```
The output of the last command shouldn't be empty; otherwise, check
your system environment to make sure SGX is fully supported.
[1]: github.com/cloud-hypervisor/cloud-hypervisor/

View File

@@ -93,7 +93,9 @@ impl HashSerializer {
// Take care to only add the first instance of a key. This matters for loggers (but not
// Records) since a child loggers have parents and the loggers are serialised child first
// meaning the *newest* fields are serialised first.
self.fields.entry(key).or_insert(value);
if !self.fields.contains_key(&key) {
self.fields.insert(key, value);
}
}
fn remove_field(&mut self, key: &str) {

View File

@@ -21,10 +21,20 @@ parts:
version="9999"
kata_url="https://github.com/kata-containers/kata-containers"
if echo "${GITHUB_REF}" | grep -q -E "^refs/tags"; then
version=$(echo ${GITHUB_REF} | cut -d/ -f3)
git checkout ${version}
fi
image_info="${SNAPCRAFT_IMAGE_INFO:-}"
snap_env="$(echo "${image_info}" | egrep -o "build_url.*" | egrep -o "snap.*build" | cut -d/ -f2)"
case "${snap_env}" in
stable)
# Get the latest stable version
version=$(git ls-remote --tags ${kata_url} | egrep -o "refs.*" | egrep -v "\-alpha|\-rc|{}" | egrep -o "[[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+" | sort -V -r | head -1)
git checkout ${version}
;;
*-dev)
version="${snap_env}"
;;
esac
snapcraftctl set-grade "stable"
snapcraftctl set-version "${version}"
@@ -57,10 +67,15 @@ parts:
*) echo "unsupported architecture: $(uname -m)"; exit 1;;
esac
yq_version=3.4.1
# Workaround to get latest release from github (to not use github token).
# Get the redirection to latest release on github.
yq_latest_url=$(curl -Ls -o /dev/null -w %{url_effective} "https://${yq_pkg}/releases/latest")
# The redirected url should include the latest release version
# https://github.com/mikefarah/yq/releases/tag/<VERSION-HERE>
yq_version=$(basename "${yq_latest_url}")
yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos}_${goarch}"
curl -o "${yq_path}" -LSsf "${yq_url}"
chmod +x "${yq_path}"
curl -o "${yq_path}" -LSsf ${yq_url}
chmod +x ${yq_path}
kata_dir=gopath/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
version="$(${yq_path} r ${kata_dir}/versions.yaml languages.golang.meta.newest-version)"
@@ -305,8 +320,4 @@ parts:
apps:
runtime:
command: usr/bin/kata-runtime
shim:
command: usr/bin/containerd-shim-kata-v2
collect-data:
command: usr/bin/kata-collect-data.sh

592
src/agent/Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -11,7 +11,7 @@ rustjail = { path = "rustjail" }
protocols = { path = "protocols" }
netlink = { path = "netlink", features = ["with-log", "with-agent-handler"] }
lazy_static = "1.3.0"
ttrpc = "0.3.0"
ttrpc = { git = "https://github.com/containerd/ttrpc-rust.git", branch="0.3.0" }
protobuf = "=2.14.0"
libc = "0.2.58"
nix = "0.17.0"
@@ -21,24 +21,18 @@ signal-hook = "0.1.9"
scan_fmt = "0.2.3"
scopeguard = "1.0.0"
regex = "1"
# slog:
# - Dynamic keys required to allow HashMap keys to be slog::Serialized.
# - The 'max_*' features allow changing the log level at runtime
# (by stopping the compiler from removing log calls).
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
slog-scope = "4.1.2"
# Redirect ttrpc log calls
slog-stdlog = "4.0.0"
log = "0.4.11"
# for testing
tempfile = "3.1.0"
prometheus = { version = "0.9.0", features = ["process"] }
procfs = "0.7.9"
anyhow = "1.0.32"
cgroups = { package = "cgroups-rs", version = "0.2.0" }
cgroups = { git = "https://github.com/kata-containers/cgroups-rs", branch = "stable-0.1.1"}
[workspace]
members = [
@@ -47,6 +41,3 @@ members = [
"protocols",
"rustjail",
]
[profile.release]
lto = true

View File

@@ -25,6 +25,11 @@ export VERSION_COMMIT := $(if $(COMMIT),$(VERSION)-$(COMMIT),$(VERSION))
BUILD_TYPE = release
# set proto file to generate
ifdef proto
PROTO_FILE=${proto}
endif
ARCH = $(shell uname -m)
LIBC ?= musl
ifneq ($(LIBC),musl)
@@ -116,12 +121,6 @@ optimize: $(SOURCES) | show-summary show-header
show-header:
@printf "%s - version %s (commit %s)\n\n" "$(TARGET)" "$(VERSION)" "$(COMMIT_MSG)"
clippy: $(GENERATED_CODE)
cargo clippy --all-targets --all-features --release \
-- \
-Aclippy::redundant_allocation \
-D warnings
$(GENERATED_FILES): %: %.in
@sed $(foreach r,$(GENERATED_REPLACEMENTS),-e 's|@$r@|$($r)|g') "$<" > "$@"
@@ -170,4 +169,4 @@ help: show-summary
optimize
generate-protocols:
protocols/hack/update-generated-proto.sh all
protocols/hack/update-generated-proto.sh "${PROTO_FILE}"

View File

@@ -39,22 +39,11 @@ After that, we drafted the initial code here, and any contributions are welcome.
## Getting Started
### Build from Source
The rust-agent needs to be built statically and linked with `musl`
> **Note:** skip this step for ppc64le; the build scripts explicitly use gnu for ppc64le.
The rust-agent needs to be built with Rust newer than 1.37 and statically linked with `musl`.
```bash
$ arch=$(uname -m)
$ rustup target add "${arch}-unknown-linux-musl"
$ sudo ln -s /usr/bin/g++ /bin/musl-g++
```
Download the source files from the Kata Containers repository and build the agent:
```bash
$ GOPATH="${GOPATH:-$HOME/go}"
$ dir="$GOPATH/src/github.com/kata-containers"
$ git -C ${dir} clone --depth 1 https://github.com/kata-containers/kata-containers
$ make -C ${dir}/kata-containers/src/agent
rustup target add x86_64-unknown-linux-musl
sudo ln -s /usr/bin/g++ /bin/musl-g++
cargo build --target x86_64-unknown-linux-musl --release
```
## Run Kata CI with rust-agent

View File

@@ -1 +0,0 @@
2.0.0

src/agent/VERSION (symbolic link, 1 line)
View File

@@ -0,0 +1 @@
../../VERSION

View File

@@ -20,5 +20,3 @@ LimitNOFILE=infinity
# the runtime handles shutting down the VM.
ExecStop=/bin/sync ; /usr/bin/systemctl --force poweroff
FailureAction=poweroff
# Discourage OOM-killer from touching the agent
OOMScoreAdjust=-997

View File

@@ -142,7 +142,7 @@ pub struct User {
pub gid: u32,
#[serde(
default,
rename = "additionalGids",
rename = "addtionalGids",
skip_serializing_if = "Vec::is_empty"
)]
pub additional_gids: Vec<u32>,
@@ -302,7 +302,6 @@ pub struct LinuxBlockIODevice {
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
pub struct LinuxWeightDevice {
#[serde(flatten)]
pub blk: LinuxBlockIODevice,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub weight: Option<u16>,
@@ -316,7 +315,6 @@ pub struct LinuxWeightDevice {
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
pub struct LinuxThrottleDevice {
#[serde(flatten)]
pub blk: LinuxBlockIODevice,
#[serde(default)]
pub rate: u64,
@@ -377,7 +375,7 @@ pub struct LinuxMemory {
#[serde(default, skip_serializing_if = "Option::is_none", rename = "kernelTCP")]
pub kernel_tcp: Option<i64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub swappiness: Option<i64>,
pub swapiness: Option<i64>,
#[serde(
default,
skip_serializing_if = "Option::is_none",
@@ -835,7 +833,7 @@ mod tests {
}
#[test]
fn test_deserialize_spec() {
fn test_deserialize_sepc() {
let data = r#"{
"ociVersion": "1.0.1",
"process": {
@@ -1120,28 +1118,36 @@ mod tests {
"leafWeight": 10,
"weightDevice": [
{
"major": 8,
"minor": 0,
"blk": {
"major": 8,
"minor": 0
},
"weight": 500,
"leafWeight": 300
},
{
"major": 8,
"minor": 16,
"blk":{
"major": 8,
"minor": 16
},
"weight": 500
}
],
"throttleReadBpsDevice": [
{
"major": 8,
"minor": 0,
"blk":{
"major": 8,
"minor": 0
},
"rate": 600
}
],
"throttleWriteIOPSDevice": [
{
"major": 8,
"minor": 16,
"blk":{
"major": 8,
"minor": 16
},
"rate": 300
}
]
@@ -1217,7 +1223,8 @@ mod tests {
uid: 1,
gid: 1,
// incompatible with oci
additional_gids: vec![5, 6],
// additional_gids: vec![5, 6],
additional_gids: vec![],
username: "".to_string(),
},
args: vec!["sh".to_string()],
@@ -1430,7 +1437,8 @@ mod tests {
swap: Some(536870912),
kernel: Some(-1),
kernel_tcp: Some(-1),
swappiness: Some(0),
// incompatible with oci
swapiness: None,
disable_oom_killer: Some(false),
}),
cpu: Some(crate::LinuxCPU {
@@ -1583,6 +1591,25 @@ mod tests {
vm: None,
};
// Warning: incompatible with OCI: https://github.com/opencontainers/runtime-spec/blob/master/config.md
// 1. User uses addtionalGids while OCI uses additionalGids
// 2. LinuxMemory uses swapiness while OCI uses swappiness
// 3. LinuxWeightDevice nests the device numbers under blk
// {
// "blk": {
// "major": 8,
// "minor": 0
// },
// "weight": 500,
// "leafWeight": 300
// }
// oci without blk
// {
// "major": 8,
// "minor": 0,
// "weight": 500,
// "leafWeight": 300
// }
let current: crate::Spec = serde_json::from_str(data).unwrap();
assert_eq!(expected, current);
}
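
The comments above record where this crate's field names and JSON shapes diverge from the OCI runtime spec. The standalone sketch below (simplified stand-in types, not the real `User`/`LinuxWeightDevice` definitions) shows how `#[serde(rename = ...)]` and `#[serde(flatten)]` control exactly those two aspects: the wire name of a field, and whether nested struct fields appear inline or under a sub-object.

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, Default)]
struct User {
    // The JSON key need not match the Rust field name; a misspelled rename
    // (e.g. "addtionalGids") silently produces an incompatible wire format.
    #[serde(rename = "additionalGids", default)]
    additional_gids: Vec<u32>,
}

#[derive(Serialize, Deserialize, Debug)]
struct Blk {
    major: i64,
    minor: i64,
}

// With `flatten`, Blk's fields appear at the top level (the OCI layout).
#[derive(Serialize, Deserialize, Debug)]
struct FlatWeightDevice {
    #[serde(flatten)]
    blk: Blk,
    weight: u16,
}

// Without `flatten`, the same data serializes under a nested "blk" object.
#[derive(Serialize, Deserialize, Debug)]
struct NestedWeightDevice {
    blk: Blk,
    weight: u16,
}

fn main() -> Result<(), serde_json::Error> {
    let user: User = serde_json::from_str(r#"{"additionalGids":[5,6]}"#)?;
    assert_eq!(user.additional_gids, vec![5, 6]);

    let flat = FlatWeightDevice { blk: Blk { major: 8, minor: 0 }, weight: 500 };
    let v = serde_json::to_value(&flat)?;
    assert_eq!(v["major"], 8); // no "blk" wrapper in the flattened form

    let nested: NestedWeightDevice =
        serde_json::from_str(r#"{"blk":{"major":8,"minor":0},"weight":500}"#)?;
    assert_eq!(nested.blk.minor, 0);
    Ok(())
}
```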

View File

@@ -4,6 +4,7 @@
//
use serde::{Deserialize, Serialize};
use serde_json;
use std::error;
use std::fmt::{Display, Formatter, Result as FmtResult};

View File

@@ -5,9 +5,6 @@ authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
edition = "2018"
[dependencies]
ttrpc = "0.3.0"
ttrpc = { git = "https://github.com/containerd/ttrpc-rust.git", branch="0.3.0" }
protobuf = "=2.14.0"
futures = "0.1.27"
[build-dependencies]
ttrpc-codegen = "0.1.2"

View File

@@ -1,55 +0,0 @@
// Copyright (c) 2020 Ant Group
//
// SPDX-License-Identifier: Apache-2.0
//
use std::fs::File;
use std::io::{Read, Write};
fn main() {
let protos = vec![
"protos/types.proto",
"protos/agent.proto",
"protos/health.proto",
"protos/google/protobuf/empty.proto",
"protos/oci.proto",
];
// Tell Cargo that if the .proto files changed, to rerun this build script.
protos
.iter()
.for_each(|p| println!("cargo:rerun-if-changed={}", &p));
ttrpc_codegen::Codegen::new()
.out_dir("src")
.inputs(&protos)
.include("protos")
.rust_protobuf()
.run()
.expect("Gen codes failed.");
// There is a message named 'Box' in oci.proto,
// so a struct named 'Box' is generated; we should replace Box<Self> with ::std::boxed::Box<Self>
// to avoid the conflict.
replace_text_in_file(
"src/oci.rs",
"self: Box<Self>",
"self: ::std::boxed::Box<Self>",
)
.unwrap();
}
fn replace_text_in_file(file_name: &str, from: &str, to: &str) -> Result<(), std::io::Error> {
let mut src = File::open(file_name)?;
let mut contents = String::new();
src.read_to_string(&mut contents).unwrap();
drop(src);
let new_contents = contents.replace(from, to);
let mut dst = File::create(&file_name)?;
dst.write_all(new_contents.as_bytes())?;
Ok(())
}
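
The `build.rs` removed above post-processes the generated `oci.rs` because `oci.proto` defines a message called `Box`, and the resulting Rust struct shadows the prelude's `std::boxed::Box` inside that module. A standalone sketch of the name clash and the fully-qualified workaround (the types here are invented for illustration):

```rust
// A local type named `Box`, like the struct generated from oci.proto's `Box`
// message, shadows the `std::boxed::Box` normally available from the prelude.
#[derive(Debug, Default)]
struct Box {
    height: i64,
    width: i64,
}

trait IntoAny {
    // Writing the receiver as plain `Box<Self>` in this module would resolve
    // to the struct above and fail to compile; the fully qualified path keeps
    // it pointing at the heap-allocating standard-library Box.
    fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn std::any::Any>;
}

impl IntoAny for Box {
    fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn std::any::Any> {
        self // unsized coercion to Box<dyn Any>
    }
}

fn main() {
    let boxed = ::std::boxed::Box::new(Box { height: 1, width: 2 });
    let any = boxed.into_any();
    assert!(any.downcast_ref::<Box>().is_some());
}
```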

View File

@@ -1,7 +1,7 @@
#!/bin/bash
# //
# // Copyright (c) 2020 Ant Group
# // Copyright 2020 Ant Financial
# //
# // SPDX-License-Identifier: Apache-2.0
# //
@@ -47,11 +47,11 @@ show_usage() {
}
generate_go_sources() {
local cmd="protoc -I$GOPATH/src:$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos \
local cmd="protoc -I$GOPATH/src/github.com/kata-containers/agent/vendor/github.com/gogo/protobuf:$GOPATH/src/github.com/kata-containers/agent/vendor:$GOPATH/src/github.com/gogo/protobuf:$GOPATH/src/github.com/gogo/googleapis:$GOPATH/src:$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos \
--gogottrpc_out=plugins=ttrpc+fieldpath,\
import_path=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc,\
\
Mgithub.com/kata-containers/kata-containers/src/agent/protocols/protos/types.proto=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols,\
Mgithub.com/kata-containers/kata-containers/src/agent/protocols/protos/github.com/kata-containers/agent/pkg/types/types.proto=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols,\
\
Mgithub.com/kata-containers/kata-containers/src/agent/protocols/protos/oci.proto=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc,\
\
@@ -64,12 +64,31 @@ $GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/proto
[ $? -eq 0 ] || die "Failed to generate golang file from $1"
}
generate_rust_sources() {
local cmd="protoc --rust_out=./protocols/src/ \
--ttrpc_out=./protocols/src/,plugins=ttrpc:./protocols/src/ \
--plugin=protoc-gen-ttrpc=`which ttrpc_rust_plugin` \
-I $GOPATH/src/github.com/kata-containers/agent/vendor/github.com/gogo/protobuf:$GOPATH/src/github.com/kata-containers/agent/vendor:$GOPATH/src/github.com/gogo/protobuf:$GOPATH/src/github.com/gogo/googleapis:$GOPATH/src:$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos \
$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos/$1"
echo $cmd
$cmd
[ $? -eq 0 ] || die "Failed to generate rust file from $1"
if [ "$1" = "oci.proto" ]; then
# Need to change Box<Self> to ::std::boxed::Box<Self> because there is another struct named Box
sed 's/fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> {/fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<::std::any::Any> {/g' ./protocols/src/oci.rs > ./protocols/src/new_oci.rs
sed 's/fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {/fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {/g' ./protocols/src/oci.rs > ./protocols/src/new_oci.rs
mv ./protocols/src/new_oci.rs ./protocols/src/oci.rs
fi;
}
if [ "$(basename $(pwd))" != "agent" ]; then
die "Please go to directory of protocols before execute this shell"
fi
# Protocol buffer files required to generate golang/rust bindings.
proto_files_list=(agent.proto health.proto oci.proto types.proto)
proto_files_list=(agent.proto health.proto oci.proto github.com/kata-containers/agent/pkg/types/types.proto)
if [ "$1" = "" ]; then
show_usage "${proto_files_list[@]}"
@@ -80,6 +99,12 @@ fi;
which protoc
[ $? -eq 0 ] || die "Please install protoc from github.com/protocolbuffers/protobuf"
which protoc-gen-rust
[ $? -eq 0 ] || die "Please install protobuf-codegen from github.com/pingcap/grpc-rs"
which ttrpc_rust_plugin
[ $? -eq 0 ] || die "Please install ttrpc_rust_plugin from https://github.com/containerd/ttrpc-rust"
which protoc-gen-gogottrpc
[ $? -eq 0 ] || die "Please install protoc-gen-gogottrpc from https://github.com/containerd/ttrpc"
@@ -93,6 +118,10 @@ if [ "$target" = "all" ]; then
echo -e "\n [golang] compiling ${f} ..."
generate_go_sources $f
echo -e " [golang] ${f} compiled\n"
echo -e "\n [rust] compiling ${f} ..."
generate_rust_sources $f
echo -e " [rust] ${f} compiled\n"
done
else
# compile individual proto file
@@ -101,6 +130,10 @@ else
echo -e "\n [golang] compiling ${target} ..."
generate_go_sources $target
echo -e " [golang] ${target} compiled\n"
echo -e "\n [rust] compiling ${target} ..."
generate_rust_sources $target
echo -e " [rust] ${target} compiled\n"
fi
done
fi;

View File

@@ -1,6 +1,6 @@
//
// Copyright 2017 HyperHQ Inc.
// Copyright (c) 2019-2020 Ant Group
// Copyright 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -11,8 +11,8 @@ option go_package = "github.com/kata-containers/kata-containers/src/runtime/virt
package grpc;
import "oci.proto";
import "types.proto";
import "github.com/kata-containers/kata-containers/src/agent/protocols/protos/oci.proto";
import "github.com/kata-containers/kata-containers/src/agent/protocols/protos/github.com/kata-containers/agent/pkg/types/types.proto";
import "google/protobuf/empty.proto";

View File

@@ -1,6 +1,6 @@
//
// Copyright 2018 Intel Corporation.
// Copyright (c) 2019-2020 Ant Group
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//

View File

@@ -1,6 +1,6 @@
//
// Copyright (c) 2017 HyperHQ Inc.
// Copyright (c) 2019-2020 Ant Group
// Copyright 2017 HyperHQ Inc.
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -11,7 +11,7 @@ option go_package = "github.com/kata-containers/kata-containers/src/runtime/virt
package grpc;
import "gogo/protobuf/gogoproto/gogo.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.equal_all) = true;
option (gogoproto.populate_all) = true;

View File

@@ -1,6 +1,6 @@
//
// Copyright (c) 2017 Intel Corporation
// Copyright (c) 2019-2020 Ant Group
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
@@ -11,7 +11,8 @@ option go_package = "github.com/kata-containers/kata-containers/src/runtime/virt
package grpc;
import "gogo/protobuf/gogoproto/gogo.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/wrappers.proto";
option (gogoproto.equal_all) = true;
option (gogoproto.populate_all) = true;

File diff suppressed because it is too large.

View File

@@ -0,0 +1,808 @@
// Copyright (c) 2020 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
// This file is generated by ttrpc-compiler 0.3.0. Do not edit
// @generated
// https://github.com/Manishearth/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clipto_camel_casepy)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unsafe_code)]
#![allow(unused_imports)]
#![allow(unused_results)]
use protobuf::{CodedInputStream, CodedOutputStream, Message};
use std::collections::HashMap;
use std::sync::Arc;
#[derive(Clone)]
pub struct AgentServiceClient {
client: ::ttrpc::Client,
}
impl AgentServiceClient {
pub fn new(client: ::ttrpc::Client) -> Self {
AgentServiceClient {
client: client,
}
}
pub fn create_container(&self, req: &super::agent::CreateContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "CreateContainer", cres);
Ok(cres)
}
pub fn start_container(&self, req: &super::agent::StartContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "StartContainer", cres);
Ok(cres)
}
pub fn remove_container(&self, req: &super::agent::RemoveContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "RemoveContainer", cres);
Ok(cres)
}
pub fn exec_process(&self, req: &super::agent::ExecProcessRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ExecProcess", cres);
Ok(cres)
}
pub fn signal_process(&self, req: &super::agent::SignalProcessRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "SignalProcess", cres);
Ok(cres)
}
pub fn wait_process(&self, req: &super::agent::WaitProcessRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::WaitProcessResponse> {
let mut cres = super::agent::WaitProcessResponse::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "WaitProcess", cres);
Ok(cres)
}
pub fn list_processes(&self, req: &super::agent::ListProcessesRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::ListProcessesResponse> {
let mut cres = super::agent::ListProcessesResponse::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ListProcesses", cres);
Ok(cres)
}
pub fn update_container(&self, req: &super::agent::UpdateContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "UpdateContainer", cres);
Ok(cres)
}
pub fn stats_container(&self, req: &super::agent::StatsContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::StatsContainerResponse> {
let mut cres = super::agent::StatsContainerResponse::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "StatsContainer", cres);
Ok(cres)
}
pub fn pause_container(&self, req: &super::agent::PauseContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "PauseContainer", cres);
Ok(cres)
}
pub fn resume_container(&self, req: &super::agent::ResumeContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ResumeContainer", cres);
Ok(cres)
}
pub fn write_stdin(&self, req: &super::agent::WriteStreamRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::WriteStreamResponse> {
let mut cres = super::agent::WriteStreamResponse::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "WriteStdin", cres);
Ok(cres)
}
pub fn read_stdout(&self, req: &super::agent::ReadStreamRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::ReadStreamResponse> {
let mut cres = super::agent::ReadStreamResponse::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ReadStdout", cres);
Ok(cres)
}
pub fn read_stderr(&self, req: &super::agent::ReadStreamRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::ReadStreamResponse> {
let mut cres = super::agent::ReadStreamResponse::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ReadStderr", cres);
Ok(cres)
}
pub fn close_stdin(&self, req: &super::agent::CloseStdinRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "CloseStdin", cres);
Ok(cres)
}
pub fn tty_win_resize(&self, req: &super::agent::TtyWinResizeRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "TtyWinResize", cres);
Ok(cres)
}
pub fn update_interface(&self, req: &super::agent::UpdateInterfaceRequest, timeout_nano: i64) -> ::ttrpc::Result<super::types::Interface> {
let mut cres = super::types::Interface::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "UpdateInterface", cres);
Ok(cres)
}
pub fn update_routes(&self, req: &super::agent::UpdateRoutesRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::Routes> {
let mut cres = super::agent::Routes::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "UpdateRoutes", cres);
Ok(cres)
}
pub fn list_interfaces(&self, req: &super::agent::ListInterfacesRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::Interfaces> {
let mut cres = super::agent::Interfaces::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ListInterfaces", cres);
Ok(cres)
}
pub fn list_routes(&self, req: &super::agent::ListRoutesRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::Routes> {
let mut cres = super::agent::Routes::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ListRoutes", cres);
Ok(cres)
}
pub fn add_arp_neighbors(&self, req: &super::agent::AddARPNeighborsRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "AddARPNeighbors", cres);
Ok(cres)
}
pub fn start_tracing(&self, req: &super::agent::StartTracingRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "StartTracing", cres);
Ok(cres)
}
pub fn stop_tracing(&self, req: &super::agent::StopTracingRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "StopTracing", cres);
Ok(cres)
}
pub fn get_metrics(&self, req: &super::agent::GetMetricsRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::Metrics> {
let mut cres = super::agent::Metrics::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "GetMetrics", cres);
Ok(cres)
}
pub fn create_sandbox(&self, req: &super::agent::CreateSandboxRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "CreateSandbox", cres);
Ok(cres)
}
pub fn destroy_sandbox(&self, req: &super::agent::DestroySandboxRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "DestroySandbox", cres);
Ok(cres)
}
pub fn online_cpu_mem(&self, req: &super::agent::OnlineCPUMemRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "OnlineCPUMem", cres);
Ok(cres)
}
pub fn reseed_random_dev(&self, req: &super::agent::ReseedRandomDevRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ReseedRandomDev", cres);
Ok(cres)
}
pub fn get_guest_details(&self, req: &super::agent::GuestDetailsRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::GuestDetailsResponse> {
let mut cres = super::agent::GuestDetailsResponse::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "GetGuestDetails", cres);
Ok(cres)
}
pub fn mem_hotplug_by_probe(&self, req: &super::agent::MemHotplugByProbeRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "MemHotplugByProbe", cres);
Ok(cres)
}
pub fn set_guest_date_time(&self, req: &super::agent::SetGuestDateTimeRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "SetGuestDateTime", cres);
Ok(cres)
}
pub fn copy_file(&self, req: &super::agent::CopyFileRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
let mut cres = super::empty::Empty::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "CopyFile", cres);
Ok(cres)
}
pub fn get_oom_event(&self, req: &super::agent::GetOOMEventRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::OOMEvent> {
let mut cres = super::agent::OOMEvent::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "GetOOMEvent", cres);
Ok(cres)
}
}
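
For context on how the generated client above is meant to be used: each method wraps `::ttrpc::client_request!` with the RPC path and a nanosecond timeout, so callers only supply a request message and a deadline. A hypothetical sketch, assuming an already-connected `ttrpc::Client` (how that connection is established is outside the code shown here) and the same module layout as this generated file:

```rust
// Hypothetical usage sketch for the generated AgentServiceClient above.
fn create_sandbox_example(conn: ::ttrpc::Client) -> ::ttrpc::Result<()> {
    let agent = AgentServiceClient::new(conn);

    // Request messages are rust-protobuf types; `new()` yields default fields.
    let req = super::agent::CreateSandboxRequest::new();

    // The second argument is the timeout in nanoseconds (5 seconds here).
    let _ack: super::empty::Empty = agent.create_sandbox(&req, 5_000_000_000)?;

    // RPCs with non-empty responses return the generated response type.
    let details =
        agent.get_guest_details(&super::agent::GuestDetailsRequest::new(), 5_000_000_000)?;
    let _ = details; // inspect GuestDetailsResponse fields as needed
    Ok(())
}
```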
struct CreateContainerMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for CreateContainerMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, CreateContainerRequest, create_container);
Ok(())
}
}
struct StartContainerMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for StartContainerMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, StartContainerRequest, start_container);
Ok(())
}
}
struct RemoveContainerMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for RemoveContainerMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, RemoveContainerRequest, remove_container);
Ok(())
}
}
struct ExecProcessMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for ExecProcessMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, ExecProcessRequest, exec_process);
Ok(())
}
}
struct SignalProcessMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for SignalProcessMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, SignalProcessRequest, signal_process);
Ok(())
}
}
struct WaitProcessMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for WaitProcessMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, WaitProcessRequest, wait_process);
Ok(())
}
}
struct ListProcessesMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for ListProcessesMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, ListProcessesRequest, list_processes);
Ok(())
}
}
struct UpdateContainerMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for UpdateContainerMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, UpdateContainerRequest, update_container);
Ok(())
}
}
struct StatsContainerMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for StatsContainerMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, StatsContainerRequest, stats_container);
Ok(())
}
}
struct PauseContainerMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for PauseContainerMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, PauseContainerRequest, pause_container);
Ok(())
}
}
struct ResumeContainerMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for ResumeContainerMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, ResumeContainerRequest, resume_container);
Ok(())
}
}
struct WriteStdinMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for WriteStdinMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, WriteStreamRequest, write_stdin);
Ok(())
}
}
struct ReadStdoutMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for ReadStdoutMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, ReadStreamRequest, read_stdout);
Ok(())
}
}
struct ReadStderrMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for ReadStderrMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, ReadStreamRequest, read_stderr);
Ok(())
}
}
struct CloseStdinMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for CloseStdinMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, CloseStdinRequest, close_stdin);
Ok(())
}
}
struct TtyWinResizeMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for TtyWinResizeMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, TtyWinResizeRequest, tty_win_resize);
Ok(())
}
}
struct UpdateInterfaceMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for UpdateInterfaceMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, UpdateInterfaceRequest, update_interface);
Ok(())
}
}
struct UpdateRoutesMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for UpdateRoutesMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, UpdateRoutesRequest, update_routes);
Ok(())
}
}
struct ListInterfacesMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for ListInterfacesMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, ListInterfacesRequest, list_interfaces);
Ok(())
}
}
struct ListRoutesMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for ListRoutesMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, ListRoutesRequest, list_routes);
Ok(())
}
}
struct AddArpNeighborsMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for AddArpNeighborsMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, AddARPNeighborsRequest, add_arp_neighbors);
Ok(())
}
}
struct StartTracingMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for StartTracingMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, StartTracingRequest, start_tracing);
Ok(())
}
}
struct StopTracingMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for StopTracingMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, StopTracingRequest, stop_tracing);
Ok(())
}
}
struct GetMetricsMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for GetMetricsMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, GetMetricsRequest, get_metrics);
Ok(())
}
}
struct CreateSandboxMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for CreateSandboxMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, CreateSandboxRequest, create_sandbox);
Ok(())
}
}
struct DestroySandboxMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for DestroySandboxMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, DestroySandboxRequest, destroy_sandbox);
Ok(())
}
}
struct OnlineCpuMemMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for OnlineCpuMemMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, OnlineCPUMemRequest, online_cpu_mem);
Ok(())
}
}
struct ReseedRandomDevMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for ReseedRandomDevMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, ReseedRandomDevRequest, reseed_random_dev);
Ok(())
}
}
struct GetGuestDetailsMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for GetGuestDetailsMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, GuestDetailsRequest, get_guest_details);
Ok(())
}
}
struct MemHotplugByProbeMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for MemHotplugByProbeMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, MemHotplugByProbeRequest, mem_hotplug_by_probe);
Ok(())
}
}
struct SetGuestDateTimeMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for SetGuestDateTimeMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, SetGuestDateTimeRequest, set_guest_date_time);
Ok(())
}
}
struct CopyFileMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for CopyFileMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, CopyFileRequest, copy_file);
Ok(())
}
}
struct GetOomEventMethod {
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for GetOomEventMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, agent, GetOOMEventRequest, get_oom_event);
Ok(())
}
}
pub trait AgentService {
fn create_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::CreateContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/CreateContainer is not supported".to_string())))
}
fn start_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::StartContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/StartContainer is not supported".to_string())))
}
fn remove_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::RemoveContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/RemoveContainer is not supported".to_string())))
}
fn exec_process(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ExecProcessRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ExecProcess is not supported".to_string())))
}
fn signal_process(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::SignalProcessRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/SignalProcess is not supported".to_string())))
}
fn wait_process(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::WaitProcessRequest) -> ::ttrpc::Result<super::agent::WaitProcessResponse> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/WaitProcess is not supported".to_string())))
}
fn list_processes(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ListProcessesRequest) -> ::ttrpc::Result<super::agent::ListProcessesResponse> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ListProcesses is not supported".to_string())))
}
fn update_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::UpdateContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/UpdateContainer is not supported".to_string())))
}
fn stats_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::StatsContainerRequest) -> ::ttrpc::Result<super::agent::StatsContainerResponse> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/StatsContainer is not supported".to_string())))
}
fn pause_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::PauseContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/PauseContainer is not supported".to_string())))
}
fn resume_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ResumeContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ResumeContainer is not supported".to_string())))
}
fn write_stdin(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::WriteStreamRequest) -> ::ttrpc::Result<super::agent::WriteStreamResponse> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/WriteStdin is not supported".to_string())))
}
fn read_stdout(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ReadStreamRequest) -> ::ttrpc::Result<super::agent::ReadStreamResponse> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ReadStdout is not supported".to_string())))
}
fn read_stderr(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ReadStreamRequest) -> ::ttrpc::Result<super::agent::ReadStreamResponse> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ReadStderr is not supported".to_string())))
}
fn close_stdin(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::CloseStdinRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/CloseStdin is not supported".to_string())))
}
fn tty_win_resize(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::TtyWinResizeRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/TtyWinResize is not supported".to_string())))
}
fn update_interface(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::UpdateInterfaceRequest) -> ::ttrpc::Result<super::types::Interface> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/UpdateInterface is not supported".to_string())))
}
fn update_routes(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::UpdateRoutesRequest) -> ::ttrpc::Result<super::agent::Routes> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/UpdateRoutes is not supported".to_string())))
}
fn list_interfaces(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ListInterfacesRequest) -> ::ttrpc::Result<super::agent::Interfaces> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ListInterfaces is not supported".to_string())))
}
fn list_routes(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ListRoutesRequest) -> ::ttrpc::Result<super::agent::Routes> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ListRoutes is not supported".to_string())))
}
fn add_arp_neighbors(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::AddARPNeighborsRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/AddARPNeighbors is not supported".to_string())))
}
fn start_tracing(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::StartTracingRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/StartTracing is not supported".to_string())))
}
fn stop_tracing(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::StopTracingRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/StopTracing is not supported".to_string())))
}
fn get_metrics(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::GetMetricsRequest) -> ::ttrpc::Result<super::agent::Metrics> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/GetMetrics is not supported".to_string())))
}
fn create_sandbox(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::CreateSandboxRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/CreateSandbox is not supported".to_string())))
}
fn destroy_sandbox(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::DestroySandboxRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/DestroySandbox is not supported".to_string())))
}
fn online_cpu_mem(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::OnlineCPUMemRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/OnlineCPUMem is not supported".to_string())))
}
fn reseed_random_dev(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ReseedRandomDevRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ReseedRandomDev is not supported".to_string())))
}
fn get_guest_details(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::GuestDetailsRequest) -> ::ttrpc::Result<super::agent::GuestDetailsResponse> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/GetGuestDetails is not supported".to_string())))
}
fn mem_hotplug_by_probe(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::MemHotplugByProbeRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/MemHotplugByProbe is not supported".to_string())))
}
fn set_guest_date_time(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::SetGuestDateTimeRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/SetGuestDateTime is not supported".to_string())))
}
fn copy_file(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::CopyFileRequest) -> ::ttrpc::Result<super::empty::Empty> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/CopyFile is not supported".to_string())))
}
fn get_oom_event(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::GetOOMEventRequest) -> ::ttrpc::Result<super::agent::OOMEvent> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/GetOOMEvent is not supported".to_string())))
}
}
pub fn create_agent_service(service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>) -> HashMap <String, Box<dyn ::ttrpc::MethodHandler + Send + Sync>> {
let mut methods = HashMap::new();
methods.insert("/grpc.AgentService/CreateContainer".to_string(),
std::boxed::Box::new(CreateContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/StartContainer".to_string(),
std::boxed::Box::new(StartContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/RemoveContainer".to_string(),
std::boxed::Box::new(RemoveContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/ExecProcess".to_string(),
std::boxed::Box::new(ExecProcessMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/SignalProcess".to_string(),
std::boxed::Box::new(SignalProcessMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/WaitProcess".to_string(),
std::boxed::Box::new(WaitProcessMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/ListProcesses".to_string(),
std::boxed::Box::new(ListProcessesMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/UpdateContainer".to_string(),
std::boxed::Box::new(UpdateContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/StatsContainer".to_string(),
std::boxed::Box::new(StatsContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/PauseContainer".to_string(),
std::boxed::Box::new(PauseContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/ResumeContainer".to_string(),
std::boxed::Box::new(ResumeContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/WriteStdin".to_string(),
std::boxed::Box::new(WriteStdinMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/ReadStdout".to_string(),
std::boxed::Box::new(ReadStdoutMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/ReadStderr".to_string(),
std::boxed::Box::new(ReadStderrMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/CloseStdin".to_string(),
std::boxed::Box::new(CloseStdinMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/TtyWinResize".to_string(),
std::boxed::Box::new(TtyWinResizeMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/UpdateInterface".to_string(),
std::boxed::Box::new(UpdateInterfaceMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/UpdateRoutes".to_string(),
std::boxed::Box::new(UpdateRoutesMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/ListInterfaces".to_string(),
std::boxed::Box::new(ListInterfacesMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/ListRoutes".to_string(),
std::boxed::Box::new(ListRoutesMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/AddARPNeighbors".to_string(),
std::boxed::Box::new(AddArpNeighborsMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/StartTracing".to_string(),
std::boxed::Box::new(StartTracingMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/StopTracing".to_string(),
std::boxed::Box::new(StopTracingMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/GetMetrics".to_string(),
std::boxed::Box::new(GetMetricsMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/CreateSandbox".to_string(),
std::boxed::Box::new(CreateSandboxMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/DestroySandbox".to_string(),
std::boxed::Box::new(DestroySandboxMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/OnlineCPUMem".to_string(),
std::boxed::Box::new(OnlineCpuMemMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/ReseedRandomDev".to_string(),
std::boxed::Box::new(ReseedRandomDevMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/GetGuestDetails".to_string(),
std::boxed::Box::new(GetGuestDetailsMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/MemHotplugByProbe".to_string(),
std::boxed::Box::new(MemHotplugByProbeMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/SetGuestDateTime".to_string(),
std::boxed::Box::new(SetGuestDateTimeMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/CopyFile".to_string(),
std::boxed::Box::new(CopyFileMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.AgentService/GetOOMEvent".to_string(),
std::boxed::Box::new(GetOomEventMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods
}
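
On the service side, the trait above gives every RPC a default NOT_FOUND implementation, and `create_agent_service` packs a boxed implementation into the per-path method map that a ttrpc server dispatches on. A minimal, hypothetical sketch of overriding a single method and building that map (the ttrpc server setup itself is omitted, since it is not part of the code shown here):

```rust
use std::collections::HashMap;
use std::sync::Arc;

struct MyAgent;

impl AgentService for MyAgent {
    // Only this RPC is overridden; every other method keeps the default
    // NOT_FOUND behaviour inherited from the trait above.
    fn get_guest_details(
        &self,
        _ctx: &::ttrpc::TtrpcContext,
        _req: super::agent::GuestDetailsRequest,
    ) -> ::ttrpc::Result<super::agent::GuestDetailsResponse> {
        Ok(super::agent::GuestDetailsResponse::new())
    }
}

fn build_methods() -> HashMap<String, Box<dyn ::ttrpc::MethodHandler + Send + Sync>> {
    let service: Arc<Box<dyn AgentService + Send + Sync>> = Arc::new(Box::new(MyAgent));
    // Keys are RPC paths such as "/grpc.AgentService/GetGuestDetails".
    create_agent_service(service)
}
```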

View File

@@ -0,0 +1,242 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
// This file is generated by rust-protobuf 2.14.0. Do not edit
// @generated
// https://github.com/rust-lang/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unsafe_code)]
#![allow(unused_imports)]
#![allow(unused_results)]
//! Generated file from `google/protobuf/empty.proto`
use protobuf::Message as Message_imported_for_functions;
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;
/// Generated files are compatible only with the same version
/// of protobuf runtime.
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_14_0;
#[derive(PartialEq,Clone,Default)]
pub struct Empty {
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Empty {
fn default() -> &'a Empty {
<Empty as ::protobuf::Message>::default_instance()
}
}
impl Empty {
pub fn new() -> Empty {
::std::default::Default::default()
}
}
impl ::protobuf::Message for Empty {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Empty {
Empty::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
unsafe {
descriptor.get(|| {
let fields = ::std::vec::Vec::new();
::protobuf::reflect::MessageDescriptor::new_pb_name::<Empty>(
"Empty",
fields,
file_descriptor_proto()
)
})
}
}
fn default_instance() -> &'static Empty {
static mut instance: ::protobuf::lazy::Lazy<Empty> = ::protobuf::lazy::Lazy::INIT;
unsafe {
instance.get(Empty::new)
}
}
}
impl ::protobuf::Clear for Empty {
fn clear(&mut self) {
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for Empty {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for Empty {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
static file_descriptor_proto_data: &'static [u8] = b"\
\n\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\x07\n\x05Empt\
yBT\n\x13com.google.protobufB\nEmptyProtoP\x01Z\x05types\xf8\x01\x01\xa2\
\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesJ\xa9\x14\n\x06\x12\
\x04\x1e\03\x10\n\xcc\x0c\n\x01\x0c\x12\x03\x1e\0\x122\xc1\x0c\x20Protoc\
ol\x20Buffers\x20-\x20Google's\x20data\x20interchange\x20format\n\x20Cop\
yright\x202008\x20Google\x20Inc.\x20\x20All\x20rights\x20reserved.\n\x20\
https://developers.google.com/protocol-buffers/\n\n\x20Redistribution\
\x20and\x20use\x20in\x20source\x20and\x20binary\x20forms,\x20with\x20or\
\x20without\n\x20modification,\x20are\x20permitted\x20provided\x20that\
\x20the\x20following\x20conditions\x20are\n\x20met:\n\n\x20\x20\x20\x20\
\x20*\x20Redistributions\x20of\x20source\x20code\x20must\x20retain\x20th\
e\x20above\x20copyright\n\x20notice,\x20this\x20list\x20of\x20conditions\
\x20and\x20the\x20following\x20disclaimer.\n\x20\x20\x20\x20\x20*\x20Red\
istributions\x20in\x20binary\x20form\x20must\x20reproduce\x20the\x20abov\
e\n\x20copyright\x20notice,\x20this\x20list\x20of\x20conditions\x20and\
\x20the\x20following\x20disclaimer\n\x20in\x20the\x20documentation\x20an\
d/or\x20other\x20materials\x20provided\x20with\x20the\n\x20distribution.\
\n\x20\x20\x20\x20\x20*\x20Neither\x20the\x20name\x20of\x20Google\x20Inc\
.\x20nor\x20the\x20names\x20of\x20its\n\x20contributors\x20may\x20be\x20\
used\x20to\x20endorse\x20or\x20promote\x20products\x20derived\x20from\n\
\x20this\x20software\x20without\x20specific\x20prior\x20written\x20permi\
ssion.\n\n\x20THIS\x20SOFTWARE\x20IS\x20PROVIDED\x20BY\x20THE\x20COPYRIG\
HT\x20HOLDERS\x20AND\x20CONTRIBUTORS\n\x20\"AS\x20IS\"\x20AND\x20ANY\x20\
EXPRESS\x20OR\x20IMPLIED\x20WARRANTIES,\x20INCLUDING,\x20BUT\x20NOT\n\
\x20LIMITED\x20TO,\x20THE\x20IMPLIED\x20WARRANTIES\x20OF\x20MERCHANTABIL\
ITY\x20AND\x20FITNESS\x20FOR\n\x20A\x20PARTICULAR\x20PURPOSE\x20ARE\x20D\
ISCLAIMED.\x20IN\x20NO\x20EVENT\x20SHALL\x20THE\x20COPYRIGHT\n\x20OWNER\
\x20OR\x20CONTRIBUTORS\x20BE\x20LIABLE\x20FOR\x20ANY\x20DIRECT,\x20INDIR\
ECT,\x20INCIDENTAL,\n\x20SPECIAL,\x20EXEMPLARY,\x20OR\x20CONSEQUENTIAL\
\x20DAMAGES\x20(INCLUDING,\x20BUT\x20NOT\n\x20LIMITED\x20TO,\x20PROCUREM\
ENT\x20OF\x20SUBSTITUTE\x20GOODS\x20OR\x20SERVICES;\x20LOSS\x20OF\x20USE\
,\n\x20DATA,\x20OR\x20PROFITS;\x20OR\x20BUSINESS\x20INTERRUPTION)\x20HOW\
EVER\x20CAUSED\x20AND\x20ON\x20ANY\n\x20THEORY\x20OF\x20LIABILITY,\x20WH\
ETHER\x20IN\x20CONTRACT,\x20STRICT\x20LIABILITY,\x20OR\x20TORT\n\x20(INC\
LUDING\x20NEGLIGENCE\x20OR\x20OTHERWISE)\x20ARISING\x20IN\x20ANY\x20WAY\
\x20OUT\x20OF\x20THE\x20USE\n\x20OF\x20THIS\x20SOFTWARE,\x20EVEN\x20IF\
\x20ADVISED\x20OF\x20THE\x20POSSIBILITY\x20OF\x20SUCH\x20DAMAGE.\n\n\x08\
\n\x01\x02\x12\x03\x20\x08\x17\n\x08\n\x01\x08\x12\x03\"\0;\n\x0b\n\x04\
\x08\xe7\x07\0\x12\x03\"\0;\n\x0c\n\x05\x08\xe7\x07\0\x02\x12\x03\"\x07\
\x17\n\r\n\x06\x08\xe7\x07\0\x02\0\x12\x03\"\x07\x17\n\x0e\n\x07\x08\xe7\
\x07\0\x02\0\x01\x12\x03\"\x07\x17\n\x0c\n\x05\x08\xe7\x07\0\x07\x12\x03\
\"\x1a:\n\x08\n\x01\x08\x12\x03#\0\x1c\n\x0b\n\x04\x08\xe7\x07\x01\x12\
\x03#\0\x1c\n\x0c\n\x05\x08\xe7\x07\x01\x02\x12\x03#\x07\x11\n\r\n\x06\
\x08\xe7\x07\x01\x02\0\x12\x03#\x07\x11\n\x0e\n\x07\x08\xe7\x07\x01\x02\
\0\x01\x12\x03#\x07\x11\n\x0c\n\x05\x08\xe7\x07\x01\x07\x12\x03#\x14\x1b\
\n\x08\n\x01\x08\x12\x03$\0,\n\x0b\n\x04\x08\xe7\x07\x02\x12\x03$\0,\n\
\x0c\n\x05\x08\xe7\x07\x02\x02\x12\x03$\x07\x13\n\r\n\x06\x08\xe7\x07\
\x02\x02\0\x12\x03$\x07\x13\n\x0e\n\x07\x08\xe7\x07\x02\x02\0\x01\x12\
\x03$\x07\x13\n\x0c\n\x05\x08\xe7\x07\x02\x07\x12\x03$\x16+\n\x08\n\x01\
\x08\x12\x03%\0+\n\x0b\n\x04\x08\xe7\x07\x03\x12\x03%\0+\n\x0c\n\x05\x08\
\xe7\x07\x03\x02\x12\x03%\x07\x1b\n\r\n\x06\x08\xe7\x07\x03\x02\0\x12\
\x03%\x07\x1b\n\x0e\n\x07\x08\xe7\x07\x03\x02\0\x01\x12\x03%\x07\x1b\n\
\x0c\n\x05\x08\xe7\x07\x03\x07\x12\x03%\x1e*\n\x08\n\x01\x08\x12\x03&\0\
\"\n\x0b\n\x04\x08\xe7\x07\x04\x12\x03&\0\"\n\x0c\n\x05\x08\xe7\x07\x04\
\x02\x12\x03&\x07\x1a\n\r\n\x06\x08\xe7\x07\x04\x02\0\x12\x03&\x07\x1a\n\
\x0e\n\x07\x08\xe7\x07\x04\x02\0\x01\x12\x03&\x07\x1a\n\x0c\n\x05\x08\
\xe7\x07\x04\x03\x12\x03&\x1d!\n\x08\n\x01\x08\x12\x03'\0!\n\x0b\n\x04\
\x08\xe7\x07\x05\x12\x03'\0!\n\x0c\n\x05\x08\xe7\x07\x05\x02\x12\x03'\
\x07\x18\n\r\n\x06\x08\xe7\x07\x05\x02\0\x12\x03'\x07\x18\n\x0e\n\x07\
\x08\xe7\x07\x05\x02\0\x01\x12\x03'\x07\x18\n\x0c\n\x05\x08\xe7\x07\x05\
\x07\x12\x03'\x1b\x20\n\x08\n\x01\x08\x12\x03(\0\x1f\n\x0b\n\x04\x08\xe7\
\x07\x06\x12\x03(\0\x1f\n\x0c\n\x05\x08\xe7\x07\x06\x02\x12\x03(\x07\x17\
\n\r\n\x06\x08\xe7\x07\x06\x02\0\x12\x03(\x07\x17\n\x0e\n\x07\x08\xe7\
\x07\x06\x02\0\x01\x12\x03(\x07\x17\n\x0c\n\x05\x08\xe7\x07\x06\x03\x12\
\x03(\x1a\x1e\n\xfb\x02\n\x02\x04\0\x12\x033\0\x10\x1a\xef\x02\x20A\x20g\
eneric\x20empty\x20message\x20that\x20you\x20can\x20re-use\x20to\x20avoi\
d\x20defining\x20duplicated\n\x20empty\x20messages\x20in\x20your\x20APIs\
.\x20A\x20typical\x20example\x20is\x20to\x20use\x20it\x20as\x20the\x20re\
quest\n\x20or\x20the\x20response\x20type\x20of\x20an\x20API\x20method.\
\x20For\x20instance:\n\n\x20\x20\x20\x20\x20service\x20Foo\x20{\n\x20\
\x20\x20\x20\x20\x20\x20rpc\x20Bar(google.protobuf.Empty)\x20returns\x20\
(google.protobuf.Empty);\n\x20\x20\x20\x20\x20}\n\n\x20The\x20JSON\x20re\
presentation\x20for\x20`Empty`\x20is\x20empty\x20JSON\x20object\x20`{}`.\
\n\n\n\n\x03\x04\0\x01\x12\x033\x08\rb\x06proto3\
";
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy::INIT;
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
unsafe {
file_descriptor_proto_lazy.get(|| {
parse_descriptor_proto()
})
}
}
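The generated module above follows the usual rust-protobuf 2.x layout: a `Message` impl per type plus a lazily parsed file descriptor. As a minimal sketch of how callers typically use it (not part of the generated file; the `crate::empty::Empty` import path is an assumption and depends on where this module is mounted):

```rust
use protobuf::Message;

// Hypothetical path to the generated module shown above; adjust as needed.
use crate::empty::Empty;

fn roundtrip() -> protobuf::ProtobufResult<()> {
    let msg = Empty::new();
    // With no unknown fields set, an Empty message serializes to zero bytes.
    let bytes = msg.write_to_bytes()?;
    assert!(bytes.is_empty());
    let parsed: Empty = protobuf::parse_from_bytes(&bytes)?;
    assert_eq!(msg, parsed);
    Ok(())
}
```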


@@ -0,0 +1,672 @@
// This file is generated by rust-protobuf 2.14.0. Do not edit
// @generated
// https://github.com/rust-lang/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unsafe_code)]
#![allow(unused_imports)]
#![allow(unused_results)]
//! Generated file from `github.com/kata-containers/kata-containers/src/agent/protocols/protos/health.proto`
use protobuf::Message as Message_imported_for_functions;
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;
/// Generated files are compatible only with the same version
/// of protobuf runtime.
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_14_0;
#[derive(PartialEq,Clone,Default)]
pub struct CheckRequest {
// message fields
pub service: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a CheckRequest {
fn default() -> &'a CheckRequest {
<CheckRequest as ::protobuf::Message>::default_instance()
}
}
impl CheckRequest {
pub fn new() -> CheckRequest {
::std::default::Default::default()
}
// string service = 1;
pub fn get_service(&self) -> &str {
&self.service
}
pub fn clear_service(&mut self) {
self.service.clear();
}
// Param is passed by value, moved
pub fn set_service(&mut self, v: ::std::string::String) {
self.service = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_service(&mut self) -> &mut ::std::string::String {
&mut self.service
}
// Take field
pub fn take_service(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.service, ::std::string::String::new())
}
}
impl ::protobuf::Message for CheckRequest {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.service)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.service.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.service);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.service.is_empty() {
os.write_string(1, &self.service)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> CheckRequest {
CheckRequest::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"service",
|m: &CheckRequest| { &m.service },
|m: &mut CheckRequest| { &mut m.service },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<CheckRequest>(
"CheckRequest",
fields,
file_descriptor_proto()
)
})
}
}
fn default_instance() -> &'static CheckRequest {
static mut instance: ::protobuf::lazy::Lazy<CheckRequest> = ::protobuf::lazy::Lazy::INIT;
unsafe {
instance.get(CheckRequest::new)
}
}
}
impl ::protobuf::Clear for CheckRequest {
fn clear(&mut self) {
self.service.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for CheckRequest {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for CheckRequest {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct HealthCheckResponse {
// message fields
pub status: HealthCheckResponse_ServingStatus,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a HealthCheckResponse {
fn default() -> &'a HealthCheckResponse {
<HealthCheckResponse as ::protobuf::Message>::default_instance()
}
}
impl HealthCheckResponse {
pub fn new() -> HealthCheckResponse {
::std::default::Default::default()
}
// .grpc.HealthCheckResponse.ServingStatus status = 1;
pub fn get_status(&self) -> HealthCheckResponse_ServingStatus {
self.status
}
pub fn clear_status(&mut self) {
self.status = HealthCheckResponse_ServingStatus::UNKNOWN;
}
// Param is passed by value, moved
pub fn set_status(&mut self, v: HealthCheckResponse_ServingStatus) {
self.status = v;
}
}
impl ::protobuf::Message for HealthCheckResponse {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.status, 1, &mut self.unknown_fields)?
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.status != HealthCheckResponse_ServingStatus::UNKNOWN {
my_size += ::protobuf::rt::enum_size(1, self.status);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.status != HealthCheckResponse_ServingStatus::UNKNOWN {
os.write_enum(1, self.status.value())?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> HealthCheckResponse {
HealthCheckResponse::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<HealthCheckResponse_ServingStatus>>(
"status",
|m: &HealthCheckResponse| { &m.status },
|m: &mut HealthCheckResponse| { &mut m.status },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<HealthCheckResponse>(
"HealthCheckResponse",
fields,
file_descriptor_proto()
)
})
}
}
fn default_instance() -> &'static HealthCheckResponse {
static mut instance: ::protobuf::lazy::Lazy<HealthCheckResponse> = ::protobuf::lazy::Lazy::INIT;
unsafe {
instance.get(HealthCheckResponse::new)
}
}
}
impl ::protobuf::Clear for HealthCheckResponse {
fn clear(&mut self) {
self.status = HealthCheckResponse_ServingStatus::UNKNOWN;
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for HealthCheckResponse {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for HealthCheckResponse {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum HealthCheckResponse_ServingStatus {
UNKNOWN = 0,
SERVING = 1,
NOT_SERVING = 2,
}
impl ::protobuf::ProtobufEnum for HealthCheckResponse_ServingStatus {
fn value(&self) -> i32 {
*self as i32
}
fn from_i32(value: i32) -> ::std::option::Option<HealthCheckResponse_ServingStatus> {
match value {
0 => ::std::option::Option::Some(HealthCheckResponse_ServingStatus::UNKNOWN),
1 => ::std::option::Option::Some(HealthCheckResponse_ServingStatus::SERVING),
2 => ::std::option::Option::Some(HealthCheckResponse_ServingStatus::NOT_SERVING),
_ => ::std::option::Option::None
}
}
fn values() -> &'static [Self] {
static values: &'static [HealthCheckResponse_ServingStatus] = &[
HealthCheckResponse_ServingStatus::UNKNOWN,
HealthCheckResponse_ServingStatus::SERVING,
HealthCheckResponse_ServingStatus::NOT_SERVING,
];
values
}
fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::EnumDescriptor> = ::protobuf::lazy::Lazy::INIT;
unsafe {
descriptor.get(|| {
::protobuf::reflect::EnumDescriptor::new_pb_name::<HealthCheckResponse_ServingStatus>("HealthCheckResponse.ServingStatus", file_descriptor_proto())
})
}
}
}
impl ::std::marker::Copy for HealthCheckResponse_ServingStatus {
}
impl ::std::default::Default for HealthCheckResponse_ServingStatus {
fn default() -> Self {
HealthCheckResponse_ServingStatus::UNKNOWN
}
}
impl ::protobuf::reflect::ProtobufValue for HealthCheckResponse_ServingStatus {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Enum(self.descriptor())
}
}
#[derive(PartialEq,Clone,Default)]
pub struct VersionCheckResponse {
// message fields
pub grpc_version: ::std::string::String,
pub agent_version: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a VersionCheckResponse {
fn default() -> &'a VersionCheckResponse {
<VersionCheckResponse as ::protobuf::Message>::default_instance()
}
}
impl VersionCheckResponse {
pub fn new() -> VersionCheckResponse {
::std::default::Default::default()
}
// string grpc_version = 1;
pub fn get_grpc_version(&self) -> &str {
&self.grpc_version
}
pub fn clear_grpc_version(&mut self) {
self.grpc_version.clear();
}
// Param is passed by value, moved
pub fn set_grpc_version(&mut self, v: ::std::string::String) {
self.grpc_version = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_grpc_version(&mut self) -> &mut ::std::string::String {
&mut self.grpc_version
}
// Take field
pub fn take_grpc_version(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.grpc_version, ::std::string::String::new())
}
// string agent_version = 2;
pub fn get_agent_version(&self) -> &str {
&self.agent_version
}
pub fn clear_agent_version(&mut self) {
self.agent_version.clear();
}
// Param is passed by value, moved
pub fn set_agent_version(&mut self, v: ::std::string::String) {
self.agent_version = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_agent_version(&mut self) -> &mut ::std::string::String {
&mut self.agent_version
}
// Take field
pub fn take_agent_version(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.agent_version, ::std::string::String::new())
}
}
impl ::protobuf::Message for VersionCheckResponse {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.grpc_version)?;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.agent_version)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.grpc_version.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.grpc_version);
}
if !self.agent_version.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.agent_version);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.grpc_version.is_empty() {
os.write_string(1, &self.grpc_version)?;
}
if !self.agent_version.is_empty() {
os.write_string(2, &self.agent_version)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> VersionCheckResponse {
VersionCheckResponse::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"grpc_version",
|m: &VersionCheckResponse| { &m.grpc_version },
|m: &mut VersionCheckResponse| { &mut m.grpc_version },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"agent_version",
|m: &VersionCheckResponse| { &m.agent_version },
|m: &mut VersionCheckResponse| { &mut m.agent_version },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<VersionCheckResponse>(
"VersionCheckResponse",
fields,
file_descriptor_proto()
)
})
}
}
fn default_instance() -> &'static VersionCheckResponse {
static mut instance: ::protobuf::lazy::Lazy<VersionCheckResponse> = ::protobuf::lazy::Lazy::INIT;
unsafe {
instance.get(VersionCheckResponse::new)
}
}
}
impl ::protobuf::Clear for VersionCheckResponse {
fn clear(&mut self) {
self.grpc_version.clear();
self.agent_version.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for VersionCheckResponse {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for VersionCheckResponse {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
static file_descriptor_proto_data: &'static [u8] = b"\
\nRgithub.com/kata-containers/kata-containers/src/agent/protocols/protos\
/health.proto\x12\x04grpc\x1a-github.com/gogo/protobuf/gogoproto/gogo.pr\
oto\"(\n\x0cCheckRequest\x12\x18\n\x07service\x18\x01\x20\x01(\tR\x07ser\
vice\"\x92\x01\n\x13HealthCheckResponse\x12?\n\x06status\x18\x01\x20\x01\
(\x0e2'.grpc.HealthCheckResponse.ServingStatusR\x06status\":\n\rServingS\
tatus\x12\x0b\n\x07UNKNOWN\x10\0\x12\x0b\n\x07SERVING\x10\x01\x12\x0f\n\
\x0bNOT_SERVING\x10\x02\"^\n\x14VersionCheckResponse\x12!\n\x0cgrpc_vers\
ion\x18\x01\x20\x01(\tR\x0bgrpcVersion\x12#\n\ragent_version\x18\x02\x20\
\x01(\tR\x0cagentVersion2{\n\x06Health\x126\n\x05Check\x12\x12.grpc.Chec\
kRequest\x1a\x19.grpc.HealthCheckResponse\x129\n\x07Version\x12\x12.grpc\
.CheckRequest\x1a\x1a.grpc.VersionCheckResponseBpZ^github.com/kata-conta\
iners/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grp\
c\xb8\xe2\x1e\x01\xc0\xe2\x1e\x01\xa8\xe2\x1e\x01\xf8\xe1\x1e\x01J\x90\
\x07\n\x06\x12\x04\x07\0)\x01\nq\n\x01\x0c\x12\x03\x07\0\x122g\n\x20Copy\
right\x202017\x20HyperHQ\x20Inc.\n\x20Copyright\x20(c)\x202019\x20Ant\
\x20Financial\n\n\x20SPDX-License-Identifier:\x20Apache-2.0\n\n\n\x08\n\
\x01\x08\x12\x03\t\0u\n\t\n\x02\x08\x0b\x12\x03\t\0u\n\x08\n\x01\x02\x12\
\x03\x0b\0\r\n\t\n\x02\x03\0\x12\x03\r\07\n\x08\n\x01\x08\x12\x03\x0f\0$\
\n\x0b\n\x04\x08\xa5\xec\x03\x12\x03\x0f\0$\n\x08\n\x01\x08\x12\x03\x10\
\0'\n\x0b\n\x04\x08\x9f\xec\x03\x12\x03\x10\0'\n\x08\n\x01\x08\x12\x03\
\x11\0&\n\x0b\n\x04\x08\xa7\xec\x03\x12\x03\x11\0&\n\x08\n\x01\x08\x12\
\x03\x12\0'\n\x0b\n\x04\x08\xa8\xec\x03\x12\x03\x12\0'\n\n\n\x02\x04\0\
\x12\x04\x14\0\x16\x01\n\n\n\x03\x04\0\x01\x12\x03\x14\x08\x14\n\x0b\n\
\x04\x04\0\x02\0\x12\x03\x15\x08\x1b\n\r\n\x05\x04\0\x02\0\x04\x12\x04\
\x15\x08\x14\x16\n\x0c\n\x05\x04\0\x02\0\x05\x12\x03\x15\x08\x0e\n\x0c\n\
\x05\x04\0\x02\0\x01\x12\x03\x15\x0f\x16\n\x0c\n\x05\x04\0\x02\0\x03\x12\
\x03\x15\x19\x1a\n\n\n\x02\x04\x01\x12\x04\x18\0\x1f\x01\n\n\n\x03\x04\
\x01\x01\x12\x03\x18\x08\x1b\n\x0c\n\x04\x04\x01\x04\0\x12\x04\x19\x08\
\x1d\t\n\x0c\n\x05\x04\x01\x04\0\x01\x12\x03\x19\r\x1a\n\r\n\x06\x04\x01\
\x04\0\x02\0\x12\x03\x1a\x10\x1c\n\x0e\n\x07\x04\x01\x04\0\x02\0\x01\x12\
\x03\x1a\x10\x17\n\x0e\n\x07\x04\x01\x04\0\x02\0\x02\x12\x03\x1a\x1a\x1b\
\n\r\n\x06\x04\x01\x04\0\x02\x01\x12\x03\x1b\x10\x1c\n\x0e\n\x07\x04\x01\
\x04\0\x02\x01\x01\x12\x03\x1b\x10\x17\n\x0e\n\x07\x04\x01\x04\0\x02\x01\
\x02\x12\x03\x1b\x1a\x1b\n\r\n\x06\x04\x01\x04\0\x02\x02\x12\x03\x1c\x10\
\x20\n\x0e\n\x07\x04\x01\x04\0\x02\x02\x01\x12\x03\x1c\x10\x1b\n\x0e\n\
\x07\x04\x01\x04\0\x02\x02\x02\x12\x03\x1c\x1e\x1f\n\x0b\n\x04\x04\x01\
\x02\0\x12\x03\x1e\x08!\n\r\n\x05\x04\x01\x02\0\x04\x12\x04\x1e\x08\x1d\
\t\n\x0c\n\x05\x04\x01\x02\0\x06\x12\x03\x1e\x08\x15\n\x0c\n\x05\x04\x01\
\x02\0\x01\x12\x03\x1e\x16\x1c\n\x0c\n\x05\x04\x01\x02\0\x03\x12\x03\x1e\
\x1f\x20\n\n\n\x02\x04\x02\x12\x04!\0$\x01\n\n\n\x03\x04\x02\x01\x12\x03\
!\x08\x1c\n\x0b\n\x04\x04\x02\x02\0\x12\x03\"\x08\x20\n\r\n\x05\x04\x02\
\x02\0\x04\x12\x04\"\x08!\x1e\n\x0c\n\x05\x04\x02\x02\0\x05\x12\x03\"\
\x08\x0e\n\x0c\n\x05\x04\x02\x02\0\x01\x12\x03\"\x0f\x1b\n\x0c\n\x05\x04\
\x02\x02\0\x03\x12\x03\"\x1e\x1f\n\x0b\n\x04\x04\x02\x02\x01\x12\x03#\
\x08!\n\r\n\x05\x04\x02\x02\x01\x04\x12\x04#\x08\"\x20\n\x0c\n\x05\x04\
\x02\x02\x01\x05\x12\x03#\x08\x0e\n\x0c\n\x05\x04\x02\x02\x01\x01\x12\
\x03#\x0f\x1c\n\x0c\n\x05\x04\x02\x02\x01\x03\x12\x03#\x1f\x20\n\n\n\x02\
\x06\0\x12\x04&\0)\x01\n\n\n\x03\x06\0\x01\x12\x03&\x08\x0e\n\x0b\n\x04\
\x06\0\x02\0\x12\x03'\x08>\n\x0c\n\x05\x06\0\x02\0\x01\x12\x03'\x0c\x11\
\n\x0c\n\x05\x06\0\x02\0\x02\x12\x03'\x12\x1e\n\x0c\n\x05\x06\0\x02\0\
\x03\x12\x03')<\n\x0b\n\x04\x06\0\x02\x01\x12\x03(\x08A\n\x0c\n\x05\x06\
\0\x02\x01\x01\x12\x03(\x0c\x13\n\x0c\n\x05\x06\0\x02\x01\x02\x12\x03(\
\x14\x20\n\x0c\n\x05\x06\0\x02\x01\x03\x12\x03(+?b\x06proto3\
";
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy::INIT;
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
unsafe {
file_descriptor_proto_lazy.get(|| {
parse_descriptor_proto()
})
}
}
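To make the generated accessors above concrete, here is a small hedged sketch, written as if it lived inside this `health` module; the service string is arbitrary:

```rust
// Illustrative only: exercise the generated setters/getters defined above.
fn build_and_inspect() {
    let mut req = CheckRequest::new();
    req.set_service("grpc.Health".to_string());
    assert_eq!(req.get_service(), "grpc.Health");

    let mut resp = HealthCheckResponse::new();
    resp.set_status(HealthCheckResponse_ServingStatus::SERVING);
    // The status defaults to UNKNOWN; compute_size only counts non-default values.
    assert_eq!(resp.get_status(), HealthCheckResponse_ServingStatus::SERVING);
}
```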


@@ -0,0 +1,90 @@
// This file is generated by ttrpc-compiler 0.3.0. Do not edit
// @generated
// https://github.com/Manishearth/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unsafe_code)]
#![allow(unused_imports)]
#![allow(unused_results)]
use protobuf::{CodedInputStream, CodedOutputStream, Message};
use std::collections::HashMap;
use std::sync::Arc;
#[derive(Clone)]
pub struct HealthClient {
client: ::ttrpc::Client,
}
impl HealthClient {
pub fn new(client: ::ttrpc::Client) -> Self {
HealthClient {
client: client,
}
}
pub fn check(&self, req: &super::health::CheckRequest, timeout_nano: i64) -> ::ttrpc::Result<super::health::HealthCheckResponse> {
let mut cres = super::health::HealthCheckResponse::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.Health", "Check", cres);
Ok(cres)
}
pub fn version(&self, req: &super::health::CheckRequest, timeout_nano: i64) -> ::ttrpc::Result<super::health::VersionCheckResponse> {
let mut cres = super::health::VersionCheckResponse::new();
::ttrpc::client_request!(self, req, timeout_nano, "grpc.Health", "Version", cres);
Ok(cres)
}
}
struct CheckMethod {
service: Arc<std::boxed::Box<dyn Health + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for CheckMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, health, CheckRequest, check);
Ok(())
}
}
struct VersionMethod {
service: Arc<std::boxed::Box<dyn Health + Send + Sync>>,
}
impl ::ttrpc::MethodHandler for VersionMethod {
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
::ttrpc::request_handler!(self, ctx, req, health, CheckRequest, version);
Ok(())
}
}
pub trait Health {
fn check(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::health::CheckRequest) -> ::ttrpc::Result<super::health::HealthCheckResponse> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.Health/Check is not supported".to_string())))
}
fn version(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::health::CheckRequest) -> ::ttrpc::Result<super::health::VersionCheckResponse> {
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.Health/Version is not supported".to_string())))
}
}
pub fn create_health(service: Arc<std::boxed::Box<dyn Health + Send + Sync>>) -> HashMap <String, Box<dyn ::ttrpc::MethodHandler + Send + Sync>> {
let mut methods = HashMap::new();
methods.insert("/grpc.Health/Check".to_string(),
std::boxed::Box::new(CheckMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods.insert("/grpc.Health/Version".to_string(),
std::boxed::Box::new(VersionMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
methods
}
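A hedged sketch of how this generated surface is normally wired up: implement the `Health` trait, then pass the boxed service to `create_health` to get the method-handler map a ttrpc server registers. The struct name and response value below are illustrative, not taken from the repository:

```rust
use std::collections::HashMap;
use std::sync::Arc;

// Illustrative service: only `check` is overridden, so `version` falls back
// to the trait default above (a NOT_FOUND RpcStatus error).
struct SimpleHealth;

impl Health for SimpleHealth {
    fn check(
        &self,
        _ctx: &::ttrpc::TtrpcContext,
        _req: super::health::CheckRequest,
    ) -> ::ttrpc::Result<super::health::HealthCheckResponse> {
        let mut resp = super::health::HealthCheckResponse::new();
        resp.set_status(super::health::HealthCheckResponse_ServingStatus::SERVING);
        Ok(resp)
    }
}

fn handlers() -> HashMap<String, Box<dyn ::ttrpc::MethodHandler + Send + Sync>> {
    let service: Box<dyn Health + Send + Sync> = Box::new(SimpleHealth);
    // Keys are "/grpc.Health/Check" and "/grpc.Health/Version", as built above.
    create_health(Arc::new(service))
}
```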


@@ -3,7 +3,6 @@
// SPDX-License-Identifier: Apache-2.0
//
#![allow(bare_trait_objects)]
#![allow(clippy::redundant_field_names)]
pub mod agent;
pub mod agent_ttrpc;
@@ -12,3 +11,11 @@ pub mod health;
pub mod health_ttrpc;
pub mod oci;
pub mod types;
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
}

src/agent/protocols/src/oci.rs (normal file, 10293 lines)

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -24,9 +24,8 @@ regex = "1.1"
path-absolutize = "1.2.0"
dirs = "3.0.1"
anyhow = "1.0.32"
cgroups = { package = "cgroups-rs", version = "0.2.0" }
cgroups = { git = "https://github.com/kata-containers/cgroups-rs", branch = "stable-0.1.1"}
tempfile = "3.1.0"
epoll = "4.3.1"
[dev-dependencies]
serial_test = "0.5.0"


@@ -6,6 +6,8 @@
// looks like we can use caps to manipulate capabilities
// conveniently, use caps to do it directly.. maybe
use lazy_static;
use crate::log_child;
use crate::sync::write_count;
use anyhow::{anyhow, Result};


@@ -21,6 +21,7 @@ use cgroups::{
use crate::cgroups::Manager as CgroupManager;
use crate::container::DEFAULT_DEVICES;
use anyhow::{anyhow, Context, Result};
use lazy_static;
use libc::{self, pid_t};
use nix::errno::Errno;
use oci::{
@@ -44,23 +45,34 @@ macro_rules! sl {
};
}
macro_rules! get_controller_or_return_singular_none {
($cg:ident) => {
match $cg.controller_of() {
Some(c) => c,
None => return SingularPtrField::none(),
}
};
pub fn load_or_create<'a>(h: Box<&'a dyn cgroups::Hierarchy>, path: &str) -> Cgroup<'a> {
let valid_path = path.trim_start_matches("/").to_string();
let cg = load(h.clone(), &valid_path);
if cg.is_none() {
info!(sl!(), "create new cgroup: {}", &valid_path);
cgroups::Cgroup::new(h, valid_path.as_str())
} else {
cg.unwrap()
}
}
pub fn load<'a>(h: Box<&'a dyn cgroups::Hierarchy>, path: &str) -> Option<Cgroup<'a>> {
let valid_path = path.trim_start_matches("/").to_string();
let cg = cgroups::Cgroup::load(h, valid_path.as_str());
let cpu_controller: &CpuController = cg.controller_of().unwrap();
if cpu_controller.exists() {
Some(cg)
} else {
None
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Manager {
pub paths: HashMap<String, String>,
pub mounts: HashMap<String, String>,
// pub rels: HashMap<String, String>,
pub cpath: String,
#[serde(skip)]
cgroup: cgroups::Cgroup,
relative_paths: HashMap<String, String>,
}
// set_resource is used to set resources by cgroup controller.
@@ -75,11 +87,17 @@ macro_rules! set_resource {
impl CgroupManager for Manager {
fn apply(&self, pid: pid_t) -> Result<()> {
self.cgroup.add_task(CgroupPid::from(pid as u64))?;
let h = cgroups::hierarchies::auto();
let h = Box::new(&*h);
let cg = load_or_create(h, &self.cpath);
cg.add_task(CgroupPid::from(pid as u64))?;
Ok(())
}
fn set(&self, r: &LinuxResources, update: bool) -> Result<()> {
let h = cgroups::hierarchies::auto();
let h = Box::new(&*h);
let cg = load_or_create(h, &self.cpath);
info!(
sl!(),
"cgroup manager set resources for container. Resources input {:?}", r
@@ -89,49 +107,53 @@ impl CgroupManager for Manager {
// set cpuset and cpu resources
if let Some(cpu) = &r.cpu {
set_cpu_resources(&self.cgroup, cpu)?;
set_cpu_resources(&cg, cpu)?;
}
// set memory resources
if let Some(memory) = &r.memory {
set_memory_resources(&self.cgroup, memory, update)?;
set_memory_resources(&cg, memory, update)?;
}
// set pids resources
if let Some(pids_resources) = &r.pids {
set_pids_resources(&self.cgroup, pids_resources)?;
set_pids_resources(&cg, pids_resources)?;
}
// set block_io resources
if let Some(blkio) = &r.block_io {
set_block_io_resources(&self.cgroup, blkio, res)?;
set_block_io_resources(&cg, blkio, res)?;
}
// set hugepages resources
if !r.hugepage_limits.is_empty() {
set_hugepages_resources(&self.cgroup, &r.hugepage_limits, res)?;
if r.hugepage_limits.len() > 0 {
set_hugepages_resources(&cg, &r.hugepage_limits, res)?;
}
// set network resources
if let Some(network) = &r.network {
set_network_resources(&self.cgroup, network, res)?;
set_network_resources(&cg, network, res)?;
}
// set devices resources
set_devices_resources(&self.cgroup, &r.devices, res)?;
set_devices_resources(&cg, &r.devices, res)?;
info!(sl!(), "resources after processed {:?}", res);
// apply resources
self.cgroup.apply(res)?;
cg.apply(res)?;
Ok(())
}
fn get_stats(&self) -> Result<CgroupStats> {
// CpuStats
let cpu_usage = get_cpuacct_stats(&self.cgroup);
let h = cgroups::hierarchies::auto();
let h = Box::new(&*h);
let cg = load_or_create(h, &self.cpath);
let throttling_data = get_cpu_stats(&self.cgroup);
// CpuStats
let cpu_usage = get_cpuacct_stats(&cg);
let throttling_data = get_cpu_stats(&cg);
let cpu_stats = SingularPtrField::some(CpuStats {
cpu_usage,
@@ -141,17 +163,17 @@ impl CgroupManager for Manager {
});
// Memorystats
let memory_stats = get_memory_stats(&self.cgroup);
let memory_stats = get_memory_stats(&cg);
// PidsStats
let pids_stats = get_pids_stats(&self.cgroup);
let pids_stats = get_pids_stats(&cg);
// BlkioStats
// note that virtiofs has no blkio stats
let blkio_stats = get_blkio_stats(&self.cgroup);
let blkio_stats = get_blkio_stats(&cg);
// HugetlbStats
let hugetlb_stats = get_hugetlb_stats(&self.cgroup);
let hugetlb_stats = get_hugetlb_stats(&cg);
Ok(CgroupStats {
cpu_stats,
@@ -165,7 +187,10 @@ impl CgroupManager for Manager {
}
fn freeze(&self, state: FreezerState) -> Result<()> {
let freezer_controller: &FreezerController = self.cgroup.controller_of().unwrap();
let h = cgroups::hierarchies::auto();
let h = Box::new(&*h);
let cg = load_or_create(h, &self.cpath);
let freezer_controller: &FreezerController = cg.controller_of().unwrap();
match state {
FreezerState::Thawed => {
freezer_controller.thaw()?;
@@ -182,12 +207,20 @@ impl CgroupManager for Manager {
}
fn destroy(&mut self) -> Result<()> {
let _ = self.cgroup.delete();
let h = cgroups::hierarchies::auto();
let h = Box::new(&*h);
let cg = load(h, &self.cpath);
if cg.is_some() {
cg.unwrap().delete();
}
Ok(())
}
fn get_pids(&self) -> Result<Vec<pid_t>> {
let mem_controller: &MemController = self.cgroup.controller_of().unwrap();
let h = cgroups::hierarchies::auto();
let h = Box::new(&*h);
let cg = load_or_create(h, &self.cpath);
let mem_controller: &MemController = cg.controller_of().unwrap();
let pids = mem_controller.tasks();
let result = pids.iter().map(|x| x.pid as i32).collect::<Vec<i32>>();
@@ -206,7 +239,7 @@ fn set_network_resources(
// description can be found at https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/net_cls.html
let class_id = network.class_id.unwrap_or(0) as u64;
if class_id != 0 {
res.network.class_id = Some(class_id);
res.network.class_id = class_id;
}
// set network priorities
@@ -219,13 +252,14 @@ fn set_network_resources(
});
}
res.network.update_values = true;
res.network.priorities = priorities;
Ok(())
}
fn set_devices_resources(
_cg: &cgroups::Cgroup,
device_resources: &[LinuxDeviceCgroup],
device_resources: &Vec<LinuxDeviceCgroup>,
res: &mut cgroups::Resources,
) -> Result<()> {
info!(sl!(), "cgroup manager set devices");
@@ -249,6 +283,7 @@ fn set_devices_resources(
}
}
res.devices.update_values = true;
res.devices.devices = devices;
Ok(())
@@ -256,10 +291,11 @@ fn set_devices_resources(
fn set_hugepages_resources(
_cg: &cgroups::Cgroup,
hugepage_limits: &[LinuxHugepageLimit],
hugepage_limits: &Vec<LinuxHugepageLimit>,
res: &mut cgroups::Resources,
) -> Result<()> {
info!(sl!(), "cgroup manager set hugepage");
res.hugepages.update_values = true;
let mut limits = vec![];
for l in hugepage_limits.iter() {
@@ -280,6 +316,7 @@ fn set_block_io_resources(
res: &mut cgroups::Resources,
) -> Result<()> {
info!(sl!(), "cgroup manager set block io");
res.blkio.update_values = true;
if cg.v2() {
res.blkio.weight = convert_blk_io_to_v2_value(blkio.weight);
@@ -328,9 +365,7 @@ fn set_cpu_resources(cg: &cgroups::Cgroup, cpu: &LinuxCPU) -> Result<()> {
let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
if !cpu.cpus.is_empty() {
if let Err(e) = cpuset_controller.set_cpus(&cpu.cpus) {
warn!(sl!(), "write cpuset failed: {:?}", e);
}
cpuset_controller.set_cpus(&cpu.cpus)?;
}
if !cpu.mems.is_empty() {
@@ -386,13 +421,13 @@ fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool
}
}
if let Some(swappiness) = memory.swappiness {
if swappiness >= 0 && swappiness <= 100 {
mem_controller.set_swappiness(swappiness as u64)?;
if let Some(swapiness) = memory.swapiness {
if swapiness >= 0 && swapiness <= 100 {
mem_controller.set_swappiness(swapiness as u64)?;
} else {
return Err(anyhow!(
"invalid value:{}. valid memory swappiness range is 0-100",
swappiness
swapiness
));
}
}
@@ -418,7 +453,7 @@ fn set_pids_resources(cg: &cgroups::Cgroup, pids: &LinuxPids) -> Result<()> {
}
fn build_blk_io_device_throttle_resource(
input: &[oci::LinuxThrottleDevice],
input: &Vec<oci::LinuxThrottleDevice>,
) -> Vec<BlkIoDeviceThrottleResource> {
let mut blk_io_device_throttle_resources = vec![];
for d in input.iter() {
@@ -570,8 +605,10 @@ lazy_static! {
}
fn get_cpu_stats(cg: &cgroups::Cgroup) -> SingularPtrField<ThrottlingData> {
let cpu_controller: &CpuController = get_controller_or_return_singular_none!(cg);
let cpu_controller: &CpuController = cg.controller_of().unwrap();
let stat = cpu_controller.cpu().stat;
let h = lines_to_map(&stat);
SingularPtrField::some(ThrottlingData {
@@ -584,18 +621,27 @@ fn get_cpu_stats(cg: &cgroups::Cgroup) -> SingularPtrField<ThrottlingData> {
}
fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField<CpuUsage> {
if let Some(cpuacct_controller) = cg.controller_of::<CpuAcctController>() {
let cpuacct = cpuacct_controller.cpuacct();
let cpuacct_controller: Option<&CpuAcctController> = cg.controller_of();
if cpuacct_controller.is_none() {
if cg.v2() {
return SingularPtrField::some(CpuUsage {
total_usage: 0,
percpu_usage: vec![],
usage_in_kernelmode: 0,
usage_in_usermode: 0,
unknown_fields: UnknownFields::default(),
cached_size: CachedSize::default(),
});
}
let h = lines_to_map(&cpuacct.stat);
let usage_in_usermode =
(((*h.get("user").unwrap() * NANO_PER_SECOND) as f64) / *CLOCK_TICKS) as u64;
let usage_in_kernelmode =
(((*h.get("system").unwrap() * NANO_PER_SECOND) as f64) / *CLOCK_TICKS) as u64;
let total_usage = cpuacct.usage;
let percpu_usage = line_to_vec(&cpuacct.usage_percpu);
// try to get from cpu controller
let cpu_controller: &CpuController = cg.controller_of().unwrap();
let stat = cpu_controller.cpu().stat;
let h = lines_to_map(&stat);
let usage_in_usermode = *h.get("user_usec").unwrap();
let usage_in_kernelmode = *h.get("system_usec").unwrap();
let total_usage = *h.get("usage_usec").unwrap();
let percpu_usage = vec![];
return SingularPtrField::some(CpuUsage {
total_usage,
@@ -607,25 +653,18 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField<CpuUsage> {
});
}
if cg.v2() {
return SingularPtrField::some(CpuUsage {
total_usage: 0,
percpu_usage: vec![],
usage_in_kernelmode: 0,
usage_in_usermode: 0,
unknown_fields: UnknownFields::default(),
cached_size: CachedSize::default(),
});
}
let cpuacct_controller = cpuacct_controller.unwrap();
let cpuacct = cpuacct_controller.cpuacct();
// try to get from cpu controller
let cpu_controller: &CpuController = get_controller_or_return_singular_none!(cg);
let stat = cpu_controller.cpu().stat;
let h = lines_to_map(&stat);
let usage_in_usermode = *h.get("user_usec").unwrap();
let usage_in_kernelmode = *h.get("system_usec").unwrap();
let total_usage = *h.get("usage_usec").unwrap();
let percpu_usage = vec![];
let h = lines_to_map(&cpuacct.stat);
let usage_in_usermode =
(((*h.get("user").unwrap() * NANO_PER_SECOND) as f64) / *CLOCK_TICKS) as u64;
let usage_in_kernelmode =
(((*h.get("system").unwrap() * NANO_PER_SECOND) as f64) / *CLOCK_TICKS) as u64;
let total_usage = cpuacct.usage;
let percpu_usage = line_to_vec(&cpuacct.usage_percpu);
SingularPtrField::some(CpuUsage {
total_usage,
@@ -638,7 +677,7 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField<CpuUsage> {
}
fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> {
let memory_controller: &MemController = get_controller_or_return_singular_none!(cg);
let memory_controller: &MemController = cg.controller_of().unwrap();
// cache from memory stat
let memory = memory_controller.memory_stat();
@@ -646,7 +685,7 @@ fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> {
// use_hierarchy
let value = memory.use_hierarchy;
let use_hierarchy = value == 1;
let use_hierarchy = if value == 1 { true } else { false };
// get memory data
let usage = SingularPtrField::some(MemoryData {
@@ -695,17 +734,18 @@ fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> {
}
fn get_pids_stats(cg: &cgroups::Cgroup) -> SingularPtrField<PidsStats> {
let pid_controller: &PidController = get_controller_or_return_singular_none!(cg);
let pid_controller: &PidController = cg.controller_of().unwrap();
let current = pid_controller.get_pid_current().unwrap_or(0);
let max = pid_controller.get_pid_max();
let limit = match max {
Err(_) => 0,
Ok(max) => match max {
let limit = if max.is_err() {
0
} else {
match max.unwrap() {
MaxValue::Value(v) => v,
MaxValue::Max => 0,
},
}
} as u64;
SingularPtrField::some(PidsStats {
@@ -748,9 +788,9 @@ https://github.com/opencontainers/runc/blob/a5847db387ae28c0ca4ebe4beee1a76900c8
Total 0
*/
fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> RepeatedField<BlkioStatsEntry> {
fn get_blkio_stat_blkiodata(blkiodata: &Vec<BlkIoData>) -> RepeatedField<BlkioStatsEntry> {
let mut m = RepeatedField::new();
if blkiodata.is_empty() {
if blkiodata.len() == 0 {
return m;
}
@@ -770,10 +810,10 @@ fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> RepeatedField<BlkioStats
m
}
fn get_blkio_stat_ioservice(services: &[IoService]) -> RepeatedField<BlkioStatsEntry> {
fn get_blkio_stat_ioservice(services: &Vec<IoService>) -> RepeatedField<BlkioStatsEntry> {
let mut m = RepeatedField::new();
if services.is_empty() {
if services.len() == 0 {
return m;
}
@@ -794,14 +834,14 @@ fn build_blkio_stats_entry(major: i16, minor: i16, op: &str, value: u64) -> Blki
major: major as u64,
minor: minor as u64,
op: op.to_string(),
value,
value: value,
unknown_fields: UnknownFields::default(),
cached_size: CachedSize::default(),
}
}
fn get_blkio_stats_v2(cg: &cgroups::Cgroup) -> SingularPtrField<BlkioStats> {
let blkio_controller: &BlkIoController = get_controller_or_return_singular_none!(cg);
let blkio_controller: &BlkIoController = cg.controller_of().unwrap();
let blkio = blkio_controller.blkio();
let mut resp = BlkioStats::new();
@@ -829,13 +869,13 @@ fn get_blkio_stats(cg: &cgroups::Cgroup) -> SingularPtrField<BlkioStats> {
return get_blkio_stats_v2(&cg);
}
let blkio_controller: &BlkIoController = get_controller_or_return_singular_none!(cg);
let blkio_controller: &BlkIoController = cg.controller_of().unwrap();
let blkio = blkio_controller.blkio();
let mut m = BlkioStats::new();
let io_serviced_recursive = blkio.io_serviced_recursive;
if io_serviced_recursive.is_empty() {
if io_serviced_recursive.len() == 0 {
// fall back to generic stats
// blkio.throttle.io_service_bytes,
// maybe io_service_bytes_recursive?
@@ -890,8 +930,8 @@ fn get_hugetlb_stats(cg: &cgroups::Cgroup) -> HashMap<String, HugetlbStats> {
h
}
pub const PATHS: &str = "/proc/self/cgroup";
pub const MOUNTS: &str = "/proc/self/mountinfo";
pub const PATHS: &'static str = "/proc/self/cgroup";
pub const MOUNTS: &'static str = "/proc/self/mountinfo";
pub fn get_paths() -> Result<HashMap<String, String>> {
let mut m = HashMap::new();
@@ -946,19 +986,9 @@ pub fn get_mounts() -> Result<HashMap<String, String>> {
Ok(m)
}
fn new_cgroup(
h: Box<dyn cgroups::Hierarchy>,
path: &str,
relative_paths: HashMap<String, String>,
) -> Cgroup {
let valid_path = path.trim_start_matches('/').to_string();
cgroups::Cgroup::new_with_relative_paths(h, valid_path.as_str(), relative_paths)
}
impl Manager {
pub fn new(cpath: &str) -> Result<Self> {
let mut m = HashMap::new();
let mut relative_paths = HashMap::new();
let paths = get_paths()?;
let mounts = get_mounts()?;
@@ -977,7 +1007,6 @@ impl Manager {
};
m.insert(key.to_string(), p);
relative_paths.insert(key.to_string(), value.to_string());
}
Ok(Self {
@@ -985,27 +1014,29 @@ impl Manager {
mounts,
// rels: paths,
cpath: cpath.to_string(),
cgroup: new_cgroup(cgroups::hierarchies::auto(), cpath, relative_paths.clone()),
relative_paths,
})
}
pub fn update_cpuset_path(&self, guest_cpuset: &str, container_cpuset: &str) -> Result<()> {
if guest_cpuset == "" {
pub fn update_cpuset_path(&self, cpuset_cpus: &str) -> Result<()> {
if cpuset_cpus == "" {
return Ok(());
}
info!(sl!(), "update_cpuset_path to: {}", guest_cpuset);
info!(sl!(), "update_cpuset_path to: {}", cpuset_cpus);
let h = cgroups::hierarchies::auto();
let root_cg = h.root_control_group();
let h = Box::new(&*h);
let root_cg = load_or_create(h, "");
let root_cpuset_controller: &CpuSetController = root_cg.controller_of().unwrap();
let path = root_cpuset_controller.path();
let root_path = Path::new(path);
info!(sl!(), "root cpuset path: {:?}", &path);
let container_cpuset_controller: &CpuSetController = self.cgroup.controller_of().unwrap();
let path = container_cpuset_controller.path();
let h = cgroups::hierarchies::auto();
let h = Box::new(&*h);
let cg = load_or_create(h, &self.cpath);
let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
let path = cpuset_controller.path();
let container_path = Path::new(path);
info!(sl!(), "container cpuset path: {:?}", &path);
@@ -1014,40 +1045,30 @@ impl Manager {
if ancestor == root_path {
break;
}
paths.push(ancestor);
if ancestor != container_path {
paths.push(ancestor);
}
}
info!(sl!(), "parent paths to update cpuset: {:?}", &paths);
info!(sl!(), "paths to update cpuset: {:?}", &paths);
let mut i = paths.len();
loop {
if i == 0 {
break;
}
i -= 1;
i = i - 1;
let h = cgroups::hierarchies::auto();
let h = Box::new(&*h);
// remove cgroup root from path
let r_path = &paths[i]
.to_str()
.unwrap()
.trim_start_matches(root_path.to_str().unwrap());
info!(sl!(), "updating cpuset for parent path {:?}", &r_path);
let cg = new_cgroup(
cgroups::hierarchies::auto(),
&r_path,
self.relative_paths.clone(),
);
info!(sl!(), "updating cpuset for path {:?}", &r_path);
let cg = load_or_create(h, &r_path);
let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
cpuset_controller.set_cpus(guest_cpuset)?;
}
if !container_cpuset.is_empty() {
info!(
sl!(),
"updating cpuset for container path: {:?} cpuset: {}",
&container_path,
container_cpuset
);
container_cpuset_controller.set_cpus(container_cpuset)?;
cpuset_controller.set_cpus(cpuset_cpus)?;
}
Ok(())
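The recurring pattern in this diff is to re-resolve the cgroup on every operation instead of keeping a `cgroups::Cgroup` cached in the `Manager`: build the auto-detected hierarchy, attach with `load_or_create`, then drive the wanted controller. A minimal sketch of that pattern, assuming the same `cgroups-rs` fork and imports this file already uses:

```rust
// Sketch only: mirrors the shape of Manager::freeze above, using the
// load_or_create helper introduced in this change. Error handling simplified.
fn freeze_path(cpath: &str) -> anyhow::Result<()> {
    let h = cgroups::hierarchies::auto();
    let h = Box::new(&*h);
    let cg = load_or_create(h, cpath);
    let freezer: &cgroups::freezer::FreezerController = cg.controller_of().unwrap();
    freezer.freeze()?;
    Ok(())
}
```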


@@ -1,74 +0,0 @@
// Copyright (c) 2020 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
use protobuf::{CachedSize, SingularPtrField, UnknownFields};
use crate::cgroups::Manager as CgroupManager;
use crate::protocols::agent::{BlkioStats, CgroupStats, CpuStats, MemoryStats, PidsStats};
use anyhow::Result;
use cgroups::freezer::FreezerState;
use libc::{self, pid_t};
use oci::LinuxResources;
use std::collections::HashMap;
use std::string::String;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Manager {
pub paths: HashMap<String, String>,
pub mounts: HashMap<String, String>,
pub cpath: String,
}
impl CgroupManager for Manager {
fn apply(&self, _: pid_t) -> Result<()> {
Ok(())
}
fn set(&self, _: &LinuxResources, _: bool) -> Result<()> {
Ok(())
}
fn get_stats(&self) -> Result<CgroupStats> {
Ok(CgroupStats {
cpu_stats: SingularPtrField::some(CpuStats::default()),
memory_stats: SingularPtrField::some(MemoryStats::new()),
pids_stats: SingularPtrField::some(PidsStats::new()),
blkio_stats: SingularPtrField::some(BlkioStats::new()),
hugetlb_stats: HashMap::new(),
unknown_fields: UnknownFields::default(),
cached_size: CachedSize::default(),
})
}
fn freeze(&self, _: FreezerState) -> Result<()> {
Ok(())
}
fn destroy(&mut self) -> Result<()> {
Ok(())
}
fn get_pids(&self) -> Result<Vec<pid_t>> {
Ok(Vec::new())
}
}
impl Manager {
pub fn new(cpath: &str) -> Result<Self> {
Ok(Self {
paths: HashMap::new(),
mounts: HashMap::new(),
cpath: cpath.to_string(),
})
}
pub fn update_cpuset_path(&self, _: &str, _: &str) -> Result<()> {
Ok(())
}
pub fn get_cg_path(&self, _: &str) -> Option<String> {
Some("".to_string())
}
}


@@ -10,7 +10,6 @@ use protocols::agent::CgroupStats;
use cgroups::freezer::FreezerState;
pub mod fs;
pub mod mock;
pub mod notifier;
pub mod systemd;


@@ -41,7 +41,7 @@ fn get_value_from_cgroup(path: &PathBuf, key: &str) -> Result<i64> {
);
for line in content.lines() {
let arr: Vec<&str> = line.split(' ').collect();
let arr: Vec<&str> = line.split(" ").collect();
if arr.len() == 2 && arr[0] == key {
let r = arr[1].parse::<i64>()?;
return Ok(r);
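For context, the function above scans lines of the form `<key> <value>`, as found in cgroup event files such as `memory.events`. A standalone hedged sketch of just that parsing step:

```rust
// Standalone illustration of the key/value scan performed above.
fn find_value(content: &str, key: &str) -> Option<i64> {
    for line in content.lines() {
        let arr: Vec<&str> = line.split(' ').collect();
        if arr.len() == 2 && arr[0] == key {
            return arr[1].parse::<i64>().ok();
        }
    }
    None
}

// find_value("low 0\noom_kill 1", "oom_kill") == Some(1)
```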


@@ -4,9 +4,12 @@
//
use anyhow::{anyhow, Context, Result};
use dirs;
use lazy_static;
use libc::pid_t;
use oci::{Hook, Linux, LinuxNamespace, LinuxResources, POSIXRlimit, Spec};
use oci::{LinuxDevice, LinuxIDMapping};
use serde_json;
use std::clone::Clone;
use std::ffi::{CStr, CString};
use std::fmt;
@@ -20,10 +23,7 @@ use std::time::SystemTime;
use cgroups::freezer::FreezerState;
use crate::capabilities::{self, CAPSMAP};
#[cfg(not(test))]
use crate::cgroups::fs::Manager as FsManager;
#[cfg(test)]
use crate::cgroups::mock::Manager as FsManager;
use crate::cgroups::Manager;
use crate::log_child;
use crate::process::Process;
@@ -41,10 +41,9 @@ use nix::pty;
use nix::sched::{self, CloneFlags};
use nix::sys::signal::{self, Signal};
use nix::sys::stat::{self, Mode};
use nix::unistd::{self, fork, ForkResult, Gid, Pid, Uid};
use std::os::unix::fs::MetadataExt;
use std::os::unix::io::AsRawFd;
use nix::unistd::{self, ForkResult, Gid, Pid, Uid};
use libc;
use protobuf::SingularPtrField;
use oci::State as OCIState;
@@ -55,9 +54,9 @@ use std::os::unix::io::FromRawFd;
use slog::{info, o, Logger};
const STATE_FILENAME: &str = "state.json";
const EXEC_FIFO_FILENAME: &str = "exec.fifo";
const VER_MARKER: &str = "1.2.5";
const STATE_FILENAME: &'static str = "state.json";
const EXEC_FIFO_FILENAME: &'static str = "exec.fifo";
const VER_MARKER: &'static str = "1.2.5";
const PID_NS_PATH: &str = "/proc/self/ns/pid";
const INIT: &str = "INIT";
@@ -67,7 +66,6 @@ const CWFD_FD: &str = "CWFD_FD";
const CLOG_FD: &str = "CLOG_FD";
const FIFO_FD: &str = "FIFO_FD";
const HOME_ENV_KEY: &str = "HOME";
const PIDNS_FD: &str = "PIDNS_FD";
#[derive(PartialEq, Clone, Copy)]
pub enum Status {
@@ -323,13 +321,10 @@ pub fn init_child() {
let cwfd = std::env::var(CWFD_FD).unwrap().parse::<i32>().unwrap();
let cfd_log = std::env::var(CLOG_FD).unwrap().parse::<i32>().unwrap();
match do_init_child(cwfd) {
Ok(_) => log_child!(cfd_log, "temporary parent process exit successfully"),
Err(e) => {
log_child!(cfd_log, "temporary parent process exit:child exit: {:?}", e);
let _ = write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str());
}
}
let _ = do_init_child(cwfd).map_err(|e| {
log_child!(cfd_log, "child exit: {:?}", e);
let _ = write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str());
});
}
fn do_init_child(cwfd: RawFd) -> Result<()> {
@@ -344,38 +339,6 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
let crfd = std::env::var(CRFD_FD)?.parse::<i32>().unwrap();
let cfd_log = std::env::var(CLOG_FD)?.parse::<i32>().unwrap();
// get the pidns fd from parent, if parent had passed the pidns fd,
// then get it and join in this pidns; otherwise, create a new pidns
// by unshare from the parent pidns.
match std::env::var(PIDNS_FD) {
Ok(fd) => {
let pidns_fd = fd.parse::<i32>().context("get parent pidns fd")?;
sched::setns(pidns_fd, CloneFlags::CLONE_NEWPID).context("failed to join pidns")?;
let _ = unistd::close(pidns_fd);
}
Err(_e) => sched::unshare(CloneFlags::CLONE_NEWPID)?,
}
match fork() {
Ok(ForkResult::Parent { child, .. }) => {
log_child!(
cfd_log,
"Continuing execution in temporary process, new child has pid: {:?}",
child
);
let _ = write_sync(cwfd, SYNC_DATA, format!("{}", pid_t::from(child)).as_str());
// parent return
return Ok(());
}
Ok(ForkResult::Child) => (),
Err(e) => {
return Err(anyhow!(format!(
"failed to fork temporary process: {:?}",
e
)));
}
}
log_child!(cfd_log, "child process start run");
let buf = read_sync(crfd)?;
let spec_str = std::str::from_utf8(&buf)?;
@@ -586,15 +549,9 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
let uid = Uid::from_raw(guser.uid);
let gid = Gid::from_raw(guser.gid);
// only change stdio devices owner when user
// isn't root.
if guser.uid != 0 {
set_stdio_permissions(guser.uid)?;
}
setid(uid, gid)?;
if !guser.additional_gids.is_empty() {
if guser.additional_gids.len() > 0 {
setgroups(guser.additional_gids.as_slice()).map_err(|e| {
let _ = write_sync(
cwfd,
@@ -638,7 +595,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
// setup the envs
for e in env.iter() {
let v: Vec<&str> = e.splitn(2, '=').collect();
let v: Vec<&str> = e.splitn(2, "=").collect();
if v.len() != 2 {
continue;
}
@@ -686,43 +643,6 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
do_exec(&args);
}
// set_stdio_permissions fixes the permissions of PID 1's STDIO
// within the container to the specified user.
// The ownership needs to match because it is created outside of
// the container and needs to be localized.
fn set_stdio_permissions(uid: libc::uid_t) -> Result<()> {
let meta = fs::metadata("/dev/null")?;
let fds = [
std::io::stdin().as_raw_fd(),
std::io::stdout().as_raw_fd(),
std::io::stderr().as_raw_fd(),
];
for fd in &fds {
let stat = stat::fstat(*fd)?;
// Skip chown of /dev/null if it was used as one of the STDIO fds.
if stat.st_rdev == meta.rdev() {
continue;
}
// According to the POSIX specification, -1 is used to indicate that owner and group
// are not to be changed. Since uid_t and gid_t are unsigned types, we have to wrap
// around to get -1.
let gid = (0 as libc::gid_t).wrapping_sub(1);
// We only change the uid owner (as it is possible for the mount to
// prefer a different gid, and there's no reason for us to change it).
// The reason why we don't just leave the default uid=X mount setup is
// that users expect to be able to actually use their console. Without
// this code, you couldn't effectively run as a non-root user inside a
// container and also have a console set up.
let res = unsafe { libc::fchown(*fd, uid, gid) };
Errno::result(res).map_err(|e| anyhow!(e).context("set stdio permissions failed"))?;
}
Ok(())
}
impl BaseContainer for LinuxContainer {
fn id(&self) -> String {
self.id.clone()
@@ -733,15 +653,11 @@ impl BaseContainer for LinuxContainer {
}
fn state(&self) -> Result<State> {
Err(anyhow!("not supported"))
Err(anyhow!("not suppoerted"))
}
fn oci_state(&self) -> Result<OCIState> {
let oci = match self.config.spec.as_ref() {
Some(s) => s,
None => return Err(anyhow!("Unable to get OCI state: spec not found")),
};
let oci = self.config.spec.as_ref().unwrap();
let status = self.status();
let pid = if status != Status::STOPPED {
self.init_process_pid
@@ -749,17 +665,9 @@ impl BaseContainer for LinuxContainer {
0
};
let root = match oci.root.as_ref() {
Some(s) => s.path.as_str(),
None => return Err(anyhow!("Unable to get root path: oci.root is none")),
};
let root = oci.root.as_ref().unwrap().path.as_str();
let path = fs::canonicalize(root)?;
let bundle = match path.parent() {
Some(s) => s.to_str().unwrap().to_string(),
None => return Err(anyhow!("could not get root parent: root path {:?}", path)),
};
let bundle = path.parent().unwrap().to_str().unwrap().to_string();
Ok(OCIState {
version: oci.version.clone(),
id: self.id(),
@@ -823,7 +731,7 @@ impl BaseContainer for LinuxContainer {
info!(logger, "enter container.start!");
let mut fifofd: RawFd = -1;
if p.init {
if stat::stat(fifo_file.as_str()).is_ok() {
if let Ok(_) = stat::stat(fifo_file.as_str()) {
return Err(anyhow!("exec fifo exists"));
}
unistd::mkfifo(fifo_file.as_str(), Mode::from_bits(0o622).unwrap())?;
@@ -854,8 +762,30 @@ impl BaseContainer for LinuxContainer {
.map_err(|e| warn!(logger, "fcntl pfd log FD_CLOEXEC {:?}", e));
let child_logger = logger.new(o!("action" => "child process log"));
let log_handler = setup_child_logger(pfd_log, child_logger)?;
let log_handler = thread::spawn(move || {
let log_file = unsafe { std::fs::File::from_raw_fd(pfd_log) };
let mut reader = BufReader::new(log_file);
loop {
let mut line = String::new();
match reader.read_line(&mut line) {
Err(e) => {
info!(child_logger, "read child process log error: {:?}", e);
break;
}
Ok(count) => {
if count == 0 {
info!(child_logger, "read child process log end",);
break;
}
info!(child_logger, "{}", line);
}
}
}
});
info!(logger, "exec fifo opened!");
let (prfd, cwfd) = unistd::pipe().context("failed to create pipe")?;
let (crfd, pwfd) = unistd::pipe().context("failed to create pipe")?;
@@ -894,11 +824,32 @@ impl BaseContainer for LinuxContainer {
child_stderr = unsafe { std::process::Stdio::from_raw_fd(stderr) };
}
let old_pid_ns =
fcntl::open(PID_NS_PATH, OFlag::O_CLOEXEC, Mode::empty()).map_err(|e| {
error!(
logger,
"cannot open pid ns path: {} with error: {:?}", PID_NS_PATH, e
);
e
})?;
//restore the parent's process's pid namespace.
defer!({
let _ = sched::setns(old_pid_ns, CloneFlags::CLONE_NEWPID)
.map_err(|e| warn!(logger, "settns CLONE_NEWPID {:?}", e));
let _ = unistd::close(old_pid_ns)
.map_err(|e| warn!(logger, "close old pid namespace {:?}", e));
});
let pidns = get_pid_namespace(&self.logger, linux)?;
defer!(if let Some(pid) = pidns {
let _ = unistd::close(pid);
});
if pidns.is_some() {
sched::setns(pidns.unwrap(), CloneFlags::CLONE_NEWPID)
.context("failed to join pidns")?;
unistd::close(pidns.unwrap())?;
} else {
sched::unshare(CloneFlags::CLONE_NEWPID)?;
}
let exec_path = std::env::current_exe()?;
let mut child = std::process::Command::new(exec_path);
@@ -917,31 +868,13 @@ impl BaseContainer for LinuxContainer {
child = child.env(FIFO_FD, format!("{}", fifofd));
}
if pidns.is_some() {
child = child.env(PIDNS_FD, format!("{}", pidns.unwrap()));
}
let child = child.spawn()?;
unistd::close(crfd)?;
unistd::close(cwfd)?;
unistd::close(cfd_log)?;
// get container process's pid
let pid_buf = read_sync(prfd)?;
let pid_str = std::str::from_utf8(&pid_buf).context("get pid string")?;
let pid = match pid_str.parse::<i32>() {
Ok(i) => i,
Err(e) => {
return Err(anyhow!(format!(
"failed to get container process's pid: {:?}",
e
)));
}
};
p.pid = pid;
p.pid = child.id() as i32;
if p.init {
self.init_process_pid = p.pid;
}
@@ -998,7 +931,7 @@ impl BaseContainer for LinuxContainer {
.join()
.map_err(|e| warn!(logger, "joining log handler {:?}", e));
info!(logger, "create process completed");
Ok(())
return Ok(());
}
fn run(&mut self, p: Process) -> Result<()> {
@@ -1030,7 +963,7 @@ impl BaseContainer for LinuxContainer {
}
self.status.transition(Status::STOPPED);
mount::umount2(
nix::mount::umount2(
spec.root.as_ref().unwrap().path.as_str(),
MntFlags::MNT_DETACH,
)?;
@@ -1140,7 +1073,7 @@ fn get_pid_namespace(logger: &Logger, linux: &Linux) -> Result<Option<RawFd>> {
}
let fd =
fcntl::open(ns.path.as_str(), OFlag::O_RDONLY, Mode::empty()).map_err(|e| {
fcntl::open(ns.path.as_str(), OFlag::O_CLOEXEC, Mode::empty()).map_err(|e| {
error!(
logger,
"cannot open type: {} path: {}",
@@ -1177,34 +1110,6 @@ fn get_namespaces(linux: &Linux) -> Vec<LinuxNamespace> {
.collect()
}
pub fn setup_child_logger(fd: RawFd, child_logger: Logger) -> Result<std::thread::JoinHandle<()>> {
let builder = thread::Builder::new();
builder
.spawn(move || {
let log_file = unsafe { std::fs::File::from_raw_fd(fd) };
let mut reader = BufReader::new(log_file);
loop {
let mut line = String::new();
match reader.read_line(&mut line) {
Err(e) => {
info!(child_logger, "read child process log error: {:?}", e);
break;
}
Ok(count) => {
if count == 0 {
info!(child_logger, "read child process log end",);
break;
}
info!(child_logger, "{}", line);
}
}
}
})
.map_err(|e| anyhow!(e).context("failed to create thread"))
}
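For reference, a minimal usage sketch of the setup_child_logger helper shown above (not part of the diff; it assumes slog_scope already has a logger installed and uses a plain pipe to stand in for the child process's log fd):
use slog::o;
use std::io::Write;
use std::os::unix::io::FromRawFd;

fn demo_child_logger() -> anyhow::Result<()> {
    // A pipe standing in for the child process's log fd.
    let (rfd, wfd) = nix::unistd::pipe()?;
    let child_logger = slog_scope::logger().new(o!("action" => "child process log"));
    let handle = setup_child_logger(rfd, child_logger)?;

    // Play the role of the child: write one line, then drop the writer so the
    // reader thread sees EOF (read_line returns Ok(0)) and leaves its loop.
    let mut writer = unsafe { std::fs::File::from_raw_fd(wfd) };
    writeln!(writer, "hello from the child")?;
    drop(writer);

    let _ = handle.join();
    Ok(())
}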
fn join_namespaces(
logger: &Logger,
spec: &Spec,
@@ -1259,9 +1164,11 @@ fn join_namespaces(
}
// apply cgroups
if p.init && res.is_some() {
info!(logger, "apply cgroups!");
cm.set(res.unwrap(), false)?;
if p.init {
if res.is_some() {
info!(logger, "apply cgroups!");
cm.set(res.unwrap(), false)?;
}
}
if res.is_some() {
@@ -1557,7 +1464,7 @@ fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
}
}
Ok(())
return Ok(());
}
ForkResult::Child => {
@@ -1660,11 +1567,13 @@ fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
error
}
}
} else if let Ok(s) = rx.recv() {
s
} else {
let _ = signal::kill(Pid::from_raw(pid), Some(Signal::SIGKILL));
-libc::EPIPE
if let Ok(s) = rx.recv() {
s
} else {
let _ = signal::kill(Pid::from_raw(pid), Some(Signal::SIGKILL));
-libc::EPIPE
}
}
};
@@ -1682,18 +1591,6 @@ fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
#[cfg(test)]
mod tests {
use super::*;
use crate::process::Process;
use crate::skip_if_not_root;
use std::fs;
use std::os::unix::fs::MetadataExt;
use std::os::unix::io::AsRawFd;
use tempfile::tempdir;
macro_rules! sl {
() => {
slog_scope::logger()
};
}
#[test]
fn test_status_transtition() {
@@ -1712,318 +1609,4 @@ mod tests {
assert_eq!(pre_status, status.pre_status());
}
}
#[test]
fn test_set_stdio_permissions() {
skip_if_not_root!();
let meta = fs::metadata("/dev/stdin").unwrap();
let old_uid = meta.uid();
let uid = 1000;
set_stdio_permissions(uid).unwrap();
let meta = fs::metadata("/dev/stdin").unwrap();
assert_eq!(meta.uid(), uid);
let meta = fs::metadata("/dev/stdout").unwrap();
assert_eq!(meta.uid(), uid);
let meta = fs::metadata("/dev/stderr").unwrap();
assert_eq!(meta.uid(), uid);
// restore the uid
set_stdio_permissions(old_uid).unwrap();
}
#[test]
fn test_status_fmt() {
assert_eq!("\"created\"", format!("{:?}", Status::CREATED));
assert_eq!("\"running\"", format!("{:?}", Status::RUNNING));
assert_eq!("\"paused\"", format!("{:?}", Status::PAUSED));
assert_eq!("\"stopped\"", format!("{:?}", Status::STOPPED));
}
#[test]
fn test_namespaces() {
lazy_static::initialize(&NAMESPACES);
assert_eq!(NAMESPACES.len(), 7);
let ns = NAMESPACES.get("user");
assert!(ns.is_some());
let ns = NAMESPACES.get("ipc");
assert!(ns.is_some());
let ns = NAMESPACES.get("pid");
assert!(ns.is_some());
let ns = NAMESPACES.get("network");
assert!(ns.is_some());
let ns = NAMESPACES.get("mount");
assert!(ns.is_some());
let ns = NAMESPACES.get("uts");
assert!(ns.is_some());
let ns = NAMESPACES.get("cgroup");
assert!(ns.is_some());
}
#[test]
fn test_typetoname() {
lazy_static::initialize(&TYPETONAME);
assert_eq!(TYPETONAME.len(), 7);
let ns = TYPETONAME.get("user");
assert!(ns.is_some());
let ns = TYPETONAME.get("ipc");
assert!(ns.is_some());
let ns = TYPETONAME.get("pid");
assert!(ns.is_some());
let ns = TYPETONAME.get("network");
assert!(ns.is_some());
let ns = TYPETONAME.get("mount");
assert!(ns.is_some());
let ns = TYPETONAME.get("uts");
assert!(ns.is_some());
let ns = TYPETONAME.get("cgroup");
assert!(ns.is_some());
}
fn create_dummy_opts() -> CreateOpts {
let mut root = oci::Root::default();
root.path = "/tmp".to_string();
let linux = Linux::default();
let mut spec = Spec::default();
spec.root = Some(root).into();
spec.linux = Some(linux).into();
CreateOpts {
cgroup_name: "".to_string(),
use_systemd_cgroup: false,
no_pivot_root: false,
no_new_keyring: false,
spec: Some(spec),
rootless_euid: false,
rootless_cgroup: false,
}
}
fn new_linux_container<U, F: FnOnce(LinuxContainer) -> Result<U, anyhow::Error>>(
op: F,
) -> Result<U, anyhow::Error> {
// Create a temporary directory
tempdir()
.map_err(|e| anyhow!(e).context("tempdir failed"))
.and_then(|p: tempfile::TempDir| {
// Create a new container
LinuxContainer::new(
"some_id",
&p.path().join("rootfs").to_str().unwrap(),
create_dummy_opts(),
&slog_scope::logger(),
)
.and_then(op)
})
}
#[test]
fn test_linuxcontainer_pause_bad_status() {
let ret = new_linux_container(|mut c: LinuxContainer| {
// Change status to PAUSED; c.pause() should then fail
c.status.transition(Status::PAUSED);
c.pause().map_err(|e| anyhow!(e))
});
assert!(ret.is_err(), "Expecting error, Got {:?}", ret);
assert!(format!("{:?}", ret).contains("failed to pause container"))
}
#[test]
fn test_linuxcontainer_pause_cgroupmgr_is_none() {
let ret = new_linux_container(|mut c: LinuxContainer| {
c.cgroup_manager = None;
c.pause().map_err(|e| anyhow!(e))
});
assert!(ret.is_err(), "Expecting error, Got {:?}", ret);
}
#[test]
fn test_linuxcontainer_pause() {
let ret = new_linux_container(|mut c: LinuxContainer| {
c.cgroup_manager = FsManager::new("").ok();
c.pause().map_err(|e| anyhow!(e))
});
assert!(ret.is_ok(), "Expecting Ok, Got {:?}", ret);
}
#[test]
fn test_linuxcontainer_resume_bad_status() {
let ret = new_linux_container(|mut c: LinuxContainer| {
// Change status to CREATED; c.resume() should then fail
c.status.transition(Status::CREATED);
c.resume().map_err(|e| anyhow!(e))
});
assert!(ret.is_err(), "Expecting error, Got {:?}", ret);
assert!(format!("{:?}", ret).contains("not paused"))
}
#[test]
fn test_linuxcontainer_resume_cgroupmgr_is_none() {
let ret = new_linux_container(|mut c: LinuxContainer| {
c.status.transition(Status::PAUSED);
c.cgroup_manager = None;
c.resume().map_err(|e| anyhow!(e))
});
assert!(ret.is_err(), "Expecting error, Got {:?}", ret);
}
#[test]
fn test_linuxcontainer_resume() {
let ret = new_linux_container(|mut c: LinuxContainer| {
c.cgroup_manager = FsManager::new("").ok();
// Change status to paused, this way we can resume it
c.status.transition(Status::PAUSED);
c.resume().map_err(|e| anyhow!(e))
});
assert!(ret.is_ok(), "Expecting Ok, Got {:?}", ret);
}
#[test]
fn test_linuxcontainer_state() {
let ret = new_linux_container(|c: LinuxContainer| c.state());
assert!(ret.is_err(), "Expecting Err, Got {:?}", ret);
assert!(
format!("{:?}", ret).contains("not supported"),
"Got: {:?}",
ret
)
}
#[test]
fn test_linuxcontainer_oci_state_no_root_parent() {
let ret = new_linux_container(|mut c: LinuxContainer| {
c.config.spec.as_mut().unwrap().root.as_mut().unwrap().path = "/".to_string();
c.oci_state()
});
assert!(ret.is_err(), "Expecting Err, Got {:?}", ret);
assert!(
format!("{:?}", ret).contains("could not get root parent"),
"Got: {:?}",
ret
)
}
#[test]
fn test_linuxcontainer_oci_state() {
let ret = new_linux_container(|c: LinuxContainer| c.oci_state());
assert!(ret.is_ok(), "Expecting Ok, Got {:?}", ret);
}
#[test]
fn test_linuxcontainer_config() {
let ret = new_linux_container(|c: LinuxContainer| Ok(c));
assert!(ret.is_ok(), "Expecting ok, Got {:?}", ret);
assert!(
ret.as_ref().unwrap().config().is_ok(),
"Expecting ok, Got {:?}",
ret
);
}
#[test]
fn test_linuxcontainer_processes() {
let ret = new_linux_container(|c: LinuxContainer| c.processes());
assert!(ret.is_ok(), "Expecting Ok, Got {:?}", ret);
}
#[test]
fn test_linuxcontainer_get_process_not_found() {
let _ = new_linux_container(|mut c: LinuxContainer| {
let p = c.get_process("123");
assert!(p.is_err(), "Expecting Err, Got {:?}", p);
Ok(())
});
}
#[test]
fn test_linuxcontainer_get_process() {
let _ = new_linux_container(|mut c: LinuxContainer| {
c.processes.insert(
1,
Process::new(&sl!(), &oci::Process::default(), "123", true, 1).unwrap(),
);
let p = c.get_process("123");
assert!(p.is_ok(), "Expecting Ok, Got {:?}", p);
Ok(())
});
}
#[test]
fn test_linuxcontainer_stats() {
let ret = new_linux_container(|c: LinuxContainer| c.stats());
assert!(ret.is_ok(), "Expecting Ok, Got {:?}", ret);
}
#[test]
fn test_linuxcontainer_set() {
let ret =
new_linux_container(|mut c: LinuxContainer| c.set(oci::LinuxResources::default()));
assert!(ret.is_ok(), "Expecting Ok, Got {:?}", ret);
}
#[test]
fn test_linuxcontainer_start() {
let ret = new_linux_container(|mut c: LinuxContainer| {
c.start(Process::new(&sl!(), &oci::Process::default(), "123", true, 1).unwrap())
});
assert!(ret.is_err(), "Expecting Err, Got {:?}", ret);
}
#[test]
fn test_linuxcontainer_run() {
let ret = new_linux_container(|mut c: LinuxContainer| {
c.run(Process::new(&sl!(), &oci::Process::default(), "123", true, 1).unwrap())
});
assert!(ret.is_err(), "Expecting Err, Got {:?}", ret);
}
#[test]
fn test_linuxcontainer_destroy() {
let ret = new_linux_container(|mut c: LinuxContainer| c.destroy());
assert!(ret.is_ok(), "Expecting Ok, Got {:?}", ret);
}
#[test]
fn test_linuxcontainer_signal() {
let ret =
new_linux_container(|c: LinuxContainer| c.signal(nix::sys::signal::SIGCONT, true));
assert!(ret.is_ok(), "Expecting Ok, Got {:?}", ret);
}
#[test]
fn test_linuxcontainer_exec() {
let ret = new_linux_container(|mut c: LinuxContainer| c.exec());
assert!(ret.is_err(), "Expecting Err, Got {:?}", ret);
}
#[test]
fn test_linuxcontainer_do_init_child() {
let ret = do_init_child(std::io::stdin().as_raw_fd());
assert!(ret.is_err(), "Expecting Err, Got {:?}", ret);
}
}

View File

@@ -41,7 +41,6 @@ pub mod cgroups;
pub mod container;
pub mod mount;
pub mod process;
pub mod reaper;
pub mod specconv;
pub mod sync;
pub mod validator;
@@ -310,7 +309,7 @@ pub fn resources_grpc_to_oci(res: &grpcLinuxResources) -> ociLinuxResources {
swap: Some(mem.Swap),
kernel: Some(mem.Kernel),
kernel_tcp: Some(mem.KernelTCP),
swappiness: Some(mem.Swappiness as i64),
swapiness: Some(mem.Swappiness as i64),
disable_oom_killer: Some(mem.DisableOOMKiller),
})
} else {

View File

@@ -3,7 +3,7 @@
// SPDX-License-Identifier: Apache-2.0
//
use anyhow::{anyhow, bail, Context, Result};
use anyhow::{anyhow, bail, Context, Error, Result};
use libc::uid_t;
use nix::errno::Errno;
use nix::fcntl::{self, OFlag};
@@ -22,11 +22,13 @@ use std::os::unix::io::RawFd;
use std::path::{Path, PathBuf};
use path_absolutize::*;
use scan_fmt;
use std::fs::File;
use std::io::{BufRead, BufReader};
use crate::container::DEFAULT_DEVICES;
use crate::sync::write_count;
use lazy_static;
use std::string::ToString;
use crate::log_child;
@@ -48,7 +50,7 @@ pub struct Info {
vfs_opts: String,
}
const MOUNTINFOFORMAT: &str = "{d} {d} {d}:{d} {} {} {} {}";
const MOUNTINFOFORMAT: &'static str = "{d} {d} {d}:{d} {} {} {} {}";
const PROC_PATH: &str = "/proc";
// since libc doesn't define this const for musl, redefine it here.
@@ -112,12 +114,7 @@ lazy_static! {
#[inline(always)]
#[allow(unused_variables)]
pub fn mount<
P1: ?Sized + NixPath,
P2: ?Sized + NixPath,
P3: ?Sized + NixPath,
P4: ?Sized + NixPath,
>(
fn mount<P1: ?Sized + NixPath, P2: ?Sized + NixPath, P3: ?Sized + NixPath, P4: ?Sized + NixPath>(
source: Option<&P1>,
target: &P2,
fstype: Option<&P3>,
@@ -132,7 +129,7 @@ pub fn mount<
#[inline(always)]
#[allow(unused_variables)]
pub fn umount2<P: ?Sized + NixPath>(
fn umount2<P: ?Sized + NixPath>(
target: &P,
flags: MntFlags,
) -> std::result::Result<(), nix::Error> {
@@ -156,7 +153,7 @@ pub fn init_rootfs(
let linux = &spec
.linux
.as_ref()
.ok_or_else(|| anyhow!("Could not get linux configuration from spec"))?;
.ok_or::<Error>(anyhow!("Could not get linux configuration from spec"))?;
let mut flags = MsFlags::MS_REC;
match PROPAGATION.get(&linux.rootfs_propagation.as_str()) {
@@ -167,14 +164,14 @@ pub fn init_rootfs(
let root = spec
.root
.as_ref()
.ok_or_else(|| anyhow!("Could not get rootfs path from spec"))
.ok_or(anyhow!("Could not get rootfs path from spec"))
.and_then(|r| {
fs::canonicalize(r.path.as_str()).context("Could not canonicalize rootfs path")
})?;
let rootfs = (*root)
.to_str()
.ok_or_else(|| anyhow!("Could not convert rootfs path to string"))?;
.ok_or(anyhow!("Could not convert rootfs path to string"))?;
mount(None::<&str>, "/", None::<&str>, flags, None::<&str>)?;
@@ -188,10 +185,9 @@ pub fn init_rootfs(
None::<&str>,
)?;
let mut bind_mount_dev = false;
for m in &spec.mounts {
let (mut flags, data) = parse_mount(&m);
if !m.destination.starts_with('/') || m.destination.contains("..") {
if !m.destination.starts_with("/") || m.destination.contains("..") {
return Err(anyhow!(
"the mount destination {} is invalid",
m.destination
@@ -202,9 +198,6 @@ pub fn init_rootfs(
mount_cgroups(cfd_log, &m, rootfs, flags, &data, cpath, mounts)?;
} else {
if m.destination == "/dev" {
if m.r#type == "bind" {
bind_mount_dev = true;
}
flags &= !MsFlags::MS_RDONLY;
}
@@ -246,14 +239,9 @@ pub fn init_rootfs(
let olddir = unistd::getcwd()?;
unistd::chdir(rootfs)?;
// in case the /dev directory was bind-mounted from the guest,
// there's no need to create device nodes and symlinks
// in /dev.
if !bind_mount_dev {
default_symlinks()?;
create_devices(&linux.devices, bind_device)?;
ensure_ptmx()?;
}
default_symlinks()?;
create_devices(&linux.devices, bind_device)?;
ensure_ptmx()?;
unistd::chdir(&olddir)?;
@@ -285,9 +273,9 @@ fn check_proc_mount(m: &Mount) -> Result<()> {
// only allow a mount on top of proc if its source is "proc"
unsafe {
let mut stats = MaybeUninit::<libc::statfs>::uninit();
if m.source
if let Ok(_) = m
.source
.with_nix_path(|path| libc::statfs(path.as_ptr(), stats.as_mut_ptr()))
.is_ok()
{
if stats.assume_init().f_type == PROC_SUPER_MAGIC {
return Ok(());
@@ -310,7 +298,7 @@ fn check_proc_mount(m: &Mount) -> Result<()> {
)));
}
Ok(())
return Ok(());
}
fn mount_cgroups_v2(cfd_log: RawFd, m: &Mount, rootfs: &str, flags: MsFlags) -> Result<()> {
@@ -598,14 +586,15 @@ pub fn ms_move_root(rootfs: &str) -> Result<bool> {
let abs_root_buf = root_path.absolutize()?;
let abs_root = abs_root_buf
.to_str()
.ok_or_else(|| anyhow!("failed to parse {} to absolute path", rootfs))?;
.ok_or::<Error>(anyhow!("failed to parse {} to absolute path", rootfs))?;
for info in mount_infos.iter() {
let mount_point = Path::new(&info.mount_point);
let abs_mount_buf = mount_point.absolutize()?;
let abs_mount_point = abs_mount_buf
.to_str()
.ok_or_else(|| anyhow!("failed to parse {} to absolute path", info.mount_point))?;
let abs_mount_point = abs_mount_buf.to_str().ok_or::<Error>(anyhow!(
"failed to parse {} to absolute path",
info.mount_point
))?;
let abs_mount_point_string = String::from(abs_mount_point);
// Unmount every sysfs and proc file system, except those under the container rootfs
@@ -766,7 +755,7 @@ fn mount_from(
Ok(())
}
static SYMLINKS: &[(&str, &str)] = &[
static SYMLINKS: &'static [(&'static str, &'static str)] = &[
("/proc/self/fd", "dev/fd"),
("/proc/self/fd/0", "dev/stdin"),
("/proc/self/fd/1", "dev/stdout"),
@@ -899,7 +888,7 @@ pub fn finish_rootfs(cfd_log: RawFd, spec: &Spec) -> Result<()> {
}
fn mask_path(path: &str) -> Result<()> {
if !path.starts_with('/') || path.contains("..") {
if !path.starts_with("/") || path.contains("..") {
return Err(nix::Error::Sys(Errno::EINVAL).into());
}
@@ -928,7 +917,7 @@ fn mask_path(path: &str) -> Result<()> {
}
fn readonly_path(path: &str) -> Result<()> {
if !path.starts_with('/') || path.contains("..") {
if !path.starts_with("/") || path.contains("..") {
return Err(nix::Error::Sys(Errno::EINVAL).into());
}

View File

@@ -14,7 +14,6 @@ use nix::sys::wait::{self, WaitStatus};
use nix::unistd::{self, Pid};
use nix::Result;
use crate::reaper::Epoller;
use oci::Process as OCIProcess;
use slog::Logger;
@@ -41,7 +40,6 @@ pub struct Process {
pub exit_watchers: Vec<Sender<i32>>,
pub oci: OCIProcess,
pub logger: Logger,
pub epoller: Option<Epoller>,
}
pub trait ProcessOperations {
@@ -93,7 +91,6 @@ impl Process {
exit_watchers: Vec::new(),
oci: ocip.clone(),
logger: logger.clone(),
epoller: None,
};
info!(logger, "before create console socket!");
@@ -115,29 +112,6 @@ impl Process {
}
Ok(p)
}
pub fn close_epoller(&mut self) {
if let Some(epoller) = self.epoller.take() {
epoller.close();
}
}
pub fn create_epoller(&mut self) -> anyhow::Result<()> {
match self.term_master {
Some(term_master) => {
// add epoller to process
let epoller = Epoller::new(&self.logger, term_master)?;
self.epoller = Some(epoller)
}
None => {
info!(
self.logger,
"try to add epoller to a process without a term master fd"
);
}
}
Ok(())
}
}
fn create_extended_pipe(flags: OFlag, pipe_size: i32) -> Result<(RawFd, RawFd)> {

View File

@@ -1,150 +0,0 @@
// Copyright (c) 2020 Ant Group
//
// SPDX-License-Identifier: Apache-2.0
//
use nix::fcntl::OFlag;
use slog::Logger;
use nix::unistd;
use std::os::unix::io::RawFd;
use anyhow::Result;
const MAX_EVENTS: usize = 2;
#[derive(Debug, Clone)]
pub struct Epoller {
logger: Logger,
epoll_fd: RawFd,
// rfd and wfd are the two ends of a pipe used to synchronize
// readStdio with the process's exit: once the process exits,
// one end is closed to notify readStdio that it should stop
// waiting on the process's terminal, which may have been
// inherited by the process's children and not yet closed.
rfd: RawFd,
wfd: RawFd,
}
impl Epoller {
pub fn new(logger: &Logger, fd: RawFd) -> Result<Epoller> {
let epoll_fd = epoll::create(true)?;
let (rfd, wfd) = unistd::pipe2(OFlag::O_CLOEXEC)?;
let mut epoller = Self {
logger: logger.clone(),
epoll_fd,
rfd,
wfd,
};
epoller.add(rfd)?;
epoller.add(fd)?;
Ok(epoller)
}
pub fn close_wfd(&self) {
let _ = unistd::close(self.wfd);
}
pub fn close(&self) {
let _ = unistd::close(self.rfd);
let _ = unistd::close(self.wfd);
let _ = unistd::close(self.epoll_fd);
}
fn add(&mut self, fd: RawFd) -> Result<()> {
info!(self.logger, "Epoller add fd {}", fd);
// add registers an fd with the epoll instance, which is used to monitor the
// process's pty master and one end of its exit-notify pipe. Those files are
// registered with level-triggered notification.
epoll::ctl(
self.epoll_fd,
epoll::ControlOptions::EPOLL_CTL_ADD,
fd,
epoll::Event::new(
epoll::Events::EPOLLHUP
| epoll::Events::EPOLLIN
| epoll::Events::EPOLLERR
| epoll::Events::EPOLLRDHUP,
fd as u64,
),
)?;
Ok(())
}
// There are three cases the epoller can see when it polls:
// a: only the pty master gets an event (an fd other than self.rfd);
// b: only the pipe gets an event (self.rfd);
// c: both the pty and the pipe have events.
// For case a, there is output in the process's terminal, so just read the
// terminal and send the data out. For case b, the process has exited and
// there is no data left in the terminal, so just return "EOF" to end the io.
// For case c, the process has exited but there is still data in the terminal
// that hasn't been sent out, so send that data out first and then send "EOF"
// to end the io.
pub fn poll(&self) -> Result<RawFd> {
let mut rfd = self.rfd;
let mut epoll_events = vec![epoll::Event::new(epoll::Events::empty(), 0); MAX_EVENTS];
loop {
let event_count = match epoll::wait(self.epoll_fd, -1, epoll_events.as_mut_slice()) {
Ok(ec) => ec,
Err(e) => {
info!(self.logger, "loop wait err {:?}", e);
// EINTR: The call was interrupted by a signal handler before either
// any of the requested events occurred or the timeout expired
if e.kind() == std::io::ErrorKind::Interrupted {
continue;
}
return Err(e.into());
}
};
for event in epoll_events.iter().take(event_count) {
let fd = event.data as i32;
// rfd defaults to one end of the process's exit pipe; here we check whether
// any event occurred on the process's terminal. If so, it should be handled
// first; otherwise the process has exited and there is nothing left to read
// from its terminal.
if fd != rfd {
rfd = fd;
break;
}
}
break;
}
Ok(rfd)
}
}
#[cfg(test)]
mod tests {
use super::Epoller;
use nix::fcntl::OFlag;
use nix::unistd;
use std::thread;
#[test]
fn test_epoller_poll() {
let logger = slog::Logger::root(slog::Discard, o!());
let (rfd, wfd) = unistd::pipe2(OFlag::O_CLOEXEC).unwrap();
let epoller = Epoller::new(&logger, rfd).unwrap();
let child = thread::spawn(move || {
let _ = unistd::write(wfd, "temporary file's content".as_bytes());
});
// wait for the write to finish
let _ = child.join();
let fd = epoller.poll().unwrap();
assert_eq!(fd, rfd, "Should get rfd");
epoller.close();
}
}

View File

@@ -72,15 +72,7 @@ fn read_count(fd: RawFd, count: usize) -> Result<Vec<u8>> {
}
}
if len != count {
Err(anyhow::anyhow!(
"invalid read count expect {} get {}",
count,
len
))
} else {
Ok(v[0..len].to_vec())
}
Ok(v[0..len].to_vec())
}
pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
@@ -96,14 +88,14 @@ pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
let msg: i32 = i32::from_be_bytes(buf_array);
match msg {
SYNC_SUCCESS => Ok(Vec::new()),
SYNC_SUCCESS => return Ok(Vec::new()),
SYNC_DATA => {
let buf = read_count(fd, MSG_SIZE)?;
let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
let msg_length: i32 = i32::from_be_bytes(buf_array);
let data_buf = read_count(fd, msg_length as usize)?;
Ok(data_buf)
return Ok(data_buf);
}
SYNC_FAILED => {
let mut error_buf = vec![];
@@ -127,9 +119,9 @@ pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
}
};
Err(anyhow!(error_str))
return Err(anyhow!(error_str));
}
_ => Err(anyhow!("error in receive sync message")),
_ => return Err(anyhow!("error in receive sync message")),
}
}
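For reference, a hedged sketch of the frame layout read_sync expects on the wire (assuming MSG_SIZE is 4 and SYNC_DATA is the i32 message-type constant used above): a big-endian i32 message type, then, for SYNC_DATA, a big-endian i32 payload length followed by the payload itself.
fn encode_sync_data(payload: &[u8]) -> Vec<u8> {
    let mut frame = Vec::with_capacity(2 * MSG_SIZE + payload.len());
    // 4-byte big-endian message type, mirroring i32::from_be_bytes in read_sync
    frame.extend_from_slice(&SYNC_DATA.to_be_bytes());
    // 4-byte big-endian payload length
    frame.extend_from_slice(&(payload.len() as i32).to_be_bytes());
    // payload bytes
    frame.extend_from_slice(payload);
    frame
}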

View File

@@ -4,21 +4,14 @@
//
use crate::container::Config;
use anyhow::{anyhow, Context, Error, Result};
use anyhow::{anyhow, Result};
use lazy_static;
use nix::errno::Errno;
use oci::{Linux, LinuxIDMapping, LinuxNamespace, Spec};
use oci::{LinuxIDMapping, LinuxNamespace, Spec};
use std::collections::HashMap;
use std::path::{Component, PathBuf};
fn einval() -> Error {
anyhow!(nix::Error::from_errno(Errno::EINVAL))
}
fn get_linux(oci: &Spec) -> Result<&Linux> {
oci.linux.as_ref().ok_or_else(einval)
}
fn contain_namespace(nses: &[LinuxNamespace], key: &str) -> bool {
fn contain_namespace(nses: &Vec<LinuxNamespace>, key: &str) -> bool {
for ns in nses {
if ns.r#type.as_str() == key {
return true;
@@ -28,28 +21,30 @@ fn contain_namespace(nses: &[LinuxNamespace], key: &str) -> bool {
false
}
fn get_namespace_path(nses: &[LinuxNamespace], key: &str) -> Result<String> {
fn get_namespace_path(nses: &Vec<LinuxNamespace>, key: &str) -> Result<String> {
for ns in nses {
if ns.r#type.as_str() == key {
return Ok(ns.path.clone());
}
}
Err(einval())
Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)))
}
fn rootfs(root: &str) -> Result<()> {
let path = PathBuf::from(root);
// not an absolute path, or does not exist
if !path.exists() || !path.is_absolute() {
return Err(einval());
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
// symbolic link? ..?
let mut stack: Vec<String> = Vec::new();
for c in path.components() {
if stack.is_empty() && (c == Component::RootDir || c == Component::ParentDir) {
continue;
if stack.is_empty() {
if c == Component::RootDir || c == Component::ParentDir {
continue;
}
}
if c == Component::ParentDir {
@@ -57,11 +52,7 @@ fn rootfs(root: &str) -> Result<()> {
continue;
}
if let Some(v) = c.as_os_str().to_str() {
stack.push(v.to_string());
} else {
return Err(einval());
}
stack.push(c.as_os_str().to_str().unwrap().to_string());
}
let mut cleaned = PathBuf::from("/");
@@ -69,10 +60,10 @@ fn rootfs(root: &str) -> Result<()> {
cleaned.push(e);
}
let canon = path.canonicalize().context("canonicalize")?;
let canon = path.canonicalize()?;
if cleaned != canon {
// There is symbolic in path
return Err(einval());
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
Ok(())
@@ -83,27 +74,29 @@ fn network(_oci: &Spec) -> Result<()> {
}
fn hostname(oci: &Spec) -> Result<()> {
if oci.hostname.is_empty() || oci.hostname == "" {
if oci.hostname.is_empty() || oci.hostname == "".to_string() {
return Ok(());
}
let linux = get_linux(oci)?;
if oci.linux.is_none() {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
let linux = oci.linux.as_ref().unwrap();
if !contain_namespace(&linux.namespaces, "uts") {
return Err(einval());
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
Ok(())
}
fn security(oci: &Spec) -> Result<()> {
let linux = get_linux(oci)?;
if linux.masked_paths.is_empty() && linux.readonly_paths.is_empty() {
let linux = oci.linux.as_ref().unwrap();
if linux.masked_paths.len() == 0 && linux.readonly_paths.len() == 0 {
return Ok(());
}
if !contain_namespace(&linux.namespaces, "mount") {
return Err(einval());
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
// don't care about selinux at present
@@ -111,19 +104,18 @@ fn security(oci: &Spec) -> Result<()> {
Ok(())
}
fn idmapping(maps: &[LinuxIDMapping]) -> Result<()> {
fn idmapping(maps: &Vec<LinuxIDMapping>) -> Result<()> {
for map in maps {
if map.size > 0 {
return Ok(());
}
}
Err(einval())
Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)))
}
fn usernamespace(oci: &Spec) -> Result<()> {
let linux = get_linux(oci)?;
let linux = oci.linux.as_ref().unwrap();
if contain_namespace(&linux.namespaces, "user") {
let user_ns = PathBuf::from("/proc/self/ns/user");
if !user_ns.exists() {
@@ -131,12 +123,12 @@ fn usernamespace(oci: &Spec) -> Result<()> {
}
// check that the idmappings are correct; idmaps with zero size
// have been seen passed to the agent
idmapping(&linux.uid_mappings).context("idmapping uid")?;
idmapping(&linux.gid_mappings).context("idmapping gid")?;
idmapping(&linux.uid_mappings)?;
idmapping(&linux.gid_mappings)?;
} else {
// no user namespace but idmap
if !linux.uid_mappings.is_empty() || !linux.gid_mappings.is_empty() {
return Err(einval());
if linux.uid_mappings.len() != 0 || linux.gid_mappings.len() != 0 {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
}
@@ -144,8 +136,7 @@ fn usernamespace(oci: &Spec) -> Result<()> {
}
fn cgroupnamespace(oci: &Spec) -> Result<()> {
let linux = get_linux(oci)?;
let linux = oci.linux.as_ref().unwrap();
if contain_namespace(&linux.namespaces, "cgroup") {
let path = PathBuf::from("/proc/self/ns/cgroup");
if !path.exists() {
@@ -174,42 +165,43 @@ fn check_host_ns(path: &str) -> Result<()> {
let cpath = PathBuf::from(path);
let hpath = PathBuf::from("/proc/self/ns/net");
let real_hpath = hpath
.read_link()
.context(format!("read link {:?}", hpath))?;
let meta = cpath
.symlink_metadata()
.context(format!("symlink metadata {:?}", cpath))?;
let real_hpath = hpath.read_link()?;
let meta = cpath.symlink_metadata()?;
let file_type = meta.file_type();
if !file_type.is_symlink() {
return Ok(());
}
let real_cpath = cpath
.read_link()
.context(format!("read link {:?}", cpath))?;
let real_cpath = cpath.read_link()?;
if real_cpath == real_hpath {
return Err(einval());
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
Ok(())
}
fn sysctl(oci: &Spec) -> Result<()> {
let linux = get_linux(oci)?;
let linux = oci.linux.as_ref().unwrap();
for (key, _) in linux.sysctl.iter() {
if SYSCTLS.contains_key(key.as_str()) || key.starts_with("fs.mqueue.") {
if contain_namespace(&linux.namespaces, "ipc") {
continue;
} else {
return Err(einval());
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
}
if key.starts_with("net.") {
// the network ns is shared with the guest, don't expect to find it in spec
continue;
if !contain_namespace(&linux.namespaces, "network") {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
let net = get_namespace_path(&linux.namespaces, "network")?;
if net.is_empty() || net == "".to_string() {
continue;
}
check_host_ns(net.as_str())?;
}
if contain_namespace(&linux.namespaces, "uts") {
@@ -218,31 +210,30 @@ fn sysctl(oci: &Spec) -> Result<()> {
}
if key == "kernel.hostname" {
return Err(einval());
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
}
return Err(einval());
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
Ok(())
}
fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
let linux = get_linux(oci)?;
let linux = oci.linux.as_ref().unwrap();
if !contain_namespace(&linux.namespaces, "user") {
return Err(einval());
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
if linux.uid_mappings.is_empty() || linux.gid_mappings.is_empty() {
if linux.uid_mappings.len() == 0 || linux.gid_mappings.len() == 0 {
// rootless containers require at least one UID/GID mapping
return Err(einval());
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
Ok(())
}
fn has_idmapping(maps: &[LinuxIDMapping], id: u32) -> bool {
fn has_idmapping(maps: &Vec<LinuxIDMapping>, id: u32) -> bool {
for map in maps {
if id >= map.container_id && id < map.container_id + map.size {
return true;
@@ -252,7 +243,7 @@ fn has_idmapping(maps: &[LinuxIDMapping], id: u32) -> bool {
}
fn rootless_euid_mount(oci: &Spec) -> Result<()> {
let linux = get_linux(oci)?;
let linux = oci.linux.as_ref().unwrap();
for mnt in oci.mounts.iter() {
for opt in mnt.options.iter() {
@@ -260,20 +251,21 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> {
let fields: Vec<&str> = opt.split('=').collect();
if fields.len() != 2 {
return Err(einval());
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
let id = fields[1]
.trim()
.parse::<u32>()
.context(format!("parse field {}", &fields[1]))?;
let id = fields[1].trim().parse::<u32>()?;
if opt.starts_with("uid=") && !has_idmapping(&linux.uid_mappings, id) {
return Err(einval());
if opt.starts_with("uid=") {
if !has_idmapping(&linux.uid_mappings, id) {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
}
if opt.starts_with("gid=") && !has_idmapping(&linux.gid_mappings, id) {
return Err(einval());
if opt.starts_with("gid=") {
if !has_idmapping(&linux.gid_mappings, id) {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
}
}
}
@@ -282,306 +274,35 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> {
}
fn rootless_euid(oci: &Spec) -> Result<()> {
rootless_euid_mapping(oci).context("rootless euid mapping")?;
rootless_euid_mount(oci).context("rootless euid mount")?;
rootless_euid_mapping(oci)?;
rootless_euid_mount(oci)?;
Ok(())
}
pub fn validate(conf: &Config) -> Result<()> {
lazy_static::initialize(&SYSCTLS);
let oci = conf.spec.as_ref().ok_or_else(einval)?;
let oci = conf.spec.as_ref().unwrap();
if oci.linux.is_none() {
return Err(einval());
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
let root = match oci.root.as_ref() {
Some(v) => v.path.as_str(),
None => return Err(einval()),
};
if oci.root.is_none() {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
let root = oci.root.as_ref().unwrap().path.as_str();
rootfs(root).context("rootfs")?;
network(oci).context("network")?;
hostname(oci).context("hostname")?;
security(oci).context("security")?;
usernamespace(oci).context("usernamespace")?;
cgroupnamespace(oci).context("cgroupnamespace")?;
sysctl(&oci).context("sysctl")?;
rootfs(root)?;
network(oci)?;
hostname(oci)?;
security(oci)?;
usernamespace(oci)?;
cgroupnamespace(oci)?;
sysctl(&oci)?;
if conf.rootless_euid {
rootless_euid(oci).context("rootless euid")?;
rootless_euid(oci)?;
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use oci::Mount;
#[test]
fn test_namespace() {
let namespaces = [
LinuxNamespace {
r#type: "net".to_owned(),
path: "/sys/cgroups/net".to_owned(),
},
LinuxNamespace {
r#type: "uts".to_owned(),
path: "/sys/cgroups/uts".to_owned(),
},
];
assert_eq!(contain_namespace(&namespaces, "net"), true);
assert_eq!(contain_namespace(&namespaces, "uts"), true);
assert_eq!(contain_namespace(&namespaces, ""), false);
assert_eq!(contain_namespace(&namespaces, "Net"), false);
assert_eq!(contain_namespace(&namespaces, "ipc"), false);
assert_eq!(
get_namespace_path(&namespaces, "net").unwrap(),
"/sys/cgroups/net"
);
assert_eq!(
get_namespace_path(&namespaces, "uts").unwrap(),
"/sys/cgroups/uts"
);
get_namespace_path(&namespaces, "").unwrap_err();
get_namespace_path(&namespaces, "Uts").unwrap_err();
get_namespace_path(&namespaces, "ipc").unwrap_err();
}
#[test]
fn test_rootfs() {
rootfs("/_no_exit_fs_xxxxxxxxxxx").unwrap_err();
rootfs("sys").unwrap_err();
rootfs("/proc/self/root").unwrap_err();
rootfs("/proc/self/root/sys").unwrap_err();
rootfs("/proc/self").unwrap_err();
rootfs("/./proc/self").unwrap_err();
rootfs("/proc/././self").unwrap_err();
rootfs("/proc/.././self").unwrap_err();
rootfs("/proc/uptime").unwrap();
rootfs("/../proc/uptime").unwrap();
rootfs("/../../proc/uptime").unwrap();
rootfs("/proc/../proc/uptime").unwrap();
rootfs("/proc/../../proc/uptime").unwrap();
}
#[test]
fn test_hostname() {
let mut spec = Spec::default();
hostname(&spec).unwrap();
spec.hostname = "a.test.com".to_owned();
hostname(&spec).unwrap_err();
let mut linux = Linux::default();
linux.namespaces = vec![
LinuxNamespace {
r#type: "net".to_owned(),
path: "/sys/cgroups/net".to_owned(),
},
LinuxNamespace {
r#type: "uts".to_owned(),
path: "/sys/cgroups/uts".to_owned(),
},
];
spec.linux = Some(linux);
hostname(&spec).unwrap();
}
#[test]
fn test_security() {
let mut spec = Spec::default();
let linux = Linux::default();
spec.linux = Some(linux);
security(&spec).unwrap();
let mut linux = Linux::default();
linux.masked_paths.push("/test".to_owned());
linux.namespaces = vec![
LinuxNamespace {
r#type: "net".to_owned(),
path: "/sys/cgroups/net".to_owned(),
},
LinuxNamespace {
r#type: "uts".to_owned(),
path: "/sys/cgroups/uts".to_owned(),
},
];
spec.linux = Some(linux);
security(&spec).unwrap_err();
let mut linux = Linux::default();
linux.masked_paths.push("/test".to_owned());
linux.namespaces = vec![
LinuxNamespace {
r#type: "net".to_owned(),
path: "/sys/cgroups/net".to_owned(),
},
LinuxNamespace {
r#type: "mount".to_owned(),
path: "/sys/cgroups/mount".to_owned(),
},
];
spec.linux = Some(linux);
security(&spec).unwrap();
}
#[test]
fn test_usernamespace() {
let mut spec = Spec::default();
usernamespace(&spec).unwrap_err();
let linux = Linux::default();
spec.linux = Some(linux);
usernamespace(&spec).unwrap();
let mut linux = Linux::default();
linux.uid_mappings = vec![LinuxIDMapping {
container_id: 0,
host_id: 1000,
size: 0,
}];
spec.linux = Some(linux);
usernamespace(&spec).unwrap_err();
let mut linux = Linux::default();
linux.uid_mappings = vec![LinuxIDMapping {
container_id: 0,
host_id: 1000,
size: 100,
}];
spec.linux = Some(linux);
usernamespace(&spec).unwrap_err();
}
#[test]
fn test_rootless_euid() {
let mut spec = Spec::default();
// Test case: without linux
rootless_euid_mapping(&spec).unwrap_err();
rootless_euid_mount(&spec).unwrap_err();
// Test case: without user namespace
let linux = Linux::default();
spec.linux = Some(linux);
rootless_euid_mapping(&spec).unwrap_err();
// Test case: namespaces present but no user namespace
let linux = spec.linux.as_mut().unwrap();
linux.namespaces = vec![
LinuxNamespace {
r#type: "net".to_owned(),
path: "/sys/cgroups/net".to_owned(),
},
LinuxNamespace {
r#type: "uts".to_owned(),
path: "/sys/cgroups/uts".to_owned(),
},
];
rootless_euid_mapping(&spec).unwrap_err();
let linux = spec.linux.as_mut().unwrap();
linux.namespaces = vec![
LinuxNamespace {
r#type: "net".to_owned(),
path: "/sys/cgroups/net".to_owned(),
},
LinuxNamespace {
r#type: "user".to_owned(),
path: "/sys/cgroups/user".to_owned(),
},
];
linux.uid_mappings = vec![LinuxIDMapping {
container_id: 0,
host_id: 1000,
size: 1000,
}];
linux.gid_mappings = vec![LinuxIDMapping {
container_id: 0,
host_id: 1000,
size: 1000,
}];
rootless_euid_mapping(&spec).unwrap();
spec.mounts.push(Mount {
destination: "/app".to_owned(),
r#type: "tmpfs".to_owned(),
source: "".to_owned(),
options: vec!["uid=10000".to_owned()],
});
rootless_euid_mount(&spec).unwrap_err();
spec.mounts = vec![
(Mount {
destination: "/app".to_owned(),
r#type: "tmpfs".to_owned(),
source: "".to_owned(),
options: vec!["uid=500".to_owned(), "gid=500".to_owned()],
}),
];
rootless_euid(&spec).unwrap();
}
#[test]
fn test_check_host_ns() {
check_host_ns("/proc/self/ns/net").unwrap_err();
check_host_ns("/proc/sys/net/ipv4/tcp_sack").unwrap();
}
#[test]
fn test_sysctl() {
let mut spec = Spec::default();
let mut linux = Linux::default();
linux.namespaces = vec![LinuxNamespace {
r#type: "net".to_owned(),
path: "/sys/cgroups/net".to_owned(),
}];
linux
.sysctl
.insert("kernel.domainname".to_owned(), "test.com".to_owned());
spec.linux = Some(linux);
sysctl(&spec).unwrap_err();
spec.linux
.as_mut()
.unwrap()
.namespaces
.push(LinuxNamespace {
r#type: "uts".to_owned(),
path: "/sys/cgroups/uts".to_owned(),
});
sysctl(&spec).unwrap();
}
#[test]
fn test_validate() {
let spec = Spec::default();
let mut config = Config {
cgroup_name: "container1".to_owned(),
use_systemd_cgroup: false,
no_pivot_root: true,
no_new_keyring: true,
rootless_euid: false,
rootless_cgroup: false,
spec: Some(spec),
};
validate(&config).unwrap_err();
let linux = Linux::default();
config.spec.as_mut().unwrap().linux = Some(linux);
validate(&config).unwrap_err();
}
}

View File

@@ -21,10 +21,7 @@ const DEFAULT_HOTPLUG_TIMEOUT: time::Duration = time::Duration::from_secs(3);
const DEFAULT_CONTAINER_PIPE_SIZE: i32 = 0;
const VSOCK_ADDR: &str = "vsock://-1";
const VSOCK_PORT: u16 = 1024;
// Environment variables used for development and testing
const SERVER_ADDR_ENV_VAR: &str = "KATA_AGENT_SERVER_ADDR";
const LOG_LEVEL_ENV_VAR: &str = "KATA_AGENT_LOG_LEVEL";
// FIXME: unused
const TRACE_MODE_FLAG: &str = "agent.trace";
@@ -142,18 +139,12 @@ impl agentConfig {
self.server_addr = addr;
}
if let Ok(addr) = env::var(LOG_LEVEL_ENV_VAR) {
if let Ok(level) = logrus_to_slog_level(&addr) {
self.log_level = level;
}
}
Ok(())
}
}
fn get_vsock_port(p: &str) -> Result<i32> {
let fields: Vec<&str> = p.split('=').collect();
let fields: Vec<&str> = p.split("=").collect();
if fields.len() != 2 {
return Err(anyhow!("invalid port parameter"));
}
@@ -189,7 +180,7 @@ fn logrus_to_slog_level(logrus_level: &str) -> Result<slog::Level> {
}
fn get_log_level(param: &str) -> Result<slog::Level> {
let fields: Vec<&str> = param.split('=').collect();
let fields: Vec<&str> = param.split("=").collect();
if fields.len() != 2 {
return Err(anyhow!("invalid log level parameter"));
@@ -203,7 +194,7 @@ fn get_log_level(param: &str) -> Result<slog::Level> {
}
fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
let fields: Vec<&str> = param.split('=').collect();
let fields: Vec<&str> = param.split("=").collect();
if fields.len() != 2 {
return Err(anyhow!("invalid hotplug timeout parameter"));
@@ -223,7 +214,7 @@ fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
}
fn get_bool_value(param: &str) -> Result<bool> {
let fields: Vec<&str> = param.split('=').collect();
let fields: Vec<&str> = param.split("=").collect();
if fields.len() != 2 {
return Ok(false);
@@ -234,12 +225,18 @@ fn get_bool_value(param: &str) -> Result<bool> {
// first try to parse as bool value
v.parse::<bool>().or_else(|_err1| {
// then try to parse as integer value
v.parse::<u64>().or(Ok(0)).map(|v| !matches!(v, 0))
v.parse::<u64>().or_else(|_err2| Ok(0)).and_then(|v| {
// only `0` returns false, otherwise returns true
Ok(match v {
0 => false,
_ => true,
})
})
})
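A hedged behaviour sketch for get_bool_value, consistent with the kernel-cmdline test data further down: the text after '=' is tried as a bool first, then as an integer where only 0 maps to false, and anything unparsable falls back to false.
#[test]
fn get_bool_value_behaviour_sketch() {
    assert!(get_bool_value("agent.unified_cgroup_hierarchy=true").unwrap());
    assert!(get_bool_value("agent.unified_cgroup_hierarchy=1").unwrap());
    assert!(get_bool_value("agent.unified_cgroup_hierarchy=11").unwrap());
    assert!(!get_bool_value("agent.unified_cgroup_hierarchy=0").unwrap());
    assert!(!get_bool_value("agent.unified_cgroup_hierarchy=a").unwrap());
}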
}
fn get_container_pipe_size(param: &str) -> Result<i32> {
let fields: Vec<&str> = param.split('=').collect();
let fields: Vec<&str> = param.split("=").collect();
if fields.len() != 2 {
return Err(anyhow!("invalid container pipe size parameter"));
@@ -324,495 +321,297 @@ mod tests {
#[test]
fn test_parse_cmdline() {
const TEST_SERVER_ADDR: &str = "vsock://-1:1024";
#[derive(Debug)]
struct TestData<'a> {
contents: &'a str,
env_vars: Vec<&'a str>,
debug_console: bool,
dev_mode: bool,
log_level: slog::Level,
hotplug_timeout: time::Duration,
container_pipe_size: i32,
server_addr: &'a str,
unified_cgroup_hierarchy: bool,
}
let tests = &[
TestData {
contents: "agent.debug_consolex agent.devmode",
env_vars: Vec::new(),
debug_console: false,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "agent.debug_console agent.devmodex",
env_vars: Vec::new(),
debug_console: true,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "agent.logx=debug",
env_vars: Vec::new(),
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "agent.log=debug",
env_vars: Vec::new(),
debug_console: false,
dev_mode: false,
log_level: slog::Level::Debug,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "agent.log=debug",
env_vars: vec!["KATA_AGENT_LOG_LEVEL=trace"],
debug_console: false,
dev_mode: false,
log_level: slog::Level::Trace,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "",
env_vars: Vec::new(),
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "foo",
env_vars: Vec::new(),
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "foo bar",
env_vars: Vec::new(),
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "foo bar",
env_vars: Vec::new(),
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "foo agent bar",
env_vars: Vec::new(),
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "foo debug_console agent bar devmode",
env_vars: Vec::new(),
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "agent.debug_console",
env_vars: Vec::new(),
debug_console: true,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: " agent.debug_console ",
env_vars: Vec::new(),
debug_console: true,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "agent.debug_console foo",
env_vars: Vec::new(),
debug_console: true,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: " agent.debug_console foo",
env_vars: Vec::new(),
debug_console: true,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "foo agent.debug_console bar",
env_vars: Vec::new(),
debug_console: true,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "foo agent.debug_console",
env_vars: Vec::new(),
debug_console: true,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "foo agent.debug_console ",
env_vars: Vec::new(),
debug_console: true,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "agent.devmode",
env_vars: Vec::new(),
debug_console: false,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: " agent.devmode ",
env_vars: Vec::new(),
debug_console: false,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "agent.devmode foo",
env_vars: Vec::new(),
debug_console: false,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: " agent.devmode foo",
env_vars: Vec::new(),
debug_console: false,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "foo agent.devmode bar",
env_vars: Vec::new(),
debug_console: false,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "foo agent.devmode",
env_vars: Vec::new(),
debug_console: false,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "foo agent.devmode ",
env_vars: Vec::new(),
debug_console: false,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "agent.devmode agent.debug_console",
env_vars: Vec::new(),
debug_console: true,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "agent.devmode agent.debug_console agent.hotplug_timeout=100 agent.unified_cgroup_hierarchy=a",
env_vars: Vec::new(),
debug_console: true,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: time::Duration::from_secs(100),
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "agent.devmode agent.debug_console agent.hotplug_timeout=0 agent.unified_cgroup_hierarchy=11",
env_vars: Vec::new(),
debug_console: true,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: true,
},
TestData {
contents: "agent.devmode agent.debug_console agent.container_pipe_size=2097152 agent.unified_cgroup_hierarchy=false",
env_vars: Vec::new(),
debug_console: true,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: 2097152,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "agent.devmode agent.debug_console agent.container_pipe_size=100 agent.unified_cgroup_hierarchy=true",
env_vars: Vec::new(),
debug_console: true,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: 100,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: true,
},
TestData {
contents: "agent.devmode agent.debug_console agent.container_pipe_size=0 agent.unified_cgroup_hierarchy=0",
env_vars: Vec::new(),
debug_console: true,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "agent.devmode agent.debug_console agent.container_pip_siz=100 agent.unified_cgroup_hierarchy=1",
env_vars: Vec::new(),
debug_console: true,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: true,
},
TestData {
contents: "",
env_vars: Vec::new(),
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_SERVER_ADDR=foo"],
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: "foo",
unified_cgroup_hierarchy: false,
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_SERVER_ADDR=="],
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: "=",
unified_cgroup_hierarchy: false,
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_SERVER_ADDR==foo"],
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: "=foo",
unified_cgroup_hierarchy: false,
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_SERVER_ADDR=foo=bar=baz="],
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: "foo=bar=baz=",
unified_cgroup_hierarchy: false,
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_SERVER_ADDR=unix:///tmp/foo.socket"],
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: "unix:///tmp/foo.socket",
unified_cgroup_hierarchy: false,
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_SERVER_ADDR=unix://@/tmp/foo.socket"],
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: "unix://@/tmp/foo.socket",
unified_cgroup_hierarchy: false,
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_LOG_LEVEL="],
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_LOG_LEVEL=invalid"],
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_LOG_LEVEL=debug"],
debug_console: false,
dev_mode: false,
log_level: slog::Level::Debug,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
TestData {
contents: "",
env_vars: vec!["KATA_AGENT_LOG_LEVEL=debugger"],
debug_console: false,
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
},
];
let dir = tempdir().expect("failed to create tmpdir");
@@ -826,8 +625,7 @@ mod tests {
let result = config.parse_cmdline(&filename.to_owned());
assert!(result.is_err());
// Now, test various combinations of file contents and environment
// variables.
// Now, test various combinations of file contents
for (i, d) in tests.iter().enumerate() {
let msg = format!("test[{}]: {:?}", i, d);
@@ -836,23 +634,10 @@ mod tests {
let filename = file_path.to_str().expect("failed to create filename");
let mut file =
File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));
File::create(filename).expect(&format!("{}: failed to create file", msg));
file.write_all(d.contents.as_bytes())
.unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));
let mut vars_to_unset = Vec::new();
for v in &d.env_vars {
let fields: Vec<&str> = v.split('=').collect();
let name = fields[0];
let value = fields[1..].join("=");
env::set_var(name, value);
vars_to_unset.push(name);
}
.expect(&format!("{}: failed to write file contents", msg));
let mut config = agentConfig::new();
assert_eq!(config.debug_console, false, "{}", msg);
@@ -865,7 +650,6 @@ mod tests {
msg
);
assert_eq!(config.container_pipe_size, 0, "{}", msg);
assert_eq!(config.server_addr, TEST_SERVER_ADDR, "{}", msg);
let result = config.parse_cmdline(filename);
assert!(result.is_ok(), "{}", msg);
@@ -880,11 +664,6 @@ mod tests {
assert_eq!(d.log_level, config.log_level, "{}", msg);
assert_eq!(d.hotplug_timeout, config.hotplug_timeout, "{}", msg);
assert_eq!(d.container_pipe_size, config.container_pipe_size, "{}", msg);
assert_eq!(d.server_addr, config.server_addr, "{}", msg);
for v in vars_to_unset {
env::remove_var(v);
}
}
}
@@ -958,7 +737,7 @@ mod tests {
let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, msg);
assert_result!(d.result, result, format!("{}", msg));
}
}
@@ -1052,7 +831,7 @@ mod tests {
let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, msg);
assert_result!(d.result, result, format!("{}", msg));
}
}
@@ -1122,7 +901,7 @@ mod tests {
let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, msg);
assert_result!(d.result, result, format!("{}", msg));
}
}
@@ -1196,7 +975,7 @@ mod tests {
let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, msg);
assert_result!(d.result, result, format!("{}", msg));
}
}
}

View File

@@ -38,8 +38,8 @@ struct DevIndex(HashMap<String, DevIndexEntry>);
// DeviceHandler is the type of callback to be defined to handle every type of device driver.
type DeviceHandler = fn(&Device, &mut Spec, &Arc<Mutex<Sandbox>>, &DevIndex) -> Result<()>;
// DEVICEHANDLERLIST lists the supported drivers.
#[rustfmt::skip]
// DeviceHandlerList lists the supported drivers.
#[cfg_attr(rustfmt, rustfmt_skip)]
lazy_static! {
static ref DEVICEHANDLERLIST: HashMap<&'static str, DeviceHandler> = {
let mut m: HashMap<&'static str, DeviceHandler> = HashMap::new();
@@ -65,7 +65,7 @@ pub fn online_device(path: &str) -> Result<()> {
// Here, bridgeAddr is the address at which the bridge is attached on the root bus,
// while deviceAddr is the address at which the device is attached on the bridge.
fn get_pci_device_address(pci_id: &str) -> Result<String> {
let tokens: Vec<&str> = pci_id.split('/').collect();
let tokens: Vec<&str> = pci_id.split("/").collect();
if tokens.len() != 2 {
return Err(anyhow!(
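For context on the `bridgeAddr/deviceAddr` convention described above, a hedged std-only sketch of the split performed by get_pci_device_address; the "02/03" sample value and the error wording are illustrative only:

```rust
// Split a "bridgeAddr/deviceAddr" identifier into its two components.
fn split_pci_id(pci_id: &str) -> Result<(String, String), String> {
    let tokens: Vec<&str> = pci_id.split('/').collect();
    if tokens.len() != 2 {
        return Err(format!(
            "unexpected PCI identifier {}, expected bridgeAddr/deviceAddr",
            pci_id
        ));
    }
    Ok((tokens[0].to_string(), tokens[1].to_string()))
}

fn main() {
    // "02/03": device 03 hangs off the bridge attached at slot 02 on the root bus.
    let (bridge, device) = split_pci_id("02/03").unwrap();
    assert_eq!((bridge.as_str(), device.as_str()), ("02", "03"));
    assert!(split_pci_id("02").is_err());
}
```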
@@ -165,7 +165,7 @@ pub fn get_pci_device_name(sandbox: &Arc<Mutex<Sandbox>>, pci_id: &str) -> Resul
/// Scan SCSI bus for the given SCSI address(SCSI-Id and LUN)
fn scan_scsi_bus(scsi_addr: &str) -> Result<()> {
let tokens: Vec<&str> = scsi_addr.split(':').collect();
let tokens: Vec<&str> = scsi_addr.split(":").collect();
if tokens.len() != 2 {
return Err(anyhow!(
"Unexpected format for SCSI Address: {}, expect SCSIID:LUA",
@@ -336,11 +336,11 @@ impl DevIndex {
fn new(spec: &Spec) -> DevIndex {
let mut map = HashMap::new();
if let Some(linux) = spec.linux.as_ref() {
for linux in spec.linux.as_ref() {
for (i, d) in linux.devices.iter().enumerate() {
let mut residx = Vec::new();
if let Some(linuxres) = linux.resources.as_ref() {
for linuxres in linux.resources.as_ref() {
for (j, r) in linuxres.devices.iter().enumerate() {
if r.r#type == d.r#type
&& r.major == Some(d.major)


@@ -5,10 +5,8 @@
/// Linux ABI related constants.
#[cfg(target_arch = "aarch64")]
use std::fs;
pub const SYSFS_DIR: &str = "/sys";
pub const SYSFS_PCI_BUS_PREFIX: &str = "/sys/bus/pci/devices";
pub const SYSFS_PCI_BUS_RESCAN_FILE: &str = "/sys/bus/pci/rescan";
#[cfg(any(
@@ -17,46 +15,9 @@ pub const SYSFS_PCI_BUS_RESCAN_FILE: &str = "/sys/bus/pci/rescan";
target_arch = "x86_64",
target_arch = "x86"
))]
pub fn create_pci_root_bus_path() -> String {
String::from("/devices/pci0000:00")
}
pub const PCI_ROOT_BUS_PATH: &str = "/devices/pci0000:00";
#[cfg(target_arch = "aarch64")]
pub fn create_pci_root_bus_path() -> String {
let ret = String::from("/devices/platform/4010000000.pcie/pci0000:00");
let mut sysfs_dir = String::from(SYSFS_DIR);
let mut start_root_bus_path = String::from("/devices/platform/");
let end_root_bus_path = String::from("/pci0000:00");
sysfs_dir.push_str(&start_root_bus_path);
let entries = match fs::read_dir(sysfs_dir) {
Ok(e) => e,
Err(_) => return ret,
};
for entry in entries {
let pathname = match entry {
Ok(p) => p.path(),
Err(_) => return ret,
};
let dir_name = match pathname.file_name() {
Some(p) => p.to_str(),
None => return ret,
};
let dir_name = match dir_name {
Some(p) => p,
None => return ret,
};
let dir_name = String::from(dir_name);
if dir_name.ends_with(".pcie") {
start_root_bus_path.push_str(&dir_name);
start_root_bus_path.push_str(&end_root_bus_path);
return start_root_bus_path;
}
}
ret
}
pub const PCI_ROOT_BUS_PATH: &str = "/devices/platform/4010000000.pcie/pci0000:00";
pub const SYSFS_CPU_ONLINE_PATH: &str = "/sys/devices/system/cpu";
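The block above replaces the aarch64 create_pci_root_bus_path() helper with a fixed constant. For reference, here is a std-only sketch of the directory scan the removed helper performed, with the platform directory passed as a parameter so it can be exercised anywhere; the fallback string matches the constant above:

```rust
use std::fs;
use std::path::Path;

// Look for a platform entry ending in ".pcie" and build
// "/devices/platform/<entry>/pci0000:00"; otherwise return the
// hard-coded default used on aarch64.
fn pci_root_bus_path(platform_dir: &Path) -> String {
    let fallback = String::from("/devices/platform/4010000000.pcie/pci0000:00");

    let entries = match fs::read_dir(platform_dir) {
        Ok(e) => e,
        Err(_) => return fallback,
    };

    for entry in entries.flatten() {
        let name = entry.file_name();
        if let Some(name) = name.to_str() {
            if name.ends_with(".pcie") {
                return format!("/devices/platform/{}/pci0000:00", name);
            }
        }
    }

    fallback
}

fn main() {
    println!("{}", pci_root_bus_path(Path::new("/sys/devices/platform")));
}
```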


@@ -198,13 +198,6 @@ fn main() -> Result<()> {
// which is required to satisfy the lifetime constraints of the auto-generated gRPC code.
let _guard = slog_scope::set_global_logger(logger.new(o!("subsystem" => "rpc")));
let mut _log_guard: Result<(), log::SetLoggerError> = Ok(());
if config.log_level == slog::Level::Trace {
// Redirect ttrpc log calls to slog iff full debug requested
_log_guard = Ok(slog_stdlog::init().map_err(|e| e)?);
}
start_sandbox(&logger, &config, init_mode)?;
let _ = log_handle.join();
@@ -253,8 +246,8 @@ fn start_sandbox(logger: &Logger, config: &agentConfig, init_mode: bool) -> Resu
let (tx, rx) = mpsc::channel::<i32>();
sandbox.lock().unwrap().sender = Some(tx);
// vsock:///dev/vsock, port
let mut server = rpc::start(sandbox, config.server_addr.as_str());
//vsock:///dev/vsock, port
let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str());
let _ = server.start().unwrap();
@@ -279,6 +272,8 @@ fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
let signals = Signals::new(&[SIGCHLD])?;
let s = sandbox.clone();
thread::spawn(move || {
'outer: for sig in signals.forever() {
info!(logger, "received signal"; "signal" => sig);
@@ -306,16 +301,15 @@ fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
continue 'outer;
}
};
info!(logger, "wait_status"; "wait_status result" => format!("{:?}", wait_status));
let pid = wait_status.pid();
if let Some(pid) = pid {
let raw_pid = pid.as_raw();
if pid.is_some() {
let raw_pid = pid.unwrap().as_raw();
let child_pid = format!("{}", raw_pid);
let logger = logger.new(o!("child-pid" => child_pid));
let mut sandbox = sandbox.lock().unwrap();
let mut sandbox = s.lock().unwrap();
let process = sandbox.find_process(raw_pid);
if process.is_none() {
info!(logger, "child exited unexpectedly");
@@ -343,13 +337,6 @@ fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
p.exit_code = ret;
let _ = unistd::close(pipe_write);
if let Some(ref poller) = p.epoller {
info!(logger, "close epoller");
// close the socket file to notify readStdio to close terminal specifically
// in case this process's terminal has been inherited by its children.
poller.close_wfd()
}
}
}
}
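The signal-handler hunk above reaps exited children and records their exit codes against the owning process entry. A minimal, std-only illustration of that bookkeeping (the real agent drives this from SIGCHLD and waitpid, elided here; the HashMap stands in for the sandbox's process table):

```rust
use std::collections::HashMap;
use std::process::Command;

fn main() {
    // pid -> exit code, so a later WaitProcess call can pick the result up.
    let mut exit_codes: HashMap<u32, i32> = HashMap::new();

    let mut child = Command::new("true").spawn().expect("spawn failed");
    let pid = child.id();
    let status = child.wait().expect("wait failed");

    // Record the exit code for the reaped child; -1 stands in for
    // "terminated by signal" in this simplified sketch.
    exit_codes.insert(pid, status.code().unwrap_or(-1));
    println!("child {} exited with {:?}", pid, exit_codes.get(&pid));
}
```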
@@ -379,8 +366,7 @@ fn init_agent_as_init(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result
env::set_var("PATH", "/bin:/sbin/:/usr/bin/:/usr/sbin/");
let contents =
std::fs::read_to_string("/etc/hostname").unwrap_or_else(|_| String::from("localhost"));
let contents = std::fs::read_to_string("/etc/hostname").unwrap_or(String::from("localhost"));
let contents_array: Vec<&str> = contents.split(' ').collect();
let hostname = contents_array[0].trim();
@@ -495,8 +481,8 @@ where
// write and return
match writer.write_all(&buf[..buf_len]) {
Ok(_) => Ok(buf_len as u64),
Err(err) => Err(err),
Ok(_) => return Ok(buf_len as u64),
Err(err) => return Err(err),
}
}


@@ -8,6 +8,7 @@ extern crate procfs;
use prometheus::{Encoder, Gauge, GaugeVec, IntCounter, TextEncoder};
use anyhow::Result;
use protocols;
const NAMESPACE_KATA_AGENT: &str = "kata_agent";
const NAMESPACE_KATA_GUEST: &str = "kata_guest";
@@ -84,15 +85,17 @@ pub fn get_metrics(_: &protocols::agent::GetMetricsRequest) -> Result<String> {
let encoder = TextEncoder::new();
encoder.encode(&metric_families, &mut buffer).unwrap();
Ok(String::from_utf8(buffer).unwrap())
Ok(String::from_utf8(buffer.clone()).unwrap())
}
fn update_agent_metrics() {
let me = procfs::process::Process::myself();
if let Err(err) = me {
error!(sl!(), "failed to create process instance: {:?}", err);
return;
match me {
Err(err) => {
error!(sl!(), "failed to create process instance: {:?}", err);
return;
}
Ok(_) => {}
}
let me = me.unwrap();
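get_metrics() above gathers the default registry and renders it with the text encoder. A small sketch of that gather-and-encode step, assuming the prometheus crate this file already depends on; the metric name is made up for the example:

```rust
use prometheus::{Encoder, IntCounter, TextEncoder};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Register a counter with the default registry, bump it, then render
    // everything in the Prometheus text exposition format.
    let counter = IntCounter::new("kata_agent_demo_total", "demo counter")?;
    prometheus::default_registry().register(Box::new(counter.clone()))?;
    counter.inc();

    let metric_families = prometheus::gather();
    let mut buffer = Vec::new();
    TextEncoder::new().encode(&metric_families, &mut buffer)?;
    println!("{}", String::from_utf8(buffer)?);
    Ok(())
}
```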


@@ -39,7 +39,7 @@ pub const DRIVERLOCALTYPE: &str = "local";
pub const TYPEROOTFS: &str = "rootfs";
#[rustfmt::skip]
#[cfg_attr(rustfmt, rustfmt_skip)]
lazy_static! {
pub static ref FLAGS: HashMap<&'static str, (bool, MsFlags)> = {
let mut m = HashMap::new();
@@ -88,7 +88,7 @@ pub struct INIT_MOUNT {
options: Vec<&'static str>,
}
#[rustfmt::skip]
#[cfg_attr(rustfmt, rustfmt_skip)]
lazy_static!{
static ref CGROUPS: HashMap<&'static str, &'static str> = {
let mut m = HashMap::new();
@@ -109,7 +109,7 @@ lazy_static!{
};
}
#[rustfmt::skip]
#[cfg_attr(rustfmt, rustfmt_skip)]
lazy_static! {
pub static ref INIT_ROOTFS_MOUNTS: Vec<INIT_MOUNT> = vec![
INIT_MOUNT{fstype: "proc", src: "proc", dest: "/proc", options: vec!["nosuid", "nodev", "noexec"]},
@@ -126,7 +126,7 @@ lazy_static! {
type StorageHandler = fn(&Logger, &Storage, Arc<Mutex<Sandbox>>) -> Result<String>;
// STORAGEHANDLERLIST lists the supported drivers.
#[rustfmt::skip]
#[cfg_attr(rustfmt, rustfmt_skip)]
lazy_static! {
pub static ref STORAGEHANDLERLIST: HashMap<&'static str, StorageHandler> = {
let mut m = HashMap::new();
@@ -173,9 +173,9 @@ impl<'a> BareMount<'a> {
BareMount {
source: s,
destination: d,
fs_type,
flags,
options,
fs_type: fs_type,
flags: flags,
options: options,
logger: logger.new(o!("subsystem" => "baremount")),
}
}
@@ -190,11 +190,11 @@ impl<'a> BareMount<'a> {
let cstr_dest: CString;
let cstr_fs_type: CString;
if self.source.is_empty() {
if self.source.len() == 0 {
return Err(anyhow!("need mount source"));
}
if self.destination.is_empty() {
if self.destination.len() == 0 {
return Err(anyhow!("need mount destination"));
}
@@ -204,14 +204,14 @@ impl<'a> BareMount<'a> {
cstr_dest = CString::new(self.destination)?;
dest = cstr_dest.as_ptr();
if self.fs_type.is_empty() {
if self.fs_type.len() == 0 {
return Err(anyhow!("need mount FS type"));
}
cstr_fs_type = CString::new(self.fs_type)?;
fs_type = cstr_fs_type.as_ptr();
if !self.options.is_empty() {
if self.options.len() > 0 {
cstr_options = CString::new(self.options)?;
options = cstr_options.as_ptr() as *const c_void;
}
@@ -243,7 +243,8 @@ fn ephemeral_storage_handler(
storage: &Storage,
sandbox: Arc<Mutex<Sandbox>>,
) -> Result<String> {
let mut sb = sandbox.lock().unwrap();
let s = sandbox.clone();
let mut sb = s.lock().unwrap();
let new_storage = sb.set_sandbox_storage(&storage.mount_point);
if !new_storage {
@@ -261,7 +262,8 @@ fn local_storage_handler(
storage: &Storage,
sandbox: Arc<Mutex<Sandbox>>,
) -> Result<String> {
let mut sb = sandbox.lock().unwrap();
let s = sandbox.clone();
let mut sb = s.lock().unwrap();
let new_storage = sb.set_sandbox_storage(&storage.mount_point);
if !new_storage {
@@ -277,7 +279,8 @@ fn local_storage_handler(
let opts = parse_options(opts_vec);
let mode = opts.get("mode");
if let Some(mode) = mode {
if mode.is_some() {
let mode = mode.unwrap();
let mut permission = fs::metadata(&storage.mount_point)?.permissions();
let o_mode = u32::from_str_radix(mode, 8)?;
@@ -407,17 +410,17 @@ fn parse_mount_flags_and_options(options_vec: Vec<&str>) -> (MsFlags, String) {
let mut options: String = "".to_string();
for opt in options_vec {
if !opt.is_empty() {
if opt.len() != 0 {
match FLAGS.get(opt) {
Some(x) => {
let (_, f) = *x;
flags |= f;
flags = flags | f;
}
None => {
if !options.is_empty() {
if options.len() > 0 {
options.push_str(format!(",{}", opt).as_str());
} else {
options.push_str(opt.to_string().as_str());
options.push_str(format!("{}", opt).as_str());
}
}
};
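parse_mount_flags_and_options() above folds recognised option words into mount flags and concatenates the rest into a data string. A std-only sketch of the same accumulation, with toy bit values standing in for nix's MsFlags:

```rust
use std::collections::HashMap;

// Illustrative bit values; the real code uses nix::mount::MsFlags.
const RDONLY: u64 = 0x1;
const NOSUID: u64 = 0x2;

fn parse_mount_flags_and_options(options_vec: Vec<&str>) -> (u64, String) {
    let table: HashMap<&str, u64> = HashMap::from([("ro", RDONLY), ("nosuid", NOSUID)]);
    let mut flags = 0u64;
    let mut options = String::new();

    for opt in options_vec {
        if opt.is_empty() {
            continue;
        }
        match table.get(opt) {
            // Known option word: OR its flag bit in.
            Some(f) => flags |= f,
            // Anything else is passed through as comma-separated mount data.
            None => {
                if !options.is_empty() {
                    options.push(',');
                }
                options.push_str(opt);
            }
        }
    }
    (flags, options)
}

fn main() {
    let (flags, opts) = parse_mount_flags_and_options(vec!["ro", "mode=755", "nosuid"]);
    assert_eq!(flags, RDONLY | NOSUID);
    assert_eq!(opts, "mode=755");
}
```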
@@ -455,7 +458,7 @@ pub fn add_storages(
// Todo need to rollback the mounted storage if err met.
let mount_point = handler(&logger, &storage, sandbox.clone())?;
if !mount_point.is_empty() {
if mount_point.len() > 0 {
mount_list.push(mount_point);
}
}
@@ -567,10 +570,10 @@ pub fn get_cgroup_mounts(
'outer: for (_, line) in reader.lines().enumerate() {
let line = line?;
let fields: Vec<&str> = line.split('\t').collect();
let fields: Vec<&str> = line.split("\t").collect();
// Ignore comment header
if fields[0].starts_with('#') {
if fields[0].starts_with("#") {
continue;
}
@@ -640,7 +643,7 @@ pub fn cgroups_mount(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result<
Ok(())
}
pub fn remove_mounts(mounts: &[String]) -> Result<()> {
pub fn remove_mounts(mounts: &Vec<String>) -> Result<()> {
for m in mounts.iter() {
mount::umount(m.as_str()).context(format!("failed to umount {:?}", m))?;
}
@@ -672,7 +675,7 @@ fn ensure_destination_exists(destination: &str, fs_type: &str) -> Result<()> {
fn parse_options(option_list: Vec<String>) -> HashMap<String, String> {
let mut options = HashMap::new();
for opt in option_list.iter() {
let fields: Vec<&str> = opt.split('=').collect();
let fields: Vec<&str> = opt.split("=").collect();
if fields.len() != 2 {
continue;
}
@@ -853,7 +856,7 @@ mod tests {
let msg = format!("{}: umount result: {:?}", msg, result);
assert!(ret == 0, msg);
assert!(ret == 0, format!("{}", msg));
};
continue;
@@ -911,8 +914,7 @@ mod tests {
.expect("failed to create mount destination filename");
for d in [test_dir_filename, mnt_src_filename, mnt_dest_filename].iter() {
std::fs::create_dir_all(d)
.unwrap_or_else(|_| panic!("failed to create directory {}", d));
std::fs::create_dir_all(d).expect(&format!("failed to create directory {}", d));
}
// Create an actual mount
@@ -1053,13 +1055,13 @@ mod tests {
let filename = file_path
.to_str()
.unwrap_or_else(|| panic!("{}: failed to create filename", msg));
.expect(&format!("{}: failed to create filename", msg));
let mut file =
File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));
File::create(filename).expect(&format!("{}: failed to create file", msg));
file.write_all(d.contents.as_bytes())
.unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));
.expect(&format!("{}: failed to write file contents", msg));
let result = get_mount_fs_type_from_file(filename, d.mount_point);
@@ -1215,10 +1217,10 @@ mod tests {
.expect("failed to create cgroup file filename");
let mut file =
File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));
File::create(filename).expect(&format!("{}: failed to create file", msg));
file.write_all(d.contents.as_bytes())
.unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));
.expect(&format!("{}: failed to write file contents", msg));
let result = get_cgroup_mounts(&logger, filename, false);
let msg = format!("{}: result: {:?}", msg, result);


@@ -16,6 +16,7 @@ use std::thread::{self};
use crate::mount::{BareMount, FLAGS};
use slog::Logger;
//use container::Process;
const PERSISTENT_NS_DIR: &str = "/var/run/sandbox-ns";
pub const NSTYPEIPC: &str = "ipc";
pub const NSTYPEUTS: &str = "uts";
@@ -51,12 +52,12 @@ impl Namespace {
}
}
pub fn get_ipc(mut self) -> Self {
pub fn as_ipc(mut self) -> Self {
self.ns_type = NamespaceType::IPC;
self
}
pub fn get_uts(mut self, hostname: &str) -> Self {
pub fn as_uts(mut self, hostname: &str) -> Self {
self.ns_type = NamespaceType::UTS;
if hostname != "" {
self.hostname = Some(String::from(hostname));
@@ -64,7 +65,7 @@ impl Namespace {
self
}
pub fn get_pid(mut self) -> Self {
pub fn as_pid(mut self) -> Self {
self.ns_type = NamespaceType::PID;
self
}
@@ -80,10 +81,7 @@ impl Namespace {
fs::create_dir_all(&self.persistent_ns_dir)?;
let ns_path = PathBuf::from(&self.persistent_ns_dir);
let ns_type = self.ns_type;
if ns_type == NamespaceType::PID {
return Err(anyhow!("Cannot persist namespace of PID type"));
}
let ns_type = self.ns_type.clone();
let logger = self.logger.clone();
let new_ns_path = ns_path.join(&ns_type.get());
@@ -99,7 +97,7 @@ impl Namespace {
File::open(Path::new(&origin_ns_path))?;
// Create a new netns on the current thread.
let cf = ns_type.get_flags();
let cf = ns_type.get_flags().clone();
unshare(cf)?;
@@ -112,9 +110,12 @@ impl Namespace {
let mut flags = MsFlags::empty();
if let Some(x) = FLAGS.get("rbind") {
let (_, f) = *x;
flags |= f;
match FLAGS.get("rbind") {
Some(x) => {
let (_, f) = *x;
flags = flags | f;
}
None => (),
};
let bare_mount = BareMount::new(source, destination, "none", flags, "", &logger);
@@ -193,34 +194,23 @@ mod tests {
let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap();
let ns_ipc = Namespace::new(&logger)
.get_ipc()
.as_ipc()
.set_root_dir(tmpdir.path().to_str().unwrap())
.setup();
assert!(ns_ipc.is_ok());
assert!(remove_mounts(&[ns_ipc.unwrap().path]).is_ok());
assert!(remove_mounts(&vec![ns_ipc.unwrap().path]).is_ok());
let logger = slog::Logger::root(slog::Discard, o!());
let tmpdir = Builder::new().prefix("uts").tempdir().unwrap();
let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap();
let ns_uts = Namespace::new(&logger)
.get_uts("test_hostname")
.as_uts("test_hostname")
.set_root_dir(tmpdir.path().to_str().unwrap())
.setup();
assert!(ns_uts.is_ok());
assert!(remove_mounts(&[ns_uts.unwrap().path]).is_ok());
// Check it cannot persist pid namespaces.
let logger = slog::Logger::root(slog::Discard, o!());
let tmpdir = Builder::new().prefix("pid").tempdir().unwrap();
let ns_pid = Namespace::new(&logger)
.get_pid()
.set_root_dir(tmpdir.path().to_str().unwrap())
.setup();
assert!(ns_pid.is_err());
assert!(remove_mounts(&vec![ns_uts.unwrap().path]).is_ok());
}
#[test]


@@ -48,7 +48,7 @@ pub fn setup_guest_dns(logger: Logger, dns_list: Vec<String>) -> Result<()> {
fn do_setup_guest_dns(logger: Logger, dns_list: Vec<String>, src: &str, dst: &str) -> Result<()> {
let logger = logger.new(o!( "subsystem" => "network"));
if dns_list.is_empty() {
if dns_list.len() == 0 {
info!(
logger,
"Did not set sandbox DNS as DNS not received as part of request."
@@ -117,12 +117,12 @@ mod tests {
];
// write to /run/kata-containers/sandbox/resolv.conf
let mut src_file = File::create(src_filename)
.unwrap_or_else(|_| panic!("failed to create file {:?}", src_filename));
let mut src_file =
File::create(src_filename).expect(&format!("failed to create file {:?}", src_filename));
let content = dns.join("\n");
src_file
.write_all(content.as_bytes())
.expect("failed to write file contents");
.expect(&format!("failed to write file contents"));
// call do_setup_guest_dns
let result = do_setup_guest_dns(logger, dns.clone(), src_filename, dst_filename);


@@ -4,11 +4,11 @@
//
use anyhow::Result;
use libc;
use nix::errno::Errno;
use nix::fcntl::{self, OFlag};
use nix::sys::stat::Mode;
use std::fs;
use std::os::unix::io::{AsRawFd, FromRawFd};
pub const RNGDEV: &str = "/dev/random";
pub const RNDADDTOENTCNT: libc::c_int = 0x40045201;
@@ -24,22 +24,18 @@ pub fn reseed_rng(data: &[u8]) -> Result<()> {
let len = data.len() as libc::c_long;
fs::write(RNGDEV, data)?;
let f = {
let fd = fcntl::open(RNGDEV, OFlag::O_RDWR, Mode::from_bits_truncate(0o022))?;
// Wrap fd with `File` to properly close descriptor on exit
unsafe { fs::File::from_raw_fd(fd) }
};
let fd = fcntl::open(RNGDEV, OFlag::O_RDWR, Mode::from_bits_truncate(0o022))?;
let ret = unsafe {
libc::ioctl(
f.as_raw_fd(),
fd,
RNDADDTOENTCNT as IoctlRequestType,
&len as *const libc::c_long,
)
};
let _ = Errno::result(ret).map(drop)?;
let ret = unsafe { libc::ioctl(f.as_raw_fd(), RNDRESEEDRNG as IoctlRequestType, 0) };
let ret = unsafe { libc::ioctl(fd, RNDRESEEDRNG as IoctlRequestType, 0) };
let _ = Errno::result(ret).map(drop)?;
Ok(())


@@ -6,7 +6,7 @@
use std::path::Path;
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use ttrpc::{self, error::get_rpc_status as ttrpc_error};
use ttrpc;
use anyhow::{anyhow, Context, Result};
use oci::{LinuxNamespace, Root, Spec};
@@ -21,10 +21,10 @@ use protocols::health::{
HealthCheckResponse, HealthCheckResponse_ServingStatus, VersionCheckResponse,
};
use protocols::types::Interface;
use rustjail;
use rustjail::cgroups::notifier;
use rustjail::container::{BaseContainer, Container, LinuxContainer};
use rustjail::process::Process;
use rustjail::reaper;
use rustjail::specconv::CreateOpts;
use nix::errno::Errno;
@@ -47,6 +47,7 @@ use crate::AGENT_CONFIG;
use netlink::{RtnlHandle, NETLINK_ROUTE};
use libc::{self, c_ushort, pid_t, winsize, TIOCSWINSZ};
use serde_json;
use std::convert::TryFrom;
use std::fs;
use std::os::unix::io::RawFd;
@@ -151,13 +152,14 @@ impl agentService {
let pipe_size = AGENT_CONFIG.read().unwrap().container_pipe_size;
let p = if oci.process.is_some() {
Process::new(
let tp = Process::new(
&sl!(),
&oci.process.as_ref().unwrap(),
cid.as_str(),
true,
pipe_size,
)?
)?;
tp
} else {
info!(sl!(), "no process configurations!");
return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL)));
@@ -173,7 +175,7 @@ impl agentService {
}
fn do_start_container(&self, req: protocols::agent::StartContainerRequest) -> Result<()> {
let cid = req.container_id;
let cid = req.container_id.clone();
let sandbox = self.sandbox.clone();
let mut s = sandbox.lock().unwrap();
@@ -181,7 +183,7 @@ impl agentService {
let ctr = s
.get_container(&cid)
.ok_or_else(|| anyhow!("Invalid container id"))?;
.ok_or(anyhow!("Invalid container id"))?;
ctr.exec()?;
@@ -190,14 +192,10 @@ impl agentService {
let cg_path = ctr.cgroup_manager.as_ref().unwrap().get_cg_path("memory");
if cg_path.is_some() {
let rx = notifier::notify_oom(cid.as_str(), cg_path.unwrap())?;
s.run_oom_event_monitor(rx, cid.clone());
s.run_oom_event_monitor(rx, cid);
}
}
// set epoller
let p = find_process(&mut s, cid.as_str(), "", true)?;
p.create_epoller()?;
Ok(())
}
@@ -208,7 +206,9 @@ impl agentService {
let mut remove_container_resources = |sandbox: &mut Sandbox| -> Result<()> {
// Find the sandbox storage used by this container
let mounts = sandbox.container_mounts.get(&cid);
if let Some(mounts) = mounts {
if mounts.is_some() {
let mounts = mounts.unwrap();
remove_mounts(&mounts)?;
for m in mounts.iter() {
@@ -232,7 +232,7 @@ impl agentService {
let mut sandbox = s.lock().unwrap();
let ctr = sandbox
.get_container(&cid)
.ok_or_else(|| anyhow!("Invalid container id"))?;
.ok_or(anyhow!("Invalid container id"))?;
ctr.destroy()?;
@@ -250,11 +250,11 @@ impl agentService {
let mut sandbox = s.lock().unwrap();
let _ctr = sandbox
.get_container(&cid2)
.ok_or_else(|| anyhow!("Invalid container id"))
.map(|ctr| {
.ok_or(anyhow!("Invalid container id"))
.and_then(|ctr| {
ctr.destroy().unwrap();
tx.send(1).unwrap();
ctr
Ok(ctr)
});
});
@@ -277,7 +277,7 @@ impl agentService {
let cid = req.container_id.clone();
let exec_id = req.exec_id.clone();
info!(sl!(), "do_exec_process cid: {} eid: {}", cid, exec_id);
info!(sl!(), "cid: {} eid: {}", cid.clone(), exec_id.clone());
let s = self.sandbox.clone();
let mut sandbox = s.lock().unwrap();
@@ -294,14 +294,10 @@ impl agentService {
let ctr = sandbox
.get_container(&cid)
.ok_or_else(|| anyhow!("Invalid container id"))?;
.ok_or(anyhow!("Invalid container id"))?;
ctr.run(p)?;
// set epoller
let p = find_process(&mut sandbox, cid.as_str(), exec_id.as_str(), false)?;
p.create_epoller()?;
Ok(())
}
@@ -344,7 +340,7 @@ impl agentService {
req: protocols::agent::WaitProcessRequest,
) -> Result<protocols::agent::WaitProcessResponse> {
let cid = req.container_id.clone();
let eid = req.exec_id;
let eid = req.exec_id.clone();
let s = self.sandbox.clone();
let mut resp = WaitProcessResponse::new();
let pid: pid_t;
@@ -380,7 +376,7 @@ impl agentService {
let mut sandbox = s.lock().unwrap();
let ctr = sandbox
.get_container(&cid)
.ok_or_else(|| anyhow!("Invalid container id"))?;
.ok_or(anyhow!("Invalid container id"))?;
let mut p = match ctr.processes.get_mut(&pid) {
Some(p) => p,
@@ -412,8 +408,6 @@ impl agentService {
let _ = unistd::close(p.exit_pipe_r.unwrap());
}
p.close_epoller();
p.parent_stdin = None;
p.parent_stdout = None;
p.parent_stderr = None;
@@ -438,6 +432,13 @@ impl agentService {
let cid = req.container_id.clone();
let eid = req.exec_id.clone();
info!(
sl!(),
"write stdin";
"container-id" => cid.clone(),
"exec-id" => eid.clone()
);
let s = self.sandbox.clone();
let mut sandbox = s.lock().unwrap();
let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false)?;
@@ -481,7 +482,6 @@ impl agentService {
let eid = req.exec_id;
let mut fd: RawFd = -1;
let mut epoller: Option<reaper::Epoller> = None;
{
let s = self.sandbox.clone();
let mut sandbox = s.lock().unwrap();
@@ -490,7 +490,6 @@ impl agentService {
if p.term_master.is_some() {
fd = p.term_master.unwrap();
epoller = p.epoller.clone();
} else if stdout {
if p.parent_stdout.is_some() {
fd = p.parent_stdout.unwrap();
@@ -500,17 +499,6 @@ impl agentService {
}
}
if let Some(epoller) = epoller {
// The process's epoller's poll() will return a file descriptor of the process's
// terminal or one end of its exited pipe. If it returns its terminal, it means
// there is data needed to be read out or it has been closed; if it returns the
// process's exited pipe, it means the process has exited and there is no data
// needed to be read out in its terminal, thus following read on it will read out
// "EOF" to terminate this process's io since the other end of this pipe has been
// closed in reap().
fd = epoller.poll()?;
}
if fd == -1 {
return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL)));
}
@@ -531,7 +519,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
req: protocols::agent::CreateContainerRequest,
) -> ttrpc::Result<Empty> {
match self.do_create_container(req) {
Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
))),
Ok(_) => Ok(Empty::new()),
}
}
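Most hunks in this file swap the verbose `ttrpc::Error::RpcStatus(ttrpc::get_status(...))` wrapping for the `ttrpc_error` alias imported at the top of the file. A generic, std-only illustration of that refactor pattern; the Status type and INTERNAL code below are stand-ins, not ttrpc's actual API:

```rust
#[derive(Debug)]
struct Status {
    code: u32,
    message: String,
}

// Alias the status constructor once...
fn rpc_status(code: u32, message: String) -> Status {
    Status { code, message }
}

const INTERNAL: u32 = 13;

fn do_work() -> Result<(), String> {
    Err("something went wrong".to_string())
}

// ...so every handler becomes a one-line map_err instead of repeating the
// full wrapping expression at each call site.
fn handler() -> Result<(), Status> {
    do_work().map_err(|e| rpc_status(INTERNAL, e))
}

fn main() {
    println!("{:?}", handler());
}
```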
@@ -542,7 +533,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
req: protocols::agent::StartContainerRequest,
) -> ttrpc::Result<Empty> {
match self.do_start_container(req) {
Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
))),
Ok(_) => Ok(Empty::new()),
}
}
@@ -553,7 +547,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
req: protocols::agent::RemoveContainerRequest,
) -> ttrpc::Result<Empty> {
match self.do_remove_container(req) {
Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
))),
Ok(_) => Ok(Empty::new()),
}
}
@@ -564,7 +561,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
req: protocols::agent::ExecProcessRequest,
) -> ttrpc::Result<Empty> {
match self.do_exec_process(req) {
Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
))),
Ok(_) => Ok(Empty::new()),
}
}
@@ -575,7 +575,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
req: protocols::agent::SignalProcessRequest,
) -> ttrpc::Result<Empty> {
match self.do_signal_process(req) {
Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
))),
Ok(_) => Ok(Empty::new()),
}
}
@@ -585,8 +588,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::WaitProcessRequest,
) -> ttrpc::Result<WaitProcessResponse> {
self.do_wait_process(req)
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
self.do_wait_process(req).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})
}
fn list_processes(
@@ -596,18 +600,18 @@ impl protocols::agent_ttrpc::AgentService for agentService {
) -> ttrpc::Result<ListProcessesResponse> {
let cid = req.container_id.clone();
let format = req.format.clone();
let mut args = req.args.into_vec();
let mut args = req.args.clone().into_vec();
let mut resp = ListProcessesResponse::new();
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();
let ctr = sandbox.get_container(&cid).ok_or_else(|| {
ttrpc_error(
let ctr = sandbox
.get_container(&cid)
.ok_or(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INVALID_ARGUMENT,
"invalid container id".to_string(),
)
})?;
)))?;
let pids = ctr.processes().unwrap();
@@ -618,15 +622,15 @@ impl protocols::agent_ttrpc::AgentService for agentService {
return Ok(resp);
}
_ => {
return Err(ttrpc_error(
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INVALID_ARGUMENT,
"invalid format!".to_string(),
));
)));
}
}
// format "table"
if args.is_empty() {
if args.len() == 0 {
// default argument
args = vec!["-ef".to_string()];
}
@@ -684,12 +688,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();
let ctr = sandbox.get_container(&cid).ok_or_else(|| {
ttrpc_error(
let ctr = sandbox
.get_container(&cid)
.ok_or(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INVALID_ARGUMENT,
"invalid container id".to_string(),
)
})?;
)))?;
let resp = Empty::new();
@@ -697,7 +701,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let ociRes = rustjail::resources_grpc_to_oci(&res.unwrap());
match ctr.set(ociRes) {
Err(e) => {
return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()));
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
)));
}
Ok(_) => return Ok(resp),
@@ -712,19 +719,20 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::StatsContainerRequest,
) -> ttrpc::Result<StatsContainerResponse> {
let cid = req.container_id;
let cid = req.container_id.clone();
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();
let ctr = sandbox.get_container(&cid).ok_or_else(|| {
ttrpc_error(
let ctr = sandbox
.get_container(&cid)
.ok_or(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INVALID_ARGUMENT,
"invalid container id".to_string(),
)
})?;
)))?;
ctr.stats()
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
ctr.stats().map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})
}
fn pause_container(
@@ -736,15 +744,16 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();
let ctr = sandbox.get_container(&cid).ok_or_else(|| {
ttrpc_error(
let ctr = sandbox
.get_container(&cid)
.ok_or(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INVALID_ARGUMENT,
"invalid container id".to_string(),
)
})?;
)))?;
ctr.pause()
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
ctr.pause().map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
Ok(Empty::new())
}
@@ -758,15 +767,16 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();
let ctr = sandbox.get_container(&cid).ok_or_else(|| {
ttrpc_error(
let ctr = sandbox
.get_container(&cid)
.ok_or(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INVALID_ARGUMENT,
"invalid container id".to_string(),
)
})?;
)))?;
ctr.resume()
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
ctr.resume().map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
Ok(Empty::new())
}
@@ -776,8 +786,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::WriteStreamRequest,
) -> ttrpc::Result<WriteStreamResponse> {
self.do_write_stream(req)
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
self.do_write_stream(req).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})
}
fn read_stdout(
@@ -785,8 +796,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::ReadStreamRequest,
) -> ttrpc::Result<ReadStreamResponse> {
self.do_read_stream(req, true)
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
self.do_read_stream(req, true).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})
}
fn read_stderr(
@@ -794,8 +806,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::ReadStreamRequest,
) -> ttrpc::Result<ReadStreamResponse> {
self.do_read_stream(req, false)
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
self.do_read_stream(req, false).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})
}
fn close_stdin(
@@ -804,15 +817,15 @@ impl protocols::agent_ttrpc::AgentService for agentService {
req: protocols::agent::CloseStdinRequest,
) -> ttrpc::Result<Empty> {
let cid = req.container_id.clone();
let eid = req.exec_id;
let eid = req.exec_id.clone();
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();
let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false).map_err(|e| {
ttrpc_error(
ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INVALID_ARGUMENT,
format!("invalid argument: {:?}", e),
)
))
})?;
if p.term_master.is_some() {
@@ -825,8 +838,6 @@ impl protocols::agent_ttrpc::AgentService for agentService {
p.parent_stdin = None;
}
p.close_epoller();
Ok(Empty::new())
}
@@ -840,14 +851,17 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();
let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false).map_err(|e| {
ttrpc_error(
ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::UNAVAILABLE,
format!("invalid argument: {:?}", e),
)
))
})?;
if p.term_master.is_none() {
return Err(ttrpc_error(ttrpc::Code::UNAVAILABLE, "no tty".to_string()));
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::UNAVAILABLE,
"no tty".to_string(),
)));
}
let fd = p.term_master.unwrap();
@@ -860,9 +874,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
};
let err = libc::ioctl(fd, TIOCSWINSZ, &win);
Errno::result(err)
.map(drop)
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, format!("ioctl error: {:?}", e)))?;
Errno::result(err).map(drop).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
format!("ioctl error: {:?}", e),
))
})?;
}
Ok(Empty::new())
@@ -874,13 +891,13 @@ impl protocols::agent_ttrpc::AgentService for agentService {
req: protocols::agent::UpdateInterfaceRequest,
) -> ttrpc::Result<Interface> {
if req.interface.is_none() {
return Err(ttrpc_error(
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INVALID_ARGUMENT,
"empty update interface request".to_string(),
));
format!("empty update interface request"),
)));
}
let interface = req.interface;
let interface = req.interface.clone();
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();
@@ -893,7 +910,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let iface = rtnl
.update_interface(interface.as_ref().unwrap())
.map_err(|e| {
ttrpc_error(ttrpc::Code::INTERNAL, format!("update interface: {:?}", e))
ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
format!("update interface: {:?}", e),
))
})?;
Ok(iface)
@@ -906,13 +926,13 @@ impl protocols::agent_ttrpc::AgentService for agentService {
) -> ttrpc::Result<Routes> {
let mut routes = protocols::agent::Routes::new();
if req.routes.is_none() {
return Err(ttrpc_error(
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INVALID_ARGUMENT,
"empty update routes request".to_string(),
));
format!("empty update routes request"),
)));
}
let rs = req.routes.unwrap().Routes.into_vec();
let rs = req.routes.clone().unwrap().Routes.into_vec();
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();
@@ -924,9 +944,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let rtnl = sandbox.rtnl.as_mut().unwrap();
// get current routes to return when error out
let crs = rtnl
.list_routes()
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, format!("update routes: {:?}", e)))?;
let crs = rtnl.list_routes().map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
format!("update routes: {:?}", e),
))
})?;
let v = match rtnl.update_routes(rs.as_ref()) {
Ok(value) => value,
@@ -952,9 +975,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
}
let rtnl = sandbox.rtnl.as_mut().unwrap();
let v = rtnl
.list_interfaces()
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, format!("list interface: {:?}", e)))?;
let v = rtnl.list_interfaces().map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
format!("list interface: {:?}", e),
))
})?;
interface.set_Interfaces(RepeatedField::from_vec(v));
@@ -976,9 +1002,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let rtnl = sandbox.rtnl.as_mut().unwrap();
let v = rtnl
.list_routes()
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, format!("list routes: {:?}", e)))?;
let v = rtnl.list_routes().map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
format!("list routes: {:?}", e),
))
})?;
routes.set_Routes(RepeatedField::from_vec(v));
@@ -1026,17 +1055,19 @@ impl protocols::agent_ttrpc::AgentService for agentService {
});
}
if !req.sandbox_id.is_empty() {
if req.sandbox_id.len() > 0 {
s.id = req.sandbox_id.clone();
}
for m in req.kernel_modules.iter() {
let _ = load_kernel_module(m)
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
let _ = load_kernel_module(m).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
}
s.setup_shared_namespaces()
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
s.setup_shared_namespaces().map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
}
match add_storages(sl!(), req.storages.to_vec(), self.sandbox.clone()) {
@@ -1045,20 +1076,30 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let mut s = sandbox.lock().unwrap();
s.mounts = m
}
Err(e) => return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
Err(e) => {
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
)))
}
};
match setup_guest_dns(sl!(), req.dns.to_vec()) {
Ok(_) => {
let sandbox = self.sandbox.clone();
let mut s = sandbox.lock().unwrap();
let _dns = req
let _ = req
.dns
.to_vec()
.iter()
.map(|dns| s.network.set_dns(dns.to_string()));
}
Err(e) => return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
Err(e) => {
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
)))
}
};
Ok(Empty::new())
@@ -1087,13 +1128,13 @@ impl protocols::agent_ttrpc::AgentService for agentService {
req: protocols::agent::AddARPNeighborsRequest,
) -> ttrpc::Result<Empty> {
if req.neighbors.is_none() {
return Err(ttrpc_error(
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INVALID_ARGUMENT,
"empty add arp neighbours request".to_string(),
));
format!("empty add arp neighbours request"),
)));
}
let neighs = req.neighbors.unwrap().ARPNeighbors.into_vec();
let neighs = req.neighbors.clone().unwrap().ARPNeighbors.into_vec();
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();
@@ -1104,8 +1145,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let rtnl = sandbox.rtnl.as_mut().unwrap();
rtnl.add_arp_neighbors(neighs.as_ref())
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
rtnl.add_arp_neighbors(neighs.as_ref()).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
Ok(Empty::new())
}
@@ -1118,9 +1160,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let s = Arc::clone(&self.sandbox);
let sandbox = s.lock().unwrap();
sandbox
.online_cpu_memory(&req)
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
sandbox.online_cpu_memory(&req).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
Ok(Empty::new())
}
@@ -1130,8 +1172,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::ReseedRandomDevRequest,
) -> ttrpc::Result<Empty> {
random::reseed_rng(req.data.as_slice())
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
random::reseed_rng(req.data.as_slice()).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
Ok(Empty::new())
}
@@ -1151,7 +1194,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
}
Err(e) => {
info!(sl!(), "fail to get memory info!");
return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()));
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
)));
}
}
@@ -1167,8 +1213,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::MemHotplugByProbeRequest,
) -> ttrpc::Result<Empty> {
do_mem_hotplug_by_probe(&req.memHotplugProbeAddr)
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
do_mem_hotplug_by_probe(&req.memHotplugProbeAddr).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
Ok(Empty::new())
}
@@ -1178,8 +1225,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::SetGuestDateTimeRequest,
) -> ttrpc::Result<Empty> {
do_set_guest_date_time(req.Sec, req.Usec)
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
do_set_guest_date_time(req.Sec, req.Usec).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
Ok(Empty::new())
}
@@ -1189,7 +1237,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::CopyFileRequest,
) -> ttrpc::Result<Empty> {
do_copy_file(&req).map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
do_copy_file(&req).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
Ok(Empty::new())
}
@@ -1200,7 +1250,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
req: protocols::agent::GetMetricsRequest,
) -> ttrpc::Result<Metrics> {
match get_metrics(&req) {
Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
))),
Ok(s) => {
let mut metrics = Metrics::new();
metrics.set_metrics(s);
@@ -1222,12 +1275,17 @@ impl protocols::agent_ttrpc::AgentService for agentService {
drop(sandbox);
match event_rx.recv() {
Err(err) => Err(ttrpc_error(ttrpc::Code::INTERNAL, err.to_string())),
Err(err) => {
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
err.to_string(),
)))
}
Ok(container_id) => {
info!(sl!(), "get_oom_event return {}", &container_id);
let mut resp = OOMEvent::new();
resp.container_id = container_id;
Ok(resp)
return Ok(resp);
}
}
}
@@ -1267,7 +1325,7 @@ fn get_memory_info(block_size: bool, hotplug: bool) -> Result<(u64, bool)> {
if block_size {
match fs::read_to_string(SYSFS_MEMORY_BLOCK_SIZE_PATH) {
Ok(v) => {
if v.is_empty() {
if v.len() == 0 {
info!(sl!(), "string in empty???");
return Err(anyhow!("Invalid block size"));
}
@@ -1346,7 +1404,7 @@ fn read_stream(fd: RawFd, l: usize) -> Result<Vec<u8>> {
}
Err(e) => match e {
nix::Error::Sys(errno) => match errno {
Errno::EAGAIN => v.clear(),
Errno::EAGAIN => v.resize(0, 0),
_ => return Err(anyhow!(nix::Error::Sys(errno))),
},
_ => return Err(anyhow!("read error")),
@@ -1364,13 +1422,13 @@ fn find_process<'a>(
) -> Result<&'a mut Process> {
let ctr = sandbox
.get_container(cid)
.ok_or_else(|| anyhow!("Invalid container id"))?;
.ok_or(anyhow!("Invalid container id"))?;
if init || eid == "" {
return ctr
.processes
.get_mut(&ctr.init_process_pid)
.ok_or_else(|| anyhow!("cannot find init process!"));
.ok_or(anyhow!("cannot find init process!"));
}
ctr.get_process(eid).map_err(|_| anyhow!("Invalid exec id"))
@@ -1420,7 +1478,7 @@ fn update_container_namespaces(
let linux = spec
.linux
.as_mut()
.ok_or_else(|| anyhow!("Spec didn't container linux field"))?;
.ok_or(anyhow!("Spec didn't container linux field"))?;
let namespaces = linux.namespaces.as_mut_slice();
for namespace in namespaces.iter_mut() {
@@ -1488,7 +1546,7 @@ fn is_signal_handled(pid: pid_t, signum: u32) -> bool {
}
};
if line.starts_with("SigCgt:") {
let mask_vec: Vec<&str> = line.split(':').collect();
let mask_vec: Vec<&str> = line.split(":").collect();
if mask_vec.len() != 2 {
warn!(sl!(), "parse the SigCgt field failed\n");
return false;
@@ -1508,7 +1566,7 @@ fn is_signal_handled(pid: pid_t, signum: u32) -> bool {
false
}
fn do_mem_hotplug_by_probe(addrs: &[u64]) -> Result<()> {
fn do_mem_hotplug_by_probe(addrs: &Vec<u64>) -> Result<()> {
for addr in addrs.iter() {
fs::write(SYSFS_MEMORY_HOTPLUG_PROBE_PATH, format!("{:#X}", *addr))?;
}
@@ -1521,12 +1579,8 @@ fn do_set_guest_date_time(sec: i64, usec: i64) -> Result<()> {
tv_usec: usec,
};
let ret = unsafe {
libc::settimeofday(
&tv as *const libc::timeval,
std::ptr::null::<libc::timezone>(),
)
};
let ret =
unsafe { libc::settimeofday(&tv as *const libc::timeval, 0 as *const libc::timezone) };
Errno::result(ret).map(drop)?;
@@ -1542,8 +1596,8 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
let parent = path.parent();
let dir = if let Some(parent) = parent {
parent.to_path_buf()
let dir = if parent.is_some() {
parent.unwrap().to_path_buf()
} else {
PathBuf::from("/")
};
@@ -1603,8 +1657,8 @@ fn setup_bundle(cid: &str, spec: &mut Spec) -> Result<PathBuf> {
let spec_root = spec.root.as_ref().unwrap();
let bundle_path = Path::new(CONTAINER_BASE).join(cid);
let config_path = bundle_path.join("config.json");
let rootfs_path = bundle_path.join("rootfs");
let config_path = bundle_path.clone().join("config.json");
let rootfs_path = bundle_path.clone().join("rootfs");
fs::create_dir_all(&rootfs_path)?;
BareMount::new(
@@ -1668,9 +1722,9 @@ fn load_kernel_module(module: &protocols::agent::KernelModule) -> Result<()> {
"load_kernel_module return code: {} stdout:{} stderr:{}",
code, std_out, std_err
);
Err(anyhow!(msg))
return Err(anyhow!(msg));
}
None => Err(anyhow!("Process terminated by signal")),
None => return Err(anyhow!("Process terminated by signal")),
}
}
@@ -1682,16 +1736,17 @@ mod tests {
use std::sync::mpsc::{Receiver, Sender};
use ttrpc::{MessageHeader, TtrpcContext};
type Message = (MessageHeader, Vec<u8>);
fn mk_ttrpc_context() -> (TtrpcContext, Receiver<Message>) {
fn mk_ttrpc_context() -> (TtrpcContext, Receiver<(MessageHeader, Vec<u8>)>) {
let mh = MessageHeader::default();
let (tx, rx): (Sender<Message>, Receiver<Message>) = channel();
let (tx, rx): (
Sender<(MessageHeader, Vec<u8>)>,
Receiver<(MessageHeader, Vec<u8>)>,
) = channel();
let ctx = TtrpcContext {
fd: -1,
mh,
mh: mh,
res_tx: tx,
};


@@ -3,6 +3,7 @@
// SPDX-License-Identifier: Apache-2.0
//
//use crate::container::Container;
use crate::linux_abi::*;
use crate::mount::{get_mount_fs_type, remove_mounts, TYPEROOTFS};
use crate::namespace::Namespace;
@@ -74,7 +75,7 @@ impl Sandbox {
sender: None,
rtnl: Some(RtnlHandle::new(NETLINK_ROUTE, 0).unwrap()),
hooks: None,
event_rx,
event_rx: event_rx,
event_tx: tx,
})
}
@@ -111,14 +112,14 @@ impl Sandbox {
// acquiring a lock on sandbox.
pub fn unset_sandbox_storage(&mut self, path: &str) -> Result<bool> {
match self.storages.get_mut(path) {
None => Err(anyhow!("Sandbox storage with path {} not found", path)),
None => return Err(anyhow!("Sandbox storage with path {} not found", path)),
Some(count) => {
*count -= 1;
if *count < 1 {
self.storages.remove(path);
return Ok(true);
}
Ok(false)
return Ok(false);
}
}
}
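unset_sandbox_storage() above decrements a per-path reference count and reports whether the last user is gone. A std-only sketch of that counting behaviour; the set() return convention mirrors set_sandbox_storage as used elsewhere in this diff, and the path string is arbitrary:

```rust
use std::collections::HashMap;

// path -> number of containers sharing the sandbox storage at that path.
struct Storages(HashMap<String, u32>);

impl Storages {
    // Returns true when this is the first user, i.e. the caller should
    // perform the real mount.
    fn set(&mut self, path: &str) -> bool {
        let count = self.0.entry(path.to_string()).or_insert(0);
        *count += 1;
        *count == 1
    }

    // Returns Ok(true) once the last user drops the path, meaning it is now
    // safe to unmount and clean up.
    fn unset(&mut self, path: &str) -> Result<bool, String> {
        match self.0.get_mut(path) {
            None => Err(format!("sandbox storage with path {} not found", path)),
            Some(count) => {
                *count -= 1;
                if *count < 1 {
                    self.0.remove(path);
                    return Ok(true);
                }
                Ok(false)
            }
        }
    }
}

fn main() {
    let mut s = Storages(HashMap::new());
    assert!(s.set("/run/kata/shared"));   // first user mounts
    assert!(!s.set("/run/kata/shared"));  // second user only bumps the count
    assert_eq!(s.unset("/run/kata/shared"), Ok(false));
    assert_eq!(s.unset("/run/kata/shared"), Ok(true));
}
```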
@@ -160,13 +161,13 @@ impl Sandbox {
pub fn setup_shared_namespaces(&mut self) -> Result<bool> {
// Set up shared IPC namespace
self.shared_ipcns = Namespace::new(&self.logger)
.get_ipc()
.as_ipc()
.setup()
.context("Failed to setup persistent IPC namespace")?;
// // Set up shared UTS namespace
self.shared_utsns = Namespace::new(&self.logger)
.get_uts(self.hostname.as_str())
.as_uts(self.hostname.as_str())
.setup()
.context("Failed to setup persistent UTS namespace")?;
@@ -183,7 +184,7 @@ impl Sandbox {
// This means a separate pause process has not been created. We treat the
// first container created as the infra container in that case
// and use its pid namespace in case pid namespace needs to be shared.
if self.sandbox_pidns.is_none() && self.containers.is_empty() {
if self.sandbox_pidns.is_none() && self.containers.len() == 0 {
let init_pid = c.init_process_pid;
if init_pid == -1 {
return Err(anyhow!(
@@ -191,7 +192,7 @@ impl Sandbox {
));
}
let mut pid_ns = Namespace::new(&self.logger).get_pid();
let mut pid_ns = Namespace::new(&self.logger).as_pid();
pid_ns.path = format!("/proc/{}/ns/pid", init_pid);
self.sandbox_pidns = Some(pid_ns);
@@ -215,7 +216,7 @@ impl Sandbox {
}
pub fn destroy(&mut self) -> Result<()> {
for ctr in self.containers.values_mut() {
for (_, ctr) in &mut self.containers {
ctr.destroy()?;
}
Ok(())
@@ -232,33 +233,14 @@ impl Sandbox {
online_memory(&self.logger)?;
}
if req.nb_cpus == 0 {
return Ok(());
}
let guest_cpuset = rustjail_cgroups::fs::get_guest_cpuset()?;
let cpuset = rustjail_cgroups::fs::get_guest_cpuset()?;
for (_, ctr) in self.containers.iter() {
let cpu = ctr
.config
.spec
.as_ref()
.unwrap()
.linux
.as_ref()
.unwrap()
.resources
.as_ref()
.unwrap()
.cpu
.as_ref();
let container_cpust = if let Some(c) = cpu { &c.cpus } else { "" };
info!(self.logger, "updating {}", ctr.id.as_str());
ctr.cgroup_manager
.as_ref()
.unwrap()
.update_cpuset_path(guest_cpuset.as_str(), &container_cpust)?;
.update_cpuset_path(cpuset.as_str())?;
}
Ok(())
@@ -350,7 +332,7 @@ fn online_resources(logger: &Logger, path: &str, pattern: &str, num: i32) -> Res
}
let c = c.unwrap();
if c.trim().contains('0') {
if c.trim().contains("0") {
let r = fs::write(file.as_str(), "1");
if r.is_err() {
continue;
@@ -411,6 +393,7 @@ fn online_memory(logger: &Logger) -> Result<()> {
#[cfg(test)]
mod tests {
//use rustjail::Error;
use super::Sandbox;
use crate::{mount::BareMount, skip_if_not_root};
use anyhow::Error;
@@ -629,8 +612,8 @@ mod tests {
let linux = Linux::default();
let mut spec = Spec::default();
spec.root = Some(root);
spec.linux = Some(linux);
spec.root = Some(root).into();
spec.linux = Some(linux).into();
CreateOpts {
cgroup_name: "".to_string(),
@@ -724,31 +707,4 @@ mod tests {
assert!(s.hooks.as_ref().unwrap().poststart.is_empty());
assert!(s.hooks.as_ref().unwrap().poststop.is_empty());
}
#[test]
pub fn test_sandbox_is_running() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
s.running = true;
assert!(s.is_running());
s.running = false;
assert!(!s.is_running());
}
#[test]
fn test_sandbox_set_hostname() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let hostname = "abc123";
s.set_hostname(hostname.to_string());
assert_eq!(s.hostname, hostname);
}
#[test]
fn test_sandbox_set_destroy() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let ret = s.destroy();
assert!(ret.is_ok());
}
}


@@ -2,7 +2,6 @@
//
// SPDX-License-Identifier: Apache-2.0
//
#![allow(clippy::module_inception)]
#[cfg(test)]
mod test_utils {


@@ -48,16 +48,13 @@ impl Uevent {
// Check whether this is a block device hot-add event.
fn is_block_add_event(&self) -> bool {
let pci_root_bus_path = create_pci_root_bus_path();
self.action == U_EVENT_ACTION_ADD
&& self.subsystem == "block"
&& self.devpath.starts_with(&pci_root_bus_path)
&& self.devpath.starts_with(PCI_ROOT_BUS_PATH)
&& self.devname != ""
}
fn handle_block_add_event(&self, sandbox: &Arc<Mutex<Sandbox>>) {
let pci_root_bus_path = create_pci_root_bus_path();
// Keep the same lock order as device::get_device_name(), otherwise it may cause deadlock.
let mut w = GLOBAL_DEVICE_WATCHER.lock().unwrap();
let mut sb = sandbox.lock().unwrap();
@@ -72,7 +69,7 @@ impl Uevent {
let empties: Vec<_> = w
.iter()
.filter(|(dev_addr, _)| {
let pci_p = format!("{}/{}", pci_root_bus_path, *dev_addr);
let pci_p = format!("{}/{}", PCI_ROOT_BUS_PATH, *dev_addr);
// blk block device
devpath.starts_with(pci_p.as_str()) ||


@@ -68,7 +68,7 @@ NETMON_TARGET = $(PROJECT_TYPE)-netmon
NETMON_TARGET_OUTPUT = $(CURDIR)/$(NETMON_TARGET)
BINLIBEXECLIST += $(NETMON_TARGET)
DESTDIR ?= /
DESTDIR := /
ifeq ($(PREFIX),)
PREFIX := /usr
@@ -93,9 +93,6 @@ DEFAULTSDIR := $(SHAREDIR)/defaults
COLLECT_SCRIPT = data/kata-collect-data.sh
COLLECT_SCRIPT_SRC = $(COLLECT_SCRIPT).in
# @RUNTIME_NAME@ should be replaced with the target in generated files
RUNTIME_NAME = $(TARGET)
GENERATED_FILES += $(COLLECT_SCRIPT)
GENERATED_VARS = \
VERSION \
@@ -139,14 +136,13 @@ HYPERVISORS := $(HYPERVISOR_ACRN) $(HYPERVISOR_FC) $(HYPERVISOR_QEMU) $(HYPERVIS
QEMUPATH := $(QEMUBINDIR)/$(QEMUCMD)
QEMUVALIDHYPERVISORPATHS := [\"$(QEMUPATH)\"]
QEMUVIRTIOFSPATH := $(QEMUBINDIR)/$(QEMUVIRTIOFSCMD)
QEMUVALIDVIRTIOFSPATHS := [\"$(QEMUVIRTIOFSPATH)\"]
QEMUVALIDVIRTIOFSPATHS := $(QEMUBINDIR)/$(QEMUVIRTIOFSCMD)
CLHPATH := $(CLHBINDIR)/$(CLHCMD)
CLHVALIDHYPERVISORPATHS := [\"$(CLHPATH)\"]
CLHVALIDHYPERVISORPATHS := [\"$(CLHBINDIR)/$(CLHCMD)\"]
FCPATH = $(FCBINDIR)/$(FCCMD)
FCVALIDHYPERVISORPATHS := [\"$(FCPATH)\"]
FCVALIDPATHS = [\"$(FCPATH)\"]
FCJAILERPATH = $(FCBINDIR)/$(FCJAILERCMD)
FCVALIDJAILERPATHS = [\"$(FCJAILERPATH)\"]
@@ -604,9 +600,8 @@ $(SHIMV2_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST)
$(QUIET_BUILD)(cd $(SHIMV2_DIR)/ && ln -fs $(GENERATED_CONFIG))
$(QUIET_BUILD)(cd $(SHIMV2_DIR)/ && go build $(KATA_LDFLAGS) $(BUILDFLAGS) -o $@ .)
$(MONITOR_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST) .git-commit
$(QUIET_BUILD)(cd $(MONITOR_DIR)/ && go build \
--ldflags "-X main.GitCommit=$(shell cat .git-commit)" -o $@ .)
$(MONITOR_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST)
$(QUIET_BUILD)(cd $(MONITOR_DIR)/ && go build $(KATA_LDFLAGS) $(BUILDFLAGS) -o $@ .)
.PHONY: \
check \


@@ -19,8 +19,6 @@ For details of the other Kata Containers repositories, see the
* [Quick start for developers](#quick-start-for-developers)
* [Architecture overview](#architecture-overview)
* [Configuration](#configuration)
* [Hypervisor specific configuration](#hypervisor-specific-configuration)
* [Stateless systems](#stateless-systems)
* [Logging](#logging)
* [Kata OCI](#kata-oci)
* [Kata containerd shimv2](#kata-containerd-shimv2)
@@ -67,7 +65,7 @@ The runtime has a built-in command to determine if your host system is capable
of running and creating a Kata Container:
```bash
$ kata-runtime check
$ kata-runtime kata-check
```
> **Note:**
@@ -108,15 +106,6 @@ The file contains comments explaining all options.
> You may need to modify this file to optimise or tailor your system, or if you have
> specific requirements.
### Hypervisor specific configuration
Kata Containers supports multiple hypervisors so your `configuration.toml`
configuration file may be a symbolic link to a hypervisor-specific
configuration file. See
[the hypervisors document](../../docs/hypervisors.md) for further details.
### Stateless systems
Since the runtime supports a
[stateless system](https://clearlinux.org/about),
it checks for this configuration file in multiple locations, two of which are
@@ -146,7 +135,7 @@ To see details of your systems runtime environment (including the location of
the configuration file being used), run:
```bash
$ kata-runtime env
$ kata-runtime kata-env
```
## Logging


@@ -1 +0,0 @@
2.0.0

src/runtime/VERSION Symbolic link

@@ -0,0 +1 @@
../../VERSION


@@ -36,6 +36,10 @@ var commit = "@COMMIT@"
// version is the runtime version.
var version = "@VERSION@"
// project-specific command names
var envCmd = fmt.Sprintf("%s-env", projectPrefix)
var checkCmd = fmt.Sprintf("%s-check", projectPrefix)
// project-specific option names
var configFilePathOption = fmt.Sprintf("%s-config", projectPrefix)
var showConfigPathsOption = fmt.Sprintf("%s-show-default-config-paths", projectPrefix)


@@ -235,4 +235,4 @@ experimental=@DEFAULTEXPFEATURES@
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
# enable_pprof = true
# EnablePprof = true


@@ -234,4 +234,4 @@ experimental=@DEFAULTEXPFEATURES@
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
# enable_pprof = true
# EnablePprof = true


@@ -360,4 +360,4 @@ experimental=@DEFAULTEXPFEATURES@
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
# enable_pprof = true
# EnablePprof = true


@@ -120,8 +120,8 @@ default_memory = @DEFMEMSZ@
disable_block_device_use = @DEFDISABLEBLOCK@
# Shared file system type:
# - virtio-fs (default)
# - virtio-9p
# - virtio-9p (default)
# - virtio-fs
shared_fs = "@DEFSHAREDFS_QEMU_VIRTIOFS@"
# Path to vhost-user-fs daemon.
@@ -319,26 +319,6 @@ valid_file_mem_backends = @DEFVALIDFILEMEMBACKENDS@
# Default 0-sized value means unlimited rate.
#tx_rate_limiter_max_rate = 0
# Set where to save the guest memory dump file.
# If set, when GUEST_PANICKED event occurred,
# guest memeory will be dumped to host filesystem under guest_memory_dump_path,
# This directory will be created automatically if it does not exist.
#
# The dumped file(also called vmcore) can be processed with crash or gdb.
#
# WARNING:
# Dump guests memory can take very long depending on the amount of guest memory
# and use much disk space.
#guest_memory_dump_path="/var/crash/kata"
# If enable paging.
# Basically, if you want to use "gdb" rather than "crash",
# or need the guest-virtual addresses in the ELF vmcore,
# then you should enable paging.
#
# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
#guest_memory_dump_paging=false
[factory]
# VM templating support. Once enabled, new VMs are created from template
# using vm cloning. They will share the same initial kernel, initramfs and
@@ -506,4 +486,4 @@ experimental=@DEFAULTEXPFEATURES@
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
# enable_pprof = true
# EnablePprof = true

View File

@@ -134,25 +134,17 @@ func getCPUFlags(cpuinfo string) string {
// haveKernelModule returns true if the specified module exists
// (either loaded or available to be loaded)
func haveKernelModule(module string) bool {
kmodLog := kataLog.WithField("module", module)
// First, check to see if the module is already loaded
path := filepath.Join(sysModuleDir, module)
if katautils.FileExists(path) {
return true
}
// Only root can load modules
if os.Getuid() != 0 {
kmodLog.Error("Module is not loaded and it can not be inserted. Please consider running with sudo or as root")
return false
}
// Now check if the module is not loaded but available,
// and modprobe it if so.
cmd := exec.Command(modProbeCmd, module)
if output, err := cmd.CombinedOutput(); err != nil {
kmodLog.WithError(err).WithField("output", string(output)).Warnf("modprobe insert module failed")
kataLog.WithField("module", module).WithError(err).Warnf("modprobe insert module failed: %s", string(output))
return false
}
return true
@@ -314,9 +306,8 @@ func genericHostIsVMContainerCapable(details vmContainerCapableDetails) error {
}
var kataCheckCLICommand = cli.Command{
Name: "check",
Aliases: []string{"kata-check"},
Usage: "tests if system can run " + project,
Name: checkCmd,
Usage: "tests if system can run " + project,
Flags: []cli.Flag{
cli.BoolFlag{
Name: "check-version-only",
@@ -353,36 +344,36 @@ EXAMPLES:
- Perform basic checks:
$ %s check
$ %s %s
- Local basic checks only:
$ %s check --no-network-checks
$ %s %s --no-network-checks
- Perform further checks:
$ sudo %s check
$ sudo %s %s
- Just check if a newer version is available:
$ %s check --check-version-only
$ %s %s --check-version-only
- List available releases (shows output in format "version;release-date;url"):
$ %s check --only-list-releases
$ %s %s --only-list-releases
- List all available releases (includes pre-release versions):
$ %s check --only-list-releases --include-all-releases
$ %s %s --only-list-releases --include-all-releases
`,
project,
noNetworkEnvVar,
name,
name,
name,
name,
name,
name,
name, checkCmd,
name, checkCmd,
name, checkCmd,
name, checkCmd,
name, checkCmd,
name, checkCmd,
),
Action: func(context *cli.Context) error {
@@ -395,7 +386,7 @@ EXAMPLES:
return err
}
span, _ := katautils.Trace(ctx, "check")
span, _ := katautils.Trace(ctx, "kata-check")
defer span.Finish()
if context.Bool("no-network-checks") == false && os.Getenv(noNetworkEnvVar) == "" {
@@ -422,7 +413,7 @@ EXAMPLES:
runtimeConfig, ok := context.App.Metadata["runtimeConfig"].(oci.RuntimeConfig)
if !ok {
return errors.New("check: cannot determine runtime config")
return errors.New("kata-check: cannot determine runtime config")
}
err = setCPUtype(runtimeConfig.HypervisorType)

View File

@@ -513,10 +513,6 @@ func TestCheckCheckCPUAttribs(t *testing.T) {
}
func TestCheckHaveKernelModule(t *testing.T) {
if tc.NotValid(ktu.NeedRoot()) {
t.Skip(testDisabledAsNonRoot)
}
assert := assert.New(t)
dir, err := ioutil.TempDir("", "")

View File

@@ -14,7 +14,6 @@ import (
"github.com/BurntSushi/toml"
"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils"
"github.com/kata-containers/kata-containers/src/runtime/pkg/utils"
vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
exp "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/experimental"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/oci"
@@ -293,7 +292,7 @@ func getNetmonInfo(config oci.RuntimeConfig) NetmonInfo {
}
func getCommandVersion(cmd string) (string, error) {
return utils.RunCommand([]string{cmd, "--version"})
return katautils.RunCommand([]string{cmd, "--version"})
}
func getAgentInfo(config oci.RuntimeConfig) (AgentInfo, error) {
@@ -438,9 +437,8 @@ func writeJSONSettings(env EnvInfo, file *os.File) error {
}
var kataEnvCLICommand = cli.Command{
Name: "env",
Aliases: []string{"kata-env"},
Usage: "display settings. Default to TOML",
Name: envCmd,
Usage: "display settings. Default to TOML",
Flags: []cli.Flag{
cli.BoolFlag{
Name: "json",

View File

@@ -9,8 +9,6 @@ import (
"flag"
"net/http"
"os"
"runtime"
"text/template"
"time"
kataMonitor "github.com/kata-containers/kata-containers/src/runtime/pkg/kata-monitor"
@@ -22,77 +20,12 @@ var containerdAddr = flag.String("containerd-address", "/run/containerd/containe
var containerdConfig = flag.String("containerd-conf", "/etc/containerd/config.toml", "Containerd config file.")
var logLevel = flag.String("log-level", "info", "Log level of logrus(trace/debug/info/warn/error/fatal/panic).")
// These values are overridden via ldflags
var (
appName = "kata-monitor"
// version is the kata monitor version.
version = "0.1.0"
GitCommit = "unknown-commit"
)
type versionInfo struct {
AppName string
Version string
GitCommit string
GoVersion string
Os string
Arch string
}
var versionTemplate = `{{.AppName}}
Version: {{.Version}}
Go version: {{.GoVersion}}
Git commit: {{.GitCommit}}
OS/Arch: {{.Os}}/{{.Arch}}
`
func printVersion(ver versionInfo) {
t, err := template.New("version").Parse(versionTemplate)
if err != nil {
panic(err)
}
if err = t.Execute(os.Stdout, ver); err != nil {
panic(err)
}
}
func main() {
ver := versionInfo{
AppName: appName,
Version: version,
GoVersion: runtime.Version(),
Os: runtime.GOOS,
Arch: runtime.GOARCH,
GitCommit: GitCommit,
}
if len(os.Args) == 2 && (os.Args[1] == "--version" || os.Args[1] == "version") {
printVersion(ver)
return
}
flag.Parse()
// init logrus
initLog()
announceFields := logrus.Fields{
// properties from version info
"app": ver.AppName,
"version": ver.Version,
"go-version": ver.GoVersion,
"os": ver.Os,
"arch": ver.Arch,
"git-commit": ver.GitCommit,
// properties from command-line options
"listen-address": *monitorListenAddr,
"containerd-address": *containerdAddr,
"containerd-conf": *containerdConfig,
"log-level": *logLevel,
}
logrus.WithFields(announceFields).Info("announce")
// create new kataMonitor
km, err := kataMonitor.NewKataMonitor(*containerdAddr, *containerdConfig)
if err != nil {

View File

@@ -272,7 +272,7 @@ func beforeSubcommands(c *cli.Context) error {
ignoreConfigLogs := false
var traceRootSpan string
subCmdIsCheckCmd := (c.NArg() >= 1 && ((c.Args()[0] == "kata-check") || (c.Args()[0] == "check")))
subCmdIsCheckCmd := (c.NArg() >= 1 && (c.Args()[0] == checkCmd))
if subCmdIsCheckCmd {
// checkCmd will use the default logrus logger to stderr
// raise the logger default level to warn
@@ -313,7 +313,7 @@ func beforeSubcommands(c *cli.Context) error {
// (meaning any spans created at this point will be silently ignored).
setExternalLoggers(context.Background(), kataLog)
if c.NArg() == 1 && (c.Args()[0] == "kata-env" || c.Args()[0] == "env") {
if c.NArg() == 1 && c.Args()[0] == envCmd {
// simply report the logging setup
ignoreConfigLogs = true
}

View File

@@ -24,7 +24,6 @@ import (
"github.com/dlespiau/covertool/pkg/cover"
ktu "github.com/kata-containers/kata-containers/src/runtime/pkg/katatestutils"
"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils"
"github.com/kata-containers/kata-containers/src/runtime/pkg/utils"
vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/compatoci"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/oci"
@@ -274,7 +273,7 @@ func createOCIConfig(bundleDir string) error {
return errors.New("Cannot find command to generate OCI config file")
}
_, err := utils.RunCommand([]string{configCmd, "spec", "--bundle", bundleDir})
_, err := katautils.RunCommand([]string{configCmd, "spec", "--bundle", bundleDir})
if err != nil {
return err
}
@@ -379,7 +378,7 @@ func makeOCIBundle(bundleDir string) error {
}
}
output, err := utils.RunCommandFull([]string{"cp", "-a", from, to}, true)
output, err := katautils.RunCommandFull([]string{"cp", "-a", from, to}, true)
if err != nil {
return fmt.Errorf("failed to copy test OCI bundle from %v to %v: %v (output: %v)", from, to, err, output)
}

View File

@@ -6,7 +6,6 @@
package main
import (
"bytes"
"encoding/json"
"errors"
"fmt"
@@ -277,17 +276,12 @@ func getReleases(releaseURL string, includeAll bool) ([]semver.Version, map[stri
releasesArray := []map[string]interface{}{}
body, err := ioutil.ReadAll(resp.Body)
bytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, nil, fmt.Errorf("failed to read release details: %v", err)
} else if resp.StatusCode == http.StatusForbidden && bytes.Contains(body, []byte("limit exceeded")) {
// Do not fail if rate limit is exceeded
kataLog.WithField("url", releaseURL).
Warn("API rate limit exceeded. Try again later. Read https://docs.github.com/apps/building-github-apps/understanding-rate-limits-for-github-apps for more information")
return []semver.Version{}, map[string]releaseDetails{}, nil
}
if err := json.Unmarshal(body, &releasesArray); err != nil {
if err := json.Unmarshal(bytes, &releasesArray); err != nil {
return nil, nil, fmt.Errorf("failed to unpack release details: %v", err)
}
@@ -327,14 +321,8 @@ func getNewReleaseType(current semver.Version, latest semver.Version) (string, e
}
} else if latest.Patch == current.Patch && len(latest.Pre) > 0 {
desc = "pre-release"
} else if latest.Major == current.Major &&
latest.Minor == current.Minor &&
latest.Patch == current.Patch {
if len(current.Pre) > 0 && len(latest.Pre) == 0 {
desc = "major"
}
} else {
return "", fmt.Errorf("BUG: unhandled scenario: current version: %s, latest version: %s", current, latest)
return "", fmt.Errorf("BUG: unhandled scenario: current version: %s, latest version: %v", current, latest)
}
return desc, nil
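
For clarity, the branches above classify the jump between the installed and the latest release. A rough stand-alone rendering of that classification with the blang/semver package is sketched below; the names are illustrative, not the runtime's actual helper, and note that, per the unit-test data in the next file, moving from a pre-release to its final release is reported as "major":

```go
// Rough illustration of release-type classification with blang/semver.
// Names are illustrative; the real getNewReleaseType handles more cases.
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func releaseType(current, latest semver.Version) string {
	switch {
	case latest.Major > current.Major:
		return "major"
	case latest.Minor > current.Minor:
		return "minor"
	case latest.Patch > current.Patch && len(latest.Pre) > 0:
		return "patch pre-release"
	case latest.Patch > current.Patch:
		return "patch"
	case len(current.Pre) > 0 && len(latest.Pre) == 0:
		// Moving from a pre-release to the final release is reported
		// as "major" (see the test data in the next file).
		return "major"
	default:
		return "pre-release"
	}
}

func main() {
	fmt.Println(releaseType(semver.MustParse("1.12.0-rc2"), semver.MustParse("1.12.1")))
	// patch
}
```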

View File

@@ -458,21 +458,6 @@ func TestGetNewReleaseType(t *testing.T) {
}
data := []testData{
// Check build metadata (ignored for version comparisons)
{"2.0.0+build", "2.0.0", true, ""},
{"2.0.0+build-1", "2.0.0+build-2", true, ""},
{"1.12.0+build", "1.12.0", true, ""},
{"2.0.0-rc3+foo", "2.0.0", false, "major"},
{"2.0.0-rc3+foo", "2.0.0-rc4", false, "pre-release"},
{"1.12.0+foo", "1.13.0", false, "minor"},
{"1.12.0+build", "2.0.0", false, "major"},
{"1.12.0+build", "1.13.0", false, "minor"},
{"1.12.0-rc2+build", "1.12.1", false, "patch"},
{"1.12.0-rc2+build", "1.12.1-foo", false, "patch pre-release"},
{"1.12.0-rc4+wibble", "1.12.0", false, "major"},
{"2.0.0-alpha3", "1.0.0", true, ""},
{"1.0.0", "1.0.0", true, ""},
{"2.0.0", "1.0.0", true, ""},
@@ -488,12 +473,6 @@ func TestGetNewReleaseType(t *testing.T) {
{"1.0.0", "1.0.3", false, "patch"},
{"1.0.0-beta29", "1.0.0-beta30", false, "pre-release"},
{"1.0.0", "1.0.3-alpha99.1b", false, "patch pre-release"},
{"2.0.0-rc0", "2.0.0", false, "major"},
{"2.0.0-rc1", "2.0.0", false, "major"},
{"1.12.0-rc0", "1.12.0", false, "major"},
{"1.12.0-rc5", "1.12.0", false, "major"},
}
for i, d := range data {

View File

@@ -13,7 +13,6 @@ import (
"testing"
"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils"
"github.com/kata-containers/kata-containers/src/runtime/pkg/utils"
"github.com/stretchr/testify/assert"
)
@@ -177,13 +176,13 @@ VERSION_ID="%s"
}
func TestUtilsRunCommand(t *testing.T) {
output, err := utils.RunCommand([]string{"true"})
output, err := katautils.RunCommand([]string{"true"})
assert.NoError(t, err)
assert.Equal(t, "", output)
}
func TestUtilsRunCommandCaptureStdout(t *testing.T) {
output, err := utils.RunCommand([]string{"echo", "hello"})
output, err := katautils.RunCommand([]string{"echo", "hello"})
assert.NoError(t, err)
assert.Equal(t, "hello", output)
}
@@ -191,7 +190,7 @@ func TestUtilsRunCommandCaptureStdout(t *testing.T) {
func TestUtilsRunCommandIgnoreStderr(t *testing.T) {
args := []string{"/bin/sh", "-c", "echo foo >&2;exit 0"}
output, err := utils.RunCommand(args)
output, err := katautils.RunCommand(args)
assert.NoError(t, err)
assert.Equal(t, "", output)
}
@@ -214,7 +213,7 @@ func TestUtilsRunCommandInvalidCmds(t *testing.T) {
}
for _, args := range invalidCommands {
output, err := utils.RunCommand(args)
output, err := katautils.RunCommand(args)
assert.Error(t, err)
assert.Equal(t, "", output)
}
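
The tests above pin down the behaviour expected of `katautils.RunCommand`: stdout is captured and trimmed, stderr is ignored on success, and a missing or failing command surfaces as an error. A minimal stand-alone helper with that behaviour could look like this (a sketch, not the runtime's implementation):

```go
// Sketch of a RunCommand-style helper matching the behaviour exercised by
// the tests above: capture stdout, trim trailing whitespace, ignore stderr
// on success, and return an error for invalid or failing commands.
package main

import (
	"errors"
	"fmt"
	"os/exec"
	"strings"
)

func runCommand(args []string) (string, error) {
	if len(args) == 0 {
		return "", errors.New("no command specified")
	}
	out, err := exec.Command(args[0], args[1:]...).Output() // stdout only
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}

func main() {
	out, err := runCommand([]string{"echo", "hello"})
	fmt.Printf("%q %v\n", out, err) // "hello" <nil>
}
```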

View File

@@ -17,12 +17,13 @@ import (
func deleteContainer(ctx context.Context, s *service, c *container) error {
if !c.cType.IsSandbox() {
if c.status != task.StatusStopped {
if _, err := s.sandbox.StopContainer(c.id, false); err != nil && !isNotFound(err) {
_, err := s.sandbox.StopContainer(c.id, false)
if err != nil {
return err
}
}
if _, err := s.sandbox.DeleteContainer(c.id); err != nil && !isNotFound(err) {
if _, err := s.sandbox.DeleteContainer(c.id); err != nil {
return err
}
}

View File

@@ -25,7 +25,6 @@ import (
"github.com/containerd/typeurl"
ptypes "github.com/gogo/protobuf/types"
"github.com/opencontainers/runtime-spec/specs-go"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
@@ -79,21 +78,6 @@ func New(ctx context.Context, id string, publisher events.Publisher) (cdshim.Shi
vci.SetLogger(ctx, shimLog)
katautils.SetLogger(ctx, shimLog, shimLog.Logger.Level)
// load runtime config so that tracing can start if enabled
_, runtimeConfig, err := katautils.LoadConfiguration("", false, true)
if err != nil {
return nil, err
}
// create tracer
_, err = katautils.CreateTracer("kata")
if err != nil {
return nil, err
}
// create span
span, ctx := trace(ctx, "New")
defer span.Finish()
ctx, cancel := context.WithCancel(ctx)
s := &service{
@@ -101,7 +85,6 @@ func New(ctx context.Context, id string, publisher events.Publisher) (cdshim.Shi
pid: uint32(os.Getpid()),
ctx: ctx,
containers: make(map[string]*container),
config: &runtimeConfig,
events: make(chan interface{}, chSize),
ec: make(chan exit, bufferSize),
cancel: cancel,
@@ -184,13 +167,6 @@ func newCommand(ctx context.Context, containerdBinary, id, containerdAddress str
// StartShim will start a kata shimv2 daemon which will implement the
// ShimV2 APIs such as create/start/update etc. for containers.
func (s *service) StartShim(ctx context.Context, id, containerdBinary, containerdAddress string) (string, error) {
// Stop tracing here since a new tracer will be created the next time New()
// is called again after StartShim()
defer katautils.StopTracing(s.ctx)
span, _ := trace(s.ctx, "StartShim")
defer span.Finish()
bundlePath, err := os.Getwd()
if err != nil {
return "", err
@@ -239,6 +215,8 @@ func (s *service) StartShim(ctx context.Context, id, containerdBinary, container
}
}()
// make sure to wait after start
go cmd.Wait()
if err = cdshim.WritePidFile("shim.pid", cmd.Process.Pid); err != nil {
return "", err
}
@@ -302,22 +280,7 @@ func getTopic(e interface{}) string {
return cdruntime.TaskUnknownTopic
}
func trace(ctx context.Context, name string) (opentracing.Span, context.Context) {
if ctx == nil {
logrus.WithField("type", "bug").Error("trace called before context set")
ctx = context.Background()
}
span, ctx := opentracing.StartSpanFromContext(ctx, name)
span.SetTag("source", "runtime")
return span, ctx
}
func (s *service) Cleanup(ctx context.Context) (_ *taskAPI.DeleteResponse, err error) {
span, _ := trace(s.ctx, "Cleanup")
defer span.Finish()
// Since the binary cleanup returns the DeleteResponse to containerd via stdout,
// we must make sure that nothing else is written to stdout except the returned
// response, so the log is redirected to stderr in case there's
@@ -373,9 +336,6 @@ func (s *service) Cleanup(ctx context.Context) (_ *taskAPI.DeleteResponse, err e
// Create a new sandbox or container with the underlying OCI runtime
func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *taskAPI.CreateTaskResponse, err error) {
span, _ := trace(s.ctx, "Create")
defer span.Finish()
start := time.Now()
defer func() {
err = toGRPC(err)
@@ -385,53 +345,38 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *
s.mu.Lock()
defer s.mu.Unlock()
type Result struct {
container *container
err error
var c *container
c, err = create(ctx, s, r)
if err != nil {
return nil, err
}
ch := make(chan Result, 1)
go func() {
container, err := create(ctx, s, r)
ch <- Result{container, err}
}()
select {
case <-ctx.Done():
return nil, errors.Errorf("create container timeout: %v", r.ID)
case res := <-ch:
if res.err != nil {
return nil, res.err
}
container := res.container
container.status = task.StatusCreated
c.status = task.StatusCreated
s.containers[r.ID] = container
s.containers[r.ID] = c
s.send(&eventstypes.TaskCreate{
ContainerID: r.ID,
Bundle: r.Bundle,
Rootfs: r.Rootfs,
IO: &eventstypes.TaskIO{
Stdin: r.Stdin,
Stdout: r.Stdout,
Stderr: r.Stderr,
Terminal: r.Terminal,
},
Checkpoint: r.Checkpoint,
Pid: s.pid,
})
s.send(&eventstypes.TaskCreate{
ContainerID: r.ID,
Bundle: r.Bundle,
Rootfs: r.Rootfs,
IO: &eventstypes.TaskIO{
Stdin: r.Stdin,
Stdout: r.Stdout,
Stderr: r.Stderr,
Terminal: r.Terminal,
},
Checkpoint: r.Checkpoint,
Pid: s.pid,
})
return &taskAPI.CreateTaskResponse{
Pid: s.pid,
}, nil
}
return &taskAPI.CreateTaskResponse{
Pid: s.pid,
}, nil
}
// Start a process
func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (_ *taskAPI.StartResponse, err error) {
span, _ := trace(s.ctx, "Start")
defer span.Finish()
start := time.Now()
defer func() {
err = toGRPC(err)
@@ -480,9 +425,6 @@ func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (_ *taskAP
// Delete the initial process and container
func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *taskAPI.DeleteResponse, err error) {
span, _ := trace(s.ctx, "Delete")
defer span.Finish()
start := time.Now()
defer func() {
err = toGRPC(err)
@@ -532,9 +474,6 @@ func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *task
// Exec an additional process inside the container
func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (_ *ptypes.Empty, err error) {
span, _ := trace(s.ctx, "Exec")
defer span.Finish()
start := time.Now()
defer func() {
rpcDurationsHistogram.WithLabelValues("exec").Observe(float64(time.Since(start).Nanoseconds() / int64(time.Millisecond)))
@@ -570,9 +509,6 @@ func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (_ *p
// ResizePty of a process
func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (_ *ptypes.Empty, err error) {
span, _ := trace(s.ctx, "ResizePty")
defer span.Finish()
start := time.Now()
defer func() {
err = toGRPC(err)
@@ -609,9 +545,6 @@ func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (_
// State returns runtime state information for a process
func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAPI.StateResponse, err error) {
span, _ := trace(s.ctx, "State")
defer span.Finish()
start := time.Now()
defer func() {
err = toGRPC(err)
@@ -657,13 +590,11 @@ func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAP
Terminal: execs.tty.terminal,
ExitStatus: uint32(execs.exitCode),
}, nil
}
// Pause the container
func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *ptypes.Empty, err error) {
span, _ := trace(s.ctx, "Pause")
defer span.Finish()
start := time.Now()
defer func() {
err = toGRPC(err)
@@ -700,9 +631,6 @@ func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *ptypes
// Resume the container
func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *ptypes.Empty, err error) {
span, _ := trace(s.ctx, "Resume")
defer span.Finish()
start := time.Now()
defer func() {
err = toGRPC(err)
@@ -737,9 +665,6 @@ func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *ptyp
// Kill a process with the provided signal
func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *ptypes.Empty, err error) {
span, _ := trace(s.ctx, "Kill")
defer span.Finish()
start := time.Now()
defer func() {
err = toGRPC(err)
@@ -798,9 +723,6 @@ func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *ptypes.E
// Since kata cannot get the process's pid from the VM,
// only the shim's pid is returned directly.
func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (_ *taskAPI.PidsResponse, err error) {
span, _ := trace(s.ctx, "Pids")
defer span.Finish()
var processes []*task.ProcessInfo
start := time.Now()
@@ -821,9 +743,6 @@ func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (_ *taskAPI.
// CloseIO of a process
func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (_ *ptypes.Empty, err error) {
span, _ := trace(s.ctx, "CloseIO")
defer span.Finish()
start := time.Now()
defer func() {
err = toGRPC(err)
@@ -862,9 +781,6 @@ func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (_ *pt
// Checkpoint the container
func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskRequest) (_ *ptypes.Empty, err error) {
span, _ := trace(s.ctx, "Checkpoint")
defer span.Finish()
start := time.Now()
defer func() {
err = toGRPC(err)
@@ -876,9 +792,6 @@ func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskReque
// Connect returns shim information such as the shim's pid
func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (_ *taskAPI.ConnectResponse, err error) {
span, _ := trace(s.ctx, "Connect")
defer span.Finish()
start := time.Now()
defer func() {
err = toGRPC(err)
@@ -896,8 +809,6 @@ func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (_ *ta
}
func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (_ *ptypes.Empty, err error) {
span, _ := trace(s.ctx, "Shutdown")
start := time.Now()
defer func() {
err = toGRPC(err)
@@ -911,9 +822,6 @@ func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (_ *
}
s.mu.Unlock()
span.Finish()
katautils.StopTracing(s.ctx)
s.cancel()
os.Exit(0)
@@ -924,9 +832,6 @@ func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (_ *
}
func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (_ *taskAPI.StatsResponse, err error) {
span, _ := trace(s.ctx, "Stats")
defer span.Finish()
start := time.Now()
defer func() {
err = toGRPC(err)
@@ -953,9 +858,6 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (_ *taskAP
// Update a running container
func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (_ *ptypes.Empty, err error) {
span, _ := trace(s.ctx, "Update")
defer span.Finish()
start := time.Now()
defer func() {
err = toGRPC(err)
@@ -985,9 +887,6 @@ func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (_ *
// Wait for a process to exit
func (s *service) Wait(ctx context.Context, r *taskAPI.WaitRequest) (_ *taskAPI.WaitResponse, err error) {
span, _ := trace(s.ctx, "Wait")
defer span.Finish()
var ret uint32
start := time.Now()

Some files were not shown because too many files have changed in this diff.