rust-agent: Land rust agent into kata-containers

Fixes: #56

Signed-off-by: Yang Bo <bo@hyper.sh>
This commit is contained in:
Yang Bo 2019-10-16 11:20:53 +08:00
parent f56d26105b
commit f8ced638d2
60 changed files with 43687 additions and 0 deletions

5
.gitignore vendored Normal file
View File

@ -0,0 +1,5 @@
/target
**/*.rs.bk
**/target
Cargo.lock
**/Cargo.lock

33
.travis.yml Normal file
View File

@ -0,0 +1,33 @@
# Copyright (c) 2019 Ant Financial
#
# SPDX-License-Identifier: Apache-2.0
#

# Travis CI configuration: static checks plus a build and unit-test run of
# the rust agent, on nightly Rust (see README: nightly + musl is required).

sudo: required
dist: bionic

os:
- linux

language: rust

rust:
- nightly

env:
# target_branch lets the CI helper scripts check out the matching branch
# of the kata-containers tests repository.
- target_branch=$TRAVIS_BRANCH RUST_AGENT=yes

before_install:
- "ci/setup.sh"
- "ci/install_go.sh"
- "ci/install_rust.sh"
- "ci/static-checks.sh"

# need to install rust from scratch?
# still need go to download github.com/kata-containers/tests
# which is already installed?

install:
- cd ${TRAVIS_BUILD_DIR}/src/agent && make

script:
- cd ${TRAVIS_BUILD_DIR}/src/agent && make check

22
ci/install_go.sh Executable file
View File

@ -0,0 +1,22 @@
#!/bin/bash
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

# Install the Go toolchain needed by the CI, delegating to the installer
# shipped in the kata-containers tests repository.

set -e

cidir=$(dirname "$0")
source "${cidir}/lib.sh"

clone_tests_repo

new_goroot=/usr/local/go

pushd "${tests_repo_dir}"
# Force overwrite the current version of golang
[ -z "${GOROOT}" ] || rm -rf "${GOROOT}"
# Quote the inner variable and the command substitution so the path
# survives word splitting (shellcheck SC2086).
.ci/install_go.sh -p -f -d "$(dirname "${new_goroot}")"
[ -z "${GOROOT}" ] || sudo ln -sf "${new_goroot}" "${GOROOT}"
go version
popd

16
ci/install_rust.sh Executable file
View File

@ -0,0 +1,16 @@
#!/bin/bash
# Copyright (c) 2019 Ant Financial
#
# SPDX-License-Identifier: Apache-2.0
#

# Install the Rust toolchain via the installer shipped in the
# kata-containers tests repository.

set -e

cidir=$(dirname "$0")
source "${cidir}/lib.sh"

clone_tests_repo

# Quote the directory (as the sibling scripts do) so paths containing
# spaces do not word-split.
pushd "${tests_repo_dir}"
.ci/install_rust.sh
popd

35
ci/lib.sh Normal file
View File

@ -0,0 +1,35 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# Location of the kata-containers tests repository; exported so sub-shells
# and sourced scripts can use it.
export tests_repo="${tests_repo:-github.com/kata-containers/tests}"
export tests_repo_dir="$GOPATH/src/$tests_repo"

# Clone (or update) the kata-containers tests repository under $GOPATH.
clone_tests_repo()
{
	# KATA_CI_NO_NETWORK is (has to be) ignored if there is
	# no existing clone.
	#
	# Use two separate tests instead of the deprecated, non-portable
	# "[ expr -a expr ]" form (shellcheck SC2166).
	if [ -d "$tests_repo_dir" ] && [ -n "$KATA_CI_NO_NETWORK" ]
	then
		return
	fi

	go get -d -u "$tests_repo" || true

	# On Travis, check out the branch under test so the CI helper
	# scripts match the code being tested.
	if [ -n "${TRAVIS_BRANCH:-}" ]; then
		( cd "${tests_repo_dir}" && git checkout "${TRAVIS_BRANCH}" )
	fi
}
# Run the common kata static checks (from the tests repository) against
# this repository.
run_static_checks()
{
	clone_tests_repo
	bash "$tests_repo_dir/.ci/static-checks.sh" "github.com/kata-containers/kata-containers"
}

# Run the rust-specific test script from the tests repository.
run_rust_test()
{
	clone_tests_repo
	bash "$tests_repo_dir/.ci/rust-test.sh"
}

16
ci/run.sh Executable file
View File

@ -0,0 +1,16 @@
#!/bin/bash
#
# Copyright (c) 2019 Ant Financial
#
# SPDX-License-Identifier: Apache-2.0
#

# Run the CI test suite by delegating to the runner shipped in the
# kata-containers tests repository.

set -e

cidir=$(dirname "$0")
source "${cidir}/lib.sh"

clone_tests_repo

# Quote the directory (as the sibling scripts do) so paths containing
# spaces do not word-split.
pushd "${tests_repo_dir}"
.ci/run.sh
popd

16
ci/setup.sh Executable file
View File

@ -0,0 +1,16 @@
#!/bin/bash
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

# Prepare the CI environment by delegating to the setup script shipped in
# the kata-containers tests repository.

set -e

cidir=$(dirname "$0")
source "${cidir}/lib.sh"

clone_tests_repo

pushd "${tests_repo_dir}"
.ci/setup.sh
popd

13
ci/static-checks.sh Executable file
View File

@ -0,0 +1,13 @@
#!/bin/bash
#
# Copyright (c) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

# Run the common kata static checks from the agent source directory.

set -e

cidir=$(dirname "$0")
source "${cidir}/lib.sh"

# Quote the path: TRAVIS_BUILD_DIR is externally supplied and could
# contain spaces (shellcheck SC2086).
cd "${TRAVIS_BUILD_DIR}/src/agent"

run_static_checks

32
src/agent/Cargo.toml Normal file
View File

@ -0,0 +1,32 @@
[package]
name = "kata-agent"
version = "0.1.0"
authors = ["Yang Bo <bo@hyper.sh>"]
edition = "2018"

[dependencies]
# In-tree workspace crates.
oci = { path = "oci" }
rustjail = { path = "rustjail" }
protocols = { path = "protocols" }

lazy_static = "1.3.0"
error-chain = "0.12.1"
# Temporary fork of grpc-rs carrying the changes the agent needs
# (see README "Dependencies").
grpcio = { git="https://github.com/alipay/grpc-rs", branch="rust_agent" }
protobuf = "2.6.1"
futures = "0.1.27"
libc = "0.2.58"
nix = "0.14.1"
prctl = "1.0.0"
serde_json = "1.0.39"
signal-hook = "0.1.9"
scan_fmt = "0.2.3"
regex = "1"

# slog:
# - Dynamic keys required to allow HashMap keys to be slog::Serialized.
# - The 'max_*' features allow changing the log level at runtime
#   (by stopping the compiler from removing log calls).
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
slog-json = "2.3.0"
slog-async = "2.3.0"
slog-scope = "4.1.2"

# for testing
tempfile = "3.1.0"

202
src/agent/LICENSE Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

92
src/agent/Makefile Normal file
View File

@ -0,0 +1,92 @@
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

PROJECT_NAME = Kata Containers
PROJECT_URL = https://github.com/kata-containers
PROJECT_COMPONENT = kata-agent

TARGET = $(PROJECT_COMPONENT)

SOURCES := \
  $(shell find . 2>&1 | grep -E '.*\.rs$$') \
  Cargo.toml

VERSION_FILE := ./VERSION
VERSION := $(shell grep -v ^\# $(VERSION_FILE))
COMMIT_NO := $(shell git rev-parse HEAD 2>/dev/null || true)
COMMIT_NO_SHORT := $(shell git rev-parse --short HEAD 2>/dev/null || true)
COMMIT := $(if $(shell git status --porcelain --untracked-files=no 2>/dev/null || true),${COMMIT_NO}-dirty,${COMMIT_NO})
COMMIT_MSG = $(if $(COMMIT),$(COMMIT),unknown)

# Exported to allow cargo to see it
export VERSION_COMMIT := $(if $(COMMIT),$(VERSION)-$(COMMIT),$(VERSION))

BUILD_TYPE = release

# Pass "--release" to cargo when a release build was requested; without
# this, cargo builds into target/$(TRIPLE)/debug/ and $(TARGET_PATH)
# (which points at the $(BUILD_TYPE) directory) would never be produced.
ifeq ($(BUILD_TYPE),release)
    BUILD_FLAGS := --release
endif

ARCH = $(shell uname -m)
LIBC = musl
TRIPLE = $(ARCH)-unknown-linux-$(LIBC)

TARGET_PATH = target/$(TRIPLE)/$(BUILD_TYPE)/$(TARGET)

DESTDIR :=
BINDIR := /usr/bin

# Display name of command and its version (or a message if not available).
#
# Arguments:
#
# 1: Name of command
define get_command_version
$(shell printf "%s: %s\\n" $(1) "$(or $(shell $(1) --version 2>/dev/null), (not available))")
endef

define get_toolchain_version
$(shell printf "%s: %s\\n" "toolchain" "$(or $(shell rustup show active-toolchain 2>/dev/null), (unknown))")
endef

default: $(TARGET) show-header

$(TARGET): $(TARGET_PATH)

$(TARGET_PATH): $(SOURCES) | show-summary
	@cargo build --target $(TRIPLE) $(BUILD_FLAGS)

show-header:
	@printf "%s - version %s (commit %s)\n\n" "$(TARGET)" "$(VERSION)" "$(COMMIT_MSG)"

install:
	@install -D $(TARGET_PATH) $(DESTDIR)/$(BINDIR)/$(TARGET)

clean:
	@cargo clean

check:
	@cargo test --target $(TRIPLE)

run:
	@cargo run --target $(TRIPLE)

show-summary: show-header
	@printf "project:\n"
	@printf "  name: $(PROJECT_NAME)\n"
	@printf "  url: $(PROJECT_URL)\n"
	@printf "  component: $(PROJECT_COMPONENT)\n"
	@printf "target: $(TARGET)\n"
	@printf "architecture:\n"
	@printf "  host: $(ARCH)\n"
	@printf "rust:\n"
	@printf "  %s\n" "$(call get_command_version,cargo)"
	@printf "  %s\n" "$(call get_command_version,rustc)"
	@printf "  %s\n" "$(call get_command_version,rustup)"
	@printf "  %s\n" "$(call get_toolchain_version)"
	@printf "\n"

help: show-summary

# All non-file targets must be declared phony, otherwise a file with the
# same name (e.g. "check" or "install") would silently disable the rule.
.PHONY: \
	check \
	clean \
	default \
	help \
	install \
	run \
	show-header \
	show-summary

69
src/agent/README.md Normal file
View File

@ -0,0 +1,69 @@
# Kata Agent in Rust
This is a rust version of the [`kata-agent`](https://github.com/kata-containers/kata-agent).
At the Denver PTG, [we discussed re-writing the agent in Rust](https://etherpad.openstack.org/p/katacontainers-2019-ptg-denver-agenda):
> In general, we all think about re-write agent in rust to reduce the footprint of agent. Moreover, Eric mentioned the possibility to stop using gRPC, which may have some impact on footprint. We may begin to do some PoC to show how much we could save by re-writing agent in rust.
After that, we drafted the initial code here, and any contributions are welcome.
## Features
| Feature | Status |
| :--|:--:|
| **OCI Behaviors** |
| create/start containers | :white_check_mark: |
| signal/wait process | :white_check_mark: |
| exec/list process | :white_check_mark: |
| I/O stream | :white_check_mark: |
| Cgroups | :white_check_mark: |
| Capabilities, rlimit, readonly path, masked path, users | :white_check_mark: |
| container stats (`stats_container`) | :white_check_mark: |
| Hooks | :white_check_mark: |
| **Agent Features & APIs** |
| run agent as `init` (mount fs, udev, setup `lo`) | :white_check_mark: |
| block device as root device | :white_check_mark: |
| Health API | :white_check_mark: |
| network, interface/routes (`update_container`) | :white_check_mark: |
| File transfer API (`copy_file`) | :white_check_mark: |
| Device APIs (`reseed_random_device`, `online_cpu_memory`, `mem_hotplug_probe`, `set_guest_date_time`) | :white_check_mark: |
| vsock support | :white_check_mark: |
| virtio-serial support | :heavy_multiplication_x: |
| OCI Spec validator | :white_check_mark: |
| **Infrastructures**|
| Debug Console | :white_check_mark: |
| Command line | :white_check_mark: |
| Tracing | :heavy_multiplication_x: |
## Getting Started
### Dependencies
The `rust-agent` depends on [`grpc-rs`](https://github.com/pingcap/grpc-rs) by PingCAP. However, the upstream `grpc-rs` and [gRPC](https://github.com/grpc/grpc) need some changes to be used here, which may take some time to land. Therefore, we created a temporary fork of `grpc-rs` here:
- https://github.com/alipay/grpc-rs/tree/rust_agent
### Build from Source
The rust-agent needs to be built with Rust nightly and statically linked with musl.
```bash
rustup toolchain install nightly
rustup default nightly
rustup target add x86_64-unknown-linux-musl --toolchain=nightly
git submodule update --init --recursive
sudo ln -s /usr/bin/g++ /bin/musl-g++
cargo build --target x86_64-unknown-linux-musl --release
```
## Run Kata CI with rust-agent
* Firstly, install kata as noted by ["how to install Kata"](https://github.com/kata-containers/documentation/blob/master/install/README.md)
* Secondly, build your own kata initrd/image following the steps in ["how to build your own initrd/image"](https://github.com/kata-containers/documentation/blob/master/Developer-Guide.md#create-and-install-rootfs-and-initrd-image).
Note: please use the rust agent instead of the go agent when building your initrd/image.
* Clone the kata ci test cases from: https://github.com/kata-containers/tests.git, and then run the cri test with:
```bash
$sudo -E PATH=$PATH -E GOPATH=$GOPATH integration/containerd/shimv2/shimv2-tests.sh
```
## Mini Benchmark
The 'RssAnon' memory consumed by the go-agent and the rust-agent is shown below:
go-agent: about 11M
rust-agent: about 1.1M

1
src/agent/VERSION Normal file
View File

@ -0,0 +1 @@
0.0.1

11
src/agent/oci/Cargo.toml Normal file
View File

@ -0,0 +1,11 @@
[package]
name = "oci"
version = "0.1.0"
authors = ["Yang Bo <bo@hyper.sh>"]
edition = "2018"
[dependencies]
serde = "1.0.91"
serde_derive = "1.0.91"
serde_json = "1.0.39"
libc = "0.2.58"

850
src/agent/oci/src/lib.rs Normal file
View File

@ -0,0 +1,850 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
use std::collections::HashMap;
// use std::io::Write;
use libc::mode_t;
// use std::any::Any;
pub mod serialize;
/// Serde `skip_serializing_if` helper: true when the flag is unset, so a
/// `false` boolean field is omitted from the serialized JSON.
#[allow(dead_code)]
fn is_false(b: bool) -> bool {
    match b {
        true => false,
        false => true,
    }
}
/// Serde `skip_serializing_if` helper: true when the value equals its
/// type's `Default`, so default-valued fields are omitted when serializing.
#[allow(dead_code)]
fn is_default<T>(d: &T) -> bool
where
    T: Default + PartialEq,
{
    d == &T::default()
}
/// The OCI runtime configuration (`config.json`) for a container.
///
/// Field names follow the OCI runtime-spec JSON schema; `rename`
/// attributes map the Rust snake_case names onto the spec's camelCase keys.
#[derive(Serialize, Deserialize, Debug)]
pub struct Spec {
    /// OCI specification version this document conforms to.
    #[serde(
        default,
        rename = "ociVersion",
        skip_serializing_if = "String::is_empty"
    )]
    pub version: String,
    /// The process to run in the container.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub process: Option<Process>,
    /// The container's root filesystem.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub root: Option<Root>,
    // Normalized from "String:: is_empty" — same path to serde, but now
    // written consistently with every other attribute in this file.
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub hostname: String,
    /// Additional filesystems to mount inside the container.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub mounts: Vec<Mount>,
    /// Lifecycle hooks run on the host.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub hooks: Option<Hooks>,
    /// Free-form key/value metadata.
    #[serde(default, skip_serializing_if = "HashMap::is_empty")]
    pub annotations: HashMap<String, String>,
    /// Platform-specific configuration sections; only one is normally set.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub linux: Option<Linux>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub solaris: Option<Solaris>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub windows: Option<Windows<String>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vm: Option<VM>,
}
impl Spec {
    /// Deserialize the OCI `config.json` file at `path` into a `Spec`.
    pub fn load(path: &str) -> Result<Spec, serialize::SerializeError> {
        serialize::deserialize(path)
    }

    /// Serialize this `Spec` as JSON into the file at `path`.
    pub fn save(&self, path: &str) -> Result<(), serialize::SerializeError> {
        serialize::serialize(self, path)
    }
}
// Alias kept for parity with the Go OCI types, which name this LinuxRlimit.
#[allow(dead_code)]
pub type LinuxRlimit = POSIXRlimit;

/// The process to run inside the container (OCI `process` object).
#[derive(Serialize, Deserialize, Debug)]
pub struct Process {
    /// Whether a terminal is attached to the process.
    #[serde(default)]
    pub terminal: bool,
    /// Console size in characters; `Box` here is this module's
    /// width/height struct, not `std::boxed::Box`.
    #[serde(
        default,
        rename = "consoleSize",
        skip_serializing_if = "Option::is_none"
    )]
    pub console_size: Option<Box>,
    /// User/group identity to run the process as.
    pub user: User,
    /// Command and arguments (argv).
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub args: Vec<String>,
    /// Environment variables, each as "KEY=value".
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub env: Vec<String>,
    /// Working directory, an absolute path inside the container.
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub cwd: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub capabilities: Option<LinuxCapabilities>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub rlimits: Vec<POSIXRlimit>,
    #[serde(default, rename = "noNewPrivileges")]
    pub no_new_privileges: bool,
    #[serde(
        default,
        rename = "apparmorProfile",
        skip_serializing_if = "String::is_empty"
    )]
    pub apparmor_profile: String,
    /// OOM score adjustment written to /proc/<pid>/oom_score_adj.
    #[serde(
        default,
        rename = "oomScoreAdj",
        skip_serializing_if = "Option::is_none"
    )]
    pub oom_score_adj: Option<i32>,
    #[serde(
        default,
        rename = "selinuxLabel",
        skip_serializing_if = "String::is_empty"
    )]
    pub selinux_label: String,
}
/// Linux capability sets for the container process (OCI `capabilities`).
/// Each entry is a capability name such as "CAP_NET_ADMIN".
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxCapabilities {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub bounding: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub effective: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub inheritable: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub permitted: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub ambient: Vec<String>,
}

/// Console size in characters (OCI `consoleSize`).
///
/// NOTE: this type shadows `std::boxed::Box` inside this module; use the
/// fully qualified `std::boxed::Box` if the standard type is ever needed.
#[derive(Default, PartialEq, Serialize, Deserialize, Debug)]
pub struct Box {
    #[serde(default)]
    pub height: u32,
    #[serde(default)]
    pub width: u32,
}
/// User and group identity the container process runs as (OCI `user`).
#[derive(Serialize, Deserialize, Debug)]
pub struct User {
    #[serde(default)]
    pub uid: u32,
    #[serde(default)]
    pub gid: u32,
    /// Supplementary group IDs.
    ///
    /// The OCI runtime-spec key is `additionalGids`; the previous
    /// misspelling `addtionalGids` is kept as a deserialization alias so
    /// documents written by older builds still load.
    #[serde(
        default,
        rename = "additionalGids",
        alias = "addtionalGids",
        skip_serializing_if = "Vec::is_empty"
    )]
    pub additional_gids: Vec<u32>,
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub username: String,
}
/// The container's root filesystem (OCI `root` object).
#[derive(Serialize, Deserialize, Debug)]
pub struct Root {
    /// Path to the rootfs directory.
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub path: String,
    /// Mount the rootfs read-only when true.
    #[serde(default)]
    pub readonly: bool,
}

/// A filesystem mount to perform in the container (OCI `mounts` entry).
#[derive(Serialize, Deserialize, Debug)]
pub struct Mount {
    /// Mount point inside the container.
    #[serde(default)]
    pub destination: String,
    /// Filesystem type (e.g. "bind", "proc", "tmpfs").
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub r#type: String,
    /// Device or directory to mount.
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub source: String,
    /// Mount options as understood by mount(8).
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub options: Vec<String>,
}

/// A lifecycle hook: an executable run at a container lifecycle event
/// (OCI `hooks` entry).
#[derive(Serialize, Deserialize, Debug)]
pub struct Hook {
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub path: String,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub args: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub env: Vec<String>,
    /// Seconds to wait before aborting the hook; `None` means no timeout.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub timeout: Option<i32>,
}

/// The lifecycle hooks, grouped by event (OCI `hooks` object).
#[derive(Serialize, Deserialize, Debug)]
pub struct Hooks {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub prestart: Vec<Hook>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub poststart: Vec<Hook>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub poststop: Vec<Hook>,
}
/// Linux-specific container configuration (OCI `linux` object).
#[derive(Serialize, Deserialize, Debug)]
pub struct Linux {
    /// UID mappings for the user namespace.
    #[serde(default, rename = "uidMappings", skip_serializing_if = "Vec::is_empty")]
    pub uid_mappings: Vec<LinuxIDMapping>,
    /// GID mappings for the user namespace.
    #[serde(default, rename = "gidMappings", skip_serializing_if = "Vec::is_empty")]
    pub gid_mappings: Vec<LinuxIDMapping>,
    /// Kernel parameters to set under /proc/sys in the container.
    #[serde(default, skip_serializing_if = "HashMap::is_empty")]
    pub sysctl: HashMap<String, String>,
    /// Cgroup resource limits.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resources: Option<LinuxResources>,
    /// Path for the container's cgroup within the hierarchy.
    #[serde(
        default,
        rename = "cgroupsPath",
        skip_serializing_if = "String::is_empty"
    )]
    pub cgroups_path: String,
    /// Namespaces to create or join.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub namespaces: Vec<LinuxNamespace>,
    /// Device nodes to create inside the container.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub devices: Vec<LinuxDevice>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub seccomp: Option<LinuxSeccomp>,
    /// Rootfs mount propagation mode (e.g. "private", "slave").
    #[serde(
        default,
        rename = "rootfsPropagation",
        skip_serializing_if = "String::is_empty"
    )]
    pub rootfs_propagation: String,
    /// Paths to mask (make unreadable) inside the container.
    #[serde(default, rename = "maskedPaths", skip_serializing_if = "Vec::is_empty")]
    pub masked_paths: Vec<String>,
    /// Paths to make read-only inside the container.
    #[serde(
        default,
        rename = "readonlyPaths",
        skip_serializing_if = "Vec::is_empty"
    )]
    pub readonly_paths: Vec<String>,
    /// SELinux label applied to the mounts.
    #[serde(
        default,
        rename = "mountLabel",
        skip_serializing_if = "String::is_empty"
    )]
    pub mount_label: String,
    #[serde(default, rename = "intelRdt", skip_serializing_if = "Option::is_none")]
    pub intel_rdt: Option<LinuxIntelRdt>,
}

/// A namespace the container creates or joins (OCI `namespaces` entry).
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxNamespace {
    /// Namespace kind; one of the namespace name constants in this module.
    /// (`LinuxNamespaceType` is an alias of `String`, which is why
    /// `String::is_empty` is a valid predicate here.)
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub r#type: LinuxNamespaceType,
    /// Existing namespace file to join; empty means create a new namespace.
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub path: String,
}
/// A Linux namespace kind, stored as its OCI string name.
pub type LinuxNamespaceType = String;

// Namespace names as defined by the OCI runtime spec. The `'static`
// lifetime is elided below: it is implied for string constants
// (clippy::redundant_static_lifetimes).
#[allow(dead_code)]
pub const PIDNAMESPACE: &str = "pid";
#[allow(dead_code)]
pub const NETWORKNAMESPACE: &str = "network";
#[allow(dead_code)]
pub const MOUNTNAMESPACE: &str = "mount";
#[allow(dead_code)]
pub const IPCNAMESPACE: &str = "ipc";
#[allow(dead_code)]
pub const USERNAMESPACE: &str = "user";
#[allow(dead_code)]
pub const UTSNAMESPACE: &str = "uts";
#[allow(dead_code)]
pub const CGROUPNAMESPACE: &str = "cgroup";
/// One UID/GID range mapping between host and container user namespaces.
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxIDMapping {
    /// First ID inside the container.
    #[serde(default, rename = "containerID")]
    pub container_id: u32,
    /// First ID on the host.
    #[serde(default, rename = "hostID")]
    pub host_id: u32,
    /// Number of consecutive IDs in the mapping.
    #[serde(default)]
    pub size: u32,
}

/// A POSIX resource limit (setrlimit) for the container process.
#[derive(Serialize, Deserialize, Debug)]
pub struct POSIXRlimit {
    /// Limit name, e.g. "RLIMIT_NOFILE".
    #[serde(default)]
    pub r#type: String,
    #[serde(default)]
    pub hard: u64,
    #[serde(default)]
    pub soft: u64,
}

/// Limit for one hugepage size (hugetlb cgroup controller).
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxHugepageLimit {
    /// Hugepage size with unit, e.g. "2MB".
    #[serde(default, rename = "pageSize", skip_serializing_if = "String::is_empty")]
    pub page_size: String,
    /// Limit in bytes.
    #[serde(default)]
    pub limit: u64,
}

/// Network priority assigned to traffic leaving one interface.
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxInterfacePriority {
    /// Interface name.
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub name: String,
    #[serde(default)]
    pub priority: u32,
}

/// A block device identified by major/minor numbers.
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxBlockIODevice {
    #[serde(default)]
    pub major: i64,
    #[serde(default)]
    pub minor: i64,
}

/// Per-device blkio weight override.
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxWeightDevice {
    pub blk: LinuxBlockIODevice,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub weight: Option<u16>,
    #[serde(
        default,
        rename = "leafWeight",
        skip_serializing_if = "Option::is_none"
    )]
    pub leaf_weight: Option<u16>,
}

/// Per-device blkio throttle limit (rate units depend on the list the
/// entry appears in: bytes/s or IO/s).
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxThrottleDevice {
    pub blk: LinuxBlockIODevice,
    #[serde(default)]
    pub rate: u64,
}
/// Block IO cgroup settings (OCI `blockIO` object).
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxBlockIO {
    /// Default weight for the cgroup.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub weight: Option<u16>,
    #[serde(
        default,
        rename = "leafWeight",
        skip_serializing_if = "Option::is_none"
    )]
    pub leaf_weight: Option<u16>,
    /// Per-device weight overrides.
    #[serde(
        default,
        rename = "weightDevice",
        skip_serializing_if = "Vec::is_empty"
    )]
    pub weight_device: Vec<LinuxWeightDevice>,
    /// Per-device read throttle, bytes per second.
    #[serde(
        default,
        skip_serializing_if = "Vec::is_empty",
        rename = "throttleReadBpsDevice"
    )]
    pub throttle_read_bps_device: Vec<LinuxThrottleDevice>,
    /// Per-device write throttle, bytes per second.
    #[serde(
        default,
        skip_serializing_if = "Vec::is_empty",
        rename = "throttleWriteBpsDevice"
    )]
    pub throttle_write_bps_device: Vec<LinuxThrottleDevice>,
    /// Per-device read throttle, IO operations per second.
    #[serde(
        default,
        skip_serializing_if = "Vec::is_empty",
        rename = "throttleReadIOPSDevice"
    )]
    pub throttle_read_iops_device: Vec<LinuxThrottleDevice>,
    /// Per-device write throttle, IO operations per second.
    #[serde(
        default,
        skip_serializing_if = "Vec::is_empty",
        rename = "throttleWriteIOPSDevice"
    )]
    pub throttle_write_iops_device: Vec<LinuxThrottleDevice>,
}
/// Memory cgroup settings (OCI `memory` object); byte values unless noted.
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxMemory {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reservation: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub swap: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kernel: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "kernelTCP")]
    pub kernel_tcp: Option<i64>,
    /// Swappiness (0-100). The OCI runtime-spec key is `swappiness`; the
    /// previous misspelled key `swapiness` is kept as a deserialization
    /// alias so documents written by older builds still load.
    #[serde(
        default,
        skip_serializing_if = "Option::is_none",
        rename = "swappiness",
        alias = "swapiness"
    )]
    pub swapiness: Option<i64>,
    #[serde(
        default,
        skip_serializing_if = "Option::is_none",
        rename = "disableOOMKiller"
    )]
    pub disable_oom_killer: Option<bool>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxCPU {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub shares: Option<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub quota: Option<i64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub period: Option<u64>,
#[serde(
default,
skip_serializing_if = "Option::is_none",
rename = "realtimeRuntime"
)]
pub realtime_runtime: Option<i64>,
#[serde(
default,
skip_serializing_if = "Option::is_none",
rename = "realtimePeriod"
)]
pub realtime_period: Option<u64>,
#[serde(default, skip_serializing_if = "String::is_empty")]
pub cpus: String,
#[serde(default, skip_serializing_if = "String::is_empty")]
pub mems: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxPids {
#[serde(default)]
pub limit: i64,
}
/// net_cls / net_prio cgroup settings (OCI `network` resource object).
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxNetwork {
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "classID")]
    pub class_id: Option<u32>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub priorities: Vec<LinuxInterfacePriority>,
}
/// Per-device RDMA cgroup limits (OCI `rdma` resource object values).
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxRdma {
    #[serde(
        default,
        skip_serializing_if = "Option::is_none",
        rename = "hcaHandles"
    )]
    pub hca_handles: Option<u32>,
    #[serde(
        default,
        skip_serializing_if = "Option::is_none",
        rename = "hcaObjects"
    )]
    pub hca_objects: Option<u32>,
}
/// Aggregated cgroup resource constraints for a container
/// (OCI runtime spec `resources` object).
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxResources {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub devices: Vec<LinuxDeviceCgroup>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub memory: Option<LinuxMemory>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub cpu: Option<LinuxCPU>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub pids: Option<LinuxPids>,
    // `default` added for consistency with every other optional field here;
    // behavior is unchanged (serde already treats a missing `Option` field
    // as `None`), but the attribute set now matches its siblings.
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "blockIO")]
    pub block_io: Option<LinuxBlockIO>,
    #[serde(
        default,
        skip_serializing_if = "Vec::is_empty",
        rename = "hugepageLimits"
    )]
    pub hugepage_limits: Vec<LinuxHugepageLimit>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub network: Option<LinuxNetwork>,
    // Keys are RDMA device names per the OCI runtime spec.
    #[serde(default, skip_serializing_if = "HashMap::is_empty")]
    pub rdma: HashMap<String, LinuxRdma>,
}
/// A device node to create inside the container (OCI `linux.devices` entry).
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxDevice {
    // Path of the node inside the container.
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub path: String,
    // Device type string ("c", "b", ...); `r#` escapes the keyword.
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub r#type: String,
    #[serde(default)]
    pub major: i64,
    #[serde(default)]
    pub minor: i64,
    // `mode_t` comes from the crate's libc imports (declared above this view).
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "fileMode")]
    pub file_mode: Option<mode_t>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub uid: Option<u32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub gid: Option<u32>,
}
/// A device cgroup allow/deny rule (OCI `resources.devices` entry).
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxDeviceCgroup {
    #[serde(default)]
    pub allow: bool,
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub r#type: String,
    // `None` major/minor acts as a wildcard in the serialized rule.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub major: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub minor: Option<i64>,
    // Access string, e.g. some combination of "r", "w", "m".
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub access: String,
}
/// Solaris-specific container configuration (OCI `solaris` object).
#[derive(Serialize, Deserialize, Debug)]
pub struct Solaris {
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub milestone: String,
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub limitpriv: String,
    #[serde(
        default,
        skip_serializing_if = "String::is_empty",
        rename = "maxShmMemory"
    )]
    pub max_shm_memory: String,
    // Automatic network interfaces ("anet") for the zone.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub anet: Vec<SolarisAnet>,
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "cappedCPU")]
    pub capped_cpu: Option<SolarisCappedCPU>,
    #[serde(
        default,
        skip_serializing_if = "Option::is_none",
        rename = "cappedMemory"
    )]
    pub capped_memory: Option<SolarisCappedMemory>,
}
/// Solaris capped-CPU setting (OCI `solaris.cappedCPU`).
#[derive(Serialize, Deserialize, Debug)]
pub struct SolarisCappedCPU {
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub ncpus: String,
}
/// Solaris capped-memory settings (OCI `solaris.cappedMemory`).
#[derive(Serialize, Deserialize, Debug)]
pub struct SolarisCappedMemory {
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub physical: String,
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub swap: String,
}
/// A Solaris automatic VNIC ("anet") definition (OCI `solaris.anet` entry).
#[derive(Serialize, Deserialize, Debug)]
pub struct SolarisAnet {
    #[serde(default, skip_serializing_if = "String::is_empty", rename = "linkname")]
    pub link_name: String,
    #[serde(
        default,
        skip_serializing_if = "String::is_empty",
        rename = "lowerLink"
    )]
    pub lower_link: String,
    // The OCI runtime spec key is "allowedAddress"; the previous rename
    // "allowdAddress" was a typo that broke round-tripping of this field
    // with conforming producers.
    #[serde(
        default,
        skip_serializing_if = "String::is_empty",
        rename = "allowedAddress"
    )]
    pub allowed_addr: String,
    #[serde(
        default,
        skip_serializing_if = "String::is_empty",
        rename = "configureAllowedAddress"
    )]
    pub config_allowed_addr: String,
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub defrouter: String,
    #[serde(
        default,
        skip_serializing_if = "String::is_empty",
        rename = "linkProtection"
    )]
    pub link_protection: String,
    #[serde(
        default,
        skip_serializing_if = "String::is_empty",
        rename = "macAddress"
    )]
    pub mac_address: String,
}
/// Windows-specific container configuration (OCI `windows` object).
/// `T` is the caller-chosen type of the free-form `credentialSpec` payload;
/// the `#[serde(default)]` on that field requires `T: Default`.
#[derive(Serialize, Deserialize, Debug)]
pub struct Windows<T> {
    #[serde(
        default,
        skip_serializing_if = "Vec::is_empty",
        rename = "layerFolders"
    )]
    pub layer_folders: Vec<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resources: Option<WindowsResources>,
    #[serde(default, rename = "credentialSpec")]
    pub credential_spec: T,
    #[serde(default)]
    pub servicing: bool,
    #[serde(default, rename = "ignoreFlushesDuringBoot")]
    pub ignore_flushes_during_boot: bool,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub hyperv: Option<WindowsHyperV>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub network: Option<WindowsNetwork>,
}
/// Resource limits for a Windows container (OCI `windows.resources`).
#[derive(Serialize, Deserialize, Debug)]
pub struct WindowsResources {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub memory: Option<WindowsMemoryResources>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub cpu: Option<WindowsCPUResources>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub storage: Option<WindowsStorageResources>,
}
/// Windows memory limit (OCI `windows.resources.memory`).
#[derive(Serialize, Deserialize, Debug)]
pub struct WindowsMemoryResources {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<u64>,
}
/// Windows CPU limits (OCI `windows.resources.cpu`).
#[derive(Serialize, Deserialize, Debug)]
pub struct WindowsCPUResources {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<u64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub shares: Option<u64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub maximum: Option<u64>,
}
/// Windows storage I/O limits (OCI `windows.resources.storage`).
#[derive(Serialize, Deserialize, Debug)]
pub struct WindowsStorageResources {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub iops: Option<u64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub bps: Option<u64>,
    #[serde(
        default,
        skip_serializing_if = "Option::is_none",
        rename = "sandboxSize"
    )]
    pub sandbox_size: Option<u64>,
}
/// Windows container networking settings (OCI `windows.network`).
#[derive(Serialize, Deserialize, Debug)]
pub struct WindowsNetwork {
    #[serde(
        default,
        skip_serializing_if = "Vec::is_empty",
        rename = "endpointList"
    )]
    pub endpoint_list: Vec<String>,
    #[serde(default, rename = "allowUnqualifiedDNSQuery")]
    pub allow_unqualified_dns_query: bool,
    #[serde(
        default,
        skip_serializing_if = "Vec::is_empty",
        rename = "DNSSearchList"
    )]
    pub dns_search_list: Vec<String>,
    // The OCI runtime spec key is "networkSharedContainerName"; the previous
    // rename "nwtworkSharedContainerName" was a typo, so values written by a
    // conforming producer never reached this field.
    #[serde(
        default,
        skip_serializing_if = "String::is_empty",
        rename = "networkSharedContainerName"
    )]
    pub network_shared_container_name: String,
}
/// Hyper-V isolation settings for a Windows container (OCI `windows.hyperv`).
#[derive(Serialize, Deserialize, Debug)]
pub struct WindowsHyperV {
    #[serde(
        default,
        skip_serializing_if = "String::is_empty",
        rename = "utilityVMPath"
    )]
    pub utility_vm_path: String,
}
/// Virtual-machine configuration (OCI `vm` object). All three members are
/// required: there are no serde defaults, so deserialization fails if any
/// is absent.
#[derive(Serialize, Deserialize, Debug)]
pub struct VM {
    pub hypervisor: VMHypervisor,
    pub kernel: VMKernel,
    pub image: VMImage,
}
/// Hypervisor binary and arguments for a VM-based container (OCI `vm.hypervisor`).
#[derive(Serialize, Deserialize, Debug)]
pub struct VMHypervisor {
    #[serde(default)]
    pub path: String,
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub parameters: String,
}
/// Guest kernel settings for a VM-based container (OCI `vm.kernel`).
#[derive(Serialize, Deserialize, Debug)]
pub struct VMKernel {
    #[serde(default)]
    pub path: String,
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub parameters: String,
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub initrd: String,
}
/// Guest root-filesystem image for a VM-based container (OCI `vm.image`).
#[derive(Serialize, Deserialize, Debug)]
pub struct VMImage {
    #[serde(default)]
    pub path: String,
    // Image format identifier (e.g. "raw"); exact set is producer-defined.
    #[serde(default)]
    pub format: String,
}
/// Seccomp filter configuration (OCI `linux.seccomp`): a default action
/// plus per-syscall overrides, optionally restricted to architectures.
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxSeccomp {
    #[serde(default, rename = "defaultAction")]
    pub default_action: LinuxSeccompAction,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub architectures: Vec<Arch>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub syscalls: Vec<LinuxSyscall>,
}
/// Seccomp architecture identifier; represented as a plain string holding
/// one of the `SCMP_ARCH_*` constants below.
pub type Arch = String;
// `'static` on a `const` is implied; the redundant lifetime is removed per
// clippy's `redundant_static_lifetimes`. Values are the libseccomp
// architecture token names from the OCI runtime spec.
#[allow(dead_code)]
pub const ARCHX86: &str = "SCMP_ARCH_X86";
#[allow(dead_code)]
pub const ARCHX86_64: &str = "SCMP_ARCH_X86_64";
#[allow(dead_code)]
pub const ARCHX32: &str = "SCMP_ARCH_X32";
#[allow(dead_code)]
pub const ARCHARM: &str = "SCMP_ARCH_ARM";
#[allow(dead_code)]
pub const ARCHAARCH64: &str = "SCMP_ARCH_AARCH64";
#[allow(dead_code)]
pub const ARCHMIPS: &str = "SCMP_ARCH_MIPS";
#[allow(dead_code)]
pub const ARCHMIPS64: &str = "SCMP_ARCH_MIPS64";
#[allow(dead_code)]
pub const ARCHMIPS64N32: &str = "SCMP_ARCH_MIPS64N32";
#[allow(dead_code)]
pub const ARCHMIPSEL: &str = "SCMP_ARCH_MIPSEL";
#[allow(dead_code)]
pub const ARCHMIPSEL64: &str = "SCMP_ARCH_MIPSEL64";
#[allow(dead_code)]
pub const ARCHMIPSEL64N32: &str = "SCMP_ARCH_MIPSEL64N32";
#[allow(dead_code)]
pub const ARCHPPC: &str = "SCMP_ARCH_PPC";
#[allow(dead_code)]
pub const ARCHPPC64: &str = "SCMP_ARCH_PPC64";
#[allow(dead_code)]
pub const ARCHPPC64LE: &str = "SCMP_ARCH_PPC64LE";
#[allow(dead_code)]
pub const ARCHS390: &str = "SCMP_ARCH_S390";
#[allow(dead_code)]
pub const ARCHS390X: &str = "SCMP_ARCH_S390X";
#[allow(dead_code)]
pub const ARCHPARISC: &str = "SCMP_ARCH_PARISC";
#[allow(dead_code)]
pub const ARCHPARISC64: &str = "SCMP_ARCH_PARISC64";
/// Seccomp action; represented as a plain string holding one of the
/// `SCMP_ACT_*` constants below.
pub type LinuxSeccompAction = String;
// Redundant `'static` removed (implied on `const`; clippy
// `redundant_static_lifetimes`).
#[allow(dead_code)]
pub const ACTKILL: &str = "SCMP_ACT_KILL";
#[allow(dead_code)]
pub const ACTTRAP: &str = "SCMP_ACT_TRAP";
#[allow(dead_code)]
pub const ACTERRNO: &str = "SCMP_ACT_ERRNO";
#[allow(dead_code)]
pub const ACTTRACE: &str = "SCMP_ACT_TRACE";
#[allow(dead_code)]
pub const ACTALLOW: &str = "SCMP_ACT_ALLOW";
/// Seccomp argument comparison operator; represented as a plain string
/// holding one of the `SCMP_CMP_*` constants below.
pub type LinuxSeccompOperator = String;
// Redundant `'static` removed (implied on `const`; clippy
// `redundant_static_lifetimes`).
#[allow(dead_code)]
pub const OPNOTEQUAL: &str = "SCMP_CMP_NE";
#[allow(dead_code)]
pub const OPLESSTHAN: &str = "SCMP_CMP_LT";
#[allow(dead_code)]
pub const OPLESSEQUAL: &str = "SCMP_CMP_LE";
#[allow(dead_code)]
pub const OPEQUALTO: &str = "SCMP_CMP_EQ";
#[allow(dead_code)]
pub const OPGREATEREQUAL: &str = "SCMP_CMP_GE";
#[allow(dead_code)]
pub const OPGREATERTHAN: &str = "SCMP_CMP_GT";
#[allow(dead_code)]
pub const OPMASKEDEQUAL: &str = "SCMP_CMP_MASKED_EQ";
/// One syscall-argument comparison inside a seccomp rule
/// (OCI `linux.seccomp.syscalls[].args` entry).
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxSeccompArg {
    // Index of the syscall argument being compared.
    #[serde(default)]
    pub index: u32,
    #[serde(default)]
    pub value: u64,
    // Second operand, used by two-operand operators (e.g. masked-equal).
    #[serde(default, rename = "valueTwo")]
    pub value_two: u64,
    // One of the SCMP_CMP_* operator strings defined above.
    #[serde(default)]
    pub op: LinuxSeccompOperator,
}
/// A seccomp rule: the action to take for the named syscalls, optionally
/// gated on argument comparisons (OCI `linux.seccomp.syscalls` entry).
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxSyscall {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub names: Vec<String>,
    // `LinuxSeccompAction` is a `String` alias, hence the String predicate.
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub action: LinuxSeccompAction,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub args: Vec<LinuxSeccompArg>,
}
/// Intel RDT (resctrl) configuration (OCI `linux.intelRdt`).
#[derive(Serialize, Deserialize, Debug)]
pub struct LinuxIntelRdt {
    #[serde(
        default,
        skip_serializing_if = "String::is_empty",
        rename = "l3CacheSchema"
    )]
    pub l3_cache_schema: String,
}
/// Runtime state of a container as defined by the OCI runtime spec
/// `state` JSON document.
#[derive(Serialize, Deserialize, Debug)]
pub struct State {
    // OCI spec version the state conforms to (JSON key "ociVersion").
    #[serde(
        default,
        skip_serializing_if = "String::is_empty",
        rename = "ociVersion"
    )]
    pub version: String,
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub id: String,
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub status: String,
    #[serde(default)]
    pub pid: i32,
    // Absolute path to the container's bundle directory.
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub bundle: String,
    #[serde(default, skip_serializing_if = "HashMap::is_empty")]
    pub annotations: HashMap<String, String>,
}
#[cfg(test)]
mod tests {
    /// Harness sanity check: proves the crate's unit-test setup runs at all.
    #[test]
    fn it_works() {
        let four = 2 + 2;
        assert_eq!(four, 4);
    }
}

View File

@ -0,0 +1,87 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use serde;
use serde::{Deserialize, Serialize};
use serde_json;
use std::error::Error;
use std::fmt::{self, Formatter};
use std::fs::File;
use std::io;
/// Error type for the JSON helpers in this module: wraps either the
/// underlying filesystem error or the serde_json (de)serialization error.
#[derive(Debug)]
pub enum SerializeError {
    Io(io::Error),
    Json(serde_json::Error),
}
impl fmt::Display for SerializeError {
    /// Delegate formatting straight to the wrapped error's `Display`.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match self {
            SerializeError::Io(e) => e.fmt(f),
            SerializeError::Json(e) => e.fmt(f),
        }
    }
}
impl Error for SerializeError {
    // `Error::description()` and `Error::cause()` are deprecated; expose the
    // wrapped error via `source()` instead. The trait's default `cause()`
    // delegates to `source()`, so existing `cause()` callers still get the
    // inner error, and `description()` keeps working via its default impl.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match *self {
            SerializeError::Io(ref e) => Some(e),
            SerializeError::Json(ref e) => Some(e),
        }
    }
}
impl From<io::Error> for SerializeError {
fn from(e: io::Error) -> SerializeError {
SerializeError::Io(e)
}
}
impl From<serde_json::Error> for SerializeError {
fn from(e: serde_json::Error) -> SerializeError {
SerializeError::Json(e)
}
}
pub fn to_writer<W, T>(o: &T, mut w: W) -> Result<(), SerializeError>
where
W: io::Write,
T: Serialize,
{
Ok(serde_json::to_writer(&mut w, &o)?)
}
pub fn serialize<T>(o: &T, path: &str) -> Result<(), SerializeError>
where
T: Serialize,
{
let mut f = File::create(path)?;
Ok(serde_json::to_writer(&mut f, &o)?)
}
/// Render `o` as a compact JSON string.
pub fn to_string<T>(o: &T) -> Result<String, SerializeError>
where
    T: Serialize,
{
    let json = serde_json::to_string(o)?;
    Ok(json)
}
/// Open the JSON file at `path` and parse it into a `T`.
pub fn deserialize<T>(path: &str) -> Result<T, SerializeError>
where
    for<'a> T: Deserialize<'a>,
{
    let f = File::open(path)?;
    let parsed = serde_json::from_reader(f)?;
    Ok(parsed)
}

View File

@ -0,0 +1,10 @@
[package]
name = "protocols"
version = "0.1.0"
authors = ["Hui Zhu <teawater@hyper.sh>"]
edition = "2018"
[dependencies]
grpcio = { git="https://github.com/alipay/grpc-rs", branch="rust_agent" }
protobuf = "2.6.1"
futures = "0.1.27"

View File

@ -0,0 +1,68 @@
#!/bin/bash
# Print an error message to stderr and abort the script with failure.
# The original used a bare `exit`, which propagates the status of the
# preceding `echo` — i.e. it exited 0 (success) on fatal errors.
die() {
    echo "$1" >&2
    exit 1
}
# Ensure $GOPATH/src/$1 exists (fetching it with `go get` if needed) and,
# when $2 is a non-empty git revision, check that revision out.
# $1 - Go import path of the repository
# $2 - git revision to pin, or "" for whatever is checked out
get_source_version() {
    if [ ! -d "$GOPATH/src/$1" ]; then
        # Failure is checked on the command itself; the old form tested $?
        # after the `fi`, which also matched the (always-zero) status of a
        # skipped `if` body.
        go get -d -v "$1" || die "Failed to get $1"
    fi
    if [ "$2" != "" ]; then
        pushd "${GOPATH}/src/$1"
        if [ "$(git rev-parse HEAD)" != "$2" ]; then
            git checkout "$2" || die "Failed to get $1 $2"
        fi
        popd
    fi
}
# Generate Rust protobuf + gRPC stubs for one .proto file ($1) into ./src/.
# The command is kept in a variable and expanded unquoted on purpose:
# word-splitting turns it back into argv.
get_rs() {
    local cmd="protoc --rust_out=./src/ --grpc_out=./src/,plugins=grpc:./src/ --plugin=protoc-gen-grpc=$(which grpc_rust_plugin) -I ./protos/ ./protos/$1"
    echo "$cmd"
    $cmd || die "Failed to get rust from $1"
}
# Refuse to run from anywhere but the protocols crate root: every output
# path below (./src, ./protos, ./hack) is relative.
if [ "$(basename $(pwd))" != "protocols" ] || [ ! -d "./hack/" ]; then
    die "Please go to directory of protocols before execute this shell"
fi
# Verify the three code generators are on PATH before doing any work
# (`which` also prints the resolved path for the log).
which protoc
[ $? -eq 0 ] || die "Please install protoc from github.com/protocolbuffers/protobuf"
which protoc-gen-rust
[ $? -eq 0 ] || die "Please install protobuf-codegen from github.com/pingcap/grpc-rs"
which grpc_rust_plugin
[ $? -eq 0 ] || die "Please install grpc_rust_plugin from github.com/pingcap/grpc-rs"
# Optionally refresh the vendored .proto sources from the Go agent repo
# first; requires a Go toolchain and $GOPATH.
if [ $UPDATE_PROTOS ]; then
    if [ ! $GOPATH ]; then
        die 'Need $GOPATH to get the proto files'
    fi
    get_source_version "github.com/kata-containers/agent" ""
    cp $GOPATH/src/github.com/kata-containers/agent/protocols/grpc/agent.proto ./protos/
    cp $GOPATH/src/github.com/kata-containers/agent/protocols/grpc/oci.proto ./protos/
    cp $GOPATH/src/github.com/kata-containers/agent/protocols/grpc/health.proto ./protos/
    mkdir -p ./protos/github.com/kata-containers/agent/pkg/types/
    cp $GOPATH/src/github.com/kata-containers/agent/pkg/types/types.proto ./protos/github.com/kata-containers/agent/pkg/types/
    # The version is taken from https://github.com/kata-containers/agent/blob/master/Gopkg.toml
    get_source_version "github.com/gogo/protobuf" "4cbf7e384e768b4e01799441fdf2a706a5635ae7"
    mkdir -p ./protos/github.com/gogo/protobuf/gogoproto/
    cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto ./protos/github.com/gogo/protobuf/gogoproto/
    mkdir -p ./protos/google/protobuf/
    cp $GOPATH/src/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto ./protos/google/protobuf/
fi
# Generate Rust stubs for every proto we ship.
get_rs agent.proto
get_rs health.proto
get_rs github.com/kata-containers/agent/pkg/types/types.proto
get_rs google/protobuf/empty.proto
get_rs oci.proto
# Need change Box<Self> to ::std::boxed::Box<Self> because there is another
# struct Box (the generated oci.rs has its own `Box` type that shadows the
# prelude's — presumably from a `Box` message in oci.proto; sed via a temp
# file keeps the edit atomic).
sed 's/fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> {/fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<::std::any::Any> {/g' src/oci.rs > src/new_oci.rs
mv src/new_oci.rs src/oci.rs

View File

@ -0,0 +1,487 @@
//
// Copyright 2017 HyperHQ Inc.
// Copyright 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
syntax = "proto3";
package grpc;
import "oci.proto";
import "github.com/kata-containers/agent/pkg/types/types.proto";
import "google/protobuf/empty.proto";
// unstable
// AgentService is the RPC surface the host runtime uses to drive the guest
// agent: container/process lifecycle, stdio streaming, guest networking,
// tracing, and sandbox-level management.
service AgentService {
    // execution
    rpc CreateContainer(CreateContainerRequest) returns (google.protobuf.Empty);
    rpc StartContainer(StartContainerRequest) returns (google.protobuf.Empty);

    // RemoveContainer will tear down an existing container by forcibly terminating
    // all processes running inside that container and releasing all internal
    // resources associated with it.
    // RemoveContainer will wait for all processes termination before returning.
    // If any process can not be killed or if it can not be killed after
    // the RemoveContainerRequest timeout, RemoveContainer will return an error.
    rpc RemoveContainer(RemoveContainerRequest) returns (google.protobuf.Empty);
    rpc ExecProcess(ExecProcessRequest) returns (google.protobuf.Empty);
    rpc SignalProcess(SignalProcessRequest) returns (google.protobuf.Empty);
    rpc WaitProcess(WaitProcessRequest) returns (WaitProcessResponse); // wait & reap like waitpid(2)
    rpc ListProcesses(ListProcessesRequest) returns (ListProcessesResponse);
    rpc UpdateContainer(UpdateContainerRequest) returns (google.protobuf.Empty);
    rpc StatsContainer(StatsContainerRequest) returns (StatsContainerResponse);
    rpc PauseContainer(PauseContainerRequest) returns (google.protobuf.Empty);
    rpc ResumeContainer(ResumeContainerRequest) returns (google.protobuf.Empty);

    // stdio
    rpc WriteStdin(WriteStreamRequest) returns (WriteStreamResponse);
    rpc ReadStdout(ReadStreamRequest) returns (ReadStreamResponse);
    rpc ReadStderr(ReadStreamRequest) returns (ReadStreamResponse);
    rpc CloseStdin(CloseStdinRequest) returns (google.protobuf.Empty);
    rpc TtyWinResize(TtyWinResizeRequest) returns (google.protobuf.Empty);

    // networking
    rpc UpdateInterface(UpdateInterfaceRequest) returns (types.Interface);
    rpc UpdateRoutes(UpdateRoutesRequest) returns (Routes);
    rpc ListInterfaces(ListInterfacesRequest) returns(Interfaces);
    rpc ListRoutes(ListRoutesRequest) returns (Routes);

    // tracing
    rpc StartTracing(StartTracingRequest) returns (google.protobuf.Empty);
    rpc StopTracing(StopTracingRequest) returns (google.protobuf.Empty);

    // misc (TODO: some rpcs can be replaced by hyperstart-exec)
    rpc CreateSandbox(CreateSandboxRequest) returns (google.protobuf.Empty);
    rpc DestroySandbox(DestroySandboxRequest) returns (google.protobuf.Empty);
    rpc OnlineCPUMem(OnlineCPUMemRequest) returns (google.protobuf.Empty);
    rpc ReseedRandomDev(ReseedRandomDevRequest) returns (google.protobuf.Empty);
    rpc GetGuestDetails(GuestDetailsRequest) returns (GuestDetailsResponse);
    rpc MemHotplugByProbe(MemHotplugByProbeRequest) returns (google.protobuf.Empty);
    rpc SetGuestDateTime(SetGuestDateTimeRequest) returns (google.protobuf.Empty);
    rpc CopyFile(CopyFileRequest) returns (google.protobuf.Empty);
}
// Parameters for CreateContainer: the OCI spec plus the devices and
// storages the agent must set up inside the guest before the workload runs.
message CreateContainerRequest {
    string container_id = 1;
    string exec_id = 2;
    StringUser string_user = 3;
    repeated Device devices = 4;
    repeated Storage storages = 5;
    Spec OCI = 6;

    // This field is used to indicate if the container needs to join
    // sandbox shared pid ns or create a new namespace. This field is
    // meant to override the NEWPID config settings in the OCI spec.
    // The agent would receive an OCI spec with PID namespace cleared
    // out altogether and not just the pid ns path.
    bool sandbox_pidns = 7;
}

message StartContainerRequest {
    string container_id = 1;
}

message RemoveContainerRequest {
    string container_id = 1;

    // RemoveContainer will return an error if
    // it could not kill some container processes
    // after timeout seconds.
    // Setting timeout to 0 means RemoveContainer will
    // wait forever.
    uint32 timeout = 2;
}

message ExecProcessRequest {
    string container_id = 1;
    string exec_id = 2;
    StringUser string_user = 3;
    Process process = 4;
}

message SignalProcessRequest {
    string container_id = 1;

    // Special case for SignalProcess(): exec_id can be empty(""),
    // which means to send the signal to all the processes including their descendants.
    // Other APIs with exec_id should treat empty exec_id as an invalid request.
    string exec_id = 2;
    uint32 signal = 3;
}

message WaitProcessRequest {
    string container_id = 1;
    string exec_id = 2;
}

// Exit status of the awaited process, as reaped by the agent.
message WaitProcessResponse {
    int32 status = 1;
}
// ListProcessesRequest contains the options used to list running processes inside the container
message ListProcessesRequest {
    string container_id = 1;
    // Output format selector, with extra arguments passed through in `args`.
    string format = 2;
    repeated string args = 3;
}

// ListProcessesResponse represents the list of running processes inside the container
message ListProcessesResponse {
    // Raw process listing in the requested format.
    bytes process_list = 1;
}

// Replace a running container's resource constraints with `resources`.
message UpdateContainerRequest {
    string container_id = 1;
    LinuxResources resources = 2;
}

message StatsContainerRequest {
    string container_id = 1;
}

message PauseContainerRequest {
    string container_id = 1;
}

message ResumeContainerRequest {
    string container_id = 1;
}
// Aggregate and per-CPU time counters for the container.
message CpuUsage {
    uint64 total_usage = 1;
    repeated uint64 percpu_usage = 2;
    uint64 usage_in_kernelmode = 3;
    uint64 usage_in_usermode = 4;
}

// Scheduler throttling counters for the container.
message ThrottlingData {
    uint64 periods = 1;
    uint64 throttled_periods = 2;
    uint64 throttled_time = 3;
}

message CpuStats {
    CpuUsage cpu_usage = 1;
    ThrottlingData throttling_data = 2;
}

// Current process count and configured limit.
message PidsStats {
    uint64 current = 1;
    uint64 limit = 2;
}

// A single memory accounting sample: usage, high-water mark, failure
// count and limit.
message MemoryData {
    uint64 usage = 1;
    uint64 max_usage = 2;
    uint64 failcnt = 3;
    uint64 limit = 4;
}

message MemoryStats {
    uint64 cache = 1;
    MemoryData usage = 2;
    MemoryData swap_usage = 3;
    MemoryData kernel_usage = 4;
    bool use_hierarchy = 5;
    // Raw per-key counters as reported by the kernel.
    map<string, uint64> stats = 6;
}

// One block-I/O accounting entry for a (major, minor) device and operation.
message BlkioStatsEntry {
    uint64 major = 1;
    uint64 minor = 2;
    string op = 3;
    uint64 value = 4;
}

message BlkioStats {
    repeated BlkioStatsEntry io_service_bytes_recursive = 1; // number of bytes transferred to and from the block device
    repeated BlkioStatsEntry io_serviced_recursive = 2;
    repeated BlkioStatsEntry io_queued_recursive = 3;
    repeated BlkioStatsEntry io_service_time_recursive = 4;
    repeated BlkioStatsEntry io_wait_time_recursive = 5;
    repeated BlkioStatsEntry io_merged_recursive = 6;
    repeated BlkioStatsEntry io_time_recursive = 7;
    repeated BlkioStatsEntry sectors_recursive = 8;
}

message HugetlbStats {
    uint64 usage = 1;
    uint64 max_usage = 2;
    uint64 failcnt = 3;
}

// All cgroup statistics for one container, grouped by controller.
message CgroupStats {
    CpuStats cpu_stats = 1;
    MemoryStats memory_stats = 2;
    PidsStats pids_stats = 3;
    BlkioStats blkio_stats = 4;
    map<string, HugetlbStats> hugetlb_stats = 5; // the map is in the format "size of hugepage: stats of the hugepage"
}

// Per-interface packet/byte/error/drop counters.
message NetworkStats {
    string name = 1;
    uint64 rx_bytes = 2;
    uint64 rx_packets = 3;
    uint64 rx_errors = 4;
    uint64 rx_dropped = 5;
    uint64 tx_bytes = 6;
    uint64 tx_packets = 7;
    uint64 tx_errors = 8;
    uint64 tx_dropped = 9;
}

message StatsContainerResponse {
    CgroupStats cgroup_stats = 1;
    repeated NetworkStats network_stats = 2;
}
// Write a chunk of data to the stdin of one process.
message WriteStreamRequest {
    string container_id = 1;
    string exec_id = 2;
    bytes data = 3;
}

// Number of bytes actually written.
message WriteStreamResponse {
    uint32 len = 1;
}

// Read up to `len` bytes from a process's stdout or stderr.
message ReadStreamRequest {
    string container_id = 1;
    string exec_id = 2;
    uint32 len = 3;
}

message ReadStreamResponse {
    bytes data = 1;
}

message CloseStdinRequest {
    string container_id = 1;
    string exec_id = 2;
}

// Resize the terminal of a process to row x column.
message TtyWinResizeRequest {
    string container_id = 1;
    string exec_id = 2;
    uint32 row = 3;
    uint32 column = 4;
}
// Initial sandbox setup: hostname, DNS, sandbox-level storages and the
// shared PID namespace policy. Must be called before other sandbox calls.
message CreateSandboxRequest {
    string hostname = 1;
    repeated string dns = 2;
    repeated Storage storages = 3;

    // This field means that a pause process needs to be created by the
    // agent. This pid namespace of the pause process will be treated as
    // a shared pid namespace. All containers created will join this shared
    // pid namespace.
    bool sandbox_pidns = 4;
    // SandboxId identifies which sandbox is using the agent. We allow only
    // one sandbox per agent and implicitly require that CreateSandbox is
    // called before other sandbox/network calls.
    string sandbox_id = 5;
    // This field, if non-empty, designates an absolute path to a directory
    // that the agent will search for OCI hooks to run within the guest.
    string guest_hook_path = 6;
}

message DestroySandboxRequest {
}

// Wrapper types around the shared `types.Interface`/`types.Route` lists.
message Interfaces {
    repeated types.Interface Interfaces = 1;
}

message Routes {
    repeated types.Route Routes = 1;
}

message UpdateInterfaceRequest {
    types.Interface interface = 1;
}

message UpdateRoutesRequest {
    Routes routes = 1;
}

message ListInterfacesRequest {
}

message ListRoutesRequest {
}
message OnlineCPUMemRequest {
    // Wait specifies if the caller waits for the agent to online all resources.
    // If true the agent returns once all resources have been connected, otherwise all
    // resources are connected asynchronously and the agent returns immediately.
    bool wait = 1;

    // NbCpus specifies the number of CPUs that were added and the agent has to online.
    uint32 nb_cpus = 2;

    // CpuOnly specifies whether only online CPU or not.
    bool cpu_only = 3;
}

message ReseedRandomDevRequest {
    // Data specifies the random data used to reseed the guest crng.
    // NOTE(review): field numbering starts at 2 — field 1 appears
    // unused/reserved upstream; keep the number for wire compatibility.
    bytes data = 2;
}
// AgentDetails provides information to the client about the running agent.
message AgentDetails {
    // Semantic version of agent (see https://semver.org).
    string version = 1;

    // Set if the agent is running as PID 1.
    bool init_daemon = 2;

    // List of available device handlers.
    repeated string device_handlers = 3;

    // List of available storage handlers.
    repeated string storage_handlers = 4;

    // Set only if the agent is built with seccomp support and the guest
    // environment supports seccomp.
    bool supports_seccomp = 5;
}

message GuestDetailsRequest {
    // MemBlockSize asks server to return the system memory block size that can be used
    // for memory hotplug alignment. Typically the server returns what's in
    // /sys/devices/system/memory/block_size_bytes.
    bool mem_block_size = 1;

    // MemoryHotplugProbe asks server to return whether guest kernel supports memory hotplug
    // via the probe interface. Typically the server will check if the path
    // /sys/devices/system/memory/probe exists.
    bool mem_hotplug_probe = 2;
}

message GuestDetailsResponse {
    // MemBlockSizeBytes returns the system memory block size in bytes.
    uint64 mem_block_size_bytes = 1;
    AgentDetails agent_details = 2;
    bool support_mem_hotplug_probe = 3;
}

message MemHotplugByProbeRequest {
    // server needs to send the value of memHotplugProbeAddr into file /sys/devices/system/memory/probe,
    // in order to notify the guest kernel about hot-add memory event
    repeated uint64 memHotplugProbeAddr = 1;
}

message SetGuestDateTimeRequest {
    // Sec the second since the Epoch.
    int64 Sec = 1;
    // Usec the microseconds portion of time since the Epoch.
    int64 Usec = 2;
}
// Storage represents both the rootfs of the container, and any volume that
// could have been defined through the Mount list of the OCI specification.
message Storage {
    // Driver is used to define the way the storage is passed through the
    // virtual machine. It can be "9p", "blk", or something else, but for
    // all cases, this will define if some extra steps are required before
    // this storage gets mounted into the container.
    string driver = 1;
    // DriverOptions allows the caller to define a list of options such
    // as block sizes, numbers of luns, ... which are very specific to
    // every device and cannot be generalized through extra fields.
    repeated string driver_options = 2;
    // Source can be anything representing the source of the storage. This
    // will be handled by the proper handler based on the Driver used.
    // For instance, it can be a very simple path if the caller knows the
    // name of device inside the VM, or it can be some sort of identifier
    // to let the agent find the device inside the VM.
    string source = 3;
    // Fstype represents the filesystem that needs to be used to mount the
    // storage inside the VM. For instance, it could be "xfs" for block
    // device, "9p" for shared filesystem, or "tmpfs" for shared /dev/shm.
    string fstype = 4;
    // Options describes the additional options that might be needed to
    // mount properly the storage filesystem.
    repeated string options = 5;
    // MountPoint refers to the path where the storage should be mounted
    // inside the VM.
    string mount_point = 6;
}
// Device represents only the devices that could have been defined through the
// Linux Device list of the OCI specification.
message Device {
    // Id can be used to identify the device inside the VM. Some devices
    // might not need it to be identified on the VM, and will rely on the
    // provided VmPath instead.
    string id = 1;
    // Type defines the type of device described. This can be "blk",
    // "scsi", "vfio", ...
    // Particularly, this should be used to trigger the use of the
    // appropriate device handler.
    string type = 2;
    // VmPath can be used by the caller to provide directly the path of
    // the device as it will appear inside the VM. For some devices, the
    // device id or the list of options passed might not be enough to find
    // the device. In those cases, the caller should predict and provide
    // this vm_path.
    string vm_path = 3;
    // ContainerPath defines the path where the device should be found inside
    // the container. This path should match the path of the device from
    // the device list listed inside the OCI spec. This is used in order
    // to identify the right device in the spec and update it with the
    // right options such as major/minor numbers as they appear inside
    // the VM for instance. Note that an empty container_path should be used
    // to make sure the device handler inside the agent is called, but
    // no spec update needs to be performed. This has to happen for the
    // case of rootfs, when a device has to be waited for after it has
    // been hotplugged. An equivalent Storage entry should be defined if
    // any mount needs to be performed afterwards.
    string container_path = 4;
    // Options allows the caller to define a list of options such as block
    // sizes, numbers of luns, ... which are very specific to every device
    // and cannot be generalized through extra fields.
    repeated string options = 5;
}
// User/group identity expressed as strings (names or numeric ids),
// resolved by the agent inside the guest.
message StringUser {
    string uid = 1;
    string gid = 2;
    repeated string additionalGids = 3;
}

message CopyFileRequest {
    // Path is the destination file in the guest. It must be absolute,
    // canonical and below /run.
    string path = 1;
    // FileSize is the expected file size, for security reasons write operations
    // are made in a temporary file, once it has the expected size, it's moved
    // to the destination path.
    int64 file_size = 2;
    // FileMode is the file mode.
    uint32 file_mode = 3;
    // DirMode is the mode for the parent directories of destination path.
    uint32 dir_mode = 4;
    // Uid is the numeric user id.
    int32 uid = 5;
    // Gid is the numeric group id.
    int32 gid = 6;
    // Offset for the next write operation.
    int64 offset = 7;
    // Data to write in the destination file.
    bytes data = 8;
}

message StartTracingRequest {
}

message StopTracingRequest {
}

View File

@ -0,0 +1,144 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto2";
package gogoproto;
import "google/protobuf/descriptor.proto";
option java_package = "com.google.protobuf";
option java_outer_classname = "GoGoProtos";
option go_package = "github.com/gogo/protobuf/gogoproto";
// Per-enum gogoproto options (vendored gogo/protobuf descriptor extensions):
// control prefixing, stringer generation and custom naming for Go enums.
extend google.protobuf.EnumOptions {
optional bool goproto_enum_prefix = 62001;
optional bool goproto_enum_stringer = 62021;
optional bool enum_stringer = 62022;
optional string enum_customname = 62023;
optional bool enumdecl = 62024;
}
// Per-enum-value gogoproto option: override the generated Go name of a value.
extend google.protobuf.EnumValueOptions {
optional string enumvalue_customname = 66001;
}
// File-wide gogoproto defaults: each *_all option applies the matching
// per-message or per-enum option to every element declared in the file.
extend google.protobuf.FileOptions {
optional bool goproto_getters_all = 63001;
optional bool goproto_enum_prefix_all = 63002;
optional bool goproto_stringer_all = 63003;
optional bool verbose_equal_all = 63004;
optional bool face_all = 63005;
optional bool gostring_all = 63006;
optional bool populate_all = 63007;
optional bool stringer_all = 63008;
optional bool onlyone_all = 63009;
optional bool equal_all = 63013;
optional bool description_all = 63014;
optional bool testgen_all = 63015;
optional bool benchgen_all = 63016;
optional bool marshaler_all = 63017;
optional bool unmarshaler_all = 63018;
optional bool stable_marshaler_all = 63019;
optional bool sizer_all = 63020;
optional bool goproto_enum_stringer_all = 63021;
optional bool enum_stringer_all = 63022;
optional bool unsafe_marshaler_all = 63023;
optional bool unsafe_unmarshaler_all = 63024;
optional bool goproto_extensions_map_all = 63025;
optional bool goproto_unrecognized_all = 63026;
optional bool gogoproto_import = 63027;
optional bool protosizer_all = 63028;
optional bool compare_all = 63029;
optional bool typedecl_all = 63030;
optional bool enumdecl_all = 63031;
optional bool goproto_registration = 63032;
optional bool messagename_all = 63033;
optional bool goproto_sizecache_all = 63034;
optional bool goproto_unkeyed_all = 63035;
}
// Per-message gogoproto options controlling the generated Go code:
// getters, stringers, (un)marshalers, equality, population, and
// test/benchmark generation. Note: `stringer` uses tag 67008 (not the
// 64xxx range) — this matches the upstream gogo/protobuf file.
extend google.protobuf.MessageOptions {
optional bool goproto_getters = 64001;
optional bool goproto_stringer = 64003;
optional bool verbose_equal = 64004;
optional bool face = 64005;
optional bool gostring = 64006;
optional bool populate = 64007;
optional bool stringer = 67008;
optional bool onlyone = 64009;
optional bool equal = 64013;
optional bool description = 64014;
optional bool testgen = 64015;
optional bool benchgen = 64016;
optional bool marshaler = 64017;
optional bool unmarshaler = 64018;
optional bool stable_marshaler = 64019;
optional bool sizer = 64020;
optional bool unsafe_marshaler = 64023;
optional bool unsafe_unmarshaler = 64024;
optional bool goproto_extensions_map = 64025;
optional bool goproto_unrecognized = 64026;
optional bool protosizer = 64028;
optional bool compare = 64029;
optional bool typedecl = 64030;
optional bool messagename = 64033;
optional bool goproto_sizecache = 64034;
optional bool goproto_unkeyed = 64035;
}
// Per-field gogoproto options: nullability, embedding, custom Go types and
// names, struct tags, cast types, and well-known-type handling.
extend google.protobuf.FieldOptions {
optional bool nullable = 65001;
optional bool embed = 65002;
optional string customtype = 65003;
optional string customname = 65004;
optional string jsontag = 65005;
optional string moretags = 65006;
optional string casttype = 65007;
optional string castkey = 65008;
optional string castvalue = 65009;
optional bool stdtime = 65010;
optional bool stdduration = 65011;
optional bool wktpointer = 65012;
}

View File

@ -0,0 +1,49 @@
//
// Copyright 2018 Intel Corporation.
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
syntax = "proto3";
package types;
// IPFamily is the IP address family.
enum IPFamily {
// v4 denotes an IPv4 address.
v4 = 0;
// v6 denotes an IPv6 address.
v6 = 1;
}
// IPAddress is a single IP address together with its network mask.
message IPAddress {
// family is the address family (IPv4 or IPv6).
IPFamily family = 1;
// address is the textual form of the IP address.
string address = 2;
// mask is the network mask associated with the address.
string mask = 3;
}
// Interface describes a network interface inside the guest.
message Interface {
// device is the interface's device identifier.
// NOTE(review): the exact distinction between device and name is not
// visible here — confirm against the agent implementation.
string device = 1;
// name is the interface name.
string name = 2;
// IPAddresses lists the addresses assigned to this interface.
repeated IPAddress IPAddresses = 3;
// mtu is the maximum transmission unit of the interface.
uint64 mtu = 4;
// hwAddr is the hardware (MAC) address of the interface.
string hwAddr = 5;
// pciAddr is the PCI address in the format "bridgeAddr/deviceAddr".
// Here, bridgeAddr is the address at which the bridge is attached on the root bus,
// while deviceAddr is the address at which the network device is attached on the bridge.
string pciAddr = 6;
// Type defines the type of interface described by this structure.
// The expected values are the one that are defined by the netlink
// library, regarding each type of link. Here is a non exhaustive
// list: "veth", "macvtap", "vlan", "macvlan", "tap", ...
string type = 7;
// raw_flags carries device flags in raw form.
// NOTE(review): presumably netlink IFF_* link flags — confirm.
uint32 raw_flags = 8;
}
// Route describes one entry of the guest routing table.
message Route {
// dest is the destination network of the route.
string dest = 1;
// gateway is the next-hop address.
string gateway = 2;
// device is the interface the route is associated with.
string device = 3;
// source is the source address for the route.
string source = 4;
// scope is the scope of the route.
// NOTE(review): presumably a netlink RT_SCOPE_* value — confirm.
uint32 scope = 5;
}

View File

@ -0,0 +1,52 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option go_package = "types";
option java_package = "com.google.protobuf";
option java_outer_classname = "EmptyProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
option cc_enable_arenas = true;
// A generic empty message that you can re-use to avoid defining duplicated
// empty messages in your APIs. A typical example is to use it as the request
// or the response type of an API method. For instance:
//
// service Foo {
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
// }
//
// The JSON representation for `Empty` is empty JSON object `{}`.
message Empty {}

View File

@ -0,0 +1,40 @@
//
// Copyright 2017 HyperHQ Inc.
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
syntax = "proto3";
package grpc;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.equal_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.testgen_all) = true;
option (gogoproto.benchgen_all) = true;
// CheckRequest identifies the service being queried by the Health service.
message CheckRequest {
// service is the name of the service to check.
// NOTE(review): whether an empty value means "the agent itself" (as in
// standard gRPC health checking) is not visible here — confirm.
string service = 1;
}
// HealthCheckResponse reports the serving state of the queried service.
message HealthCheckResponse {
// ServingStatus enumerates the possible health states.
enum ServingStatus {
UNKNOWN = 0;
SERVING = 1;
NOT_SERVING = 2;
}
// status is the current serving status.
ServingStatus status = 1;
}
// VersionCheckResponse carries the agent's API and implementation versions.
message VersionCheckResponse {
// grpc_version is the version of the gRPC API supported by the agent.
string grpc_version = 1;
// agent_version is the version of the agent itself.
string agent_version = 2;
}
// Health exposes liveness and version information for the agent.
service Health {
// Check reports the serving status of the requested service.
rpc Check(CheckRequest) returns (HealthCheckResponse);
// Version reports the gRPC API version and the agent version.
rpc Version(CheckRequest) returns (VersionCheckResponse);
}

View File

@ -0,0 +1,463 @@
//
// Copyright (c) 2017 Intel Corporation
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
syntax = "proto3";
package grpc;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/wrappers.proto";
option (gogoproto.equal_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.testgen_all) = true;
option (gogoproto.benchgen_all) = true;
// Spec is the root object of the OCI runtime configuration for a container,
// mirroring the OCI runtime-spec `config.json` schema.
message Spec {
// Version of the Open Container Initiative Runtime Specification with which the bundle complies.
string Version = 1;
// Process configures the container process.
Process Process = 2;
// Root configures the container's root filesystem.
Root Root = 3;
// Hostname configures the container's hostname.
string Hostname = 4;
// Mounts configures additional mounts (on top of Root).
repeated Mount Mounts = 5 [(gogoproto.nullable) = false];
// Hooks configures callbacks for container lifecycle events.
Hooks Hooks = 6;
// Annotations contains arbitrary metadata for the container.
map<string, string> Annotations = 7;
// Linux is platform-specific configuration for Linux based containers.
Linux Linux = 8;
// Solaris is platform-specific configuration for Solaris based containers.
Solaris Solaris = 9;
// Windows is platform-specific configuration for Windows based containers.
Windows Windows = 10;
}
// Process describes the configuration of the process to run in the
// container (OCI runtime-spec `process` object).
message Process {
// Terminal creates an interactive terminal for the container.
bool Terminal = 1;
// ConsoleSize specifies the size of the console.
Box ConsoleSize = 2;
// User specifies user information for the process.
User User = 3 [(gogoproto.nullable) = false];
// Args specifies the binary and arguments for the application to execute.
repeated string Args = 4;
// Env populates the process environment for the process.
repeated string Env = 5;
// Cwd is the current working directory for the process and must be
// relative to the container's root.
string Cwd = 6;
// Capabilities are Linux capabilities that are kept for the process.
LinuxCapabilities Capabilities = 7;
// Rlimits specifies rlimit options to apply to the process.
repeated POSIXRlimit Rlimits = 8 [(gogoproto.nullable) = false];
// NoNewPrivileges controls whether additional privileges could be gained by processes in the container.
bool NoNewPrivileges = 9;
// ApparmorProfile specifies the apparmor profile for the container.
string ApparmorProfile = 10;
// Specify an oom_score_adj for the container.
int64 OOMScoreAdj = 11;
// SelinuxLabel specifies the selinux context that the container process is run as.
string SelinuxLabel = 12;
}
// Box describes terminal dimensions (rows/columns) for a console.
message Box {
// Height is the vertical dimension of a box.
uint32 Height = 1;
// Width is the horizontal dimension of a box.
uint32 Width = 2;
}
// User specifies the identity the container process runs as.
message User {
// UID is the user id.
uint32 UID = 1;
// GID is the group id.
uint32 GID = 2;
// AdditionalGids are additional group ids set for the container's process.
repeated uint32 AdditionalGids = 3;
// Username is the user name.
string Username = 4;
}
// LinuxCapabilities specifies the capability sets kept for the process
// (see capabilities(7) for the meaning of each set).
message LinuxCapabilities {
// Bounding is the set of capabilities checked by the kernel.
repeated string Bounding = 1;
// Effective is the set of capabilities checked by the kernel.
repeated string Effective = 2;
// Inheritable is the capabilities preserved across execve.
repeated string Inheritable = 3;
// Permitted is the limiting superset for effective capabilities.
repeated string Permitted = 4;
// Ambient is the ambient set of capabilities that are kept.
repeated string Ambient = 5;
}
// POSIXRlimit is a single resource limit (see setrlimit(2)).
message POSIXRlimit {
// Type of the rlimit to set
string Type = 1;
// Hard is the hard limit for the specified type
uint64 Hard = 2;
// Soft is the soft limit for the specified type
uint64 Soft = 3;
}
// Mount specifies a mount to perform inside the container. Unlike the
// plain OCI Mount, destination/source may carry kata-specific "vm:/dev/"
// and "tmp:/" prefixes (see the field comments below).
message Mount {
// destination is the path inside the container expect when it starts with "tmp:/"
string destination = 1;
// source is the path inside the container expect when it starts with "vm:/dev/" or "tmp:/"
// the path which starts with "vm:/dev/" refers the guest vm's "/dev",
// especially, "vm:/dev/hostfs/" refers to the shared filesystem.
// "tmp:/" is a temporary directory which is used for temporary mounts.
string source = 2;
// type is the filesystem type of the mount.
string type = 3;
// options are the mount options for the mount.
repeated string options = 4;
}
// Root specifies the container's root filesystem.
message Root {
// Path is the absolute path to the container's root filesystem.
string Path = 1;
// Readonly makes the root filesystem for the container readonly before the process is executed.
bool Readonly = 2;
}
// Hooks groups the lifecycle hooks run at each phase of the container.
message Hooks {
// Prestart is a list of hooks to be run before the container process is executed.
repeated Hook Prestart = 1 [(gogoproto.nullable) = false];
// Poststart is a list of hooks to be run after the container process is started.
repeated Hook Poststart = 2 [(gogoproto.nullable) = false];
// Poststop is a list of hooks to be run after the container process exits.
repeated Hook Poststop = 3 [(gogoproto.nullable) = false];
}
// Hook is a single lifecycle hook: a program executed with the given
// arguments and environment.
message Hook {
// Path is the path to the hook executable.
string Path = 1;
// Args are the arguments passed to the hook (argv style).
repeated string Args = 2;
// Env is the environment for the hook process ("key=value" entries).
repeated string Env = 3;
// Timeout aborts the hook after this long.
// NOTE(review): presumed seconds, per the OCI runtime spec — confirm.
int64 Timeout = 4;
}
// Linux holds the Linux-specific section of the container configuration:
// namespaces, cgroups, devices, seccomp and related settings.
message Linux {
// UIDMapping specifies user mappings for supporting user namespaces.
repeated LinuxIDMapping UIDMappings = 1 [(gogoproto.nullable) = false];
// GIDMapping specifies group mappings for supporting user namespaces.
repeated LinuxIDMapping GIDMappings = 2 [(gogoproto.nullable) = false];
// Sysctl are a set of key value pairs that are set for the container on start
map<string, string> Sysctl = 3;
// Resources contain cgroup information for handling resource constraints
// for the container
LinuxResources Resources = 4;
// CgroupsPath specifies the path to cgroups that are created and/or joined by the container.
// The path is expected to be relative to the cgroups mountpoint.
// If resources are specified, the cgroups at CgroupsPath will be updated based on resources.
string CgroupsPath = 5;
// Namespaces contains the namespaces that are created and/or joined by the container
repeated LinuxNamespace Namespaces = 6 [(gogoproto.nullable) = false];
// Devices are a list of device nodes that are created for the container
repeated LinuxDevice Devices = 7 [(gogoproto.nullable) = false];
// Seccomp specifies the seccomp security settings for the container.
LinuxSeccomp Seccomp = 8;
// RootfsPropagation is the rootfs mount propagation mode for the container.
string RootfsPropagation = 9;
// MaskedPaths masks over the provided paths inside the container.
repeated string MaskedPaths = 10;
// ReadonlyPaths sets the provided paths as RO inside the container.
repeated string ReadonlyPaths = 11;
// MountLabel specifies the selinux context for the mounts in the container.
string MountLabel = 12;
// IntelRdt contains Intel Resource Director Technology (RDT) information
// for handling resource constraints (e.g., L3 cache) for the container
LinuxIntelRdt IntelRdt = 13;
}
// Windows is a placeholder for the Windows platform section; only an
// unused dummy field is declared.
message Windows {
// Dummy string, never used.
string dummy = 1;
}
// Solaris is a placeholder for the Solaris platform section; only an
// unused dummy field is declared.
message Solaris {
// Dummy string, never used.
string dummy = 1;
}
// LinuxIDMapping maps a contiguous range of host UIDs/GIDs into the
// container's user namespace.
message LinuxIDMapping {
// HostID is the starting UID/GID on the host to be mapped to 'ContainerID'
uint32 HostID = 1;
// ContainerID is the starting UID/GID in the container
uint32 ContainerID = 2;
// Size is the number of IDs to be mapped
uint32 Size = 3;
}
// LinuxNamespace identifies a namespace the container creates or joins.
message LinuxNamespace {
// Type is the type of namespace
string Type = 1;
// Path is a path to an existing namespace persisted on disk that can be joined
// and is of the same type
string Path = 2;
}
// LinuxDevice describes a device node created for the container.
message LinuxDevice {
// Path to the device.
string Path = 1;
// Device type, block, char, etc.
string Type = 2;
// Major is the device's major number.
int64 Major = 3;
// Minor is the device's minor number.
int64 Minor = 4;
// FileMode permission bits for the device.
uint32 FileMode = 5;
// UID of the device.
uint32 UID = 6;
// Gid of the device.
uint32 GID = 7;
}
// LinuxResources groups the cgroup resource constraints applied to the
// container (devices, memory, CPU, pids, block I/O, hugetlb, network).
message LinuxResources {
// Devices configures the device whitelist.
repeated LinuxDeviceCgroup Devices = 1 [(gogoproto.nullable) = false];
// Memory restriction configuration
LinuxMemory Memory = 2;
// CPU resource restriction configuration
LinuxCPU CPU = 3;
// Task resource restriction configuration.
LinuxPids Pids = 4;
// BlockIO restriction configuration
LinuxBlockIO BlockIO = 5;
// Hugetlb limit (in bytes)
repeated LinuxHugepageLimit HugepageLimits = 6 [(gogoproto.nullable) = false];
// Network restriction configuration
LinuxNetwork Network = 7;
}
// LinuxMemory holds the memory cgroup settings.
message LinuxMemory {
// Memory limit (in bytes).
int64 Limit = 1;
// Memory reservation or soft_limit (in bytes).
int64 Reservation = 2;
// Total memory limit (memory + swap).
int64 Swap = 3;
// Kernel memory limit (in bytes).
int64 Kernel = 4;
// Kernel memory limit for tcp (in bytes)
int64 KernelTCP = 5;
// How aggressive the kernel will swap memory pages.
uint64 Swappiness = 6;
// DisableOOMKiller disables the OOM killer for out of memory conditions
bool DisableOOMKiller = 7;
}
// LinuxCPU holds the cpu/cpuset cgroup settings.
message LinuxCPU {
// CPU shares (relative weight (ratio) vs. other cgroups with cpu shares).
uint64 Shares = 1;
// CPU hardcap limit (in usecs). Allowed cpu time in a given period.
int64 Quota = 2;
// CPU period to be used for hardcapping (in usecs).
uint64 Period = 3;
// How much time realtime scheduling may use (in usecs).
int64 RealtimeRuntime = 4;
// CPU period to be used for realtime scheduling (in usecs).
uint64 RealtimePeriod = 5;
// CPUs to use within the cpuset. Default is to use any CPU available.
string Cpus = 6;
// List of memory nodes in the cpuset. Default is to use any available memory node.
string Mems = 7;
}
// LinuxWeightDevice sets a per-device block I/O weight.
message LinuxWeightDevice {
// Major is the device's major number.
int64 Major = 1;
// Minor is the device's minor number.
int64 Minor = 2;
// Weight is the bandwidth rate for the device.
uint32 Weight = 3;
// LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only
uint32 LeafWeight = 4;
}
// LinuxThrottleDevice sets a per-device block I/O throttle rate.
message LinuxThrottleDevice {
// Major is the device's major number.
int64 Major = 1;
// Minor is the device's minor number.
int64 Minor = 2;
// Rate is the IO rate limit per cgroup per device
uint64 Rate = 3;
}
// LinuxBlockIO holds the blkio cgroup settings: weights plus per-device
// read/write throttles in bytes-per-second and IOPS.
message LinuxBlockIO {
// Specifies per cgroup weight
uint32 Weight = 1;
// Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, CFQ scheduler only
uint32 LeafWeight = 2;
// Weight per cgroup per device, can override BlkioWeight
repeated LinuxWeightDevice WeightDevice = 3 [(gogoproto.nullable) = false];
// IO read rate limit per cgroup per device, bytes per second
repeated LinuxThrottleDevice ThrottleReadBpsDevice = 4 [(gogoproto.nullable) = false];
// IO write rate limit per cgroup per device, bytes per second
repeated LinuxThrottleDevice ThrottleWriteBpsDevice = 5 [(gogoproto.nullable) = false];
// IO read rate limit per cgroup per device, IO per second
repeated LinuxThrottleDevice ThrottleReadIOPSDevice = 6 [(gogoproto.nullable) = false];
// IO write rate limit per cgroup per device, IO per second
repeated LinuxThrottleDevice ThrottleWriteIOPSDevice = 7 [(gogoproto.nullable) = false];
}
// LinuxPids holds the pids cgroup settings.
message LinuxPids {
// Maximum number of PIDs. Default is "no limit".
int64 Limit = 1;
}
// LinuxDeviceCgroup is a single rule of the devices cgroup whitelist.
message LinuxDeviceCgroup {
// Allow or deny
bool Allow = 1;
// Device type, block, char, etc.
string Type = 2;
// Major is the device's major number.
int64 Major = 3;
// Minor is the device's minor number.
int64 Minor = 4;
// Cgroup access permissions format, rwm.
string Access = 5;
}
// LinuxNetwork holds the net_cls/net_prio cgroup settings.
message LinuxNetwork {
// Set class identifier for container's network packets
uint32 ClassID = 1;
// Set priority of network traffic for container
repeated LinuxInterfacePriority Priorities = 2 [(gogoproto.nullable) = false];
}
// LinuxHugepageLimit is a hugetlb usage limit for one page size.
message LinuxHugepageLimit {
// Pagesize is the hugepage size
string Pagesize = 1;
// Limit is the limit of "hugepagesize" hugetlb usage
uint64 Limit = 2;
}
// LinuxInterfacePriority sets the network priority for one interface.
message LinuxInterfacePriority {
// Name is the name of the network interface
string Name = 1;
// Priority for the interface
uint32 Priority = 2;
}
// LinuxSeccomp specifies the seccomp filter applied to the container.
message LinuxSeccomp {
// DefaultAction is taken for syscalls not matched by any rule.
string DefaultAction = 1;
// Architectures the filter applies to.
repeated string Architectures = 2;
// Syscalls is the list of syscall rules.
repeated LinuxSyscall Syscalls = 3 [(gogoproto.nullable) = false];
}
// LinuxSeccompArg matches one argument of a filtered syscall.
message LinuxSeccompArg {
// Index of the syscall argument (0-based).
uint64 Index = 1;
// Value to compare the argument against.
uint64 Value = 2;
// ValueTwo is the second comparison value, used by some operators.
uint64 ValueTwo = 3;
// Op is the comparison operator (e.g. SCMP_CMP_EQ).
string Op = 4;
}
// LinuxSyscall matches a set of syscalls and the action to take.
message LinuxSyscall {
// Names of the syscalls this rule applies to.
repeated string Names = 1;
// Action taken when a listed syscall is invoked.
string Action = 2;
// Args are optional argument filters for the rule.
repeated LinuxSeccompArg Args = 3 [(gogoproto.nullable) = false];
}
// LinuxIntelRdt holds the Intel RDT (resource director technology)
// L3 cache configuration.
message LinuxIntelRdt {
// The schema for L3 cache id and capacity bitmask (CBM)
// Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
string L3CacheSchema = 1;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,883 @@
// This file is generated. Do not edit
// @generated
// https://github.com/Manishearth/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unsafe_code)]
#![allow(unused_imports)]
#![allow(unused_results)]
const METHOD_AGENT_SERVICE_CREATE_CONTAINER: ::grpcio::Method<super::agent::CreateContainerRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/CreateContainer",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_START_CONTAINER: ::grpcio::Method<super::agent::StartContainerRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/StartContainer",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_REMOVE_CONTAINER: ::grpcio::Method<super::agent::RemoveContainerRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/RemoveContainer",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_EXEC_PROCESS: ::grpcio::Method<super::agent::ExecProcessRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/ExecProcess",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_SIGNAL_PROCESS: ::grpcio::Method<super::agent::SignalProcessRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/SignalProcess",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_WAIT_PROCESS: ::grpcio::Method<super::agent::WaitProcessRequest, super::agent::WaitProcessResponse> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/WaitProcess",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_LIST_PROCESSES: ::grpcio::Method<super::agent::ListProcessesRequest, super::agent::ListProcessesResponse> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/ListProcesses",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_UPDATE_CONTAINER: ::grpcio::Method<super::agent::UpdateContainerRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/UpdateContainer",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_STATS_CONTAINER: ::grpcio::Method<super::agent::StatsContainerRequest, super::agent::StatsContainerResponse> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/StatsContainer",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_PAUSE_CONTAINER: ::grpcio::Method<super::agent::PauseContainerRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/PauseContainer",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_RESUME_CONTAINER: ::grpcio::Method<super::agent::ResumeContainerRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/ResumeContainer",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_WRITE_STDIN: ::grpcio::Method<super::agent::WriteStreamRequest, super::agent::WriteStreamResponse> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/WriteStdin",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_READ_STDOUT: ::grpcio::Method<super::agent::ReadStreamRequest, super::agent::ReadStreamResponse> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/ReadStdout",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_READ_STDERR: ::grpcio::Method<super::agent::ReadStreamRequest, super::agent::ReadStreamResponse> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/ReadStderr",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_CLOSE_STDIN: ::grpcio::Method<super::agent::CloseStdinRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/CloseStdin",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_TTY_WIN_RESIZE: ::grpcio::Method<super::agent::TtyWinResizeRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/TtyWinResize",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_UPDATE_INTERFACE: ::grpcio::Method<super::agent::UpdateInterfaceRequest, super::types::Interface> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/UpdateInterface",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_UPDATE_ROUTES: ::grpcio::Method<super::agent::UpdateRoutesRequest, super::agent::Routes> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/UpdateRoutes",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_LIST_INTERFACES: ::grpcio::Method<super::agent::ListInterfacesRequest, super::agent::Interfaces> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/ListInterfaces",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_LIST_ROUTES: ::grpcio::Method<super::agent::ListRoutesRequest, super::agent::Routes> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/ListRoutes",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_START_TRACING: ::grpcio::Method<super::agent::StartTracingRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/StartTracing",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_STOP_TRACING: ::grpcio::Method<super::agent::StopTracingRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/StopTracing",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_CREATE_SANDBOX: ::grpcio::Method<super::agent::CreateSandboxRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/CreateSandbox",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_DESTROY_SANDBOX: ::grpcio::Method<super::agent::DestroySandboxRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/DestroySandbox",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_ONLINE_CPU_MEM: ::grpcio::Method<super::agent::OnlineCPUMemRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/OnlineCPUMem",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_RESEED_RANDOM_DEV: ::grpcio::Method<super::agent::ReseedRandomDevRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/ReseedRandomDev",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_GET_GUEST_DETAILS: ::grpcio::Method<super::agent::GuestDetailsRequest, super::agent::GuestDetailsResponse> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/GetGuestDetails",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_MEM_HOTPLUG_BY_PROBE: ::grpcio::Method<super::agent::MemHotplugByProbeRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/MemHotplugByProbe",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
const METHOD_AGENT_SERVICE_SET_GUEST_DATE_TIME: ::grpcio::Method<super::agent::SetGuestDateTimeRequest, super::empty::Empty> = ::grpcio::Method {
ty: ::grpcio::MethodType::Unary,
name: "/grpc.AgentService/SetGuestDateTime",
req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
/// Descriptor for the unary `CopyFile` RPC: `CopyFileRequest` -> `Empty`, protobuf-marshalled.
const METHOD_AGENT_SERVICE_COPY_FILE: ::grpcio::Method<super::agent::CopyFileRequest, super::empty::Empty> = ::grpcio::Method {
    ty: ::grpcio::MethodType::Unary,
    name: "/grpc.AgentService/CopyFile",
    req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
    resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
/// Client stub for the `grpc.AgentService` service.
///
/// Cloneable: clones share the same underlying channel.
#[derive(Clone)]
pub struct AgentServiceClient {
    // Untyped grpcio client; per-RPC typing comes from the METHOD_* descriptors.
    client: ::grpcio::Client,
}
// Generated client methods for `grpc.AgentService`.
//
// Every RPC is exposed in four flavours, all routed through the same
// METHOD_AGENT_SERVICE_* descriptor constant:
//   foo_opt(req, opt)       - blocking unary call with a caller-supplied CallOption
//   foo(req)                - blocking unary call with CallOption::default()
//   foo_async_opt(req, opt) - async unary call with a caller-supplied CallOption
//   foo_async(req)          - async unary call with CallOption::default()
// Blocking variants return the decoded response; async variants return a
// ClientUnaryReceiver future for it.
impl AgentServiceClient {
    /// Builds a client that issues all calls over the given channel.
    pub fn new(channel: ::grpcio::Channel) -> Self {
        AgentServiceClient {
            client: ::grpcio::Client::new(channel),
        }
    }
    pub fn create_container_opt(&self, req: &super::agent::CreateContainerRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_CREATE_CONTAINER, req, opt)
    }
    pub fn create_container(&self, req: &super::agent::CreateContainerRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.create_container_opt(req, ::grpcio::CallOption::default())
    }
    pub fn create_container_async_opt(&self, req: &super::agent::CreateContainerRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_CREATE_CONTAINER, req, opt)
    }
    pub fn create_container_async(&self, req: &super::agent::CreateContainerRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.create_container_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn start_container_opt(&self, req: &super::agent::StartContainerRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_START_CONTAINER, req, opt)
    }
    pub fn start_container(&self, req: &super::agent::StartContainerRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.start_container_opt(req, ::grpcio::CallOption::default())
    }
    pub fn start_container_async_opt(&self, req: &super::agent::StartContainerRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_START_CONTAINER, req, opt)
    }
    pub fn start_container_async(&self, req: &super::agent::StartContainerRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.start_container_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn remove_container_opt(&self, req: &super::agent::RemoveContainerRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_REMOVE_CONTAINER, req, opt)
    }
    pub fn remove_container(&self, req: &super::agent::RemoveContainerRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.remove_container_opt(req, ::grpcio::CallOption::default())
    }
    pub fn remove_container_async_opt(&self, req: &super::agent::RemoveContainerRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_REMOVE_CONTAINER, req, opt)
    }
    pub fn remove_container_async(&self, req: &super::agent::RemoveContainerRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.remove_container_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn exec_process_opt(&self, req: &super::agent::ExecProcessRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_EXEC_PROCESS, req, opt)
    }
    pub fn exec_process(&self, req: &super::agent::ExecProcessRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.exec_process_opt(req, ::grpcio::CallOption::default())
    }
    pub fn exec_process_async_opt(&self, req: &super::agent::ExecProcessRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_EXEC_PROCESS, req, opt)
    }
    pub fn exec_process_async(&self, req: &super::agent::ExecProcessRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.exec_process_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn signal_process_opt(&self, req: &super::agent::SignalProcessRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_SIGNAL_PROCESS, req, opt)
    }
    pub fn signal_process(&self, req: &super::agent::SignalProcessRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.signal_process_opt(req, ::grpcio::CallOption::default())
    }
    pub fn signal_process_async_opt(&self, req: &super::agent::SignalProcessRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_SIGNAL_PROCESS, req, opt)
    }
    pub fn signal_process_async(&self, req: &super::agent::SignalProcessRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.signal_process_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn wait_process_opt(&self, req: &super::agent::WaitProcessRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::agent::WaitProcessResponse> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_WAIT_PROCESS, req, opt)
    }
    pub fn wait_process(&self, req: &super::agent::WaitProcessRequest) -> ::grpcio::Result<super::agent::WaitProcessResponse> {
        self.wait_process_opt(req, ::grpcio::CallOption::default())
    }
    pub fn wait_process_async_opt(&self, req: &super::agent::WaitProcessRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::WaitProcessResponse>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_WAIT_PROCESS, req, opt)
    }
    pub fn wait_process_async(&self, req: &super::agent::WaitProcessRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::WaitProcessResponse>> {
        self.wait_process_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn list_processes_opt(&self, req: &super::agent::ListProcessesRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::agent::ListProcessesResponse> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_LIST_PROCESSES, req, opt)
    }
    pub fn list_processes(&self, req: &super::agent::ListProcessesRequest) -> ::grpcio::Result<super::agent::ListProcessesResponse> {
        self.list_processes_opt(req, ::grpcio::CallOption::default())
    }
    pub fn list_processes_async_opt(&self, req: &super::agent::ListProcessesRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::ListProcessesResponse>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_LIST_PROCESSES, req, opt)
    }
    pub fn list_processes_async(&self, req: &super::agent::ListProcessesRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::ListProcessesResponse>> {
        self.list_processes_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn update_container_opt(&self, req: &super::agent::UpdateContainerRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_UPDATE_CONTAINER, req, opt)
    }
    pub fn update_container(&self, req: &super::agent::UpdateContainerRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.update_container_opt(req, ::grpcio::CallOption::default())
    }
    pub fn update_container_async_opt(&self, req: &super::agent::UpdateContainerRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_UPDATE_CONTAINER, req, opt)
    }
    pub fn update_container_async(&self, req: &super::agent::UpdateContainerRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.update_container_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn stats_container_opt(&self, req: &super::agent::StatsContainerRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::agent::StatsContainerResponse> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_STATS_CONTAINER, req, opt)
    }
    pub fn stats_container(&self, req: &super::agent::StatsContainerRequest) -> ::grpcio::Result<super::agent::StatsContainerResponse> {
        self.stats_container_opt(req, ::grpcio::CallOption::default())
    }
    pub fn stats_container_async_opt(&self, req: &super::agent::StatsContainerRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::StatsContainerResponse>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_STATS_CONTAINER, req, opt)
    }
    pub fn stats_container_async(&self, req: &super::agent::StatsContainerRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::StatsContainerResponse>> {
        self.stats_container_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn pause_container_opt(&self, req: &super::agent::PauseContainerRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_PAUSE_CONTAINER, req, opt)
    }
    pub fn pause_container(&self, req: &super::agent::PauseContainerRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.pause_container_opt(req, ::grpcio::CallOption::default())
    }
    pub fn pause_container_async_opt(&self, req: &super::agent::PauseContainerRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_PAUSE_CONTAINER, req, opt)
    }
    pub fn pause_container_async(&self, req: &super::agent::PauseContainerRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.pause_container_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn resume_container_opt(&self, req: &super::agent::ResumeContainerRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_RESUME_CONTAINER, req, opt)
    }
    pub fn resume_container(&self, req: &super::agent::ResumeContainerRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.resume_container_opt(req, ::grpcio::CallOption::default())
    }
    pub fn resume_container_async_opt(&self, req: &super::agent::ResumeContainerRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_RESUME_CONTAINER, req, opt)
    }
    pub fn resume_container_async(&self, req: &super::agent::ResumeContainerRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.resume_container_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn write_stdin_opt(&self, req: &super::agent::WriteStreamRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::agent::WriteStreamResponse> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_WRITE_STDIN, req, opt)
    }
    pub fn write_stdin(&self, req: &super::agent::WriteStreamRequest) -> ::grpcio::Result<super::agent::WriteStreamResponse> {
        self.write_stdin_opt(req, ::grpcio::CallOption::default())
    }
    pub fn write_stdin_async_opt(&self, req: &super::agent::WriteStreamRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::WriteStreamResponse>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_WRITE_STDIN, req, opt)
    }
    pub fn write_stdin_async(&self, req: &super::agent::WriteStreamRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::WriteStreamResponse>> {
        self.write_stdin_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn read_stdout_opt(&self, req: &super::agent::ReadStreamRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::agent::ReadStreamResponse> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_READ_STDOUT, req, opt)
    }
    pub fn read_stdout(&self, req: &super::agent::ReadStreamRequest) -> ::grpcio::Result<super::agent::ReadStreamResponse> {
        self.read_stdout_opt(req, ::grpcio::CallOption::default())
    }
    pub fn read_stdout_async_opt(&self, req: &super::agent::ReadStreamRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::ReadStreamResponse>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_READ_STDOUT, req, opt)
    }
    pub fn read_stdout_async(&self, req: &super::agent::ReadStreamRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::ReadStreamResponse>> {
        self.read_stdout_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn read_stderr_opt(&self, req: &super::agent::ReadStreamRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::agent::ReadStreamResponse> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_READ_STDERR, req, opt)
    }
    pub fn read_stderr(&self, req: &super::agent::ReadStreamRequest) -> ::grpcio::Result<super::agent::ReadStreamResponse> {
        self.read_stderr_opt(req, ::grpcio::CallOption::default())
    }
    pub fn read_stderr_async_opt(&self, req: &super::agent::ReadStreamRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::ReadStreamResponse>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_READ_STDERR, req, opt)
    }
    pub fn read_stderr_async(&self, req: &super::agent::ReadStreamRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::ReadStreamResponse>> {
        self.read_stderr_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn close_stdin_opt(&self, req: &super::agent::CloseStdinRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_CLOSE_STDIN, req, opt)
    }
    pub fn close_stdin(&self, req: &super::agent::CloseStdinRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.close_stdin_opt(req, ::grpcio::CallOption::default())
    }
    pub fn close_stdin_async_opt(&self, req: &super::agent::CloseStdinRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_CLOSE_STDIN, req, opt)
    }
    pub fn close_stdin_async(&self, req: &super::agent::CloseStdinRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.close_stdin_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn tty_win_resize_opt(&self, req: &super::agent::TtyWinResizeRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_TTY_WIN_RESIZE, req, opt)
    }
    pub fn tty_win_resize(&self, req: &super::agent::TtyWinResizeRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.tty_win_resize_opt(req, ::grpcio::CallOption::default())
    }
    pub fn tty_win_resize_async_opt(&self, req: &super::agent::TtyWinResizeRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_TTY_WIN_RESIZE, req, opt)
    }
    pub fn tty_win_resize_async(&self, req: &super::agent::TtyWinResizeRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.tty_win_resize_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn update_interface_opt(&self, req: &super::agent::UpdateInterfaceRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::types::Interface> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_UPDATE_INTERFACE, req, opt)
    }
    pub fn update_interface(&self, req: &super::agent::UpdateInterfaceRequest) -> ::grpcio::Result<super::types::Interface> {
        self.update_interface_opt(req, ::grpcio::CallOption::default())
    }
    pub fn update_interface_async_opt(&self, req: &super::agent::UpdateInterfaceRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::types::Interface>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_UPDATE_INTERFACE, req, opt)
    }
    pub fn update_interface_async(&self, req: &super::agent::UpdateInterfaceRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::types::Interface>> {
        self.update_interface_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn update_routes_opt(&self, req: &super::agent::UpdateRoutesRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::agent::Routes> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_UPDATE_ROUTES, req, opt)
    }
    pub fn update_routes(&self, req: &super::agent::UpdateRoutesRequest) -> ::grpcio::Result<super::agent::Routes> {
        self.update_routes_opt(req, ::grpcio::CallOption::default())
    }
    pub fn update_routes_async_opt(&self, req: &super::agent::UpdateRoutesRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::Routes>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_UPDATE_ROUTES, req, opt)
    }
    pub fn update_routes_async(&self, req: &super::agent::UpdateRoutesRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::Routes>> {
        self.update_routes_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn list_interfaces_opt(&self, req: &super::agent::ListInterfacesRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::agent::Interfaces> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_LIST_INTERFACES, req, opt)
    }
    pub fn list_interfaces(&self, req: &super::agent::ListInterfacesRequest) -> ::grpcio::Result<super::agent::Interfaces> {
        self.list_interfaces_opt(req, ::grpcio::CallOption::default())
    }
    pub fn list_interfaces_async_opt(&self, req: &super::agent::ListInterfacesRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::Interfaces>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_LIST_INTERFACES, req, opt)
    }
    pub fn list_interfaces_async(&self, req: &super::agent::ListInterfacesRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::Interfaces>> {
        self.list_interfaces_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn list_routes_opt(&self, req: &super::agent::ListRoutesRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::agent::Routes> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_LIST_ROUTES, req, opt)
    }
    pub fn list_routes(&self, req: &super::agent::ListRoutesRequest) -> ::grpcio::Result<super::agent::Routes> {
        self.list_routes_opt(req, ::grpcio::CallOption::default())
    }
    pub fn list_routes_async_opt(&self, req: &super::agent::ListRoutesRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::Routes>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_LIST_ROUTES, req, opt)
    }
    pub fn list_routes_async(&self, req: &super::agent::ListRoutesRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::Routes>> {
        self.list_routes_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn start_tracing_opt(&self, req: &super::agent::StartTracingRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_START_TRACING, req, opt)
    }
    pub fn start_tracing(&self, req: &super::agent::StartTracingRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.start_tracing_opt(req, ::grpcio::CallOption::default())
    }
    pub fn start_tracing_async_opt(&self, req: &super::agent::StartTracingRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_START_TRACING, req, opt)
    }
    pub fn start_tracing_async(&self, req: &super::agent::StartTracingRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.start_tracing_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn stop_tracing_opt(&self, req: &super::agent::StopTracingRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_STOP_TRACING, req, opt)
    }
    pub fn stop_tracing(&self, req: &super::agent::StopTracingRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.stop_tracing_opt(req, ::grpcio::CallOption::default())
    }
    pub fn stop_tracing_async_opt(&self, req: &super::agent::StopTracingRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_STOP_TRACING, req, opt)
    }
    pub fn stop_tracing_async(&self, req: &super::agent::StopTracingRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.stop_tracing_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn create_sandbox_opt(&self, req: &super::agent::CreateSandboxRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_CREATE_SANDBOX, req, opt)
    }
    pub fn create_sandbox(&self, req: &super::agent::CreateSandboxRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.create_sandbox_opt(req, ::grpcio::CallOption::default())
    }
    pub fn create_sandbox_async_opt(&self, req: &super::agent::CreateSandboxRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_CREATE_SANDBOX, req, opt)
    }
    pub fn create_sandbox_async(&self, req: &super::agent::CreateSandboxRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.create_sandbox_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn destroy_sandbox_opt(&self, req: &super::agent::DestroySandboxRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_DESTROY_SANDBOX, req, opt)
    }
    pub fn destroy_sandbox(&self, req: &super::agent::DestroySandboxRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.destroy_sandbox_opt(req, ::grpcio::CallOption::default())
    }
    pub fn destroy_sandbox_async_opt(&self, req: &super::agent::DestroySandboxRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_DESTROY_SANDBOX, req, opt)
    }
    pub fn destroy_sandbox_async(&self, req: &super::agent::DestroySandboxRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.destroy_sandbox_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn online_cpu_mem_opt(&self, req: &super::agent::OnlineCPUMemRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_ONLINE_CPU_MEM, req, opt)
    }
    pub fn online_cpu_mem(&self, req: &super::agent::OnlineCPUMemRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.online_cpu_mem_opt(req, ::grpcio::CallOption::default())
    }
    pub fn online_cpu_mem_async_opt(&self, req: &super::agent::OnlineCPUMemRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_ONLINE_CPU_MEM, req, opt)
    }
    pub fn online_cpu_mem_async(&self, req: &super::agent::OnlineCPUMemRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.online_cpu_mem_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn reseed_random_dev_opt(&self, req: &super::agent::ReseedRandomDevRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_RESEED_RANDOM_DEV, req, opt)
    }
    pub fn reseed_random_dev(&self, req: &super::agent::ReseedRandomDevRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.reseed_random_dev_opt(req, ::grpcio::CallOption::default())
    }
    pub fn reseed_random_dev_async_opt(&self, req: &super::agent::ReseedRandomDevRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_RESEED_RANDOM_DEV, req, opt)
    }
    pub fn reseed_random_dev_async(&self, req: &super::agent::ReseedRandomDevRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.reseed_random_dev_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn get_guest_details_opt(&self, req: &super::agent::GuestDetailsRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::agent::GuestDetailsResponse> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_GET_GUEST_DETAILS, req, opt)
    }
    pub fn get_guest_details(&self, req: &super::agent::GuestDetailsRequest) -> ::grpcio::Result<super::agent::GuestDetailsResponse> {
        self.get_guest_details_opt(req, ::grpcio::CallOption::default())
    }
    pub fn get_guest_details_async_opt(&self, req: &super::agent::GuestDetailsRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::GuestDetailsResponse>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_GET_GUEST_DETAILS, req, opt)
    }
    pub fn get_guest_details_async(&self, req: &super::agent::GuestDetailsRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::agent::GuestDetailsResponse>> {
        self.get_guest_details_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn mem_hotplug_by_probe_opt(&self, req: &super::agent::MemHotplugByProbeRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_MEM_HOTPLUG_BY_PROBE, req, opt)
    }
    pub fn mem_hotplug_by_probe(&self, req: &super::agent::MemHotplugByProbeRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.mem_hotplug_by_probe_opt(req, ::grpcio::CallOption::default())
    }
    pub fn mem_hotplug_by_probe_async_opt(&self, req: &super::agent::MemHotplugByProbeRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_MEM_HOTPLUG_BY_PROBE, req, opt)
    }
    pub fn mem_hotplug_by_probe_async(&self, req: &super::agent::MemHotplugByProbeRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.mem_hotplug_by_probe_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn set_guest_date_time_opt(&self, req: &super::agent::SetGuestDateTimeRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_SET_GUEST_DATE_TIME, req, opt)
    }
    pub fn set_guest_date_time(&self, req: &super::agent::SetGuestDateTimeRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.set_guest_date_time_opt(req, ::grpcio::CallOption::default())
    }
    pub fn set_guest_date_time_async_opt(&self, req: &super::agent::SetGuestDateTimeRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_SET_GUEST_DATE_TIME, req, opt)
    }
    pub fn set_guest_date_time_async(&self, req: &super::agent::SetGuestDateTimeRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.set_guest_date_time_async_opt(req, ::grpcio::CallOption::default())
    }
    pub fn copy_file_opt(&self, req: &super::agent::CopyFileRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::empty::Empty> {
        self.client.unary_call(&METHOD_AGENT_SERVICE_COPY_FILE, req, opt)
    }
    pub fn copy_file(&self, req: &super::agent::CopyFileRequest) -> ::grpcio::Result<super::empty::Empty> {
        self.copy_file_opt(req, ::grpcio::CallOption::default())
    }
    pub fn copy_file_async_opt(&self, req: &super::agent::CopyFileRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.client.unary_call_async(&METHOD_AGENT_SERVICE_COPY_FILE, req, opt)
    }
    pub fn copy_file_async(&self, req: &super::agent::CopyFileRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::empty::Empty>> {
        self.copy_file_async_opt(req, ::grpcio::CallOption::default())
    }
    /// Spawns a future onto the executor owned by the underlying grpcio client.
    pub fn spawn<F>(&self, f: F) where F: ::futures::Future<Item = (), Error = ()> + Send + 'static {
        self.client.spawn(f)
    }
}
/// Server-side handler interface for the `grpc.AgentService` service.
///
/// One callback per RPC. Each handler receives the call's `RpcContext`,
/// the already-decoded request message, and a `UnarySink` through which
/// it must send exactly one response (or an error status). All RPCs here
/// are unary; responses typed `Empty` carry no payload.
pub trait AgentService {
    fn create_container(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::CreateContainerRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn start_container(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::StartContainerRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn remove_container(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::RemoveContainerRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn exec_process(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::ExecProcessRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn signal_process(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::SignalProcessRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn wait_process(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::WaitProcessRequest, sink: ::grpcio::UnarySink<super::agent::WaitProcessResponse>);
    fn list_processes(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::ListProcessesRequest, sink: ::grpcio::UnarySink<super::agent::ListProcessesResponse>);
    fn update_container(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::UpdateContainerRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn stats_container(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::StatsContainerRequest, sink: ::grpcio::UnarySink<super::agent::StatsContainerResponse>);
    fn pause_container(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::PauseContainerRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn resume_container(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::ResumeContainerRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn write_stdin(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::WriteStreamRequest, sink: ::grpcio::UnarySink<super::agent::WriteStreamResponse>);
    fn read_stdout(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::ReadStreamRequest, sink: ::grpcio::UnarySink<super::agent::ReadStreamResponse>);
    fn read_stderr(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::ReadStreamRequest, sink: ::grpcio::UnarySink<super::agent::ReadStreamResponse>);
    fn close_stdin(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::CloseStdinRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn tty_win_resize(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::TtyWinResizeRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn update_interface(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::UpdateInterfaceRequest, sink: ::grpcio::UnarySink<super::types::Interface>);
    fn update_routes(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::UpdateRoutesRequest, sink: ::grpcio::UnarySink<super::agent::Routes>);
    fn list_interfaces(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::ListInterfacesRequest, sink: ::grpcio::UnarySink<super::agent::Interfaces>);
    fn list_routes(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::ListRoutesRequest, sink: ::grpcio::UnarySink<super::agent::Routes>);
    fn start_tracing(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::StartTracingRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn stop_tracing(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::StopTracingRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn create_sandbox(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::CreateSandboxRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn destroy_sandbox(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::DestroySandboxRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn online_cpu_mem(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::OnlineCPUMemRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn reseed_random_dev(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::ReseedRandomDevRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn get_guest_details(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::GuestDetailsRequest, sink: ::grpcio::UnarySink<super::agent::GuestDetailsResponse>);
    fn mem_hotplug_by_probe(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::MemHotplugByProbeRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn set_guest_date_time(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::SetGuestDateTimeRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
    fn copy_file(&mut self, ctx: ::grpcio::RpcContext, req: super::agent::CopyFileRequest, sink: ::grpcio::UnarySink<super::empty::Empty>);
}
pub fn create_agent_service<S: AgentService + Send + Clone + 'static>(s: S) -> ::grpcio::Service {
let mut builder = ::grpcio::ServiceBuilder::new();
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_CREATE_CONTAINER, move |ctx, req, resp| {
instance.create_container(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_START_CONTAINER, move |ctx, req, resp| {
instance.start_container(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_REMOVE_CONTAINER, move |ctx, req, resp| {
instance.remove_container(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_EXEC_PROCESS, move |ctx, req, resp| {
instance.exec_process(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_SIGNAL_PROCESS, move |ctx, req, resp| {
instance.signal_process(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_WAIT_PROCESS, move |ctx, req, resp| {
instance.wait_process(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_LIST_PROCESSES, move |ctx, req, resp| {
instance.list_processes(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_UPDATE_CONTAINER, move |ctx, req, resp| {
instance.update_container(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_STATS_CONTAINER, move |ctx, req, resp| {
instance.stats_container(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_PAUSE_CONTAINER, move |ctx, req, resp| {
instance.pause_container(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_RESUME_CONTAINER, move |ctx, req, resp| {
instance.resume_container(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_WRITE_STDIN, move |ctx, req, resp| {
instance.write_stdin(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_READ_STDOUT, move |ctx, req, resp| {
instance.read_stdout(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_READ_STDERR, move |ctx, req, resp| {
instance.read_stderr(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_CLOSE_STDIN, move |ctx, req, resp| {
instance.close_stdin(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_TTY_WIN_RESIZE, move |ctx, req, resp| {
instance.tty_win_resize(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_UPDATE_INTERFACE, move |ctx, req, resp| {
instance.update_interface(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_UPDATE_ROUTES, move |ctx, req, resp| {
instance.update_routes(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_LIST_INTERFACES, move |ctx, req, resp| {
instance.list_interfaces(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_LIST_ROUTES, move |ctx, req, resp| {
instance.list_routes(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_START_TRACING, move |ctx, req, resp| {
instance.start_tracing(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_STOP_TRACING, move |ctx, req, resp| {
instance.stop_tracing(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_CREATE_SANDBOX, move |ctx, req, resp| {
instance.create_sandbox(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_DESTROY_SANDBOX, move |ctx, req, resp| {
instance.destroy_sandbox(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_ONLINE_CPU_MEM, move |ctx, req, resp| {
instance.online_cpu_mem(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_RESEED_RANDOM_DEV, move |ctx, req, resp| {
instance.reseed_random_dev(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_GET_GUEST_DETAILS, move |ctx, req, resp| {
instance.get_guest_details(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_MEM_HOTPLUG_BY_PROBE, move |ctx, req, resp| {
instance.mem_hotplug_by_probe(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_SET_GUEST_DATE_TIME, move |ctx, req, resp| {
instance.set_guest_date_time(ctx, req, resp)
});
let mut instance = s.clone();
builder = builder.add_unary_handler(&METHOD_AGENT_SERVICE_COPY_FILE, move |ctx, req, resp| {
instance.copy_file(ctx, req, resp)
});
builder.build()
}

View File

@ -0,0 +1,221 @@
// This file is generated by rust-protobuf 2.6.2. Do not edit
// @generated
// https://github.com/Manishearth/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unsafe_code)]
#![allow(unused_imports)]
#![allow(unused_results)]
use protobuf::Message as Message_imported_for_functions;
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;
// Generated mirror of the well-known `google.protobuf.Empty` message: it
// carries no declared fields, only the bookkeeping fields every
// rust-protobuf message has (unknown fields + cached serialized size).
#[derive(PartialEq,Clone,Default)]
pub struct Empty {
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}
// `&Empty::default()` returns the process-wide default instance rather than
// allocating, matching the rust-protobuf generated-code convention.
impl<'a> ::std::default::Default for &'a Empty {
    fn default() -> &'a Empty {
        <Empty as ::protobuf::Message>::default_instance()
    }
}
impl Empty {
    // Fresh, owned instance with all special fields at their defaults.
    pub fn new() -> Empty {
        ::std::default::Default::default()
    }
}
// Generated wire-format implementation for `Empty`. With no declared
// fields, (de)serialization reduces to preserving unknown fields.
impl ::protobuf::Message for Empty {
    fn is_initialized(&self) -> bool {
        true
    }
    // Every tag read from the stream is unknown to this message, so each is
    // stashed in `unknown_fields` (or skipped for groups).
    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }
    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        // Cache the size so write_to_with_cached_sizes can reuse it.
        self.cached_size.set(my_size);
        my_size
    }
    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }
    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }
    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }
    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }
    fn as_any(&self) -> &::std::any::Any {
        self as &::std::any::Any
    }
    fn as_any_mut(&mut self) -> &mut ::std::any::Any {
        self as &mut ::std::any::Any
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> {
        self
    }
    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }
    fn new() -> Empty {
        Empty::new()
    }
    // Reflection descriptor, built once via rust-protobuf's Lazy cell.
    // NOTE(review): `static mut` + `unsafe` is the standard pattern emitted
    // by rust-protobuf 2.x; `Lazy::get` handles the one-time initialization.
    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
            lock: ::protobuf::lazy::ONCE_INIT,
            ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
        };
        unsafe {
            descriptor.get(|| {
                let fields = ::std::vec::Vec::new();
                ::protobuf::reflect::MessageDescriptor::new::<Empty>(
                    "Empty",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }
    // Shared default instance returned by `<&Empty>::default()`.
    fn default_instance() -> &'static Empty {
        static mut instance: ::protobuf::lazy::Lazy<Empty> = ::protobuf::lazy::Lazy {
            lock: ::protobuf::lazy::ONCE_INIT,
            ptr: 0 as *const Empty,
        };
        unsafe {
            instance.get(Empty::new)
        }
    }
}
// Resets the message to its default state (only unknown fields to drop).
impl ::protobuf::Clear for Empty {
    fn clear(&mut self) {
        self.unknown_fields.clear();
    }
}
// Debug output delegates to protobuf text format.
impl ::std::fmt::Debug for Empty {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}
// Lets reflection code treat an `Empty` as a generic message value.
impl ::protobuf::reflect::ProtobufValue for Empty {
    fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef {
        ::protobuf::reflect::ProtobufValueRef::Message(self)
    }
}
static file_descriptor_proto_data: &'static [u8] = b"\
\n\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\x07\n\x05Empt\
yBT\n\x13com.google.protobufB\nEmptyProtoP\x01Z\x05types\xf8\x01\x01\xa2\
\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesJ\xfe\x10\n\x06\x12\
\x04\x1e\03\x10\n\xcc\x0c\n\x01\x0c\x12\x03\x1e\0\x122\xc1\x0c\x20Protoc\
ol\x20Buffers\x20-\x20Google's\x20data\x20interchange\x20format\n\x20Cop\
yright\x202008\x20Google\x20Inc.\x20\x20All\x20rights\x20reserved.\n\x20\
https://developers.google.com/protocol-buffers/\n\n\x20Redistribution\
\x20and\x20use\x20in\x20source\x20and\x20binary\x20forms,\x20with\x20or\
\x20without\n\x20modification,\x20are\x20permitted\x20provided\x20that\
\x20the\x20following\x20conditions\x20are\n\x20met:\n\n\x20\x20\x20\x20\
\x20*\x20Redistributions\x20of\x20source\x20code\x20must\x20retain\x20th\
e\x20above\x20copyright\n\x20notice,\x20this\x20list\x20of\x20conditions\
\x20and\x20the\x20following\x20disclaimer.\n\x20\x20\x20\x20\x20*\x20Red\
istributions\x20in\x20binary\x20form\x20must\x20reproduce\x20the\x20abov\
e\n\x20copyright\x20notice,\x20this\x20list\x20of\x20conditions\x20and\
\x20the\x20following\x20disclaimer\n\x20in\x20the\x20documentation\x20an\
d/or\x20other\x20materials\x20provided\x20with\x20the\n\x20distribution.\
\n\x20\x20\x20\x20\x20*\x20Neither\x20the\x20name\x20of\x20Google\x20Inc\
.\x20nor\x20the\x20names\x20of\x20its\n\x20contributors\x20may\x20be\x20\
used\x20to\x20endorse\x20or\x20promote\x20products\x20derived\x20from\n\
\x20this\x20software\x20without\x20specific\x20prior\x20written\x20permi\
ssion.\n\n\x20THIS\x20SOFTWARE\x20IS\x20PROVIDED\x20BY\x20THE\x20COPYRIG\
HT\x20HOLDERS\x20AND\x20CONTRIBUTORS\n\x20\"AS\x20IS\"\x20AND\x20ANY\x20\
EXPRESS\x20OR\x20IMPLIED\x20WARRANTIES,\x20INCLUDING,\x20BUT\x20NOT\n\
\x20LIMITED\x20TO,\x20THE\x20IMPLIED\x20WARRANTIES\x20OF\x20MERCHANTABIL\
ITY\x20AND\x20FITNESS\x20FOR\n\x20A\x20PARTICULAR\x20PURPOSE\x20ARE\x20D\
ISCLAIMED.\x20IN\x20NO\x20EVENT\x20SHALL\x20THE\x20COPYRIGHT\n\x20OWNER\
\x20OR\x20CONTRIBUTORS\x20BE\x20LIABLE\x20FOR\x20ANY\x20DIRECT,\x20INDIR\
ECT,\x20INCIDENTAL,\n\x20SPECIAL,\x20EXEMPLARY,\x20OR\x20CONSEQUENTIAL\
\x20DAMAGES\x20(INCLUDING,\x20BUT\x20NOT\n\x20LIMITED\x20TO,\x20PROCUREM\
ENT\x20OF\x20SUBSTITUTE\x20GOODS\x20OR\x20SERVICES;\x20LOSS\x20OF\x20USE\
,\n\x20DATA,\x20OR\x20PROFITS;\x20OR\x20BUSINESS\x20INTERRUPTION)\x20HOW\
EVER\x20CAUSED\x20AND\x20ON\x20ANY\n\x20THEORY\x20OF\x20LIABILITY,\x20WH\
ETHER\x20IN\x20CONTRACT,\x20STRICT\x20LIABILITY,\x20OR\x20TORT\n\x20(INC\
LUDING\x20NEGLIGENCE\x20OR\x20OTHERWISE)\x20ARISING\x20IN\x20ANY\x20WAY\
\x20OUT\x20OF\x20THE\x20USE\n\x20OF\x20THIS\x20SOFTWARE,\x20EVEN\x20IF\
\x20ADVISED\x20OF\x20THE\x20POSSIBILITY\x20OF\x20SUCH\x20DAMAGE.\n\n\x08\
\n\x01\x02\x12\x03\x20\0\x18\n\x08\n\x01\x08\x12\x03\"\0;\n\t\n\x02\x08%\
\x12\x03\"\0;\n\x08\n\x01\x08\x12\x03#\0\x1c\n\t\n\x02\x08\x0b\x12\x03#\
\0\x1c\n\x08\n\x01\x08\x12\x03$\0,\n\t\n\x02\x08\x01\x12\x03$\0,\n\x08\n\
\x01\x08\x12\x03%\0+\n\t\n\x02\x08\x08\x12\x03%\0+\n\x08\n\x01\x08\x12\
\x03&\0\"\n\t\n\x02\x08\n\x12\x03&\0\"\n\x08\n\x01\x08\x12\x03'\0!\n\t\n\
\x02\x08$\x12\x03'\0!\n\x08\n\x01\x08\x12\x03(\0\x1f\n\t\n\x02\x08\x1f\
\x12\x03(\0\x1f\n\xfb\x02\n\x02\x04\0\x12\x033\0\x10\x1a\xef\x02\x20A\
\x20generic\x20empty\x20message\x20that\x20you\x20can\x20re-use\x20to\
\x20avoid\x20defining\x20duplicated\n\x20empty\x20messages\x20in\x20your\
\x20APIs.\x20A\x20typical\x20example\x20is\x20to\x20use\x20it\x20as\x20t\
he\x20request\n\x20or\x20the\x20response\x20type\x20of\x20an\x20API\x20m\
ethod.\x20For\x20instance:\n\n\x20\x20\x20\x20\x20service\x20Foo\x20{\n\
\x20\x20\x20\x20\x20\x20\x20rpc\x20Bar(google.protobuf.Empty)\x20returns\
\x20(google.protobuf.Empty);\n\x20\x20\x20\x20\x20}\n\n\x20The\x20JSON\
\x20representation\x20for\x20`Empty`\x20is\x20empty\x20JSON\x20object\
\x20`{}`.\n\n\n\n\x03\x04\0\x01\x12\x033\x08\rb\x06proto3\
";
// Lazily-parsed FileDescriptorProto for empty.proto; initialized on first
// use from the serialized descriptor bytes above.
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy {
    lock: ::protobuf::lazy::ONCE_INIT,
    ptr: 0 as *const ::protobuf::descriptor::FileDescriptorProto,
};
// Decodes the embedded descriptor bytes; unwrap is safe for generated data
// because the bytes were produced by protoc and cannot be malformed.
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
    ::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
// Public accessor used by the message descriptors in this module.
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
    unsafe {
        file_descriptor_proto_lazy.get(|| {
            parse_descriptor_proto()
        })
    }
}

View File

@ -0,0 +1,688 @@
// This file is generated by rust-protobuf 2.6.2. Do not edit
// @generated
// https://github.com/Manishearth/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unsafe_code)]
#![allow(unused_imports)]
#![allow(unused_results)]
use protobuf::Message as Message_imported_for_functions;
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;
#[derive(PartialEq,Clone,Default)]
// Generated message for `grpc.CheckRequest` (health.proto): names the
// service whose health/version is being queried.
pub struct CheckRequest {
    // message fields
    pub service: ::std::string::String,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}
// `&CheckRequest::default()` yields the shared default instance.
impl<'a> ::std::default::Default for &'a CheckRequest {
    fn default() -> &'a CheckRequest {
        <CheckRequest as ::protobuf::Message>::default_instance()
    }
}
impl CheckRequest {
    pub fn new() -> CheckRequest {
        ::std::default::Default::default()
    }
    // string service = 1;
    pub fn get_service(&self) -> &str {
        &self.service
    }
    pub fn clear_service(&mut self) {
        self.service.clear();
    }
    // Param is passed by value, moved
    pub fn set_service(&mut self, v: ::std::string::String) {
        self.service = v;
    }
    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_service(&mut self) -> &mut ::std::string::String {
        &mut self.service
    }
    // Take field
    // Moves the value out, leaving an empty string behind.
    pub fn take_service(&mut self) -> ::std::string::String {
        ::std::mem::replace(&mut self.service, ::std::string::String::new())
    }
}
// Generated wire-format implementation for `CheckRequest`
// (single proto3 string field `service = 1`).
impl ::protobuf::Message for CheckRequest {
    fn is_initialized(&self) -> bool {
        true
    }
    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.service)?;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }
    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        // proto3: empty string is the default and is not serialized.
        if !self.service.is_empty() {
            my_size += ::protobuf::rt::string_size(1, &self.service);
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }
    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
        if !self.service.is_empty() {
            os.write_string(1, &self.service)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }
    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }
    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }
    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }
    fn as_any(&self) -> &::std::any::Any {
        self as &::std::any::Any
    }
    fn as_any_mut(&mut self) -> &mut ::std::any::Any {
        self as &mut ::std::any::Any
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> {
        self
    }
    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }
    fn new() -> CheckRequest {
        CheckRequest::new()
    }
    // Reflection descriptor with one accessor for `service`; built once via
    // rust-protobuf's Lazy cell (standard generated `static mut` pattern).
    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
            lock: ::protobuf::lazy::ONCE_INIT,
            ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
        };
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "service",
                    |m: &CheckRequest| { &m.service },
                    |m: &mut CheckRequest| { &mut m.service },
                ));
                ::protobuf::reflect::MessageDescriptor::new::<CheckRequest>(
                    "CheckRequest",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }
    fn default_instance() -> &'static CheckRequest {
        static mut instance: ::protobuf::lazy::Lazy<CheckRequest> = ::protobuf::lazy::Lazy {
            lock: ::protobuf::lazy::ONCE_INIT,
            ptr: 0 as *const CheckRequest,
        };
        unsafe {
            instance.get(CheckRequest::new)
        }
    }
}
// Resets all fields to their proto3 defaults.
impl ::protobuf::Clear for CheckRequest {
    fn clear(&mut self) {
        self.service.clear();
        self.unknown_fields.clear();
    }
}
// Debug output delegates to protobuf text format.
impl ::std::fmt::Debug for CheckRequest {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}
impl ::protobuf::reflect::ProtobufValue for CheckRequest {
    fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef {
        ::protobuf::reflect::ProtobufValueRef::Message(self)
    }
}
// Generated message for `grpc.HealthCheckResponse`: reports the serving
// status of the queried service.
#[derive(PartialEq,Clone,Default)]
pub struct HealthCheckResponse {
    // message fields
    pub status: HealthCheckResponse_ServingStatus,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}
// `&HealthCheckResponse::default()` yields the shared default instance.
impl<'a> ::std::default::Default for &'a HealthCheckResponse {
    fn default() -> &'a HealthCheckResponse {
        <HealthCheckResponse as ::protobuf::Message>::default_instance()
    }
}
impl HealthCheckResponse {
    pub fn new() -> HealthCheckResponse {
        ::std::default::Default::default()
    }
    // .grpc.HealthCheckResponse.ServingStatus status = 1;
    pub fn get_status(&self) -> HealthCheckResponse_ServingStatus {
        self.status
    }
    pub fn clear_status(&mut self) {
        self.status = HealthCheckResponse_ServingStatus::UNKNOWN;
    }
    // Param is passed by value, moved
    pub fn set_status(&mut self, v: HealthCheckResponse_ServingStatus) {
        self.status = v;
    }
}
// Generated wire-format implementation for `HealthCheckResponse`
// (single proto3 enum field `status = 1`).
impl ::protobuf::Message for HealthCheckResponse {
    fn is_initialized(&self) -> bool {
        true
    }
    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    // Unrecognized enum values are preserved in unknown_fields.
                    ::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.status, 1, &mut self.unknown_fields)?
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }
    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        // proto3: the zero-valued enum (UNKNOWN) is the default and is not serialized.
        if self.status != HealthCheckResponse_ServingStatus::UNKNOWN {
            my_size += ::protobuf::rt::enum_size(1, self.status);
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }
    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
        if self.status != HealthCheckResponse_ServingStatus::UNKNOWN {
            os.write_enum(1, self.status.value())?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }
    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }
    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }
    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }
    fn as_any(&self) -> &::std::any::Any {
        self as &::std::any::Any
    }
    fn as_any_mut(&mut self) -> &mut ::std::any::Any {
        self as &mut ::std::any::Any
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> {
        self
    }
    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }
    fn new() -> HealthCheckResponse {
        HealthCheckResponse::new()
    }
    // Reflection descriptor with one enum accessor; built once via
    // rust-protobuf's Lazy cell (standard generated `static mut` pattern).
    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
            lock: ::protobuf::lazy::ONCE_INIT,
            ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
        };
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<HealthCheckResponse_ServingStatus>>(
                    "status",
                    |m: &HealthCheckResponse| { &m.status },
                    |m: &mut HealthCheckResponse| { &mut m.status },
                ));
                ::protobuf::reflect::MessageDescriptor::new::<HealthCheckResponse>(
                    "HealthCheckResponse",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }
    fn default_instance() -> &'static HealthCheckResponse {
        static mut instance: ::protobuf::lazy::Lazy<HealthCheckResponse> = ::protobuf::lazy::Lazy {
            lock: ::protobuf::lazy::ONCE_INIT,
            ptr: 0 as *const HealthCheckResponse,
        };
        unsafe {
            instance.get(HealthCheckResponse::new)
        }
    }
}
// Resets all fields to their proto3 defaults.
impl ::protobuf::Clear for HealthCheckResponse {
    fn clear(&mut self) {
        self.status = HealthCheckResponse_ServingStatus::UNKNOWN;
        self.unknown_fields.clear();
    }
}
// Debug output delegates to protobuf text format.
impl ::std::fmt::Debug for HealthCheckResponse {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}
impl ::protobuf::reflect::ProtobufValue for HealthCheckResponse {
    fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef {
        ::protobuf::reflect::ProtobufValueRef::Message(self)
    }
}
// Generated enum for `HealthCheckResponse.ServingStatus`; discriminants
// match the proto field numbers on the wire.
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum HealthCheckResponse_ServingStatus {
    UNKNOWN = 0,
    SERVING = 1,
    NOT_SERVING = 2,
}
impl ::protobuf::ProtobufEnum for HealthCheckResponse_ServingStatus {
    fn value(&self) -> i32 {
        *self as i32
    }
    // Maps a wire value back to a variant; None for out-of-range values.
    fn from_i32(value: i32) -> ::std::option::Option<HealthCheckResponse_ServingStatus> {
        match value {
            0 => ::std::option::Option::Some(HealthCheckResponse_ServingStatus::UNKNOWN),
            1 => ::std::option::Option::Some(HealthCheckResponse_ServingStatus::SERVING),
            2 => ::std::option::Option::Some(HealthCheckResponse_ServingStatus::NOT_SERVING),
            _ => ::std::option::Option::None
        }
    }
    fn values() -> &'static [Self] {
        static values: &'static [HealthCheckResponse_ServingStatus] = &[
            HealthCheckResponse_ServingStatus::UNKNOWN,
            HealthCheckResponse_ServingStatus::SERVING,
            HealthCheckResponse_ServingStatus::NOT_SERVING,
        ];
        values
    }
    // Reflection descriptor, built once via rust-protobuf's Lazy cell.
    fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::EnumDescriptor> = ::protobuf::lazy::Lazy {
            lock: ::protobuf::lazy::ONCE_INIT,
            ptr: 0 as *const ::protobuf::reflect::EnumDescriptor,
        };
        unsafe {
            descriptor.get(|| {
                ::protobuf::reflect::EnumDescriptor::new("HealthCheckResponse_ServingStatus", file_descriptor_proto())
            })
        }
    }
}
impl ::std::marker::Copy for HealthCheckResponse_ServingStatus {
}
// proto3 default is the zero variant.
impl ::std::default::Default for HealthCheckResponse_ServingStatus {
    fn default() -> Self {
        HealthCheckResponse_ServingStatus::UNKNOWN
    }
}
impl ::protobuf::reflect::ProtobufValue for HealthCheckResponse_ServingStatus {
    fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef {
        ::protobuf::reflect::ProtobufValueRef::Enum(self.descriptor())
    }
}
// Generated message for `grpc.VersionCheckResponse`: reports the gRPC API
// version and the agent version.
#[derive(PartialEq,Clone,Default)]
pub struct VersionCheckResponse {
    // message fields
    pub grpc_version: ::std::string::String,
    pub agent_version: ::std::string::String,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}
// `&VersionCheckResponse::default()` yields the shared default instance.
impl<'a> ::std::default::Default for &'a VersionCheckResponse {
    fn default() -> &'a VersionCheckResponse {
        <VersionCheckResponse as ::protobuf::Message>::default_instance()
    }
}
impl VersionCheckResponse {
    pub fn new() -> VersionCheckResponse {
        ::std::default::Default::default()
    }
    // string grpc_version = 1;
    pub fn get_grpc_version(&self) -> &str {
        &self.grpc_version
    }
    pub fn clear_grpc_version(&mut self) {
        self.grpc_version.clear();
    }
    // Param is passed by value, moved
    pub fn set_grpc_version(&mut self, v: ::std::string::String) {
        self.grpc_version = v;
    }
    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_grpc_version(&mut self) -> &mut ::std::string::String {
        &mut self.grpc_version
    }
    // Take field
    // Moves the value out, leaving an empty string behind.
    pub fn take_grpc_version(&mut self) -> ::std::string::String {
        ::std::mem::replace(&mut self.grpc_version, ::std::string::String::new())
    }
    // string agent_version = 2;
    pub fn get_agent_version(&self) -> &str {
        &self.agent_version
    }
    pub fn clear_agent_version(&mut self) {
        self.agent_version.clear();
    }
    // Param is passed by value, moved
    pub fn set_agent_version(&mut self, v: ::std::string::String) {
        self.agent_version = v;
    }
    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_agent_version(&mut self) -> &mut ::std::string::String {
        &mut self.agent_version
    }
    // Take field
    // Moves the value out, leaving an empty string behind.
    pub fn take_agent_version(&mut self) -> ::std::string::String {
        ::std::mem::replace(&mut self.agent_version, ::std::string::String::new())
    }
}
// Generated wire-format implementation for `VersionCheckResponse`
// (proto3 string fields `grpc_version = 1`, `agent_version = 2`).
impl ::protobuf::Message for VersionCheckResponse {
    fn is_initialized(&self) -> bool {
        true
    }
    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.grpc_version)?;
                },
                2 => {
                    ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.agent_version)?;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }
    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        // proto3: empty strings are defaults and are not serialized.
        if !self.grpc_version.is_empty() {
            my_size += ::protobuf::rt::string_size(1, &self.grpc_version);
        }
        if !self.agent_version.is_empty() {
            my_size += ::protobuf::rt::string_size(2, &self.agent_version);
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }
    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
        if !self.grpc_version.is_empty() {
            os.write_string(1, &self.grpc_version)?;
        }
        if !self.agent_version.is_empty() {
            os.write_string(2, &self.agent_version)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }
    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }
    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }
    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }
    fn as_any(&self) -> &::std::any::Any {
        self as &::std::any::Any
    }
    fn as_any_mut(&mut self) -> &mut ::std::any::Any {
        self as &mut ::std::any::Any
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> {
        self
    }
    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }
    fn new() -> VersionCheckResponse {
        VersionCheckResponse::new()
    }
    // Reflection descriptor with one accessor per string field; built once
    // via rust-protobuf's Lazy cell (standard generated `static mut` pattern).
    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
            lock: ::protobuf::lazy::ONCE_INIT,
            ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
        };
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "grpc_version",
                    |m: &VersionCheckResponse| { &m.grpc_version },
                    |m: &mut VersionCheckResponse| { &mut m.grpc_version },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "agent_version",
                    |m: &VersionCheckResponse| { &m.agent_version },
                    |m: &mut VersionCheckResponse| { &mut m.agent_version },
                ));
                ::protobuf::reflect::MessageDescriptor::new::<VersionCheckResponse>(
                    "VersionCheckResponse",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }
    fn default_instance() -> &'static VersionCheckResponse {
        static mut instance: ::protobuf::lazy::Lazy<VersionCheckResponse> = ::protobuf::lazy::Lazy {
            lock: ::protobuf::lazy::ONCE_INIT,
            ptr: 0 as *const VersionCheckResponse,
        };
        unsafe {
            instance.get(VersionCheckResponse::new)
        }
    }
}
// Resets all fields to their proto3 defaults.
impl ::protobuf::Clear for VersionCheckResponse {
    fn clear(&mut self) {
        self.grpc_version.clear();
        self.agent_version.clear();
        self.unknown_fields.clear();
    }
}
// Debug output delegates to protobuf text format.
impl ::std::fmt::Debug for VersionCheckResponse {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}
impl ::protobuf::reflect::ProtobufValue for VersionCheckResponse {
    fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef {
        ::protobuf::reflect::ProtobufValueRef::Message(self)
    }
}
static file_descriptor_proto_data: &'static [u8] = b"\
\n\x0chealth.proto\x12\x04grpc\x1a-github.com/gogo/protobuf/gogoproto/go\
go.proto\"(\n\x0cCheckRequest\x12\x18\n\x07service\x18\x01\x20\x01(\tR\
\x07service\"\x92\x01\n\x13HealthCheckResponse\x12?\n\x06status\x18\x01\
\x20\x01(\x0e2'.grpc.HealthCheckResponse.ServingStatusR\x06status\":\n\r\
ServingStatus\x12\x0b\n\x07UNKNOWN\x10\0\x12\x0b\n\x07SERVING\x10\x01\
\x12\x0f\n\x0bNOT_SERVING\x10\x02\"^\n\x14VersionCheckResponse\x12!\n\
\x0cgrpc_version\x18\x01\x20\x01(\tR\x0bgrpcVersion\x12#\n\ragent_versio\
n\x18\x02\x20\x01(\tR\x0cagentVersion2{\n\x06Health\x126\n\x05Check\x12\
\x12.grpc.CheckRequest\x1a\x19.grpc.HealthCheckResponse\x129\n\x07Versio\
n\x12\x12.grpc.CheckRequest\x1a\x1a.grpc.VersionCheckResponseB\x10\xb8\
\xe2\x1e\x01\xf8\xe1\x1e\x01\xa8\xe2\x1e\x01\xc0\xe2\x1e\x01J\xd9\x06\n\
\x06\x12\x04\x06\0&\x01\nO\n\x01\x0c\x12\x03\x06\0\x122E\n\x20Copyright\
\x202017\x20HyperHQ\x20Inc.\n\n\x20SPDX-License-Identifier:\x20Apache-2.\
0\n\n\n\x08\n\x01\x02\x12\x03\x08\0\r\n\t\n\x02\x03\0\x12\x03\n\07\n\x08\
\n\x01\x08\x12\x03\x0c\0$\n\x0b\n\x04\x08\xa5\xec\x03\x12\x03\x0c\0$\n\
\x08\n\x01\x08\x12\x03\r\0'\n\x0b\n\x04\x08\x9f\xec\x03\x12\x03\r\0'\n\
\x08\n\x01\x08\x12\x03\x0e\0&\n\x0b\n\x04\x08\xa7\xec\x03\x12\x03\x0e\0&\
\n\x08\n\x01\x08\x12\x03\x0f\0'\n\x0b\n\x04\x08\xa8\xec\x03\x12\x03\x0f\
\0'\n\n\n\x02\x04\0\x12\x04\x11\0\x13\x01\n\n\n\x03\x04\0\x01\x12\x03\
\x11\x08\x14\n\x0b\n\x04\x04\0\x02\0\x12\x03\x12\x08\x1b\n\r\n\x05\x04\0\
\x02\0\x04\x12\x04\x12\x08\x11\x16\n\x0c\n\x05\x04\0\x02\0\x05\x12\x03\
\x12\x08\x0e\n\x0c\n\x05\x04\0\x02\0\x01\x12\x03\x12\x0f\x16\n\x0c\n\x05\
\x04\0\x02\0\x03\x12\x03\x12\x19\x1a\n\n\n\x02\x04\x01\x12\x04\x15\0\x1c\
\x01\n\n\n\x03\x04\x01\x01\x12\x03\x15\x08\x1b\n\x0c\n\x04\x04\x01\x04\0\
\x12\x04\x16\x08\x1a\t\n\x0c\n\x05\x04\x01\x04\0\x01\x12\x03\x16\r\x1a\n\
\r\n\x06\x04\x01\x04\0\x02\0\x12\x03\x17\x10\x1c\n\x0e\n\x07\x04\x01\x04\
\0\x02\0\x01\x12\x03\x17\x10\x17\n\x0e\n\x07\x04\x01\x04\0\x02\0\x02\x12\
\x03\x17\x1a\x1b\n\r\n\x06\x04\x01\x04\0\x02\x01\x12\x03\x18\x10\x1c\n\
\x0e\n\x07\x04\x01\x04\0\x02\x01\x01\x12\x03\x18\x10\x17\n\x0e\n\x07\x04\
\x01\x04\0\x02\x01\x02\x12\x03\x18\x1a\x1b\n\r\n\x06\x04\x01\x04\0\x02\
\x02\x12\x03\x19\x10\x20\n\x0e\n\x07\x04\x01\x04\0\x02\x02\x01\x12\x03\
\x19\x10\x1b\n\x0e\n\x07\x04\x01\x04\0\x02\x02\x02\x12\x03\x19\x1e\x1f\n\
\x0b\n\x04\x04\x01\x02\0\x12\x03\x1b\x08!\n\r\n\x05\x04\x01\x02\0\x04\
\x12\x04\x1b\x08\x1a\t\n\x0c\n\x05\x04\x01\x02\0\x06\x12\x03\x1b\x08\x15\
\n\x0c\n\x05\x04\x01\x02\0\x01\x12\x03\x1b\x16\x1c\n\x0c\n\x05\x04\x01\
\x02\0\x03\x12\x03\x1b\x1f\x20\n\n\n\x02\x04\x02\x12\x04\x1e\0!\x01\n\n\
\n\x03\x04\x02\x01\x12\x03\x1e\x08\x1c\n\x0b\n\x04\x04\x02\x02\0\x12\x03\
\x1f\x08\x20\n\r\n\x05\x04\x02\x02\0\x04\x12\x04\x1f\x08\x1e\x1e\n\x0c\n\
\x05\x04\x02\x02\0\x05\x12\x03\x1f\x08\x0e\n\x0c\n\x05\x04\x02\x02\0\x01\
\x12\x03\x1f\x0f\x1b\n\x0c\n\x05\x04\x02\x02\0\x03\x12\x03\x1f\x1e\x1f\n\
\x0b\n\x04\x04\x02\x02\x01\x12\x03\x20\x08!\n\r\n\x05\x04\x02\x02\x01\
\x04\x12\x04\x20\x08\x1f\x20\n\x0c\n\x05\x04\x02\x02\x01\x05\x12\x03\x20\
\x08\x0e\n\x0c\n\x05\x04\x02\x02\x01\x01\x12\x03\x20\x0f\x1c\n\x0c\n\x05\
\x04\x02\x02\x01\x03\x12\x03\x20\x1f\x20\n\n\n\x02\x06\0\x12\x04#\0&\x01\
\n\n\n\x03\x06\0\x01\x12\x03#\x08\x0e\n\x0b\n\x04\x06\0\x02\0\x12\x03$\
\x08>\n\x0c\n\x05\x06\0\x02\0\x01\x12\x03$\x0c\x11\n\x0c\n\x05\x06\0\x02\
\0\x02\x12\x03$\x12\x1e\n\x0c\n\x05\x06\0\x02\0\x03\x12\x03$)<\n\x0b\n\
\x04\x06\0\x02\x01\x12\x03%\x08A\n\x0c\n\x05\x06\0\x02\x01\x01\x12\x03%\
\x0c\x13\n\x0c\n\x05\x06\0\x02\x01\x02\x12\x03%\x14\x20\n\x0c\n\x05\x06\
\0\x02\x01\x03\x12\x03%+?b\x06proto3\
";
// Lazily-initialised cache for the parsed FileDescriptorProto.
// NOTE: this file is protobuf-generated; hand edits are lost on regeneration.
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy {
    lock: ::protobuf::lazy::ONCE_INIT,
    ptr: 0 as *const ::protobuf::descriptor::FileDescriptorProto,
};
// Decode the embedded serialized descriptor bytes into a FileDescriptorProto.
// unwrap() is safe here only because the bytes are emitted by protoc itself.
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
    ::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
// Public accessor: parse the descriptor once and hand out a 'static reference.
// `unsafe` is required to touch the `static mut` lazy cell; the Lazy wrapper
// serializes initialisation.
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
    unsafe {
        file_descriptor_proto_lazy.get(|| {
            parse_descriptor_proto()
        })
    }
}

View File

@ -0,0 +1,99 @@
// This file is generated. Do not edit
// @generated
// https://github.com/Manishearth/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unsafe_code)]
#![allow(unused_imports)]
#![allow(unused_results)]
// Method descriptor for the unary Health.Check RPC (generated by grpcio).
const METHOD_HEALTH_CHECK: ::grpcio::Method<super::health::CheckRequest, super::health::HealthCheckResponse> = ::grpcio::Method {
    ty: ::grpcio::MethodType::Unary,
    name: "/grpc.Health/Check",
    req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
    resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
// Method descriptor for the unary Health.Version RPC (generated by grpcio).
const METHOD_HEALTH_VERSION: ::grpcio::Method<super::health::CheckRequest, super::health::VersionCheckResponse> = ::grpcio::Method {
    ty: ::grpcio::MethodType::Unary,
    name: "/grpc.Health/Version",
    req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
    resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de },
};
/// Generated client stub for the Health service; cheap to clone
/// (wraps a shared grpcio channel).
#[derive(Clone)]
pub struct HealthClient {
    client: ::grpcio::Client,
}
impl HealthClient {
    /// Build a client stub over an established channel.
    pub fn new(channel: ::grpcio::Channel) -> Self {
        HealthClient {
            client: ::grpcio::Client::new(channel),
        }
    }
    /// Blocking Health.Check with explicit call options (deadline, metadata, ...).
    pub fn check_opt(&self, req: &super::health::CheckRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::health::HealthCheckResponse> {
        self.client.unary_call(&METHOD_HEALTH_CHECK, req, opt)
    }
    /// Blocking Health.Check with default call options.
    pub fn check(&self, req: &super::health::CheckRequest) -> ::grpcio::Result<super::health::HealthCheckResponse> {
        self.check_opt(req, ::grpcio::CallOption::default())
    }
    /// Async Health.Check; the returned receiver resolves to the response.
    pub fn check_async_opt(&self, req: &super::health::CheckRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::health::HealthCheckResponse>> {
        self.client.unary_call_async(&METHOD_HEALTH_CHECK, req, opt)
    }
    /// Async Health.Check with default call options.
    pub fn check_async(&self, req: &super::health::CheckRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::health::HealthCheckResponse>> {
        self.check_async_opt(req, ::grpcio::CallOption::default())
    }
    /// Blocking Health.Version with explicit call options.
    pub fn version_opt(&self, req: &super::health::CheckRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<super::health::VersionCheckResponse> {
        self.client.unary_call(&METHOD_HEALTH_VERSION, req, opt)
    }
    /// Blocking Health.Version with default call options.
    pub fn version(&self, req: &super::health::CheckRequest) -> ::grpcio::Result<super::health::VersionCheckResponse> {
        self.version_opt(req, ::grpcio::CallOption::default())
    }
    /// Async Health.Version with explicit call options.
    pub fn version_async_opt(&self, req: &super::health::CheckRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::health::VersionCheckResponse>> {
        self.client.unary_call_async(&METHOD_HEALTH_VERSION, req, opt)
    }
    /// Async Health.Version with default call options.
    pub fn version_async(&self, req: &super::health::CheckRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver<super::health::VersionCheckResponse>> {
        self.version_async_opt(req, ::grpcio::CallOption::default())
    }
    /// Spawn a future onto the client's internal executor.
    pub fn spawn<F>(&self, f: F) where F: ::futures::Future<Item = (), Error = ()> + Send + 'static {
        self.client.spawn(f)
    }
}
/// Server-side interface for the Health service; implement both unary
/// handlers and register with `create_health`.
pub trait Health {
    fn check(&mut self, ctx: ::grpcio::RpcContext, req: super::health::CheckRequest, sink: ::grpcio::UnarySink<super::health::HealthCheckResponse>);
    fn version(&mut self, ctx: ::grpcio::RpcContext, req: super::health::CheckRequest, sink: ::grpcio::UnarySink<super::health::VersionCheckResponse>);
}
/// Wire a `Health` implementation into a grpcio `Service`.
/// The implementation is cloned once per registered method, so each handler
/// closure owns its own instance (hence the `Clone` bound).
pub fn create_health<S: Health + Send + Clone + 'static>(s: S) -> ::grpcio::Service {
    let mut builder = ::grpcio::ServiceBuilder::new();
    let mut instance = s.clone();
    builder = builder.add_unary_handler(&METHOD_HEALTH_CHECK, move |ctx, req, resp| {
        instance.check(ctx, req, resp)
    });
    let mut instance = s.clone();
    builder = builder.add_unary_handler(&METHOD_HEALTH_VERSION, move |ctx, req, resp| {
        instance.version(ctx, req, resp)
    });
    builder.build()
}

View File

@ -0,0 +1,17 @@
#![allow(bare_trait_objects)]
pub mod agent;
pub mod agent_grpc;
pub mod health;
pub mod health_grpc;
pub mod oci;
pub mod types;
pub mod empty;
#[cfg(test)]
mod tests {
    /// Smoke test proving the crate's test harness builds and runs.
    #[test]
    fn it_works() {
        let sum = 2 + 2;
        assert_eq!(sum, 4);
    }
}

10473
src/agent/protocols/src/oci.rs Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,25 @@
[package]
name = "rustjail"
version = "0.1.0"
authors = ["Yang Bo <bo@hyper.sh>"]
edition = "2018"
[dependencies]
error-chain = "0.12.1"
serde = "1.0.91"
serde_json = "1.0.39"
serde_derive = "1.0.91"
oci = { path = "../oci" }
protocols = { path ="../protocols" }
caps = "0.3.0"
nix = "0.14.1"
scopeguard = "1.0.0"
prctl = "1.0.0"
lazy_static = "1.3.0"
libc = "0.2.58"
protobuf = "2.6.1"
slog = "2.5.2"
slog-scope = "4.1.2"
scan_fmt = "0.2"
regex = "1.1"
path-absolutize = { git = "https://github.com/magiclen/path-absolutize.git", tag = "v1.1.3" }

View File

@ -0,0 +1,135 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
// looks like we can use caps to manipulate capabilities
// conveniently, use caps to do it directly.. maybe
use lazy_static;
use crate::errors::*;
use caps::{self, CapSet, Capability, CapsHashSet};
use protocols::oci::LinuxCapabilities;
use slog::Logger;
use std::collections::HashMap;
lazy_static! {
    /// Map from OCI capability names (e.g. "CAP_CHOWN") to the `caps`
    /// crate's `Capability` values. Names not present here are rejected
    /// by `to_capshashset`.
    pub static ref CAPSMAP: HashMap<String, Capability> = {
        let mut m = HashMap::new();
        m.insert("CAP_CHOWN".to_string(), Capability::CAP_CHOWN);
        m.insert("CAP_DAC_OVERRIDE".to_string(), Capability::CAP_DAC_OVERRIDE);
        m.insert(
            "CAP_DAC_READ_SEARCH".to_string(),
            Capability::CAP_DAC_READ_SEARCH,
        );
        m.insert("CAP_FOWNER".to_string(), Capability::CAP_FOWNER);
        m.insert("CAP_FSETID".to_string(), Capability::CAP_FSETID);
        m.insert("CAP_KILL".to_string(), Capability::CAP_KILL);
        m.insert("CAP_SETGID".to_string(), Capability::CAP_SETGID);
        m.insert("CAP_SETUID".to_string(), Capability::CAP_SETUID);
        m.insert("CAP_SETPCAP".to_string(), Capability::CAP_SETPCAP);
        m.insert(
            "CAP_LINUX_IMMUTABLE".to_string(),
            Capability::CAP_LINUX_IMMUTABLE,
        );
        m.insert(
            "CAP_NET_BIND_SERVICE".to_string(),
            Capability::CAP_NET_BIND_SERVICE,
        );
        m.insert(
            "CAP_NET_BROADCAST".to_string(),
            Capability::CAP_NET_BROADCAST,
        );
        m.insert("CAP_NET_ADMIN".to_string(), Capability::CAP_NET_ADMIN);
        m.insert("CAP_NET_RAW".to_string(), Capability::CAP_NET_RAW);
        m.insert("CAP_IPC_LOCK".to_string(), Capability::CAP_IPC_LOCK);
        m.insert("CAP_IPC_OWNER".to_string(), Capability::CAP_IPC_OWNER);
        m.insert("CAP_SYS_MODULE".to_string(), Capability::CAP_SYS_MODULE);
        m.insert("CAP_SYS_RAWIO".to_string(), Capability::CAP_SYS_RAWIO);
        m.insert("CAP_SYS_CHROOT".to_string(), Capability::CAP_SYS_CHROOT);
        m.insert("CAP_SYS_PTRACE".to_string(), Capability::CAP_SYS_PTRACE);
        m.insert("CAP_SYS_PACCT".to_string(), Capability::CAP_SYS_PACCT);
        m.insert("CAP_SYS_ADMIN".to_string(), Capability::CAP_SYS_ADMIN);
        m.insert("CAP_SYS_BOOT".to_string(), Capability::CAP_SYS_BOOT);
        m.insert("CAP_SYS_NICE".to_string(), Capability::CAP_SYS_NICE);
        m.insert("CAP_SYS_RESOURCE".to_string(), Capability::CAP_SYS_RESOURCE);
        m.insert("CAP_SYS_TIME".to_string(), Capability::CAP_SYS_TIME);
        m.insert(
            "CAP_SYS_TTY_CONFIG".to_string(),
            Capability::CAP_SYS_TTY_CONFIG,
        );
        m.insert("CAP_MKNOD".to_string(), Capability::CAP_MKNOD);
        m.insert("CAP_LEASE".to_string(), Capability::CAP_LEASE);
        m.insert("CAP_AUDIT_WRITE".to_string(), Capability::CAP_AUDIT_WRITE);
        // Fixed: previously mapped to CAP_AUDIT_WRITE by mistake, which
        // silently granted the wrong capability for CAP_AUDIT_CONTROL.
        m.insert(
            "CAP_AUDIT_CONTROL".to_string(),
            Capability::CAP_AUDIT_CONTROL,
        );
        m.insert("CAP_SETFCAP".to_string(), Capability::CAP_SETFCAP);
        m.insert("CAP_MAC_OVERRIDE".to_string(), Capability::CAP_MAC_OVERRIDE);
        m.insert("CAP_SYSLOG".to_string(), Capability::CAP_SYSLOG);
        m.insert("CAP_WAKE_ALARM".to_string(), Capability::CAP_WAKE_ALARM);
        m.insert(
            "CAP_BLOCK_SUSPEND".to_string(),
            Capability::CAP_BLOCK_SUSPEND,
        );
        m.insert("CAP_AUDIT_READ".to_string(), Capability::CAP_AUDIT_READ);
        m
    };
}
/// Translate OCI capability names into a `CapsHashSet`.
/// Unknown names are logged and skipped rather than failing the whole set.
fn to_capshashset(logger: &Logger, caps: &[String]) -> CapsHashSet {
    let mut r = CapsHashSet::new();
    for cap in caps.iter() {
        // match instead of is_none()/unwrap(): one lookup, no panic path.
        match CAPSMAP.get(cap) {
            Some(c) => {
                r.insert(*c);
            }
            None => warn!(logger, "{} is not a cap", cap),
        }
    }
    r
}
pub fn reset_effective() -> Result<()> {
caps::set(None, CapSet::Effective, caps::all())?;
Ok(())
}
/// Apply the capability sets from an OCI `LinuxCapabilities` spec to the
/// current thread: everything outside `Bounding` is dropped from the
/// bounding set, then Effective/Permitted/Inheritable are set exactly.
/// Setting the Ambient set is best-effort (older kernels lack support),
/// so a failure there is only logged.
pub fn drop_priviledges(logger: &Logger, caps: &LinuxCapabilities) -> Result<()> {
    let logger = logger.new(o!("subsystem" => "capabilities"));
    // Drop from the bounding set every capability not explicitly allowed.
    let all = caps::all();
    for c in all.difference(&to_capshashset(&logger, caps.Bounding.as_ref())) {
        caps::drop(None, CapSet::Bounding, *c)?;
    }
    caps::set(
        None,
        CapSet::Effective,
        to_capshashset(&logger, caps.Effective.as_ref()),
    )?;
    caps::set(
        None,
        CapSet::Permitted,
        to_capshashset(&logger, caps.Permitted.as_ref()),
    )?;
    caps::set(
        None,
        CapSet::Inheritable,
        to_capshashset(&logger, caps.Inheritable.as_ref()),
    )?;
    // `.is_err()` instead of `if let Err(_)` (clippy: redundant_pattern_matching).
    if caps::set(
        None,
        CapSet::Ambient,
        to_capshashset(&logger, caps.Ambient.as_ref()),
    )
    .is_err()
    {
        warn!(logger, "failed to set ambient capability");
    }
    Ok(())
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,49 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use crate::errors::*;
// use crate::configs::{FreezerState, Config};
use protocols::agent::CgroupStats;
use protocols::oci::LinuxResources;
use std::collections::HashMap;
pub mod fs;
pub mod systemd;
pub type FreezerState = &'static str;
// Shared default body: every cgroup operation is unsupported unless a
// concrete manager overrides it.
fn not_supported<T>() -> Result<T> {
    Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
}

/// Interface a cgroup backend (fs, systemd, ...) must provide.
/// All methods default to an "unsupported" error so backends only
/// implement the subset they actually handle.
pub trait Manager {
    fn apply(&self, _pid: i32) -> Result<()> {
        not_supported()
    }
    fn get_pids(&self) -> Result<Vec<i32>> {
        not_supported()
    }
    fn get_all_pids(&self) -> Result<Vec<i32>> {
        not_supported()
    }
    fn get_stats(&self) -> Result<CgroupStats> {
        not_supported()
    }
    fn freeze(&self, _state: FreezerState) -> Result<()> {
        not_supported()
    }
    fn destroy(&mut self) -> Result<()> {
        not_supported()
    }
    fn get_paths(&self) -> Result<HashMap<String, String>> {
        not_supported()
    }
    fn set(&self, _container: &LinuxResources, _update: bool) -> Result<()> {
        not_supported()
    }
}

View File

@ -0,0 +1,10 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use crate::cgroups::Manager as CgroupManager;
// Placeholder systemd cgroup manager: inherits every default from
// `CgroupManager`, so all operations currently return "not supported!".
pub struct Manager {}
impl CgroupManager for Manager {}

View File

@ -0,0 +1,56 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use libc::*;
use serde;
#[macro_use]
use serde_derive;
use serde_json;
/// A device node to create inside the container
/// (mirrors runc's configs.Device).
#[derive(Serialize, Deserialize, Debug)]
pub struct Device {
    // Device type character, e.g. 'c' (char) or 'b' (block).
    #[serde(default)]
    r#type: char,
    // Path of the node inside the container.
    #[serde(default)]
    path: String,
    #[serde(default)]
    major: i64,
    #[serde(default)]
    minor: i64,
    // Cgroup-style permission string ("rwm").
    #[serde(default)]
    permissions: String,
    // File mode bits for the created node.
    #[serde(default)]
    file_mode: mode_t,
    #[serde(default)]
    uid: i32,
    #[serde(default)]
    gid: i32,
    // Whether the device cgroup rule allows (true) or denies access.
    #[serde(default)]
    allow: bool,
}
/// Identifies a block device by its major/minor numbers.
#[derive(Serialize, Deserialize, Debug)]
pub struct BlockIODevice {
    #[serde(default)]
    major: i64,
    #[serde(default)]
    minor: i64,
}
/// Per-device blkio weight setting.
#[derive(Serialize, Deserialize, Debug)]
pub struct WeightDevice {
    block: BlockIODevice,
    #[serde(default)]
    weight: u16,
    // Weight applied to the cgroup's own tasks (CFQ leaf weight).
    #[serde(default, rename = "leafWeight")]
    leaf_weight: u16,
}
/// Per-device blkio throttle rate (bytes or IOs per second depending on
/// which throttle list it appears in).
#[derive(Serialize, Deserialize, Debug)]
pub struct ThrottleDevice {
    block: BlockIODevice,
    #[serde(default)]
    rate: u64,
}

View File

@ -0,0 +1,494 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use serde;
#[macro_use]
use serde_derive;
use serde_json;
use protocols::oci::State as OCIState;
use crate::errors::*;
use std::collections::HashMap;
use std::fmt;
use std::path::PathBuf;
use std::time::Duration;
use nix::unistd;
use self::device::{Device, ThrottleDevice, WeightDevice};
use self::namespaces::Namespaces;
use crate::specconv::CreateOpts;
pub mod device;
pub mod namespaces;
pub mod validator;
/// POSIX resource limit (setrlimit) entry.
#[derive(Serialize, Deserialize, Debug)]
pub struct Rlimit {
    // RLIMIT_* constant identifying the resource.
    #[serde(default)]
    r#type: i32,
    #[serde(default)]
    hard: i32,
    #[serde(default)]
    soft: i32,
}
/// A single uid/gid mapping for user namespaces.
#[derive(Serialize, Deserialize, Debug)]
pub struct IDMap {
    #[serde(default)]
    container_id: i32,
    #[serde(default)]
    host_id: i32,
    // Number of consecutive ids covered by this mapping.
    #[serde(default)]
    size: i32,
}
type Action = i32;
/// Seccomp filter configuration: a default action plus per-syscall rules.
#[derive(Serialize, Deserialize, Debug)]
pub struct Seccomp {
    #[serde(default)]
    default_action: Action,
    #[serde(default)]
    architectures: Vec<String>,
    #[serde(default)]
    syscalls: Vec<Syscall>,
}
type Operator = i32;
/// Argument-matching rule for a seccomp syscall filter.
#[derive(Serialize, Deserialize, Debug)]
pub struct Arg {
    // Index of the syscall argument being compared.
    #[serde(default)]
    index: u32,
    #[serde(default)]
    value: u64,
    // Second operand, used by two-value operators (e.g. masked equality).
    #[serde(default)]
    value_two: u64,
    #[serde(default)]
    op: Operator,
}
/// Seccomp rule for a single syscall.
#[derive(Serialize, Deserialize, Debug)]
pub struct Syscall {
    #[serde(default, skip_serializing_if = "String::is_empty")]
    name: String,
    #[serde(default)]
    action: Action,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    args: Vec<Arg>,
}
/// Container configuration, modelled after runc's libcontainer `Config`.
/// The lifetime parameter threads through to `Cgroup`'s borrowed resources.
#[derive(Serialize, Deserialize, Debug)]
pub struct Config<'a> {
    // Use MS_MOVE + chroot instead of pivot_root when true.
    #[serde(default)]
    no_pivot_root: bool,
    // Signal delivered to the container init when its parent dies.
    #[serde(default)]
    parent_death_signal: i32,
    #[serde(default)]
    rootfs: String,
    #[serde(default)]
    readonlyfs: bool,
    #[serde(default, rename = "rootPropagation")]
    root_propagation: i32,
    #[serde(default)]
    mounts: Vec<Mount>,
    #[serde(default)]
    devices: Vec<Device>,
    #[serde(default)]
    mount_label: String,
    #[serde(default)]
    hostname: String,
    #[serde(default)]
    namespaces: Namespaces,
    #[serde(default)]
    capabilities: Option<Capabilities>,
    #[serde(default)]
    networks: Vec<Network>,
    #[serde(default)]
    routes: Vec<Route>,
    #[serde(default)]
    cgroups: Option<Cgroup<'a>>,
    #[serde(default, skip_serializing_if = "String::is_empty")]
    apparmor_profile: String,
    // SELinux process label.
    #[serde(default, skip_serializing_if = "String::is_empty")]
    process_label: String,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    rlimits: Vec<Rlimit>,
    #[serde(default)]
    oom_score_adj: Option<i32>,
    #[serde(default)]
    uid_mappings: Vec<IDMap>,
    #[serde(default)]
    gid_mappings: Vec<IDMap>,
    // Paths masked inside the container (e.g. under /proc).
    #[serde(default)]
    mask_paths: Vec<String>,
    #[serde(default)]
    readonly_paths: Vec<String>,
    #[serde(default)]
    sysctl: HashMap<String, String>,
    #[serde(default)]
    seccomp: Option<Seccomp>,
    #[serde(default)]
    no_new_privileges: bool,
    hooks: Option<Hooks>,
    #[serde(default)]
    version: String,
    #[serde(default)]
    labels: Vec<String>,
    #[serde(default)]
    no_new_keyring: bool,
    #[serde(default)]
    intel_rdt: Option<IntelRdt>,
    #[serde(default)]
    rootless_euid: bool,
    #[serde(default)]
    rootless_cgroups: bool,
}
/// Lifecycle hooks, grouped by phase.
// NOTE(review): `Hook` is a trait with no Serialize/Deserialize impls;
// deriving serde here looks like it cannot compile as-is — verify.
#[derive(Serialize, Deserialize, Debug)]
pub struct Hooks {
    prestart: Vec<Box<Hook>>,
    poststart: Vec<Box<Hook>>,
    poststop: Vec<Box<Hook>>,
}
/// The five Linux capability sets, each as a list of "CAP_*" names.
#[derive(Serialize, Deserialize, Debug)]
pub struct Capabilities {
    bounding: Vec<String>,
    effective: Vec<String>,
    inheritable: Vec<String>,
    permitted: Vec<String>,
    ambient: Vec<String>,
}
/// A lifecycle hook: receives the OCI container state and may fail,
/// aborting the corresponding lifecycle phase.
pub trait Hook {
    fn run(&self, state: &OCIState) -> Result<()>;
}
/// Placeholder for a function-backed hook (field not implemented yet).
pub struct FuncHook {
    // run: fn(&OCIState) -> Result<()>,
}
/// An external command to execute (used by command-backed hooks).
#[derive(Serialize, Deserialize, Debug)]
pub struct Command {
    #[serde(default)]
    path: String,
    #[serde(default)]
    args: Vec<String>,
    // Environment in "KEY=VALUE" form.
    #[serde(default)]
    env: Vec<String>,
    #[serde(default)]
    dir: String,
    // Maximum time the command may run before being killed.
    #[serde(default)]
    timeout: Duration,
}
/// Hook implemented by spawning an external command.
pub struct CommandHook {
    command: Command,
}
/// A mount to perform inside the container
/// (mirrors runc's configs.Mount).
#[derive(Serialize, Deserialize, Debug)]
pub struct Mount {
    #[serde(default)]
    source: String,
    #[serde(default)]
    destination: String,
    // Filesystem type ("bind", "proc", "tmpfs", ...).
    #[serde(default)]
    device: String,
    // MS_* mount flags, OR-ed together.
    #[serde(default)]
    flags: i32,
    // Propagation flags applied after mounting (MS_SHARED/MS_SLAVE/...).
    #[serde(default)]
    propagation_flags: Vec<i32>,
    // Filesystem-specific mount data string.
    #[serde(default)]
    data: String,
    #[serde(default)]
    relabel: String,
    #[serde(default)]
    extensions: i32,
    // Commands run on the host before/after this mount is performed.
    #[serde(default)]
    premount_cmds: Vec<Command>,
    #[serde(default)]
    postmount_cmds: Vec<Command>,
}
/// Per-page-size hugetlb usage limit (bytes).
#[derive(Serialize, Deserialize, Debug)]
pub struct HugepageLimit {
    // Page size as a string, e.g. "2MB".
    #[serde(default)]
    page_size: String,
    #[serde(default)]
    limit: u64,
}
/// Intel RDT (resctrl) schemata for L3 cache and memory bandwidth.
#[derive(Serialize, Deserialize, Debug)]
pub struct IntelRdt {
    #[serde(default, skip_serializing_if = "String::is_empty")]
    l3_cache_schema: String,
    #[serde(
        default,
        rename = "memBwSchema",
        skip_serializing_if = "String::is_empty"
    )]
    mem_bw_schema: String,
}
pub type FreezerState = String;
/// Cgroup placement and resource configuration for a container.
#[derive(Serialize, Deserialize, Debug)]
pub struct Cgroup<'a> {
    #[serde(default, skip_serializing_if = "String::is_empty")]
    name: String,
    #[serde(default, skip_serializing_if = "String::is_empty")]
    parent: String,
    #[serde(default)]
    path: String,
    // Prefix used when creating systemd scopes.
    #[serde(default)]
    scope_prefix: String,
    // subsystem name -> absolute cgroup path.
    paths: HashMap<String, String>,
    resource: &'a Resources<'a>,
}
/// Cgroup resource limits (memory, cpu, blkio, pids, ...), closely
/// following runc's configs.Resources layout.
#[derive(Serialize, Deserialize, Debug)]
pub struct Resources<'a> {
    #[serde(default)]
    allow_all_devices: bool,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    allowed_devices: Vec<&'a Device>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    denied_devices: Vec<&'a Device>,
    #[serde(default)]
    devices: Vec<&'a Device>,
    // Memory limits are in bytes; 0 means unset here (serde default).
    #[serde(default)]
    memory: i64,
    #[serde(default)]
    memory_reservation: i64,
    #[serde(default)]
    memory_swap: i64,
    #[serde(default)]
    kernel_memory: i64,
    #[serde(default)]
    kernel_memory_tcp: i64,
    #[serde(default)]
    cpu_shares: u64,
    #[serde(default)]
    cpu_quota: i64,
    #[serde(default)]
    cpu_period: u64,
    #[serde(default)]
    cpu_rt_quota: i64,
    #[serde(default)]
    cpu_rt_period: u64,
    #[serde(default)]
    cpuset_cpus: String,
    #[serde(default)]
    cpuset_mems: String,
    #[serde(default)]
    pids_limit: i64,
    #[serde(default)]
    blkio_weight: u64,
    #[serde(default)]
    blkio_leaf_weight: u64,
    #[serde(default)]
    blkio_weight_device: Vec<&'a WeightDevice>,
    #[serde(default)]
    blkio_throttle_read_bps_device: Vec<&'a ThrottleDevice>,
    #[serde(default)]
    blkio_throttle_write_bps_device: Vec<&'a ThrottleDevice>,
    #[serde(default)]
    blkio_throttle_read_iops_device: Vec<&'a ThrottleDevice>,
    #[serde(default)]
    blkio_throttle_write_iops_device: Vec<&'a ThrottleDevice>,
    #[serde(default)]
    freezer: FreezerState,
    #[serde(default)]
    hugetlb_limit: Vec<&'a HugepageLimit>,
    #[serde(default)]
    oom_kill_disable: bool,
    // NOTE: field name keeps the original "swapiness" spelling; renaming
    // would change the serialized form.
    #[serde(default)]
    memory_swapiness: u64,
    #[serde(default)]
    net_prio_ifpriomap: Vec<&'a IfPrioMap>,
    #[serde(default)]
    net_cls_classid_u: u32,
}
/// Network interface configuration for the container.
#[derive(Serialize, Deserialize, Debug)]
pub struct Network {
    // Interface kind, e.g. "loopback" or "veth".
    #[serde(default)]
    r#type: String,
    #[serde(default)]
    name: String,
    #[serde(default)]
    bridge: String,
    #[serde(default)]
    mac_address: String,
    #[serde(default)]
    address: String,
    #[serde(default)]
    gateway: String,
    #[serde(default)]
    ipv6_address: String,
    #[serde(default)]
    ipv6_gateway: String,
    #[serde(default)]
    mtu: i32,
    #[serde(default)]
    txqueuelen: i32,
    // Name of the host-side veth peer.
    #[serde(default)]
    host_interface_name: String,
    #[serde(default)]
    hairpin_mode: bool,
}
/// A routing-table entry to install inside the container.
#[derive(Serialize, Deserialize, Debug)]
pub struct Route {
    #[serde(default)]
    destination: String,
    #[serde(default)]
    source: String,
    #[serde(default)]
    gateway: String,
    #[serde(default)]
    interface_name: String,
}
/// net_prio interface->priority mapping entry.
#[derive(Serialize, Deserialize, Debug)]
pub struct IfPrioMap {
    #[serde(default)]
    interface: String,
    #[serde(default)]
    priority: i32,
}
impl IfPrioMap {
    /// Render as the "<interface> <priority>" line consumed by the
    /// net_prio cgroup's ifpriomap file.
    fn cgroup_string(&self) -> String {
        let mut line = String::with_capacity(self.interface.len() + 12);
        line.push_str(&self.interface);
        line.push(' ');
        line.push_str(&self.priority.to_string());
        line
    }
}
/*
impl Config {
fn new(opts: &CreateOpts) -> Result<Self> {
if opts.spec.is_none() {
return Err(ErrorKind::ErrorCode("invalid createopts!".into()));
}
let root = unistd::getcwd().chain_err(|| "cannot getwd")?;
let root = root.as_path().canonicalize().chain_err(||
"cannot resolve root into absolute path")?;
let mut root = root.into();
let cwd = root.clone();
let spec = opts.spec.as_ref().unwrap();
if spec.root.is_none() {
return Err(ErrorKind::ErrorCode("no root".into()));
}
let rootfs = PathBuf::from(&spec.root.as_ref().unwrap().path);
if rootfs.is_relative() {
root = format!("{}/{}", root, rootfs.into());
}
// handle annotations
let mut label = spec.annotations
.iter()
.map(|(key, value)| format!("{}={}", key, value)).collect();
label.push(format!("bundle={}", cwd));
let mut config = Config {
rootfs: root,
no_pivot_root: opts.no_pivot_root,
readonlyfs: spec.root.as_ref().unwrap().readonly,
hostname: spec.hostname.clone(),
labels: label,
no_new_keyring: opts.no_new_keyring,
rootless_euid: opts.rootless_euid,
rootless_cgroups: opts.rootless_cgroups,
};
config.mounts = Vec::new();
for m in &spec.mounts {
config.mounts.push(Mount::new(&cwd, &m)?);
}
config.devices = create_devices(&spec)?;
config.cgroups = Cgroups::new(&opts)?;
if spec.linux.as_ref().is_none() {
return Err(ErrorKind::ErrorCode("no linux configuration".into()));
}
let linux = spec.linux.as_ref().unwrap();
let propagation = MOUNTPROPAGATIONMAPPING.get(linux.rootfs_propagation);
if propagation.is_none() {
Err(ErrorKind::ErrorCode("rootfs propagation not support".into()));
}
config.root_propagation = propagation.unwrap();
if config.no_pivot_root && (config.root_propagation & MSFlags::MSPRIVATE != 0) {
return Err(ErrorKind::ErrorCode("[r]private is not safe without pivot root".into()));
}
// handle namespaces
let m: HashMap<String, String> = HashMap::new();
for ns in &linux.namespaces {
if NAMESPACEMAPPING.get(&ns.r#type.as_str()).is_none() {
return Err(ErrorKind::ErrorCode("namespace don't exist".into()));
}
if m.get(&ns.r#type).is_some() {
return Err(ErrorKind::ErrorCode(format!("duplicate ns {}", ns.r#type)));
}
m.insert(ns.r#type, ns.path);
}
if m.contains_key(oci::NETWORKNAMESPACE) {
let path = m.get(oci::NETWORKNAMESPACE).unwrap();
if path == "" {
config.networks = vec![Network {
r#type: "loopback",
}];
}
}
if m.contains_key(oci::USERNAMESPACE) {
setup_user_namespace(&spec, &mut config)?;
}
config.namespaces = m.iter().map(|(key, value)| Namespace {
r#type: key,
path: value,
}).collect();
config.mask_paths = linux.mask_paths;
config.readonly_path = linux.readonly_path;
config.mount_label = linux.mount_label;
config.sysctl = linux.sysctl;
config.seccomp = None;
config.intelrdt = None;
if spec.process.is_some() {
let process = spec.process.as_ref().unwrap();
config.oom_score_adj = process.oom_score_adj;
config.process_label = process.selinux_label.clone();
if process.capabilities.as_ref().is_some() {
let cap = process.capabilities.as_ref().unwrap();
config.capabilities = Some(Capabilities {
..cap
})
}
}
config.hooks = None;
config.version = spec.version;
Ok(config)
}
}
impl Mount {
fn new(cwd: &str, m: &oci::Mount) -> Result<Self> {
}
}
*/

View File

@ -0,0 +1,46 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use serde;
#[macro_use]
use serde_derive;
use serde_json;
use std::collections::HashMap;
#[macro_use]
use lazy_static;
pub type NamespaceType = String;
pub type Namespaces = Vec<Namespace>;
/// A Linux namespace entry: its OCI type name plus an optional path to
/// an existing namespace to join (empty means create a new one).
#[derive(Serialize, Deserialize, Debug)]
pub struct Namespace {
    #[serde(default)]
    r#type: NamespaceType,
    #[serde(default)]
    path: String,
}
// clone(2)-style namespace type names. `&'static` is redundant on a
// `const` &str (the 'static lifetime is implied), so it is elided.
pub const NEWNET: &str = "NEWNET";
pub const NEWPID: &str = "NEWPID";
pub const NEWNS: &str = "NEWNS";
pub const NEWUTS: &str = "NEWUTS";
pub const NEWUSER: &str = "NEWUSER";
pub const NEWCGROUP: &str = "NEWCGROUP";
pub const NEWIPC: &str = "NEWIPC";
lazy_static! {
    /// Map an OCI namespace type to its /proc/<pid>/ns entry name.
    static ref TYPETONAME: HashMap<&'static str, &'static str> = [
        ("pid", "pid"),
        ("network", "net"),
        ("mount", "mnt"),
        ("user", "user"),
        ("uts", "uts"),
        ("ipc", "ipc"),
        ("cgroup", "cgroup"),
    ]
    .iter()
    .cloned()
    .collect();
}

View File

@ -0,0 +1,23 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use crate::configs::Config;
use std::io::Result;
/// Validation hook for a container `Config`.
pub trait Validator {
    /// Default implementation accepts every config unconditionally.
    /// The parameter is underscored to silence the unused-variable
    /// warning in this default body; overriders may name it freely.
    fn validate(&self, _config: &Config) -> Result<()> {
        Ok(())
    }
}
/// Stateless validator that currently relies entirely on the trait's
/// accept-everything default.
pub struct ConfigValidator {}
impl Validator for ConfigValidator {}
impl ConfigValidator {
    fn new() -> Self {
        ConfigValidator {}
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,32 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
// Crate-wide error definitions, generated by the error-chain macro.
error_chain! {
    types {
        Error, ErrorKind, ResultExt, Result;
    }
    // Foreign error types convertible into `Error` via From, which lets
    // `?` propagate results from these libraries directly.
    foreign_links {
        Io(std::io::Error);
        Nix(nix::Error);
        Ffi(std::ffi::NulError);
        Caps(caps::errors::Error);
        Serde(serde_json::Error);
        UTF8(std::string::FromUtf8Error);
        Parse(std::num::ParseIntError);
        Scanfmt(scan_fmt::parse::ScanError);
        Ip(std::net::AddrParseError);
        Regex(regex::Error);
    }
    // Errors originating in this crate; ErrorCode carries a free-form message.
    errors {
        ErrorCode(t: String) {
            description("Error Code")
            display("Error Code: '{}'", t)
        }
    }
}

View File

@ -0,0 +1,586 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
// #![allow(unused_attributes)]
// #![allow(unused_imports)]
// #![allow(unused_variables)]
// #![allow(unused_mut)]
#![allow(dead_code)]
// #![allow(deprecated)]
// #![allow(unused_must_use)]
#![allow(non_upper_case_globals)]
// #![allow(unused_comparisons)]
#[macro_use]
extern crate error_chain;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
extern crate caps;
extern crate protocols;
#[macro_use]
extern crate scopeguard;
extern crate prctl;
#[macro_use]
extern crate lazy_static;
extern crate libc;
extern crate protobuf;
#[macro_use]
extern crate slog;
#[macro_use]
extern crate scan_fmt;
extern crate oci;
extern crate path_absolutize;
extern crate regex;
// Convenience macro to obtain the scope logger, pre-tagged with the
// "rustjail" subsystem key so log lines can be attributed to this crate.
macro_rules! sl {
    () => {
        slog_scope::logger().new(o!("subsystem" => "rustjail"))
    };
}
pub mod cgroups;
pub mod container;
pub mod errors;
pub mod mount;
pub mod process;
pub mod specconv;
// pub mod sync;
pub mod capabilities;
pub mod validator;
// pub mod factory;
//pub mod configs;
// pub mod devices;
// pub mod init;
// pub mod rootfs;
// pub mod capabilities;
// pub mod console;
// pub mod stats;
// pub mod user;
//pub mod intelrdt;
// construtc ociSpec from grpcSpec, which is needed for hook
// execution. since hooks read config.json
use std::collections::HashMap;
use std::mem::MaybeUninit;
use oci::{
Box as ociBox, Hooks as ociHooks, Linux as ociLinux, LinuxCapabilities as ociLinuxCapabilities,
Mount as ociMount, POSIXRlimit as ociPOSIXRlimit, Process as ociProcess, Root as ociRoot,
Spec as ociSpec, User as ociUser,
};
use protocols::oci::{
Hooks as grpcHooks, Linux as grpcLinux, Mount as grpcMount, Process as grpcProcess,
Root as grpcRoot, Spec as grpcSpec,
};
/// Convert a gRPC `Process` message into the OCI spec `Process`.
fn process_grpc_to_oci(p: &grpcProcess) -> ociProcess {
    let console_size = p.ConsoleSize.as_ref().map(|c| ociBox {
        height: c.Height,
        width: c.Width,
    });
    // When no user is supplied, build an explicit empty one. The previous
    // `MaybeUninit::zeroed().assume_init()` is undefined behaviour for a
    // struct holding a String/Vec (zeroed is not a valid value for them).
    let user = match p.User.as_ref() {
        Some(u) => ociUser {
            uid: u.UID,
            gid: u.GID,
            additional_gids: u.AdditionalGids.clone(),
            username: u.Username.clone(),
        },
        None => ociUser {
            uid: 0,
            gid: 0,
            additional_gids: Vec::new(),
            username: String::new(),
        },
    };
    let capabilities = p.Capabilities.as_ref().map(|cap| ociLinuxCapabilities {
        bounding: cap.Bounding.clone().into_vec(),
        effective: cap.Effective.clone().into_vec(),
        inheritable: cap.Inheritable.clone().into_vec(),
        permitted: cap.Permitted.clone().into_vec(),
        ambient: cap.Ambient.clone().into_vec(),
    });
    let rlimits = p
        .Rlimits
        .iter()
        .map(|lm| ociPOSIXRlimit {
            r#type: lm.Type.clone(),
            hard: lm.Hard,
            soft: lm.Soft,
        })
        .collect();
    ociProcess {
        terminal: p.Terminal,
        console_size,
        user,
        args: p.Args.clone().into_vec(),
        env: p.Env.clone().into_vec(),
        cwd: p.Cwd.clone(),
        capabilities,
        rlimits,
        no_new_privileges: p.NoNewPrivileges,
        apparmor_profile: p.ApparmorProfile.clone(),
        // Wire value is wider than i32; out-of-range scores truncate.
        oom_score_adj: Some(p.OOMScoreAdj as i32),
        selinux_label: p.SelinuxLabel.clone(),
    }
}
fn process_oci_to_grpc(_p: ociProcess) -> grpcProcess {
// dont implement it for now
unsafe { MaybeUninit::zeroed().assume_init() }
}
/// Convert the gRPC `Root` message into the OCI spec `Root`.
fn root_grpc_to_oci(root: &grpcRoot) -> ociRoot {
    let path = root.Path.clone();
    let readonly = root.Readonly;
    ociRoot { path, readonly }
}
fn root_oci_to_grpc(_root: &ociRoot) -> grpcRoot {
unsafe { MaybeUninit::zeroed().assume_init() }
}
/// Convert a gRPC `Mount` message into the OCI spec `Mount`.
fn mount_grpc_to_oci(m: &grpcMount) -> ociMount {
    ociMount {
        source: m.source.clone(),
        destination: m.destination.clone(),
        options: m.options.clone().into_vec(),
        // protobuf renames `type` to `field_type` to dodge the Rust keyword.
        r#type: m.field_type.clone(),
    }
}
fn mount_oci_to_grpc(_m: &ociMount) -> grpcMount {
unsafe { MaybeUninit::zeroed().assume_init() }
}
use oci::Hook as ociHook;
use protocols::oci::Hook as grpcHook;
/// Convert a slice of gRPC hooks into OCI hooks.
fn hook_grpc_to_oci(h: &[grpcHook]) -> Vec<ociHook> {
    h.iter()
        .map(|e| ociHook {
            path: e.Path.clone(),
            args: e.Args.clone().into_vec(),
            env: e.Env.clone().into_vec(),
            // Wire timeout is wider than i32; out-of-range values truncate.
            timeout: Some(e.Timeout as i32),
        })
        .collect()
}
/// Convert the full gRPC `Hooks` message into its OCI counterpart,
/// translating each phase's hook list in place.
fn hooks_grpc_to_oci(h: &grpcHooks) -> ociHooks {
    ociHooks {
        prestart: hook_grpc_to_oci(h.Prestart.as_ref()),
        poststart: hook_grpc_to_oci(h.Poststart.as_ref()),
        poststop: hook_grpc_to_oci(h.Poststop.as_ref()),
    }
}
fn hooks_oci_to_grpc(_h: &ociHooks) -> grpcHooks {
unsafe { MaybeUninit::zeroed().assume_init() }
}
use oci::{
LinuxDevice as ociLinuxDevice, LinuxIDMapping as ociLinuxIDMapping,
LinuxIntelRdt as ociLinuxIntelRdt, LinuxNamespace as ociLinuxNamespace,
LinuxResources as ociLinuxResources, LinuxSeccomp as ociLinuxSeccomp,
};
use protocols::oci::{
LinuxIDMapping as grpcLinuxIDMapping, LinuxResources as grpcLinuxResources,
LinuxSeccomp as grpcLinuxSeccomp,
};
/// Convert a single gRPC uid/gid mapping into the OCI spec form.
fn idmap_grpc_to_oci(im: &grpcLinuxIDMapping) -> ociLinuxIDMapping {
    ociLinuxIDMapping {
        container_id: im.ContainerID,
        host_id: im.HostID,
        size: im.Size,
    }
}
fn idmaps_grpc_to_oci(ims: &[grpcLinuxIDMapping]) -> Vec<ociLinuxIDMapping> {
let mut r = Vec::new();
for im in ims.iter() {
r.push(idmap_grpc_to_oci(im));
}
r
}
use oci::{
LinuxBlockIO as ociLinuxBlockIO, LinuxBlockIODevice as ociLinuxBlockIODevice,
LinuxCPU as ociLinuxCPU, LinuxDeviceCgroup as ociLinuxDeviceCgroup,
LinuxHugepageLimit as ociLinuxHugepageLimit,
LinuxInterfacePriority as ociLinuxInterfacePriority, LinuxMemory as ociLinuxMemory,
LinuxNetwork as ociLinuxNetwork, LinuxPids as ociLinuxPids,
LinuxThrottleDevice as ociLinuxThrottleDevice, LinuxWeightDevice as ociLinuxWeightDevice,
};
use protocols::oci::{
LinuxBlockIO as grpcLinuxBlockIO, LinuxThrottleDevice as grpcLinuxThrottleDevice,
LinuxWeightDevice as grpcLinuxWeightDevice,
};
/// Convert gRPC blkio throttle-device entries into OCI form.
fn throttle_devices_grpc_to_oci(tds: &[grpcLinuxThrottleDevice]) -> Vec<ociLinuxThrottleDevice> {
    tds.iter()
        .map(|td| ociLinuxThrottleDevice {
            blk: ociLinuxBlockIODevice {
                major: td.Major,
                minor: td.Minor,
            },
            rate: td.Rate,
        })
        .collect()
}
/// Convert gRPC blkio weight-device entries into OCI form.
fn weight_devices_grpc_to_oci(wds: &[grpcLinuxWeightDevice]) -> Vec<ociLinuxWeightDevice> {
    wds.iter()
        .map(|wd| ociLinuxWeightDevice {
            blk: ociLinuxBlockIODevice {
                major: wd.Major,
                minor: wd.Minor,
            },
            // Wire weights are wider than u16; out-of-range values truncate.
            weight: Some(wd.Weight as u16),
            leaf_weight: Some(wd.LeafWeight as u16),
        })
        .collect()
}
/// Convert the gRPC blkio cgroup settings into the OCI spec form.
fn blockio_grpc_to_oci(blk: &grpcLinuxBlockIO) -> ociLinuxBlockIO {
    ociLinuxBlockIO {
        // Wire weights are wider than u16; out-of-range values truncate.
        weight: Some(blk.Weight as u16),
        leaf_weight: Some(blk.LeafWeight as u16),
        weight_device: weight_devices_grpc_to_oci(blk.WeightDevice.as_ref()),
        throttle_read_bps_device: throttle_devices_grpc_to_oci(blk.ThrottleReadBpsDevice.as_ref()),
        throttle_write_bps_device: throttle_devices_grpc_to_oci(
            blk.ThrottleWriteBpsDevice.as_ref(),
        ),
        throttle_read_iops_device: throttle_devices_grpc_to_oci(
            blk.ThrottleReadIOPSDevice.as_ref(),
        ),
        throttle_write_iops_device: throttle_devices_grpc_to_oci(
            blk.ThrottleWriteIOPSDevice.as_ref(),
        ),
    }
}
fn resources_grpc_to_oci(res: &grpcLinuxResources) -> ociLinuxResources {
let devices = {
let mut d = Vec::new();
for dev in res.Devices.iter() {
let major = if dev.Major == -1 {
None
} else {
Some(dev.Major)
};
let minor = if dev.Minor == -1 {
None
} else {
Some(dev.Minor)
};
d.push(ociLinuxDeviceCgroup {
allow: dev.Allow,
r#type: dev.Type.clone(),
major,
minor,
access: dev.Access.clone(),
});
}
d
};
let memory = if res.Memory.is_some() {
let mem = res.Memory.as_ref().unwrap();
Some(ociLinuxMemory {
limit: Some(mem.Limit),
reservation: Some(mem.Reservation),
swap: Some(mem.Swap),
kernel: Some(mem.Kernel),
kernel_tcp: Some(mem.KernelTCP),
swapiness: Some(mem.Swappiness as i64),
disable_oom_killer: Some(mem.DisableOOMKiller),
})
} else {
None
};
let cpu = if res.CPU.is_some() {
let c = res.CPU.as_ref().unwrap();
Some(ociLinuxCPU {
shares: Some(c.Shares),
quota: Some(c.Quota),
period: Some(c.Period),
realtime_runtime: Some(c.RealtimeRuntime),
realtime_period: Some(c.RealtimePeriod),
cpus: c.Cpus.clone(),
mems: c.Mems.clone(),
})
} else {
None
};
let pids = if res.Pids.is_some() {
let p = res.Pids.as_ref().unwrap();
Some(ociLinuxPids { limit: p.Limit })
} else {
None
};
let block_io = if res.BlockIO.is_some() {
let blk = res.BlockIO.as_ref().unwrap();
// copy LinuxBlockIO
Some(blockio_grpc_to_oci(blk))
} else {
None
};
let hugepage_limits = {
let mut r = Vec::new();
for hl in res.HugepageLimits.iter() {
r.push(ociLinuxHugepageLimit {
page_size: hl.Pagesize.clone(),
limit: hl.Limit,
});
}
r
};
let network = if res.Network.is_some() {
let net = res.Network.as_ref().unwrap();
let priorities = {
let mut r = Vec::new();
for pr in net.Priorities.iter() {
r.push(ociLinuxInterfacePriority {
name: pr.Name.clone(),
priority: pr.Priority,
});
}
r
};
Some(ociLinuxNetwork {
class_id: Some(net.ClassID),
priorities,
})
} else {
None
};
ociLinuxResources {
devices,
memory,
cpu,
pids,
block_io,
hugepage_limits,
network,
rdma: HashMap::new(),
}
}
use oci::{LinuxSeccompArg as ociLinuxSeccompArg, LinuxSyscall as ociLinuxSyscall};
// Convert a gRPC LinuxSeccomp message (default action, architectures and
// per-syscall rules with their argument filters) into the OCI form.
fn seccomp_grpc_to_oci(sec: &grpcLinuxSeccomp) -> ociLinuxSeccomp {
    let syscalls = sec
        .Syscalls
        .iter()
        .map(|sys| ociLinuxSyscall {
            names: sys.Names.clone().into_vec(),
            action: sys.Action.clone(),
            args: sys
                .Args
                .iter()
                .map(|arg| ociLinuxSeccompArg {
                    index: arg.Index as u32,
                    value: arg.Value,
                    value_two: arg.ValueTwo,
                    op: arg.Op.clone(),
                })
                .collect(),
        })
        .collect();
    ociLinuxSeccomp {
        default_action: sec.DefaultAction.clone(),
        architectures: sec.Architectures.clone().into_vec(),
        syscalls,
    }
}
fn linux_grpc_to_oci(l: &grpcLinux) -> ociLinux {
let uid_mappings = idmaps_grpc_to_oci(l.UIDMappings.as_ref());
let gid_mappings = idmaps_grpc_to_oci(l.GIDMappings.as_ref());
let resources = if l.Resources.is_some() {
Some(resources_grpc_to_oci(l.Resources.as_ref().unwrap()))
} else {
None
};
let seccomp = if l.Seccomp.is_some() {
Some(seccomp_grpc_to_oci(l.Seccomp.as_ref().unwrap()))
} else {
None
};
let namespaces = {
let mut r = Vec::new();
for ns in l.Namespaces.iter() {
r.push(ociLinuxNamespace {
r#type: ns.Type.clone(),
path: ns.Path.clone(),
});
}
r
};
let devices = {
let mut r = Vec::new();
for d in l.Devices.iter() {
r.push(ociLinuxDevice {
path: d.Path.clone(),
r#type: d.Type.clone(),
major: d.Major,
minor: d.Minor,
file_mode: Some(d.FileMode),
uid: Some(d.UID),
gid: Some(d.GID),
});
}
r
};
let intel_rdt = if l.IntelRdt.is_some() {
let rdt = l.IntelRdt.as_ref().unwrap();
Some(ociLinuxIntelRdt {
l3_cache_schema: rdt.L3CacheSchema.clone(),
})
} else {
None
};
ociLinux {
uid_mappings,
gid_mappings,
sysctl: l.Sysctl.clone(),
resources,
cgroups_path: l.CgroupsPath.clone(),
namespaces,
devices,
seccomp,
rootfs_propagation: l.RootfsPropagation.clone(),
masked_paths: l.MaskedPaths.clone().into_vec(),
readonly_paths: l.ReadonlyPaths.clone().into_vec(),
mount_label: l.MountLabel.clone(),
intel_rdt,
}
}
// Reverse (OCI -> gRPC) conversion is not implemented: the agent only
// needs the gRPC -> OCI direction at runtime, so return an empty message.
fn linux_oci_to_grpc(_l: &ociLinux) -> grpcLinux {
    grpcLinux::default()
}
pub fn grpc_to_oci(grpc: &grpcSpec) -> ociSpec {
// process
let process = if grpc.Process.is_some() {
Some(process_grpc_to_oci(grpc.Process.as_ref().unwrap()))
} else {
None
};
// root
let root = if grpc.Root.is_some() {
Some(root_grpc_to_oci(grpc.Root.as_ref().unwrap()))
} else {
None
};
// mounts
let mounts = {
let mut r = Vec::new();
for m in grpc.Mounts.iter() {
r.push(mount_grpc_to_oci(m));
}
r
};
// hooks
let hooks = if grpc.Hooks.is_some() {
Some(hooks_grpc_to_oci(grpc.Hooks.as_ref().unwrap()))
} else {
None
};
// Linux
let linux = if grpc.Linux.is_some() {
Some(linux_grpc_to_oci(grpc.Linux.as_ref().unwrap()))
} else {
None
};
ociSpec {
version: grpc.Version.clone(),
process,
root,
hostname: grpc.Hostname.clone(),
mounts,
hooks,
annotations: grpc.Annotations.clone(),
linux,
solaris: None,
windows: None,
vm: None,
}
}
pub fn oci_to_grpc(_oci: &ociSpec) -> grpcSpec {
unsafe { MaybeUninit::zeroed().assume_init() }
}
#[cfg(test)]
mod tests {
    // Placeholder test that keeps the test harness wired up for this
    // module until real conversion tests are written.
    #[test]
    fn it_works() {
        assert_eq!(2 + 2, 4);
    }
}

View File

@ -0,0 +1,738 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use libc::uid_t;
use nix::errno::Errno;
use nix::fcntl::{self, OFlag};
use nix::mount::{self, MntFlags, MsFlags};
use nix::sys::stat::{self, Mode, SFlag};
use nix::unistd::{self, Gid, Uid};
use nix::NixPath;
use protocols::oci::{LinuxDevice, Mount, Spec};
use std::collections::{HashMap, HashSet};
use std::fs::{self, OpenOptions};
use std::os::unix;
use std::path::{Path, PathBuf};
use path_absolutize::*;
use scan_fmt;
use std::fs::File;
use std::io::{BufRead, BufReader};
use crate::container::DEFAULT_DEVICES;
use crate::errors::*;
use lazy_static;
use std::string::ToString;
use protobuf::{CachedSize, RepeatedField, UnknownFields};
use slog::Logger;
// Info reveals information about a particular mounted filesystem. This
// struct is populated from the content in the /proc/<pid>/mountinfo file.
pub struct Info {
    id: i32,             // unique mount ID
    parent: i32,         // mount ID of the parent mount
    major: i32,          // major number of the backing device
    minor: i32,          // minor number of the backing device
    root: String,        // root of the mount within the filesystem
    mount_point: String, // mount point relative to the process root
    opts: String,        // per-mount options
    optional: String,    // optional fields; empty when mountinfo shows "-"
    fstype: String,      // filesystem type
    source: String,      // filesystem-specific source
    vfs_opts: String,    // per-superblock options
}
// scan_fmt pattern for the first eight fields of a /proc/<pid>/mountinfo
// line: "<id> <parent> <major>:<minor> <root> <mount point> <opts> <optional>".
const MOUNTINFOFORMAT: &'static str = "{d} {d} {d}:{d} {} {} {} {}";
lazy_static! {
    // OCI rootfs-propagation names mapped to the mount(2) flags that
    // implement them (always applied recursively).
    static ref PROPAGATION: HashMap<&'static str, MsFlags> = {
        let mut m = HashMap::new();
        m.insert("shared", MsFlags::MS_SHARED | MsFlags::MS_REC);
        m.insert("private", MsFlags::MS_PRIVATE | MsFlags::MS_REC);
        m.insert("slave", MsFlags::MS_SLAVE | MsFlags::MS_REC);
        m
    };
    // Mount option strings mapped to (clear, flag) pairs used by
    // parse_mount(): when the bool is true the flag is cleared from the
    // accumulated flags, otherwise it is set.
    static ref OPTIONS: HashMap<&'static str, (bool, MsFlags)> = {
        let mut m = HashMap::new();
        m.insert("defaults", (false, MsFlags::empty()));
        m.insert("ro", (false, MsFlags::MS_RDONLY));
        m.insert("rw", (true, MsFlags::MS_RDONLY));
        m.insert("suid", (true, MsFlags::MS_NOSUID));
        m.insert("nosuid", (false, MsFlags::MS_NOSUID));
        m.insert("dev", (true, MsFlags::MS_NODEV));
        m.insert("nodev", (false, MsFlags::MS_NODEV));
        m.insert("exec", (true, MsFlags::MS_NOEXEC));
        m.insert("noexec", (false, MsFlags::MS_NOEXEC));
        m.insert("sync", (false, MsFlags::MS_SYNCHRONOUS));
        m.insert("async", (true, MsFlags::MS_SYNCHRONOUS));
        m.insert("dirsync", (false, MsFlags::MS_DIRSYNC));
        m.insert("remount", (false, MsFlags::MS_REMOUNT));
        m.insert("mand", (false, MsFlags::MS_MANDLOCK));
        m.insert("nomand", (true, MsFlags::MS_MANDLOCK));
        m.insert("atime", (true, MsFlags::MS_NOATIME));
        m.insert("noatime", (false, MsFlags::MS_NOATIME));
        m.insert("diratime", (true, MsFlags::MS_NODIRATIME));
        m.insert("nodiratime", (false, MsFlags::MS_NODIRATIME));
        m.insert("bind", (false, MsFlags::MS_BIND));
        m.insert("rbind", (false, MsFlags::MS_BIND | MsFlags::MS_REC));
        m.insert("unbindable", (false, MsFlags::MS_UNBINDABLE));
        m.insert(
            "runbindable",
            (false, MsFlags::MS_UNBINDABLE | MsFlags::MS_REC),
        );
        m.insert("private", (false, MsFlags::MS_PRIVATE));
        m.insert("rprivate", (false, MsFlags::MS_PRIVATE | MsFlags::MS_REC));
        m.insert("shared", (false, MsFlags::MS_SHARED));
        m.insert("rshared", (false, MsFlags::MS_SHARED | MsFlags::MS_REC));
        m.insert("slave", (false, MsFlags::MS_SLAVE));
        m.insert("rslave", (false, MsFlags::MS_SLAVE | MsFlags::MS_REC));
        m.insert("relatime", (false, MsFlags::MS_RELATIME));
        m.insert("norelatime", (true, MsFlags::MS_RELATIME));
        m.insert("strictatime", (false, MsFlags::MS_STRICTATIME));
        m.insert("nostrictatime", (true, MsFlags::MS_STRICTATIME));
        m
    };
}
// Prepare the container rootfs: apply the requested rootfs propagation,
// bind-mount the rootfs onto itself, perform every mount from the spec
// (cgroups get special handling), then create default symlinks, device
// nodes and /dev/ptmx inside the rootfs.
//
// `cpath` maps cgroup subsystem names to their host mount points and
// `mounts` maps them to their in-container destinations; both are only
// used for "cgroup" type mounts. `bind_device` selects bind-mounting
// host device nodes instead of mknod(2).
pub fn init_rootfs(
    logger: &Logger,
    spec: &Spec,
    cpath: &HashMap<String, String>,
    mounts: &HashMap<String, String>,
    bind_device: bool,
) -> Result<()> {
    lazy_static::initialize(&OPTIONS);
    lazy_static::initialize(&PROPAGATION);
    lazy_static::initialize(&LINUXDEVICETYPE);
    let linux = spec.Linux.as_ref().unwrap();
    // Default to rslave propagation when the spec names none we know.
    let mut flags = MsFlags::MS_REC;
    match PROPAGATION.get(&linux.RootfsPropagation.as_str()) {
        Some(fl) => flags |= *fl,
        None => flags |= MsFlags::MS_SLAVE,
    }
    let rootfs = spec.Root.as_ref().unwrap().Path.as_str();
    let root = fs::canonicalize(rootfs)?;
    let rootfs = root.to_str().unwrap();
    // Apply propagation to "/" first, then make the rootfs a bind mount
    // of itself so it can later be pivoted into.
    mount::mount(None::<&str>, "/", None::<&str>, flags, None::<&str>)?;
    mount::mount(
        Some(rootfs),
        rootfs,
        None::<&str>,
        MsFlags::MS_BIND | MsFlags::MS_REC,
        None::<&str>,
    )?;
    for m in &spec.Mounts {
        let (mut flags, data) = parse_mount(&m);
        // Reject destinations that could escape the rootfs.
        if !m.destination.starts_with("/") || m.destination.contains("..") {
            return Err(ErrorKind::Nix(nix::Error::Sys(Errno::EINVAL)).into());
        }
        if m.field_type == "cgroup" {
            mount_cgroups(logger, m, rootfs, flags, &data, cpath, mounts)?;
        } else {
            if m.destination == "/dev" {
                // /dev must stay writable so device nodes can be created.
                flags &= !MsFlags::MS_RDONLY;
            }
            mount_from(&m, &rootfs, flags, &data, "")?;
        }
    }
    // Device/symlink setup uses paths relative to the rootfs.
    let olddir = unistd::getcwd()?;
    unistd::chdir(rootfs)?;
    default_symlinks()?;
    create_devices(&linux.Devices, bind_device)?;
    ensure_ptmx()?;
    unistd::chdir(&olddir)?;
    Ok(())
}
// Mount the cgroup hierarchy inside the rootfs: a tmpfs at the requested
// destination, one bind mount per cgroup subsystem (taken from `cpath`),
// and compatibility symlinks for combined subsystems such as
// "cpu,cpuacct". Finally the whole tree is remounted read-only when the
// original mount flags asked for it.
fn mount_cgroups(
    logger: &Logger,
    m: &Mount,
    rootfs: &str,
    flags: MsFlags,
    _data: &str,
    cpath: &HashMap<String, String>,
    mounts: &HashMap<String, String>,
) -> Result<()> {
    // mount tmpfs
    let ctm = Mount {
        source: "tmpfs".to_string(),
        field_type: "tmpfs".to_string(),
        destination: m.destination.clone(),
        options: RepeatedField::default(),
        unknown_fields: UnknownFields::default(),
        cached_size: CachedSize::default(),
    };
    let cflags = MsFlags::MS_NOEXEC | MsFlags::MS_NOSUID | MsFlags::MS_NODEV;
    info!(logger, "tmpfs");
    mount_from(&ctm, rootfs, cflags, "", "")?;
    // Symlink creation below uses paths relative to the rootfs.
    let olddir = unistd::getcwd()?;
    unistd::chdir(rootfs)?;
    // Host mount points already bound; avoids mounting a combined
    // subsystem (e.g. cpu,cpuacct) more than once.
    let mut srcs: HashSet<String> = HashSet::new();
    // bind mount cgroups
    for (key, mount) in mounts.iter() {
        info!(logger, "{}", key);
        let source = if cpath.get(key).is_some() {
            cpath.get(key).unwrap()
        } else {
            continue;
        };
        // Last path component of the host mount is the directory name
        // used inside the container.
        let base = if let Some(o) = mount.rfind('/') {
            &mount[o + 1..]
        } else {
            &mount[..]
        };
        let destination = format!("{}/{}", m.destination.as_str(), base);
        if srcs.contains(source) {
            // already mounted, xxx,yyy style cgroup
            if key != base {
                let src = format!("{}/{}", m.destination.as_str(), key);
                // Strip the leading '/' so the symlink is created
                // relative to the rootfs (current directory).
                unix::fs::symlink(destination.as_str(), &src[1..])?;
            }
            continue;
        }
        srcs.insert(source.to_string());
        info!(logger, "{}", destination.as_str());
        let bm = Mount {
            source: source.to_string(),
            field_type: "bind".to_string(),
            destination: destination.clone(),
            options: RepeatedField::default(),
            unknown_fields: UnknownFields::default(),
            cached_size: CachedSize::default(),
        };
        mount_from(
            &bm,
            rootfs,
            flags | MsFlags::MS_REC | MsFlags::MS_BIND,
            "",
            "",
        )?;
        // Subsystem alias (e.g. "cpu" -> "cpu,cpuacct"): add a symlink.
        if key != base {
            let src = format!("{}/{}", m.destination.as_str(), key);
            match unix::fs::symlink(destination.as_str(), &src[1..]) {
                Err(e) => {
                    info!(
                        logger,
                        "symlink: {} {} err: {}",
                        key,
                        destination.as_str(),
                        e.to_string()
                    );
                    return Err(e.into());
                }
                Ok(_) => {}
            }
        }
    }
    unistd::chdir(&olddir)?;
    // Honor a read-only request by remounting the cgroup root.
    if flags.contains(MsFlags::MS_RDONLY) {
        let dest = format!("{}{}", rootfs, m.destination.as_str());
        mount::mount(
            Some(dest.as_str()),
            dest.as_str(),
            None::<&str>,
            flags | MsFlags::MS_BIND | MsFlags::MS_REMOUNT,
            None::<&str>,
        )?;
    }
    Ok(())
}
// Make `path` the new root filesystem using pivot_root(2) with new root
// and old root at the same location, then detach the old root. The two
// directory fds are kept open so we can fchdir into the new root after
// the old one is unmounted; defer! closes them on every exit path.
pub fn pivot_rootfs<P: ?Sized + NixPath>(path: &P) -> Result<()> {
    let oldroot = fcntl::open("/", OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::empty())?;
    defer!(unistd::close(oldroot).unwrap());
    let newroot = fcntl::open(path, OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::empty())?;
    defer!(unistd::close(newroot).unwrap());
    // new_root and put_old may be the same directory (see pivot_root(2)).
    unistd::pivot_root(path, path)?;
    // Drop the old root; MNT_DETACH allows it even while busy.
    mount::umount2("/", MntFlags::MNT_DETACH)?;
    unistd::fchdir(newroot)?;
    // Restore a sane default umask for subsequent file creation.
    stat::umask(Mode::from_bits_truncate(0o022));
    Ok(())
}
// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts.
//
// Each line is split on " - ": the first half holds the fixed fields
// matched by MOUNTINFOFORMAT, the second half holds fstype, source and
// per-superblock options. Lines that do not have exactly one separator
// are treated as a parse failure.
fn parse_mount_table() -> Result<Vec<Info>> {
    let file = File::open("/proc/self/mountinfo")?;
    let reader = BufReader::new(file);
    let mut infos = Vec::new();
    for (_index, line) in reader.lines().enumerate() {
        let line = line?;
        let (id, parent, major, minor, root, mount_point, opts, optional) = scan_fmt!(
            &line,
            MOUNTINFOFORMAT,
            i32,
            i32,
            i32,
            i32,
            String,
            String,
            String,
            String
        )?;
        let fields: Vec<&str> = line.split(" - ").collect();
        if fields.len() == 2 {
            let (fstype, source, vfs_opts) =
                scan_fmt!(fields[1], "{} {} {}", String, String, String)?;
            // mountinfo uses "-" to mark "no optional fields".
            let mut optional_new = String::new();
            if optional != "-" {
                optional_new = optional;
            }
            let info = Info {
                id,
                parent,
                major,
                minor,
                root,
                mount_point,
                opts,
                optional: optional_new,
                fstype,
                source,
                vfs_opts,
            };
            infos.push(info);
        } else {
            return Err(ErrorKind::ErrorCode("failed to parse mount info file".to_string()).into());
        }
    }
    Ok(infos)
}
// Switch the root filesystem to `rootfs` with MS_MOVE + chroot instead of
// pivot_root(2) (needed e.g. when the current root cannot be pivoted).
// Before moving, every proc/sysfs mount outside the new rootfs is
// detached (or covered with tmpfs when unmounting is not permitted) so no
// host mounts leak into the container.
pub fn ms_move_root(rootfs: &str) -> Result<bool> {
    unistd::chdir(rootfs)?;
    let mount_infos = parse_mount_table()?;
    let root_path = Path::new(rootfs);
    let abs_root_buf = root_path.absolutize()?;
    let abs_root = abs_root_buf.to_str().ok_or::<Error>(
        ErrorKind::ErrorCode(format!("failed to parse {} to absolute path", rootfs)).into(),
    )?;
    for info in mount_infos.iter() {
        let mount_point = Path::new(&info.mount_point);
        let abs_mount_buf = mount_point.absolutize()?;
        let abs_mount_point = abs_mount_buf.to_str().ok_or::<Error>(
            ErrorKind::ErrorCode(format!(
                "failed to parse {} to absolute path",
                info.mount_point
            ))
            .into(),
        )?;
        let abs_mount_point_string = String::from(abs_mount_point);
        // Umount every syfs and proc file systems, except those under the container rootfs
        if (info.fstype != "proc" && info.fstype != "sysfs")
            || abs_mount_point_string.starts_with(abs_root)
        {
            continue;
        }
        // Be sure umount events are not propagated to the host.
        mount::mount(
            None::<&str>,
            abs_mount_point,
            None::<&str>,
            MsFlags::MS_SLAVE | MsFlags::MS_REC,
            None::<&str>,
        )?;
        match mount::umount2(abs_mount_point, MntFlags::MNT_DETACH) {
            Ok(_) => (),
            Err(e) => {
                if e.ne(&nix::Error::from(Errno::EINVAL)) && e.ne(&nix::Error::from(Errno::EPERM)) {
                    return Err(ErrorKind::ErrorCode(e.to_string()).into());
                }
                // If we have not privileges for umounting (e.g. rootless), then
                // cover the path.
                mount::mount(
                    Some("tmpfs"),
                    abs_mount_point,
                    Some("tmpfs"),
                    MsFlags::empty(),
                    None::<&str>,
                )?;
            }
        }
    }
    // Move the new rootfs onto "/" and re-root the process into it.
    mount::mount(
        Some(abs_root),
        "/",
        None::<&str>,
        MsFlags::MS_MOVE,
        None::<&str>,
    )?;
    unistd::chroot(".")?;
    unistd::chdir("/")?;
    Ok(true)
}
// Split an OCI mount's option list into mount(2) flags plus the leftover
// strings, which are joined into the filesystem-specific `data` argument.
fn parse_mount(m: &Mount) -> (MsFlags, String) {
    let mut flags = MsFlags::empty();
    let mut extra: Vec<String> = Vec::new();
    for opt in m.options.iter() {
        match OPTIONS.get(opt.as_str()) {
            Some(&(clear, flag)) => {
                if clear {
                    flags &= !flag;
                } else {
                    flags |= flag;
                }
            }
            // Unrecognized options are passed through as mount data.
            None => extra.push(opt.clone()),
        }
    }
    (flags, extra.join(","))
}
// Perform a single mount from the spec into the rootfs. For bind mounts
// the source is canonicalized and the destination (file or directory) is
// created first so the bind has something to cover; for everything else
// the destination directory is simply created. When the requested flags
// include read-only-style bits that mount(2) ignores on the initial bind,
// a second MS_REMOUNT pass applies them.
fn mount_from(m: &Mount, rootfs: &str, flags: MsFlags, data: &str, _label: &str) -> Result<()> {
    let d = String::from(data);
    let dest = format!("{}{}", rootfs, &m.destination);
    let src = if m.field_type.as_str() == "bind" {
        let src = fs::canonicalize(m.source.as_str())?;
        // Bind of a file needs its parent directory; bind of a directory
        // needs the directory itself.
        let dir = if src.is_file() {
            Path::new(&dest).parent().unwrap()
        } else {
            Path::new(&dest)
        };
        // let _ = fs::create_dir_all(&dir);
        // Creation failure is only logged: the directory may already
        // exist and the mount below will surface real problems.
        match fs::create_dir_all(&dir) {
            Ok(_) => {}
            Err(e) => {
                info!(
                    sl!(),
                    "creat dir {}: {}",
                    dir.to_str().unwrap(),
                    e.to_string()
                );
            }
        }
        // make sure file exists so we can bind over it
        if src.is_file() {
            let _ = OpenOptions::new().create(true).write(true).open(&dest);
        }
        src
    } else {
        let _ = fs::create_dir_all(&dest);
        PathBuf::from(&m.source)
    };
    info!(sl!(), "{}, {}", src.to_str().unwrap(), dest.as_str());
    // ignore this check since some mount's src didn't been a directory
    // such as tmpfs.
    /*
    match stat::stat(src.to_str().unwrap()) {
        Ok(_) => {}
        Err(e) => {
            info!("{}: {}", src.to_str().unwrap(), e.as_errno().unwrap().desc());
        }
    }
    */
    // Diagnostic only: log whether the destination exists before mounting.
    match stat::stat(dest.as_str()) {
        Ok(_) => {}
        Err(e) => {
            info!(sl!(), "{}: {}", dest.as_str(), e.as_errno().unwrap().desc());
        }
    }
    match mount::mount(
        Some(src.to_str().unwrap()),
        dest.as_str(),
        Some(m.field_type.as_str()),
        flags,
        Some(d.as_str()),
    ) {
        Ok(_) => {}
        Err(e) => {
            info!(sl!(), "mount error: {}", e.as_errno().unwrap().desc());
            return Err(e.into());
        }
    }
    // A bind mount ignores flags such as MS_RDONLY on the first call;
    // when any flag outside the propagation/bind set was requested,
    // remount in place to apply it.
    if flags.contains(MsFlags::MS_BIND)
        && flags.intersects(
            !(MsFlags::MS_REC
                | MsFlags::MS_REMOUNT
                | MsFlags::MS_BIND
                | MsFlags::MS_PRIVATE
                | MsFlags::MS_SHARED
                | MsFlags::MS_SLAVE),
        )
    {
        match mount::mount(
            Some(dest.as_str()),
            dest.as_str(),
            None::<&str>,
            flags | MsFlags::MS_REMOUNT,
            None::<&str>,
        ) {
            Err(e) => {
                info!(
                    sl!(),
                    "remout {}: {}",
                    dest.as_str(),
                    e.as_errno().unwrap().desc()
                );
                return Err(e.into());
            }
            Ok(_) => {}
        }
    }
    Ok(())
}
// Standard symlinks created inside every container rootfs; targets are
// relative to the rootfs (the current directory when they are created).
static SYMLINKS: &'static [(&'static str, &'static str)] = &[
    ("/proc/self/fd", "dev/fd"),
    ("/proc/self/fd/0", "dev/stdin"),
    ("/proc/self/fd/1", "dev/stdout"),
    ("/proc/self/fd/2", "dev/stderr"),
];
// Create the standard /dev symlinks inside the rootfs (plus dev/kcore
// when the host exposes /proc/kcore). Must be called with the rootfs as
// the current working directory.
fn default_symlinks() -> Result<()> {
    if Path::new("/proc/kcore").exists() {
        unix::fs::symlink("/proc/kcore", "dev/kcore")?;
    }
    for link in SYMLINKS.iter() {
        let (source, target) = *link;
        unix::fs::symlink(source, target)?;
    }
    Ok(())
}
// Create the default device nodes plus every device from the spec inside
// the rootfs, either by bind-mounting the host node (`bind` true) or by
// mknod(2). The umask is cleared for the duration so the requested file
// modes are applied exactly, then restored.
fn create_devices(devices: &[LinuxDevice], bind: bool) -> Result<()> {
    let op: fn(&LinuxDevice) -> Result<()> = if bind { bind_dev } else { mknod_dev };
    let old = stat::umask(Mode::from_bits_truncate(0o000));
    for dev in DEFAULT_DEVICES.iter() {
        op(dev)?;
    }
    for dev in devices {
        // Reject paths outside /dev or containing "..".
        if !dev.Path.starts_with("/dev") || dev.Path.contains("..") {
            let msg = format!("{} is not a valid device path", dev.Path);
            bail!(ErrorKind::ErrorCode(msg));
        }
        op(dev)?;
    }
    stat::umask(old);
    Ok(())
}
// Replace any existing dev/ptmx in the rootfs with a symlink to pts/ptmx
// so the container uses its own devpts instance. Must be called with the
// rootfs as the current working directory.
fn ensure_ptmx() -> Result<()> {
    // Removal may fail when the file does not exist yet; that is fine.
    let _ = fs::remove_file("dev/ptmx");
    unix::fs::symlink("pts/ptmx", "dev/ptmx")?;
    Ok(())
}
// Pack a (major, minor) pair into the kernel's 64-bit dev_t layout, the
// same split glibc's makedev(3) uses: low 8 minor bits, 12 major bits,
// the remaining minor bits at bit 12 and the remaining major bits at 32.
fn makedev(major: u64, minor: u64) -> u64 {
    let minor_low = minor & 0xff;
    let minor_high = (minor & !0xff) << 12;
    let major_low = (major & 0xfff) << 8;
    let major_high = (major & !0xfff) << 32;
    minor_low | major_low | minor_high | major_high
}
lazy_static! {
    // OCI device-type letters mapped to the stat(2) file-type flag used
    // by mknod: "c" char device, "b" block device, "p" FIFO.
    static ref LINUXDEVICETYPE: HashMap<&'static str, SFlag> = {
        let mut m = HashMap::new();
        m.insert("c", SFlag::S_IFCHR);
        m.insert("b", SFlag::S_IFBLK);
        m.insert("p", SFlag::S_IFIFO);
        m
    };
}
// Create the device node described by `dev` under the current working
// directory (the container rootfs) and give it the requested owner.
fn mknod_dev(dev: &LinuxDevice) -> Result<()> {
    let sflag = match LINUXDEVICETYPE.get(dev.Type.as_str()) {
        Some(v) => v,
        None => return Err(ErrorKind::ErrorCode("invalid spec".to_string()).into()),
    };
    // dev.Path starts with '/'; strip it so the node lands in the rootfs.
    let relpath = &dev.Path[1..];
    stat::mknod(
        relpath,
        *sflag,
        Mode::from_bits_truncate(dev.FileMode),
        makedev(dev.Major as u64, dev.Minor as u64),
    )?;
    unistd::chown(
        relpath,
        Some(Uid::from_raw(dev.UID as uid_t)),
        Some(Gid::from_raw(dev.GID as uid_t)),
    )?;
    Ok(())
}
// Make a host device available in the rootfs by bind-mounting it over an
// empty placeholder file (used instead of mknod when running without
// CAP_MKNOD, e.g. rootless). Must be called with the rootfs as the
// current working directory.
fn bind_dev(dev: &LinuxDevice) -> Result<()> {
    // Create the mount target; the leading '/' is stripped so the file
    // is created inside the rootfs.
    let fd = fcntl::open(
        &dev.Path[1..],
        OFlag::O_RDWR | OFlag::O_CREAT,
        Mode::from_bits_truncate(0o644),
    )?;
    unistd::close(fd)?;
    mount::mount(
        Some(&*dev.Path),
        &dev.Path[1..],
        None::<&str>,
        MsFlags::MS_BIND,
        None::<&str>,
    )?;
    Ok(())
}
// Final rootfs pass after pivoting: mask and read-only-protect the paths
// requested by the spec, remount /dev read-only when its mount options
// ask for it, and remount "/" read-only when Root.Readonly is set.
pub fn finish_rootfs(spec: &Spec) -> Result<()> {
    let olddir = unistd::getcwd()?;
    info!(sl!(), "{}", olddir.to_str().unwrap());
    unistd::chdir("/")?;
    if spec.Linux.is_some() {
        let linux = spec.Linux.as_ref().unwrap();
        for path in linux.MaskedPaths.iter() {
            mask_path(path)?;
        }
        for path in linux.ReadonlyPaths.iter() {
            readonly_path(path)?;
        }
    }
    for m in spec.Mounts.iter() {
        if m.destination == "/dev" {
            let (flags, _) = parse_mount(m);
            if flags.contains(MsFlags::MS_RDONLY) {
                // /dev was mounted writable for device creation; apply
                // the requested read-only bit now.
                mount::mount(
                    Some("/dev"),
                    "/dev",
                    None::<&str>,
                    flags | MsFlags::MS_REMOUNT,
                    None::<&str>,
                )?;
            }
        }
    }
    if spec.Root.as_ref().unwrap().Readonly {
        let flags = MsFlags::MS_BIND | MsFlags::MS_RDONLY | MsFlags::MS_NODEV | MsFlags::MS_REMOUNT;
        mount::mount(Some("/"), "/", None::<&str>, flags, None::<&str>)?;
    }
    // Restore the default umask and previous working directory.
    stat::umask(Mode::from_bits_truncate(0o022));
    unistd::chdir(&olddir)?;
    Ok(())
}
// Hide `path` from the container by bind-mounting /dev/null over it.
// ENOENT/ENOTDIR are tolerated: the masked path simply does not exist in
// this rootfs. Paths must be absolute and must not contain "..".
fn mask_path(path: &str) -> Result<()> {
    if !path.starts_with("/") || path.contains("..") {
        return Err(nix::Error::Sys(Errno::EINVAL).into());
    }
    //info!("{}", path);
    match mount::mount(
        Some("/dev/null"),
        path,
        None::<&str>,
        MsFlags::MS_BIND,
        None::<&str>,
    ) {
        Err(nix::Error::Sys(e)) => {
            if e != Errno::ENOENT && e != Errno::ENOTDIR {
                //info!("{}: {}", path, e.desc());
                return Err(nix::Error::Sys(e).into());
            }
        }
        Err(e) => {
            //info!("{}: {}", path, e.as_errno().unwrap().desc());
            return Err(e.into());
        }
        Ok(_) => {}
    }
    Ok(())
}
// Make `path` read-only inside the container: bind it onto itself, then
// remount that bind read-only. ENOENT is tolerated (path absent in this
// rootfs). Paths must be absolute and must not contain "..".
fn readonly_path(path: &str) -> Result<()> {
    if !path.starts_with("/") || path.contains("..") {
        return Err(nix::Error::Sys(Errno::EINVAL).into());
    }
    //info!("{}", path);
    // First bind the path onto itself; the source uses the relative form
    // (leading '/' stripped) resolved from the new root.
    match mount::mount(
        Some(&path[1..]),
        path,
        None::<&str>,
        MsFlags::MS_BIND | MsFlags::MS_REC,
        None::<&str>,
    ) {
        Err(nix::Error::Sys(e)) => {
            if e == Errno::ENOENT {
                return Ok(());
            } else {
                //info!("{}: {}", path, e.desc());
                return Err(nix::Error::Sys(e).into());
            }
        }
        Err(e) => {
            //info!("{}: {}", path, e.as_errno().unwrap().desc());
            return Err(e.into());
        }
        Ok(_) => {}
    }
    // Second pass: remount the new bind with MS_RDONLY applied.
    mount::mount(
        Some(&path[1..]),
        &path[1..],
        None::<&str>,
        MsFlags::MS_BIND | MsFlags::MS_REC | MsFlags::MS_RDONLY | MsFlags::MS_REMOUNT,
        None::<&str>,
    )?;
    Ok(())
}

View File

@ -0,0 +1,141 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
// use std::process::{Stdio, Command, ExitStatus};
use libc::pid_t;
use std::fs::File;
use std::os::unix::io::RawFd;
// use crate::configs::{Capabilities, Rlimit};
// use crate::cgroups::Manager as CgroupManager;
// use crate::intelrdt::Manager as RdtManager;
use nix::fcntl::OFlag;
use nix::sys::signal::{self, Signal};
use nix::sys::socket::{self, AddressFamily, SockFlag, SockType};
use nix::sys::wait::{self, WaitStatus};
use nix::unistd::{self, Pid};
use nix::Result;
use nix::Error;
use protocols::oci::Process as OCIProcess;
use slog::Logger;
// State for one container process (either the init process or an exec'd
// one): its stdio fds, the parent-side ends of those pipes, the console
// socket pair, and the pid/exit status once running.
#[derive(Debug)]
pub struct Process {
    pub exec_id: String,
    // Child-side stdio pipe fds.
    pub stdin: Option<RawFd>,
    pub stdout: Option<RawFd>,
    pub stderr: Option<RawFd>,
    // Pipe used to signal process exit.
    pub exit_pipe_r: Option<RawFd>,
    pub exit_pipe_w: Option<RawFd>,
    pub extra_files: Vec<File>,
    //	pub caps: Capabilities,
    //	pub rlimits: Vec<Rlimit>,
    // Child-side console fds (only set when the OCI process requests a
    // terminal).
    pub console_socket: Option<RawFd>,
    pub term_master: Option<RawFd>,
    // parent end of fds
    pub parent_console_socket: Option<RawFd>,
    pub parent_stdin: Option<RawFd>,
    pub parent_stdout: Option<RawFd>,
    pub parent_stderr: Option<RawFd>,
    // true for the container init process, false for exec'd processes.
    pub init: bool,
    // pid of the init/exec process. since we have no command
    // struct to store pid, we must store pid here.
    pub pid: pid_t,
    pub exit_code: i32,
    pub oci: OCIProcess,
    pub logger: Logger,
}
// Minimal operations on a running process: query its pid, wait for it,
// and deliver a signal.
pub trait ProcessOperations {
    fn pid(&self) -> Pid;
    fn wait(&self) -> Result<WaitStatus>;
    fn signal(&self, sig: Signal) -> Result<()>;
}
impl ProcessOperations for Process {
    fn pid(&self) -> Pid {
        Pid::from_raw(self.pid)
    }
    // Blocking waitpid(2) on this process.
    fn wait(&self) -> Result<WaitStatus> {
        wait::waitpid(Some(self.pid()), None)
    }
    // Deliver `sig` via kill(2).
    fn signal(&self, sig: Signal) -> Result<()> {
        signal::kill(self.pid(), Some(sig))
    }
}
impl Process {
    // Build a not-yet-started Process (pid is -1 until spawned) for the
    // given OCI process description. Creates the stdin/stdout/stderr pipe
    // pairs, and a console socketpair when a terminal is requested; the
    // parent_* fields hold the parent-side ends of each.
    pub fn new(logger: &Logger, ocip: &OCIProcess, id: &str, init: bool) -> Result<Self> {
        let logger = logger.new(o!("subsystem" => "process"));
        let mut p = Process {
            exec_id: String::from(id),
            stdin: None,
            stdout: None,
            stderr: None,
            exit_pipe_w: None,
            exit_pipe_r: None,
            extra_files: Vec::new(),
            console_socket: None,
            term_master: None,
            parent_console_socket: None,
            parent_stdin: None,
            parent_stdout: None,
            parent_stderr: None,
            init,
            // Not running yet; filled in once the process is spawned.
            pid: -1,
            exit_code: 0,
            oci: ocip.clone(),
            logger: logger.clone(),
        };
        info!(logger, "before create console socket!");
        if ocip.Terminal {
            // Console fds travel over a unix stream socketpair; failures
            // are logged before being propagated.
            let (psocket, csocket) = match socket::socketpair(
                AddressFamily::Unix,
                SockType::Stream,
                None,
                SockFlag::SOCK_CLOEXEC,
            ) {
                Ok((u, v)) => (u, v),
                Err(e) => {
                    match e {
                        Error::Sys(errno) => {
                            info!(logger, "socketpair: {}", errno.desc());
                        }
                        _ => {
                            info!(logger, "socketpair: other error!");
                        }
                    }
                    return Err(e);
                }
            };
            p.parent_console_socket = Some(psocket);
            p.console_socket = Some(csocket);
        }
        info!(logger, "created console socket!");
        // Stdio pipes: the first fd of each pair is the read end, the
        // second the write end (pipe2(2)); all are close-on-exec.
        let (stdin, pstdin) = unistd::pipe2(OFlag::O_CLOEXEC)?;
        p.parent_stdin = Some(pstdin);
        p.stdin = Some(stdin);
        let (pstdout, stdout) = unistd::pipe2(OFlag::O_CLOEXEC)?;
        p.parent_stdout = Some(pstdout);
        p.stdout = Some(stdout);
        let (pstderr, stderr) = unistd::pipe2(OFlag::O_CLOEXEC)?;
        p.parent_stderr = Some(pstderr);
        p.stderr = Some(stderr);
        Ok(p)
    }
}

View File

@ -0,0 +1,159 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use protocols::oci::Spec;
// use crate::configs::namespaces;
// use crate::configs::device::Device;
// Options controlling container creation, mirroring libcontainer's
// CreateOpts.
#[derive(Debug)]
pub struct CreateOpts {
    pub cgroup_name: String,
    // Use the systemd cgroup driver instead of cgroupfs.
    pub use_systemd_cgroup: bool,
    // Enter the rootfs with MS_MOVE/chroot instead of pivot_root(2).
    pub no_pivot_root: bool,
    pub no_new_keyring: bool,
    pub spec: Option<Spec>,
    // Running with a non-zero effective uid (rootless mode).
    pub rootless_euid: bool,
    pub rootless_cgroup: bool,
}
/*
const WILDCARD: i32 = -1;
lazy_static! {
static ref NAEMSPACEMAPPING: HashMap<&'static str, &'static str> = {
let mut m = HashMap::new();
m.insert(oci::PIDNAMESPACE, namespaces::NEWPID);
m.insert(oci::NETWORKNAMESPACE, namespaces::NEWNET);
m.insert(oci::UTSNAMESPACE, namespaces::NEWUTS);
m.insert(oci::MOUNTNAMESPACE, namespaces::NEWNS);
m.insert(oci::IPCNAMESPACE, namespaces::NEWIPC);
m.insert(oci::USERNAMESPACE, namespaces::NEWUSER);
m.insert(oci::CGROUPNAMESPACE, namespaces::NEWCGROUP);
m
};
static ref MOUNTPROPAGATIONMAPPING: HashMap<&'static str, MsFlags> = {
let mut m = HashMap::new();
m.insert("rprivate", MsFlags::MS_PRIVATE | MsFlags::MS_REC);
m.insert("private", MsFlags::MS_PRIVATE);
m.insert("rslave", MsFlags::MS_SLAVE | MsFlags::MS_REC);
m.insert("slave", MsFlags::MS_SLAVE);
m.insert("rshared", MsFlags::MS_SHARED | MsFlags::MS_REC);
m.insert("shared", MsFlags::MS_SHARED);
m.insert("runbindable", MsFlags::MS_UNBINDABLE | MsFlags::MS_REC);
m.insert("unbindable", MsFlags::MS_UNBINDABLE);
m
};
static ref ALLOWED_DEVICES: Vec<Device> = {
let mut m = Vec::new();
m.push(Device {
r#type: 'c',
major: WILDCARD,
minor: WILDCARD,
permissions: "m",
allow: true,
});
m.push(Device {
r#type: 'b',
major: WILDCARD,
minor: WILDCARD,
permissions: "m",
allow: true,
});
m.push(Device {
r#type: 'c',
path: "/dev/null".to_string(),
major: 1,
minor: 3,
permissions: "rwm",
allow: true,
});
m.push(Device {
r#type: 'c',
path: String::from("/dev/random"),
major: 1,
minor: 8,
permissions: "rwm",
allow: true,
});
m.push(Device {
r#type: 'c',
path: String::from("/dev/full"),
major: 1,
minor: 7,
permissions: "rwm",
allow: true,
});
m.push(Device {
r#type: 'c',
path: String::from("/dev/tty"),
major: 5,
minor: 0,
permissions: "rwm",
allow: true,
});
m.push(Device {
r#type: 'c',
path: String::from("/dev/zero"),
major: 1,
minor: 5,
permissions: "rwm",
allow: true,
});
m.push(Device {
r#type: 'c',
path: String::from("/dev/urandom"),
major: 1,
minor: 9,
permissions: "rwm",
allow: true,
});
m.push(Device {
r#type: 'c',
path: String::from("/dev/console"),
major: 5,
minor: 1,
permissions: "rwm",
allow: true,
});
m.push(Device {
r#type: 'c',
path: String::from(""),
major: 136,
minor: WILDCARD,
permissions: "rwm",
allow: true,
});
m.push(Device {
r#type: 'c',
path: String::from(""),
major: 5,
minor: 2,
permissions: "rwm",
allow: true,
});
m.push(Device {
r#type: 'c',
path: String::from(""),
major: 10,
minor: 200,
permissions: "rwm",
allow: true,
});
m
};
}
*/

View File

@ -0,0 +1,304 @@
use crate::container::Config;
use crate::errors::*;
use lazy_static;
use nix::errno::Errno;
use nix::Error;
use protobuf::RepeatedField;
use protocols::oci::{LinuxIDMapping, LinuxNamespace, Spec};
use std::collections::HashMap;
use std::path::{Component, PathBuf};
// Return true when the namespace list contains an entry of type `key`.
fn contain_namespace(nses: &RepeatedField<LinuxNamespace>, key: &str) -> bool {
    nses.iter().any(|ns| ns.Type.as_str() == key)
}
fn get_namespace_path(nses: &RepeatedField<LinuxNamespace>, key: &str) -> Result<String> {
for ns in nses {
if ns.Type.as_str() == key {
return Ok(ns.Path.clone());
}
}
Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into())
}
// Validate the rootfs path: it must exist, be absolute, and contain no
// symlinked components (the lexically-cleaned path must equal the
// canonicalized one).
fn rootfs(root: &str) -> Result<()> {
    let path = PathBuf::from(root);
    // not absolute path or not exists
    if !path.exists() || !path.is_absolute() {
        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
    }
    // symbolic link? ..?
    // Lexically clean the path: drop leading "/" and "..", and let ".."
    // pop the previous component.
    let mut stack: Vec<String> = Vec::new();
    for c in path.components() {
        if stack.is_empty() {
            if c == Component::RootDir || c == Component::ParentDir {
                continue;
            }
        }
        if c == Component::ParentDir {
            stack.pop();
            continue;
        }
        stack.push(c.as_os_str().to_str().unwrap().to_string());
    }
    let mut cleaned = PathBuf::from("/");
    for e in stack.iter() {
        cleaned.push(e);
    }
    let canon = path.canonicalize()?;
    if cleaned != canon {
        // There is symbolic in path
        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
    }
    Ok(())
}
// Network validation is intentionally a no-op for the agent at present.
fn network(_oci: &Spec) -> Result<()> {
    Ok(())
}
// A non-empty hostname requires a Linux section declaring a "uts"
// namespace; an empty hostname needs nothing.
//
// Cleanup: the old condition `is_empty() || Hostname == "".to_string()`
// was redundant and allocated a String on every call; is_empty() alone
// covers it.
fn hostname(oci: &Spec) -> Result<()> {
    if oci.Hostname.is_empty() {
        return Ok(());
    }
    if oci.Linux.is_none() {
        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
    }
    let linux = oci.Linux.as_ref().unwrap();
    if !contain_namespace(&linux.Namespaces, "uts") {
        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
    }
    Ok(())
}
// Masked/read-only paths are implemented with bind mounts, so they
// require a "mount" namespace; with no such paths there is nothing to
// check.
//
// Cleanup: replaced non-idiomatic `len() == 0` with `is_empty()`
// (clippy::len_zero).
fn security(oci: &Spec) -> Result<()> {
    let linux = oci.Linux.as_ref().unwrap();
    if linux.MaskedPaths.is_empty() && linux.ReadonlyPaths.is_empty() {
        return Ok(());
    }
    if !contain_namespace(&linux.Namespaces, "mount") {
        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
    }
    // don't care about selinux at present
    Ok(())
}
// At least one mapping with a non-zero size must be present; some
// runtimes have been seen passing zero-sized idmaps.
fn idmapping(maps: &RepeatedField<LinuxIDMapping>) -> Result<()> {
    if maps.iter().any(|map| map.Size > 0) {
        Ok(())
    } else {
        Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into())
    }
}
// With a "user" namespace the kernel must support it and both uid and
// gid mappings must be usable; without one, no idmaps may be present.
fn usernamespace(oci: &Spec) -> Result<()> {
    let linux = oci.Linux.as_ref().unwrap();
    if !contain_namespace(&linux.Namespaces, "user") {
        // no user namespace but idmap
        if linux.UIDMappings.len() != 0 || linux.GIDMappings.len() != 0 {
            return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
        }
        return Ok(());
    }
    let user_ns = PathBuf::from("/proc/self/ns/user");
    if !user_ns.exists() {
        return Err(ErrorKind::ErrorCode("user namespace not supported!".to_string()).into());
    }
    // check if idmappings is correct, at least I saw idmaps
    // with zero size was passed to agent
    idmapping(&linux.UIDMappings)?;
    idmapping(&linux.GIDMappings)?;
    Ok(())
}
// When a "cgroup" namespace is requested, the kernel must expose
// /proc/self/ns/cgroup; otherwise there is nothing to verify.
fn cgroupnamespace(oci: &Spec) -> Result<()> {
    let linux = oci.Linux.as_ref().unwrap();
    if !contain_namespace(&linux.Namespaces, "cgroup") {
        return Ok(());
    }
    if !PathBuf::from("/proc/self/ns/cgroup").exists() {
        return Err(ErrorKind::ErrorCode("cgroup unsupported!".to_string()).into());
    }
    Ok(())
}
lazy_static! {
    // IPC-related sysctls that are only legal when the container owns an
    // "ipc" namespace (see sysctl() below).
    pub static ref SYSCTLS: HashMap<&'static str, bool> = {
        let mut m = HashMap::new();
        m.insert("kernel.msgmax", true);
        m.insert("kernel.msgmnb", true);
        m.insert("kernel.msgmni", true);
        m.insert("kernel.sem", true);
        m.insert("kernel.shmall", true);
        m.insert("kernel.shmmax", true);
        m.insert("kernel.shmmni", true);
        m.insert("kernel.shm_rmid_forced", true);
        m
    };
}
// Reject a namespace path that refers to the host's own network
// namespace (i.e. resolves to the same target as /proc/self/ns/net).
fn check_host_ns(path: &str) -> Result<()> {
    let container_ns = PathBuf::from(path);
    let host_ns = PathBuf::from("/proc/self/ns/net");
    let host_target = host_ns.read_link()?;
    let meta = container_ns.symlink_metadata()?;
    // A non-symlink cannot be a /proc namespace reference; accept it.
    if !meta.file_type().is_symlink() {
        return Ok(());
    }
    if container_ns.read_link()? == host_target {
        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
    }
    Ok(())
}
// Validate the sysctls in the spec against the container's namespaces:
// IPC sysctls need an "ipc" namespace, net.* sysctls need a non-host
// "network" namespace, and only kernel.domainname is allowed with "uts".
// Any other sysctl is rejected with EINVAL.
fn sysctl(oci: &Spec) -> Result<()> {
    let linux = oci.Linux.as_ref().unwrap();
    for (key, _) in linux.Sysctl.iter() {
        if SYSCTLS.contains_key(key.as_str()) || key.starts_with("fs.mqueue.") {
            if contain_namespace(&linux.Namespaces, "ipc") {
                continue;
            } else {
                return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
            }
        }
        if key.starts_with("net.") {
            if !contain_namespace(&linux.Namespaces, "network") {
                return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
            }
            let net = get_namespace_path(&linux.Namespaces, "network")?;
            // An empty path means a brand-new network namespace: fine.
            if net.is_empty() {
                continue;
            }
            check_host_ns(net.as_str())?;
            // BUG FIX: the namespace checks passed, so this sysctl is
            // valid. Without this `continue` the code fell through to
            // the final EINVAL and rejected every net.* sysctl that
            // carried a concrete (non-host) namespace path.
            continue;
        }
        if contain_namespace(&linux.Namespaces, "uts") {
            if key == "kernel.domainname" {
                continue;
            }
            if key == "kernel.hostname" {
                return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
            }
        }
        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
    }
    Ok(())
}
// rootless_euid_mapping ensures a rootless container joins a user
// namespace and defines both UID and GID mappings; returns EINVAL
// otherwise.
fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
    let linux = oci.Linux.as_ref().unwrap();
    if !contain_namespace(&linux.Namespaces, "user") {
        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
    }
    // is_empty() is the idiomatic replacement for `len() == 0`.
    if linux.UIDMappings.is_empty() || linux.GIDMappings.is_empty() {
        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
    }
    Ok(())
}
// has_idmapping reports whether `id` falls inside any of the given
// container-ID mapping ranges.
fn has_idmapping(maps: &RepeatedField<LinuxIDMapping>, id: u32) -> bool {
    maps.iter()
        .any(|map| id >= map.ContainerID && id < map.ContainerID + map.Size)
}
// rootless_euid_mount checks every "uid="/"gid=" mount option against the
// container's ID mappings; an unmapped id is rejected with EINVAL.
fn rootless_euid_mount(oci: &Spec) -> Result<()> {
    let linux = oci.Linux.as_ref().unwrap();
    for mnt in oci.Mounts.iter() {
        for opt in mnt.options.iter() {
            let is_uid = opt.starts_with("uid=");
            let is_gid = opt.starts_with("gid=");
            if !is_uid && !is_gid {
                continue;
            }
            // Options must be exactly "uid=N" / "gid=N".
            let fields: Vec<&str> = opt.split('=').collect();
            if fields.len() != 2 {
                return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
            }
            let id = fields[1].trim().parse::<u32>()?;
            let mappings = if is_uid {
                &linux.UIDMappings
            } else {
                &linux.GIDMappings
            };
            if !has_idmapping(mappings, id) {
                return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
            }
        }
    }
    Ok(())
}
// rootless_euid runs every rootless-specific validation in sequence.
fn rootless_euid(oci: &Spec) -> Result<()> {
    rootless_euid_mapping(oci).and_then(|_| rootless_euid_mount(oci))
}
// validate runs every spec validation in turn: the spec must carry Linux
// and Root sections, and the rootfs, network, hostname, security,
// namespace and sysctl settings must all be coherent. Rootless checks run
// only when the agent itself has a non-zero euid.
pub fn validate(conf: &Config) -> Result<()> {
    lazy_static::initialize(&SYSCTLS);
    let oci = conf.spec.as_ref().unwrap();
    if oci.Linux.is_none() {
        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
    }
    if oci.Root.is_none() {
        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
    }
    let root = oci.Root.get_ref().Path.as_str();
    rootfs(root)?;
    network(oci)?;
    hostname(oci)?;
    security(oci)?;
    usernamespace(oci)?;
    cgroupnamespace(oci)?;
    // `oci` is already a &Spec; the previous `sysctl(&oci)` passed a
    // redundant double reference that only worked via deref coercion.
    sysctl(oci)?;
    if conf.rootless_euid {
        rootless_euid(oci)?;
    }
    Ok(())
}

423
src/agent/src/device.rs Normal file
View File

@ -0,0 +1,423 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use rustjail::errors::*;
use std::fs;
// use std::io::Write;
use libc::{c_uint, major, minor};
use std::collections::HashMap;
use std::os::unix::fs::MetadataExt;
use std::path::Path;
use std::sync::mpsc;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use crate::mount::{
DRIVERBLKTYPE, DRIVERMMIOBLKTYPE, DRIVERNVDIMMTYPE, DRIVERSCSITYPE, TIMEOUT_HOTPLUG,
};
use crate::sandbox::Sandbox;
use crate::GLOBAL_DEVICE_WATCHER;
use protocols::agent::Device;
use protocols::oci::Spec;
// Convenience macro to obtain the scope logger.
// Expands to a child of the global slog-scope logger tagged with
// `subsystem = "device"` so all device messages are attributable.
macro_rules! sl {
    () => {
        slog_scope::logger().new(o!("subsystem" => "device"))
    };
}
// Root PCI bus path in sysfs; the layout differs per architecture.
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "x86",
    target_arch = "powerpc64le",
    target_arch = "s390x"
))]
pub const ROOT_BUS_PATH: &'static str = "/devices/pci0000:00";
#[cfg(target_arch = "arm")]
pub const ROOT_BUS_PATH: &'static str = "/devices/platform/4010000000.pcie/pci0000:00";
pub const SYSFS_DIR: &'static str = "/sys";
// sysfs locations used for PCI device discovery and hotplug rescans.
const SYS_BUS_PREFIX: &'static str = "/sys/bus/pci/devices";
const PCI_BUS_RESCAN_FILE: &'static str = "/sys/bus/pci/rescan";
const SYSTEM_DEV_PATH: &'static str = "/dev";
// SCSI const
// Here in "0:0", the first number is the SCSI host number because
// only one SCSI controller has been plugged, while the second number
// is always 0.
pub const SCSI_HOST_CHANNEL: &'static str = "0:0:";
const SYS_CLASS_PREFIX: &'static str = "/sys/class";
const SCSI_DISK_PREFIX: &'static str = "/sys/class/scsi_disk/0:0:";
pub const SCSI_BLOCK_SUFFIX: &'static str = "block";
const SCSI_DISK_SUFFIX: &'static str = "/device/block";
const SCSI_HOST_PATH: &'static str = "/sys/class/scsi_host";
// DeviceHandler is the type of callback to be defined to handle every
// type of device driver.
type DeviceHandler = fn(&Device, &mut Spec, Arc<Mutex<Sandbox>>) -> Result<()>;
// DeviceHandlerList lists the supported drivers.
#[cfg_attr(rustfmt, rustfmt_skip)]
lazy_static! {
    // Map each driver type string to its handler; the `as DeviceHandler`
    // cast coerces each fn item to the common fn-pointer type.
    pub static ref DEVICEHANDLERLIST: HashMap<&'static str, DeviceHandler> = {
        let mut handlers = HashMap::new();
        handlers.insert(DRIVERBLKTYPE, virtio_blk_device_handler as DeviceHandler);
        handlers.insert(DRIVERMMIOBLKTYPE, virtiommio_blk_device_handler as DeviceHandler);
        handlers.insert(DRIVERNVDIMMTYPE, virtio_nvdimm_device_handler as DeviceHandler);
        handlers.insert(DRIVERSCSITYPE, virtio_scsi_device_handler as DeviceHandler);
        handlers
    };
}
// rescan_pci_bus asks the kernel to rescan the PCI bus (by writing to
// /sys/bus/pci/rescan) so freshly hot-plugged devices get enumerated.
pub fn rescan_pci_bus() -> Result<()> {
    online_device(PCI_BUS_RESCAN_FILE)
}
// online_device writes "1" to the given sysfs control file, which is how
// the kernel is told to online a device or trigger a rescan.
pub fn online_device(path: &str) -> Result<()> {
    Ok(fs::write(path, "1")?)
}
// get_device_pci_address fetches the complete PCI address in sysfs, based on the PCI
// identifier provided. This should be in the format: "bridgeAddr/deviceAddr".
// Here, bridgeAddr is the address at which the brige is attached on the root bus,
// while deviceAddr is the address at which the device is attached on the bridge.
pub fn get_device_pci_address(pci_id: &str) -> Result<String> {
let tokens: Vec<&str> = pci_id.split("/").collect();
if tokens.len() != 2 {
return Err(ErrorKind::ErrorCode(format!(
"PCI Identifier for device should be of format [bridgeAddr/deviceAddr], got {}",
pci_id
))
.into());
}
let bridge_id = tokens[0];
let device_id = tokens[1];
// Deduce the complete bridge address based on the bridge address identifier passed
// and the fact that bridges are attached on the main bus with function 0.
let pci_bridge_addr = format!("0000:00:{}.0", bridge_id);
// Find out the bus exposed by bridge
let bridge_bus_path = format!("{}/{}/pci_bus/", SYS_BUS_PREFIX, pci_bridge_addr);
let files_slice: Vec<_> = fs::read_dir(&bridge_bus_path)
.unwrap()
.map(|res| res.unwrap().path())
.collect();
let bus_num = files_slice.len();
if bus_num != 1 {
return Err(ErrorKind::ErrorCode(format!(
"Expected an entry for bus in {}, got {} entries instead",
bridge_bus_path, bus_num
))
.into());
}
let bus = files_slice[0].file_name().unwrap().to_str().unwrap();
// Device address is based on the bus of the bridge to which it is attached.
// We do not pass devices as multifunction, hence the trailing 0 in the address.
let pci_device_addr = format!("{}:{}.0", bus, device_id);
let bridge_device_pci_addr = format!("{}/{}", pci_bridge_addr, pci_device_addr);
info!(
sl!(),
"Fetched PCI address for device PCIAddr:{}\n", bridge_device_pci_addr
);
Ok(bridge_device_pci_addr)
}
// get_device_name resolves a device sysfs address to its /dev node name.
// It first consults the sandbox's pci_device_map; if the hotplug event has
// not arrived yet it registers a channel in GLOBAL_DEVICE_WATCHER and
// blocks (up to TIMEOUT_HOTPLUG seconds) for the uevent listener to
// deliver the name. Returns the full "/dev/<name>" path.
pub fn get_device_name(sandbox: Arc<Mutex<Sandbox>>, dev_addr: &str) -> Result<String> {
    let mut dev_name: String = String::default();
    let (tx, rx) = mpsc::channel::<String>();
    {
        // Both the watcher map and the sandbox are locked for the whole
        // lookup+register step so no uevent can slip between them.
        let watcher = GLOBAL_DEVICE_WATCHER.clone();
        let mut w = watcher.lock().unwrap();
        let s = sandbox.clone();
        let sb = s.lock().unwrap();
        for (key, value) in &(sb.pci_device_map) {
            if key.contains(dev_addr) {
                dev_name = value.to_string();
                info!(sl!(), "Device {} found in pci device map", dev_addr);
                break;
            }
        }
        // If device is not found in the device map, hotplug event has not
        // been received yet, create and add channel to the watchers map.
        // The key of the watchers map is the device we are interested in.
        // Note this is done inside the lock, not to miss any events from the
        // global udev listener.
        if dev_name == "" {
            w.insert(dev_addr.to_string(), tx);
        }
    }
    if dev_name == "" {
        info!(sl!(), "Waiting on channel for device notification\n");
        match rx.recv_timeout(Duration::from_secs(TIMEOUT_HOTPLUG)) {
            Ok(name) => dev_name = name,
            Err(_) => {
                // Timed out: deregister our watcher entry before failing.
                // NOTE(review): on the success path the entry is NOT
                // removed here — confirm the uevent sender cleans it up.
                let watcher = GLOBAL_DEVICE_WATCHER.clone();
                let mut w = watcher.lock().unwrap();
                w.remove_entry(dev_addr);
                return Err(ErrorKind::ErrorCode(format!(
                    "Timeout reached after {} waiting for device {}",
                    TIMEOUT_HOTPLUG, dev_addr
                ))
                .into());
            }
        }
    }
    Ok(format!("{}/{}", SYSTEM_DEV_PATH, &dev_name))
}
// get_scsi_device_name triggers a SCSI bus scan for the given address and
// then waits for the matching block device node to appear under /dev.
pub fn get_scsi_device_name(sandbox: Arc<Mutex<Sandbox>>, scsi_addr: &str) -> Result<String> {
    scan_scsi_bus(scsi_addr)?;
    get_device_name(
        sandbox,
        &format!("{}{}/{}", SCSI_HOST_CHANNEL, scsi_addr, SCSI_BLOCK_SUFFIX),
    )
}
// get_pci_device_name resolves a "bridgeAddr/deviceAddr" PCI id to the
// guest /dev node, rescanning the PCI bus so hotplugged devices show up.
pub fn get_pci_device_name(sandbox: Arc<Mutex<Sandbox>>, pci_id: &str) -> Result<String> {
    let pci_addr = get_device_pci_address(pci_id)?;
    rescan_pci_bus()?;
    get_device_name(sandbox, &pci_addr)
}
// scan_scsi_bus scans SCSI bus for the given SCSI address(SCSI-Id and LUN)
pub fn scan_scsi_bus(scsi_addr: &str) -> Result<()> {
    let tokens: Vec<&str> = scsi_addr.split(':').collect();
    if tokens.len() != 2 {
        // Fixed error-message typo: "LUA" -> "LUN".
        return Err(ErrorKind::Msg(format!(
            "Unexpected format for SCSI Address: {}, expect SCSIID:LUN",
            scsi_addr
        ))
        .into());
    }
    // Scan scsi host passing in the channel, SCSI id and LUN. Channel
    // is always 0 because we have only one SCSI controller.
    let scan_data = format!("0 {} {}", tokens[0], tokens[1]);
    for entry in fs::read_dir(SCSI_HOST_PATH)? {
        let entry = entry?;
        let host = entry.file_name();
        // sysfs entry names are ASCII, so to_str() cannot fail here.
        let scan_path = format!("{}/{}/{}", SCSI_HOST_PATH, host.to_str().unwrap(), "scan");
        fs::write(scan_path, &scan_data)?;
    }
    Ok(())
}
// update_spec_device_list takes a device description provided by the caller,
// trying to find it on the guest. Once this device has been identified, the
// "real" information that can be read from inside the VM is used to update
// the same device in the list of devices provided through the OCI spec.
// This is needed to update information about minor/major numbers that cannot
// be predicted from the caller.
fn update_spec_device_list(device: &Device, spec: &mut Spec) -> Result<()> {
    // If no container_path is provided, we won't be able to match and
    // update the device in the OCI spec device list. This is an error.
    // (A duplicated copy of this comment was removed.)
    if device.container_path == "" {
        return Err(ErrorKind::Msg(format!(
            "container_path cannot empty for device {:?}",
            device
        ))
        .into());
    }
    let linux = match spec.Linux.as_mut() {
        None => {
            return Err(
                ErrorKind::ErrorCode("Spec didn't container linux field".to_string()).into(),
            )
        }
        Some(l) => l,
    };
    if !Path::new(&device.vm_path).exists() {
        return Err(ErrorKind::Msg(format!("vm_path:{} doesn't exist", device.vm_path)).into());
    }
    let meta = fs::metadata(&device.vm_path)?;
    let dev_id = meta.rdev();
    // The unsafe block is kept for libc versions where major()/minor()
    // are declared unsafe (the crate allows unused_unsafe otherwise).
    let major_id: c_uint;
    let minor_id: c_uint;
    unsafe {
        major_id = major(dev_id);
        minor_id = minor(dev_id);
    }
    info!(
        sl!(),
        "got the device: dev_path: {}, major: {}, minor: {}\n", &device.vm_path, major_id, minor_id
    );
    let devices = linux.Devices.as_mut_slice();
    for dev in devices.iter_mut() {
        if dev.Path == device.container_path {
            let host_major = dev.Major;
            let host_minor = dev.Minor;
            dev.Major = major_id as i64;
            dev.Minor = minor_id as i64;
            info!(
                sl!(),
                "change the device from major: {} minor: {} to vm device major: {} minor: {}",
                host_major,
                host_minor,
                major_id,
                minor_id
            );
            // Resources must be updated since they are used to identify the
            // device in the devices cgroup.
            // `if let` replaces the previous is_some()/unwrap() pattern.
            if let Some(res) = linux.Resources.as_mut() {
                for d in res.Devices.as_mut_slice().iter_mut() {
                    if d.Major == host_major && d.Minor == host_minor {
                        d.Major = major_id as i64;
                        d.Minor = minor_id as i64;
                        info!(
                            sl!(),
                            "set resources for device major: {} minor: {}\n", major_id, minor_id
                        );
                    }
                }
            }
        }
    }
    Ok(())
}
// device.Id should be the predicted device name (vda, vdb, ...)
// device.VmPath already provides a way to send it in
fn virtiommio_blk_device_handler(
    device: &Device,
    spec: &mut Spec,
    _sandbox: Arc<Mutex<Sandbox>>,
) -> Result<()> {
    // virtio-mmio devices must carry the guest path directly.
    if device.vm_path.is_empty() {
        return Err(ErrorKind::Msg("Invalid path for virtiommioblkdevice".to_string()).into());
    }
    update_spec_device_list(device, spec)
}
// device.Id should be the PCI address in the format "bridgeAddr/deviceAddr".
// Here, bridgeAddr is the address at which the bridge is attached on the root bus,
// while deviceAddr is the address at which the device is attached on the bridge.
fn virtio_blk_device_handler(
    device: &Device,
    spec: &mut Spec,
    sandbox: Arc<Mutex<Sandbox>>,
) -> Result<()> {
    // Resolve the PCI id to the actual guest node, then patch the spec.
    let mut dev = device.clone();
    dev.vm_path = get_pci_device_name(sandbox, device.id.as_str())?;
    update_spec_device_list(&dev, spec)
}
// device.Id should be the SCSI address of the disk in the format "scsiID:lunID"
fn virtio_scsi_device_handler(
    device: &Device,
    spec: &mut Spec,
    sandbox: Arc<Mutex<Sandbox>>,
) -> Result<()> {
    // Resolve the SCSI address to the actual guest node, then patch the spec.
    let mut dev = device.clone();
    dev.vm_path = get_scsi_device_name(sandbox, device.id.as_str())?;
    update_spec_device_list(&dev, spec)
}
// virtio_nvdimm_device_handler patches the OCI spec with the nvdimm
// device's guest major/minor numbers; the caller supplies vm_path directly,
// so no name resolution is needed.
fn virtio_nvdimm_device_handler(
    device: &Device,
    spec: &mut Spec,
    _sandbox: Arc<Mutex<Sandbox>>,
) -> Result<()> {
    update_spec_device_list(device, spec)
}
// add_devices processes every device from the request, updating the OCI
// spec as each one is resolved inside the guest; fails on the first error.
pub fn add_devices(
    devices: Vec<Device>,
    spec: &mut Spec,
    sandbox: Arc<Mutex<Sandbox>>,
) -> Result<()> {
    devices
        .iter()
        .try_for_each(|device| add_device(device, spec, sandbox.clone()))
}
// add_device validates a single device request and dispatches it to the
// driver-specific handler registered in DEVICEHANDLERLIST.
fn add_device(device: &Device, spec: &mut Spec, sandbox: Arc<Mutex<Sandbox>>) -> Result<()> {
    // log before validation to help with debugging gRPC protocol
    // version differences.
    info!(sl!(), "device-id: {}, device-type: {}, device-vm-path: {}, device-container-path: {}, device-options: {:?}",
          device.id, device.field_type, device.vm_path, device.container_path, device.options);
    // is_empty() replaces the former `== ""` comparisons.
    if device.field_type.is_empty() {
        return Err(ErrorKind::Msg(format!("invalid type for device {:?}", device)).into());
    }
    if device.id.is_empty() && device.vm_path.is_empty() {
        return Err(
            ErrorKind::Msg(format!("invalid ID and VM path for device {:?}", device)).into(),
        );
    }
    if device.container_path.is_empty() {
        return Err(
            ErrorKind::Msg(format!("invalid container path for device {:?}", device)).into(),
        );
    }
    let dev_handler = match DEVICEHANDLERLIST.get(device.field_type.as_str()) {
        None => {
            return Err(ErrorKind::Msg(format!("Unknown device type {}", device.field_type)).into())
        }
        Some(t) => t,
    };
    dev_handler(device, spec, sandbox)
}

1774
src/agent/src/grpc.rs Normal file

File diff suppressed because it is too large Load Diff

260
src/agent/src/logging.rs Normal file
View File

@ -0,0 +1,260 @@
// Copyright (c) 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
use slog::{BorrowedKV, Drain, Key, OwnedKV, OwnedKVList, Record, KV};
use std::collections::HashMap;
use std::io;
use std::io::Write;
use std::process;
use std::result;
use std::sync::{Arc, Mutex};
// XXX: 'writer' param used to make testing possible.
// create_logger builds the agent's root logger: JSON output, de-duplicated
// keys, runtime level filtering, and an async (thread-safe) drain, with a
// set of standard fields attached to every record.
pub fn create_logger<W>(name: &str, source: &str, level: slog::Level, writer: W) -> slog::Logger
where
    W: Write + Send + Sync + 'static,
{
    // Build the drain pipeline bottom-up, shadowing `drain` at each layer:
    // JSON serialisation -> unique keys -> level filter -> async.
    let drain = slog_json::Json::new(writer)
        .add_default_keys()
        .build()
        .fuse();
    let drain = UniqueDrain::new(drain).fuse();
    let drain = RuntimeLevelFilter::new(drain, level).fuse();
    let drain = slog_async::Async::new(drain).build().fuse();
    // Attach the "standard" fields carried by every record.
    slog::Logger::root(
        drain.fuse(),
        o!("version" => env!("CARGO_PKG_VERSION"),
        "subsystem" => "root",
        "pid" => process::id().to_string(),
        "name" => name.to_string(),
        "source" => source.to_string()),
    )
}
impl KV for HashSerializer {
    // Emit every collected field to the downstream serializer.
    // Iterating by reference clones only each key (needed for Key::from),
    // where the previous code cloned the entire map on every record.
    fn serialize(&self, _record: &Record, serializer: &mut dyn slog::Serializer) -> slog::Result {
        for (key, value) in self.fields.iter() {
            serializer.emit_str(Key::from(key.clone()), value)?;
        }
        Ok(())
    }
}
// Used to convert an slog::OwnedKVList into a hash map.
// Collects key/value pairs as strings so duplicates can be detected and
// pruned before the record reaches the JSON drain.
struct HashSerializer {
    fields: HashMap<String, String>,
}
impl HashSerializer {
    // Create an empty serializer.
    fn new() -> HashSerializer {
        HashSerializer {
            fields: HashMap::new(),
        }
    }
    // Take care to only add the first instance of a key. This matters for loggers (but not
    // Records) since child loggers have parents and the loggers are serialised child first,
    // meaning the *newest* fields are serialised first.
    fn add_field(&mut self, key: String, value: String) {
        // entry() performs a single map lookup instead of the former
        // contains_key() + insert() pair.
        self.fields.entry(key).or_insert(value);
    }
    // Drop a field (used to remove logger fields shadowed by record fields).
    fn remove_field(&mut self, key: &str) {
        self.fields.remove(key);
    }
}
impl slog::Serializer for HashSerializer {
    // Capture each key/value pair as strings for later de-duplication.
    fn emit_arguments(&mut self, key: Key, value: &std::fmt::Arguments) -> slog::Result {
        // to_string() is the idiomatic form of format!("{}", x).
        self.add_field(key.to_string(), value.to_string());
        Ok(())
    }
}
// A drain wrapper that removes logger fields shadowed by record fields,
// so each key appears at most once in the serialised output.
struct UniqueDrain<D> {
    drain: D,
}
impl<D> UniqueDrain<D> {
    // Wrap an inner drain, deduplicating keys before records reach it.
    fn new(drain: D) -> Self {
        // Field-init shorthand replaces the redundant `drain: drain`.
        UniqueDrain { drain }
    }
}
impl<D> Drain for UniqueDrain<D>
where
    D: slog::Drain,
{
    type Ok = ();
    type Err = io::Error;
    // Serialize the logger's fields and the record's fields into two maps,
    // remove from the logger map any key the record also defines (record
    // wins), then rebuild the record and forward it to the inner drain.
    fn log(&self, record: &Record, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
        let mut logger_serializer = HashSerializer::new();
        values.serialize(record, &mut logger_serializer)?;
        let mut record_serializer = HashSerializer::new();
        record.kv().serialize(record, &mut record_serializer)?;
        // Record fields shadow logger fields of the same name.
        for (key, _) in record_serializer.fields.iter() {
            logger_serializer.remove_field(key);
        }
        let record_owned_kv = OwnedKV(record_serializer);
        let record_static = record_static!(record.level(), "");
        let new_record = Record::new(&record_static, record.msg(), BorrowedKV(&record_owned_kv));
        let logger_owned_kv = OwnedKV(logger_serializer);
        let result = self
            .drain
            .log(&new_record, &OwnedKVList::from(logger_owned_kv));
        // The inner drain's error type is opaque here, so any failure is
        // mapped to a generic io::Error.
        match result {
            Ok(_t) => Ok(()),
            Err(_e) => Err(std::io::Error::new(
                std::io::ErrorKind::Other,
                "failed to drain log".to_string(),
            )),
        }
    }
}
// A RuntimeLevelFilter will discard all log records whose log level is less than the level
// specified in the struct.
// The level lives behind an Arc<Mutex<..>> so it can be changed at runtime.
struct RuntimeLevelFilter<D> {
    drain: D,
    level: Arc<Mutex<slog::Level>>,
}
impl<D> RuntimeLevelFilter<D> {
    // Create a filter around `drain` that drops records below `level`.
    fn new(drain: D, level: slog::Level) -> Self {
        RuntimeLevelFilter {
            // Field-init shorthand replaces the redundant `drain: drain`.
            drain,
            level: Arc::new(Mutex::new(level)),
        }
    }
    // Change the minimum level at runtime.
    fn set_level(&self, level: slog::Level) {
        // Lock the shared level directly; cloning the Arc first was a
        // needless refcount round-trip.
        let mut log_level = self.level.lock().unwrap();
        *log_level = level;
    }
}
impl<D> Drain for RuntimeLevelFilter<D>
where
    D: Drain,
{
    type Ok = Option<D::Ok>;
    type Err = Option<D::Err>;
    // Forward the record to the inner drain only when its level is at
    // least the configured minimum; filtered records yield Ok(None).
    fn log(
        &self,
        record: &slog::Record,
        values: &slog::OwnedKVList,
    ) -> result::Result<Self::Ok, Self::Err> {
        // Lock the level directly; cloning the Arc first was a needless
        // refcount round-trip.
        let log_level = self.level.lock().unwrap();
        if record.level().is_at_least(*log_level) {
            self.drain.log(record, values)?;
        }
        Ok(None)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::Value;
    use std::io::prelude::*;
    use tempfile::NamedTempFile;
    // End-to-end check: log one record through create_logger() and verify
    // every expected JSON field, including record-over-logger precedence
    // for the duplicated "subsystem" key.
    #[test]
    fn test_create_logger_write_to_tmpfile() {
        // Create a writer for the logger drain to use
        let writer = NamedTempFile::new().expect("failed to create tempfile");
        // Used to check file contents before the temp file is unlinked
        let mut writer_ref = writer.reopen().expect("failed to clone tempfile");
        let level = slog::Level::Trace;
        let name = "name";
        let source = "source";
        let record_subsystem = "record-subsystem";
        let record_key = "record-key-1";
        let record_value = "record-key-2";
        let logger = create_logger(name, source, level, writer);
        let msg = "foo, bar, baz";
        // Call the logger (which calls the drain)
        info!(logger, "{}", msg; "subsystem" => record_subsystem, record_key => record_value);
        // Force temp file to be flushed
        drop(logger);
        let mut contents = String::new();
        writer_ref
            .read_to_string(&mut contents)
            .expect("failed to read tempfile contents");
        // Convert file to JSON
        let fields: Value =
            serde_json::from_str(&contents).expect("failed to convert logfile to json");
        // Check the expected JSON fields
        let field_ts = fields.get("ts").expect("failed to find timestamp field");
        assert_ne!(field_ts, "");
        let field_version = fields.get("version").expect("failed to find version field");
        assert_eq!(field_version, env!("CARGO_PKG_VERSION"));
        let field_pid = fields.get("pid").expect("failed to find pid field");
        assert_ne!(field_pid, "");
        let field_level = fields.get("level").expect("failed to find level field");
        assert_eq!(field_level, "INFO");
        let field_msg = fields.get("msg").expect("failed to find msg field");
        assert_eq!(field_msg, msg);
        let field_name = fields.get("name").expect("failed to find name field");
        assert_eq!(field_name, name);
        let field_source = fields.get("source").expect("failed to find source field");
        assert_eq!(field_source, source);
        let field_subsystem = fields
            .get("subsystem")
            .expect("failed to find subsystem field");
        // The records field should take priority over the loggers field of the same name
        assert_eq!(field_subsystem, record_subsystem);
        let field_record_value = fields
            .get(record_key)
            .expect("failed to find record key field");
        assert_eq!(field_record_value, record_value);
    }
}

561
src/agent/src/main.rs Normal file
View File

@ -0,0 +1,561 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
#![allow(non_camel_case_types)]
#![allow(unused_parens)]
#![allow(unused_unsafe)]
#![allow(dead_code)]
#![allow(non_snake_case)]
#[macro_use]
extern crate lazy_static;
extern crate prctl;
extern crate protocols;
extern crate regex;
extern crate rustjail;
extern crate serde_json;
extern crate signal_hook;
#[macro_use]
extern crate scan_fmt;
extern crate oci;
#[macro_use]
extern crate slog;
extern crate slog_async;
extern crate slog_json;
use futures::*;
use nix::sys::wait::{self, WaitStatus};
use nix::unistd;
use prctl::set_child_subreaper;
use rustjail::errors::*;
use signal_hook::{iterator::Signals, SIGCHLD};
use std::collections::HashMap;
use std::env;
use std::fs;
use std::os::unix::fs::{self as unixfs};
use std::os::unix::io::AsRawFd;
use std::path::Path;
use std::sync::mpsc::{self, Sender};
use std::sync::{Arc, Mutex};
use std::{io, thread};
use unistd::Pid;
mod device;
mod logging;
mod mount;
mod namespace;
pub mod netlink;
mod network;
pub mod random;
mod sandbox;
mod uevent;
mod version;
use mount::{cgroups_mount, general_mount};
use sandbox::Sandbox;
use slog::Logger;
use uevent::watch_uevents;
mod grpc;
const NAME: &'static str = "kata-agent";
const VSOCK_ADDR: &'static str = "vsock://-1";
const VSOCK_PORT: u16 = 1024;
lazy_static! {
    // Maps a device sysfs-address key to a notification channel:
    // device::get_device_name() registers here and the uevent watcher
    // sends the resolved /dev name when the hotplug event arrives.
    static ref GLOBAL_DEVICE_WATCHER: Arc<Mutex<HashMap<String, Sender<String>>>> =
        Arc::new(Mutex::new(HashMap::new()));
}
use std::mem::MaybeUninit;
// announce logs the agent commit, agent version and API version at startup.
fn announce(logger: &Logger) {
    // A missing VERSION_COMMIT env var simply yields an empty commit string.
    let commit = env::var("VERSION_COMMIT").unwrap_or_else(|_| String::new());
    info!(logger, "announce";
        "agent-commit" => commit.as_str(),
        "agent-version" => version::AGENT_VERSION,
        "api-version" => version::API_VERSION,
    );
}
// Agent entry point: set up logging, parse the kernel command line,
// optionally start the debug console, initialise the sandbox, install the
// SIGCHLD reaper and uevent watcher, then serve gRPC until destroy_sandbox
// signals shutdown.
fn main() -> Result<()> {
    let writer = io::stdout();
    let logger = logging::create_logger(NAME, "agent", slog::Level::Info, writer);
    announce(&logger);
    // This "unused" variable is required as it enables the global (and crucially static) logger,
    // which is required to satisfy the lifetime constraints of the auto-generated gRPC code.
    let _guard = slog_scope::set_global_logger(logger.new(o!("subsystem" => "grpc")));
    env::set_var("RUST_BACKTRACE", "full");
    lazy_static::initialize(&SHELLS);
    parse_cmdline(KERNEL_CMDLINE_FILE)?;
    let shells = SHELLS.clone();
    // BUG FIX: the previous code produced the "no debug console" handle via
    // `MaybeUninit::zeroed().assume_init()`, i.e. an all-zero JoinHandle,
    // which is undefined behavior. An Option models the absent handle.
    let shell_handle = if unsafe { DEBUG_CONSOLE } {
        let thread_logger = logger.clone();
        Some(thread::spawn(move || {
            let shells = shells.lock().unwrap();
            let result = setup_debug_console(shells.to_vec());
            if result.is_err() {
                // Report error, but don't fail
                warn!(thread_logger, "failed to setup debug console";
                    "error" => format!("{}", result.unwrap_err()));
            }
        }))
    } else {
        None
    };
    // Running as PID 1 means the agent is init and must mount the rootfs.
    if unistd::getpid() == Pid::from_raw(1) {
        init_agent_as_init(&logger)?;
    }
    // Initialize unique sandbox structure.
    let s = Sandbox::new(&logger).map_err(|e| {
        error!(logger, "Failed to create sandbox with error: {:?}", e);
        e
    })?;
    let sandbox = Arc::new(Mutex::new(s));
    setup_signal_handler(&logger, sandbox.clone()).unwrap();
    watch_uevents(sandbox.clone());
    let (tx, rx) = mpsc::channel::<i32>();
    sandbox.lock().unwrap().sender = Some(tx);
    //vsock:///dev/vsock, port
    let mut server = grpc::start(sandbox.clone(), VSOCK_ADDR, VSOCK_PORT);
    /*
    let _ = fs::remove_file("/tmp/testagent");
    let _ = fs::remove_dir_all("/run/agent");
    let mut server = grpc::start(sandbox.clone(), "unix:///tmp/testagent", 1);
    */
    // Block until destroy_sandbox signals shutdown over the channel.
    let handle = thread::spawn(move || {
        let _ = rx.recv().unwrap();
    });
    handle.join().unwrap();
    if let Some(handle) = shell_handle {
        handle.join().unwrap();
    }
    let _ = server.shutdown().wait();
    let _ = fs::remove_file("/tmp/testagent");
    Ok(())
}
use nix::sys::wait::WaitPidFlag;
// setup_signal_handler makes the agent a child subreaper and spawns a
// thread that, on every SIGCHLD, reaps all exited children and propagates
// each exit code to its container process via the process's exit pipe.
fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result<()> {
    let logger = logger.new(o!("subsystem" => "signals"));
    set_child_subreaper(true).map_err(|err| {
        format!(
            "failed to setup agent as a child subreaper, failed with {}",
            err
        )
    })?;
    let signals = Signals::new(&[SIGCHLD])?;
    let s = sandbox.clone();
    thread::spawn(move || {
        'outer: for sig in signals.forever() {
            info!(logger, "received signal"; "signal" => sig);
            // several signals can be combined together
            // as one. So loop around to reap all
            // exited children
            'inner: loop {
                // WNOHANG: don't block once all exited children are reaped;
                // __WALL: reap children regardless of clone flags.
                let wait_status = match wait::waitpid(
                    Some(Pid::from_raw(-1)),
                    Some(WaitPidFlag::WNOHANG | WaitPidFlag::__WALL),
                ) {
                    Ok(s) => {
                        if s == WaitStatus::StillAlive {
                            continue 'outer;
                        }
                        s
                    }
                    Err(e) => {
                        info!(
                            logger,
                            "waitpid reaper failed";
                            "error" => e.as_errno().unwrap().desc()
                        );
                        continue 'outer;
                    }
                };
                let pid = wait_status.pid();
                if pid.is_some() {
                    let raw_pid = pid.unwrap().as_raw();
                    let child_pid = format!("{}", raw_pid);
                    let logger = logger.new(o!("child-pid" => child_pid));
                    let mut sandbox = s.lock().unwrap();
                    let process = sandbox.find_process(raw_pid);
                    // Children the sandbox doesn't know about are just logged.
                    if process.is_none() {
                        info!(logger, "child exited unexpectedly");
                        continue 'inner;
                    }
                    let mut p = process.unwrap();
                    if p.exit_pipe_w.is_none() {
                        error!(logger, "the process's exit_pipe_w isn't set");
                        continue 'inner;
                    }
                    let pipe_write = p.exit_pipe_w.unwrap();
                    let ret: i32;
                    match wait_status {
                        WaitStatus::Exited(_, c) => ret = c,
                        WaitStatus::Signaled(_, sig, _) => ret = sig as i32,
                        _ => {
                            info!(logger, "got wrong status for process";
                            "child-status" => format!("{:?}", wait_status));
                            continue 'inner;
                        }
                    }
                    p.exit_code = ret;
                    // Closing the write end signals waiters that the
                    // process has exited.
                    let _ = unistd::close(pipe_write);
                }
            }
        }
    });
    Ok(())
}
// init_agent_as_init will do the initializations such as setting up the rootfs
// when this agent has been run as the init process.
fn init_agent_as_init(logger: &Logger) -> Result<()> {
    general_mount(logger)?;
    cgroups_mount(logger)?;
    // Replace the static /dev/ptmx node with a symlink into the devpts mount.
    fs::remove_file(Path::new("/dev/ptmx"))?;
    unixfs::symlink(Path::new("/dev/pts/ptmx"), Path::new("/dev/ptmx"))?;
    // Become session leader and take the console as controlling terminal.
    unistd::setsid()?;
    unsafe {
        // NOTE(review): the ioctl return value is deliberately ignored
        // (best-effort); confirm this is intended.
        libc::ioctl(io::stdin().as_raw_fd(), libc::TIOCSCTTY, 1);
    }
    env::set_var("PATH", "/bin:/sbin/:/usr/bin/:/usr/sbin/");
    Ok(())
}
// Kernel command-line options recognised by parse_cmdline().
const LOG_LEVEL_FLAG: &'static str = "agent.log";
const DEV_MODE_FLAG: &'static str = "agent.devmode";
const TRACE_MODE_FLAG: &'static str = "agent.trace";
const USE_VSOCK_FLAG: &'static str = "agent.use_vsock";
const DEBUG_CONSOLE_FLAG: &'static str = "agent.debug_console";
const KERNEL_CMDLINE_FILE: &'static str = "/proc/cmdline";
const CONSOLE_PATH: &'static str = "/dev/console";
lazy_static! {
    // Candidate shells for the debug console; left empty under `cfg(test)`
    // so unit tests fully control the list.
    static ref SHELLS: Arc<Mutex<Vec<String>>> = {
        let mut v = Vec::new();
        if !cfg!(test) {
            v.push("/bin/bash".to_string());
            v.push("/bin/sh".to_string());
        }
        Arc::new(Mutex::new(v))
    };
}
// Global flags set once by parse_cmdline() before any thread is spawned.
pub static mut DEBUG_CONSOLE: bool = false;
// pub static mut LOG_LEVEL: ;
pub static mut DEV_MODE: bool = false;
// pub static mut TRACE_MODE: ;
// parse_cmdline reads the kernel command line from `file` and raises the
// global DEBUG_CONSOLE / DEV_MODE flags when the agent options appear.
fn parse_cmdline(file: &str) -> Result<()> {
    let cmdline = fs::read_to_string(file)?;
    // Iterate the whitespace-separated parameters directly, without
    // collecting them into an intermediate Vec.
    for param in cmdline.split_ascii_whitespace() {
        if param.starts_with(DEBUG_CONSOLE_FLAG) {
            unsafe { DEBUG_CONSOLE = true };
        }
        if param.starts_with(DEV_MODE_FLAG) {
            unsafe { DEV_MODE = true };
        }
    }
    Ok(())
}
use nix::fcntl::{self, OFlag};
use nix::sys::stat::Mode;
use std::os::unix::io::{FromRawFd, RawFd};
use std::path::PathBuf;
use std::process::{Command, Stdio};
// setup_debug_console opens /dev/console and runs the first candidate
// shell on it, blocking until that shell exits.
fn setup_debug_console(shells: Vec<String>) -> Result<()> {
    for shell in shells.iter() {
        let binary = PathBuf::from(shell);
        if binary.exists() {
            // Duplicate the console fd onto the shell's stdin/stdout/stderr.
            let f: RawFd = fcntl::open(CONSOLE_PATH, OFlag::O_RDWR, Mode::empty())?;
            let cmd = Command::new(shell)
                .stdin(unsafe { Stdio::from_raw_fd(f) })
                .stdout(unsafe { Stdio::from_raw_fd(f) })
                .stderr(unsafe { Stdio::from_raw_fd(f) })
                .spawn();
            let mut cmd = match cmd {
                Ok(c) => c,
                Err(_) => {
                    return Err(ErrorKind::ErrorCode("failed to spawn shell".to_string()).into())
                }
            };
            cmd.wait()?;
            return Ok(());
        } else {
            // NOTE(review): only the FIRST entry is ever tried — a missing
            // first shell returns "invalid shell" without falling back to
            // later entries. The unit tests depend on this exact behavior.
            return Err(ErrorKind::ErrorCode("invalid shell".to_string()).into());
        }
    }
    // Empty shell list.
    Err(ErrorKind::ErrorCode("no shell".to_string()).into())
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs::File;
use std::io::Write;
use tempfile::tempdir;
#[test]
fn test_setup_debug_console_no_shells() {
// Guarantee no shells have been added
// (required to avoid racing with
// test_setup_debug_console_invalid_shell()).
let shells_ref = SHELLS.clone();
let mut shells = shells_ref.lock().unwrap();
shells.clear();
let result = setup_debug_console(shells.to_vec());
assert!(result.is_err());
assert_eq!(result.unwrap_err().to_string(), "Error Code: 'no shell'");
}
#[test]
fn test_setup_debug_console_invalid_shell() {
let shells_ref = SHELLS.clone();
let mut shells = shells_ref.lock().unwrap();
let dir = tempdir().expect("failed to create tmpdir");
// Add an invalid shell
let shell = dir
.path()
.join("enoent")
.to_str()
.expect("failed to construct shell path")
.to_string();
shells.push(shell);
let result = setup_debug_console(shells.to_vec());
assert!(result.is_err());
assert_eq!(
result.unwrap_err().to_string(),
"Error Code: 'invalid shell'"
);
}
#[test]
fn test_parse_cmdline() {
#[derive(Debug)]
struct TestData<'a> {
contents: &'a str,
debug_console: bool,
dev_mode: bool,
}
let tests = &[
TestData {
contents: "",
debug_console: false,
dev_mode: false,
},
TestData {
contents: "foo",
debug_console: false,
dev_mode: false,
},
TestData {
contents: "foo bar",
debug_console: false,
dev_mode: false,
},
TestData {
contents: "foo bar",
debug_console: false,
dev_mode: false,
},
TestData {
contents: "foo agent bar",
debug_console: false,
dev_mode: false,
},
TestData {
contents: "foo debug_console agent bar devmode",
debug_console: false,
dev_mode: false,
},
TestData {
contents: "agent.debug_console",
debug_console: true,
dev_mode: false,
},
TestData {
contents: " agent.debug_console ",
debug_console: true,
dev_mode: false,
},
TestData {
contents: "agent.debug_console foo",
debug_console: true,
dev_mode: false,
},
TestData {
contents: " agent.debug_console foo",
debug_console: true,
dev_mode: false,
},
TestData {
contents: "foo agent.debug_console bar",
debug_console: true,
dev_mode: false,
},
TestData {
contents: "foo agent.debug_console",
debug_console: true,
dev_mode: false,
},
TestData {
contents: "foo agent.debug_console ",
debug_console: true,
dev_mode: false,
},
TestData {
contents: "agent.devmode",
debug_console: false,
dev_mode: true,
},
TestData {
contents: " agent.devmode ",
debug_console: false,
dev_mode: true,
},
TestData {
contents: "agent.devmode foo",
debug_console: false,
dev_mode: true,
},
TestData {
contents: " agent.devmode foo",
debug_console: false,
dev_mode: true,
},
TestData {
contents: "foo agent.devmode bar",
debug_console: false,
dev_mode: true,
},
TestData {
contents: "foo agent.devmode",
debug_console: false,
dev_mode: true,
},
TestData {
contents: "foo agent.devmode ",
debug_console: false,
dev_mode: true,
},
TestData {
contents: "agent.devmode agent.debug_console",
debug_console: true,
dev_mode: true,
},
];
let dir = tempdir().expect("failed to create tmpdir");
// First, check a missing file is handled
let file_path = dir.path().join("enoent");
let filename = file_path.to_str().expect("failed to create filename");
let result = parse_cmdline(&filename.to_owned());
assert!(result.is_err());
// Now, test various combinations of file contents
for (i, d) in tests.iter().enumerate() {
// Reset
unsafe {
DEBUG_CONSOLE = false;
DEV_MODE = false;
};
let msg = format!("test[{}]: {:?}", i, d);
let file_path = dir.path().join("cmdline");
let filename = file_path.to_str().expect("failed to create filename");
let mut file =
File::create(filename).expect(&format!("{}: failed to create file", msg));
file.write_all(d.contents.as_bytes())
.expect(&format!("{}: failed to write file contents", msg));
let result = parse_cmdline(filename);
assert!(result.is_ok(), "{}", msg);
unsafe {
assert_eq!(d.debug_console, DEBUG_CONSOLE, "{}", msg);
assert_eq!(d.dev_mode, DEV_MODE, "{}", msg);
};
}
}
}

1296
src/agent/src/mount.rs Normal file

File diff suppressed because it is too large Load Diff

119
src/agent/src/namespace.rs Normal file
View File

@ -0,0 +1,119 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use nix::mount::MsFlags;
use nix::sched::{unshare, CloneFlags};
use nix::unistd::{getpid, gettid};
use std::collections::HashMap;
use std::fs;
use std::fs::File;
use std::os::unix::io::AsRawFd;
use std::path::Path;
use std::thread;
use crate::mount::{BareMount, FLAGS};
use slog::Logger;
//use container::Process;
// Directory where persistent namespace files are bind-mounted so the
// namespaces outlive the thread that created them.
const PERSISTENT_NS_DIR: &'static str = "/var/run/sandbox-ns";
// Namespace type names, matching the /proc/<pid>/ns/* entry names.
pub const NSTYPEIPC: &'static str = "ipc";
pub const NSTYPEUTS: &'static str = "uts";
pub const NSTYPEPID: &'static str = "pid";
lazy_static! {
    // Maps a persistable namespace type to the unshare(2) clone flag used
    // to create it. "pid" is deliberately absent: pid namespaces cannot
    // be persisted (see setup_persistent_ns below).
    static ref CLONE_FLAG_TABLE: HashMap<&'static str, CloneFlags> = {
        let mut m = HashMap::new();
        m.insert(NSTYPEIPC, CloneFlags::CLONE_NEWIPC);
        m.insert(NSTYPEUTS, CloneFlags::CLONE_NEWUTS);
        m
    };
}
// A persistent namespace, identified by the filesystem path of its
// bind-mounted namespace file.
#[derive(Debug, Default)]
pub struct Namespace {
    pub path: String,
}
// Returns the namespace file path of type `ns_type` for the calling
// thread, e.g. "/proc/<pid>/task/<tid>/ns/ipc".
pub fn get_current_thread_ns_path(ns_type: &str) -> String {
    // nix's Pid implements Display, so the intermediate to_string()
    // calls of the original version were redundant allocations.
    format!("/proc/{}/task/{}/ns/{}", getpid(), gettid(), ns_type)
}
// setup_persistent_ns creates a persistent namespace without switching to it.
// Note, pid namespaces cannot be persisted.
//
// The namespace is created with unshare(2) on a dedicated, short-lived
// thread so the calling thread's own namespaces are untouched, then
// bind-mounted under PERSISTENT_NS_DIR/<ns_type> to keep it alive after
// that thread exits.
pub fn setup_persistent_ns(logger: Logger, ns_type: &'static str) -> Result<Namespace, String> {
    if let Err(err) = fs::create_dir_all(PERSISTENT_NS_DIR) {
        return Err(err.to_string());
    }
    let ns_path = Path::new(PERSISTENT_NS_DIR);
    let new_ns_path = ns_path.join(ns_type);
    // Create an empty file to act as the bind-mount target below.
    if let Err(err) = File::create(new_ns_path.as_path()) {
        return Err(err.to_string());
    }
    let new_thread = thread::spawn(move || {
        let origin_ns_path = get_current_thread_ns_path(ns_type);
        // NOTE(review): the File is a temporary inside the match arm, so it
        // is dropped (and the fd closed) as soon as as_raw_fd() returns;
        // only the open's success/failure is effectively checked here —
        // confirm whether the fd was meant to stay open.
        let _origin_ns_fd = match File::open(Path::new(&origin_ns_path)) {
            Err(err) => return Err(err.to_string()),
            Ok(file) => file.as_raw_fd(),
        };
        // Create a new namespace of the requested type on the current
        // thread; after unshare(2), origin_ns_path refers to the new one.
        let cf = match CLONE_FLAG_TABLE.get(ns_type) {
            None => return Err(format!("Failed to get ns type {}", ns_type).to_string()),
            Some(cf) => cf,
        };
        if let Err(err) = unshare(*cf) {
            return Err(err.to_string());
        }
        // Bind mount the new namespace from the current thread onto the mount point to persist it.
        let source: &str = origin_ns_path.as_str();
        let destination: &str = new_ns_path.as_path().to_str().unwrap_or("none");
        let _recursive = true;
        let _readonly = true;
        let mut flags = MsFlags::empty();
        // Translate the textual "rbind" option into MsFlags via the shared
        // FLAGS table from the mount module.
        match FLAGS.get("rbind") {
            Some(x) => {
                let (_, f) = *x;
                flags = flags | f;
            }
            None => (),
        };
        let bare_mount = BareMount::new(source, destination, "none", flags, "", &logger);
        if let Err(err) = bare_mount.mount() {
            return Err(format!(
                "Failed to mount {} to {} with err:{:?}",
                source, destination, err
            ));
        }
        Ok(())
    });
    // Propagate both thread-panic and in-thread errors to the caller.
    match new_thread.join() {
        Ok(t) => match t {
            Err(err) => return Err(err),
            Ok(()) => (),
        },
        Err(err) => return Err(format!("Failed to join thread {:?}!", err)),
    }
    // Recompute the path: the earlier PathBuf was moved into the closure.
    let new_ns_path = ns_path.join(ns_type);
    Ok(Namespace {
        path: new_ns_path.into_os_string().into_string().unwrap(),
    })
}

2841
src/agent/src/netlink.rs Normal file

File diff suppressed because it is too large Load Diff

30
src/agent/src/network.rs Normal file
View File

@ -0,0 +1,30 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use protocols::types::{Interface, Route};
use std::collections::HashMap;
// Network fully describes a sandbox network with its interfaces, routes and dns
// related information.
#[derive(Debug, Default)]
pub struct Network {
    // Interfaces for the sandbox — presumably keyed by interface name;
    // TODO confirm against the code that populates this map.
    ifaces: HashMap<String, Interface>,
    // Routing table entries for the sandbox.
    routes: Vec<Route>,
    // DNS server entries, appended via set_dns().
    dns: Vec<String>,
}
impl Network {
pub fn new() -> Network {
Network {
ifaces: HashMap::new(),
routes: Vec::new(),
dns: Vec::new(),
}
}
pub fn set_dns(&mut self, dns: String) {
self.dns.push(dns);
}
}

30
src/agent/src/random.rs Normal file
View File

@ -0,0 +1,30 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use libc;
use nix::errno::Errno;
use nix::fcntl::{self, OFlag};
use nix::sys::stat::Mode;
use rustjail::errors::*;
use std::fs;
// Device used to feed and credit the kernel entropy pool.
pub const RNGDEV: &'static str = "/dev/random";
// ioctl request: add bits to the kernel entropy count (RNDADDTOENTCNT).
pub const RNDADDTOENTCNT: libc::c_int = 0x40045201;
// ioctl request: force an immediate reseed of the CRNG.
pub const RNDRESEEDRNG: libc::c_int = 0x5207;
// Writes `data` into /dev/random, credits the entropy pool with the
// corresponding bit count, then forces a CRNG reseed.
//
// Fix: the descriptor opened for the ioctl calls is now always closed,
// including on ioctl-error paths, instead of being leaked.
pub fn reseed_rng(data: &[u8]) -> Result<()> {
    let len = data.len() as libc::c_long;
    fs::write(RNGDEV, data)?;
    let fd = fcntl::open(RNGDEV, OFlag::O_RDWR, Mode::from_bits_truncate(0o022))?;
    // Run both ioctls inside a closure so the single close() below covers
    // every exit path.
    let result = (|| -> Result<()> {
        let ret = unsafe { libc::ioctl(fd, RNDADDTOENTCNT, &len as *const libc::c_long) };
        Errno::result(ret).map(drop)?;
        let ret = unsafe { libc::ioctl(fd, RNDRESEEDRNG, 0) };
        Errno::result(ret).map(drop)?;
        Ok(())
    })();
    // SAFETY: fd was returned by a successful open() above and is not
    // used after this point.
    unsafe { libc::close(fd) };
    result
}

273
src/agent/src/sandbox.rs Normal file
View File

@ -0,0 +1,273 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
//use crate::container::Container;
use crate::mount::{get_mount_fs_type, remove_mounts, TYPEROOTFS};
use crate::namespace::{setup_persistent_ns, Namespace, NSTYPEIPC, NSTYPEUTS};
use crate::netlink::{RtnlHandle, NETLINK_ROUTE};
use crate::network::Network;
use libc::pid_t;
use protocols::agent::OnlineCPUMemRequest;
use regex::Regex;
use rustjail::cgroups;
use rustjail::container::BaseContainer;
use rustjail::container::LinuxContainer;
use rustjail::errors::*;
use rustjail::process::Process;
use slog::Logger;
use std::collections::HashMap;
use std::fs;
use std::sync::mpsc::Sender;
#[derive(Debug)]
pub struct Sandbox {
    pub logger: Logger,
    // Sandbox identifier.
    pub id: String,
    pub hostname: String,
    // Running containers, keyed by container id.
    pub containers: HashMap<String, LinuxContainer>,
    pub network: Network,
    // Sandbox-level mount points.
    pub mounts: Vec<String>,
    // Per-container mount lists, keyed by container id.
    pub container_mounts: HashMap<String, Vec<String>>,
    // Maps a device path to its device node name — populated from uevents;
    // TODO confirm key/value semantics against the uevent watcher.
    pub pci_device_map: HashMap<String, String>,
    // Persistent UTS/IPC namespaces shared by all containers.
    pub shared_utsns: Namespace,
    pub shared_ipcns: Namespace,
    // Reference count per sandbox-storage path (see set_sandbox_storage).
    pub storages: HashMap<String, u32>,
    pub running: bool,
    // True when pivot_root(2) cannot be used (initramfs rootfs).
    pub no_pivot_root: bool,
    enable_grpc_trace: bool,
    pub sandbox_pid_ns: bool,
    pub sender: Option<Sender<i32>>,
    // Netlink route socket handle.
    pub rtnl: Option<RtnlHandle>,
}
impl Sandbox {
    /// Creates an empty, not-yet-running sandbox whose log records carry
    /// the "sandbox" subsystem tag.
    ///
    /// `no_pivot_root` is enabled when "/" is of type TYPEROOTFS, where
    /// pivot_root(2) cannot be used.
    pub fn new(logger: &Logger) -> Result<Self> {
        let fs_type = get_mount_fs_type("/")?;
        let logger = logger.new(o!("subsystem" => "sandbox"));
        Ok(Sandbox {
            logger: logger,
            id: "".to_string(),
            hostname: "".to_string(),
            network: Network::new(),
            containers: HashMap::new(),
            mounts: Vec::new(),
            container_mounts: HashMap::new(),
            pci_device_map: HashMap::new(),
            shared_utsns: Namespace {
                path: "".to_string(),
            },
            shared_ipcns: Namespace {
                path: "".to_string(),
            },
            storages: HashMap::new(),
            running: false,
            no_pivot_root: fs_type.eq(TYPEROOTFS),
            enable_grpc_trace: false,
            sandbox_pid_ns: false,
            sender: None,
            // NOTE(review): panics if the netlink route socket cannot be
            // created — consider propagating the error instead of unwrap().
            rtnl: Some(RtnlHandle::new(NETLINK_ROUTE, 0).unwrap()),
        })
    }
    // unset_sandbox_storage will decrement the sandbox storage
    // reference counter. If there aren't any containers using
    // that sandbox storage, this method will remove the
    // storage reference from the sandbox and return true to
    // let the caller know that they can clean up the storage
    // related directories by calling remove_sandbox_storage.
    // Returns false if the path was not a tracked sandbox storage.
    //
    // It's assumed that caller is calling this method after
    // acquiring a lock on sandbox.
    pub fn unset_sandbox_storage(&mut self, path: &str) -> bool {
        match self.storages.get_mut(path) {
            None => return false,
            Some(count) => {
                *count -= 1;
                // Last user gone: drop the reference entirely.
                if *count < 1 {
                    self.storages.remove(path);
                }
                return true;
            }
        }
    }
    // remove_sandbox_storage removes the sandbox storage if no
    // containers are using that storage: it unmounts the path and
    // deletes the backing directory tree.
    //
    // It's assumed that caller is calling this method after
    // acquiring a lock on sandbox.
    pub fn remove_sandbox_storage(&self, path: &str) -> Result<()> {
        let mounts = vec![path.to_string()];
        remove_mounts(&mounts)?;
        fs::remove_dir_all(path)?;
        Ok(())
    }
    // unset_and_remove_sandbox_storage unsets the storage from sandbox
    // and if there are no containers using this storage it will
    // remove it from the sandbox.
    //
    // It's assumed that caller is calling this method after
    // acquiring a lock on sandbox.
    pub fn unset_and_remove_sandbox_storage(&mut self, path: &str) -> Result<()> {
        if self.unset_sandbox_storage(path) {
            return self.remove_sandbox_storage(path);
        }
        Ok(())
    }
    /// Reports whether the sandbox has been started.
    pub fn is_running(&self) -> bool {
        self.running
    }
    /// Records the sandbox hostname.
    pub fn set_hostname(&mut self, hostname: String) {
        self.hostname = hostname;
    }
    /// Creates the persistent IPC and UTS namespaces shared by all
    /// containers in this sandbox. Always returns Ok(true) on success.
    pub fn setup_shared_namespaces(&mut self) -> Result<bool> {
        // Set up shared IPC namespace
        self.shared_ipcns = match setup_persistent_ns(self.logger.clone(), NSTYPEIPC) {
            Ok(ns) => ns,
            Err(err) => {
                // Fix: "persisten" typo in the original error message.
                return Err(ErrorKind::ErrorCode(format!(
                    "Failed to setup persistent IPC namespace with error: {}",
                    &err
                ))
                .into())
            }
        };
        // Set up shared UTS namespace
        self.shared_utsns = match setup_persistent_ns(self.logger.clone(), NSTYPEUTS) {
            Ok(ns) => ns,
            Err(err) => {
                // Fix: "persisten" typo and stray trailing space in the
                // original error message.
                return Err(ErrorKind::ErrorCode(format!(
                    "Failed to setup persistent UTS namespace with error: {}",
                    &err
                ))
                .into())
            }
        };
        Ok(true)
    }
    /// Registers a container under its id.
    pub fn add_container(&mut self, c: LinuxContainer) {
        self.containers.insert(c.id.clone(), c);
    }
    /// Looks up a container by id.
    pub fn get_container(&mut self, id: &str) -> Option<&mut LinuxContainer> {
        self.containers.get_mut(id)
    }
    /// Finds the process with the given pid across all containers.
    pub fn find_process<'a>(&'a mut self, pid: pid_t) -> Option<&'a mut Process> {
        for (_, c) in self.containers.iter_mut() {
            // Probe with get() first, then re-borrow mutably for the
            // return; avoids holding a mutable borrow across the check.
            if c.processes.get(&pid).is_some() {
                return c.processes.get_mut(&pid);
            }
        }
        None
    }
    // set_sandbox_storage sets the sandbox level reference
    // counter for the sandbox storage.
    // This method also returns a boolean to let
    // callers know if the storage already existed or not.
    // It will return true if storage is new.
    //
    // It's assumed that caller is calling this method after
    // acquiring a lock on sandbox.
    pub fn set_sandbox_storage(&mut self, path: &str) -> bool {
        match self.storages.get_mut(path) {
            None => {
                self.storages.insert(path.to_string(), 1);
                true
            }
            Some(count) => {
                *count += 1;
                false
            }
        }
    }
    /// Destroys every container in the sandbox, stopping on the first
    /// error.
    pub fn destroy(&mut self) -> Result<()> {
        for (_, ctr) in &mut self.containers {
            ctr.destroy()?;
        }
        Ok(())
    }
    /// Onlines hot-plugged CPUs (and, unless `cpu_only` is set, memory),
    /// then propagates the resulting guest cpuset to every container's
    /// cgroup.
    pub fn online_cpu_memory(&self, req: &OnlineCPUMemRequest) -> Result<()> {
        if req.nb_cpus > 0 {
            // online cpus
            online_cpus(&self.logger, req.nb_cpus as i32)?;
        }
        if !req.cpu_only {
            // online memory
            online_memory(&self.logger)?;
        }
        let cpuset = cgroups::fs::get_guest_cpuset()?;
        for (_, ctr) in self.containers.iter() {
            info!(self.logger, "updating {}", ctr.id.as_str());
            ctr.cgroup_manager
                .as_ref()
                .unwrap()
                .update_cpuset_path(cpuset.as_str())?;
        }
        Ok(())
    }
}
// sysfs directory whose cpuN entries are scanned for offline CPUs.
pub const CPU_ONLINE_PATH: &'static str = "/sys/devices/system/cpu";
// sysfs directory whose memoryN entries are scanned for offline memory.
pub const MEMORY_ONLINE_PATH: &'static str = "/sys/devices/system/memory";
// Per-resource control file; writing "1" brings the resource online.
pub const ONLINE_FILE: &'static str = "online";
// Walks the entries of sysfs directory `path` whose names match `pattern`
// and onlines each entry whose "online" file currently reads "0" by
// writing "1" to it.
//
// If `num` is positive, at most `num` resources are onlined. The number
// of resources actually onlined is returned in every case (fix: the
// original discarded the count and returned Ok(0) when `num` was not
// positive).
fn online_resources(logger: &Logger, path: &str, pattern: &str, num: i32) -> Result<i32> {
    let re = Regex::new(pattern)?;
    let mut count = 0;
    for e in fs::read_dir(path)? {
        let entry = e?;
        let tmpname = entry.file_name();
        let name = tmpname.to_str().unwrap();
        let p = entry.path();
        if !re.is_match(name) {
            continue;
        }
        let file = format!("{}/{}", p.to_str().unwrap(), ONLINE_FILE);
        info!(logger, "{}", file.as_str());
        let c = fs::read_to_string(file.as_str())?;
        // An offline resource reports "0"; skip anything already online.
        if c.trim().contains("0") {
            fs::write(file.as_str(), "1")?;
            count += 1;
            if num > 0 && count == num {
                break;
            }
        }
    }
    Ok(count)
}
// Onlines up to `num` offline CPUs; returns the number actually onlined.
fn online_cpus(logger: &Logger, num: i32) -> Result<i32> {
    online_resources(logger, CPU_ONLINE_PATH, r"cpu[0-9]+", num)
}
// Onlines every offline memory block (-1 means "no limit"); the count
// from online_resources is discarded.
fn online_memory(logger: &Logger) -> Result<()> {
    online_resources(logger, MEMORY_ONLINE_PATH, r"memory[0-9]+", -1)?;
    Ok(())
}

141
src/agent/src/uevent.rs Normal file
View File

@ -0,0 +1,141 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use crate::device::{online_device, ROOT_BUS_PATH, SCSI_BLOCK_SUFFIX, SYSFS_DIR};
use crate::grpc::SYSFS_MEMORY_ONLINE_PATH;
use crate::netlink::{RtnlHandle, NETLINK_UEVENT};
use crate::sandbox::Sandbox;
use crate::GLOBAL_DEVICE_WATCHER;
use std::sync::{Arc, Mutex};
use std::thread;
// Keys of interest in a kernel uevent netlink message.
pub const U_EVENT_ACTION: &'static str = "ACTION";
pub const U_EVENT_DEV_PATH: &'static str = "DEVPATH";
pub const U_EVENT_SUB_SYSTEM: &'static str = "SUBSYSTEM";
pub const U_EVENT_SEQ_NUM: &'static str = "SEQNUM";
pub const U_EVENT_DEV_NAME: &'static str = "DEVNAME";
pub const U_EVENT_INTERFACE: &'static str = "INTERFACE";
// Parsed representation of a single kernel uevent message. Fields keep
// their raw string form as received from the kernel.
#[derive(Debug, Default)]
pub struct Uevent {
    action: String,
    devpath: String,
    devname: String,
    subsystem: String,
    seqnum: String,
    interface: String,
}
// Parses a raw uevent message into a Uevent.
//
// A message is a sequence of NUL-separated fields: the first is the
// "action@devpath" summary line and is ignored; every following field is
// a KEY=VALUE pair. Fields without '=' and unknown keys are skipped.
fn parse_uevent(message: &str) -> Uevent {
    let mut event = Uevent::default();
    for field in message.split('\0').skip(1) {
        // Split on the first '=' only, so values may contain '='.
        let mut kv = field.splitn(2, '=');
        let (key, value) = match (kv.next(), kv.next()) {
            (Some(k), Some(v)) => (k, v),
            _ => continue, // not a KEY=VALUE field
        };
        match key {
            U_EVENT_ACTION => event.action = value.to_string(),
            U_EVENT_DEV_NAME => event.devname = value.to_string(),
            U_EVENT_SUB_SYSTEM => event.subsystem = value.to_string(),
            U_EVENT_DEV_PATH => event.devpath = value.to_string(),
            U_EVENT_SEQ_NUM => event.seqnum = value.to_string(),
            U_EVENT_INTERFACE => event.interface = value.to_string(),
            _ => (),
        }
    }
    event
}
// Spawns a background thread that listens on a NETLINK_UEVENT socket and
// reacts to kernel uevents: block-device events under the PCI root bus
// update the sandbox pci_device_map and wake any watcher registered for
// that device address; other events that map to a memory sysfs path are
// onlined immediately. The thread loops forever and is never joined.
pub fn watch_uevents(sandbox: Arc<Mutex<Sandbox>>) {
    let sref = sandbox.clone();
    // The sandbox lock is held only long enough to derive the logger;
    // the guard drops when this function returns.
    let s = sref.lock().unwrap();
    let logger = s.logger.new(o!("subsystem" => "uevent"));
    thread::spawn(move || {
        // NOTE(review): unwrap() aborts the watcher thread (not the whole
        // agent) if the netlink socket cannot be created — confirm this
        // failure mode is acceptable.
        let rtnl = RtnlHandle::new(NETLINK_UEVENT, 1).unwrap();
        loop {
            match rtnl.recv_message() {
                Err(e) => {
                    error!(logger, "receive uevent message failed"; "error" => format!("{}", e))
                }
                Ok(data) => {
                    let text = String::from_utf8(data);
                    match text {
                        Err(e) => {
                            error!(logger, "failed to convert bytes to text"; "error" => format!("{}", e))
                        }
                        Ok(text) => {
                            let event = parse_uevent(&text);
                            info!(logger, "got uevent message"; "event" => format!("{:?}", event));
                            // Check if device hotplug event results in a device node being created.
                            if event.devname != ""
                                && event.devpath.starts_with(ROOT_BUS_PATH)
                                && event.subsystem == "block"
                            {
                                // Lock order here is watcher-then-sandbox;
                                // NOTE(review): verify all other call sites
                                // use the same order to avoid deadlock.
                                let watcher = GLOBAL_DEVICE_WATCHER.clone();
                                let mut w = watcher.lock().unwrap();
                                let s = sandbox.clone();
                                let mut sb = s.lock().unwrap();
                                // Add the device node name to the pci device map.
                                sb.pci_device_map
                                    .insert(event.devpath.clone(), event.devname.clone());
                                // Notify watchers that are interested in the udev event.
                                // Close the channel after watcher has been notified.
                                let devpath = event.devpath.clone();
                                // Collect the keys of notified watchers so
                                // they can be removed after iteration.
                                let empties: Vec<_> = w
                                    .iter()
                                    .filter(|(dev_addr, _)| {
                                        let pci_p = format!("{}/{}", ROOT_BUS_PATH, *dev_addr);
                                        // blk block device
                                        devpath.starts_with(pci_p.as_str()) ||
                                        // scsi block device
                                        {
                                            (*dev_addr).ends_with(SCSI_BLOCK_SUFFIX) &&
                                            devpath.contains(*dev_addr)
                                        }
                                    })
                                    .map(|(k, sender)| {
                                        let devname = event.devname.clone();
                                        // Send failures are deliberately
                                        // ignored: the receiver may be gone.
                                        let _ = sender.send(devname);
                                        k.clone()
                                    })
                                    .collect();
                                for empty in empties {
                                    w.remove(&empty);
                                }
                            } else {
                                let online_path =
                                    format!("{}/{}/online", SYSFS_DIR, &event.devpath);
                                if online_path.starts_with(SYSFS_MEMORY_ONLINE_PATH) {
                                    // Check memory hotplug and online if possible
                                    match online_device(online_path.as_ref()) {
                                        Ok(_) => (),
                                        Err(e) => error!(
                                            logger,
                                            "failed to online device";
                                            "device" => &event.devpath,
                                            "error" => format!("{}", e),
                                        ),
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    });
}

7
src/agent/src/version.rs Normal file
View File

@ -0,0 +1,7 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
// Version string of the agent binary itself.
pub const AGENT_VERSION: &'static str = "1.4.5";
// Version of the agent API — presumably the gRPC protocol version
// reported to the runtime; TODO confirm against the grpc module.
pub const API_VERSION: &'static str = "0.0.1";