diff --git a/.github/workflows/bvt.yml b/.github/workflows/bvt.yml index b3011f5c..f8eaab9f 100644 --- a/.github/workflows/bvt.yml +++ b/.github/workflows/bvt.yml @@ -10,6 +10,9 @@ jobs: steps: - name: Checkout uses: actions/checkout@v3 + - name: Install Protoc + run: ./install_protoc.sh + shell: bash - name: Check run: | make check-all @@ -23,15 +26,16 @@ jobs: steps: - name: Checkout uses: actions/checkout@v3 + - name: Install Protoc + run: ./install_protoc.sh + shell: bash - name: Build run: | make make -C compiler make -C ttrpc-codegen make -C example build-examples - # It's important for windows to fail correctly - # https://github.com/actions/runner-images/issues/6668 - shell: bash + make -C example2 build-examples deny: runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index ff70e924..c1d65e21 100644 --- a/.gitignore +++ b/.gitignore @@ -6,4 +6,9 @@ Cargo.lock .idea *.o example/protocols/**/*.rs +!example/protocols/**/mod.rs +example2/protocols/**/*.rs +!example2/protocols/**/mod.rs src/ttrpc.rs +example2/protocols/**/*.rs +!example2/protocols/**/mod.rs \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index e8a8d920..37b3d48c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,8 @@ description = "A Rust version of ttrpc." 
rust-version = "1.70" [dependencies] -protobuf = { version = "3.1.0" } +prost = { version = "0.11", optional = true } +protobuf = {version = "3.1.0", optional = true} libc = { version = "0.2.59", features = [ "extra_traits" ] } nix = "0.26.2" log = "0.4" @@ -34,11 +35,14 @@ tokio-vsock = { version = "0.7.0", optional = true } # lock home to avoid conflict with latest version home = "=0.5.9" protobuf-codegen = "3.1.0" +prost-build = { version = "0.13", optional = true } [features] -default = ["sync"] +default = ["sync","rustprotobuf"] async = ["async-trait", "async-stream", "tokio", "futures", "tokio-vsock"] sync = [] +prost = ["dep:prost", "dep:prost-build"] +rustprotobuf = ["dep:protobuf"] [package.metadata.docs.rs] all-features = true diff --git a/Makefile b/Makefile index 3ec99be6..838c25bb 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,5 @@ +PROTOC ?= $(shell which protoc 2>/dev/null || echo $(HOME)/protoc/bin/protoc) + all: debug test # @@ -21,12 +23,24 @@ build: debug .PHONY: test test: - cargo test --all-features --verbose +ifeq ($(OS),Windows_NT) + cargo test --features sync,async,rustprotobuf +else + # cargo test --all-features --verbose + cargo test --features sync,async,rustprotobuf + cargo test --no-default-features --features sync,async,prost +endif .PHONY: check check: cargo fmt --all -- --check - cargo clippy --all-targets --all-features -- -D warnings + cargo clippy --all-targets --features sync,async -- -D warnings + # Skip prost check on Windows +ifeq ($(OS),Windows_NT) + @echo "Skipping prost check on Windows" +else + cargo clippy --all-targets --no-default-features --features sync,async,prost -- -D warnings +endif .PHONY: check-all check-all: diff --git a/README.md b/README.md index 692e2196..70f61bce 100644 --- a/README.md +++ b/README.md @@ -138,3 +138,19 @@ cargo update cargo install --force --path . ``` 3. Build your project. 
+ +# ttrpc-rust with the Prost + +The new version of the ttrpc-rust is built with the Prost crate, a modern +protobuf compiler written by Rust. There are certain different behaviors from +the Rust-protobuf version: + +1. The protoc should be installed. +2. Enabling "prost" feature for the ttrpc-rust. +3. The Rust files are named based on their package name, rather than the proto + filename, e.g. `ttrpc = { version = "1.0", features = ["prost"] }`. +4. Some variable names are different, e.g. for "cpu", "CPU" is generated by the + Rust-protobuf, and "Cpu" is generated by the Prost. + +The "example" is an example with the Rust-protobuf version, and the "example2" +is an example with the Prost version. diff --git a/build.rs b/build.rs index 5a094a3e..1a044f69 100644 --- a/build.rs +++ b/build.rs @@ -7,6 +7,11 @@ fn main() { let path: PathBuf = [out_dir.clone(), "mod.rs".to_string()].iter().collect(); fs::write(path, "pub mod ttrpc;").unwrap(); + generate_ttrpc(&out_dir); +} + +#[cfg(not(feature = "prost"))] +fn generate_ttrpc(out_dir: &str) { let customize = protobuf_codegen::Customize::default() .gen_mod_rs(false) .generate_accessors(true); @@ -20,3 +25,70 @@ fn main() { .run() .expect("Codegen failed."); } + +#[cfg(feature = "prost")] +fn generate_ttrpc(out_dir: &str) { + let mut config = prost_build::Config::new(); + config + .out_dir(out_dir) + .compile_well_known_types() + .protoc_arg("--experimental_allow_proto3_optional") + .enum_attribute("Code", "#[allow(non_camel_case_types)]") + .compile_protos(&["src/ttrpc.proto"], &["src"]) + .expect("Codegen failed"); + + // read ttrpc.rs + let ttrpc_path = format!("{}/ttrpc.rs", out_dir); + let content = fs::read_to_string(&ttrpc_path).expect("Failed to read ttrpc.rs"); + + // define the enum value name pairs + let replacements = [ + ("Ok", "OK"), + ("Cancelled", "CANCELLED"), + ("Unknown", "UNKNOWN"), + ("InvalidArgument", "INVALID_ARGUMENT"), + ("DeadlineExceeded", "DEADLINE_EXCEEDED"), + ("NotFound", "NOT_FOUND"), 
("AlreadyExists", "ALREADY_EXISTS"), + ("PermissionDenied", "PERMISSION_DENIED"), + ("Unauthenticated", "UNAUTHENTICATED"), + ("ResourceExhausted", "RESOURCE_EXHAUSTED"), + ("FailedPrecondition", "FAILED_PRECONDITION"), + ("Aborted", "ABORTED"), + ("OutOfRange", "OUT_OF_RANGE"), + ("Unimplemented", "UNIMPLEMENTED"), + ("Internal", "INTERNAL"), + ("Unavailable", "UNAVAILABLE"), + ("DataLoss", "DATA_LOSS"), + ]; + + // replace the enum value in the file + let mut modified_content = content.clone(); + + // replace the enum definition + for (pascal_case, upper_case) in &replacements { + // replace the enum definition line + let enum_pattern = format!(" {} = ", pascal_case); + let enum_replacement = format!(" {} = ", upper_case); + modified_content = modified_content.replace(&enum_pattern, &enum_replacement); + + // replace the as_str_name function + let match_pattern = format!(" Self::{} => ", pascal_case); + let match_replacement = format!(" Self::{} => ", upper_case); + modified_content = modified_content.replace(&match_pattern, &match_replacement); + + // replace the from_str_name function + let from_str_pattern = format!( + " \"{}\" => Some(Self::{})", + upper_case, pascal_case + ); + let from_str_replacement = format!( + " \"{}\" => Some(Self::{})", + upper_case, upper_case + ); + modified_content = modified_content.replace(&from_str_pattern, &from_str_replacement); + } + + // write the modified content back to the file + fs::write(&ttrpc_path, modified_content).expect("Failed to write modified ttrpc.rs"); +} diff --git a/codegen/Cargo.toml b/codegen/Cargo.toml new file mode 100644 index 00000000..ab159f7e --- /dev/null +++ b/codegen/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "ttrpc-codegen" +version = "1.0.0" +edition = "2021" +authors = ["The Ant Group Kata Team "] +license = "Apache-2.0" +keywords = ["codegen", "ttrpc", "protobuf"] +description = "Rust codegen for ttrpc using prost crate" +categories = ["network-programming", 
"development-tools::build-utils"] +repository = "https://github.com/containerd/ttrpc-rust/tree/master/codegen" +homepage = "https://github.com/containerd/ttrpc-rust/tree/master/codegen" +readme = "README.md" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +prost = "0.11" +prost-types = "0.11" +prost-build = "0.11" +proc-macro2 = "1.0" +quote = "1.0" +anyhow = "^1.0" +lazy_static = "1.4" +regex = "1.7" diff --git a/codegen/Makefile b/codegen/Makefile new file mode 100644 index 00000000..8362c418 --- /dev/null +++ b/codegen/Makefile @@ -0,0 +1,35 @@ +all: debug test + +# +# Build +# + +.PHONY: debug +debug: + cargo build --verbose --all-targets + +.PHONY: release +release: + cargo build --release + +.PHONY: build +build: debug + +# +# Tests and linters +# + +.PHONY: test +test: + cargo test --verbose + +.PHONY: check +check: + cargo fmt --all -- --check + cargo clippy --all-targets --all-features -- -D warnings + +.PHONY: deps +deps: + rustup update stable + rustup default stable + rustup component add rustfmt clippy diff --git a/codegen/README.md b/codegen/README.md new file mode 100644 index 00000000..05a4c648 --- /dev/null +++ b/codegen/README.md @@ -0,0 +1,34 @@ +# Ttrpc-rust Codegen + +## Getting started + +Please ensure that the protoc has been installed on your local environment. Then +write the following code into "build.rs". + +```rust +let mut protos = vec![ + "protocols/protos/health.proto", + "protocols/protos/agent.proto", + "protocols/protos/oci.proto", +]; + +let includes = vec!["protocols/protos"]; + +let codegen = CodegenBuilder::new() + .set_out_dir(&"protocols/sync") + .set_protos(&protos) + .set_includes(&includes) + .set_serde(true) + .set_async_mode(AsyncMode::None) + .set_generate_service(true) + .build() + .unwrap(); +codegen.generate().unwrap(); +``` + +Add ttrpc-codegen to "build-dependencies" section in "Cargo.toml". 
+ +```toml +[build-dependencies] +ttrpc-codegen = "1.0" +``` diff --git a/codegen/src/codegen.rs b/codegen/src/codegen.rs new file mode 100644 index 00000000..387ffa0a --- /dev/null +++ b/codegen/src/codegen.rs @@ -0,0 +1,189 @@ +use anyhow::{anyhow, Context, Result}; +use prost::Message; +use prost_build::Config; +use prost_types::FileDescriptorSet; +use std::{ + fs::{self, File}, + io::{BufReader, Read, Write}, + path::{Path, PathBuf}, +}; + +use crate::svcgen::{AsyncMode, TtrpcServiceGenerator}; + +const FILE_DESCRIPTOR_SET: &str = "fd_set.bin"; + +pub struct Codegen<'a, P: AsRef> { + out_dir: &'a P, + protos: &'a [P], + includes: &'a [P], + /// Whether to enable serde + serde: bool, + async_mode: AsyncMode, + /// Whether to generate service + generate_service: bool, +} + +impl<'a, P> Codegen<'a, P> +where + P: AsRef, +{ + pub fn generate(&self) -> Result<()> { + self.compile_protos().context("Compile protos")?; + self.write_header().context("Write header")?; + self.clean_up().context("Clean up")?; + + Ok(()) + } + + // TODO: Do not write header to the files that already has the header + // TODO: Write header to the files generated by the codegen + fn write_header(&self) -> Result<()> { + // Read fd_set.bin + let f = File::open(PathBuf::from(self.out_dir.as_ref()).join(FILE_DESCRIPTOR_SET)) + .context("Open fd_set.bin")?; + let mut reader = BufReader::new(f); + let mut buffer = Vec::new(); + reader.read_to_end(&mut buffer).context("Read fd_set.bin")?; + + let fd_set = FileDescriptorSet::decode(&buffer as &[u8]).context("Decode fd_set")?; + + for fd in fd_set.file.iter() { + let rs_path = PathBuf::from(self.out_dir.as_ref()).join(format!("{}.rs", fd.package())); + let mut f = match File::open(&rs_path) { + Ok(f) => f, + _ => continue, + }; + let header = format!( + r#"// This file is generated by ttrpc-codegen {}. 
Do not edit +// @generated + +"#, + env!("CARGO_PKG_VERSION") + ); + + let mut buf = Vec::::new(); + buf.write(header.as_bytes()).context("Write header")?; + f.read_to_end(&mut buf) + .context(format!("Read from rust file {:?}", rs_path))?; + let mut f = File::create(&rs_path).context(format!("Open rust file {:?}", rs_path))?; + f.write_all(buf.as_slice()) + .context(format!("Write to rust file {:?}", rs_path))?; + } + + Ok(()) + } + + fn compile_protos(&self) -> Result<()> { + let mut config = Config::new(); + config.out_dir(self.out_dir.as_ref()); + config.service_generator(Box::new(TtrpcServiceGenerator::new(self.async_mode))); + config.protoc_arg("--experimental_allow_proto3_optional"); + config.compile_well_known_types(); + config.file_descriptor_set_path( + PathBuf::from(self.out_dir.as_ref()).join(FILE_DESCRIPTOR_SET), + ); + if self.generate_service { + config.include_file("_include.rs"); + if self.serde { + config + .message_attribute(".", "#[derive(::serde::Serialize, ::serde::Deserialize)]"); + } + } + config + .compile_protos(self.protos, self.includes) + .context("Compile protos by prost")?; + Ok(()) + } + + fn clean_up(&self) -> Result<()> { + fs::remove_file(PathBuf::from(self.out_dir.as_ref()).join(FILE_DESCRIPTOR_SET)) + .context("Remove fd_set.bin")?; + Ok(()) + } +} + +#[derive(Default)] +pub struct CodegenBuilder<'a, P: AsRef + Default> { + out_dir: Option<&'a P>, + protos: Option<&'a [P]>, + includes: Option<&'a [P]>, + /// Whether to enable serde + serde: Option, + async_mode: Option, + /// Whether to generate service + generate_service: Option, +} + +impl<'a, P> CodegenBuilder<'a, P> +where + P: AsRef + Default, +{ + pub fn new() -> Self { + Default::default() + } + + pub fn set_out_dir(mut self, out_dir: &'a P) -> Self { + self.out_dir = Some(out_dir); + self + } + + pub fn set_protos(mut self, protos: &'a [P]) -> Self { + self.protos = Some(protos); + self + } + + pub fn set_includes(mut self, includes: &'a [P]) -> Self { + self.includes = 
Some(includes); + self + } + + pub fn set_serde(mut self, serde: bool) -> Self { + self.serde = Some(serde); + self + } + + pub fn set_async_mode(mut self, async_mode: AsyncMode) -> Self { + self.async_mode = Some(async_mode); + self + } + + pub fn set_generate_service(mut self, generate_service: bool) -> Self { + self.generate_service = Some(generate_service); + self + } + + pub fn build(&self) -> Result> { + let out_dir = match self.out_dir { + Some(out_dir) => out_dir, + None => return Err(anyhow!("The out dir is required.")), + }; + + let protos = match self.protos { + Some(protos) => protos, + None => return Err(anyhow!("The protos are required.")), + }; + + let includes = match self.includes { + Some(includes) => includes, + None => return Err(anyhow!("The includes are required.")), + }; + + let serde = self.serde.unwrap_or(false); + + let async_mode = match self.async_mode { + Some(mode) => mode, + None => AsyncMode::None, + }; + + let generate_service = self.generate_service.unwrap_or(false); + + Ok(Codegen { + out_dir, + protos, + includes, + serde, + async_mode, + generate_service, + }) + } +} diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs new file mode 100644 index 00000000..2887ff07 --- /dev/null +++ b/codegen/src/lib.rs @@ -0,0 +1,6 @@ +mod codegen; +mod svcgen; +mod util; + +pub use codegen::{Codegen, CodegenBuilder}; +pub use svcgen::AsyncMode; diff --git a/codegen/src/svcgen.rs b/codegen/src/svcgen.rs new file mode 100644 index 00000000..5fa1c811 --- /dev/null +++ b/codegen/src/svcgen.rs @@ -0,0 +1,503 @@ +use std::fmt; + +use proc_macro2::{Ident, TokenStream}; +use prost_build::{Method, Service, ServiceGenerator}; +use quote::{format_ident, quote}; + +use crate::util::{to_camel_case, to_snake_case, ttrpc_mod, type_token}; + +/// An implementation of Ttrpc service generator with the prost. 
+/// The types supported by the generator are: +/// - service sync/async +pub struct TtrpcServiceGenerator { + async_mode: AsyncMode, +} + +impl TtrpcServiceGenerator { + pub fn new(async_mode: AsyncMode) -> Self { + Self { async_mode } + } +} + +impl ServiceGenerator for TtrpcServiceGenerator { + fn finalize(&mut self, _buf: &mut String) {} + fn finalize_package(&mut self, _package: &str, _buf: &mut String) {} + /// Generate services + fn generate(&mut self, service: Service, buf: &mut String) { + self.generate_type_aliases(buf); + self.generate_trait(&service, buf); + self.generate_method_handlers(&service, buf); + self.generate_creating_service_method(&service, buf); + self.generate_client(&service, buf); + } +} + +// Generator engine +impl TtrpcServiceGenerator { + /// Generate type aliases at the beginning of the service + fn generate_type_aliases(&self, buf: &mut String) { + let async_trait_token = if self.async_mode != AsyncMode::None { + quote!( + use async_trait::async_trait; + ) + } else { + quote!() + }; + let type_aliases = quote!( + use std::collections::HashMap; + use std::sync::Arc; + use prost::Message; + #async_trait_token + ); + buf.push_str(type_aliases.to_string().as_str()); + } + + /// Generate the service trait for the server. 
+ /// Assumed that there is a service named "Example", which enables async + /// feature, then the trait would be: + /// #[async_trait] + /// pub trait ExampleService: Sync { + /// // === the methods are snipped === + /// } + fn generate_trait(&self, service: &Service, buf: &mut String) { + let trait_name = format_ident!("{}", service.name); + let (derive_token, sync_token) = if async_on(self.async_mode, Side::Server) { + (quote!( #[async_trait] ), quote!( : Sync )) + } else { + (quote!(), quote!()) + }; + let method_signatures: Vec<_> = service + .methods + .iter() + .map(|method| self.trait_method_signature_token(service, method)) + .collect(); + let trait_token = quote!( + #derive_token + pub trait #trait_name #sync_token { + #(#method_signatures)* + } + ); + buf.push_str(&trait_token.to_string()) + } + + fn trait_method_signature_token(&self, service: &Service, method: &Method) -> TokenStream { + let mod_path = ttrpc_mod(); + let name = format_ident!("{}", self.method_name_rust(method)); + let method_type = MethodType::from_method(method); + // Input/output type + let input_type = type_token(&method.input_type); + let output_type = type_token(&method.output_type); + let (req_type, resp_type) = match method_type { + MethodType::Unary => (quote!( #input_type ), quote!( #output_type )), + MethodType::ClientStreaming => ( + quote!( #mod_path::r#async::ServerStreamReceiver<#input_type> ), + quote!( #output_type ), + ), + MethodType::ServerStreaming => ( + quote!( #input_type, _: #mod_path::r#async::ServerStreamSender<#output_type> ), + quote!(()), + ), + MethodType::Duplex => ( + quote!( #mod_path::r#async::ServerStream<#output_type, #input_type> ), + quote!(()), + ), + }; + let context = self.ttrpc_context(async_on(self.async_mode, Side::Server)); + let err_msg = format!( + "{}.{}/{} is not supported", + service.package, service.name, method.proto_name + ); + // Prepend a function prefix if necessary + let async_token = if async_on(self.async_mode, Side::Server) { 
+ quote!(async) + } else { + quote!() + }; + + quote!( + #async_token fn #name(&self, _ctx: &#context, _: #req_type) -> #mod_path::Result<#resp_type> { + Err( + #mod_path::Error::RpcStatus( + #mod_path::get_status( + #mod_path::Code::NOT_FOUND, + #err_msg, + ) + ) + ) + } + ) + } + + /// Generate method handlers for each method. + fn generate_method_handlers(&self, service: &Service, buf: &mut String) { + for method in service.methods.iter() { + let method_handler = self.method_handler_token(service, method); + buf.push_str(&method_handler.to_string()); + } + } + + fn method_handler_token(&self, service: &Service, method: &Method) -> TokenStream { + let struct_name = format_ident!("{}Method", to_camel_case(method.proto_name.as_str())); + let service_name = format_ident!("{}", service.name); + let method_handler_impl = if async_on(self.async_mode, Side::Server) { + self.method_handler_impl_async_token(&struct_name, method) + } else { + self.method_handler_impl_sync_token(&struct_name, method) + }; + quote!( + struct #struct_name { + service: Arc>, + } + #method_handler_impl + ) + } + + fn method_handler_impl_sync_token(&self, struct_name: &Ident, method: &Method) -> TokenStream { + let mod_path = ttrpc_mod(); + let context = self.ttrpc_context(false); + let input_type = format_ident!("{}", method.input_type); + let method_name = format_ident!("{}", self.method_name_rust(method)); + quote!( + impl #mod_path::MethodHandler for #struct_name { + fn handler(&self, ctx: #context, req: #mod_path::Request) -> #mod_path::Result<()> { + #mod_path::request_handler!(self, ctx, req, #input_type, #method_name); + Ok(()) + } + } + ) + } + + fn method_handler_impl_async_token(&self, struct_name: &Ident, method: &Method) -> TokenStream { + let mod_path = ttrpc_mod(); + let context = self.ttrpc_context(true); + let input_type = format_ident!("{}", method.input_type); + let method_name = format_ident!("{}", self.method_name_rust(method)); + + let (handler_trait, inner_token, 
result_type_token, handler_token) = + match MethodType::from_method(method) { + MethodType::Unary => ( + quote!(MethodHandler), + quote!( req: #mod_path::Request ), + quote!( #mod_path::Response ), + quote!( #mod_path::async_request_handler!(self, ctx, req, #input_type, #method_name); ), + ), + MethodType::ClientStreaming => ( + quote!(StreamHandler), + quote!( inner: #mod_path::r#async::StreamInner ), + quote!( Option<#mod_path::Response> ), + quote!( #mod_path::async_client_streamimg_handler!(self, ctx, inner, #method_name); ), + ), + MethodType::ServerStreaming => ( + quote!(StreamHandler), + quote!( mut inner: #mod_path::r#async::StreamInner ), + quote!( Option<#mod_path::Response> ), + quote!( #mod_path::async_server_streamimg_handler!(self, ctx, inner, #input_type, #method_name); ), + ), + MethodType::Duplex => ( + quote!(StreamHandler), + quote!( inner: #mod_path::r#async::StreamInner ), + quote!( Option<#mod_path::Response> ), + quote!( #mod_path::async_duplex_streamimg_handler!(self, ctx, inner, #method_name); ), + ), + }; + + quote!( + #[async_trait] + impl ::ttrpc::r#async::#handler_trait for #struct_name { + async fn handler(&self, ctx: #context, #inner_token) -> #mod_path::Result<#result_type_token> { + #handler_token + } + } + ) + } + + fn generate_creating_service_method(&self, service: &Service, buf: &mut String) { + let creating_service_method = if async_on(self.async_mode, Side::Server) { + self.async_creating_service_method_token(service) + } else { + self.sync_creating_service_method_token(service) + }; + buf.push_str(&creating_service_method.to_string()); + } + + fn sync_creating_service_method_token(&self, service: &Service) -> TokenStream { + let create_service_name = format_ident!("create_{}", self.service_name_rust(service)); + let service_trait = format_ident!("{}", service.name); + let mod_path = ttrpc_mod(); + let method_inserts: Vec<_> = service.methods.iter().map(|method| { + let key = format!("/{}.{}/{}", service.package, 
service.name, method.proto_name); + let mm = format_ident!("{}Method", to_camel_case(&method.proto_name)); + quote!( + methods.insert( + #key.to_string(), + Box::new(#mm{service: service.clone()}) as Box); + ) + }).collect(); + + quote!( + pub fn #create_service_name(service: Arc>) -> HashMap> { + let mut methods = HashMap::new(); + #(#method_inserts)* + methods + } + ) + } + + fn async_creating_service_method_token(&self, service: &Service) -> TokenStream { + let create_service_name = format_ident!("create_{}", self.service_name_rust(service)); + let service_trait = format_ident!("{}", service.name); + let mod_path = ttrpc_mod(); + let stream_token = if self.has_stream_method(service) { + quote!( let mut streams = HashMap::new(); ) + } else { + quote!( let streams = HashMap::new(); ) + }; + let method_inserts: Vec<_> = service.methods.iter().map(|method| { + let key = method.proto_name.to_string(); + let mm = format_ident!("{}Method", to_camel_case(&method.proto_name)); + match MethodType::from_method(method) { + MethodType::Unary => { + quote!( + methods.insert( + #key.to_string(), + Box::new(#mm{service: service.clone()}) as Box); + ) + }, + _ => { + quote!( + streams.insert( + #key.to_string(), + Arc::new(#mm{service: service.clone()}) as Arc); + ) + } + } + }).collect(); + let service_path = format!("{}.{}", service.package, to_camel_case(&service.proto_name)); + + quote!( + pub fn #create_service_name(service: Arc>) -> HashMap { + let mut ret = HashMap::new(); + let mut methods = HashMap::new(); + #stream_token + #(#method_inserts)* + ret.insert(#service_path.to_string(), #mod_path::r#async::Service{ methods, streams }); + ret + } + ) + } + + fn generate_client(&self, service: &Service, buf: &mut String) { + let client_struct = self.client_sturct_token(service); + let client_methods = self.client_methods_token(service); + + let client_token = quote!( + #client_struct + #client_methods + ); + buf.push_str(&client_token.to_string()); + } + + fn 
client_sturct_token(&self, service: &Service) -> TokenStream { + let client_type = format_ident!("{}", self.client_type(service)); + let mod_path = ttrpc_mod(); + let client_field_type = if async_on(self.async_mode, Side::Client) { + quote!( #mod_path::r#async::Client ) + } else { + quote!( #mod_path::Client ) + }; + + quote!( + #[derive(Clone)] + pub struct #client_type { + client: #client_field_type, + } + + impl #client_type { + pub fn new(client: #client_field_type) -> Self { + #client_type { + client, + } + } + } + ) + } + + fn client_methods_token(&self, service: &Service) -> TokenStream { + let client_type = format_ident!("{}", self.client_type(service)); + let methods: Vec<_> = service + .methods + .iter() + .map(|method| { + if async_on(self.async_mode, Side::Client) { + self.async_client_method_token(service, method) + } else { + self.sync_client_method_token(service, method) + } + }) + .collect(); + + quote!( + impl #client_type { + #(#methods)* + } + ) + } + + fn sync_client_method_token(&self, service: &Service, method: &Method) -> TokenStream { + let method_name = format_ident!("{}", method.name); + let mod_path = ttrpc_mod(); + let input = type_token(&method.input_type); + let output = type_token(&method.output_type); + let server_str = format!("{}.{}", service.package, service.name); + let method_str = method.proto_name.to_string(); + + match MethodType::from_method(method) { + MethodType::Unary => { + quote!( + pub fn #method_name(&self, ctx: #mod_path::context::Context, req: &#input) -> #mod_path::Result<#output> { + let mut cres = #output::default(); + #mod_path::client_request!(self, ctx, req, #server_str, #method_str, cres); + Ok(cres) + } + ) + } + _ => { + panic!("Reaching here is prohibited.") + } + } + } + + fn async_client_method_token(&self, service: &Service, method: &Method) -> TokenStream { + let method_name = format_ident!("{}", method.name); + let mod_path = ttrpc_mod(); + let input = type_token(&method.input_type); + let output = 
type_token(&method.output_type); + let server_str = format!("{}.{}", service.package, service.name); + let method_str = method.proto_name.to_string(); + + let (mut arg_tokens, ret_token, body_token) = match MethodType::from_method(method) { + MethodType::Unary => ( + vec![quote!(req: &#input)], + quote!( #mod_path::Result<#output> ), + quote!( + let mut cres = #output::default(); + #mod_path::async_client_request!(self, ctx, req, #server_str, #method_str, cres); + ), + ), + MethodType::ClientStreaming => ( + vec![], + quote!( #mod_path::Result<#mod_path::r#async::ClientStreamSender<#input, #output>> ), + quote!( ::ttrpc::async_client_stream_send!(self, ctx, #server_str, #method_str); ), + ), + MethodType::ServerStreaming => ( + vec![quote!( req: &#input )], + quote!( #mod_path::Result<#mod_path::r#async::ClientStreamReceiver<#output>> ), + quote!( #mod_path::async_client_stream_receive!(self, ctx, req, #server_str, #method_str); ), + ), + MethodType::Duplex => ( + vec![], + quote!( #mod_path::Result<#mod_path::r#async::ClientStream<#input, #output>> ), + quote!( ::ttrpc::async_client_stream!(self, ctx, #server_str, #method_str); ), + ), + }; + + let mut args = vec![quote!(&self), quote!( ctx: #mod_path::context::Context )]; + args.append(&mut arg_tokens); + + quote!( + pub async fn #method_name(#(#args),*) -> #ret_token { + #body_token + } + ) + } +} + +// Utils +impl TtrpcServiceGenerator { + /// Generate a token stream of the TtrpcContext + fn ttrpc_context(&self, r#async: bool) -> TokenStream { + let mod_path = ttrpc_mod(); + if r#async { + quote!( #mod_path::r#async::TtrpcContext ) + } else { + quote!( #mod_path::TtrpcContext ) + } + } + + fn service_name_rust(&self, service: &Service) -> String { + to_snake_case(&service.name) + } + + fn method_name_rust(&self, method: &Method) -> String { + to_snake_case(&method.name) + } + + fn client_type(&self, service: &Service) -> String { + format!("{}Client", service.name) + } + + fn has_stream_method(&self, service: 
&Service) -> bool { + service + .methods + .iter() + .any(|method| !matches!(MethodType::from_method(method), MethodType::Unary)) + } +} + +pub enum MethodType { + Unary, + ClientStreaming, + ServerStreaming, + Duplex, +} + +impl fmt::Display for MethodType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "{}", + match self { + MethodType::Unary => "MethodType::Unary", + MethodType::ClientStreaming => "MethodType::ClientStreaming", + MethodType::ServerStreaming => "MethodType::ServerStreaming", + MethodType::Duplex => "MethodType::Duplex", + } + ) + } +} + +impl MethodType { + pub fn from_method(method: &Method) -> Self { + match (method.client_streaming, method.server_streaming) { + (false, false) => MethodType::Unary, + (true, false) => MethodType::ClientStreaming, + (false, true) => MethodType::ServerStreaming, + (true, true) => MethodType::Duplex, + } + } +} + +#[derive(PartialEq, Clone, Copy)] +pub enum AsyncMode { + /// Both client and server are async. + All, + /// Only client is async. + Client, + /// Only server is async. + Server, + /// None of client and server are async, it is the default value. + None, +} + +#[derive(PartialEq)] +/// Indicated the service side +enum Side { + Client, + Server, +} + +fn async_on(mode: AsyncMode, side: Side) -> bool { + mode == AsyncMode::All + || (side == Side::Server && mode == AsyncMode::Server) + || (side == Side::Client && mode == AsyncMode::Client) +} diff --git a/codegen/src/util.rs b/codegen/src/util.rs new file mode 100644 index 00000000..d44a56b7 --- /dev/null +++ b/codegen/src/util.rs @@ -0,0 +1,97 @@ +use proc_macro2::TokenStream; +use quote::{format_ident, quote}; +use std::str; + +// A struct that divide a name into serveral parts that meets rust's guidelines. 
+struct NameSpliter<'a> { + name: &'a [u8], + pos: usize, +} + +impl<'a> NameSpliter<'a> { + fn new(s: &str) -> NameSpliter { + NameSpliter { + name: s.as_bytes(), + pos: 0, + } + } +} + +impl<'a> Iterator for NameSpliter<'a> { + type Item = &'a str; + + fn next(&mut self) -> Option<&'a str> { + if self.pos == self.name.len() { + return None; + } + // skip all prefix '_' + while self.pos < self.name.len() && self.name[self.pos] == b'_' { + self.pos += 1; + } + let mut pos = self.name.len(); + let mut upper_len = 0; + let mut meet_lower = false; + for i in self.pos..self.name.len() { + let c = self.name[i]; + if (b'A'..=b'Z').contains(&c) { + if meet_lower { + // So it should be AaA or aaA + pos = i; + break; + } + upper_len += 1; + } else if c == b'_' { + pos = i; + break; + } else { + meet_lower = true; + if upper_len > 1 { + // So it should be AAa + pos = i - 1; + break; + } + } + } + let s = str::from_utf8(&self.name[self.pos..pos]).unwrap(); + self.pos = pos; + Some(s) + } +} + +pub fn ttrpc_mod() -> TokenStream { + let ttrpc = format_ident!("ttrpc"); + quote! { ::#ttrpc } +} + +pub fn to_camel_case(name: &str) -> String { + let mut camel_case_name = String::with_capacity(name.len()); + for s in NameSpliter::new(name) { + let mut chs = s.chars(); + camel_case_name.extend(chs.next().unwrap().to_uppercase()); + camel_case_name.push_str(&s[1..].to_lowercase()); + } + camel_case_name +} + +/// Adjust method name to follow rust-guidelines. 
+pub fn to_snake_case(name: &str) -> String { + let mut snake_method_name = String::with_capacity(name.len()); + for s in NameSpliter::new(name) { + snake_method_name.push_str(&s.to_lowercase()); + snake_method_name.push('_'); + } + snake_method_name.pop(); + snake_method_name +} + +pub fn type_token(type_str: &str) -> TokenStream { + if type_str == "()" { + quote!(()) + } else { + let idents: Vec<_> = type_str + .split("::") + .map(|ident| format_ident!("{}", ident)) + .collect(); + quote!( #(#idents)::* ) + } +} diff --git a/compiler/Makefile b/compiler/Makefile index bb69e69a..acd4a18c 100644 --- a/compiler/Makefile +++ b/compiler/Makefile @@ -1 +1,8 @@ -include ../Makefile +.PHONY: check +check: + cargo fmt --all -- --check + cargo clippy --all-targets -- -D warnings + +.PHONY: test +test: + cargo test --verbose \ No newline at end of file diff --git a/example/Cargo.toml b/example/Cargo.toml index 64116403..aca98425 100644 --- a/example/Cargo.toml +++ b/example/Cargo.toml @@ -13,12 +13,12 @@ description = "An example of ttrpc." 
[dev-dependencies] protobuf = "3.1.0" bytes = "0.4.11" -libc = "0.2.79" +libc = "0.2.158" byteorder = "1.3.2" log = "0.4.6" simple-logging = "2.0.2" nix = "0.23.0" -ttrpc = { path = "../", features = ["async"] } +ttrpc = { path = "../", features = ["async","rustprotobuf"] } ctrlc = { version = "3.0", features = ["termination"] } tokio = { version = "1.0.1", features = ["signal", "time"] } async-trait = "0.1.42" diff --git a/example/protocols/asynchronous/mod.rs b/example/protocols/asynchronous/mod.rs new file mode 100644 index 00000000..1fa216c4 --- /dev/null +++ b/example/protocols/asynchronous/mod.rs @@ -0,0 +1,12 @@ +pub mod agent_ttrpc; +pub mod health_ttrpc; +pub mod empty; +pub mod types; +// @generated +pub mod streaming; +pub mod agent; +pub mod oci; +pub mod health; + +pub mod streaming_ttrpc; +pub mod gogo; diff --git a/example/protocols/sync/mod.rs b/example/protocols/sync/mod.rs new file mode 100644 index 00000000..4def32b9 --- /dev/null +++ b/example/protocols/sync/mod.rs @@ -0,0 +1,10 @@ + +pub mod health_ttrpc; +pub mod types; +// @generated +pub mod gogo; +pub mod health; +pub mod agent; +pub mod agent_ttrpc; +pub mod oci; +pub mod empty; diff --git a/example2/Cargo.toml b/example2/Cargo.toml new file mode 100644 index 00000000..75a9cdb1 --- /dev/null +++ b/example2/Cargo.toml @@ -0,0 +1,55 @@ +[package] +name = "ttrpc-example" +version = "0.2.0" +authors = ["The AntFin Kata Team "] +edition = "2018" +license = "Apache-2.0" +keywords = ["ttrpc", "protobuf", "rpc"] +readme = "README.md" +repository = "https://github.com/alipay/ttrpc-rust" +homepage = "https://github.com/alipay/ttrpc-rust" +description = "An example of ttrpc." 
+ +[dependencies] +prost = "0.11" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +ttrpc-codegen = { path = "../codegen" } +bytes = "0.4.11" +libc = "0.2.79" +byteorder = "1.3.2" +log = "0.4.6" +simple-logging = "2.0.2" +nix = "0.23.0" +ttrpc = { path = "../", features = ["sync", "async", "prost"], default-features = false } +ctrlc = { version = "3.0", features = ["termination"] } +tokio = { version = "1.0.1", features = ["signal", "time"] } +async-trait = "0.1.42" +rand = "0.8.5" + +[build-dependencies] +ttrpc-codegen = { path = "../codegen" } + +[[example]] +name = "client" +path = "./client.rs" + +[[example]] +name = "server" +path = "./server.rs" + +[[example]] +name = "async-server" +path = "./async_server.rs" + +[[example]] +name = "async-client" +path = "./async_client.rs" + +[[example]] +name = "async-stream-server" +path = "./async_stream_server.rs" + +[[example]] +name = "async-stream-client" +path = "./async_stream_client.rs" diff --git a/example2/Makefile b/example2/Makefile new file mode 100644 index 00000000..7ff706f8 --- /dev/null +++ b/example2/Makefile @@ -0,0 +1,31 @@ +# +# Build +# + +.PHONY: build +build: +ifeq ($(OS),Windows_NT) + @echo "Skipping build on Windows" +else + cargo build +endif + +.PHONY: build-examples +build-examples: build +ifeq ($(OS),Windows_NT) + @echo "Skipping build-examples on Windows" +else + cargo build --example server + cargo build --example client + cargo build --example async-server + cargo build --example async-client + cargo build --example async-stream-server + cargo build --example async-stream-client +endif + +.PHONY: deps +deps: + rustup update stable + rustup default stable + rustup component add rustfmt clippy + ../install_protoc.sh diff --git a/example2/async_client.rs b/example2/async_client.rs new file mode 100644 index 00000000..ca70e140 --- /dev/null +++ b/example2/async_client.rs @@ -0,0 +1,108 @@ +// Copyright (c) 2020 Ant Financial +// +// SPDX-License-Identifier: Apache-2.0 
+// + +mod protocols; +mod utils; + +use protocols::r#async::{agent, health}; +use ttrpc::context::{self, Context}; +use ttrpc::r#async::Client; + +#[tokio::main(flavor = "current_thread")] +async fn main() { + let c = Client::connect(utils::SOCK_ADDR).await.unwrap(); + let hc = health::HealthClient::new(c.clone()); + let ac = agent::AgentServiceClient::new(c); + + let thc = hc.clone(); + let tac = ac.clone(); + + let now = std::time::Instant::now(); + + let t1 = tokio::spawn(async move { + let req = health::CheckRequest::default(); + println!( + "Green Thread 1 - {} started: {:?}", + "health.check()", + now.elapsed(), + ); + println!( + "Green Thread 1 - {} -> {:?} ended: {:?}", + "health.check()", + thc.check(context::with_timeout(20 * 1000 * 1000), &req) + .await, + now.elapsed(), + ); + }); + + let t2 = tokio::spawn(async move { + println!( + "Green Thread 2 - {} started: {:?}", + "agent.list_interfaces()", + now.elapsed(), + ); + + let show = match tac + .list_interfaces(default_ctx(), &agent::ListInterfacesRequest::default()) + .await + { + Err(e) => format!("{:?}", e), + Ok(s) => format!("{:?}", s), + }; + + println!( + "Green Thread 2 - {} -> {} ended: {:?}", + "agent.list_interfaces()", + show, + now.elapsed(), + ); + }); + + let t3 = tokio::spawn(async move { + println!( + "Green Thread 3 - {} started: {:?}", + "agent.online_cpu_mem()", + now.elapsed() + ); + + let show = match ac + .online_cpu_mem(default_ctx(), &agent::OnlineCpuMemRequest::default()) + .await + { + Err(e) => format!("{:?}", e), + Ok(s) => format!("{:?}", s), + }; + println!( + "Green Thread 3 - {} -> {} ended: {:?}", + "agent.online_cpu_mem()", + show, + now.elapsed() + ); + + println!( + "Green Thread 3 - {} started: {:?}", + "health.version()", + now.elapsed() + ); + println!( + "Green Thread 3 - {} -> {:?} ended: {:?}", + "health.version()", + hc.version(default_ctx(), &health::CheckRequest::default()) + .await, + now.elapsed() + ); + }); + + let _ = tokio::join!(t1, t2, t3); +} + 
+fn default_ctx() -> Context { + let mut ctx = context::with_timeout(0); + ctx.add("key-1".to_string(), "value-1-1".to_string()); + ctx.add("key-1".to_string(), "value-1-2".to_string()); + ctx.set("key-2".to_string(), vec!["value-2".to_string()]); + + ctx +} diff --git a/example2/async_server.rs b/example2/async_server.rs new file mode 100644 index 00000000..1a4d2932 --- /dev/null +++ b/example2/async_server.rs @@ -0,0 +1,126 @@ +// Copyright (c) 2020 Ant Financial +// +// SPDX-License-Identifier: Apache-2.0 +// + +mod protocols; +mod utils; + +#[macro_use] +extern crate log; + +use std::sync::Arc; + +use log::LevelFilter; + +use protocols::r#async::{agent, health, types}; +use ttrpc::asynchronous::Server; +use ttrpc::error::{Error, Result}; +use ttrpc::proto::{Code, Status}; + +use async_trait::async_trait; +use tokio::signal::unix::{signal, SignalKind}; +use tokio::time::sleep; + +struct HealthService; + +#[async_trait] +impl health::Health for HealthService { + async fn check( + &self, + _ctx: &::ttrpc::r#async::TtrpcContext, + _req: health::CheckRequest, + ) -> Result { + let mut status = Status::default(); + + status.code = Code::NOT_FOUND as i32; + status.message = "Just for fun".to_string(); + + sleep(std::time::Duration::from_secs(10)).await; + + Err(Error::RpcStatus(status)) + } + + async fn version( + &self, + ctx: &::ttrpc::r#async::TtrpcContext, + req: health::CheckRequest, + ) -> Result { + info!("version {:?}", req); + info!("ctx {:?}", ctx); + let mut rep = health::VersionCheckResponse::default(); + rep.agent_version = "mock.0.1".to_string(); + rep.grpc_version = "0.0.1".to_string(); + let mut status = Status::default(); + status.code = Code::NOT_FOUND as i32; + Ok(rep) + } +} + +struct AgentService; + +#[async_trait] +impl agent::AgentService for AgentService { + async fn list_interfaces( + &self, + _ctx: &::ttrpc::r#async::TtrpcContext, + _req: agent::ListInterfacesRequest, + ) -> ::ttrpc::Result { + let mut rp = Vec::new(); + + let mut i = 
types::Interface::default(); + i.name = "first".to_string(); + rp.push(i); + let mut i = types::Interface::default(); + i.name = "second".to_string(); + rp.push(i); + + let mut i = agent::Interfaces::default(); + i.interfaces = rp; + + Ok(i) + } +} + +#[tokio::main(flavor = "current_thread")] +async fn main() { + simple_logging::log_to_stderr(LevelFilter::Trace); + + let h = Box::new(HealthService {}) as Box; + let h = Arc::new(h); + let hservice = health::create_health(h); + + let a = Box::new(AgentService {}) as Box; + let a = Arc::new(a); + let aservice = agent::create_agent_service(a); + + utils::remove_if_sock_exist(utils::SOCK_ADDR).unwrap(); + + let mut server = Server::new() + .bind(utils::SOCK_ADDR) + .unwrap() + .register_service(hservice) + .register_service(aservice); + + let mut hangup = signal(SignalKind::hangup()).unwrap(); + let mut interrupt = signal(SignalKind::interrupt()).unwrap(); + server.start().await.unwrap(); + + tokio::select! { + _ = hangup.recv() => { + // test stop_listen -> start + println!("stop listen"); + server.stop_listen().await; + println!("start listen"); + server.start().await.unwrap(); + + // hold some time for the new test connection. + sleep(std::time::Duration::from_secs(100)).await; + } + _ = interrupt.recv() => { + // test graceful shutdown + println!("graceful shutdown"); + server.shutdown().await.unwrap(); + } + }; +} diff --git a/example2/async_stream_client.rs b/example2/async_stream_client.rs new file mode 100644 index 00000000..1030368f --- /dev/null +++ b/example2/async_stream_client.rs @@ -0,0 +1,174 @@ +// Copyright 2022 Alibaba Cloud. All rights reserved. 
+// +// SPDX-License-Identifier: Apache-2.0 +// + +mod protocols; +mod utils; + +use protocols::r#async::{google::protobuf::Empty, streaming}; +use ttrpc::context::{self, Context}; +use ttrpc::r#async::Client; + +#[tokio::main(flavor = "current_thread")] +async fn main() { + simple_logging::log_to_stderr(log::LevelFilter::Info); + + let c = Client::connect(utils::SOCK_ADDR).await.unwrap(); + let sc = streaming::StreamingClient::new(c); + + let _now = std::time::Instant::now(); + + let sc1 = sc.clone(); + let t1 = tokio::spawn(echo_request(sc1)); + + let sc1 = sc.clone(); + let t2 = tokio::spawn(echo_stream(sc1)); + + let sc1 = sc.clone(); + let t3 = tokio::spawn(sum_stream(sc1)); + + let sc1 = sc.clone(); + let t4 = tokio::spawn(divide_stream(sc1)); + + let sc1 = sc.clone(); + let t5 = tokio::spawn(echo_null(sc1)); + + let t6 = tokio::spawn(echo_null_stream(sc)); + + let _ = tokio::join!(t1, t2, t3, t4, t5, t6); +} + +fn default_ctx() -> Context { + let mut ctx = context::with_timeout(0); + ctx.add("key-1".to_string(), "value-1-1".to_string()); + ctx.add("key-1".to_string(), "value-1-2".to_string()); + ctx.set("key-2".to_string(), vec!["value-2".to_string()]); + + ctx +} + +async fn echo_request(cli: streaming::StreamingClient) { + let echo1 = streaming::EchoPayload { + seq: 1, + msg: "Echo Me".to_string(), + ..Default::default() + }; + let resp = cli.echo(default_ctx(), &echo1).await.unwrap(); + assert_eq!(resp.msg, echo1.msg); + assert_eq!(resp.seq, echo1.seq + 1); +} + +async fn echo_stream(cli: streaming::StreamingClient) { + let mut stream = cli.echo_stream(default_ctx()).await.unwrap(); + + let mut i = 0; + while i < 100 { + let echo = streaming::EchoPayload { + seq: i as u32, + msg: format!("{}: Echo in a stream", i), + ..Default::default() + }; + stream.send(&echo).await.unwrap(); + let resp = stream.recv().await.unwrap(); + assert_eq!(resp.msg, echo.msg); + assert_eq!(resp.seq, echo.seq + 1); + + i += 2; + } + stream.close_send().await.unwrap(); + let ret 
= stream.recv().await; + assert!(matches!(ret, Err(ttrpc::Error::Eof))); +} + +async fn sum_stream(cli: streaming::StreamingClient) { + let mut stream = cli.sum_stream(default_ctx()).await.unwrap(); + + let mut sum = streaming::Sum::default(); + stream.send(&streaming::Part::default()).await.unwrap(); + + sum.num += 1; + let mut i = -99i32; + while i <= 100 { + let addi = streaming::Part { + add: i, + ..Default::default() + }; + stream.send(&addi).await.unwrap(); + sum.sum += i; + sum.num += 1; + + i += 1; + } + stream.send(&streaming::Part::default()).await.unwrap(); + sum.num += 1; + + let ssum = stream.close_and_recv().await.unwrap(); + assert_eq!(ssum.sum, sum.sum); + assert_eq!(ssum.num, sum.num); +} + +async fn divide_stream(cli: streaming::StreamingClient) { + let expected = streaming::Sum { + sum: 392, + num: 4, + ..Default::default() + }; + let mut stream = cli.divide_stream(default_ctx(), &expected).await.unwrap(); + + let mut actual = streaming::Sum::default(); + + // NOTE: `for part in stream.recv().await.unwrap()` can't work. 
+ while let Some(part) = stream.recv().await.unwrap() { + actual.sum += part.add; + actual.num += 1; + } + assert_eq!(actual.sum, expected.sum); + assert_eq!(actual.num, expected.num); +} + +async fn echo_null(cli: streaming::StreamingClient) { + let mut stream = cli.echo_null(default_ctx()).await.unwrap(); + + for i in 0..100 { + let echo = streaming::EchoPayload { + seq: i as u32, + msg: "non-empty empty".to_string(), + ..Default::default() + }; + stream.send(&echo).await.unwrap(); + } + let res = stream.close_and_recv().await.unwrap(); + assert_eq!(res, Empty::default()); +} + +async fn echo_null_stream(cli: streaming::StreamingClient) { + let stream = cli.echo_null_stream(default_ctx()).await.unwrap(); + + let (tx, mut rx) = stream.split(); + + let task = tokio::spawn(async move { + loop { + let ret = rx.recv().await; + if matches!(ret, Err(ttrpc::Error::Eof)) { + break; + } + } + }); + + for i in 0..100 { + let echo = streaming::EchoPayload { + seq: i as u32, + msg: "non-empty empty".to_string(), + ..Default::default() + }; + tx.send(&echo).await.unwrap(); + } + + tx.close_send().await.unwrap(); + + tokio::time::timeout(tokio::time::Duration::from_secs(10), task) + .await + .unwrap() + .unwrap(); +} diff --git a/example2/async_stream_server.rs b/example2/async_stream_server.rs new file mode 100644 index 00000000..878d72ff --- /dev/null +++ b/example2/async_stream_server.rs @@ -0,0 +1,170 @@ +// Copyright 2022 Alibaba Cloud. All rights reserved. 
+// +// SPDX-License-Identifier: Apache-2.0 +// + +mod protocols; +mod utils; + +use std::sync::Arc; + +use log::{info, LevelFilter}; + +use protocols::r#async::{google::protobuf::Empty, streaming}; +use ttrpc::asynchronous::Server; + +use async_trait::async_trait; +use tokio::signal::unix::{signal, SignalKind}; +use tokio::time::sleep; + +struct StreamingService; + +#[async_trait] +impl streaming::Streaming for StreamingService { + async fn echo( + &self, + _ctx: &::ttrpc::r#async::TtrpcContext, + mut e: streaming::EchoPayload, + ) -> ::ttrpc::Result { + e.seq += 1; + Ok(e) + } + + async fn echo_stream( + &self, + _ctx: &::ttrpc::r#async::TtrpcContext, + mut s: ::ttrpc::r#async::ServerStream, + ) -> ::ttrpc::Result<()> { + while let Some(mut e) = s.recv().await? { + e.seq += 1; + s.send(&e).await?; + } + + Ok(()) + } + + async fn sum_stream( + &self, + _ctx: &::ttrpc::r#async::TtrpcContext, + mut s: ::ttrpc::r#async::ServerStreamReceiver, + ) -> ::ttrpc::Result { + let mut sum = streaming::Sum::default(); + while let Some(part) = s.recv().await? { + sum.sum += part.add; + sum.num += 1; + } + + Ok(sum) + } + + async fn divide_stream( + &self, + _ctx: &::ttrpc::r#async::TtrpcContext, + sum: streaming::Sum, + s: ::ttrpc::r#async::ServerStreamSender, + ) -> ::ttrpc::Result<()> { + let mut parts = vec![streaming::Part::default(); sum.num as usize]; + + let mut total = 0i32; + for i in 1..(sum.num - 2) { + let add = (rand::random::() % 1000) as i32 - 500; + parts[i as usize].add = add; + total += add; + } + + parts[sum.num as usize - 2].add = sum.sum - total; + + for part in parts { + s.send(&part).await.unwrap(); + } + + Ok(()) + } + + async fn echo_null( + &self, + _ctx: &::ttrpc::r#async::TtrpcContext, + mut s: ::ttrpc::r#async::ServerStreamReceiver, + ) -> ::ttrpc::Result { + let mut seq = 0; + while let Some(e) = s.recv().await? 
{ + assert_eq!(e.seq, seq); + assert_eq!(e.msg.as_str(), "non-empty empty"); + seq += 1; + } + Ok(Empty::default()) + } + + async fn echo_null_stream( + &self, + _ctx: &::ttrpc::r#async::TtrpcContext, + s: ::ttrpc::r#async::ServerStream, + ) -> ::ttrpc::Result<()> { + let msg = "non-empty empty".to_string(); + + let mut tasks = Vec::new(); + + let (tx, mut rx) = s.split(); + let mut seq = 0u32; + while let Some(e) = rx.recv().await? { + assert_eq!(e.seq, seq); + assert_eq!(e.msg, msg); + seq += 1; + + for _i in 0..10 { + let tx = tx.clone(); + tasks.push(tokio::spawn( + async move { tx.send(&Empty::default()).await }, + )); + } + } + + for t in tasks { + t.await.unwrap().map_err(|e| { + ::ttrpc::Error::RpcStatus(::ttrpc::get_status( + ::ttrpc::Code::UNKNOWN, + e.to_string(), + )) + })?; + } + Ok(()) + } +} + +#[tokio::main(flavor = "current_thread")] +async fn main() { + simple_logging::log_to_stderr(LevelFilter::Info); + + let s = Box::new(StreamingService {}) as Box; + let s = Arc::new(s); + let service = streaming::create_streaming(s); + + utils::remove_if_sock_exist(utils::SOCK_ADDR).unwrap(); + + let mut server = Server::new() + .bind(utils::SOCK_ADDR) + .unwrap() + .register_service(service); + + let mut hangup = signal(SignalKind::hangup()).unwrap(); + let mut interrupt = signal(SignalKind::interrupt()).unwrap(); + server.start().await.unwrap(); + + tokio::select! { + _ = hangup.recv() => { + // test stop_listen -> start + info!("stop listen"); + server.stop_listen().await; + info!("start listen"); + server.start().await.unwrap(); + + // hold some time for the new test connection. 
+ sleep(std::time::Duration::from_secs(100)).await; + } + _ = interrupt.recv() => { + // test graceful shutdown + info!("graceful shutdown"); + server.shutdown().await.unwrap(); + } + }; +} diff --git a/example2/build.rs b/example2/build.rs new file mode 100644 index 00000000..3b8e5fb5 --- /dev/null +++ b/example2/build.rs @@ -0,0 +1,42 @@ +// Copyright (c) 2020 Ant Financial +// +// SPDX-License-Identifier: Apache-2.0 +// + +// use ttrpc_codegen::CodegenBuilder; + +use ttrpc_codegen::{AsyncMode, CodegenBuilder}; + +fn main() { + let mut protos = vec![ + "protocols/protos/health.proto", + "protocols/protos/agent.proto", + "protocols/protos/oci.proto", + ]; + + let includes = vec!["protocols/protos"]; + + let codegen = CodegenBuilder::new() + .set_out_dir(&"protocols/sync") + .set_protos(&protos) + .set_includes(&includes) + .set_serde(true) + .set_async_mode(AsyncMode::None) + .set_generate_service(true) + .build() + .unwrap(); + codegen.generate().unwrap(); + + protos.push("protocols/protos/streaming.proto"); + + let codegen = CodegenBuilder::new() + .set_out_dir(&"protocols/asynchronous") + .set_protos(&protos) + .set_includes(&includes) + .set_serde(true) + .set_async_mode(AsyncMode::All) + .set_generate_service(true) + .build() + .unwrap(); + codegen.generate().unwrap(); +} diff --git a/example2/client.rs b/example2/client.rs new file mode 100644 index 00000000..833bef30 --- /dev/null +++ b/example2/client.rs @@ -0,0 +1,119 @@ +// Copyright (c) 2019 Ant Financial +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +mod protocols; +mod utils; + +use protocols::sync::{agent, health}; +use std::thread; +use ttrpc::context::{self, Context}; +use ttrpc::Client; + +fn main() { + let c = Client::connect(utils::SOCK_ADDR).unwrap(); + let health_client = health::HealthClient::new(c.clone()); + let agent_service_client = agent::AgentServiceClient::new(c); + + let thread_health_client = health_client.clone(); + let thread_agent_service_client = agent_service_client.clone(); + + let now = std::time::Instant::now(); + + let t = thread::spawn(move || { + let req = health::CheckRequest::default(); + println!( + "OS Thread {:?} - {} started: {:?}", + std::thread::current().id(), + "health.check()", + now.elapsed(), + ); + println!( + "OS Thread {:?} - {} -> {:?} ended: {:?}", + std::thread::current().id(), + "health.check()", + thread_health_client.check(default_ctx(), &req), + now.elapsed(), + ); + }); + + let t2 = thread::spawn(move || { + println!( + "OS Thread {:?} - {} started: {:?}", + std::thread::current().id(), + "agent.list_interfaces()", + now.elapsed(), + ); + + let show = match thread_agent_service_client + .list_interfaces(default_ctx(), &agent::ListInterfacesRequest::default()) + { + Err(e) => format!("{:?}", e), + Ok(s) => format!("{:?}", s), + }; + + println!( + "OS Thread {:?} - {} -> {} ended: {:?}", + std::thread::current().id(), + "agent.list_interfaces()", + show, + now.elapsed(), + ); + }); + + println!( + "Main OS Thread - {} started: {:?}", + "agent.online_cpu_mem()", + now.elapsed() + ); + let show = match agent_service_client + .online_cpu_mem(default_ctx(), &agent::OnlineCpuMemRequest::default()) + { + Err(e) => format!("{:?}", e), + Ok(s) => format!("{:?}", s), + }; + println!( + "Main OS Thread - {} -> {} ended: {:?}", + "agent.online_cpu_mem()", + show, + now.elapsed() + ); + + println!("\nsleep 2 seconds ...\n"); + 
thread::sleep(std::time::Duration::from_secs(2)); + println!( + "Main OS Thread - {} started: {:?}", + "health.version()", + now.elapsed() + ); + let mut req = health::CheckRequest::default(); + req.service = "haha".to_owned(); + println!( + "Main OS Thread - {} -> {:?} ended: {:?}", + "health.version()", + health_client.version(default_ctx(), &req), + now.elapsed() + ); + + t.join().unwrap(); + t2.join().unwrap(); +} + +fn default_ctx() -> Context { + let mut ctx = context::with_timeout(0); + ctx.add("key-1".to_string(), "value-1-1".to_string()); + ctx.add("key-1".to_string(), "value-1-2".to_string()); + ctx.set("key-2".to_string(), vec!["value-2".to_string()]); + + ctx +} diff --git a/example2/protocols/asynchronous/mod.rs b/example2/protocols/asynchronous/mod.rs new file mode 100644 index 00000000..a6fc3258 --- /dev/null +++ b/example2/protocols/asynchronous/mod.rs @@ -0,0 +1,8 @@ +// Copyright (c) 2020 Ant Financial +// +// SPDX-License-Identifier: Apache-2.0 +// + +#![allow(dead_code)] + +include!("_include.rs"); diff --git a/example2/protocols/hack/update-generated-proto.sh b/example2/protocols/hack/update-generated-proto.sh new file mode 100755 index 00000000..202db077 --- /dev/null +++ b/example2/protocols/hack/update-generated-proto.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +# Copyright (c) 2019 Ant Financial +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +die() { + echo $1 + exit +} + +get_source_version() { + if [ ! 
-d $GOPATH/src/$1 ]; then + go get -d -v $1 + fi + [ $? -eq 0 ] || die "Failed to get $1" + if [ "$2" != "" ] ; then + pushd "${GOPATH}/src/$1" + if [ $(git rev-parse HEAD) != $2 ] ; then + git checkout $2 + [ $? -eq 0 ] || die "Failed to get $1 $2" + fi + popd + fi +} + +get_rs() { + local cmd="protoc --rust_out=./ --ttrpc_out=./,plugins=ttrpc:./ --plugin=protoc-gen-ttrpc=`which ttrpc_rust_plugin` -I ./protos/ ./protos/$1" + echo $cmd + $cmd + [ $? -eq 0 ] || die "Failed to get rust from $1" +} + +if [ "$(basename $(pwd))" != "protocols" ] || [ ! -d "./hack/" ]; then + die "Please go to directory of protocols before execute this shell" +fi +which protoc +[ $? -eq 0 ] || die "Please install protoc from github.com/protocolbuffers/protobuf" +which protoc-gen-rust +[ $? -eq 0 ] || die "Please install protobuf-codegen from github.com/pingcap/grpc-rs" +which ttrpc_rust_plugin +[ $? -eq 0 ] || die "Please install ttrpc_rust_plugin from ttrpc-rust/compiler" + +if [ $UPDATE_PROTOS ]; then + if [ ! 
$GOPATH ]; then
+		die 'Need $GOPATH to get the proto files'
+	fi
+
+	get_source_version "github.com/kata-containers/agent" ""
+	cp $GOPATH/src/github.com/kata-containers/agent/protocols/grpc/agent.proto ./protos/
+	cp $GOPATH/src/github.com/kata-containers/agent/protocols/grpc/oci.proto ./protos/
+	cp $GOPATH/src/github.com/kata-containers/agent/protocols/grpc/health.proto ./protos/
+	mkdir -p ./protos/github.com/kata-containers/agent/pkg/types/
+	cp $GOPATH/src/github.com/kata-containers/agent/pkg/types/types.proto ./protos/github.com/kata-containers/agent/pkg/types/
+
+	# The version is taken from https://github.com/kata-containers/agent/blob/master/Gopkg.toml
+	get_source_version "github.com/gogo/protobuf" "4cbf7e384e768b4e01799441fdf2a706a5635ae7"
+	mkdir -p ./protos/github.com/gogo/protobuf/gogoproto/
+	cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto ./protos/github.com/gogo/protobuf/gogoproto/
+	mkdir -p ./protos/google/protobuf/
+	cp $GOPATH/src/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto ./protos/google/protobuf/
+fi
+
+get_rs agent.proto
+get_rs health.proto
+get_rs github.com/kata-containers/agent/pkg/types/types.proto
+get_rs google/protobuf/empty.proto
+
+get_rs oci.proto
+# Need change Box to ::std::boxed::Box because there is another struct Box
+sed 's/self: Box/self: ::std::boxed::Box/g' oci.rs > new_oci.rs
+mv new_oci.rs oci.rs
diff --git a/example2/protocols/mod.rs b/example2/protocols/mod.rs
new file mode 100644
index 00000000..b81f3d7d
--- /dev/null
+++ b/example2/protocols/mod.rs
@@ -0,0 +1,8 @@
+// Copyright (c) 2020 Ant Financial
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+pub mod asynchronous;
+pub mod sync;
+pub use asynchronous as r#async;
diff --git a/example2/protocols/protos/agent.proto b/example2/protocols/protos/agent.proto
new file mode 100644
index 00000000..39f192f1
--- /dev/null
+++ b/example2/protocols/protos/agent.proto
@@ -0,0 +1,486 @@
+//
+// Copyright 2017 HyperHQ Inc.
+// +// SPDX-License-Identifier: Apache-2.0 +// + +syntax = "proto3"; + +package agent; + +import "oci.proto"; +import "github.com/kata-containers/agent/pkg/types/types.proto"; +import "google/protobuf/empty.proto"; + +// unstable +service AgentService { + // execution + rpc CreateContainer(CreateContainerRequest) returns (google.protobuf.Empty); + rpc StartContainer(StartContainerRequest) returns (google.protobuf.Empty); + + // RemoveContainer will tear down an existing container by forcibly terminating + // all processes running inside that container and releasing all internal + // resources associated with it. + // RemoveContainer will wait for all processes termination before returning. + // If any process can not be killed or if it can not be killed after + // the RemoveContainerRequest timeout, RemoveContainer will return an error. + rpc RemoveContainer(RemoveContainerRequest) returns (google.protobuf.Empty); + rpc ExecProcess(ExecProcessRequest) returns (google.protobuf.Empty); + rpc SignalProcess(SignalProcessRequest) returns (google.protobuf.Empty); + rpc WaitProcess(WaitProcessRequest) returns (WaitProcessResponse); // wait & reap like waitpid(2) + rpc ListProcesses(ListProcessesRequest) returns (ListProcessesResponse); + rpc UpdateContainer(UpdateContainerRequest) returns (google.protobuf.Empty); + rpc StatsContainer(StatsContainerRequest) returns (StatsContainerResponse); + rpc PauseContainer(PauseContainerRequest) returns (google.protobuf.Empty); + rpc ResumeContainer(ResumeContainerRequest) returns (google.protobuf.Empty); + + // stdio + rpc WriteStdin(WriteStreamRequest) returns (WriteStreamResponse); + rpc ReadStdout(ReadStreamRequest) returns (ReadStreamResponse); + rpc ReadStderr(ReadStreamRequest) returns (ReadStreamResponse); + rpc CloseStdin(CloseStdinRequest) returns (google.protobuf.Empty); + rpc TtyWinResize(TtyWinResizeRequest) returns (google.protobuf.Empty); + + // networking + rpc UpdateInterface(UpdateInterfaceRequest) returns 
(types.Interface); + rpc UpdateRoutes(UpdateRoutesRequest) returns (Routes); + rpc ListInterfaces(ListInterfacesRequest) returns(Interfaces); + rpc ListRoutes(ListRoutesRequest) returns (Routes); + + // tracing + rpc StartTracing(StartTracingRequest) returns (google.protobuf.Empty); + rpc StopTracing(StopTracingRequest) returns (google.protobuf.Empty); + + // misc (TODO: some rpcs can be replaced by hyperstart-exec) + rpc CreateSandbox(CreateSandboxRequest) returns (google.protobuf.Empty); + rpc DestroySandbox(DestroySandboxRequest) returns (google.protobuf.Empty); + rpc OnlineCPUMem(OnlineCPUMemRequest) returns (google.protobuf.Empty); + rpc ReseedRandomDev(ReseedRandomDevRequest) returns (google.protobuf.Empty); + rpc GetGuestDetails(GuestDetailsRequest) returns (GuestDetailsResponse); + rpc MemHotplugByProbe(MemHotplugByProbeRequest) returns (google.protobuf.Empty); + rpc SetGuestDateTime(SetGuestDateTimeRequest) returns (google.protobuf.Empty); + rpc CopyFile(CopyFileRequest) returns (google.protobuf.Empty); +} + +message CreateContainerRequest { + string container_id = 1; + string exec_id = 2; + StringUser string_user = 3; + repeated Device devices = 4; + repeated Storage storages = 5; + oci.Spec OCI = 6; + + // This field is used to indicate if the container needs to join + // sandbox shared pid ns or create a new namespace. This field is + // meant to override the NEWPID config settings in the OCI spec. + // The agent would receive an OCI spec with PID namespace cleared + // out altogether and not just the pid ns path. + bool sandbox_pidns = 7; +} + +message StartContainerRequest { + string container_id = 1; +} + +message RemoveContainerRequest { + string container_id = 1; + + // RemoveContainer will return an error if + // it could not kill some container processes + // after timeout seconds. + // Setting timeout to 0 means RemoveContainer will + // wait for ever. 
+ uint32 timeout = 2; +} + +message ExecProcessRequest { + string container_id = 1; + string exec_id = 2; + StringUser string_user = 3; + oci.Process process = 4; +} + +message SignalProcessRequest { + string container_id = 1; + + // Special case for SignalProcess(): exec_id can be empty(""), + // which means to send the signal to all the processes including their descendants. + // Other APIs with exec_id should treat empty exec_id as an invalid request. + string exec_id = 2; + uint32 signal = 3; +} + +message WaitProcessRequest { + string container_id = 1; + string exec_id = 2; +} + +message WaitProcessResponse { + int32 status = 1; +} + +// ListProcessesRequest contains the options used to list running processes inside the container +message ListProcessesRequest { + string container_id = 1; + string format = 2; + repeated string args = 3; +} + +// ListProcessesResponse represents the list of running processes inside the container +message ListProcessesResponse { + bytes process_list = 1; +} + +message UpdateContainerRequest { + string container_id = 1; + oci.LinuxResources resources = 2; +} + +message StatsContainerRequest { + string container_id = 1; +} + +message PauseContainerRequest { + string container_id = 1; +} + +message ResumeContainerRequest { + string container_id = 1; +} + +message CpuUsage { + uint64 total_usage = 1; + repeated uint64 percpu_usage = 2; + uint64 usage_in_kernelmode = 3; + uint64 usage_in_usermode = 4; +} + +message ThrottlingData { + uint64 periods = 1; + uint64 throttled_periods = 2; + uint64 throttled_time = 3; +} + +message CpuStats { + CpuUsage cpu_usage = 1; + ThrottlingData throttling_data = 2; +} + +message PidsStats { + uint64 current = 1; + uint64 limit = 2; +} + +message MemoryData { + uint64 usage = 1; + uint64 max_usage = 2; + uint64 failcnt = 3; + uint64 limit = 4; +} + +message MemoryStats { + uint64 cache = 1; + MemoryData usage = 2; + MemoryData swap_usage = 3; + MemoryData kernel_usage = 4; + bool use_hierarchy = 5; + 
map<string, uint64> stats = 6; +} + + +message BlkioStatsEntry { + uint64 major = 1; + uint64 minor = 2; + string op = 3; + uint64 value = 4; +} + +message BlkioStats { + repeated BlkioStatsEntry io_service_bytes_recursive = 1; // number of bytes transferred to and from the block device + repeated BlkioStatsEntry io_serviced_recursive = 2; + repeated BlkioStatsEntry io_queued_recursive = 3; + repeated BlkioStatsEntry io_service_time_recursive = 4; + repeated BlkioStatsEntry io_wait_time_recursive = 5; + repeated BlkioStatsEntry io_merged_recursive = 6; + repeated BlkioStatsEntry io_time_recursive = 7; + repeated BlkioStatsEntry sectors_recursive = 8; +} + +message HugetlbStats { + uint64 usage = 1; + uint64 max_usage = 2; + uint64 failcnt = 3; +} + +message CgroupStats { + CpuStats cpu_stats = 1; + MemoryStats memory_stats = 2; + PidsStats pids_stats = 3; + BlkioStats blkio_stats = 4; + map<string, HugetlbStats> hugetlb_stats = 5; // the map is in the format "size of hugepage: stats of the hugepage" + +} + +message NetworkStats { + string name = 1; + uint64 rx_bytes = 2; + uint64 rx_packets = 3; + uint64 rx_errors = 4; + uint64 rx_dropped = 5; + uint64 tx_bytes = 6; + uint64 tx_packets = 7; + uint64 tx_errors = 8; + uint64 tx_dropped = 9; +} + +message StatsContainerResponse { + CgroupStats cgroup_stats = 1; + repeated NetworkStats network_stats = 2; +} + +message WriteStreamRequest { + string container_id = 1; + string exec_id = 2; + bytes data = 3; +} + +message WriteStreamResponse { + uint32 len = 1; +} + +message ReadStreamRequest { + string container_id = 1; + string exec_id = 2; + uint32 len = 3; +} + +message ReadStreamResponse { + bytes data = 1; +} + +message CloseStdinRequest { + string container_id = 1; + string exec_id = 2; +} + +message TtyWinResizeRequest { + string container_id = 1; + string exec_id = 2; + uint32 row = 3; + uint32 column = 4; +} + +message CreateSandboxRequest { + string hostname = 1; + repeated string dns = 2; + repeated Storage storages = 3; + + // This field means 
that a pause process needs to be created by the + // agent. This pid namespace of the pause process will be treated as + // a shared pid namespace. All containers created will join this shared + // pid namespace. + bool sandbox_pidns = 4; + // SandboxId identifies which sandbox is using the agent. We allow only + // one sandbox per agent and implicitly require that CreateSandbox is + // called before other sandbox/network calls. + string sandbox_id = 5; + // This field, if non-empty, designates an absolute path to a directory + // that the agent will search for OCI hooks to run within the guest. + string guest_hook_path = 6; +} + +message DestroySandboxRequest { +} + +message Interfaces { + repeated types.Interface Interfaces = 1; +} + +message Routes { + repeated types.Route Routes = 1; +} + +message UpdateInterfaceRequest { + types.Interface interface = 1; +} + +message UpdateRoutesRequest { + Routes routes = 1; +} + +message ListInterfacesRequest { +} + +message ListRoutesRequest { +} + +message OnlineCPUMemRequest { + // Wait specifies if the caller waits for the agent to online all resources. + // If true the agent returns once all resources have been connected, otherwise all + // resources are connected asynchronously and the agent returns immediately. + bool wait = 1; + + // NbCpus specifies the number of CPUs that were added and the agent has to online. + uint32 nb_cpus = 2; + + // CpuOnly specifies whether only online CPU or not. + bool cpu_only = 3; +} + +message ReseedRandomDevRequest { + // Data specifies the random data used to reseed the guest crng. + bytes data = 2; +} + +// AgentDetails provides information to the client about the running agent. +message AgentDetails { + // Semantic version of agent (see https://semver.org). + string version = 1; + + // Set if the agent is running as PID 1. + bool init_daemon = 2; + + // List of available device handlers. + repeated string device_handlers = 3; + + // List of available storage handlers. 
+ repeated string storage_handlers = 4; + + // Set only if the agent is built with seccomp support and the guest + // environment supports seccomp. + bool supports_seccomp = 5; +} + +message GuestDetailsRequest { + // MemBlockSize asks server to return the system memory block size that can be used + // for memory hotplug alignment. Typically the server returns what's in + // /sys/devices/system/memory/block_size_bytes. + bool mem_block_size = 1; + + // MemoryHotplugProbe asks server to return whether guest kernel supports memory hotplug + // via probeinterface. Typically the server will check if the path + // /sys/devices/system/memory/probe exists. + bool mem_hotplug_probe = 2; +} + +message GuestDetailsResponse { + // MemBlockSizeBytes returns the system memory block size in bytes. + uint64 mem_block_size_bytes = 1; + + AgentDetails agent_details = 2; + + bool support_mem_hotplug_probe = 3; +} + +message MemHotplugByProbeRequest { + // server needs to send the value of memHotplugProbeAddr into file /sys/devices/system/memory/probe, + // in order to notify the guest kernel about hot-add memory event + repeated uint64 memHotplugProbeAddr = 1; +} + +message SetGuestDateTimeRequest { + // Sec the second since the Epoch. + int64 Sec = 1; + // Usec the microseconds portion of time since the Epoch. + int64 Usec = 2; +} + +// Storage represents both the rootfs of the container, and any volume that +// could have been defined through the Mount list of the OCI specification. +message Storage { + // Driver is used to define the way the storage is passed through the + // virtual machine. It can be "9p", "blk", or something else, but for + // all cases, this will define if some extra steps are required before + // this storage gets mounted into the container. + string driver = 1; + // DriverOptions allows the caller to define a list of options such + // as block sizes, numbers of luns, ... 
which are very specific to + // every device and cannot be generalized through extra fields. + repeated string driver_options = 2; + // Source can be anything representing the source of the storage. This + // will be handled by the proper handler based on the Driver used. + // For instance, it can be a very simple path if the caller knows the + // name of device inside the VM, or it can be some sort of identifier + // to let the agent find the device inside the VM. + string source = 3; + // Fstype represents the filesystem that needs to be used to mount the + // storage inside the VM. For instance, it could be "xfs" for block + // device, "9p" for shared filesystem, or "tmpfs" for shared /dev/shm. + string fstype = 4; + // Options describes the additional options that might be needed to + // mount properly the storage filesytem. + repeated string options = 5; + // MountPoint refers to the path where the storage should be mounted + // inside the VM. + string mount_point = 6; +} + +// Device represents only the devices that could have been defined through the +// Linux Device list of the OCI specification. +message Device { + // Id can be used to identify the device inside the VM. Some devices + // might not need it to be identified on the VM, and will rely on the + // provided VmPath instead. + string id = 1; + // Type defines the type of device described. This can be "blk", + // "scsi", "vfio", ... + // Particularly, this should be used to trigger the use of the + // appropriate device handler. + string type = 2; + // VmPath can be used by the caller to provide directly the path of + // the device as it will appear inside the VM. For some devices, the + // device id or the list of options passed might not be enough to find + // the device. In those cases, the caller should predict and provide + // this vm_path. + string vm_path = 3; + // ContainerPath defines the path where the device should be found inside + // the container. 
This path should match the path of the device from + // the device list listed inside the OCI spec. This is used in order + // to identify the right device in the spec and update it with the + // right options such as major/minor numbers as they appear inside + // the VM for instance. Note that an empty ctr_path should be used + // to make sure the device handler inside the agent is called, but + // no spec update needs to be performed. This has to happen for the + // case of rootfs, when a device has to be waited for after it has + // been hotplugged. An equivalent Storage entry should be defined if + // any mount needs to be performed afterwards. + string container_path = 4; + // Options allows the caller to define a list of options such as block + // sizes, numbers of luns, ... which are very specific to every device + // and cannot be generalized through extra fields. + repeated string options = 5; +} + +message StringUser { + string uid = 1; + string gid = 2; + repeated string additionalGids = 3; +} + +message CopyFileRequest { + // Path is the destination file in the guest. It must be absolute, + // canonical and below /run. + string path = 1; + // FileSize is the expected file size, for security reasons write operations + // are made in a temporary file, once it has the expected size, it's moved + // to the destination path. + int64 file_size = 2; + // FileMode is the file mode. + uint32 file_mode = 3; + // DirMode is the mode for the parent directories of destination path. + uint32 dir_mode = 4; + // Uid is the numeric user id. + int32 uid = 5; + // Gid is the numeric group id. + int32 gid = 6; + // Offset for the next write operation. + int64 offset = 7; + // Data to write in the destination file. 
+ bytes data = 8; +} + +message StartTracingRequest { +} + +message StopTracingRequest { +} diff --git a/example2/protocols/protos/github.com/gogo/protobuf/gogoproto/gogo.proto b/example2/protocols/protos/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 00000000..b80c8565 --- /dev/null +++ b/example2/protocols/protos/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend 
google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git a/example2/protocols/protos/github.com/kata-containers/agent/pkg/types/types.proto b/example2/protocols/protos/github.com/kata-containers/agent/pkg/types/types.proto new file mode 100644 index 00000000..f6856e1e --- /dev/null +++ b/example2/protocols/protos/github.com/kata-containers/agent/pkg/types/types.proto @@ -0,0 +1,48 @@ +// +// Copyright 2018 Intel Corporation. 
+// +// SPDX-License-Identifier: Apache-2.0 +// + +syntax = "proto3"; + +package types; + +enum IPFamily { + v4 = 0; + v6 = 1; +} + +message IPAddress { + IPFamily family = 1; + string address = 2; + string mask = 3; +} + +message Interface { + string device = 1; + string name = 2; + repeated IPAddress IPAddresses = 3; + uint64 mtu = 4; + string hwAddr = 5; + + // pciAddr is the PCI address in the format "bridgeAddr/deviceAddr". + // Here, bridgeAddr is the address at which the bridge is attached on the root bus, + // while deviceAddr is the address at which the network device is attached on the bridge. + string pciAddr = 6; + + // Type defines the type of interface described by this structure. + // The expected values are the one that are defined by the netlink + // library, regarding each type of link. Here is a non exhaustive + // list: "veth", "macvtap", "vlan", "macvlan", "tap", ... + string type = 7; + uint32 raw_flags = 8; +} + +message Route { + string dest = 1; + string gateway = 2; + string device = 3; + string source = 4; + uint32 scope = 5; +} diff --git a/example2/protocols/protos/google/protobuf/descriptor.proto b/example2/protocols/protos/google/protobuf/descriptor.proto new file mode 100644 index 00000000..a2102d7a --- /dev/null +++ b/example2/protocols/protos/google/protobuf/descriptor.proto @@ -0,0 +1,885 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). 
+ + +syntax = "proto2"; + +package google.protobuf; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. 
+message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. 
However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + } + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + } + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. 
Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default = false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default = false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. 
See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default = false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. 
+ optional bool java_string_check_utf8 = 27 [default = false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default = SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default = false]; + optional bool java_generic_services = 17 [default = false]; + optional bool py_generic_services = 18 [default = false]; + optional bool php_generic_services = 42 [default = false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. 
+ optional bool deprecated = 23 [default = false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default = false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + optional string php_metadata_namespace = 44; + + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + optional string ruby_package = 45; + + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. 
+ // See the documentation for the "Options" section above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default = false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default = false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default = false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. 
+ // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. 
That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default = false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default = false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default = false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. 
+ optional bool deprecated = 3 [default = false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. 
+ + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default = false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = 34 + [default = IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". 
+ message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. 
This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). 
+ repeated int32 path = 1 [packed = true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed = true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. 
*/ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed = true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/example2/protocols/protos/google/protobuf/empty.proto b/example2/protocols/protos/google/protobuf/empty.proto new file mode 100644 index 00000000..6057c852 --- /dev/null +++ b/example2/protocols/protos/google/protobuf/empty.proto @@ -0,0 +1,52 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/example2/protocols/protos/google/protobuf/test_empty.proto b/example2/protocols/protos/google/protobuf/test_empty.proto new file mode 100644 index 00000000..778d17bc --- /dev/null +++ b/example2/protocols/protos/google/protobuf/test_empty.proto @@ -0,0 +1,5 @@ +syntax = "proto3"; + +package google.protobuf; + +message Empty {} diff --git a/example2/protocols/protos/health.proto b/example2/protocols/protos/health.proto new file mode 100644 index 00000000..f263212d --- /dev/null +++ b/example2/protocols/protos/health.proto @@ -0,0 +1,40 @@ +// +// Copyright 2017 HyperHQ Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 +// + +syntax = "proto3"; + +package health; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; +option (gogoproto.populate_all) = true; +option (gogoproto.testgen_all) = true; +option (gogoproto.benchgen_all) = true; + +message CheckRequest { + string service = 1; + optional string option_val = 2; +} + +message HealthCheckResponse { + enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + } + ServingStatus status = 1; +} + +message VersionCheckResponse { + string grpc_version = 1; + string agent_version = 2; +} + +service Health { + rpc Check(CheckRequest) returns (HealthCheckResponse); + rpc Version(CheckRequest) returns (VersionCheckResponse); +} diff --git a/example2/protocols/protos/oci.proto b/example2/protocols/protos/oci.proto new file mode 100644 index 00000000..c485bb3b --- /dev/null +++ b/example2/protocols/protos/oci.proto @@ -0,0 +1,461 @@ +// +// Copyright (c) 2017 Intel Corporation +// +// SPDX-License-Identifier: Apache-2.0 +// + +syntax = "proto3"; + +package oci; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; +option (gogoproto.populate_all) = true; +option (gogoproto.testgen_all) = true; +option (gogoproto.benchgen_all) = true; + +message Spec { + // Version of the Open Container Initiative Runtime Specification with which the bundle complies. + string Version = 1; + + // Process configures the container process. + Process Process = 2; + + // Root configures the container's root filesystem. + Root Root = 3; + + // Hostname configures the container's hostname. + string Hostname = 4; + + // Mounts configures additional mounts (on top of Root). + repeated Mount Mounts = 5 [(gogoproto.nullable) = false]; + + // Hooks configures callbacks for container lifecycle events. + Hooks Hooks = 6; + + // Annotations contains arbitrary metadata for the container. 
+ map Annotations = 7; + + // Linux is platform-specific configuration for Linux based containers. + Linux Linux = 8; + + // Solaris is platform-specific configuration for Solaris based containers. + Solaris Solaris = 9; + // Windows is platform-specific configuration for Windows based containers. + Windows Windows = 10; +} + +message Process { + // Terminal creates an interactive terminal for the container. + bool Terminal = 1; + + // ConsoleSize specifies the size of the console. + Box ConsoleSize = 2; + + // User specifies user information for the process. + User User = 3 [(gogoproto.nullable) = false]; + + // Args specifies the binary and arguments for the application to execute. + repeated string Args = 4; + + // Env populates the process environment for the process. + repeated string Env = 5; + + // Cwd is the current working directory for the process and must be + // relative to the container's root. + string Cwd = 6; + + // Capabilities are Linux capabilities that are kept for the process. + LinuxCapabilities Capabilities = 7; + + // Rlimits specifies rlimit options to apply to the process. + repeated POSIXRlimit Rlimits = 8 [(gogoproto.nullable) = false]; + + // NoNewPrivileges controls whether additional privileges could be gained by processes in the container. + bool NoNewPrivileges = 9; + + // ApparmorProfile specifies the apparmor profile for the container. + string ApparmorProfile = 10; + + // Specify an oom_score_adj for the container. + int64 OOMScoreAdj = 11; + + // SelinuxLabel specifies the selinux context that the container process is run as. + string SelinuxLabel = 12; +} + +message Box { + // Height is the vertical dimension of a box. + uint32 Height = 1; + + // Width is the horizontal dimension of a box. + uint32 Width = 2; +} + +message User { + // UID is the user id. + uint32 UID = 1; + + // GID is the group id. + uint32 GID = 2; + + // AdditionalGids are additional group ids set for the container's process. 
+ repeated uint32 AdditionalGids = 3;
+
+ // Username is the user name.
+ string Username = 4;
+}
+
+message LinuxCapabilities {
+ // Bounding is the set of capabilities checked by the kernel.
+ repeated string Bounding = 1;
+
+ // Effective is the set of capabilities checked by the kernel.
+ repeated string Effective = 2;
+
+ // Inheritable is the capabilities preserved across execve.
+ repeated string Inheritable = 3;
+
+ // Permitted is the limiting superset for effective capabilities.
+ repeated string Permitted = 4;
+
+ // Ambient is the ambient set of capabilities that are kept.
+ repeated string Ambient = 5;
+}
+
+message POSIXRlimit {
+ // Type of the rlimit to set
+ string Type = 1;
+
+ // Hard is the hard limit for the specified type
+ uint64 Hard = 2;
+
+ // Soft is the soft limit for the specified type
+ uint64 Soft = 3;
+}
+
+message Mount {
+ // destination is the path inside the container except when it starts with "tmp:/"
+ string destination = 1;
+
+ // source is the path inside the container except when it starts with "vm:/dev/" or "tmp:/"
+ // the path which starts with "vm:/dev/" refers to the guest vm's "/dev",
+ // especially, "vm:/dev/hostfs/" refers to the shared filesystem.
+ // "tmp:/" is a temporary directory which is used for temporary mounts.
+ string source = 2;
+ string type = 3;
+ repeated string options = 4;
+}
+
+message Root {
+ // Path is the absolute path to the container's root filesystem.
+ string Path = 1;
+
+ // Readonly makes the root filesystem for the container readonly before the process is executed.
+ bool Readonly = 2;
+}
+
+message Hooks {
+ // Prestart is a list of hooks to be run before the container process is executed.
+ repeated Hook Prestart = 1 [(gogoproto.nullable) = false];
+
+ // Poststart is a list of hooks to be run after the container process is started.
+ repeated Hook Poststart = 2 [(gogoproto.nullable) = false];
+
+ // Poststop is a list of hooks to be run after the container process exits.
+ repeated Hook Poststop = 3 [(gogoproto.nullable) = false]; +} + +message Hook { + string Path = 1; + repeated string Args = 2; + repeated string Env = 3; + int64 Timeout = 4; +} + +message Linux { + // UIDMapping specifies user mappings for supporting user namespaces. + repeated LinuxIDMapping UIDMappings = 1 [(gogoproto.nullable) = false]; + + // GIDMapping specifies group mappings for supporting user namespaces. + repeated LinuxIDMapping GIDMappings = 2 [(gogoproto.nullable) = false]; + + // Sysctl are a set of key value pairs that are set for the container on start + map Sysctl = 3; + + // Resources contain cgroup information for handling resource constraints + // for the container + LinuxResources Resources = 4; + + // CgroupsPath specifies the path to cgroups that are created and/or joined by the container. + // The path is expected to be relative to the cgroups mountpoint. + // If resources are specified, the cgroups at CgroupsPath will be updated based on resources. + string CgroupsPath = 5; + + // Namespaces contains the namespaces that are created and/or joined by the container + repeated LinuxNamespace Namespaces = 6 [(gogoproto.nullable) = false]; + + // Devices are a list of device nodes that are created for the container + repeated LinuxDevice Devices = 7 [(gogoproto.nullable) = false]; + + // Seccomp specifies the seccomp security settings for the container. + LinuxSeccomp Seccomp = 8; + + // RootfsPropagation is the rootfs mount propagation mode for the container. + string RootfsPropagation = 9; + + // MaskedPaths masks over the provided paths inside the container. + repeated string MaskedPaths = 10; + + // ReadonlyPaths sets the provided paths as RO inside the container. + repeated string ReadonlyPaths = 11; + + // MountLabel specifies the selinux context for the mounts in the container. 
+ string MountLabel = 12; + + // IntelRdt contains Intel Resource Director Technology (RDT) information + // for handling resource constraints (e.g., L3 cache) for the container + LinuxIntelRdt IntelRdt = 13; +} + +message Windows { + // Dummy string, never used. + string dummy = 1; +} + +message Solaris { + // Dummy string, never used. + string dummy = 1; +} + +message LinuxIDMapping { + // HostID is the starting UID/GID on the host to be mapped to 'ContainerID' + uint32 HostID = 1; + + // ContainerID is the starting UID/GID in the container + uint32 ContainerID = 2; + + // Size is the number of IDs to be mapped + uint32 Size = 3; +} + +message LinuxNamespace { + // Type is the type of namespace + string Type = 1; + + // Path is a path to an existing namespace persisted on disk that can be joined + // and is of the same type + string Path = 2; +} + +message LinuxDevice { + // Path to the device. + string Path = 1; + + // Device type, block, char, etc. + string Type = 2; + + // Major is the device's major number. + int64 Major = 3; + + // Minor is the device's minor number. + int64 Minor = 4; + + // FileMode permission bits for the device. + uint32 FileMode = 5; + + // UID of the device. + uint32 UID = 6; + + // Gid of the device. + uint32 GID = 7; +} + +message LinuxResources { + // Devices configures the device whitelist. + repeated LinuxDeviceCgroup Devices = 1 [(gogoproto.nullable) = false]; + + // Memory restriction configuration + LinuxMemory Memory = 2; + + // CPU resource restriction configuration + LinuxCPU CPU = 3; + + // Task resource restriction configuration. + LinuxPids Pids = 4; + + // BlockIO restriction configuration + LinuxBlockIO BlockIO = 5; + + // Hugetlb limit (in bytes) + repeated LinuxHugepageLimit HugepageLimits = 6 [(gogoproto.nullable) = false]; + + // Network restriction configuration + LinuxNetwork Network = 7; +} + +message LinuxMemory { + // Memory limit (in bytes). 
+ int64 Limit = 1; + + // Memory reservation or soft_limit (in bytes). + int64 Reservation = 2; + + // Total memory limit (memory + swap). + int64 Swap = 3; + + // Kernel memory limit (in bytes). + int64 Kernel = 4; + + // Kernel memory limit for tcp (in bytes) + int64 KernelTCP = 5; + + // How aggressive the kernel will swap memory pages. + uint64 Swappiness = 6; + + // DisableOOMKiller disables the OOM killer for out of memory conditions + bool DisableOOMKiller = 7; +} + +message LinuxCPU { + // CPU shares (relative weight (ratio) vs. other cgroups with cpu shares). + uint64 Shares = 1; + + // CPU hardcap limit (in usecs). Allowed cpu time in a given period. + int64 Quota = 2; + + // CPU period to be used for hardcapping (in usecs). + uint64 Period = 3; + + // How much time realtime scheduling may use (in usecs). + int64 RealtimeRuntime = 4; + + // CPU period to be used for realtime scheduling (in usecs). + uint64 RealtimePeriod = 5; + + // CPUs to use within the cpuset. Default is to use any CPU available. + string Cpus = 6; + + // List of memory nodes in the cpuset. Default is to use any available memory node. + string Mems = 7; +} + +message LinuxWeightDevice { + // Major is the device's major number. + int64 Major = 1; + + // Minor is the device's minor number. + int64 Minor = 2; + + // Weight is the bandwidth rate for the device. + uint32 Weight = 3; + + // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only + uint32 LeafWeight = 4; +} + +message LinuxThrottleDevice { + // Major is the device's major number. + int64 Major = 1; + + // Minor is the device's minor number. 
+ int64 Minor = 2; + + // Rate is the IO rate limit per cgroup per device + uint64 Rate = 3; +} + +message LinuxBlockIO { + // Specifies per cgroup weight + uint32 Weight = 1; + + // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, CFQ scheduler only + uint32 LeafWeight = 2; + + // Weight per cgroup per device, can override BlkioWeight + repeated LinuxWeightDevice WeightDevice = 3 [(gogoproto.nullable) = false]; + + // IO read rate limit per cgroup per device, bytes per second + repeated LinuxThrottleDevice ThrottleReadBpsDevice = 4 [(gogoproto.nullable) = false]; + + // IO write rate limit per cgroup per device, bytes per second + repeated LinuxThrottleDevice ThrottleWriteBpsDevice = 5 [(gogoproto.nullable) = false]; + + // IO read rate limit per cgroup per device, IO per second + repeated LinuxThrottleDevice ThrottleReadIOPSDevice = 6 [(gogoproto.nullable) = false]; + + // IO write rate limit per cgroup per device, IO per second + repeated LinuxThrottleDevice ThrottleWriteIOPSDevice = 7 [(gogoproto.nullable) = false]; +} + +message LinuxPids { + // Maximum number of PIDs. Default is "no limit". + int64 Limit = 1; +} + +message LinuxDeviceCgroup { + // Allow or deny + bool Allow = 1; + + // Device type, block, char, etc. + string Type = 2; + + // Major is the device's major number. + int64 Major = 3; + + // Minor is the device's minor number. + int64 Minor = 4; + + // Cgroup access permissions format, rwm. 
+ string Access = 5; +} + +message LinuxNetwork { + // Set class identifier for container's network packets + uint32 ClassID = 1; + + // Set priority of network traffic for container + repeated LinuxInterfacePriority Priorities = 2 [(gogoproto.nullable) = false]; +} + +message LinuxHugepageLimit { + // Pagesize is the hugepage size + string Pagesize = 1; + + // Limit is the limit of "hugepagesize" hugetlb usage + uint64 Limit = 2; +} + +message LinuxInterfacePriority { + // Name is the name of the network interface + string Name = 1; + + // Priority for the interface + uint32 Priority = 2; +} + +message LinuxSeccomp { + string DefaultAction = 1; + repeated string Architectures = 2; + repeated LinuxSyscall Syscalls = 3 [(gogoproto.nullable) = false]; +} + +message LinuxSeccompArg { + uint64 Index = 1; + uint64 Value = 2; + uint64 ValueTwo = 3; + string Op = 4; +} + +message LinuxSyscall { + repeated string Names = 1; + string Action = 2; + repeated LinuxSeccompArg Args = 3 [(gogoproto.nullable) = false]; +} + +message LinuxIntelRdt { + // The schema for L3 cache id and capacity bitmask (CBM) + // Format: "L3:=;=;..." + string L3CacheSchema = 1; +} diff --git a/example2/protocols/protos/streaming.proto b/example2/protocols/protos/streaming.proto new file mode 100644 index 00000000..26808d28 --- /dev/null +++ b/example2/protocols/protos/streaming.proto @@ -0,0 +1,49 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +syntax = "proto3"; + +package streaming; + +import "google/protobuf/empty.proto"; + +// Shim service is launched for each container and is responsible for owning the IO +// for the container and its additional processes. The shim is also the parent of +// each container and allows reattaching to the IO and receiving the exit status +// for the container processes. + +service Streaming { + rpc Echo(EchoPayload) returns (EchoPayload); + rpc EchoStream(stream EchoPayload) returns (stream EchoPayload); + rpc SumStream(stream Part) returns (Sum); + rpc DivideStream(Sum) returns (stream Part); + rpc EchoNull(stream EchoPayload) returns (google.protobuf.Empty); + rpc EchoNullStream(stream EchoPayload) returns (stream google.protobuf.Empty); +} + +message EchoPayload { + uint32 seq = 1; + string msg = 2; +} + +message Part { + int32 add = 1; +} + +message Sum { + int32 sum = 1; + int32 num = 2; +} diff --git a/example2/protocols/sync/mod.rs b/example2/protocols/sync/mod.rs new file mode 100644 index 00000000..a6fc3258 --- /dev/null +++ b/example2/protocols/sync/mod.rs @@ -0,0 +1,8 @@ +// Copyright (c) 2020 Ant Financial +// +// SPDX-License-Identifier: Apache-2.0 +// + +#![allow(dead_code)] + +include!("_include.rs"); diff --git a/example2/server.rs b/example2/server.rs new file mode 100644 index 00000000..604ea31d --- /dev/null +++ b/example2/server.rs @@ -0,0 +1,115 @@ +// Copyright (c) 2019 Ant Financial +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod protocols; +mod utils; + +#[macro_use] +extern crate log; + +use log::LevelFilter; +use std::sync::Arc; +use std::thread; + +use protocols::sync::{agent, health, types}; +use ttrpc::error::{Error, Result}; +use ttrpc::proto::{Code, Status}; +use ttrpc::Server; + +struct HealthService; + +impl health::Health for HealthService { + fn check( + &self, + _ctx: &ttrpc::TtrpcContext, + _: health::CheckRequest, + ) -> Result { + let mut status = Status::default(); + status.code = Code::NOT_FOUND as i32; + status.message = "Just for fun".to_owned(); + Err(Error::RpcStatus(status)) + } + + fn version( + &self, + ctx: &ttrpc::TtrpcContext, + req: health::CheckRequest, + ) -> Result { + info!("version {:?}", req); + info!("ctx {:?}", ctx); + let mut rep = health::VersionCheckResponse::default(); + rep.agent_version = "mock 0.1".to_owned(); + rep.grpc_version = "0.0.1".to_owned(); + let mut status = Status::default(); + status.code = Code::NOT_FOUND as i32; + Ok(rep) + } +} + +struct AgentService; + +impl agent::AgentService for AgentService { + fn list_interfaces( + &self, + _ctx: &::ttrpc::TtrpcContext, + _req: agent::ListInterfacesRequest, + ) -> ::ttrpc::Result { + Ok(agent::Interfaces { + interfaces: vec![ + types::Interface { + name: "first".to_string(), + ..Default::default() + }, + types::Interface { + name: "second".to_string(), + ..Default::default() + }, + ], + ..Default::default() + }) + } +} + +fn main() { + simple_logging::log_to_stderr(LevelFilter::Trace); + + let h = Box::new(HealthService {}) as Box; + let h = Arc::new(h); + let hservice = health::create_health(h); + + let a = Box::new(AgentService {}) as Box; + let a = Arc::new(a); + let aservice = agent::create_agent_service(a); + + utils::remove_if_sock_exist(utils::SOCK_ADDR).unwrap(); + let mut server = Server::new() + .bind(utils::SOCK_ADDR) + .unwrap() + .register_service(hservice) + .register_service(aservice); + + server.start().unwrap(); + + // Hold the main thread until receiving signal 
SIGTERM + let (tx, rx) = std::sync::mpsc::channel(); + thread::spawn(move || { + ctrlc::set_handler(move || { + tx.send(()).unwrap(); + }) + .expect("Error setting Ctrl-C handler"); + println!("Server is running, press Ctrl + C to exit"); + }); + + rx.recv().unwrap(); +} diff --git a/example2/utils.rs b/example2/utils.rs new file mode 100644 index 00000000..24c6393f --- /dev/null +++ b/example2/utils.rs @@ -0,0 +1,18 @@ +#![allow(dead_code)] +use std::fs; +use std::io::Result; +use std::path::Path; + +pub const SOCK_ADDR: &str = "unix:///tmp/ttrpc-test"; + +pub fn remove_if_sock_exist(sock_addr: &str) -> Result<()> { + let path = sock_addr + .strip_prefix("unix://") + .expect("socket address is not expected"); + + if Path::new(path).exists() { + fs::remove_file(&path)?; + } + + Ok(()) +} diff --git a/install_protoc.sh b/install_protoc.sh new file mode 100755 index 00000000..9926c787 --- /dev/null +++ b/install_protoc.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# Helper script for Github Actions to install protobuf on different runners. +echo "OS: $RUNNER_OS" + +if [ "$RUNNER_OS" == 'Linux' ]; then + # Install on Linux + sudo apt-get update + sudo apt-get install -y protobuf-compiler +elif [ "$RUNNER_OS" == 'macOS' ]; then + # Install on macOS + brew install protobuf +elif [ "$RUNNER_OS" == 'Windows' ]; then + # Install on Windows + choco install -y protoc +else + echo "Unsupported OS: $RUNNER_OS" + exit 1 +fi + +# Check the installed Protobuf version +protoc --version \ No newline at end of file diff --git a/src/asynchronous/client.rs b/src/asynchronous/client.rs index 6198ba36..82b55b79 100644 --- a/src/asynchronous/client.rs +++ b/src/asynchronous/client.rs @@ -77,23 +77,34 @@ impl Client { pub async fn request(&self, req: Request) -> Result { let timeout_nano = req.timeout_nano; let stream_id = self.next_stream_id.fetch_add(2, Ordering::Relaxed); - - let msg: GenMessage = Message::new_request(stream_id, req)? 
- .try_into() - .map_err(|e: protobuf::Error| Error::Others(e.to_string()))?; + let msg: GenMessage; + #[cfg(not(feature = "prost"))] + { + msg = Message::new_request(stream_id, req)? + .try_into() + .map_err(|err: protobuf::Error| Error::Others(err.to_string()))?; + } + + #[cfg(feature = "prost")] + { + msg = Message::new_request(stream_id, req)? + .try_into() + .map_err(|err: std::io::Error| Error::Others(err.to_string()))?; + } let (tx, mut rx): (ResultSender, ResultReceiver) = mpsc::channel(100); - + self.streams .lock() .map_err(|_| Error::Others("Failed to acquire lock on streams".to_string()))? .insert(stream_id, tx); - + self.req_tx .send(SendingMessage::new(msg)) .await .map_err(|_| Error::LocalClosed)?; - + + #[allow(clippy::unnecessary_lazy_evaluations)] let result = if timeout_nano == 0 { rx.recv().await.ok_or_else(|| Error::RemoteClosed)? } else { @@ -111,9 +122,21 @@ impl Client { let res = Response::decode(msg.payload) .map_err(err_to_others_err!(e, "Unpack response error "))?; - let status = res.status(); - if status.code() != Code::OK { - return Err(Error::RpcStatus((*status).clone())); + #[cfg(not(feature = "prost"))] + { + let status = res.status(); + if status.code() != Code::OK { + return Err(Error::RpcStatus((*status).clone())); + } + } + #[cfg(feature = "prost")] + { + let status = res.status.as_ref(); + if let Some(status) = status { + if status.code != Code::OK as i32 { + return Err(Error::RpcStatus(status.clone())); + } + } } Ok(res) @@ -129,9 +152,15 @@ impl Client { let stream_id = self.next_stream_id.fetch_add(2, Ordering::Relaxed); let is_req_payload_empty = req.payload.is_empty(); + #[cfg(not(feature = "prost"))] + let mut msg: GenMessage = Message::new_request(stream_id, req)? + .try_into() + .map_err(|err: protobuf::Error| Error::Others(err.to_string()))?; + + #[cfg(feature = "prost")] let mut msg: GenMessage = Message::new_request(stream_id, req)? 
.try_into() - .map_err(|e: protobuf::Error| Error::Others(e.to_string()))?; + .map_err(|err: std::io::Error| Error::Others(err.to_string()))?; if streaming_client { if !is_req_payload_empty { diff --git a/src/asynchronous/server.rs b/src/asynchronous/server.rs index 362b6726..adbc2ad4 100644 --- a/src/asynchronous/server.rs +++ b/src/asynchronous/server.rs @@ -14,6 +14,8 @@ use std::time::Duration; use async_trait::async_trait; use futures::StreamExt as _; + +#[cfg(not(feature = "prost"))] use protobuf::Message as _; use tokio::{ self, select, spawn, @@ -367,9 +369,14 @@ impl HandlerContext { Ok(opt_msg) => match opt_msg { Some(mut resp) => { // Server: check size before sending to client + #[cfg(not(feature = "prost"))] if let Err(e) = check_oversize(resp.compute_size() as usize, true) { resp = e.into(); } + #[cfg(feature = "prost")] + if let Err(e) = check_oversize(resp.size() as usize, true) { + resp = e.into(); + } Self::respond(self.tx.clone(), stream_id, resp) .await @@ -458,7 +465,6 @@ impl HandlerContext { let req_msg = Message::::try_from(msg) .map_err(|e| get_status(Code::INVALID_ARGUMENT, e.to_string()))?; - let req = &req_msg.payload; trace!("Got Message request {} {}", req.service, req.method); @@ -591,8 +597,19 @@ impl HandlerContext { } async fn respond_with_status(tx: MessageSender, stream_id: u32, status: Status) { - let mut resp = Response::new(); - resp.set_status(status); + #[cfg(not(feature = "prost"))] + let resp = { + let mut resp = Response::new(); + resp.set_status(status); + resp + }; + #[cfg(feature = "prost")] + let resp = { + Response { + status: Some(status), + ..Default::default() + } + }; Self::respond(tx, stream_id, resp) .await .map_err(|e| { diff --git a/src/asynchronous/utils.rs b/src/asynchronous/utils.rs index ce315edb..04dd35ee 100644 --- a/src/asynchronous/utils.rs +++ b/src/asynchronous/utils.rs @@ -13,6 +13,7 @@ use crate::proto::{MessageHeader, Request, Response}; /// Handle request in async mode. 
#[macro_export] +#[cfg(not(feature = "prost"))] macro_rules! async_request_handler { ($class: ident, $ctx: ident, $req: ident, $server: ident, $req_type: ident, $req_fn: ident) => { let mut req = super::$server::$req_type::new(); @@ -49,8 +50,42 @@ macro_rules! async_request_handler { }; } +/// Handle request in async mode. +#[macro_export] +#[cfg(feature = "prost")] +macro_rules! async_request_handler { + ($class: ident, $ctx: ident, $req: ident, $req_type: ident, $req_fn: ident) => { + let mut req = $req_type::default(); + req.merge(&$req.payload as &[u8]) + .map_err(::ttrpc::err_to_others!(e, "Merge request.payload"))?; + + let mut res = ::ttrpc::Response::default(); + match $class.service.$req_fn(&$ctx, req).await { + Ok(rep) => { + res.status = Some(::ttrpc::get_status(::ttrpc::Code::OK, "".to_string())); + rep.encode(&mut res.payload) + .map_err(::ttrpc::err_to_others!(e, "Encoding error "))?; + } + Err(x) => match x { + ::ttrpc::Error::RpcStatus(s) => { + res.status = Some(s); + } + _ => { + res.status = Some(::ttrpc::get_status( + ::ttrpc::Code::UNKNOWN, + format!("{:?}", x), + )); + } + }, + } + + return Ok(res); + }; +} + /// Handle client streaming in async mode. #[macro_export] +#[cfg(not(feature = "prost"))] macro_rules! async_client_streamimg_handler { ($class: ident, $ctx: ident, $inner: ident, $req_fn: ident) => { let stream = ::ttrpc::r#async::ServerStreamReceiver::new($inner); @@ -80,8 +115,37 @@ macro_rules! async_client_streamimg_handler { }; } +#[macro_export] +#[cfg(feature = "prost")] +macro_rules! 
async_client_streamimg_handler { + ($class: ident, $ctx: ident, $inner: ident, $req_fn: ident) => { + let stream = ::ttrpc::r#async::ServerStreamReceiver::new($inner); + let mut res = ::ttrpc::Response::default(); + match $class.service.$req_fn(&$ctx, stream).await { + Ok(rep) => { + res.status = Some(::ttrpc::get_status(::ttrpc::Code::OK, "".to_string())); + rep.encode(&mut res.payload) + .map_err(::ttrpc::err_to_others!(e, "Encoding error "))?; + } + Err(x) => match x { + ::ttrpc::Error::RpcStatus(s) => { + res.status = Some(s); + } + _ => { + res.status = Some(::ttrpc::get_status( + ::ttrpc::Code::UNKNOWN, + format!("{:?}", x), + )); + } + }, + } + return Ok(Some(res)); + }; +} + /// Handle server streaming in async mode. #[macro_export] +#[cfg(not(feature = "prost"))] macro_rules! async_server_streamimg_handler { ($class: ident, $ctx: ident, $inner: ident, $server: ident, $req_type: ident, $req_fn: ident) => { let req_buf = $inner.recv().await?; @@ -111,8 +175,40 @@ macro_rules! async_server_streamimg_handler { }; } +#[macro_export] +#[cfg(feature = "prost")] +macro_rules! async_server_streamimg_handler { + ($class: ident, $ctx: ident, $inner: ident, $req_type: ident, $req_fn: ident) => { + let req_buf = $inner.recv().await?; + let req = <$req_type as ::ttrpc::proto::Codec>::decode(&req_buf) + .map_err(|e| ::ttrpc::Error::Others(e.to_string()))?; + let stream = ::ttrpc::r#async::ServerStreamSender::new($inner); + match $class.service.$req_fn(&$ctx, req, stream).await { + Ok(_) => { + return Ok(None); + } + Err(x) => { + let mut res = ::ttrpc::Response::default(); + match x { + ::ttrpc::Error::RpcStatus(s) => { + res.status = Some(s); + } + _ => { + res.status = Some(::ttrpc::get_status( + ::ttrpc::Code::UNKNOWN, + format!("{:?}", x), + )); + } + } + return Ok(Some(res)); + } + } + }; +} + /// Handle duplex streaming in async mode. #[macro_export] +#[cfg(not(feature = "prost"))] macro_rules! 
async_duplex_streamimg_handler { ($class: ident, $ctx: ident, $inner: ident, $req_fn: ident) => { let stream = ::ttrpc::r#async::ServerStream::new($inner); @@ -139,8 +235,37 @@ macro_rules! async_duplex_streamimg_handler { }; } +#[macro_export] +#[cfg(feature = "prost")] +macro_rules! async_duplex_streamimg_handler { + ($class: ident, $ctx: ident, $inner: ident, $req_fn: ident) => { + let stream = ::ttrpc::r#async::ServerStream::new($inner); + match $class.service.$req_fn(&$ctx, stream).await { + Ok(_) => { + return Ok(None); + } + Err(x) => { + let mut res = ::ttrpc::Response::default(); + match x { + ::ttrpc::Error::RpcStatus(s) => { + res.status = Some(s); + } + _ => { + res.status = Some(::ttrpc::get_status( + ::ttrpc::Code::UNKNOWN, + format!("{:?}", x), + )); + } + } + return Ok(Some(res)); + } + } + }; +} + /// Send request through async client. #[macro_export] +#[cfg(not(feature = "prost"))] macro_rules! async_client_request { ($self: ident, $ctx: ident, $req: ident, $server: expr, $method: expr, $cres: ident) => { let mut creq = ttrpc::Request { @@ -169,8 +294,35 @@ macro_rules! async_client_request { }; } +/// Send request through async client. +#[macro_export] +#[cfg(feature = "prost")] +macro_rules! async_client_request { + ($self: ident, $ctx: ident, $req: ident, $server: expr, $method: expr, $cres: ident) => { + let mut creq = ::ttrpc::Request { + service: $server.to_string(), + method: $method.to_string(), + timeout_nano: $ctx.timeout_nano, + metadata: ttrpc::context::to_pb($ctx.metadata), + payload: Vec::new(), + ..Default::default() + }; + + $req.encode(&mut creq.payload) + .map_err(::ttrpc::err_to_others!(e, "Encoding error "))?; + + let res = $self.client.request(creq).await?; + $cres + .merge(&res.payload as &[u8]) + .map_err(::ttrpc::err_to_others!(e, "Unpack get error "))?; + + return Ok($cres); + }; +} + /// Duplex streaming through async client. #[macro_export] +#[cfg(not(feature = "prost"))] macro_rules! 
async_client_stream { ($self: ident, $ctx: ident, $server: expr, $method: expr) => { let mut creq = ::ttrpc::Request::new(); @@ -187,8 +339,27 @@ macro_rules! async_client_stream { }; } +#[macro_export] +#[cfg(feature = "prost")] +macro_rules! async_client_stream { + ($self: ident, $ctx: ident, $server: expr, $method: expr) => { + let mut creq = ::ttrpc::Request::default(); + creq.service = $server.to_string(); + creq.method = $method.to_string(); + creq.timeout_nano = $ctx.timeout_nano; + let md = ::ttrpc::context::to_pb($ctx.metadata); + creq.metadata = md; + + let inner = $self.client.new_stream(creq, true, true).await?; + let stream = ::ttrpc::r#async::ClientStream::new(inner); + + return Ok(stream); + }; +} + /// Only send streaming through async client. #[macro_export] +#[cfg(not(feature = "prost"))] macro_rules! async_client_stream_send { ($self: ident, $ctx: ident, $server: expr, $method: expr) => { let mut creq = ::ttrpc::Request::new(); @@ -205,8 +376,27 @@ macro_rules! async_client_stream_send { }; } +#[macro_export] +#[cfg(feature = "prost")] +macro_rules! async_client_stream_send { + ($self: ident, $ctx: ident, $server: expr, $method: expr) => { + let mut creq = ::ttrpc::Request::default(); + creq.service = $server.to_string(); + creq.method = $method.to_string(); + creq.timeout_nano = $ctx.timeout_nano; + let md = ::ttrpc::context::to_pb($ctx.metadata); + creq.metadata = md; + + let inner = $self.client.new_stream(creq, true, false).await?; + let stream = ::ttrpc::r#async::ClientStreamSender::new(inner); + + return Ok(stream); + }; +} + /// Only receive streaming through async client. #[macro_export] +#[cfg(not(feature = "prost"))] macro_rules! async_client_stream_receive { ($self: ident, $ctx: ident, $req: ident, $server: expr, $method: expr) => { let mut creq = ::ttrpc::Request::new(); @@ -230,6 +420,26 @@ macro_rules! async_client_stream_receive { }; } +#[macro_export] +#[cfg(feature = "prost")] +macro_rules! 
async_client_stream_receive { + ($self: ident, $ctx: ident, $req: ident, $server: expr, $method: expr) => { + let mut creq = ::ttrpc::Request::default(); + creq.service = $server.to_string(); + creq.method = $method.to_string(); + creq.timeout_nano = $ctx.timeout_nano; + let md = ::ttrpc::context::to_pb($ctx.metadata); + creq.metadata = md; + $req.encode(&mut creq.payload) + .map_err(::ttrpc::err_to_others!(e, "Encoding error "))?; + + let inner = $self.client.new_stream(creq, false, true).await?; + let stream = ::ttrpc::r#async::ClientStreamReceiver::new(inner, $self.client.clone()); + + return Ok(stream); + }; +} + /// Trait that implements handler which is a proxy to the desired method (async). #[async_trait] pub trait MethodHandler { diff --git a/src/context.rs b/src/context.rs index c22e4e91..2e3fd98b 100644 --- a/src/context.rs +++ b/src/context.rs @@ -70,11 +70,17 @@ pub fn to_pb(kvs: HashMap>) -> Vec { for (k, vl) in kvs { for v in vl { + #[cfg(not(feature = "prost"))] let key = KeyValue { key: k.clone(), value: v.clone(), ..Default::default() }; + #[cfg(feature = "prost")] + let key = KeyValue { + key: k.clone(), + value: v.clone(), + }; meta.push(key); } } @@ -96,11 +102,17 @@ mod tests { ("key1", "value1-2"), ("key2", "value2"), ] { + #[cfg(not(feature = "prost"))] let key = KeyValue { key: i.0.to_string(), value: i.1.to_string(), ..Default::default() }; + #[cfg(feature = "prost")] + let key = KeyValue { + key: i.0.to_string(), + value: i.1.to_string(), + }; src.push(key); } diff --git a/src/error.rs b/src/error.rs index 87ceb786..a131a8be 100644 --- a/src/error.rs +++ b/src/error.rs @@ -13,8 +13,8 @@ // limitations under the License. //! Error and Result of ttrpc and relevant functions, macros. 
- -use crate::proto::{Code, Response, Status}; +#[allow(unused_imports)] +use crate::proto::{self, Code, Response, Status}; use std::result; use thiserror::Error; @@ -55,16 +55,26 @@ impl From for Response { } else { get_status(Code::UNKNOWN, e) }; - - let mut res = Response::new(); - res.set_status(status); - res + #[cfg(not(feature = "prost"))] + { + let mut res = Response::new(); + res.set_status(status); + res + } + #[cfg(feature = "prost")] + { + Response { + status: Some(status), + ..Default::default() + } + } } } /// A specialized Result type for ttrpc. pub type Result = result::Result; +#[cfg(not(feature = "prost"))] /// Get ttrpc::Status from ttrpc::Code and a message. pub fn get_status(c: Code, msg: impl ToString) -> Status { let mut status = Status::new(); @@ -74,6 +84,16 @@ pub fn get_status(c: Code, msg: impl ToString) -> Status { status } +#[cfg(feature = "prost")] +/// Get ttrpc::Status from ttrpc::Code and a message. +pub fn get_status(c: Code, msg: impl ToString) -> Status { + Status { + code: c as i32, + message: msg.to_string(), + ..Default::default() + } +} + pub fn get_rpc_status(c: Code, msg: impl ToString) -> Error { Error::RpcStatus(get_status(c, msg)) } diff --git a/src/lib.rs b/src/lib.rs index 9742b7a0..aec7462f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -62,10 +62,10 @@ pub use crate::error::{get_status, Error, Result}; cfg_sync! { pub mod sync; - #[doc(hidden)] - pub use sync::response_to_channel; #[doc(inline)] pub use sync::{MethodHandler, TtrpcContext}; + #[doc(inline)] + pub use sync::utils::{response_to_channel, response_error_to_channel}; pub use sync::Client; #[doc(inline)] pub use sync::Server; @@ -76,3 +76,17 @@ cfg_async! { #[doc(hidden)] pub use asynchronous as r#async; } + +macro_rules! 
assert_unique_feature { + () => {}; + ($first:tt $(,$rest:tt)*) => { + $( + #[cfg(all(feature = $first, feature = $rest))] + compile_error!(concat!("features \"", $first, "\" and \"", $rest, "\" cannot be used together")); + )* + assert_unique_feature!($($rest),*); + } +} + +// Enabling feature the rustprotobuf and the prost together is prohibited. +assert_unique_feature!("rustprotobuf", "prost"); diff --git a/src/proto.rs b/src/proto.rs index ff3e3f7c..e93fc36e 100644 --- a/src/proto.rs +++ b/src/proto.rs @@ -11,6 +11,7 @@ mod compiled { pub use compiled::ttrpc::*; use byteorder::{BigEndian, ByteOrder}; +#[cfg(not(feature = "prost"))] use protobuf::{CodedInputStream, CodedOutputStream}; use crate::error::{get_rpc_status, Error, Result as TtResult}; @@ -266,6 +267,7 @@ pub trait Codec { Self: Sized; } +#[cfg(not(feature = "prost"))] impl Codec for M { type E = protobuf::Error; @@ -288,6 +290,26 @@ impl Codec for M { } } +#[cfg(feature = "prost")] +impl Codec for M { + type E = std::io::Error; + + fn size(&self) -> u32 { + self.encoded_len() as u32 + } + + fn encode(&self) -> Result, Self::E> { + Ok(self.encode_to_vec()) + } + + fn decode(buf: impl AsRef<[u8]>) -> Result + where + Self: Sized, + { + prost::Message::decode(buf.as_ref()).map_err(std::io::Error::from) + } +} + /// Message of ttrpc. 
#[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct Message { @@ -428,6 +450,7 @@ mod tests { 117, 101, 49, ]; + #[cfg(not(feature = "prost"))] fn new_protobuf_request() -> Request { let mut creq = Request::new(); creq.set_service("grpc.TestServices".to_string()); @@ -443,6 +466,21 @@ mod tests { creq } + #[cfg(feature = "prost")] + fn new_protobuf_request() -> Request { + let meta = vec![KeyValue { + key: "test_key1".to_string(), + value: "test_value1".to_string(), + }]; + Request { + service: "grpc.TestServices".to_owned(), + method: "Test".to_owned(), + timeout_nano: 20 * 1000 * 1000, + metadata: meta, + payload: vec![0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9], + } + } + #[test] fn protobuf_codec() { let creq = new_protobuf_request(); @@ -479,6 +517,7 @@ mod tests { } #[cfg(feature = "async")] + #[cfg(not(feature = "prost"))] #[tokio::test] async fn async_gen_message() { // Test packet which exceeds maximum message size @@ -519,6 +558,7 @@ mod tests { } #[cfg(feature = "async")] + #[cfg(not(feature = "prost"))] #[tokio::test] async fn async_message() { // Test packet which exceeds maximum message size diff --git a/src/sync/client.rs b/src/sync/client.rs index da86f7d1..abac7f21 100644 --- a/src/sync/client.rs +++ b/src/sync/client.rs @@ -17,12 +17,13 @@ #[cfg(unix)] use std::os::unix::io::RawFd; -use protobuf::Message; use std::collections::HashMap; use std::sync::mpsc; use std::sync::{Arc, Mutex}; use std::thread; use std::time::Duration; +#[cfg(not(feature = "prost"))] +use protobuf::Message; use crate::error::{Error, Result}; use crate::proto::{ @@ -157,6 +158,10 @@ impl Client { }) } pub fn request(&self, req: Request) -> Result { + #[cfg(feature = "prost")] + check_oversize(req.payload.len(), false)?; + + #[cfg(not(feature = "prost"))] check_oversize(req.compute_size() as usize, false)?; let buf = req.encode().map_err(err_to_others_err!(e, ""))?; @@ -183,12 +188,23 @@ impl Client { let buf = result?; let res = 
Response::decode(buf).map_err(err_to_others_err!(e, "Unpack response error "))?; - - let status = res.status(); - if status.code() != Code::OK { - return Err(Error::RpcStatus((*status).clone())); + #[cfg(not(feature = "prost"))] + { + let status = res.status(); + if status.code() != Code::OK { + return Err(Error::RpcStatus((*status).clone())); + } } - + #[cfg(feature = "prost")] + { + let status = res.status.as_ref(); + if let Some(status) = status { + if status.code != Code::OK as i32 { + return Err(Error::RpcStatus(status.clone())); + } + } + } + Ok(res) } } diff --git a/src/sync/mod.rs b/src/sync/mod.rs index 53d0680c..29c6dc6f 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -11,11 +11,10 @@ mod server; mod sys; #[macro_use] -mod utils; +pub mod utils; pub use client::Client; pub use server::Server; #[doc(hidden)] -pub use utils::response_to_channel; pub use utils::{MethodHandler, TtrpcContext}; diff --git a/src/sync/server.rs b/src/sync/server.rs index d19b0558..6dab5210 100644 --- a/src/sync/server.rs +++ b/src/sync/server.rs @@ -19,6 +19,9 @@ use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; use std::time::Duration; +#[cfg(feature = "prost")] +use prost::Message; +#[cfg(not(feature = "prost"))] use protobuf::{CodedInputStream, Message}; use std::collections::HashMap; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; @@ -27,7 +30,8 @@ use std::sync::{Arc, Mutex}; use std::thread; use std::thread::JoinHandle; -use super::utils::{response_error_to_channel, response_to_channel}; +use super::utils::response_error_to_channel; +use crate::sync::utils::response_to_channel; use crate::context; use crate::error::{get_status, Error, Result}; use crate::proto::{Code, MessageHeader, Request, Response, MESSAGE_TYPE_REQUEST}; @@ -162,28 +166,72 @@ fn start_method_handler_thread( if mh.type_ != MESSAGE_TYPE_REQUEST { continue; } - let mut s = CodedInputStream::from_bytes(&buf); - let mut req = Request::new(); - if let Err(x) = req.merge_from(&mut s) { - let 
status = get_status(Code::INVALID_ARGUMENT, x.to_string()); - let mut res = Response::new(); - res.set_status(status); - if let Err(x) = response_to_channel(mh.stream_id, res, res_tx.clone()) { - debug!("response_to_channel get error {:?}", x); - quit_connection(quit, control_tx); - break; + #[allow(unused_mut)] + #[allow(unused_assignments)] + let mut req: Request = Request::default(); + #[cfg(not(feature = "prost"))] + { + let mut s = CodedInputStream::from_bytes(&buf); + req = Request::new(); + if let Err(x) = req.merge_from(&mut s) { + let status = get_status(Code::INVALID_ARGUMENT, x.to_string()); + let mut res = Response::new(); + res.set_status(status); + if let Err(x) = response_to_channel(mh.stream_id, res, res_tx.clone()) { + debug!("response_to_channel get error {:?}", x); + quit_connection(quit, control_tx); + break; + } + continue; } - continue; } + + #[cfg(feature = "prost")] + { + if let Err(x) = req.merge(&buf as &[u8]) { + let status = get_status(Code::INVALID_ARGUMENT, x.to_string()); + let res = Response { + status: Some(status), + ..Default::default() + }; + if let Err(x) = response_to_channel(mh.stream_id, res, res_tx.clone()) { + debug!("response_to_channel get error {:?}", x); + quit.store(true, Ordering::SeqCst); + // the client connection would be closed and + // the connection dealing main thread would have + // exited. 
+ control_tx + .send(()) + .unwrap_or_else(|err| trace!("Failed to send {:?}", err)); + break; + } + continue; + } + } + trace!("Got Message request {:?}", req); let path = format!("/{}/{}", req.service, req.method); let method = if let Some(x) = methods.get(&path) { x - } else { - let status = get_status(Code::INVALID_ARGUMENT, format!("{path} does not exist")); - let mut res = Response::new(); - res.set_status(status); + } else { + let mut res; + let status; + #[cfg(not(feature = "prost"))] + { + status = + get_status(Code::INVALID_ARGUMENT, format!("{path} does not exist")); + res = Response::new(); + res.set_status(status); + } + + #[cfg(feature = "prost")] + { + status = + get_status(Code::INVALID_ARGUMENT, format!("{path} does not exist")); + res = Response::default(); + res.status = Some(status); + } if let Err(x) = response_to_channel(mh.stream_id, res, res_tx.clone()) { info!("response_to_channel get error {:?}", x); quit_connection(quit, control_tx); diff --git a/src/sync/utils.rs b/src/sync/utils.rs index 616b615e..4fa526c6 100644 --- a/src/sync/utils.rs +++ b/src/sync/utils.rs @@ -4,11 +4,14 @@ // use crate::error::{Error, Result}; +#[allow(unused_imports)] use crate::proto::{ check_oversize, Codec, MessageHeader, Request, Response, MESSAGE_TYPE_RESPONSE, }; + use std::collections::HashMap; +#[cfg(not(feature = "prost"))] /// Response message through a channel. /// Eventually the message will sent to Client. 
pub fn response_to_channel( @@ -35,6 +38,27 @@ pub fn response_to_channel( Ok(()) } +#[cfg(feature = "prost")] +pub fn response_to_channel( + stream_id: u32, + res: Response, + tx: std::sync::mpsc::Sender<(MessageHeader, Vec<u8>)>, +) -> Result<()> { + let mut buffer = Vec::new(); + <Response as Message>::encode(&res, &mut buffer).map_err(err_to_others_err!(e, ""))?; + let mh = MessageHeader { + length: buffer.len() as u32, + stream_id, + type_: MESSAGE_TYPE_RESPONSE, + flags: 0, + }; + + tx.send((mh, buffer)).map_err(err_to_others_err!(e, ""))?; + + Ok(()) +} + + pub fn response_error_to_channel( stream_id: u32, e: Error, @@ -45,6 +69,7 @@ /// Handle request in sync mode. #[macro_export] +#[cfg(not(feature = "prost"))] macro_rules! request_handler { ($class: ident, $ctx: ident, $req: ident, $server: ident, $req_type: ident, $req_fn: ident) => { let mut s = CodedInputStream::from_bytes(&$req.payload); @@ -78,8 +103,41 @@ macro_rules! request_handler { }; } +/// Handle request in sync mode. +#[macro_export] +#[cfg(feature = "prost")] +macro_rules! request_handler { + ($class: ident, $ctx: ident, $req: ident, $req_type: ident, $req_fn: ident) => { + let mut req = $req_type::default(); + req.merge(&$req.payload as &[u8]) + .map_err(::ttrpc::err_to_others!(e, ""))?; + + let mut res = ::ttrpc::Response::default(); + match $class.service.$req_fn(&$ctx, req) { + Ok(rep) => { + res.status = Some(::ttrpc::get_status(::ttrpc::Code::OK, "".to_string())); + rep.encode(&mut res.payload) + .map_err(::ttrpc::err_to_others!(e, "Encoding error "))?; + } + Err(x) => match x { + ::ttrpc::Error::RpcStatus(s) => { + res.status = Some(s); + } + _ => { + res.status = Some(::ttrpc::get_status( + ::ttrpc::Code::UNKNOWN, + format!("{:?}", x), + )); + } + }, + } + ::ttrpc::response_to_channel($ctx.mh.stream_id, res, $ctx.res_tx)? + }; +} + /// Send request through sync client. #[macro_export] +#[cfg(not(feature = "prost"))] macro_rules!
client_request { ($self: ident, $ctx: ident, $req: ident, $server: expr, $method: expr, $cres: ident) => { let mut creq = ::ttrpc::Request::new(); @@ -104,6 +162,27 @@ macro_rules! client_request { }; } +/// Send request through sync client. +#[macro_export] +#[cfg(feature = "prost")] +macro_rules! client_request { + ($self: ident, $ctx: ident, $req: ident, $server: expr, $method: expr, $cres: ident) => { + let mut creq = ::ttrpc::Request::default(); + creq.service = $server.to_string(); + creq.method = $method.to_string(); + creq.timeout_nano = $ctx.timeout_nano; + let md = ::ttrpc::context::to_pb($ctx.metadata); + creq.metadata = md; + creq.payload.reserve($req.encoded_len()); + $req.encode(&mut creq.payload).map_err(::ttrpc::err_to_others!(e, "Encoding error "))?; + + let res = $self.client.request(creq)?; + $cres + .merge(&res.payload as &[u8]) + .map_err(::ttrpc::err_to_others!(e, "Unpack get error "))?; + }; +} + /// The context of ttrpc (sync). #[derive(Debug)] pub struct TtrpcContext { diff --git a/src/ttrpc.proto b/src/ttrpc.proto index b5273ab9..e17c9516 100644 --- a/src/ttrpc.proto +++ b/src/ttrpc.proto @@ -14,7 +14,7 @@ syntax = "proto3"; -package grpc; +package ttrpc; message Request { string service = 1; diff --git a/tests/run-examples.rs b/tests/run-examples.rs index 459700f0..21de4b14 100644 --- a/tests/run-examples.rs +++ b/tests/run-examples.rs @@ -4,12 +4,16 @@ use std::{ time::Duration, }; -fn run_example(server: &str, client: &str) -> Result<(), Box<dyn std::error::Error>> { +fn run_example( + server: &str, + client: &str, + example_dir: &str, +) -> Result<(), Box<dyn std::error::Error>> { // start the server and give it a moment to start.
- let mut server = do_run_example(server).spawn().unwrap(); + let mut server = do_run_example(server, example_dir).spawn().unwrap(); std::thread::sleep(Duration::from_secs(2)); - let mut client = do_run_example(client).spawn().unwrap(); + let mut client = do_run_example(client, example_dir).spawn().unwrap(); let mut client_succeeded = false; let start = std::time::Instant::now(); let timeout = Duration::from_secs(600); @@ -55,14 +59,13 @@ fn run_example(server: &str, client: &str) -> Result<(), Box<dyn std::error::Error>> { -fn do_run_example(example: &str) -> Command { +fn do_run_example(example: &str, example_dir: &str) -> Command { let mut cmd = Command::new("cargo"); - cmd.arg("run") - .arg("--example") - .arg(example) - .stdout(std::process::Stdio::piped()) + cmd.arg("run").arg("--example").arg(example); + + cmd.stdout(std::process::Stdio::piped()) .stderr(std::process::Stdio::piped()) - .current_dir("example"); + .current_dir(example_dir); cmd } @@ -83,9 +86,23 @@ fn wait_with_output(name: &str, cmd: Child) { #[test] fn run_examples() -> Result<(), Box<dyn std::error::Error>> { - run_example("server", "client")?; - run_example("async-server", "async-client")?; - run_example("async-stream-server", "async-stream-client")?; + #[cfg(feature = "rustprotobuf")] + { + println!("Running examples with rustprotobuf feature"); + run_example("server", "client", "example")?; + run_example("async-server", "async-client", "example")?; + run_example("async-stream-server", "async-stream-client", "example")?; + } + + #[cfg(feature = "prost")] + { + println!("Running examples with prost feature"); + // run_example("server", "client", "example2")?; + #[cfg(unix)] + run_example("async-server", "async-client", "example2")?; + #[cfg(unix)] + run_example("async-stream-server", "async-stream-client", "example2")?; + } Ok(()) } diff --git a/ttrpc-codegen/Makefile b/ttrpc-codegen/Makefile index bb69e69a..acd4a18c 100644 --- a/ttrpc-codegen/Makefile +++ b/ttrpc-codegen/Makefile @@ -1 +1,8 @@ -include ../Makefile +.PHONY: check +check: + cargo fmt --all -- --check + cargo
clippy --all-targets -- -D warnings + +.PHONY: test +test: + cargo test --verbose \ No newline at end of file