From 46d83110b85dc2f5d3aba8d34e50be16ca1f218a Mon Sep 17 00:00:00 2001 From: raihan <54474184+ryihan@users.noreply.github.com> Date: Tue, 26 Oct 2021 03:47:07 +0600 Subject: [PATCH] Upgrade Md --- .github/ISSUE_TEMPLATE/bug_report.md | 41 + .github/ISSUE_TEMPLATE/cleanup_request.md | 19 + .github/ISSUE_TEMPLATE/feature_request.md | 30 + .github/ISSUE_TEMPLATE/question.md | 17 + .github/pull_request_template.md | 11 + BUILDING.md | 278 ++++ CODE-OF-CONDUCT.md | 3 + CONCEPTS.md | 64 + CONTRIBUTING.md | 134 ++ GOVERNANCE.md | 1 + MAINTAINERS.md | 84 ++ MANIFEST.md | 23 + SECURITY.md | 3 + TROUBLESHOOTING.md | 43 + bazel/test/python_test_repo/README.md | 5 + doc/PROTOCOL-HTTP2.md | 259 ++++ doc/PROTOCOL-WEB.md | 141 ++ doc/binary-logging.md | 59 + doc/c-style-guide.md | 92 ++ doc/command_line_tool.md | 199 +++ doc/compression.md | 118 ++ doc/compression_cookbook.md | 133 ++ ...ection-backoff-interop-test-description.md | 77 + doc/connection-backoff.md | 56 + doc/connectivity-semantics-and-api.md | 154 ++ doc/core/combiner-explainer.md | 158 ++ doc/core/epoll-polling-engine.md | 121 ++ ...grpc-client-server-polling-engine-usage.md | 32 + doc/core/grpc-cq.md | 64 + doc/core/grpc-error.md | 160 +++ doc/core/grpc-polling-engines.md | 152 ++ doc/core/moving-to-c++.md | 66 + doc/core/pending_api_cleanups.md | 20 + doc/core/transport_explainer.md | 197 +++ doc/cpp-style-guide.md | 8 + doc/cpp/pending_api_cleanups.md | 22 + doc/cpp/perf_notes.md | 29 + doc/csharp/server_reflection.md | 54 + doc/environment_variables.md | 173 +++ doc/fail_fast.md | 1 + doc/fork_support.md | 46 + doc/g_stands_for.md | 44 + doc/grpc_release_schedule.md | 43 + doc/grpc_xds_features.md | 66 + doc/health-checking.md | 78 + doc/http-grpc-status-mapping.md | 30 + doc/http2-interop-test-descriptions.md | 271 ++++ doc/internationalization.md | 45 + doc/interop-test-descriptions.md | 1280 +++++++++++++++++ doc/keepalive.md | 54 + doc/load-balancing.md | 140 ++ doc/naming.md | 88 ++ 
doc/python/server_reflection.md | 66 + doc/security_audit.md | 70 + doc/server-reflection.md | 176 +++ doc/server_reflection_tutorial.md | 189 +++ doc/server_side_auth.md | 61 + doc/service_config.md | 117 ++ doc/ssl-performance.md | 44 + doc/status_ordering.md | 17 + doc/statuscodes.md | 74 + doc/unit_testing.md | 175 +++ doc/versioning.md | 45 + doc/wait-for-ready.md | 14 + doc/workarounds.md | 19 + doc/xds-test-descriptions.md | 737 ++++++++++ etc/README.md | 4 + examples/README.md | 21 + .../binder/java/io/grpc/binder/cpp/README.md | 21 + examples/android/helloworld/README.md | 24 + examples/cpp/README.md | 13 + examples/cpp/compression/README.md | 84 ++ examples/cpp/helloworld/README.md | 6 + examples/cpp/load_balancing/README.md | 65 + examples/cpp/metadata/README.md | 67 + examples/cpp/route_guide/README.md | 6 + examples/csharp/Helloworld/README.md | 39 + .../csharp/HelloworldLegacyCsproj/README.md | 74 + examples/csharp/HelloworldUnity/README.md | 19 + examples/csharp/HelloworldXamarin/README.md | 32 + examples/csharp/RouteGuide/README.md | 6 + examples/csharp/Xds/README.md | 99 ++ examples/node/README.md | 50 + examples/node/dynamic_codegen/README.md | 1 + .../dynamic_codegen/route_guide/README.md | 5 + examples/node/static_codegen/README.md | 8 + .../node/static_codegen/route_guide/README.md | 5 + examples/objective-c/auth_sample/README.md | 3 + examples/objective-c/helloworld/README.md | 107 ++ .../objective-c/helloworld_macos/README.md | 6 + examples/objective-c/route_guide/README.md | 4 + examples/php/README.md | 9 + examples/php/echo/README.md | 86 ++ examples/php/route_guide/README.md | 6 + examples/protos/README.md | 8 + examples/python/README.md | 1 + examples/python/async_streaming/README.md | 50 + examples/python/auth/README.md | 112 ++ examples/python/cancellation/README.md | 127 ++ examples/python/compression/README.md | 58 + .../python/data_transmission/README.cn.md | 36 + .../python/data_transmission/README.en.md | 37 + 
examples/python/debug/README.md | 68 + examples/python/errors/README.md | 107 ++ examples/python/helloworld/README.md | 1 + examples/python/metadata/README.md | 6 + examples/python/multiplex/README.md | 3 + examples/python/multiprocessing/README.md | 74 + examples/python/retry/README.md | 48 + examples/python/route_guide/README.md | 1 + examples/python/wait_for_ready/README.md | 32 + examples/python/xds/README.md | 123 ++ examples/ruby/README.md | 63 + .../ruby/errors_and_cancellation/README.md | 25 + examples/ruby/route_guide/README.md | 6 + examples/ruby/without_protobuf/README.md | 6 + include/grpc++/README.md | 7 + include/grpc/event_engine/README.md | 38 + include/grpc/impl/codegen/README.md | 22 + include/grpcpp/impl/README.md | 4 + include/grpcpp/impl/codegen/README.md | 21 + src/android/test/interop/README.md | 37 + src/compiler/README.md | 4 + src/core/README.md | 6 + src/core/ext/README.md | 5 + src/core/ext/filters/client_channel/README.md | 49 + .../filters/client_channel/resolver/README.md | 4 + .../client_channel/resolver/binder/README.md | 9 + .../resolver/dns/native/README.md | 2 + .../resolver/sockaddr/README.md | 1 + src/core/ext/transport/README.md | 1 + src/core/ext/transport/binder/README.md | 6 + src/core/ext/transport/chttp2/README.md | 1 + .../chttp2/client/insecure/README.md | 1 + .../transport/chttp2/client/secure/README.md | 1 + .../chttp2/server/insecure/README.md | 1 + .../transport/chttp2/server/secure/README.md | 1 + .../ext/transport/chttp2/transport/README.md | 4 + src/core/lib/README.md | 6 + src/core/lib/channel/README.md | 4 + src/core/lib/gpr/README.md | 8 + src/core/lib/gprpp/README.md | 8 + src/core/lib/iomgr/README.md | 6 + src/core/lib/surface/README.md | 4 + src/core/lib/transport/README.md | 7 + src/core/tsi/README.md | 2 + src/cpp/README.md | 225 +++ src/csharp/BUILD-INTEGRATION.md | 411 ++++++ src/csharp/README.md | 125 ++ src/csharp/docfx/README.md | 28 + src/csharp/experimental/README.md | 103 ++ 
src/csharp/keys/README.md | 9 + src/csharp/unitypackage/README.md | 13 + src/objective-c/GRPCClient/README.md | 4 + src/objective-c/NetworkTransitionBehavior.md | 92 ++ src/objective-c/README-CFSTREAM.md | 40 + src/objective-c/README.md | 253 ++++ src/objective-c/RxLibrary/README.md | 8 + src/objective-c/examples/Sample/README.md | 1 + src/objective-c/tests/Connectivity/README.md | 14 + src/php/README.md | 405 ++++++ src/php/docker/README.md | 182 +++ .../tests/unit/credentials/README.md | 15 + src/ruby/CHANGELOG.md | 11 + src/ruby/README.md | 83 ++ src/ruby/end2end/README.md | 18 + src/ruby/pb/README.md | 42 + src/ruby/tools/README.md | 12 + summerofcode/2016/siddharth_shukla.md | 65 + summerofcode/2018/naresh.md | 191 +++ summerofcode/ideas.md | 36 + templates/README.md | 133 ++ test/core/event_engine/test_suite/README.md | 35 + test/cpp/README-iOS.md | 52 + third_party/ABSEIL_MANUAL.md | 31 + third_party/README.md | 142 ++ third_party/toolchains/README.md | 5 + third_party/upb/CONTRIBUTING.md | 7 + third_party/upb/DESIGN.md | 72 + third_party/upb/README.md | 124 ++ third_party/upb/cmake/README.md | 23 + tools/README.md | 23 + tools/http2_interop/README.md | 9 + tools/internal_ci/README.md | 7 + tools/interop_matrix/README.md | 53 + tools/interop_matrix/patches/README.md | 38 + tools/profiling/microbenchmarks/README.md | 4 + .../microbenchmarks/bm_diff/README.md | 116 ++ tools/remote_build/README.md | 71 + tools/run_tests/README.md | 53 + tools/run_tests/performance/README.md | 462 ++++++ tools/run_tests/xds_k8s_test_driver/README.md | 416 ++++++ 192 files changed, 13684 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/cleanup_request.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/ISSUE_TEMPLATE/question.md create mode 100644 .github/pull_request_template.md create mode 100644 BUILDING.md create mode 100644 CODE-OF-CONDUCT.md create mode 100644 CONCEPTS.md 
create mode 100644 CONTRIBUTING.md create mode 100644 GOVERNANCE.md create mode 100644 MAINTAINERS.md create mode 100644 MANIFEST.md create mode 100644 SECURITY.md create mode 100644 TROUBLESHOOTING.md create mode 100644 bazel/test/python_test_repo/README.md create mode 100644 doc/PROTOCOL-HTTP2.md create mode 100644 doc/PROTOCOL-WEB.md create mode 100644 doc/binary-logging.md create mode 100644 doc/c-style-guide.md create mode 100644 doc/command_line_tool.md create mode 100644 doc/compression.md create mode 100644 doc/compression_cookbook.md create mode 100644 doc/connection-backoff-interop-test-description.md create mode 100644 doc/connection-backoff.md create mode 100644 doc/connectivity-semantics-and-api.md create mode 100644 doc/core/combiner-explainer.md create mode 100644 doc/core/epoll-polling-engine.md create mode 100644 doc/core/grpc-client-server-polling-engine-usage.md create mode 100644 doc/core/grpc-cq.md create mode 100644 doc/core/grpc-error.md create mode 100644 doc/core/grpc-polling-engines.md create mode 100644 doc/core/moving-to-c++.md create mode 100644 doc/core/pending_api_cleanups.md create mode 100644 doc/core/transport_explainer.md create mode 100644 doc/cpp-style-guide.md create mode 100644 doc/cpp/pending_api_cleanups.md create mode 100644 doc/cpp/perf_notes.md create mode 100644 doc/csharp/server_reflection.md create mode 100644 doc/environment_variables.md create mode 100644 doc/fail_fast.md create mode 100644 doc/fork_support.md create mode 100644 doc/g_stands_for.md create mode 100644 doc/grpc_release_schedule.md create mode 100644 doc/grpc_xds_features.md create mode 100644 doc/health-checking.md create mode 100644 doc/http-grpc-status-mapping.md create mode 100644 doc/http2-interop-test-descriptions.md create mode 100644 doc/internationalization.md create mode 100644 doc/interop-test-descriptions.md create mode 100644 doc/keepalive.md create mode 100644 doc/load-balancing.md create mode 100644 doc/naming.md create mode 100644 
doc/python/server_reflection.md create mode 100644 doc/security_audit.md create mode 100644 doc/server-reflection.md create mode 100644 doc/server_reflection_tutorial.md create mode 100644 doc/server_side_auth.md create mode 100644 doc/service_config.md create mode 100644 doc/ssl-performance.md create mode 100644 doc/status_ordering.md create mode 100644 doc/statuscodes.md create mode 100644 doc/unit_testing.md create mode 100644 doc/versioning.md create mode 100644 doc/wait-for-ready.md create mode 100644 doc/workarounds.md create mode 100644 doc/xds-test-descriptions.md create mode 100644 etc/README.md create mode 100644 examples/README.md create mode 100644 examples/android/binder/java/io/grpc/binder/cpp/README.md create mode 100644 examples/android/helloworld/README.md create mode 100644 examples/cpp/README.md create mode 100644 examples/cpp/compression/README.md create mode 100644 examples/cpp/helloworld/README.md create mode 100644 examples/cpp/load_balancing/README.md create mode 100644 examples/cpp/metadata/README.md create mode 100644 examples/cpp/route_guide/README.md create mode 100644 examples/csharp/Helloworld/README.md create mode 100644 examples/csharp/HelloworldLegacyCsproj/README.md create mode 100644 examples/csharp/HelloworldUnity/README.md create mode 100644 examples/csharp/HelloworldXamarin/README.md create mode 100644 examples/csharp/RouteGuide/README.md create mode 100644 examples/csharp/Xds/README.md create mode 100644 examples/node/README.md create mode 100644 examples/node/dynamic_codegen/README.md create mode 100644 examples/node/dynamic_codegen/route_guide/README.md create mode 100644 examples/node/static_codegen/README.md create mode 100644 examples/node/static_codegen/route_guide/README.md create mode 100644 examples/objective-c/auth_sample/README.md create mode 100644 examples/objective-c/helloworld/README.md create mode 100644 examples/objective-c/helloworld_macos/README.md create mode 100644 
examples/objective-c/route_guide/README.md create mode 100644 examples/php/README.md create mode 100644 examples/php/echo/README.md create mode 100644 examples/php/route_guide/README.md create mode 100644 examples/protos/README.md create mode 100644 examples/python/README.md create mode 100644 examples/python/async_streaming/README.md create mode 100644 examples/python/auth/README.md create mode 100644 examples/python/cancellation/README.md create mode 100644 examples/python/compression/README.md create mode 100644 examples/python/data_transmission/README.cn.md create mode 100644 examples/python/data_transmission/README.en.md create mode 100644 examples/python/debug/README.md create mode 100644 examples/python/errors/README.md create mode 100644 examples/python/helloworld/README.md create mode 100644 examples/python/metadata/README.md create mode 100644 examples/python/multiplex/README.md create mode 100644 examples/python/multiprocessing/README.md create mode 100644 examples/python/retry/README.md create mode 100644 examples/python/route_guide/README.md create mode 100644 examples/python/wait_for_ready/README.md create mode 100644 examples/python/xds/README.md create mode 100644 examples/ruby/README.md create mode 100644 examples/ruby/errors_and_cancellation/README.md create mode 100644 examples/ruby/route_guide/README.md create mode 100644 examples/ruby/without_protobuf/README.md create mode 100644 include/grpc++/README.md create mode 100644 include/grpc/event_engine/README.md create mode 100644 include/grpc/impl/codegen/README.md create mode 100644 include/grpcpp/impl/README.md create mode 100644 include/grpcpp/impl/codegen/README.md create mode 100644 src/android/test/interop/README.md create mode 100644 src/compiler/README.md create mode 100644 src/core/README.md create mode 100644 src/core/ext/README.md create mode 100644 src/core/ext/filters/client_channel/README.md create mode 100644 src/core/ext/filters/client_channel/resolver/README.md create mode 100644 
src/core/ext/filters/client_channel/resolver/binder/README.md create mode 100644 src/core/ext/filters/client_channel/resolver/dns/native/README.md create mode 100644 src/core/ext/filters/client_channel/resolver/sockaddr/README.md create mode 100644 src/core/ext/transport/README.md create mode 100644 src/core/ext/transport/binder/README.md create mode 100644 src/core/ext/transport/chttp2/README.md create mode 100644 src/core/ext/transport/chttp2/client/insecure/README.md create mode 100644 src/core/ext/transport/chttp2/client/secure/README.md create mode 100644 src/core/ext/transport/chttp2/server/insecure/README.md create mode 100644 src/core/ext/transport/chttp2/server/secure/README.md create mode 100644 src/core/ext/transport/chttp2/transport/README.md create mode 100644 src/core/lib/README.md create mode 100644 src/core/lib/channel/README.md create mode 100644 src/core/lib/gpr/README.md create mode 100644 src/core/lib/gprpp/README.md create mode 100644 src/core/lib/iomgr/README.md create mode 100644 src/core/lib/surface/README.md create mode 100644 src/core/lib/transport/README.md create mode 100644 src/core/tsi/README.md create mode 100644 src/cpp/README.md create mode 100644 src/csharp/BUILD-INTEGRATION.md create mode 100644 src/csharp/README.md create mode 100644 src/csharp/docfx/README.md create mode 100644 src/csharp/experimental/README.md create mode 100644 src/csharp/keys/README.md create mode 100644 src/csharp/unitypackage/README.md create mode 100644 src/objective-c/GRPCClient/README.md create mode 100644 src/objective-c/NetworkTransitionBehavior.md create mode 100644 src/objective-c/README-CFSTREAM.md create mode 100644 src/objective-c/README.md create mode 100644 src/objective-c/RxLibrary/README.md create mode 100644 src/objective-c/examples/Sample/README.md create mode 100644 src/objective-c/tests/Connectivity/README.md create mode 100644 src/php/README.md create mode 100644 src/php/docker/README.md create mode 100644 
src/python/grpcio_tests/tests/unit/credentials/README.md create mode 100644 src/ruby/CHANGELOG.md create mode 100644 src/ruby/README.md create mode 100644 src/ruby/end2end/README.md create mode 100644 src/ruby/pb/README.md create mode 100644 src/ruby/tools/README.md create mode 100644 summerofcode/2016/siddharth_shukla.md create mode 100644 summerofcode/2018/naresh.md create mode 100644 summerofcode/ideas.md create mode 100644 templates/README.md create mode 100644 test/core/event_engine/test_suite/README.md create mode 100644 test/cpp/README-iOS.md create mode 100644 third_party/ABSEIL_MANUAL.md create mode 100644 third_party/README.md create mode 100644 third_party/toolchains/README.md create mode 100644 third_party/upb/CONTRIBUTING.md create mode 100644 third_party/upb/DESIGN.md create mode 100644 third_party/upb/README.md create mode 100644 third_party/upb/cmake/README.md create mode 100644 tools/README.md create mode 100644 tools/http2_interop/README.md create mode 100644 tools/internal_ci/README.md create mode 100644 tools/interop_matrix/README.md create mode 100644 tools/interop_matrix/patches/README.md create mode 100644 tools/profiling/microbenchmarks/README.md create mode 100644 tools/profiling/microbenchmarks/bm_diff/README.md create mode 100644 tools/remote_build/README.md create mode 100644 tools/run_tests/README.md create mode 100644 tools/run_tests/performance/README.md create mode 100644 tools/run_tests/xds_k8s_test_driver/README.md diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..a191005d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,41 @@ +--- +name: Report a bug +about: Create a report to help us improve +labels: kind/bug, priority/P2 +assignees: yashykt + +--- + + + +### What version of gRPC and what language are you using? + + +### What operating system (Linux, Windows,...) and version? + + +### What runtime / compiler are you using (e.g. 
python version or version of gcc) + + +### What did you do? +Please provide either 1) A unit test for reproducing the bug or 2) Specific steps for us to follow to reproduce the bug. If there’s not enough information to debug the problem, gRPC team may close the issue at their discretion. You’re welcome to re-open the issue once you have a reproduction. + +### What did you expect to see? + + +### What did you see instead? + +Make sure you include information that can help us debug (full error message, exception listing, stack trace, logs). + +See [TROUBLESHOOTING.md](https://github.com/grpc/grpc/blob/master/TROUBLESHOOTING.md) for how to diagnose problems better. + +### Anything else we should know about your project / environment? diff --git a/.github/ISSUE_TEMPLATE/cleanup_request.md b/.github/ISSUE_TEMPLATE/cleanup_request.md new file mode 100644 index 00000000..98b0a5a5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/cleanup_request.md @@ -0,0 +1,19 @@ +--- +name: Request a cleanup +about: Suggest a cleanup in our repository +labels: kind/internal cleanup, priority/P2 +assignees: yashykt + +--- + + + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..b6a5493c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,30 @@ +--- +name: Request a feature +about: Suggest an idea for this project +labels: kind/enhancement, priority/P2 +assignees: yashykt + +--- + + + +### Is your feature request related to a problem? Please describe. +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +### Describe the solution you'd like +A clear and concise description of what you want to happen. + +### Describe alternatives you've considered +A clear and concise description of any alternative solutions or features you've considered. + +### Additional context +Add any other context about the feature request here. 
diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 00000000..b92cd62f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,17 @@ +--- +name: Ask a question +about: Ask a question +labels: kind/question, priority/P3 +assignees: yashykt + +--- + +PLEASE DO NOT POST A QUESTION HERE. +This form is for bug reports and feature requests ONLY! + +For general questions and troubleshooting, please ask/look for answers at StackOverflow, with "grpc" tag: https://stackoverflow.com/questions/tagged/grpc + +For questions that specifically need to be answered by gRPC team members, please ask/look for answers at grpc.io mailing list: https://groups.google.com/forum/#!forum/grpc-io + +This issue will be closed down once seen by the repo managers. + diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 00000000..380c2bda --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,11 @@ + + + + + +@yashykt diff --git a/BUILDING.md b/BUILDING.md new file mode 100644 index 00000000..80dadc64 --- /dev/null +++ b/BUILDING.md @@ -0,0 +1,278 @@ +gRPC C++ - Building from source +=========================== + +This document has detailed instructions on how to build gRPC C++ from source. Note that it only covers the build of gRPC itself and is mostly meant for gRPC C++ contributors and/or power users. +Others should follow the user instructions. See the [How to use](https://github.com/grpc/grpc/tree/master/src/cpp#to-start-using-grpc-c) instructions for guidance on how to add gRPC as a dependency to a C++ application (there are several ways and system-wide installation is often not the best choice). 
+ +# Pre-requisites + +## Linux + +```sh + $ [sudo] apt-get install build-essential autoconf libtool pkg-config +``` + +If you plan to build using CMake +```sh + $ [sudo] apt-get install cmake +``` + +If you are a contributor and plan to build and run tests, install the following as well: +```sh + $ # clang and LLVM C++ lib is only required for sanitizer builds + $ [sudo] apt-get install clang libc++-dev +``` + +## MacOS + +On a Mac, you will first need to +install Xcode or +[Command Line Tools for Xcode](https://developer.apple.com/download/more/) +and then run the following command from a terminal: + +```sh + $ [sudo] xcode-select --install +``` + +To build gRPC from source, you may need to install the following +packages from [Homebrew](https://brew.sh): + +```sh + $ brew install autoconf automake libtool shtool +``` + +If you plan to build using CMake, follow the instructions from https://cmake.org/download/ + +*Tip*: when building, +you *may* want to explicitly set the `LIBTOOL` and `LIBTOOLIZE` +environment variables when running `make` to ensure the version +installed by `brew` is being used: + +```sh + $ LIBTOOL=glibtool LIBTOOLIZE=glibtoolize make +``` + +## Windows + +To prepare for cmake + Microsoft Visual C++ compiler build +- Install Visual Studio 2015 or 2017 (Visual C++ compiler will be used). +- Install [Git](https://git-scm.com/). +- Install [CMake](https://cmake.org/download/). +- Install [nasm](https://www.nasm.us/) and add it to `PATH` (`choco install nasm`) - *required by boringssl* +- (Optional) Install [Ninja](https://ninja-build.org/) (`choco install ninja`) + +# Clone the repository (including submodules) + +Before building, you need to clone the gRPC github repository and download submodules containing source code +for gRPC's dependencies (that's done by the `submodule` command or `--recursive` flag). 
Use the following commands +to clone the gRPC repository at the [latest stable release tag](https://github.com/grpc/grpc/releases) + +## Unix + +```sh + $ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc + $ cd grpc + $ git submodule update --init + ``` + +## Windows + +``` +> git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc +> cd grpc +> git submodule update --init +``` + +NOTE: The `bazel` build tool uses a different model for dependencies. You only need to worry about downloading submodules if you're building +with something other than `bazel` (e.g. `cmake`). + +# Build from source + +In the C++ world, there's no "standard" build system that would work in all supported use cases and on all supported platforms. +Therefore, gRPC supports several major build systems, which should satisfy most users. Depending on your needs +we recommend building using `bazel` or `cmake`. + +## Building with bazel (recommended) + +Bazel is the primary build system for gRPC C++ and if you're comfortable with using bazel, we can certainly recommend it. +Using bazel will give you the best developer experience as well as faster and cleaner builds. + +You'll need `bazel` version `1.0.0` or higher to build gRPC. +See [Installing Bazel](https://docs.bazel.build/versions/master/install.html) for instructions on how to install bazel on your system. +We support building with `bazel` on Linux, MacOS and Windows. + +From the grpc repository root +``` +# Build gRPC C++ +$ bazel build :all +``` + +``` +# Run all the C/C++ tests +$ bazel test --config=dbg //test/... +``` + +NOTE: If you are a gRPC maintainer and you have access to our test cluster, you should use our [gRPC's Remote Execution environment](tools/remote_build/README.md) +to get a significant improvement to the build and test speed (and a bunch of other very useful features). 
+ +## Building with CMake + +### Linux/Unix, Using Make + +Run from grpc directory after cloning the repo with --recursive or updating submodules. +``` +$ mkdir -p cmake/build +$ cd cmake/build +$ cmake ../.. +$ make +``` + +If you want to build shared libraries (`.so` files), run `cmake` with `-DBUILD_SHARED_LIBS=ON`. + +### Windows, Using Visual Studio 2015 or 2017 + +When using the "Visual Studio" generator, +cmake will generate a solution (`grpc.sln`) that contains a VS project for +every target defined in `CMakeLists.txt` (+ few extra convenience projects +added automatically by cmake). After opening the solution with Visual Studio +you will be able to browse and build the code. +``` +> @rem Run from grpc directory after cloning the repo with --recursive or updating submodules. +> md .build +> cd .build +> cmake .. -G "Visual Studio 14 2015" +> cmake --build . --config Release +``` + +Using gRPC C++ as a DLL is not recommended, but you can still enable it by running `cmake` with `-DBUILD_SHARED_LIBS=ON`. + +### Windows, Using Ninja (faster build). + +Please note that when using Ninja, you will still need Visual C++ (part of Visual Studio) +installed to be able to compile the C/C++ sources. +``` +> @rem Run from grpc directory after cloning the repo with --recursive or updating submodules. +> cd cmake +> md build +> cd build +> call "%VS140COMNTOOLS%..\..\VC\vcvarsall.bat" x64 +> cmake ..\.. -GNinja -DCMAKE_BUILD_TYPE=Release +> cmake --build . +``` + +Using gRPC C++ as a DLL is not recommended, but you can still enable it by running `cmake` with `-DBUILD_SHARED_LIBS=ON`. + +### Windows: A note on building shared libs (DLLs) + +Windows DLL build is supported at a "best effort" basis and we don't recommend using gRPC C++ as a DLL as there are some known drawbacks around how C++ DLLs work on Windows. For example, there is no stable C++ ABI and you can't safely allocate memory in one DLL, and free it in another etc. 
+ +That said, we don't actively prohibit building DLLs on Windows (it can be enabled in cmake with `-DBUILD_SHARED_LIBS=ON`), and you are free to use the DLL builds +at your own risk. +- you've been warned that there are some important drawbacks and some things might not work at all or will be broken in interesting ways. +- we don't have extensive testing for DLL builds in place (to avoid maintenance costs, increased test duration etc.) so regressions / build breakages might occur + +### Dependency management + +gRPC's CMake build system has two options for handling dependencies. +CMake can build the dependencies for you, or it can search for libraries +that are already installed on your system and use them to build gRPC. + +This behavior is controlled by the `gRPC__PROVIDER` CMake variables, +e.g. `gRPC_CARES_PROVIDER`. The options that these variables take are as follows: + +* module - build dependencies alongside gRPC. The source code is obtained from +gRPC's git submodules. +* package - use external copies of dependencies that are already available +on your system. These could come from your system package manager, or perhaps +you pre-installed them using CMake with the `CMAKE_INSTALL_PREFIX` option. + +For example, if you set `gRPC_CARES_PROVIDER=module`, then CMake will build +c-ares before building gRPC. On the other hand, if you set +`gRPC_CARES_PROVIDER=package`, then CMake will search for a copy of c-ares +that's already installed on your system and use it to build gRPC. + +### Install after build + +Perform the following steps to install gRPC using CMake. +* Set `-DgRPC_INSTALL=ON` +* Build the `install` target + +The install destination is controlled by the +[`CMAKE_INSTALL_PREFIX`](https://cmake.org/cmake/help/latest/variable/CMAKE_INSTALL_PREFIX.html) variable. + +If you are running CMake v3.13 or newer you can build gRPC's dependencies +in "module" mode and install them alongside gRPC in a single step. 
+[Example](test/distrib/cpp/run_distrib_test_cmake_module_install.sh) + +If you are building gRPC < 1.27 or if you are using CMake < 3.13 you will need +to select "package" mode (rather than "module" mode) for the dependencies. +This means you will need to have external copies of these libraries available +on your system. This [example](test/distrib/cpp/run_distrib_test_cmake.sh) shows +how to install dependencies with cmake before proceeding to installing gRPC itself. + +``` +# NOTE: all of gRPC's dependencies need to be already installed +$ cmake ../.. -DgRPC_INSTALL=ON \ + -DCMAKE_BUILD_TYPE=Release \ + -DgRPC_ABSL_PROVIDER=package \ + -DgRPC_CARES_PROVIDER=package \ + -DgRPC_PROTOBUF_PROVIDER=package \ + -DgRPC_RE2_PROVIDER=package \ + -DgRPC_SSL_PROVIDER=package \ + -DgRPC_ZLIB_PROVIDER=package +$ make +$ make install +``` + +### Cross-compiling + +You can use CMake to cross-compile gRPC for another architecture. In order to +do so, you will first need to build `protoc` and `grpc_cpp_plugin` +for the host architecture. These tools are used during the build of gRPC, so +we need copies of executables that can be run natively. + +You will likely need to install the toolchain for the platform you are +targeting for your cross-compile. Once you have done so, you can write a +toolchain file to tell CMake where to find the compilers and system tools +that will be used for this build. + +This toolchain file is specified to CMake by setting the `CMAKE_TOOLCHAIN_FILE` +variable. +``` +$ cmake ../.. -DCMAKE_TOOLCHAIN_FILE=path/to/file +$ make +``` + +[Cross-compile example](test/distrib/cpp/run_distrib_test_cmake_aarch64_cross.sh) + +### A note on SONAME and its ABI compatibility implications in the cmake build + +Best efforts are made to bump the SONAME revision during ABI breaches. While a +change in the SONAME clearly indicates an ABI incompatibility, no hard guarantees +can be made about any sort of ABI stability across the same SONAME version. 
+ +## Building with make on UNIX systems (deprecated) + +NOTE: `make` used to be gRPC's default build system, but we're no longer recommending it. You should use `bazel` or `cmake` instead. The `Makefile` is only intended for internal usage and is not meant for public consumption. + +From the grpc repository root +```sh + $ make +``` + +NOTE: if you get an error on linux such as 'aclocal-1.15: command not found', which can happen if you ran 'make' before installing the pre-reqs, try the following: +```sh +$ git clean -f -d -x && git submodule foreach --recursive git clean -f -d -x +$ [sudo] apt-get install build-essential autoconf libtool pkg-config +$ make +``` + +### A note on `protoc` + +By default gRPC uses [protocol buffers](https://github.com/protocolbuffers/protobuf), +you will need the `protoc` compiler to generate stub server and client code. + +If you compile gRPC from source, as described above, the Makefile will +automatically try compiling the `protoc` in third_party if you cloned the +repository recursively and it detects that you do not already have 'protoc' compiler +installed. diff --git a/CODE-OF-CONDUCT.md b/CODE-OF-CONDUCT.md new file mode 100644 index 00000000..9d4213eb --- /dev/null +++ b/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/CONCEPTS.md b/CONCEPTS.md new file mode 100644 index 00000000..c85974ed --- /dev/null +++ b/CONCEPTS.md @@ -0,0 +1,64 @@ +# gRPC Concepts Overview + +Remote Procedure Calls (RPCs) provide a useful abstraction for building +distributed applications and services. The libraries in this repository +provide a concrete implementation of the gRPC protocol, layered over HTTP/2. +These libraries enable communication between clients and servers using any +combination of the supported languages. 
+ + +## Interface + +Developers using gRPC start with a language agnostic description of an RPC service (a collection +of methods). From this description, gRPC will generate client and server side interfaces +in any of the supported languages. The server implements +the service interface, which can be remotely invoked by the client interface. + +By default, gRPC uses [Protocol Buffers](https://github.com/protocolbuffers/protobuf) as the +Interface Definition Language (IDL) for describing both the service interface +and the structure of the payload messages. It is possible to use other +alternatives if desired. + +### Invoking & handling remote calls +Starting from an interface definition in a .proto file, gRPC provides +Protocol Compiler plugins that generate Client- and Server-side APIs. +gRPC users call into these APIs on the Client side and implement +the corresponding API on the server side. + +#### Synchronous vs. asynchronous +Synchronous RPC calls, that block until a response arrives from the server, are +the closest approximation to the abstraction of a procedure call that RPC +aspires to. + +On the other hand, networks are inherently asynchronous and in many scenarios, +it is desirable to have the ability to start RPCs without blocking the current +thread. + +The gRPC programming surface in most languages comes in both synchronous and +asynchronous flavors. + + +## Streaming + +gRPC supports streaming semantics, where either the client or the server (or both) +send a stream of messages on a single RPC call. The most general case is +Bidirectional Streaming where a single gRPC call establishes a stream in which both +the client and the server can send a stream of messages to each other. The streamed +messages are delivered in the order they were sent. + + +# Protocol + +The [gRPC protocol](doc/PROTOCOL-HTTP2.md) specifies the abstract requirements for communication between +clients and servers. 
A concrete embedding over HTTP/2 completes the picture by
+fleshing out the details of each of the required operations.
+
+## Abstract gRPC protocol
+A gRPC call consists of a bidirectional stream of messages, initiated by the client. In the client-to-server direction, this stream begins with a mandatory `Call Header`, followed by optional `Initial-Metadata`, followed by zero or more `Payload Messages`. A client signals end of its message stream by means of an underlying lower level protocol. The server-to-client direction contains an optional `Initial-Metadata`, followed by zero or more `Payload Messages` terminated with a mandatory `Status` and optional `Status-Metadata` (a.k.a., `Trailing-Metadata`).
+
+## Implementation over HTTP/2
+The abstract protocol defined above is implemented over [HTTP/2](https://http2.github.io/). gRPC bidirectional streams are mapped to HTTP/2 streams. The contents of `Call Header` and `Initial Metadata` are sent as HTTP/2 headers and subject to HPACK compression. `Payload Messages` are serialized into a byte stream of length prefixed gRPC frames which are then fragmented into HTTP/2 frames at the sender and reassembled at the receiver. `Status` and `Trailing-Metadata` are sent as HTTP/2 trailing headers (a.k.a., trailers). A client signals end of its message stream by setting `END_STREAM` flag on the last DATA frame.
+For a detailed description see [doc/PROTOCOL-HTTP2.md](doc/PROTOCOL-HTTP2.md).
+
+## Flow Control
+gRPC uses the flow control mechanism in HTTP/2. This enables fine-grained control of memory used for buffering in-flight messages.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..1d074d8d
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,134 @@
+# How to contribute
+
+We definitely welcome your patches and contributions to gRPC! 
Please read the gRPC +organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) +and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. + +If you are new to github, please start by reading [Pull Request +howto](https://help.github.com/articles/about-pull-requests/) + +If you are looking for features to work on, please filter the issues list with the label ["disposition/help wanted"](https://github.com/grpc/grpc/issues?q=label%3A%22disposition%2Fhelp+wanted%22). +Please note that some of these feature requests might have been closed in the past as a result of them being marked as stale due to there being no activity, but these are still valid feature requests. + +## Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License +Agreement](https://identity.linuxfoundation.org/projects/cncf). + +## Cloning the repository + +Before starting any development work you will need a local copy of the gRPC repository. +Please follow the instructions in [Building gRPC C++: Clone the repository](BUILDING.md#clone-the-repository-including-submodules). + +## Building & Running tests + +Different languages use different build systems. To hide the complexity +of needing to build with many different build systems, a portable python +script that unifies the experience of building and testing gRPC in different +languages and on different platforms is provided. + +To build gRPC in the language of choice (e.g. `c++`, `csharp`, `php`, `python`, `ruby`, ...) +- Prepare your development environment based on language-specific instructions in `src/YOUR-LANGUAGE` directory. +- The language-specific instructions might involve installing C/C++ prerequisites listed in + [Building gRPC C++: Prerequisites](BUILDING.md#pre-requisites). This is because gRPC implementations + in this repository are using the native gRPC "core" library internally. 
+- Run
+  ```
+  python tools/run_tests/run_tests.py -l YOUR_LANGUAGE --build_only
+  ```
+- To also run all the unit tests after building
+  ```
+  python tools/run_tests/run_tests.py -l YOUR_LANGUAGE
+  ```
+
+You can also run `python tools/run_tests/run_tests.py --help` to discover useful command line flags supported. For more details,
+see [tools/run_tests](tools/run_tests) where you will also find guidance on how to run various other test suites (e.g. interop tests, benchmarks).
+
+## Generated project files
+
+To ease maintenance of language- and platform- specific build systems, many
+project files are generated using templates and should not be edited by hand.
+Run `tools/buildgen/generate_projects.sh` to regenerate. See
+[templates](templates) for details.
+
+As a rule of thumb, if you see the "sanity tests" failing you've most likely
+edited generated files or you didn't regenerate the projects properly (or your
+code formatting doesn't match our code style).
+
+## Guidelines for Pull Requests
+How to get your contributions merged smoothly and quickly.
+
+- Create **small PRs** that are narrowly focused on **addressing a single
+  concern**. We often times receive PRs that are trying to fix several things
+  at a time, but only one fix is considered acceptable, nothing gets merged and
+  both author's & reviewer's time is wasted. Create more PRs to address different
+  concerns and everyone will be happy.
+
+- For speculative changes, consider opening an issue and discussing it first.
+  If you are suggesting a behavioral or API change, consider starting with a
+  [gRFC proposal](https://github.com/grpc/proposal).
+
+- Provide a good **PR description** as a record of **what** change is being made
+  and **why** it was made. Link to a GitHub issue if it exists.
+
+- Don't fix code style and formatting unless you are already changing that line
+  to address an issue. PRs with irrelevant changes won't be merged. 
If you do + want to fix formatting or style, do that in a separate PR. + +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file + and update the year. + +- Unless your PR is trivial, you should expect there will be reviewer comments + that you'll need to address before merging. We expect you to be reasonably + responsive to those comments, otherwise the PR will be closed after 2-3 weeks + of inactivity. + +- If you have non-trivial contributions, please consider adding an entry to [the + AUTHORS file](https://github.com/grpc/grpc/blob/master/AUTHORS) listing the + copyright holder for the contribution (yourself, if you are signing the + individual CLA, or your company, for corporate CLAs) in the same PR as your + contribution. This needs to be done only once, for each company, or + individual. Please keep this file in alphabetical order. + +- Maintain **clean commit history** and use **meaningful commit messages**. + PRs with messy commit history are difficult to review and won't be merged. + Use `rebase -i upstream/master` to curate your commit history and/or to + bring in latest changes from master (but avoid rebasing in the middle of + a code review). + +- Keep your PR up to date with upstream/master (if there are merge conflicts, + we can't really merge your change). + +- If you are regenerating the projects using + `tools/buildgen/generate_projects.sh`, make changes to generated files a + separate commit with commit message `regenerate projects`. Mixing changes + to generated and hand-written files make your PR difficult to review. + Note that running this script requires the installation of Python packages + `pyyaml` and `mako` (typically installed using `pip`) as well as a recent + version of [`go`](https://golang.org/doc/install#install). + +- **All tests need to be passing** before your change can be merged. 
+ We recommend you **run tests locally** before creating your PR to catch + breakages early on (see [tools/run_tests](tools/run_tests). Ultimately, the + green signal will be provided by our testing infrastructure. The reviewer + will help you if there are test failures that seem not related to the change + you are making. + +- Exceptions to the rules can be made if there's a compelling reason for doing + so. + +## Obtaining Commit Access +We grant Commit Access to contributors based on the following criteria: +* Sustained contribution to the gRPC project. +* Deep understanding of the areas contributed to, and good consideration of various reliability, usability and performance tradeoffs. +* Contributions demonstrate that obtaining Commit Access will significantly reduce friction for the contributors or others. + +In addition to submitting PRs, a Contributor with Commit Access can: +* Review PRs and merge once other checks and criteria pass. +* Triage bugs and PRs and assign appropriate labels and reviewers. + +### Obtaining Commit Access without Code Contributions +The [gRPC organization](https://github.com/grpc) is comprised of multiple repositories and commit access is usually restricted to one or more of these repositories. Some repositories such as the [grpc.github.io](https://github.com/grpc/grpc.github.io/) do not have code, but the same principle of sustained, high quality contributions, with a good understanding of the fundamentals, apply. + diff --git a/GOVERNANCE.md b/GOVERNANCE.md new file mode 100644 index 00000000..d6ff2674 --- /dev/null +++ b/GOVERNANCE.md @@ -0,0 +1 @@ +This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md). diff --git a/MAINTAINERS.md b/MAINTAINERS.md new file mode 100644 index 00000000..6f6c8a5f --- /dev/null +++ b/MAINTAINERS.md @@ -0,0 +1,84 @@ +This page lists all active maintainers of this repository. 
If you were a +maintainer and would like to add your name to the Emeritus list, please send us a +PR. + +See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) +for governance guidelines and how to become a maintainer. +See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) +for general contribution guidelines. + +## Maintainers (in alphabetical order) +- [a11r](https://github.com/a11r), Google LLC +- [apolcyn](https://github.com/apolcyn), Google LLC +- [arjunroy](https://github.com/arjunroy), Google LLC +- [AspirinSJL](https://github.com/AspirinSJL), Google LLC +- [bogdandrutu](https://github.com/bogdandrutu), Google LLC +- [ctiller](https://github.com/ctiller), Google LLC +- [daniel-j-born](https://github.com/daniel-j-born), Google LLC +- [dapengzhang0](https://github.com/dapengzhang0), Google LLC +- [dfawley](https://github.com/dfawley), Google LLC +- [dklempner](https://github.com/dklempner), Google LLC +- [ejona86](https://github.com/ejona86), Google LLC +- [ericgribkoff](https://github.com/ericgribkoff), Google LLC +- [gnossen](https://github.com/gnossen), Google LLC +- [guantaol](https://github.com/guantaol), Google LLC +- [hcaseyal](https://github.com/hcaseyal), Google LLC +- [jboeuf](https://github.com/jboeuf), Google LLC +- [jiangtaoli2016](https://github.com/jiangtaoli2016), Google LLC +- [jkolhe](https://github.com/jkolhe), Google LLC +- [jtattermusch](https://github.com/jtattermusch), Google LLC +- [karthikravis](https://github.com/karthikravis), Google LLC +- [kumaralokgithub](https://github.com/kumaralokgithub), Google LLC +- [lidizheng](https://github.com/lidizheng), Google LLC +- [markdroth](https://github.com/markdroth), Google LLC +- [matthewstevenson88](https://github.com/matthewstevenson88), Google LLC +- [mehrdada](https://github.com/mehrdada), Dropbox, Inc. 
+- [mhaidrygoog](https://github.com/mhaidrygoog), Google LLC +- [murgatroid99](https://github.com/murgatroid99), Google LLC +- [muxi](https://github.com/muxi), Google LLC +- [nanahpang](https://github.com/nanahpang), Google LLC +- [nathanielmanistaatgoogle](https://github.com/nathanielmanistaatgoogle), Google LLC +- [nicolasnoble](https://github.com/nicolasnoble), Google LLC +- [pfreixes](https://github.com/pfreixes), Skyscanner Ltd +- [qixuanl1](https://github.com/qixuanl1), Google LLC +- [ran-su](https://github.com/ran-su), Google LLC +- [rmstar](https://github.com/rmstar), Google LLC +- [sanjaypujare](https://github.com/sanjaypujare), Google LLC +- [sheenaqotj](https://github.com/sheenaqotj), Google LLC +- [soheilhy](https://github.com/soheilhy), Google LLC +- [sreecha](https://github.com/sreecha), LinkedIn +- [srini100](https://github.com/srini100), Google LLC +- [stanley-cheung](https://github.com/stanley-cheung), Google LLC +- [veblush](https://github.com/veblush), Google LLC +- [vishalpowar](https://github.com/vishalpowar), Google LLC +- [Vizerai](https://github.com/Vizerai), Google LLC +- [vjpai](https://github.com/vjpai), Google LLC +- [wcevans](https://github.com/wcevans), Google LLC +- [wenbozhu](https://github.com/wenbozhu), Google LLC +- [yang-g](https://github.com/yang-g), Google LLC +- [yashykt](https://github.com/yashykt), Google LLC +- [yihuazhang](https://github.com/yihuazhang), Google LLC +- [ZhenLian](https://github.com/ZhenLian), Google LLC +- [ZhouyihaiDing](https://github.com/ZhouyihaiDing), Google LLC + + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez), Google LLC +- [billfeng327](https://github.com/billfeng327), Google LLC +- [dgquintas](https://github.com/dgquintas), Google LLC +- [fengli79](https://github.com/fengli79), Google LLC +- [jcanizales](https://github.com/jcanizales), Google LLC +- [jpalmerLinuxFoundation](https://github.com/jpalmerLinuxFoundation), Linux Foundation +- 
[justinburke](https://github.com/justinburke), Google LLC
+- [kpayson64](https://github.com/kpayson64), Google LLC
+- [lyuxuan](https://github.com/lyuxuan), Google LLC
+- [matt-kwong](https://github.com/matt-kwong), Google LLC
+- [mit-mit](https://github.com/mit-mit), Google LLC
+- [mpwarres](https://github.com/mpwarres), Google LLC
+- [ncteisen](https://github.com/ncteisen), Google LLC
+- [pmarks-net](https://github.com/pmarks-net), Google LLC
+- [slash-lib](https://github.com/slash-lib), Google LLC
+- [soltanmm](https://github.com/soltanmm), Google LLC
+- [summerxyt](https://github.com/summerxyt), Google LLC
+- [y-zeng](https://github.com/y-zeng), Google LLC
+- [zpencer](https://github.com/zpencer), Google LLC
diff --git a/MANIFEST.md b/MANIFEST.md
new file mode 100644
index 00000000..9581e1c9
--- /dev/null
+++ b/MANIFEST.md
@@ -0,0 +1,23 @@
+# Top-level Items by language
+
+## Bazel
+* [grpc.bzl](grpc.bzl)
+
+## Objective-C
+* [gRPC.podspec](gRPC.podspec)
+
+## PHP
+* [composer.json](composer.json)
+* [config.m4](config.m4)
+* [package.xml](package.xml)
+
+## Python
+* [requirements.txt](requirements.txt)
+* [setup.cfg](setup.cfg)
+* [setup.py](setup.py)
+* [PYTHON-MANIFEST.in](PYTHON-MANIFEST.in)
+
+## Ruby
+* [Gemfile](Gemfile)
+* [grpc.gemspec](grpc.gemspec)
+* [Rakefile](Rakefile)
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 00000000..be6e1087
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,3 @@
+# Security Policy
+
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/TROUBLESHOOTING.md b/TROUBLESHOOTING.md
new file mode 100644
index 00000000..035213b9
--- /dev/null
+++ b/TROUBLESHOOTING.md
@@ -0,0 +1,43 @@
+# Troubleshooting gRPC
+
+This guide is for troubleshooting gRPC implementations based on C core library (sources for most of them are living in the `grpc/grpc` repository). 
+
+## Enabling extra logging and tracing
+
+Extra logging can be very useful for diagnosing problems. All gRPC implementations based on C core library support
+the `GRPC_VERBOSITY` and `GRPC_TRACE` environment variables that can be used to increase the amount of information
+that gets printed to stderr.
+
+## GRPC_VERBOSITY
+
+`GRPC_VERBOSITY` is used to set the minimum level of log messages printed by gRPC (supported values are `DEBUG`, `INFO` and `ERROR`). If this environment variable is unset, only `ERROR` logs will be printed.
+
+## GRPC_TRACE
+
+`GRPC_TRACE` can be used to enable extra logging for some internal gRPC components. Enabling the right traces can be invaluable
+for diagnosing what is going wrong when things aren't working as intended. Possible values for `GRPC_TRACE` are listed in [Environment Variables Overview](doc/environment_variables.md).
+Multiple traces can be enabled at once (use comma as separator).
+
+```
+# Enable debug logs for an application
+GRPC_VERBOSITY=debug ./helloworld_application_using_grpc
+```
+
+```
+# Print information about invocations of low-level C core API.
+# Note that trace logs of log level DEBUG won't be displayed.
+# Also note that most tracers use log level INFO, so without setting
+# GRPC_VERBOSITY accordingly, no traces will be printed.
+GRPC_VERBOSITY=info GRPC_TRACE=api ./helloworld_application_using_grpc
+```
+
+```
+# Print info from 3 different tracers, including tracing logs with log level DEBUG
+GRPC_VERBOSITY=debug GRPC_TRACE=tcp,http,api ./helloworld_application_using_grpc
+```
+
+Known limitations: `GRPC_TRACE=tcp` is currently not implemented for Windows (you won't see any tcp traces).
+
+Please note that the `GRPC_TRACE` environment variable has nothing to do with gRPC's "tracing" feature (= tracing RPCs in
+microservice environment to gain insight about how requests are processed by deployment), it is merely used to enable printing
+of extra logs. 
diff --git a/bazel/test/python_test_repo/README.md b/bazel/test/python_test_repo/README.md new file mode 100644 index 00000000..d9dfd1d9 --- /dev/null +++ b/bazel/test/python_test_repo/README.md @@ -0,0 +1,5 @@ +## Bazel Workspace Test + +This directory houses a test ensuring that downstream projects can use +`@com_github_grpc_grpc//src/python/grpcio:grpcio`, `py_proto_library`, and +`py_grpc_library`. diff --git a/doc/PROTOCOL-HTTP2.md b/doc/PROTOCOL-HTTP2.md new file mode 100644 index 00000000..1dc1ca42 --- /dev/null +++ b/doc/PROTOCOL-HTTP2.md @@ -0,0 +1,259 @@ +# gRPC over HTTP2 + +## Introduction +This document serves as a detailed description for an implementation of gRPC carried over HTTP2 framing. It assumes familiarity with the HTTP2 specification. + +## Protocol +Production rules are using ABNF syntax. + +### Outline + +The following is the general sequence of message atoms in a GRPC request & response message stream + +* Request → Request-Headers \*Length-Prefixed-Message EOS +* Response → (Response-Headers \*Length-Prefixed-Message Trailers) / Trailers-Only + + +### Requests + +* Request → Request-Headers \*Length-Prefixed-Message EOS + +Request-Headers are delivered as HTTP2 headers in HEADERS + CONTINUATION frames. + +* **Request-Headers** → Call-Definition \*Custom-Metadata +* **Call-Definition** → Method Scheme Path TE [Authority] [Timeout] Content-Type [Message-Type] [Message-Encoding] [Message-Accept-Encoding] [User-Agent] +* **Method** → ":method POST" +* **Scheme** → ":scheme " ("http" / "https") +* **Path** → ":path" "/" Service-Name "/" {_method name_} # But see note below. 
+* **Service-Name** → {_IDL-specific service name_} +* **Authority** → ":authority" {_virtual host name of authority_} +* **TE** → "te" "trailers" # Used to detect incompatible proxies +* **Timeout** → "grpc-timeout" TimeoutValue TimeoutUnit +* **TimeoutValue** → {_positive integer as ASCII string of at most 8 digits_} +* **TimeoutUnit** → Hour / Minute / Second / Millisecond / Microsecond / Nanosecond + * **Hour** → "H" + * **Minute** → "M" + * **Second** → "S" + * **Millisecond** → "m" + * **Microsecond** → "u" + * **Nanosecond** → "n" +* **Content-Type** → "content-type" "application/grpc" [("+proto" / "+json" / {_custom_})] +* **Content-Coding** → "identity" / "gzip" / "deflate" / "snappy" / {_custom_} +* **Message-Encoding** → "grpc-encoding" Content-Coding +* **Message-Accept-Encoding** → "grpc-accept-encoding" Content-Coding \*("," Content-Coding) +* **User-Agent** → "user-agent" {_structured user-agent string_} +* **Message-Type** → "grpc-message-type" {_type name for message schema_} +* **Custom-Metadata** → Binary-Header / ASCII-Header +* **Binary-Header** → {Header-Name "-bin" } {_base64 encoded value_} +* **ASCII-Header** → Header-Name ASCII-Value +* **Header-Name** → 1\*( %x30-39 / %x61-7A / "\_" / "-" / ".") ; 0-9 a-z \_ - . +* **ASCII-Value** → 1\*( %x20-%x7E ) ; space and printable ASCII + + +HTTP2 requires that reserved headers, ones starting with ":" appear before all other headers. Additionally implementations should send **Timeout** immediately after the reserved headers and they should send the **Call-Definition** headers before sending **Custom-Metadata**. + +**Path** is case-sensitive. Some gRPC implementations may allow the **Path** +format shown above to be overridden, but this functionality is strongly +discouraged. 
gRPC does not go out of its way to break users that are using this +kind of override, but we do not actively support it, and some functionality +(e.g., service config support) will not work when the path is not of the form +shown above. + +If **Timeout** is omitted a server should assume an infinite timeout. Client implementations are free to send a default minimum timeout based on their deployment requirements. + +If **Content-Type** does not begin with "application/grpc", gRPC servers SHOULD respond with HTTP status of 415 (Unsupported Media Type). This will prevent other HTTP/2 clients from interpreting a gRPC error response, which uses status 200 (OK), as successful. + +**Custom-Metadata** is an arbitrary set of key-value pairs defined by the application layer. Header names starting with "grpc-" but not listed here are reserved for future GRPC use and should not be used by applications as **Custom-Metadata**. + +Note that HTTP2 does not allow arbitrary octet sequences for header values so binary header values must be encoded using Base64 as per https://tools.ietf.org/html/rfc4648#section-4. Implementations MUST accept padded and un-padded values and should emit un-padded values. Applications define binary headers by having their names end with "-bin". Runtime libraries use this suffix to detect binary headers and properly apply base64 encoding & decoding as headers are sent and received. + +**Custom-Metadata** header order is not guaranteed to be preserved except for +values with duplicate header names. Duplicate header names may have their values +joined with "," as the delimiter and be considered semantically equivalent. +Implementations must split **Binary-Header**s on "," before decoding the +Base64-encoded values. + +**ASCII-Value** should not have leading or trailing whitespace. If it contains +leading or trailing whitespace, it may be stripped. The **ASCII-Value** +character range defined is more strict than HTTP. 
Implementations must not error +due to receiving an invalid **ASCII-Value** that's a valid **field-value** in +HTTP, but the precise behavior is not strictly defined: they may throw the value +away or accept the value. If accepted, care must be taken to make sure that the +application is permitted to echo the value back as metadata. For example, if the +metadata is provided to the application as a list in a request, the application +should not trigger an error by providing that same list as the metadata in the +response. + +Servers may limit the size of **Request-Headers**, with a default of 8 KiB +suggested. Implementations are encouraged to compute total header size like +HTTP/2's `SETTINGS_MAX_HEADER_LIST_SIZE`: the sum of all header fields, for each +field the sum of the uncompressed field name and value lengths plus 32, with +binary values' lengths being post-Base64. + +The repeated sequence of **Length-Prefixed-Message** items is delivered in DATA frames + +* **Length-Prefixed-Message** → Compressed-Flag Message-Length Message +* **Compressed-Flag** → 0 / 1 # encoded as 1 byte unsigned integer +* **Message-Length** → {_length of Message_} # encoded as 4 byte unsigned integer (big endian) +* **Message** → \*{binary octet} + +A **Compressed-Flag** value of 1 indicates that the binary octet sequence of **Message** is compressed using the mechanism declared by the **Message-Encoding** header. A value of 0 indicates that no encoding of **Message** bytes has occurred. Compression contexts are NOT maintained over message boundaries, implementations must create a new context for each message in the stream. If the **Message-Encoding** header is omitted then the **Compressed-Flag** must be 0. + +For requests, **EOS** (end-of-stream) is indicated by the presence of the END_STREAM flag on the last received DATA frame. 
In scenarios where the **Request** stream needs to be closed but no data remains to be sent implementations MUST send an empty DATA frame with this flag set. + +### Responses + +* **Response** → (Response-Headers \*Length-Prefixed-Message Trailers) / Trailers-Only +* **Response-Headers** → HTTP-Status [Message-Encoding] [Message-Accept-Encoding] Content-Type \*Custom-Metadata +* **Trailers-Only** → HTTP-Status Content-Type Trailers +* **Trailers** → Status [Status-Message] \*Custom-Metadata +* **HTTP-Status** → ":status 200" +* **Status** → "grpc-status" 1\*DIGIT ; 0-9 +* **Status-Message** → "grpc-message" Percent-Encoded +* **Percent-Encoded** → 1\*(Percent-Byte-Unencoded / Percent-Byte-Encoded) +* **Percent-Byte-Unencoded** → 1\*( %x20-%x24 / %x26-%x7E ) ; space and VCHAR, except % +* **Percent-Byte-Encoded** → "%" 2HEXDIGIT ; 0-9 A-F + +**Response-Headers** & **Trailers-Only** are each delivered in a single HTTP2 HEADERS frame block. Most responses are expected to have both headers and trailers but **Trailers-Only** is permitted for calls that produce an immediate error. Status must be sent in **Trailers** even if the status code is OK. + +For responses end-of-stream is indicated by the presence of the END_STREAM flag on the last received HEADERS frame that carries **Trailers**. + +Implementations should expect broken deployments to send non-200 HTTP status codes in responses as well as a variety of non-GRPC content-types and to omit **Status** & **Status-Message**. Implementations must synthesize a **Status** & **Status-Message** to propagate to the application layer when this occurs. + +Clients may limit the size of **Response-Headers**, **Trailers**, and +**Trailers-Only**, with a default of 8 KiB each suggested. + +The value portion of **Status** is a decimal-encoded integer as an ASCII string, +without any leading zeros. 
+
+The value portion of **Status-Message** is conceptually a Unicode string
+description of the error, physically encoded as UTF-8 followed by
+percent-encoding. Percent-encoding is specified in [RFC 3986
+§2.1](https://tools.ietf.org/html/rfc3986#section-2.1), although the form used
+here has different restricted characters. When decoding invalid values,
+implementations MUST NOT error or throw away the message. At worst, the
+implementation can abort decoding the status message altogether such that the
+user would receive the raw percent-encoded form. Alternatively, the
+implementation can decode valid portions while leaving broken %-encodings as-is
+or replacing them with a replacement character (e.g., '?' or the Unicode
+replacement character).
+
+#### Example
+
+Sample unary-call showing HTTP2 framing sequence
+
+**Request**
+
+```
+HEADERS (flags = END_HEADERS)
+:method = POST
+:scheme = http
+:path = /google.pubsub.v2.PublisherService/CreateTopic
+:authority = pubsub.googleapis.com
+grpc-timeout = 1S
+content-type = application/grpc+proto
+grpc-encoding = gzip
+authorization = Bearer y235.wef315yfh138vh31hv93hv8h3v
+
+DATA (flags = END_STREAM)
+
+```
+**Response**
+```
+HEADERS (flags = END_HEADERS)
+:status = 200
+grpc-encoding = gzip
+content-type = application/grpc+proto
+
+DATA
+
+
+HEADERS (flags = END_STREAM, END_HEADERS)
+grpc-status = 0 # OK
+trace-proto-bin = jher831yy13JHy3hc
+```
+
+#### User Agents
+
+While the protocol does not require a user-agent to function it is recommended that clients provide a structured user-agent string that provides a basic description of the calling library, version & platform to facilitate issue diagnosis in heterogeneous environments. The following structure is recommended to library developers
+```
+User-Agent → "grpc-" Language ?("-" Variant) "/" Version ?( " (" *(AdditionalProperty ";") ")" )
+```
+E.g. 
+ +``` +grpc-java/1.2.3 +grpc-ruby/1.2.3 +grpc-ruby-jruby/1.3.4 +grpc-java-android/0.9.1 (gingerbread/1.2.4; nexus5; tmobile) +``` + +#### Idempotency and Retries + +Unless explicitly defined to be, gRPC Calls are not assumed to be idempotent. Specifically: + +* Calls that cannot be proven to have started will not be retried. +* There is no mechanism for duplicate suppression as it is not necessary. +* Calls that are marked as idempotent may be sent multiple times. + + +#### HTTP2 Transport Mapping + +##### Stream Identification +All GRPC calls need to specify an internal ID. We will use HTTP2 stream-ids as call identifiers in this scheme. NOTE: These ids are contextual to an open HTTP2 session and will not be unique within a given process that is handling more than one HTTP2 session nor can they be used as GUIDs. + +##### Data Frames +DATA frame boundaries have no relation to **Length-Prefixed-Message** boundaries and implementations should make no assumptions about their alignment. + +##### Errors + +When an application or runtime error occurs during an RPC a **Status** and **Status-Message** are delivered in **Trailers**. + +In some cases it is possible that the framing of the message stream has become corrupt and the RPC runtime will choose to use an **RST_STREAM** frame to indicate this state to its peer. RPC runtime implementations should interpret RST_STREAM as immediate full-closure of the stream and should propagate an error up to the calling application layer. + +The following mapping from RST_STREAM error codes to GRPC error codes is applied. + +HTTP2 Code|GRPC Code +----------|----------- +NO_ERROR(0)|INTERNAL - An explicit GRPC status of OK should have been sent but this might be used to aggressively [lameduck](https://landing.google.com/sre/sre-book/chapters/load-balancing-datacenter/#identifying-bad-tasks-flow-control-and-lame-ducks-bEs0uy) in some scenarios. 
+PROTOCOL_ERROR(1)|INTERNAL +INTERNAL_ERROR(2)|INTERNAL +FLOW_CONTROL_ERROR(3)|INTERNAL +SETTINGS_TIMEOUT(4)|INTERNAL +STREAM_CLOSED|No mapping as there is no open stream to propagate to. Implementations should log. +FRAME_SIZE_ERROR|INTERNAL +REFUSED_STREAM|UNAVAILABLE - Indicates that no processing occurred and the request can be retried, possibly elsewhere. +CANCEL(8)|Mapped to call cancellation when sent by a client.Mapped to CANCELLED when sent by a server. Note that servers should only use this mechanism when they need to cancel a call but the payload byte sequence is incomplete. +COMPRESSION_ERROR|INTERNAL +CONNECT_ERROR|INTERNAL +ENHANCE_YOUR_CALM|RESOURCE_EXHAUSTED ...with additional error detail provided by runtime to indicate that the exhausted resource is bandwidth. +INADEQUATE_SECURITY| PERMISSION_DENIED … with additional detail indicating that permission was denied as protocol is not secure enough for call. + + +##### Security + +The HTTP2 specification mandates the use of TLS 1.2 or higher when TLS is used with HTTP2. It also places some additional constraints on the allowed ciphers in deployments to avoid known-problems as well as requiring SNI support. It is also expected that HTTP2 will be used in conjunction with proprietary transport security mechanisms about which the specification can make no meaningful recommendations. + +##### Connection Management + +###### GOAWAY Frame +Sent by servers to clients to indicate that they will no longer accept any new streams on the associated connections. This frame includes the id of the last successfully accepted stream by the server. Clients should consider any stream initiated after the last successfully accepted stream as UNAVAILABLE and retry the call elsewhere. Clients are free to continue working with the already accepted streams until they complete or the connection is terminated. 
+ +Servers should send GOAWAY before terminating a connection to reliably inform clients which work has been accepted by the server and is being executed. + +###### PING Frame +Both clients and servers can send a PING frame that the peer must respond to by precisely echoing what they received. This is used to assert that the connection is still live as well as providing a means to estimate end-to-end latency. If a server initiated PING does not receive a response within the deadline expected by the runtime all outstanding calls on the server will be closed with a CANCELLED status. An expired client initiated PING will cause all calls to be closed with an UNAVAILABLE status. Note that the frequency of PINGs is highly dependent on the network environment, implementations are free to adjust PING frequency based on network and application requirements. + +###### Connection failure +If a detectable connection failure occurs on the client all calls will be closed with an UNAVAILABLE status. For servers open calls will be closed with a CANCELLED status. + + +### Appendix A - GRPC for Protobuf + +The service interfaces declared by protobuf are easily mapped onto GRPC by +code generation extensions to protoc. The following defines the mapping +to be used. + +* **Service-Name** → ?( {_proto package name_} "." ) {_service name_} +* **Message-Type** → {_fully qualified proto message name_} +* **Content-Type** → "application/grpc+proto" diff --git a/doc/PROTOCOL-WEB.md b/doc/PROTOCOL-WEB.md new file mode 100644 index 00000000..c3f9169b --- /dev/null +++ b/doc/PROTOCOL-WEB.md @@ -0,0 +1,141 @@ +# gRPC Web + +gRPC-Web provides a JS client library that supports the same API +as gRPC-Node to access a gRPC service. Due to browser limitation, +the Web client library implements a different protocol than the +[native gRPC protocol](PROTOCOL-HTTP2.md). +This protocol is designed to make it easy for a proxy to translate +between the protocols as this is the most likely deployment model. 
+ +This document lists the differences between the two protocols. +To help tracking future revisions, this document describes a delta +with the protocol details specified in the +[native gRPC protocol](PROTOCOL-HTTP2.md). + +# Design goals + +For the gRPC-Web protocol, we have decided on the following design goals: + +* adopt the same framing as “application/grpc” whenever possible +* decouple from HTTP/2 framing which is not, and will never be, directly +exposed by browsers +* support text streams (e.g. base64) in order to provide cross-browser +support (e.g. IE-10) + +While the new protocol will be published/reviewed publicly, we also +intend to keep the protocol as an internal detail to gRPC-Web. +More specifically, we expect the protocol to + +* evolve over time, mainly to optimize for browser clients or support +web-specific features such as CORS, XSRF +* become optional (in 1-2 years) when browsers are able to speak the native +gRPC protocol via the new [whatwg streams API](https://github.com/whatwg/streams) + +# Protocol differences vs [gRPC over HTTP2](PROTOCOL-HTTP2.md) + +Content-Type + +1. application/grpc-web + * e.g. application/grpc-web+[proto, json, thrift] + * the sender should always specify the message format, e.g. +proto, +json + * the receiver should assume the default is "+proto" when the message format is missing in Content-Type (as "application/grpc-web") +2. application/grpc-web-text + * text-encoded streams of “application/grpc-web” + * e.g. application/grpc-web-text+[proto, thrift] + +--- + +HTTP wire protocols + +1. support any HTTP/*, with no dependency on HTTP/2 specific framing +2. use lower-case header/trailer names +3. use EOF (end of body) to close the stream + +--- + +HTTP/2 related behavior (specified in [gRPC over HTTP2](PROTOCOL-HTTP2.md)) + +1. stream-id is not supported or used +2. go-away is not supported or used + +--- + +Message framing (vs. [http2-transport-mapping](PROTOCOL-HTTP2.md#http2-transport-mapping)) + +1. 
Response status encoded as part of the response body + * Key-value pairs encoded as a HTTP/1 headers block (without the terminating newline), per https://tools.ietf.org/html/rfc7230#section-3.2 + ``` + key1: foo\r\n + key2: bar\r\n + ``` +2. 8th (MSB) bit of the 1st gRPC frame byte + * 0: data + * 1: trailers + ``` + 10000000b: an uncompressed trailer (as part of the body) + 10000001b: a compressed trailer + ``` +3. Trailers must be the last message of the response, as enforced +by the implementation +4. Trailers-only responses: no change to the gRPC protocol spec. +Trailers may be sent together with response headers, with no message +in the body. + +--- + +User Agent + +* Do NOT use User-Agent header (which is to be set by browsers, by default) +* Use X-User-Agent: grpc-web-javascript/0.1 (follow the same format as specified in [gRPC over HTTP2](PROTOCOL-HTTP2.md)) + +--- + +Text-encoded (response) streams + +1. The client library should indicate to the server via the "Accept" header that +the response stream needs to be text encoded e.g. when XHR is used or due +to security policies with XHR + * Accept: application/grpc-web-text +2. The default text encoding is base64 + * Note that “Content-Transfer-Encoding: base64” should not be used. + Due to in-stream base64 padding when delimiting messages, the entire + response body is not necessarily a valid base64-encoded entity + * While the server runtime will always base64-encode and flush gRPC messages + atomically the client library should not assume base64 padding always + happens at the boundary of message frames. That is, the implementation may send base64-encoded "chunks" with potential padding whenever the runtime needs to flush a byte buffer. 
+ +# Other features + +Retries, caching + +* Will spec out the support after their respective gRPC spec extensions +are finalized + * Safe retries: PUT + * Caching: header encoded request and/or a web specific spec + +--- + +Keep-alive + +* HTTP/2 PING is not supported or used +* Will not support send-beacon (GET) + +--- + +Bidi-streaming, with flow-control + +* Pending on [whatwg fetch/streams](https://github.com/whatwg/fetch) to be +finalized and implemented in modern browsers +* gRPC-Web client will support the native gRPC protocol with modern browsers + +--- + +Versioning + +* Special headers may be introduced to support features that may break compatibility. + +--- + +Browser-specific features + +* For features that are unique to browser or HTML clients, check the [spec doc](https://github.com/grpc/grpc-web/blob/master/doc/browser-features.md) published in the grpc/grpc-web repo. diff --git a/doc/binary-logging.md b/doc/binary-logging.md new file mode 100644 index 00000000..86b3f766 --- /dev/null +++ b/doc/binary-logging.md @@ -0,0 +1,59 @@ +# Binary Logging + +## Format + +The log format is described in [this proto file](/src/proto/grpc/binary_log/v1alpha/log.proto). It is intended that multiple parts of the call will be logged in separate files, and then correlated by analysis tools using the rpc\_id. + +## API + +The binary logger will be a separate library from gRPC, in each language that we support. The user will need to explicitly call into the library to generate logs. The library will provide the ability to log sending or receiving, as relevant, the following on both the client and the server: + + - Initial metadata + - Messages + - Status with trailing metadata from the server + - Additional key/value pairs that are associated with a call but not sent over the wire + +The following is an example of what such an API could look like in C++: + +```c++ +// The context provides the method_name, deadline, peer, and metadata contents. 
+// direction = CLIENT_SEND +LogRequestHeaders(ClientContext context); +// direction = SERVER_RECV +LogRequestHeaders(ServerContext context); + +// The context provides the metadata contents +// direction = CLIENT_RECV +LogResponseHeaders(ClientContext context); +// direction = SERVER_SEND +LogResponseHeaders(ServerContext context); + +// The context provides the metadata contents +// direction = CLIENT_RECV +LogStatus(ClientContext context, grpc_status_code code, string details); +// direction = SERVER_SEND +LogStatus(ServerContext context, grpc_status_code code, string details); + +// The context provides the user data contents +// direction = CLIENT_SEND +LogUserData(ClientContext context); +// direction = SERVER_SEND +LogUserData(ServerContext context); + +// direction = CLIENT_SEND +LogRequestMessage(ClientContext context, uint32_t length, T message); +// direction = SERVER_RECV +LogRequestMessage(ServerContext context, uint32_t length, T message); +// direction = CLIENT_RECV +LogResponseMessage(ClientContext context, uint32_t length, T message); +// direction = SERVER_SEND +LogResponseMessage(ServerContext context, uint32_t length, T message); +``` + +In all of those cases, the `rpc_id` is provided by the context, and each combination of method and context argument type implies a single direction, as noted in the comments. + +For the message log functions, the `length` argument indicates the length of the complete message, and the `message` argument may be only part of the complete message, stripped of sensitive material and/or shortened for efficiency. + +## Language differences + +In other languages, more or less data will need to be passed explicitly as separate arguments. In some languages, for example, the metadata will be separate from the context-like object and will need to be passed as a separate argument. 
diff --git a/doc/c-style-guide.md b/doc/c-style-guide.md new file mode 100644 index 00000000..2cfa41dd --- /dev/null +++ b/doc/c-style-guide.md @@ -0,0 +1,92 @@ +GRPC C STYLE GUIDE +===================== + +Background +---------- + +Here we document style rules for C usage in the gRPC Core library. + +General +------- + +- Layout rules are defined by clang-format, and all code should be passed + through clang-format. A (docker-based) script to do so is included in + [tools/distrib/clang\_format\_code.sh](../tools/distrib/clang_format_code.sh). + +Header Files +------------ + +- Public header files (those in the include/grpc tree) should compile as + pedantic C89. +- Public header files should be includable from C++ programs. That is, they + should include the following: + ```c + #ifdef __cplusplus + extern "C" { + # endif + + /* ... body of file ... */ + + #ifdef __cplusplus + } + # endif + ``` +- Header files should be self-contained and end in .h. +- All header files should have a `#define` guard to prevent multiple inclusion. + To guarantee uniqueness they should be based on the file's path. + + For public headers: `include/grpc/grpc.h` → `GRPC_GRPC_H` + + For private headers: + `src/core/lib/channel/channel_stack.h` → + `GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H` + +Variable Initialization +----------------------- + +When declaring a (non-static) pointer variable, always initialize it to `NULL`. +Even in the case of static pointer variables, it's recommended to explicitly +initialize them to `NULL`. + + +C99 Features +------------ + +- Variable sized arrays are not allowed. +- Do not use the 'inline' keyword. +- Flexible array members are allowed + (https://en.wikipedia.org/wiki/Flexible_array_member). + +Comments +-------- + +Within public header files, only `/* */` comments are allowed. + +Within implementation files and private headers, either single line `//` +or multi line `/* */` comments are allowed. Only one comment style per file is +allowed however (i.e. 
if single line comments are used anywhere within a file, +ALL comments within that file must be single line comments). + +Symbol Names +------------ + +- Non-static functions must be prefixed by `grpc_` +- Static functions must *not* be prefixed by `grpc_` +- Typenames of `struct`s , `union`s, and `enum`s must be prefixed by `grpc_` if + they are declared in a header file. They must not be prefixed by `grpc_` if + they are declared in a source file. +- Enumeration values and `#define` names must be uppercase. All other values + must be lowercase. +- Enumeration values or `#define` names defined in a header file must be + prefixed with `GRPC_` (except for `#define` macros that are being used to + substitute functions; those should follow the general rules for + functions). Enumeration values or `#define`s defined in source files must not + be prefixed with `GRPC_`. +- Multiple word identifiers use underscore as a delimiter, *never* camel + case. E.g. `variable_name`. + +Functions +---------- + +- The use of [`atexit()`](http://man7.org/linux/man-pages/man3/atexit.3.html) is + in forbidden in libgrpc. diff --git a/doc/command_line_tool.md b/doc/command_line_tool.md new file mode 100644 index 00000000..e303b085 --- /dev/null +++ b/doc/command_line_tool.md @@ -0,0 +1,199 @@ +# gRPC command line tool + +## Overview + +This document describes the command line tool that comes with gRPC repository. It is desirable to have command line +tools written in other languages roughly follow the same syntax and flags. + +At this point, the tool needs to be built from source, and it should be moved out to grpc-tools repository as a stand +alone application once it is mature enough. + +## Core functionality + +The command line tool can do the following things: + +- Send unary rpc. +- Attach metadata and display received metadata. +- Handle common authentication to server. +- Infer request/response types from server reflection result. 
+- Find the request/response types from a given proto file. +- Read proto request in text form. +- Read request in wire form (for protobuf messages, this means serialized binary form). +- Display proto response in text form. +- Write response in wire form to a file. + +The command line tool should support the following things: + +- List server services and methods through server reflection. +- Fine-grained auth control (such as, use this oauth token to talk to the server). +- Send streaming rpc. + +## Code location + +To use the tool, you need to get the grpc repository and make sure your system +has the prerequisites for building grpc from source, given in the [installation +instructions](../BUILDING.md). + +In order to build the grpc command line tool from a fresh clone of the grpc +repository, you need to run the following command to update submodules: + +``` +git submodule update --init +``` + +Once the prerequisites are satisfied, you can build with cmake: + +``` +$ mkdir -p cmake/build +$ cd cmake/build +$ cmake -DgRPC_BUILD_TESTS=ON ../.. +$ make grpc_cli +``` + +The main file can be found at +https://github.com/grpc/grpc/blob/master/test/cpp/util/grpc_cli.cc + +## Prerequisites + +Most `grpc_cli` commands need the server to support server reflection. See +guides for +[Java](https://github.com/grpc/grpc-java/blob/master/documentation/server-reflection-tutorial.md#enable-server-reflection) +, [C++](https://github.com/grpc/grpc/blob/master/doc/server_reflection_tutorial.md) +and [Go](https://github.com/grpc/grpc-go/blob/master/Documentation/server-reflection-tutorial.md) + +Local proto files can be used as an alternative. See instructions [below](#Call-a-remote-method). 
+ +## Usage + +### List services + +`grpc_cli ls` command lists services and methods exposed at a given port + +- List all the services exposed at a given port + + ```sh + $ grpc_cli ls localhost:50051 + ``` + + output: + + ```none + helloworld.Greeter + grpc.reflection.v1alpha.ServerReflection + ``` + + The `localhost:50051` part indicates the server you are connecting to. + +- List one service with details + + `grpc_cli ls` command inspects a service given its full name (in the format + of \.\). It can print information with a long listing + format when `-l` flag is set. This flag can be used to get more details + about a service. + + ```sh + $ grpc_cli ls localhost:50051 helloworld.Greeter -l + ``` + + `helloworld.Greeter` is full name of the service. + + output: + + ```proto + filename: helloworld.proto + package: helloworld; + service Greeter { + rpc SayHello(helloworld.HelloRequest) returns (helloworld.HelloReply) {} + } + + ``` + +### List methods + +- List one method with details + + `grpc_cli ls` command also inspects a method given its full name (in the + format of \.\.\). + + ```sh + $ grpc_cli ls localhost:50051 helloworld.Greeter.SayHello -l + ``` + + `helloworld.Greeter.SayHello` is full name of the method. + + output: + + ```proto + rpc SayHello(helloworld.HelloRequest) returns (helloworld.HelloReply) {} + ``` + +### Inspect message types + +We can use `grpc_cli type` command to inspect request/response types given the +full name of the type (in the format of \.\). + +- Get information about the request type + + ```sh + $ grpc_cli type localhost:50051 helloworld.HelloRequest + ``` + + `helloworld.HelloRequest` is the full name of the request type. + + output: + + ```proto + message HelloRequest { + optional string name = 1; + } + ``` + +### Call a remote method + +We can send RPCs to a server and get responses using `grpc_cli call` command. 
+ +- Call a unary method Send a rpc to a helloworld server at `localhost:50051`: + + ```sh + $ grpc_cli call localhost:50051 SayHello "name: 'gRPC CLI'" + ``` + + output: `sh message: "Hello gRPC CLI"` + + `SayHello` is (part of) the gRPC method string. Then `"name: 'world'"` is + the text format of the request proto message. For information on more flags, + look at the comments of `grpc_cli.cc`. + +- Use local proto files + + If the server does not have the server reflection service, you will need to + provide local proto files containing the service definition. The tool will + try to find request/response types from them. + + ```sh + $ grpc_cli call localhost:50051 SayHello "name: 'world'" \ + --protofiles=examples/protos/helloworld.proto + ``` + + If the proto file is not under the current directory, you can use + `--proto_path` to specify new search roots + (separated by colon on Mac/Linux/Cygwin or semicolon on Windows). + + Note that the tool will always attempt to use the reflection service first, + falling back to local proto files if the service is not found. Use + `--noremotedb` to avoid attempting to use the reflection service. + +- Send non-proto rpc + + For using gRPC with protocols other than protobuf, you will need the exact + method name string and a file containing the raw bytes to be sent on the + wire. + + ```bash + $ grpc_cli call localhost:50051 /helloworld.Greeter/SayHello \ + --input_binary_file=input.bin \ + --output_binary_file=output.bin + ``` + + On success, you will need to read or decode the response from the + `output.bin` file. diff --git a/doc/compression.md b/doc/compression.md new file mode 100644 index 00000000..7f0c3823 --- /dev/null +++ b/doc/compression.md @@ -0,0 +1,118 @@ +## gRPC Compression + +The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", +"SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be +interpreted as described in [RFC 2119](http://www.ietf.org/rfc/rfc2119.txt). 
+ +### Intent + +Compression is used to reduce the amount of bandwidth used between peers. The +compression supported by gRPC acts _at the individual message level_, taking +_message_ [as defined in the wire format +document](PROTOCOL-HTTP2.md). + +The implementation supports different compression algorithms. A _default +compression level_, to be used in the absence of message-specific settings, MAY +be specified for during channel creation. + +The ability to control compression settings per call and to enable/disable +compression on a per message basis MAY be used to prevent CRIME/BEAST attacks. +It also allows for asymmetric compression communication, whereby a response MAY +be compressed differently, if at all. + +### Specification + +Compression MAY be configured by the Client Application by calling the +appropriate API method. There are two scenarios where compression MAY be +configured: + ++ At channel creation time, which sets the channel default compression and + therefore the compression that SHALL be used in the absence of per-RPC + compression configuration. ++ At response time, via: + + For unary RPCs, the {Client,Server}Context instance. + + For streaming RPCs, the {Client,Server}Writer instance. In this case, + configuration is reduced to disabling compression altogether. + +### Compression Method Asymmetry Between Peers + +A gRPC peer MAY choose to respond using a different compression method to that +of the request, including not performing any compression, regardless of channel +and RPC settings (for example, if compression would result in small or negative +gains). + +If a client message is compressed by an algorithm that is not supported +by a server, the message WILL result in an `UNIMPLEMENTED` error status on the +server. The server will then include a `grpc-accept-encoding` response +header which specifies the algorithms that the server accepts. 
If the client +message is compressed using one of the algorithms from the `grpc-accept-encoding` header +and an `UNIMPLEMENTED` error status is returned from the server, the cause of the error +MUST NOT be related to compression. If a server sent data which is compressed by an algorithm +that is not supported by the client, an `INTERNAL` error status will occur on the client side. + +Note that a peer MAY choose to not disclose all the encodings it supports. +However, if it receives a message compressed in an undisclosed but supported +encoding, it MUST include said encoding in the response's `grpc-accept-encoding` +header. + +For every message a server is requested to compress using an algorithm it knows +the client doesn't support (as indicated by the last `grpc-accept-encoding` +header received from the client), it SHALL send the message uncompressed. + +### Specific Disabling of Compression + +If the user (through the previously described mechanisms) requests to disable +compression the next message MUST be sent uncompressed. This is instrumental in +preventing BEAST/CRIME attacks. This applies to both the unary and streaming +cases. + +### Compression Levels and Algorithms + +The set of supported algorithm is implementation dependent. In order to simplify +the public API and to operate seamlessly across implementations (both in terms +of languages but also different version of the same one), we introduce the idea +of _compression levels_ (such as "low", "medium", "high"). + +Levels map to concrete algorithms and/or their settings (such as "low" mapping +to "gzip -3" and "high" mapping to "gzip -9") automatically depending on what a +peer is known to support. A server is always aware of what its clients support, +as clients disclose it in the Message-Accept-Encoding header as part of the +RPC. A client doesn't a priori (presently) know which algorithms a +server supports. 
This issue can be addressed with an initial negotiation of +capabilities or an automatic retry mechanism. These features will be implemented +in the future. Currently however, compression levels are only supported at the +server side, which is aware of the client's capabilities through the incoming +Message-Accept-Encoding header. + +### Propagation to child RPCs + +The inheritance of the compression configuration by child RPCs is left up to the +implementation. Note that in the absence of changes to the parent channel, its +configuration will be used. + +### Test cases + +1. When a compression level is not specified for either the channel or the +message, the default channel level _none_ is considered: data MUST NOT be +compressed. +1. When per-RPC compression configuration isn't present for a message, the +channel compression configuration MUST be used. +1. When a compression method (including no compression) is specified for an +outgoing message, the message MUST be compressed accordingly. +1. A message compressed by a client in a way not supported by its server MUST +fail with status `UNIMPLEMENTED`, its associated description indicating the +unsupported condition as well as the supported ones. The returned +`grpc-accept-encoding` header MUST NOT contain the compression method +(encoding) used. +1. A message compressed by a server in a way not supported by its client MUST +fail with status `INTERNAL`, its associated description indicating the +unsupported condition as well as the supported ones. The returned +`grpc-accept-encoding` header MUST NOT contain the compression method +(encoding) used. +1. An ill-constructed message with its [Compressed-Flag +bit](PROTOCOL-HTTP2.md#compressed-flag) +set but lacking a +[grpc-encoding](PROTOCOL-HTTP2.md#message-encoding) +entry different from _identity_ in its metadata MUST fail with `INTERNAL` +status, its associated description indicating the invalid Compressed-Flag +condition. 
diff --git a/doc/compression_cookbook.md b/doc/compression_cookbook.md new file mode 100644 index 00000000..c10a805f --- /dev/null +++ b/doc/compression_cookbook.md @@ -0,0 +1,133 @@ +# gRPC (Core) Compression Cookbook + +## Introduction + +This document describes compression as implemented by the gRPC C core. See [the +full compression specification](compression.md) for details. + +### Intended Audience + +Wrapped languages developers, for the purposes of supporting compression by +interacting with the C core. + +## Criteria for GA readiness + +1. Be able to set compression at [channel](#per-channel-settings), + [call](#per-call-settings) and [message](#per-message-settings) level. + In principle this API should be based on _compression levels_ as opposed to + algorithms. See the discussion [below](#level-vs-algorithms). +1. Have unit tests covering [the cases from the + spec](https://github.com/grpc/grpc/blob/master/doc/compression.md#test-cases). +1. Interop tests implemented and passing on Jenkins. The two relevant interop + test cases are + [large_compressed_unary](https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#large_compressed_unary) + and + [server_compressed_streaming](https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#server_compressed_streaming). + +## Summary Flowcharts + +The following flowcharts depict the evolution of a message, both _incoming_ and +_outgoing_, irrespective of the client/server character of the call. Aspects +still not symmetric between clients and servers (e.g. the [use of compression +levels](https://github.com/grpc/grpc/blob/master/doc/compression.md#compression-levels-and-algorithms)) +are explicitly marked. The in-detail textual description for the different +scenarios is described in subsequent sections. 
+ +## Incoming Messages + +![image](images/compression_cookbook_incoming.png) + +## Outgoing Messages + +![image](images/compression_cookbook_outgoing.png) + +## Levels vs Algorithms + +As mentioned in [the relevant discussion on the spec +document](https://github.com/grpc/grpc/blob/master/doc/compression.md#compression-levels-and-algorithms), +compression _levels_ are the primary mechanism for compression selection _at the +server side_. In the future, it'll also be at the client side. The use of levels +abstracts away the intricacies of selecting a concrete algorithm supported by a +peer, on top of removing the burden of choice from the developer. +As of this writing (Q2 2016), clients can only specify compression _algorithms_. +Clients will support levels as soon as an automatic retry/negotiation mechanism +is in place. + +## Per Channel Settings + +Compression may be configured at channel creation. This is a convenience to +avoid having to repeatedly configure compression for every call. Note that any +compression setting on individual [calls](#per-call-settings) or +[messages](#per-message-settings) overrides channel settings. + +The following aspects can be configured at channel-creation time via channel arguments: + +#### Disable Compression _Algorithms_ + +Use the channel argument key +`GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET` (from +[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h)), +takes a 32 bit bitset value. A set bit means the algorithm with that enum value +according to `grpc_compression_algorithm` is _enabled_. +For example, `GRPC_COMPRESS_GZIP` currently has a numeric value of 2. To +enable/disable GZIP for a channel, one would set/clear the 3rd LSB (eg, 0b100 = +0x4). Note that setting/clearing 0th position, that corresponding to +`GRPC_COMPRESS_NONE`, has no effect, as no-compression (a.k.a. _identity_) is +always supported. 
+Incoming messages compressed (ie, encoded) with a disabled algorithm will result +in the call being closed with `GRPC_STATUS_UNIMPLEMENTED`. + +#### Default Compression _Level_ + +**(currently, Q2 2016, only applicable for server side channels. It's ignored +for clients.)** +Use the channel argument key `GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL` (from +[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h)), +valued by an integer corresponding to a value from the `grpc_compression_level` +enum. + +#### Default Compression _Algorithm_ + +Use the channel argument key `GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM` (from +[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h)), +valued by an integer corresponding to a value from the `grpc_compression_level` +enum. + +## Per Call Settings + +### Compression **Level** in Call Responses + +The server requests a compression level via initial metadata. The +`send_initial_metadata` `grpc_op` contains a `maybe_compression_level` field +with two fields, `is_set` and `compression_level`. The former must be set when +actively choosing a level to disambiguate the default value of zero (no +compression) from the proactive selection of no compression. + +The core will receive the request for the compression level and automatically +choose a compression algorithm based on its knowledge about the peer +(communicated by the client via the `grpc-accept-encoding` header. Note that the +absence of this header means no compression is supported by the client/peer). + +### Compression **Algorithm** in Call Responses + +**Server should avoid setting the compression algorithm directly**. Prefer +setting compression levels unless there's a _very_ compelling reason to choose +specific algorithms (benchmarking, testing). 
+ +Selection of concrete compression algorithms is performed by adding a +`(GRPC_COMPRESS_REQUEST_ALGORITHM_KEY, <algorithm-name>)` key-value pair to the +initial metadata, where `GRPC_COMPRESS_REQUEST_ALGORITHM_KEY` is defined in +[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h), +and `<algorithm-name>` is the human readable name of the algorithm as given in +[the HTTP2 spec](https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md) +for `Message-Encoding` (e.g. gzip, identity, etc.). See +[`grpc_compression_algorithm_name`](https://github.com/grpc/grpc/blob/master/src/core/lib/compression/compression.c) +for the mapping between the `grpc_compression_algorithm` enum values and their +textual representation. + +## Per Message Settings + +To disable compression for a specific message, the `flags` field of `grpc_op` +instances of type `GRPC_OP_SEND_MESSAGE` must have its `GRPC_WRITE_NO_COMPRESS` +bit set. Refer to +[`grpc/impl/codegen/compression_types.h`](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/compression_types.h). diff --git a/doc/connection-backoff-interop-test-description.md b/doc/connection-backoff-interop-test-description.md new file mode 100644 index 00000000..eb59e250 --- /dev/null +++ b/doc/connection-backoff-interop-test-description.md @@ -0,0 +1,77 @@ +Connection Backoff Interop Test Descriptions +=============================================== + +This test is to verify the client is reconnecting to the server with correct +backoffs as specified in +[the spec](https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md). +The test server has a port (control_port) running an rpc service for controlling +the server and another port (retry_port) to close any incoming tcp connections. +The test has the following flow: + +1. The server starts listening on control_port. +2. The client calls Start rpc on server control_port. +3. The server starts listening on retry_port. +4. 
The client connects to server retry_port and retries with backoff for 540s, +which translates to about 13 retries. +5. The client calls Stop rpc on server control port. +6. The client checks the response to see whether the server thinks the backoffs +are conforming the spec or do its own check on the backoffs in the response. + +Client and server use +[test.proto](https://github.com/grpc/grpc/blob/master/src/proto/grpc/testing/test.proto). +Each language should implement its own client. The C++ server is shared among +languages. + +Client +------ + +Clients should accept these arguments: +* --server_control_port=PORT + * The server port to connect to for rpc. For example, "8080" +* --server_retry_port=PORT + * The server port to connect to for testing backoffs. For example, "8081" + +The client must connect to the control port without TLS. The client must connect +to the retry port with TLS. The client should either assert on the server +returned backoff status or check the returned backoffs on its own. + +Procedure of client: + +1. Calls Start on server control port with a large deadline or no deadline, +waits for its finish and checks it succeeded. +2. Initiates a channel connection to server retry port, which should perform +reconnections with proper backoffs. A convenient way to achieve this is to +call Start with a deadline of 540s. The rpc should fail with deadline exceeded. +3. Calls Stop on server control port and checks it succeeded. +4. Checks the response to see whether the server thinks the backoffs passed the + test. +5. Optionally, the client can do its own check on the returned backoffs. + + +Server +------ + +A C++ server can be used for the test. Other languages do NOT need to implement +a server. To minimize the network delay, the server binary should run on the +same machine or on a nearby machine (in terms of network distance) with the +client binary. + +A server implements the ReconnectService to its state. 
It also opens a +tcp server on the retry_port, which just shuts down all incoming tcp +connections to simulate connection failures. The server will keep a record of +all the reconnection timestamps and return the connection backoffs in the +response in milliseconds. The server also checks the backoffs to see whether +they conform to the spec and returns whether the client passes the test. + +If the server receives a Start call when another client is being tested, it +finishes the call when the other client is done. If some other host connects +to the server retry_port when a client is being tested, the server will log an +error but likely would think the client fails the test. + +The server accepts these arguments: + +* --control_port=PORT + * The port to listen on for control rpcs. For example, "8080" +* --retry_port=PORT + * The tcp server port. For example, "8081" + diff --git a/doc/connection-backoff.md b/doc/connection-backoff.md new file mode 100644 index 00000000..0e83d9b9 --- /dev/null +++ b/doc/connection-backoff.md @@ -0,0 +1,56 @@ +GRPC Connection Backoff Protocol +================================ + +When we do a connection to a backend which fails, it is typically desirable to +not retry immediately (to avoid flooding the network or the server with +requests) and instead do some form of exponential backoff. + +We have several parameters: + 1. INITIAL_BACKOFF (how long to wait after the first failure before retrying) + 1. MULTIPLIER (factor with which to multiply backoff after a failed retry) + 1. JITTER (by how much to randomize backoffs). + 1. MAX_BACKOFF (upper bound on backoff) + 1. MIN_CONNECT_TIMEOUT (minimum time we're willing to give a connection to + complete) + +## Proposed Backoff Algorithm + +Exponentially back off the start time of connection attempts up to a limit of +MAX_BACKOFF, with jitter. 
+ +``` +ConnectWithBackoff() + current_backoff = INITIAL_BACKOFF + current_deadline = now() + INITIAL_BACKOFF + while (TryConnect(Max(current_deadline, now() + MIN_CONNECT_TIMEOUT)) + != SUCCESS) + SleepUntil(current_deadline) + current_backoff = Min(current_backoff * MULTIPLIER, MAX_BACKOFF) + current_deadline = now() + current_backoff + + UniformRandom(-JITTER * current_backoff, JITTER * current_backoff) + +``` + +With specific parameters of +MIN_CONNECT_TIMEOUT = 20 seconds +INITIAL_BACKOFF = 1 second +MULTIPLIER = 1.6 +MAX_BACKOFF = 120 seconds +JITTER = 0.2 + +Implementations with pressing concerns (such as minimizing the number of wakeups +on a mobile phone) may wish to use a different algorithm, and in particular +different jitter logic. + +Alternate implementations must ensure that connection backoffs started at the +same time disperse, and must not attempt connections substantially more often +than the above algorithm. + +## Reset Backoff + +The back off should be reset to INITIAL_BACKOFF at some time point, so that the +reconnecting behavior is consistent no matter the connection is a newly started +one or a previously disconnected one. + +We choose to reset the Backoff when the SETTINGS frame is received, at that time +point, we know for sure that this connection was accepted by the server. diff --git a/doc/connectivity-semantics-and-api.md b/doc/connectivity-semantics-and-api.md new file mode 100644 index 00000000..48a84767 --- /dev/null +++ b/doc/connectivity-semantics-and-api.md @@ -0,0 +1,154 @@ +gRPC Connectivity Semantics and API +=================================== + +This document describes the connectivity semantics for gRPC channels and the +corresponding impact on RPCs. We then discuss an API. + +States of Connectivity +---------------------- + +gRPC Channels provide the abstraction over which clients can communicate with +servers.The client-side channel object can be constructed using little more +than a DNS name. 
Channels encapsulate a range of functionality including name +resolution, establishing a TCP connection (with retries and backoff) and TLS +handshakes. Channels can also handle errors on established connections and +reconnect, or in the case of HTTP/2 GO_AWAY, re-resolve the name and reconnect. + +To hide the details of all this activity from the user of the gRPC API (i.e., +application code) while exposing meaningful information about the state of a +channel, we use a state machine with five states, defined below: + +CONNECTING: The channel is trying to establish a connection and is waiting to +make progress on one of the steps involved in name resolution, TCP connection +establishment or TLS handshake. This may be used as the initial state for channels upon +creation. + +READY: The channel has successfully established a connection all the way through +TLS handshake (or equivalent) and protocol-level (HTTP/2, etc) handshaking, and +all subsequent attempt to communicate have succeeded (or are pending without any +known failure). + +TRANSIENT_FAILURE: There has been some transient failure (such as a TCP 3-way +handshake timing out or a socket error). Channels in this state will eventually +switch to the CONNECTING state and try to establish a connection again. Since +retries are done with exponential backoff, channels that fail to connect will +start out spending very little time in this state but as the attempts fail +repeatedly, the channel will spend increasingly large amounts of time in this +state. For many non-fatal failures (e.g., TCP connection attempts timing out +because the server is not yet available), the channel may spend increasingly +large amounts of time in this state. + +IDLE: This is the state where the channel is not even trying to create a +connection because of a lack of new or pending RPCs. New RPCs MAY be created +in this state. Any attempt to start an RPC on the channel will push the channel +out of this state to connecting. 
When there has been no RPC activity on a channel +for a specified IDLE_TIMEOUT, i.e., no new or pending (active) RPCs for this +period, channels that are READY or CONNECTING switch to IDLE. Additionally, +channels that receive a GOAWAY when there are no active or pending RPCs should +also switch to IDLE to avoid connection overload at servers that are attempting +to shed connections. We will use a default IDLE_TIMEOUT of 300 seconds (5 minutes). + +SHUTDOWN: This channel has started shutting down. Any new RPCs should fail +immediately. Pending RPCs may continue running till the application cancels them. +Channels may enter this state either because the application explicitly requested +a shutdown or if a non-recoverable error has happened during attempts to connect +communicate . (As of 6/12/2015, there are no known errors (while connecting or +communicating) that are classified as non-recoverable.) Channels that enter this +state never leave this state. + +The following table lists the legal transitions from one state to another and +corresponding reasons. Empty cells denote disallowed transitions. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
From/ToCONNECTINGREADYTRANSIENT_FAILUREIDLESHUTDOWN
CONNECTINGIncremental progress during connection establishmentAll steps needed to establish a connection succeededAny failure in any of the steps needed to establish connectionNo RPC activity on channel for IDLE_TIMEOUTShutdown triggered by application.
READYIncremental successful communication on established channel.Any failure encountered while expecting successful communication on + established channel.No RPC activity on channel for IDLE_TIMEOUT
OR
upon receiving a GOAWAY while there are no pending RPCs.
Shutdown triggered by application.
TRANSIENT_FAILUREWait time required to implement (exponential) backoff is over.Shutdown triggered by application.
IDLEAny new RPC activity on the channelShutdown triggered by application.
SHUTDOWN
+ + +Channel State API +----------------- + +All gRPC libraries will expose a channel-level API method to poll the current +state of a channel. In C++, this method is called GetState and returns an enum +for one of the five legal states. It also accepts a boolean `try_to_connect` to +transition to CONNECTING if the channel is currently IDLE. The boolean should +act as if an RPC occurred, so it should also reset IDLE_TIMEOUT. + +```cpp +grpc_connectivity_state GetState(bool try_to_connect); +``` + +All libraries should also expose an API that enables the application (user of +the gRPC API) to be notified when the channel state changes. Since state +changes can be rapid and race with any such notification, the notification +should just inform the user that some state change has happened, leaving it to +the user to poll the channel for the current state. + +The synchronous version of this API is: + +```cpp +bool WaitForStateChange(grpc_connectivity_state source_state, gpr_timespec deadline); +``` + +which returns `true` when the state is something other than the +`source_state` and `false` if the deadline expires. Asynchronous- and futures-based +APIs should have a corresponding method that allows the application to be +notified when the state of a channel changes. + +Note that a notification is delivered every time there is a transition from any +state to any *other* state. On the other hand the rules for legal state +transition, require a transition from CONNECTING to TRANSIENT_FAILURE and back +to CONNECTING for every recoverable failure, even if the corresponding +exponential backoff requires no wait before retry. The combined effect is that +the application may receive state change notifications that appear spurious. 
+e.g., an application waiting for state changes on a channel that is CONNECTING +may receive a state change notification but find the channel in the same +CONNECTING state on polling for current state because the channel may have +spent infinitesimally small amount of time in the TRANSIENT_FAILURE state. diff --git a/doc/core/combiner-explainer.md b/doc/core/combiner-explainer.md new file mode 100644 index 00000000..20b7f8df --- /dev/null +++ b/doc/core/combiner-explainer.md @@ -0,0 +1,158 @@ +# Combiner Explanation +## Talk by ctiller, notes by vjpai + +Typical way of doing critical section + +``` +mu.lock() +do_stuff() +mu.unlock() +``` + +An alternative way of doing it is + +``` +class combiner { + run(f) { + mu.lock() + f() + mu.unlock() + } + mutex mu; +} + +combiner.run(do_stuff) +``` + +If you have two threads calling combiner, there will be some kind of +queuing in place. It's called `combiner` because you can pass in more +than one do_stuff at once and they will run under a common `mu`. + +The implementation described above has the issue that you're blocking a thread +for a period of time, and this is considered harmful because it's an application thread that you're blocking. + +Instead, get a new property: +* Keep things running in serial execution +* Don't ever sleep the thread +* But maybe allow things to end up running on a different thread from where they were started +* This means that `do_stuff` doesn't necessarily run to completion when `combiner.run` is invoked + +``` +class combiner { + mpscq q; // multi-producer single-consumer queue can be made non-blocking + state s; // is it empty or executing + + run(f) { + if (q.push(f)) { + // q.push returns true if it's the first thing + while (q.pop(&f)) { // modulo some extra work to avoid races + f(); + } + } + } +} +``` + +The basic idea is that the first one to push onto the combiner +executes the work and then keeps executing functions from the queue +until the combiner is drained. 
+ +Our combiner does some additional work, with the motivation of write-batching. + +We have a second tier of `run` called `run_finally`. Anything queued +onto `run_finally` runs after we have drained the queue. That means +that there is essentially a finally-queue. This is not guaranteed to +be final, but it's best-effort. In the process of running the finally +item, we might put something onto the main combiner queue and so we'll +need to re-enter. + +`chttp2` runs all ops in the run state except if it sees a write it puts that into a finally. That way anything else that gets put into the combiner can add to that write. + +``` +class combiner { + mpscq q; // multi-producer single-consumer queue can be made non-blocking + state s; // is it empty or executing + queue finally; // you can only do run_finally when you are already running something from the combiner + + run(f) { + if (q.push(f)) { + // q.push returns true if it's the first thing + loop: + while (q.pop(&f)) { // modulo some extra work to avoid races + f(); + } + while (finally.pop(&f)) { + f(); + } + goto loop; + } + } +} +``` + +So that explains how combiners work in general. In gRPC, there is +`start_batch(..., tag)` and then work only gets activated by somebody +calling `cq::next` which returns a tag. This gives an API-level +guarantee that there will be a thread doing polling to actually make +work happen. However, some operations are not covered by a poller +thread, such as cancellation that doesn't have a completion. Other +callbacks that don't have a completion are the internal work that gets +done before the batch gets completed. We need a condition called +`covered_by_poller` that means that the item will definitely need some +thread at some point to call `cq::next` . This includes those +callbacks that directly cause a completion but also those that are +indirectly required before getting a completion. 
If we can't tell for +sure for a specific path, we have to assumed it is not covered by +poller. + +The above combiner has the problem that it keeps draining for a +potentially infinite amount of time and that can lead to a huge tail +latency for some operations. So we can tweak it by returning to the application +if we know that it is valid to do so: + +``` +while (q.pop(&f)) { + f(); + if (control_can_be_returned && some_still_queued_thing_is_covered_by_poller) { + offload_combiner_work_to_some_other_thread(); + } +} +``` + +`offload` is more than `break`; it does `break` but also causes some +other thread that is currently waiting on a poll to break out of its +poll. This is done by setting up a per-polling-island work-queue +(distributor) wakeup FD. The work-queue is the converse of the combiner; it +tries to spray events onto as many threads as possible to get as much concurrency as possible. + +So `offload` really does: + +``` + workqueue.run(continue_from_while_loop); + break; +``` + +This needs us to add another class variable for a `workqueue` +(which is really conceptually a distributor). + +``` +workqueue::run(f) { + q.push(f) + eventfd.wakeup() +} + +workqueue::readable() { + eventfd.consume(); + q.pop(&f); + f(); + if (!q.empty()) { + eventfd.wakeup(); // spray across as many threads as are waiting on this workqueue + } +} +``` + +In principle, `run_finally` could get starved, but this hasn't +happened in practice. If we were concerned about this, we could put a +limit on how many things come off the regular `q` before the `finally` +queue gets processed. 
+ diff --git a/doc/core/epoll-polling-engine.md b/doc/core/epoll-polling-engine.md new file mode 100644 index 00000000..8ce54a02 --- /dev/null +++ b/doc/core/epoll-polling-engine.md @@ -0,0 +1,121 @@ +# `epoll`-based pollset implementation in gRPC + +Sree Kuchibhotla (sreek@) [May - 2016] +(Design input from Craig Tiller and David Klempner) + +> Status: As of June 2016, this change is implemented and merged. + +> * The bulk of the functionality is in: [ev_epollsig_linux.c](https://github.com/grpc/grpc/blob/master/src/core/lib/iomgr/ev_epollsig_linux.c) +> * Pull request: https://github.com/grpc/grpc/pull/6803 + +## 1. Introduction +The document talks about the proposed changes to `epoll`-based implementation of pollsets in gRPC. Section-2 gives an overview of the current implementation, Section-3 talks about the problems in the current implementation and finally Section-4 talks about the proposed changes. + +## 2. Current `epoll`-based implementation in gRPC + +![image](images/old_epoll_impl.png) + +**Figure 1: Current implementation** + +A gRPC client or a server can have more than one completion queue. Each completion queue creates a pollset. + +The gRPC core library does not create any threads[^1] on its own and relies on the application using the gRPC core library to provide the threads. A thread starts to poll for events by calling the gRPC core surface APIs `grpc_completion_queue_next()` or `grpc_completion_queue_pluck()`. More than one thread can call `grpc_completion_queue_next()`on the same completion queue[^2]. + +A file descriptor can be in more than one completion queue. There are examples in the next section that show how this can happen. + +When an event of interest happens in a pollset, multiple threads are woken up and there are no guarantees on which thread actually ends up performing the work i.e executing the callbacks associated with that event. 
The thread that performs the work finally queues a completion event `grpc_cq_completion` on the appropriate completion queue and "kicks" (i.e wakes ups) the thread that is actually interested in that event (which can be itself - in which case there is no thread hop) + +For example, in **Figure 1**, if `fd1` becomes readable, any one of the threads i.e *Threads 1* to *Threads K* or *Thread P*, might be woken up. Let's say *Thread P* was calling a `grpc_completion_queue_pluck()` and was actually interested in the event on `fd1` but *Thread 1* woke up. In this case, *Thread 1* executes the callbacks and finally kicks *Thread P* by signalling `event_fd_P`. *Thread P* wakes up, realizes that there is a new completion event for it and returns from `grpc_completion_queue_pluck()` to its caller. + +## 3. Issues in the current architecture + +### _Thundering Herds_ + +If multiple threads concurrently call `epoll_wait()`, we are guaranteed that only one thread is woken up if one of the `fds` in the set becomes readable/writable. However, in our current implementation, the threads do not directly call a blocking `epoll_wait()`[^3]. Instead, they call `poll()` on the set containing `[event_fd`[^4]`, epoll_fd]`. **(see Figure 1)** + +Considering the fact that an `fd` can be in multiple `pollsets` and that each `pollset` might have multiple poller threads, it means that whenever an `fd` becomes readable/writable, all the threads in all the `pollsets` (in which that `fd` is present) are woken up. + +The performance impact of this would be more conspicuous on the server side. Here are a two examples of thundering herds on the server side. + +Example 1: Listening fds on server + +* A gRPC server can have multiple server completion queues (i.e completion queues which are used to listen for incoming channels). +* A gRPC server can also listen on more than one TCP-port. +* A listening socket is created for each port the gRPC server would be listening on. 
+* Every listening socket's fd is added to all the server completion queues' pollsets. (Currently we do not do any sharding of the listening fds across these pollsets). + +This means that for every incoming new channel, all the threads waiting on all the pollsets are woken up. + +Example 2: New Incoming-channel fds on server + +* Currently, every new incoming channel's `fd` (i.e the socket `fd` that is returned by doing an `accept()` on the new incoming channel) is added to all the server completion queues' pollsets [^5]. +* Clearly, this would also cause a thundering herd problem for every read on that fd. + +There are other scenarios especially on the client side where an fd can end up being on multiple pollsets which would cause thundering herds on the clients. + + +## 4. Proposed changes to the current `epoll`-based polling implementation: + +The main idea in this proposal is to group 'related' `fds` into a single epoll-based set. This would ensure that only one thread wakes up in case of an event on one of the `fds` in the epoll set. + +To accomplish this, we introduce a new abstraction called `polling_island` which will have an epoll set underneath (See **Figure 2** below). A `polling_island` contains the following: + +* `epoll_fd`: The file descriptor of the underlying epoll set +* `fd_set`: The set of 'fds' in the pollset island i.e in the epoll set (The pollset island merging operation described later requires the list of fds in the pollset island and currently there is no API available to enumerate all the fds in an epoll set) +* `event_fd`: A level triggered _event fd_ that is used to wake up all the threads waiting on this epoll set (Note: This `event_fd` is added to the underlying epoll set during pollset island creation. This is useful in the pollset island merging operation described later) +* `merged_to`: The polling island into which this one merged. See section 4.2 (case 2) for more details on this. 
Also note that if `merged_to` is set, all the other fields in this polling island are not used anymore. + +In this new model, only one thread wakes up whenever an event of interest happens in an epoll set. + +![drawing](images/new_epoll_impl.png) + +**Figure 2: Proposed changes** + +### 4.1 Relation between `fd`, `pollset` and `polling_island`: + +* An `fd` may belong to multiple `pollsets` but belongs to exactly one `polling_island` +* A `pollset` belongs to exactly one `polling_island` +* An `fd` and the `pollset(s)` it belongs to, have the same `polling_island` + +### 4.2 Algorithm to add an `fd` to a `pollset` + +There are two cases to check here: + +* **Case 1:** Both `fd` and `pollset` already belong to the same `polling_island` + * This is straightforward and nothing really needs to be done here +* **Case 2:** The `fd` and `pollset` point to different `polling_islands`: In this case we _merge_ both the polling islands i.e: + * Add all the `fds` from the smaller `polling_island` to the larger `polling_island` and update the `merged_to` pointer on the smaller island to point to the larger island. + * Wake up all the threads waiting on the smaller `polling_island`'s `epoll_fd` (by signaling the `event_fd` on that island) and make them now wait on the larger `polling_island`'s `epoll_fd` + * Update `fd` and `pollset` to now point to the larger `polling_island` + +### 4.3 Directed wakeups: + +The new implementation, just like the current implementation, does not provide us any guarantees that the thread that is woken up is the thread that is actually interested in the event. So the thread that woke up executes the callbacks and finally has to 'kick' the appropriate polling thread interested in the event. + +In the current implementation, every polling thread also had an `event_fd` on which it was listening and hence waking it up was as simple as signaling that `event_fd`. 
However, using an `event_fd` also meant that every thread has to use a `poll()` (on `event_fd` and `epoll_fd`) instead of doing an `epoll_wait()` and this resulted in the thundering herd problems described above. + +The proposal here is to use signals and kicking a thread would just be sending a signal to that thread. Unfortunately there are only a few signals available on POSIX systems and most of them have pre-determined behavior leaving only a few signals `SIGUSR1`, `SIGUSR2` and `SIGRTx (SIGRTMIN to SIGRTMAX)` for custom use. + +The calling application might have registered other signal handlers for these signals. We will provide a new API where the applications can "give a signal number" to the gRPC library to use for this purpose. + +``` +void grpc_use_signal(int signal_num) +``` + +If the calling application does not provide a signal number, then the gRPC library will fall back to using a model similar to the current implementation (where every thread does a blocking `poll()` on its `wakeup_fd` and the `epoll_fd`). The function `psi_wait()` in figure 2 implements this logic. + +**>> **(**NOTE**: Or alternatively, we can implement a turnstile polling (i.e having only one thread calling `epoll_wait()` on the epoll set at any time - while all other threads call poll on their `wakeup_fds`) +in case of not getting a signal number from the applications.) + + +## Notes + +[^1]: Only exception is in case of name-resolution + +[^2]: However, a `grpc_completion_queue_next()` and `grpc_completion_queue_pluck()` must not be called in parallel on the same completion queue + +[^3]: The threads first do a blocking `poll()` with `[wakeup_fd, epoll_fd]`. If the `poll()` returns due to an event of interest in the epoll set, they then call a non-blocking i.e a zero-timeout `epoll_wait()` on the `epoll_fd` + +[^4]: `event_fd` is the linux platform specific implementation of `grpc_wakeup_fd`. 
A `wakeup_fd` is used to wake up polling threads typically when the event for which the polling thread is waiting is already completed by some other thread. It is also used to wake up the polling threads in case of shutdowns or to re-evaluate the poller's interest in the fds to poll (the last scenario is only in case of `poll`-based (not `epoll`-based) implementation of `pollsets`). + +[^5]: See more details about the issue here https://github.com/grpc/grpc/issues/5470 and for a proposed fix here: https://github.com/grpc/grpc/pull/6149 diff --git a/doc/core/grpc-client-server-polling-engine-usage.md b/doc/core/grpc-client-server-polling-engine-usage.md new file mode 100644 index 00000000..3aa3cc16 --- /dev/null +++ b/doc/core/grpc-client-server-polling-engine-usage.md @@ -0,0 +1,32 @@ +# Polling Engine Usage on gRPC client and Server + +_Author: Sree Kuchibhotla (@sreecha) - Sep 2018_ + + +This document talks about how the polling engine is used in gRPC core (both on client and server code paths). + +## gRPC client + +### Relation between Call, Channel (sub-channels), Completion queue, `grpc_pollset` +- A gRPC Call is tied to a channel (more specifically a sub-channel) and a completion queue for the lifetime of the call. +- Once a _sub-channel_ is picked for the call, the file-descriptor (socket fd in case of TCP channels) is added to the pollset corresponding to call's completion queue. (Recall that as per [grpc-cq](grpc-cq.md), a completion queue has a pollset by default) + +![image](../images/grpc-call-channel-cq.png) + + +### Making progress on Async `connect()` on sub-channels (`grpc_pollset_set` usecase) +- A gRPC channel is created between a client and a 'target'. The 'target' may resolve into one or more backend servers. 
+- A sub-channel is the 'connection' from a client to the backend server +- While establishing sub-channels (i.e connections) to the backends, gRPC issues async [`connect()`](https://github.com/grpc/grpc/blob/v1.15.1/src/core/lib/iomgr/tcp_client_posix.cc#L296) calls which may not complete right away. When the `connect()` eventually succeeds, the socket fd is made 'writable' + - This means that the polling engine must be monitoring all these sub-channel `fd`s for writable events and we need to make sure there is a polling thread that monitors all these fds + - To accomplish this, the `grpc_pollset_set` is used in the following way (see picture below) + +![image](../images/grpc-client-lb-pss.png) + +## gRPC server + +- The listening fd (i.e., the socket fd corresponding to the server listening port) is added to each of the server completion queues. Note that in gRPC we use SO_REUSEPORT option and create multiple listening fds but all of them map to the same listening port +- A new incoming channel is assigned to some server completion queue picked randomly (note that we currently [round-robin](https://github.com/grpc/grpc/blob/v1.15.1/src/core/lib/iomgr/tcp_server_posix.cc#L231) over the server completion queues) + +![image](../images/grpc-server-cq-fds.png) + diff --git a/doc/core/grpc-cq.md b/doc/core/grpc-cq.md new file mode 100644 index 00000000..e8338d88 --- /dev/null +++ b/doc/core/grpc-cq.md @@ -0,0 +1,64 @@ +# gRPC Completion Queue + +_Author: Sree Kuchibhotla (@sreecha) - Sep 2018_ + +Code: [completion_queue.cc](https://github.com/grpc/grpc/blob/v1.15.1/src/core/lib/surface/completion_queue.cc) + +This document gives an overview of completion queue architecture and focuses mainly on the interaction between completion queue and the Polling engine layer. 
+ +## Completion queue attributes +Completion queue has two attributes + + - Completion_type: + - GRPC_CQ_NEXT: grpc_completion_queue_next() can be called (but not grpc_completion_queue_pluck()) + - GRPC_CQ_PLUCK: grpc_completion_queue_pluck() can be called (but not grpc_completion_queue_next()) + - GRPC_CQ_CALLBACK: The tags in the queue are function pointers to callbacks. Also, neither next() nor pluck() can be called on this + + - Polling_type: + - GRPC_CQ_NON_POLLING: Threads calling completion_queue_next/pluck do not do any polling + - GRPC_CQ_DEFAULT_POLLING: Threads calling completion_queue_next/pluck do polling + - GRPC_CQ_NON_LISTENING: Functionally similar to default polling except for a boolean attribute that states that the cq is non-listening. This is used by the grpc-server code to not associate any listening sockets with this completion-queue’s pollset + + +## Details + +![image](../images/grpc-cq.png) + + +### **grpc\_completion\_queue\_next()** & **grpc_completion_queue_pluck()** APIS + + +``` C++ +grpc_completion_queue_next(cq, deadline)/pluck(cq, deadline, tag) { + while(true) { + \\ 1. If an event is queued in the completion queue, dequeue and return + \\ (in case of pluck() dequeue only if the tag is the one we are interested in) + + \\ 2. If completion queue shutdown return + + \\ 3. In case of pluck, add (tag, worker) pair to the tag<->worker map on the cq + + \\ 4. Call grpc_pollset_work(cq’s-pollset, deadline) to do polling + \\ Note that if this function found some fds to be readable/writable/error, + \\ it would have scheduled those closures (which may queue completion events + \\ on SOME completion queue - not necessarily this one) + } +} +``` + +### Queuing a completion event (i.e., "tag") + +``` C++ +grpc_cq_end_op(cq, tag) { + \\ 1. Queue the tag in the event queue + + \\ 2. 
Find the pollset corresponding to the completion queue + \\ (i) If the cq is of type GRPC_CQ_NEXT, then KICK ANY worker + \\ i.e., call grpc_pollset_kick(pollset, nullptr) + \\ (ii) If the cq is of type GRPC_CQ_PLUCK, then search the tag<->worker + \\ map on the completion queue to find the worker. Then specifically + \\ kick that worker i.e call grpc_pollset_kick(pollset, worker) +} + +``` + diff --git a/doc/core/grpc-error.md b/doc/core/grpc-error.md new file mode 100644 index 00000000..dc0e0ea9 --- /dev/null +++ b/doc/core/grpc-error.md @@ -0,0 +1,160 @@ +# gRPC Error + +## Background + +`grpc_error` is the c-core's opaque representation of an error. It holds a +collection of integers, strings, timestamps, and child errors that related to +the final error. + +always present are: + +* GRPC_ERROR_STR_FILE and GRPC_ERROR_INT_FILE_LINE - the source location where + the error was generated +* GRPC_ERROR_STR_DESCRIPTION - a human readable description of the error +* GRPC_ERROR_TIME_CREATED - a timestamp indicating when the error happened + +An error can also have children; these are other errors that are believed to +have contributed to this one. By accumulating children, we can begin to root +cause high level failures from low level failures, without having to derive +execution paths from log lines. + +grpc_errors are refcounted objects, which means they need strict ownership +semantics. An extra ref on an error can cause a memory leak, and a missing ref +can cause a crash. + +This document serves as a detailed overview of grpc_error's ownership rules. It +should help people use the errors, as well as help people debug refcount related +errors. + +## Clarification of Ownership + +If a particular function is said to "own" an error, that means it has the +responsibility of calling unref on the error. A function may have access to an +error without ownership of it. 
+ +This means the function may use the error, but must not call unref on it, since +that will be done elsewhere in the code. A function that does not own an error +may explicitly take ownership of it by manually calling GRPC_ERROR_REF. + +## Ownership Rules + +There are three rules of error ownership, which we will go over in detail. + +* If `grpc_error` is returned by a function, the caller owns a ref to that + instance. +* If a `grpc_error` is passed to a `grpc_closure` callback function, then that + function does not own a ref to the error. +* if a `grpc_error` is passed to *any other function*, then that function + takes ownership of the error. + +### Rule 1 + +> If `grpc_error` is returned by a function, the caller owns a ref to that +> instance.* + +For example, in the following code block, error1 and error2 are owned by the +current function. + +```C +grpc_error_handle error1 = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Some error occurred"); +grpc_error_handle error2 = some_operation_that_might_fail(...); +``` + +The current function would have to explicitly call GRPC_ERROR_UNREF on the +errors, or pass them along to a function that would take over the ownership. + +### Rule 2 + +> If a `grpc_error` is passed to a `grpc_closure` callback function, then that +> function does not own a ref to the error. + +A `grpc_closure` callback function is any function that has the signature: + +```C +void (*cb)(void *arg, grpc_error_handle error); +``` + +This means that the error ownership is NOT transferred when a functions calls: + +```C +c->cb(c->cb_arg, err); +``` + +The caller is still responsible for unref-ing the error. + +Note that you'll likely never need to run `c->cb(...)` yourself; the idiomatic +way to execute callbacks is via the `Closure::Run` method, which takes ownership +of the error variable. 
+ +```C +grpc_error_handle error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Some error occurred"); +grpc_core::Closure::Run(DEBUG_LOCATION, c->cb, error); +// current function no longer has ownership of the error +``` + +If you schedule or run a closure, but still need ownership of the error, then +you must explicitly take a reference. + +```C +grpc_error_handle error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Some error occurred"); +grpc_core::Closure::Run(DEBUG_LOCATION, c->cb, GRPC_ERROR_REF(error)); +// do some other things with the error +GRPC_ERROR_UNREF(error); +``` + +Rule 2 is more important to keep in mind when **implementing** `grpc_closure` +callback functions. You must keep in mind that you do not own the error, and +must not unref it. More importantly, you cannot pass it to any function that +would take ownership of the error, without explicitly taking ownership yourself. +For example: + +```C +void on_some_action(void *arg, grpc_error_handle error) { + // this would cause a crash, because some_function will unref the error, + // and the caller of this callback will also unref it. + some_function(error); + + // this callback function must take ownership, so it can give that + // ownership to the function it is calling. + some_function(GRPC_ERROR_REF(error)); +} +``` + +### Rule 3 + +> if a `grpc_error` is passed to *any other function*, then that function takes +> ownership of the error. + +Take the following example: + +```C +grpc_error_handle error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Some error occurred"); +// do some things +some_function(error); +// can't use error anymore! might be gone. +``` + +When some_function is called, it takes over the ownership of the error, and it +will eventually unref it. So the caller can no longer safely use the error. + +If the caller needed to keep using the error (or passing it to other functions), +if would have to take on a reference to it. This is a common pattern seen. 
+ +```C +void func() { + grpc_error_handle error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Some error"); + some_function(GRPC_ERROR_REF(error)); + // do things + some_other_function(GRPC_ERROR_REF(error)); + // do more things + some_last_function(error); +} +``` + +The last call takes ownership and will eventually give the error its final +unref. + +When **implementing** a function that takes an error (and is not a +`grpc_closure` callback function), you must ensure the error is unref-ed either +by doing it explicitly with GRPC_ERROR_UNREF, or by passing the error to a +function that takes over the ownership. diff --git a/doc/core/grpc-polling-engines.md b/doc/core/grpc-polling-engines.md new file mode 100644 index 00000000..e5b700c0 --- /dev/null +++ b/doc/core/grpc-polling-engines.md @@ -0,0 +1,152 @@ +# Polling Engines + +_Author: Sree Kuchibhotla (@sreecha) - Sep 2018_ + + +## Why do we need a 'polling engine' ? + +Polling engine component was created for the following reasons: + +- gRPC code deals with a bunch of file descriptors on which events like descriptor being readable/writable/error have to be monitored +- gRPC code knows the actions to perform when such events happen + - For example: + - `grpc_endpoint` code calls `recvmsg` call when the fd is readable and `sendmsg` call when the fd is writable + - ` tcp_client` connect code issues async `connect` and finishes creating the client once the fd is writable (i.e when the `connect` actually finished) +- gRPC needed some component that can "efficiently" do the above operations __using the threads provided by the applications (i.e., not create any new threads)__. Also by "efficiently" we mean optimized for latency and throughput + + +## Polling Engine Implementations in gRPC +There are multiple polling engine implementations depending on the OS and the OS version. 
Fortunately all of them expose the same interface + +- Linux: + + - **`epollex`** (default but requires kernel version >= 4.5), + - `epoll1` (If `epollex` is not available and glibc version >= 2.9) + - `poll` (If kernel does not have epoll support) +- Mac: **`poll`** (default) +- Windows: (no name) +- One-off polling engines: + - NodeJS : `libuv` polling engine implementation (requires different compile `#define`s) + +## Polling Engine Interface + +### Opaque Structures exposed by the polling engine +The following are the **Opaque** structures exposed by Polling Engine interface (NOTE: Different polling engine implementations have different definitions of these structures) + +- **grpc_fd:** Structure representing a file descriptor +- **grpc_pollset:** A set of one or more grpc_fds that are ‘polled’ for readable/writable/error events. One grpc_fd can be in multiple `grpc_pollset`s +- **grpc_pollset_worker:** Structure representing a ‘polling thread’ - more specifically, the thread that calls `grpc_pollset_work()` API +- **grpc_pollset_set:** A group of `grpc_fd`s, `grpc_pollset`s and `grpc_pollset_set`s (yes, a `grpc_pollset_set` can contain other `grpc_pollset_set`s) + +### Polling engine API + +#### grpc_fd +- **grpc\_fd\_notify\_on\_[read|write|error]** + - Signature: `grpc_fd_notify_on_(grpc_fd* fd, grpc_closure* closure)` + - Register a [closure](https://github.com/grpc/grpc/blob/v1.15.1/src/core/lib/iomgr/closure.h#L67) to be called when the fd becomes readable/writable or has an error (In grpc parlance, we refer to this act as “arming the fd”) + - The closure is called exactly once per event. I.e once the fd becomes readable (or writable or error), the closure is fired and the fd is ‘unarmed’. To be notified again, the fd has to be armed again. 
+ +- **grpc_fd_shutdown** + - Signature: `grpc_fd_shutdown(grpc_fd* fd)` + - Any current (or future) closures registered for readable/writable/error events are scheduled immediately with an error + +- **grpc_fd_orphan** + - Signature: `grpc_fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd, char* reason)` + - Release the `grpc_fd` structure and call `on_done` closure when the operation is complete + - If `release_fd` is set to `nullptr`, then `close()` the underlying fd as well. If not, put the underlying fd in `release_fd` (and do not call `close()`) + - `release_fd` set to non-null in cases where the underlying fd is NOT owned by grpc core (like for example the fds used by C-Ares DNS resolver ) + +#### grpc_pollset + +- **grpc_pollset_add_fd** + - Signature: `grpc_pollset_add_fd(grpc_pollset* ps, grpc_fd *fd)` + - Add fd to pollset + > **NOTE**: There is no `grpc_pollset_remove_fd`. This is because calling `grpc_fd_orphan()` will effectively remove the fd from all the pollsets it’s a part of + +- **grpc_pollset_work** + - Signature: `grpc_pollset_work(grpc_pollset* ps, grpc_pollset_worker** worker, grpc_millis deadline)` + > **NOTE**: `grpc_pollset_work()` requires the pollset mutex to be locked before calling it. Shortly after calling `grpc_pollset_work()`, the function populates the `*worker` pointer (among other things) and releases the mutex. Once `grpc_pollset_work()` returns, the `*worker` pointer is **invalid** and should not be used anymore. See the code in `completion_queue.cc` to see how this is used. 
+ - Poll the fds in the pollset for events AND return when ANY of the following is true: + - Deadline expired + - Some fds in the pollset were found to be readable/writable/error and those associated closures were ‘scheduled’ (but not necessarily executed) + - worker is “kicked” (see `grpc_pollset_kick` for more details) + +- **grpc_pollset_kick** + - Signature: `grpc_pollset_kick(grpc_pollset* ps, grpc_pollset_worker* worker)` + - “Kick the worker” i.e Force the worker to return from grpc_pollset_work() + - If `worker == nullptr`, kick ANY worker active on that pollset + +#### grpc_pollset_set + +- **grpc\_pollset\_set\_[add|del]\_fd** + - Signature: `grpc_pollset_set_[add|del]_fd(grpc_pollset_set* pss, grpc_fd *fd)` + - Add/Remove fd to the `grpc_pollset_set` + +- **grpc\_pollset\_set_[add|del]\_pollset** + - Signature: `grpc_pollset_set_[add|del]_pollset(grpc_pollset_set* pss, grpc_pollset* ps)` + - What does adding a pollset to a pollset_set mean ? + - It means that calling `grpc_pollset_work()` on the pollset will also poll all the fds in the pollset_set i.e semantically, it is similar to adding all the fds inside pollset_set to the pollset. + - This guarantee is no longer true once the pollset is removed from the pollset_set + +- **grpc\_pollset\_set_[add|del]\_pollset\_set** + - Signature: `grpc_pollset_set_[add|del]_pollset_set(grpc_pollset_set* bag, grpc_pollset_set* item)` + - Semantically, this is similar to adding all the fds in the ‘bag’ pollset_set to the ‘item’ pollset_set + + +#### Recap: + +__Relation between grpc_pollset_worker, grpc_pollset and grpc_fd:__ + +![image](../images/grpc-ps-pss-fd.png) + +__grpc_pollset_set__ + +![image](../images/grpc-pss.png) + + +## Polling Engine Implementations + +### epoll1 + +![image](../images/grpc-epoll1.png) + +Code at `src/core/lib/iomgr/ev_epoll1_posix.cc` + +- The logic to choose a designated poller is quite complicated. 
Pollsets are internally sharded into what are called `pollset_neighborhood` (a structure internal to `epoll1` polling engine implementation). `grpc_pollset_workers` that call `grpc_pollset_work` on a given pollset are all queued in a linked-list against the `grpc_pollset`. The head of the linked list is called "root worker" + +- There are as many neighborhoods as the number of cores. A pollset is put in a neighborhood based on the CPU core of the root worker thread. When picking the next designated poller, we always try to find another worker on the current pollset. If there are no more workers in the current pollset, a `pollset_neighborhood` list is scanned to pick the next pollset and worker that could be the new designated poller. + - NOTE: There is room to tune this implementation. All we really need is a good way to maintain a list of `grpc_pollset_workers` with a way to group them per-pollset (needed to implement `grpc_pollset_kick` semantics) and a way to randomly select a new designated poller + +- See [`begin_worker()`](https://github.com/grpc/grpc/blob/v1.15.1/src/core/lib/iomgr/ev_epoll1_linux.cc#L729) function to see how a designated poller is chosen. Similarly [`end_worker()`](https://github.com/grpc/grpc/blob/v1.15.1/src/core/lib/iomgr/ev_epoll1_linux.cc#L916) function is called by the worker that was just out of `epoll_wait()` and will have to choose a new designated poller + + +### epollex + +![image](../images/grpc-epollex.png) + +Code at `src/core/lib/iomgr/ev_epollex_posix.cc` + +- FDs are added to multiple epollsets with EPOLLEXCLUSIVE flag.
This prevents multiple worker threads from waking up from polling whenever the fd is readable/writable + +- A few observations: + + - If multiple pollsets are pointing to the same `Pollable`, then the `pollable` MUST be either empty or of type `PO_FD` (i.e single-fd) + - A multi-pollable has one-and-only-one incoming link from a pollset + - The same FD can be in multiple `Pollable`s (even if one of the `Pollable`s is of type PO_FD) + - There cannot be two `Pollable`s of type PO_FD for the same fd + +- Why do we need `Pollable` of type PO_FD and PO_EMPTY ? + - The main reason is the Sync client API + - We create one new completion queue per call. If we didn’t have PO_EMPTY and PO_FD type pollables, then every call on a given channel will effectively have to create a `Pollable` and hence an epollset. This is because every completion queue automatically creates a pollset and the channel fd will have to be put in that pollset. This clearly requires an epollset to put that fd. Creating an epollset per call (even if we delete the epollset once the call is completed) would mean a lot of sys calls to create/delete epoll fds. This is clearly not a good idea. + - With these new types of `Pollable`s, all pollsets (corresponding to the new per-call completion queue) will initially point to PO_EMPTY global epollset. Then once the channel fd is added to the pollset, the pollset will point to the `Pollable` of type PO_FD containing just that fd (i.e it will reuse the existing `Pollable`). This way, the epoll fd creation/deletion churn is avoided. + + +### Other polling engine implementations (poll and windows polling engine) +- **poll** polling engine: gRPC's `poll` polling engine is quite complicated. 
It uses the `poll()` function to do the polling (and hence it is for platforms like osx where epoll is not available) + - The implementation is further complicated by the fact that poll() is level triggered (just keep this in mind in case you wonder why the code at `src/core/lib/iomgr/ev_poll_posix.cc` is written a certain/seemingly complicated way :)) + +- **Polling engine on Windows**: Windows polling engine looks nothing like other polling engines + - Unlike the grpc polling engines for Unix systems (epollex, epoll1 and poll) Windows endpoint implementation and polling engine implementations are very closely tied together + - Windows endpoint read/write API implementations use the Windows IO API which require specifying an [I/O completion port](https://docs.microsoft.com/en-us/windows/desktop/fileio/i-o-completion-ports) + - In Windows polling engine’s grpc_pollset_work() implementation, ONE of the threads is chosen to wait on the I/O completion port while other threads wait on a condition variable (much like the turnstile polling in epollex/epoll1) + diff --git a/doc/core/moving-to-c++.md b/doc/core/moving-to-c++.md new file mode 100644 index 00000000..db1ae48b --- /dev/null +++ b/doc/core/moving-to-c++.md @@ -0,0 +1,66 @@ +# Moving gRPC core to C++ + +Originally written by ctiller, markdroth, and vjpai in October 2017 + +Revised by veblush in October 2019 + +## Background and Goal + +gRPC core was originally written in C89 for several reasons +(possibility of kernel integration, ease of wrapping, compiler +support, etc). Over time, this was changed to C99 as all relevant +compilers in active use came to support C99 effectively. + +gRPC started allowing to use C++ with a couple of exceptions not to +have C++ library linked such as `libstdc++.so`. 
+(For more detail, see the [proposal](https://github.com/grpc/proposal/blob/master/L6-core-allow-cpp.md)) + +Finally gRPC became ready to use full C++11 with the standard library by the [proposal](https://github.com/grpc/proposal/blob/master/L59-core-allow-cppstdlib.md). + +Throughout all of these transitions, the public header files are committed to remain in C89. + +The goal now is to make the gRPC core implementation true idiomatic +C++ compatible with +[Google's C++ style guide](https://google.github.io/styleguide/cppguide.html). + +## Constraints + +- Most of the features available in C++11 are allowed to be used but there are some exceptions + because gRPC should support old systems. + - Should be built with gcc 4.8, clang 3.3, and Visual C++ 2015. + - Should be run on Linux system with libstdc++ 6.0.9 to support + [manylinux1](https://www.python.org/dev/peps/pep-0513). +- This would limit us not to use modern C++11 standard library such as `filesystem`. + You can easily see whether PR is free from this issue by checking the result of + `Artifact Build Linux` test. +- `thread_local` is not allowed on Apple's products because their old OSes + (e.g. ios < 9.0) don't support `thread_local`. Please use `GPR_TLS_DECL` instead. +- gRPC main libraries (grpc, grpc++, and plugins) cannot use the following C++ libraries: + (Test and example codes are relatively free from these constraints) + - `<thread>`. Use `grpc_core::Thread`. + - `<condition_variable>`. Use `grpc_core::CondVar`. + - `<mutex>`. Use `grpc_core::Mutex`, `grpc_core::MutexLock`, and `grpc_core::ReleasableMutexLock`.
+ - `` + - `` + - `` + - `` + +## Roadmap + +- What should be the phases of getting code converted to idiomatic C++ + - Opportunistically do leaf code that other parts don't depend on + - Spend a little time deciding how to do non-leaf stuff that isn't central or polymorphic (e.g., timer, call combiner) + - For big central or polymorphic interfaces, actually do an API review (for things like transport, filter API, endpoint, closure, exec_ctx, ...) . + - Core internal changes don't need a gRFC, but core surface changes do + - But an API review should include at least a PR with the header change and tests to use it before it gets used more broadly + - iomgr polling for POSIX is a gray area whether it's a leaf or central +- What is the schedule? + - In Q4 2017, if some stuff happens opportunistically, great; otherwise ¯\\\_(ツ)\_/¯ + - More updates as team time becomes available and committed to this project + +## Implications for C++ API and wrapped languages + +- For C++ structs, switch to `using` when possible (e.g., Slice, +ByteBuffer, ...) +- The C++ API implementation might directly start using +`grpc_transport_stream_op_batch` rather than the core surface `grpc_op`. diff --git a/doc/core/pending_api_cleanups.md b/doc/core/pending_api_cleanups.md new file mode 100644 index 00000000..4bfc9ae1 --- /dev/null +++ b/doc/core/pending_api_cleanups.md @@ -0,0 +1,20 @@ +There are times when we make changes that include a temporary shim for +backward-compatibility (e.g., a macro or some other function to preserve +the original API) to avoid having to bump the major version number in +the next release. However, when we do eventually want to release a +feature that does change the API in a non-backward-compatible way, we +will wind up bumping the major version number anyway, at which point we +can take the opportunity to clean up any pending backward-compatibility +shims. 
+ +This file lists all pending backward-compatibility changes that should +be cleaned up the next time we are going to bump the major version +number: + +- remove `GRPC_ARG_MAX_MESSAGE_LENGTH` channel arg from + `include/grpc/impl/codegen/grpc_types.h` (commit `af00d8b`) + (cannot be done until after next grpc release, so that TensorFlow can + use the same code both internally and externally) +- require a C++ runtime for all languages wrapping core. +- remove `GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS` channel arg + diff --git a/doc/core/transport_explainer.md b/doc/core/transport_explainer.md new file mode 100644 index 00000000..665fcdcb --- /dev/null +++ b/doc/core/transport_explainer.md @@ -0,0 +1,197 @@ +# Transport Explainer + +@vjpai + +## Existing Transports + +[gRPC +transports](https://github.com/grpc/grpc/tree/master/src/core/ext/transport) +plug in below the core API (one level below the C++ or other wrapped-language +API). You can write your transport in C or C++ though; currently (Nov 2017) all +the transports are nominally written in C++ though they are idiomatically C. The +existing transports are: + +* [HTTP/2](https://github.com/grpc/grpc/tree/master/src/core/ext/transport/chttp2) +* [Cronet](https://github.com/grpc/grpc/tree/master/src/core/ext/transport/cronet) +* [In-process](https://github.com/grpc/grpc/tree/master/src/core/ext/transport/inproc) + +Among these, the in-process is likely the easiest to understand, though arguably +also the least similar to a "real" sockets-based transport since it is only used +in a single process. + +## Transport stream ops + +In the gRPC core implementation, a fundamental struct is the +`grpc_transport_stream_op_batch` which represents a collection of stream +operations sent to a transport. (Note that in gRPC, _stream_ and _RPC_ are used +synonymously since all RPCs are actually streams internally.) 
The ops in a batch +can include: + +* send\_initial\_metadata + - Client: initiate an RPC + - Server: supply response headers +* recv\_initial\_metadata + - Client: get response headers + - Server: accept an RPC +* send\_message (zero or more) : send a data buffer +* recv\_message (zero or more) : receive a data buffer +* send\_trailing\_metadata + - Client: half-close indicating that no more messages will be coming + - Server: full-close providing final status for the RPC +* recv\_trailing\_metadata: get final status for the RPC + - Server extra: This op shouldn't actually be considered complete until the + server has also sent trailing metadata to provide the other side with final + status +* cancel\_stream: Attempt to cancel an RPC +* collect\_stats: Get stats + +The fundamental responsibility of the transport is to transform between this +internal format and an actual wire format, so the processing of these operations +is largely transport-specific. + +One or more of these ops are grouped into a batch. Applications can start all of +a call's ops in a single batch, or they can split them up into multiple +batches. Results of each batch are returned asynchronously via a completion +queue. + +Internally, we use callbacks to indicate completion. The surface layer creates a +callback when starting a new batch and sends it down the filter stack along with +the batch. The transport must invoke this callback when the batch is complete, +and then the surface layer returns an event to the application via the +completion queue. 
Each batch can have up to 3 callbacks: + +* recv\_initial\_metadata\_ready (called by the transport when the + recv\_initial\_metadata op is complete) +* recv\_message\_ready (called by the transport when the recv_message op is + complete) +* on\_complete (called by the transport when the entire batch is complete) + +## Timelines of transport stream op batches + +The transport's job is to sequence and interpret various possible interleavings +of the basic stream ops. For example, a sample timeline of batches would be: + +1. Client send\_initial\_metadata: Initiate an RPC with a path (method) and authority +1. Server recv\_initial\_metadata: accept an RPC +1. Client send\_message: Supply the input proto for the RPC +1. Server recv\_message: Get the input proto from the RPC +1. Client send\_trailing\_metadata: This is a half-close indicating that the + client will not be sending any more messages +1. Server recv\_trailing\_metadata: The server sees this from the client and + knows that it will not get any more messages. This won't complete yet though, + as described above. +1. Server send\_initial\_metadata, send\_message, send\_trailing\_metadata: A + batch can contain multiple ops, and this batch provides the RPC response + headers, response content, and status. Note that sending the trailing + metadata will also complete the server's receive of trailing metadata. +1. Client recv\_initial\_metadata: The number of ops in one side of the batch + has no relation with the number of ops on the other side of the batch. In + this case, the client is just collecting the response headers. +1. Client recv\_message, recv\_trailing\_metadata: Get the data response and + status + + +There are other possible sample timelines. For example, for client-side streaming, a "typical" sequence would be: + +1. Server: recv\_initial\_metadata + - At API-level, that would be the server requesting an RPC +1. 
Server: recv\_trailing\_metadata + - This is for when the server wants to know the final completion of the RPC + through an `AsyncNotifyWhenDone` API in C++ +1. Client: send\_initial\_metadata, recv\_message, recv\_trailing\_metadata + - At API-level, that's a client invoking a client-side streaming call. The + send\_initial\_metadata is the call invocation, the recv\_message collects + the final response from the server, and the recv\_trailing\_metadata gets + the `grpc::Status` value that will be returned from the call +1. Client: send\_message / Server: recv\_message + - Repeat the above step numerous times; these correspond to a client issuing + `Write` in a loop and a server doing `Read` in a loop until `Read` fails +1. Client: send\_trailing\_metadata / Server: recv\_message that indicates doneness (NULL) + - These correspond to a client issuing `WritesDone` which causes the server's + `Read` to fail +1. Server: send\_message, send\_trailing\_metadata + - These correspond to the server doing `Finish` + +The sends on one side will call their own callbacks when complete, and they will +in turn trigger actions that cause the other side's recv operations to +complete. In some transports, a send can sometimes complete before the recv on +the other side (e.g., in HTTP/2 if there is sufficient flow-control buffer space +available) + +## Other transport duties + +In addition to these basic stream ops, the transport must handle cancellations +of a stream at any time and pass their effects to the other side. For example, +in HTTP/2, this triggers a `RST_STREAM` being sent on the wire. The transport +must perform operations like pings and statistics that are used to shape +transport-level characteristics like flow control (see, for example, their use +in the HTTP/2 transport). 
+ +## Putting things together with detail: Sending Metadata + +* API layer: `map` that is specific to this RPC +* Core surface layer: array of `{slice, slice}` pairs where each slice + references an underlying string +* [Core transport + layer](https://github.com/grpc/grpc/tree/master/src/core/lib/transport): list + of `{slice, slice}` pairs that includes the above plus possibly some general + metadata (e.g., Method and Authority for initial metadata) +* [Specific transport + layer](https://github.com/grpc/grpc/tree/master/src/core/ext/transport): + - Either send it to the other side using transport-specific API (e.g., Cronet) + - Or have it sent through the [iomgr/endpoint + layer](https://github.com/grpc/grpc/tree/master/src/core/lib/iomgr) (e.g., + HTTP/2) + - Or just manipulate pointers to get it from one side to the other (e.g., + In-process) + +## Requirements for any transport + +Each transport implements several operations in a vtbl (may change to actual +virtual functions as transport moves to idiomatic C++). + +The most important and common one is `perform_stream_op`. This function +processes a single stream op batch on a specific stream that is associated with +a specific transport: + +* Gets the 6 ops/cancel passed down from the surface +* Pass metadata from one side to the other as described above +* Transform messages between slice buffer structure and stream of bytes to pass + to other side + - May require insertion of extra bytes (e.g., per-message headers in HTTP/2) +* React to metadata to preserve expected orderings (*) +* Schedule invocation of completion callbacks + +There are other functions in the vtbl as well. 
+ +* `perform_transport_op` + - Configure the transport instance for the connectivity state change notifier + or the server-side accept callback + - Disconnect transport or set up a goaway for later streams +* `init_stream` + - Starts a stream from the client-side + - (*) Server-side of the transport must call `accept_stream_cb` when a new + stream is available + * Triggers request-matcher +* `destroy_stream`, `destroy_transport` + - Free up data related to a stream or transport +* `set_pollset`, `set_pollset_set`, `get_endpoint` + - Map each specific instance of the transport to FDs being used by iomgr (for + HTTP/2) + - Get a pointer to the endpoint structure that actually moves the data + (wrapper around a socket for HTTP/2) + +## Book-keeping responsibilities of the transport layer + +A given transport must keep all of its transport and streams ref-counted. This +is essential to make sure that no struct disappears before it is done being +used. + +A transport must also preserve relevant orders for the different categories of +ops on a stream, as described above. A transport must also make sure that all +relevant batch operations have completed before scheduling the `on_complete` +closure for a batch. Further examples include the idea that the server logic +expects to not complete recv\_trailing\_metadata until after it actually sends +trailing metadata since it would have already found this out by seeing a NULL’ed +recv\_message. This is considered part of the transport's duties in preserving +orders. diff --git a/doc/cpp-style-guide.md b/doc/cpp-style-guide.md new file mode 100644 index 00000000..8211703d --- /dev/null +++ b/doc/cpp-style-guide.md @@ -0,0 +1,8 @@ +GRPC C++ STYLE GUIDE +===================== + +The majority of gRPC's C++ requirements are drawn from the [Google C++ style +guide](https://google.github.io/styleguide/cppguide.html).
Additionally, +as in C, layout rules are defined by clang-format, and all code +should be passed through clang-format. A (docker-based) script to do +so is included in [tools/distrib/clang_format_code.sh](../tools/distrib/clang_format_code.sh). diff --git a/doc/cpp/pending_api_cleanups.md b/doc/cpp/pending_api_cleanups.md new file mode 100644 index 00000000..5c231eda --- /dev/null +++ b/doc/cpp/pending_api_cleanups.md @@ -0,0 +1,22 @@ +There are times when we make changes that include a temporary shim for +backward-compatibility (e.g., a macro or some other function to preserve +the original API) to avoid having to bump the major version number in +the next release. However, when we do eventually want to release a +feature that does change the API in a non-backward-compatible way, we +will wind up bumping the major version number anyway, at which point we +can take the opportunity to clean up any pending backward-compatibility +shims. + +This file lists all pending backward-compatibility changes that should +be cleaned up the next time we are going to bump the major version +number: + +- remove `ServerBuilder::SetMaxMessageSize()` method from + `include/grpc++/server_builder.h` (commit `6980362`) +- remove `ClientContext::set_fail_fast()` method from + `include/grpc++/impl/codegen/client_context.h` (commit `9477724`) +- remove directory `include/grpc++` and all headers in it + (commit `eb06572`) +- make all `Request` and `Mark` methods in `grpc::Service` take a + `size_t` argument for `index` rather than `int` (since that is only + used as a vector index) diff --git a/doc/cpp/perf_notes.md b/doc/cpp/perf_notes.md new file mode 100644 index 00000000..c8755755 --- /dev/null +++ b/doc/cpp/perf_notes.md @@ -0,0 +1,29 @@ +# C++ Performance Notes + +## Streaming write buffering + +Generally, each write operation (Write(), WritesDone()) implies a syscall. 
+gRPC will try to batch together separate write operations from different +threads, but currently cannot automatically infer batching in a single stream. + +If message k+1 in a stream does not rely on responses from message k, it's +possible to enable write batching by passing a WriteOptions argument to Write +with the buffer_hint set: + +~~~{.cpp} +stream_writer->Write(message, WriteOptions().set_buffer_hint()); +~~~ + +The write will be buffered until one of the following is true: +- the per-stream buffer is filled (controllable with the channel argument + GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE) - this prevents infinite buffering leading + to OOM +- a subsequent Write without buffer_hint set is posted +- the call is finished for writing (WritesDone() called on the client, + or Finish() called on an async server stream, or the service handler returns + for a sync server stream) + +## Completion Queues and Threading in the Async API + +Right now, the best performance trade-off is having numcpu's threads and one +completion queue per thread. diff --git a/doc/csharp/server_reflection.md b/doc/csharp/server_reflection.md new file mode 100644 index 00000000..be71424d --- /dev/null +++ b/doc/csharp/server_reflection.md @@ -0,0 +1,54 @@ +# gRPC C# Server Reflection + +This document shows how to use gRPC Server Reflection in gRPC C#. +Please see [C++ Server Reflection Tutorial](../server_reflection_tutorial.md) +for general information and more examples of how to use server reflection. + +## Enable server reflection in C# servers + +C# Server Reflection is an add-on library. +To use it, first install the [Grpc.Reflection](https://www.nuget.org/packages/Grpc.Reflection/) +Nuget package into your project. + +Note that with C# you need to manually register the service +descriptors with the reflection service implementation when creating a server +(this isn't necessary with e.g.
C++ or Java) +```csharp +// the reflection service will be aware of "Greeter" and "ServerReflection" services. +var reflectionServiceImpl = new ReflectionServiceImpl(Greeter.Descriptor, ServerReflection.Descriptor); +server = new Server() +{ + Services = + { + // the server will serve 2 services, the Greeter and the ServerReflection + Greeter.BindService(new GreeterImpl()), + ServerReflection.BindService(reflectionServiceImpl) + }, + Ports = { { "localhost", 50051, ServerCredentials.Insecure } } +}; +server.Start(); +``` + +After starting the server, you can verify that the server reflection +is working properly by using the [`grpc_cli` command line +tool](https://github.com/grpc/grpc/blob/master/doc/command_line_tool.md): + + ```sh + $ grpc_cli ls localhost:50051 + ``` + + output: + ```sh + helloworld.Greeter + grpc.reflection.v1alpha.ServerReflection + ``` + + For more examples and instructions how to use the `grpc_cli` tool, + please refer to the [`grpc_cli` documentation](../command_line_tool.md) + and the [C++ Server Reflection Tutorial](../server_reflection_tutorial.md). + +## Additional Resources + +The [Server Reflection Protocol](../server-reflection.md) provides detailed +information about how the server reflection works and describes the server reflection +protocol in detail. diff --git a/doc/environment_variables.md b/doc/environment_variables.md new file mode 100644 index 00000000..8a19ecaf --- /dev/null +++ b/doc/environment_variables.md @@ -0,0 +1,173 @@ +gRPC environment variables +-------------------------- + +gRPC C core based implementations (those contained in this repository) expose +some configuration as environment variables that can be set. + +* grpc_proxy, https_proxy, http_proxy + The URI of the proxy to use for HTTP CONNECT support. These variables are + checked in order, and the first one that has a value is used. 
+ +* no_grpc_proxy, no_proxy + A comma separated list of hostnames to connect to without using a proxy even + if a proxy is set. These variables are checked in order, and the first one + that has a value is used. + +* GRPC_ABORT_ON_LEAKS + A debugging aid to cause a call to abort() when gRPC objects are leaked past + grpc_shutdown(). Set to 1 to cause the abort, if unset or 0 it does not + abort the process. + +* GOOGLE_APPLICATION_CREDENTIALS + The path to find the credentials to use when Google credentials are created + +* GRPC_SSL_CIPHER_SUITES + A colon separated list of cipher suites to use with OpenSSL + Defaults to: + ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384 + +* GRPC_DEFAULT_SSL_ROOTS_FILE_PATH + PEM file to load SSL roots from + +* GRPC_POLL_STRATEGY [posix-style environments only] + Declares which polling engines to try when starting gRPC. + This is a comma-separated list of engines, which are tried in priority order + first -> last. + Available polling engines include: + - epoll (linux-only) - a polling engine based around the epoll family of + system calls + - poll - a portable polling engine based around poll(), intended to be a + fallback engine when nothing better exists + - legacy - the (deprecated) original polling engine for gRPC + +* GRPC_TRACE + A comma separated list of tracers that provide additional insight into how + gRPC C core is processing requests via debug logs. 
Available tracers include: + - api - traces api calls to the C core + - bdp_estimator - traces behavior of bdp estimation logic + - call_error - traces the possible errors contributing to final call status + - cares_resolver - traces operations of the c-ares based DNS resolver + - cares_address_sorting - traces operations of the c-ares based DNS + resolver's resolved address sorter + - cds_lb - traces cds LB policy + - channel - traces operations on the C core channel stack + - client_channel_call - traces client channel call batch activity + - client_channel_routing - traces client channel call routing, including + resolver and load balancing policy interaction + - compression - traces compression operations + - connectivity_state - traces connectivity state changes to channels + - cronet - traces state in the cronet transport engine + - executor - traces grpc's internal thread pool ('the executor') + - glb - traces the grpclb load balancer + - handshaker - traces handshaking state + - health_check_client - traces health checking client code + - http - traces state in the http2 transport engine + - http2_stream_state - traces all http2 stream state mutations. 
+ - http1 - traces HTTP/1.x operations performed by gRPC + - inproc - traces the in-process transport + - http_keepalive - traces gRPC keepalive pings + - flowctl - traces http2 flow control + - op_failure - traces error information when failure is pushed onto a + completion queue + - pick_first - traces the pick first load balancing policy + - plugin_credentials - traces plugin credentials + - pollable_refcount - traces reference counting of 'pollable' objects (only + in DEBUG) + - priority_lb - traces priority LB policy + - resource_quota - trace resource quota objects internals + - ring_hash_lb - traces the ring hash load balancing policy + - rls_lb - traces the RLS load balancing policy + - round_robin - traces the round_robin load balancing policy + - queue_pluck + - sdk_authz - traces sdk authorization + - server_channel - lightweight trace of significant server channel events + - secure_endpoint - traces bytes flowing through encrypted channels + - subchannel - traces the connectivity state of subchannel + - subchannel_pool - traces subchannel pool + - timer - timers (alarms) in the grpc internals + - timer_check - more detailed trace of timer logic in grpc internals + - transport_security - traces metadata about secure channel establishment + - tcp - traces bytes in and out of a channel + - tsi - traces tsi transport security + - weighted_target_lb - traces weighted_target LB policy + - xds_client - traces xds client + - xds_cluster_manager_lb - traces cluster manager LB policy + - xds_cluster_impl_lb - traces cluster impl LB policy + - xds_cluster_resolver_lb - traces xds cluster resolver LB policy + - xds_resolver - traces xds resolver + + The following tracers will only run in binaries built in DEBUG mode. 
This is + accomplished by invoking `CONFIG=dbg make ` + - metadata - tracks creation and mutation of metadata + - combiner - traces combiner lock state + - call_combiner - traces call combiner state + - closure - tracks closure creation, scheduling, and completion + - fd_trace - traces fd create(), shutdown() and close() calls for channel fds. + Also traces epoll fd create()/close() calls in epollex polling engine + traces epoll-fd creation/close calls for epollex polling engine + - pending_tags - traces still-in-progress tags on completion queues + - polling - traces the selected polling engine + - polling_api - traces the api calls to polling engine + - subchannel_refcount + - queue_refcount + - error_refcount + - stream_refcount + - workqueue_refcount + - fd_refcount + - cq_refcount + - auth_context_refcount + - security_connector_refcount + - resolver_refcount + - lb_policy_refcount + - chttp2_refcount + + 'all' can additionally be used to turn all traces on. + Individual traces can be disabled by prefixing them with '-'. + + 'refcount' will turn on all of the tracers for refcount debugging. + + if 'list_tracers' is present, then all of the available tracers will be + printed when the program starts up. + + Example: + export GRPC_TRACE=all,-pending_tags + +* GRPC_VERBOSITY + Default gRPC logging verbosity - one of: + - DEBUG - log all gRPC messages + - INFO - log INFO and ERROR message + - ERROR - log only errors (default) + - NONE - won't log any + +* GRPC_STACKTRACE_MINLOGLEVEL + Minimum loglevel to print the stack-trace - one of DEBUG, INFO, ERROR, and NONE. + NONE is a default value. + +* GRPC_TRACE_FUZZER + if set, the fuzzers will output trace (it is usually suppressed). + +* GRPC_DNS_RESOLVER + Declares which DNS resolver to use. The default is ares if gRPC is built with + c-ares support. Otherwise, the value of this environment variable is ignored. 
+ Available DNS resolvers include: + - ares (default on most platforms except iOS, Android or Node) - a DNS + resolver based around the c-ares library + - native - a DNS resolver based around getaddrinfo(), creates a new thread to + perform name resolution + +* GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS + Default: 5000 + Declares the interval between two backup polls on client channels. These polls + are run in the timer thread so that gRPC can process connection failures while + there is no active polling thread. They help reconnect disconnected client + channels (mostly due to idleness), so that the next RPC on this channel won't + fail. Set to 0 to turn off the backup polls. + +* GRPC_EXPERIMENTAL_DISABLE_FLOW_CONTROL + if set, flow control will be effectively disabled. Max out all values and + assume the remote peer does the same. Thus we can ignore any flow control + bookkeeping, error checking, and decision making + +* grpc_cfstream + set to 1 to turn on CFStream experiment. With this experiment gRPC uses CFStream API to make TCP + connections. The option is only available on iOS platform and when macro GRPC_CFSTREAM is defined. diff --git a/doc/fail_fast.md b/doc/fail_fast.md new file mode 100644 index 00000000..ff3d2353 --- /dev/null +++ b/doc/fail_fast.md @@ -0,0 +1 @@ +Moved to [wait-for-ready.md](wait-for-ready.md) diff --git a/doc/fork_support.md b/doc/fork_support.md new file mode 100644 index 00000000..ff1018ab --- /dev/null +++ b/doc/fork_support.md @@ -0,0 +1,46 @@ +# Background # + +In Python, multithreading is ineffective at concurrency for CPU bound tasks +due to the GIL (global interpreter lock). Extension modules can release +the GIL in CPU bound tasks, but that isn't an option in pure Python. +Users use libraries such as multiprocessing, subprocess, concurrent.futures.ProcessPoolExecutor, +etc, to work around the GIL. These modules call ```fork()``` under the hood.
Various issues have +been reported when using these modules with gRPC Python. gRPC Python wraps +gRPC core, which uses multithreading for performance, and hence doesn't support ```fork()```. +Historically, we didn't support forking in gRPC, but some users seemed +to be doing fine until their code started to break on version 1.6. This was +likely caused by the addition of background c-threads and a background +Python thread. + +# Current Status # + +## 1.11 ## +The background Python thread was removed entirely. This allows forking +after creating a channel. However, the channel must not have issued any +RPCs prior to the fork. Attempting to fork with an active channel that +has been used can result in deadlocks/corrupted wire data. + +## 1.9 ## +A regression was noted in cases where users are doing fork/exec. This +was due to ```pthread_atfork()``` handler that was added in 1.7 to partially +support forking in gRPC. A deadlock can happen when pthread_atfork +handler is running, and an application thread is calling into gRPC. +We have provided a workaround for this issue by allowing users to turn +off the handler using env flag ```GRPC_ENABLE_FORK_SUPPORT=False```. +This should be set whenever a user expects to always call exec +immediately following fork. It will disable the fork handlers. + +## 1.7 ## +A ```pthread_atfork()``` handler was added in 1.7 to automatically shut down +the background c-threads when fork was called. This does not shut down the +background Python thread, so users could not have any open channels when +forking. + +# Future Work # + +## 1.13 ## +The workaround when using fork/exec by setting +```GRPC_ENABLE_FORK_SUPPORT=False``` should no longer be needed. Following +[this PR](https://github.com/grpc/grpc/pull/14647), fork +handlers will not automatically run when multiple threads are calling +into gRPC. 
diff --git a/doc/g_stands_for.md b/doc/g_stands_for.md new file mode 100644 index 00000000..5223302e --- /dev/null +++ b/doc/g_stands_for.md @@ -0,0 +1,44 @@ +'g' stands for something different every gRPC release: + +- 1.0 'g' stands for ['gRPC'](https://github.com/grpc/grpc/tree/v1.0.x) +- 1.1 'g' stands for ['good'](https://github.com/grpc/grpc/tree/v1.1.x) +- 1.2 'g' stands for ['green'](https://github.com/grpc/grpc/tree/v1.2.x) +- 1.3 'g' stands for ['gentle'](https://github.com/grpc/grpc/tree/v1.3.x) +- 1.4 'g' stands for ['gregarious'](https://github.com/grpc/grpc/tree/v1.4.x) +- 1.6 'g' stands for ['garcia'](https://github.com/grpc/grpc/tree/v1.6.x) +- 1.7 'g' stands for ['gambit'](https://github.com/grpc/grpc/tree/v1.7.x) +- 1.8 'g' stands for ['generous'](https://github.com/grpc/grpc/tree/v1.8.x) +- 1.9 'g' stands for ['glossy'](https://github.com/grpc/grpc/tree/v1.9.x) +- 1.10 'g' stands for ['glamorous'](https://github.com/grpc/grpc/tree/v1.10.x) +- 1.11 'g' stands for ['gorgeous'](https://github.com/grpc/grpc/tree/v1.11.x) +- 1.12 'g' stands for ['glorious'](https://github.com/grpc/grpc/tree/v1.12.x) +- 1.13 'g' stands for ['gloriosa'](https://github.com/grpc/grpc/tree/v1.13.x) +- 1.14 'g' stands for ['gladiolus'](https://github.com/grpc/grpc/tree/v1.14.x) +- 1.15 'g' stands for ['glider'](https://github.com/grpc/grpc/tree/v1.15.x) +- 1.16 'g' stands for ['gao'](https://github.com/grpc/grpc/tree/v1.16.x) +- 1.17 'g' stands for ['gizmo'](https://github.com/grpc/grpc/tree/v1.17.x) +- 1.18 'g' stands for ['goose'](https://github.com/grpc/grpc/tree/v1.18.x) +- 1.19 'g' stands for ['gold'](https://github.com/grpc/grpc/tree/v1.19.x) +- 1.20 'g' stands for ['godric'](https://github.com/grpc/grpc/tree/v1.20.x) +- 1.21 'g' stands for ['gandalf'](https://github.com/grpc/grpc/tree/v1.21.x) +- 1.22 'g' stands for ['gale'](https://github.com/grpc/grpc/tree/v1.22.x) +- 1.23 'g' stands for ['gangnam'](https://github.com/grpc/grpc/tree/v1.23.x) +- 1.24 'g' stands for 
['ganges'](https://github.com/grpc/grpc/tree/v1.24.x) +- 1.25 'g' stands for ['game'](https://github.com/grpc/grpc/tree/v1.25.x) +- 1.26 'g' stands for ['gon'](https://github.com/grpc/grpc/tree/v1.26.x) +- 1.27 'g' stands for ['guantao'](https://github.com/grpc/grpc/tree/v1.27.x) +- 1.28 'g' stands for ['galactic'](https://github.com/grpc/grpc/tree/v1.28.x) +- 1.29 'g' stands for ['gringotts'](https://github.com/grpc/grpc/tree/v1.29.x) +- 1.30 'g' stands for ['gradius'](https://github.com/grpc/grpc/tree/v1.30.x) +- 1.31 'g' stands for ['galore'](https://github.com/grpc/grpc/tree/v1.31.x) +- 1.32 'g' stands for ['giggle'](https://github.com/grpc/grpc/tree/v1.32.x) +- 1.33 'g' stands for ['geeky'](https://github.com/grpc/grpc/tree/v1.33.x) +- 1.34 'g' stands for ['gauntlet'](https://github.com/grpc/grpc/tree/v1.34.x) +- 1.35 'g' stands for ['gecko'](https://github.com/grpc/grpc/tree/v1.35.x) +- 1.36 'g' stands for ['gummybear'](https://github.com/grpc/grpc/tree/v1.36.x) +- 1.37 'g' stands for ['gilded'](https://github.com/grpc/grpc/tree/v1.37.x) +- 1.38 'g' stands for ['guadalupe_river_park_conservancy'](https://github.com/grpc/grpc/tree/v1.38.x) +- 1.39 'g' stands for ['goofy'](https://github.com/grpc/grpc/tree/v1.39.x) +- 1.40 'g' stands for ['guileless'](https://github.com/grpc/grpc/tree/v1.40.x) +- 1.41 'g' stands for ['goat'](https://github.com/grpc/grpc/tree/v1.41.x) +- 1.42 'g' stands for ['granola'](https://github.com/grpc/grpc/tree/master) diff --git a/doc/grpc_release_schedule.md b/doc/grpc_release_schedule.md new file mode 100644 index 00000000..a33ace89 --- /dev/null +++ b/doc/grpc_release_schedule.md @@ -0,0 +1,43 @@ +# gRPC Release Schedule + +Below is the release schedule for gRPC [Java](https://github.com/grpc/grpc-java/releases), [Go](https://github.com/grpc/grpc-go/releases) and [Core](https://github.com/grpc/grpc/releases) and its dependent languages C++, C#, Objective-C, PHP, Python and Ruby. 
+ +Releases are scheduled every six weeks on Tuesdays on a best effort basis. In some unavoidable situations a release may be delayed or released early or a language may skip a release altogether and do the next release to catch up with other languages. See the past releases in the links above. A six-week cycle gives us a good balance between delivering new features/fixes quickly and keeping the release overhead low. + +The gRPC release support policy can be found [here](https://grpc.io/docs/what-is-grpc/faq/#how-long-are-grpc-releases-supported-for). + +Releases are cut from release branches. For Core and Java repos, the release branch is cut two weeks before the scheduled release date. For Go, the branch is cut just before the release. An RC (release candidate) is published for Core and its dependent languages just after the branch cut. This RC is later promoted to release version if no further changes are made to the release branch. We do our best to keep head of master branch stable at all times regardless of release schedule. Daily build packages from master branch for C#, PHP, Python, Ruby and Protoc plugins are published on [packages.grpc.io](https://packages.grpc.io/). If you depend on gRPC in production we recommend setting up your CI system to test the RCs and, if possible, the daily builds. + +Names of gRPC releases are [here](https://github.com/grpc/grpc/blob/master/doc/g_stands_for.md).
+ +Release |Scheduled Branch Cut|Scheduled Release Date +--------|--------------------|------------- +v1.17.0 |Nov 19, 2018 |Dec 4, 2018 +v1.18.0 |Jan 2, 2019 |Jan 15, 2019 +v1.19.0 |Feb 12, 2019 |Feb 26, 2019 +v1.20.0 |Mar 26, 2019 |Apr 9, 2019 +v1.21.0 |May 7, 2019 |May 21, 2019 +v1.22.0 |Jun 18, 2019 |Jul 2, 2019 +v1.23.0 |Jul 30, 2019 |Aug 13, 2019 +v1.24.0 |Sept 10, 2019 |Sept 24, 2019 +v1.25.0 |Oct 22, 2019 |Nov 5, 2019 +v1.26.0 |Dec 3, 2019 |Dec 17, 2019 +v1.27.0 |Jan 14, 2020 |Jan 28, 2020 +v1.28.0 |Feb 25, 2020 |Mar 10, 2020 +v1.29.0 |Apr 7, 2020 |Apr 21, 2020 +v1.30.0 |May 19, 2020 |Jun 2, 2020 +v1.31.0 |Jul 14, 2020 |Jul 28, 2020 +v1.32.0 |Aug 25, 2020 |Sep 8, 2020 +v1.33.0 |Oct 6, 2020 |Oct 20, 2020 +v1.34.0 |Nov 17, 2020 |Dec 1, 2020 +v1.35.0 |Dec 29, 2020 |Jan 12, 2021 +v1.36.0 |Feb 9, 2021 |Feb 23, 2021 +v1.37.0 |Mar 23, 2021 |Apr 6, 2021 +v1.38.0 |May 4, 2021 |May 18, 2021 +v1.39.0 |Jun 15, 2021 |Jun 29, 2021 +v1.40.0 |Jul 27, 2021 |Aug 10, 2021 +v1.41.0 |Sep 7, 2021 |Sep 21, 2021 +v1.42.0 |Oct 19, 2021 |Nov 2, 2021 +v1.43.0 |Nov 30, 2021 |Dec 14, 2021 +v1.44.0 |Jan 11, 2022 |Jan 25, 2022 +v1.45.0 |Feb 22, 2022 |Mar 8, 2022 diff --git a/doc/grpc_xds_features.md b/doc/grpc_xds_features.md new file mode 100644 index 00000000..44ebfc0e --- /dev/null +++ b/doc/grpc_xds_features.md @@ -0,0 +1,66 @@ +# xDS Features in gRPC + +This document lists the [xDS](https://github.com/envoyproxy/data-plane-api/tree/master/envoy/api/v2) +features supported in various gRPC language implementations and versions. + +Note that a gRPC client will simply ignore the configuration of a feature it +does not support. The gRPC client does not generate a log +to indicate that some configuration was ignored. It is impractical to generate +a log and keep it up-to-date because xDS has a large number of APIs that gRPC +does not support and the APIs keep evolving too. 
In the case where an xDS +field corresponding to a feature is supported but the value configured for +that field is not supported, a gRPC client will NACK such a configuration. +We recommend reading the +[first gRFC](https://github.com/grpc/proposal/blob/master/A27-xds-global-load-balancing.md) +on xDS support in gRPC to understand the design philosophy. + +Not all cluster load balancing policies are supported. A gRPC client will +NACK the configuration that contains unsupported cluster load balancing +policy. This will cause all cluster configurations to be rejected by the +client because the xDS protocol currently requires rejecting all resources in +a given response, rather than being able to reject only an individual resource +from the response. Due to this limitation, you must ensure that all clients +support the required cluster load balancing policy before configuring that +policy for a service. For example, if you change the ROUND_ROBIN policy to +RING_HASH, you must ensure that all the clients are upgraded to a version that +supports RING_HASH. + +The EDS policy will *not* support +[overprovisioning](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/overprovisioning), +which is different from Envoy. Envoy takes the overprovisioning into +account in both [locality-weighted load balancing](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/locality_weight) +and [priority failover](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/priority), +but gRPC assumes that the xDS server will update it to redirect traffic +when this kind of graceful failover is needed. 
gRPC will send the +[`envoy.lb.does_not_support_overprovisioning` client +feature](https://github.com/envoyproxy/envoy/pull/10136) to the xDS +server to tell the xDS server that it will not perform graceful failover; +xDS server implementations may use this to decide whether to perform +graceful failover themselves. + +The EDS policy will not support per-endpoint stats; it will report only +per-locality stats. + +An [`lb_endpoint`](https://github.com/envoyproxy/envoy/blob/12a4bc430eaf440ceb0d11286cfbd4c16b79cdd1/api/envoy/api/v2/endpoint/endpoint_components.proto#L72) +is ignored if the `health_status` is not HEALTHY or UNKNOWN. +The optional `load_balancing_weight` is always ignored. + +Initially, only `google_default` channel creds will be supported +to authenticate with the xDS server. + +The gRPC language implementations not listed in the table below do not support +xDS features. + +Features | gRFCs | [C++, Python,
Ruby, PHP](https://github.com/grpc/grpc/releases) | [Java](https://github.com/grpc/grpc-java/releases) | [Go](https://github.com/grpc/grpc-go/releases) | [Node](https://github.com/grpc/grpc-node/releases) +---------|--------|--------------|------|------|------ +**xDS Infrastructure in gRPC client channel:**
  • LDS->RDS->CDS->EDS flow
  • ADS stream
| [A27](https://github.com/grpc/proposal/blob/master/A27-xds-global-load-balancing.md) | v1.30.0 | v1.30.0 | v1.30.0 | v1.2.0 | +**Load Balancing:**
  • [Virtual host](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/route/route_components.proto#route-virtualhost) domains matching
  • Only default path ("" or "/") matching
  • Priority-based weighted round-robin locality picking
  • Round-robin endpoint picking within locality
  • [Cluster](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/route/route_components.proto#envoy-api-msg-route-routeaction) route action
  • Client-side Load reporting via [LRS](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/load_stats/v2/lrs.proto)
| [A27](https://github.com/grpc/proposal/blob/master/A27-xds-global-load-balancing.md) | v1.30.0 | v1.30.0 | v1.30.0 | v1.2.0 | +Request matching based on:
  • [Path](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/route/route_components.proto#route-routematch) (prefix, full path and safe regex)
    • [case_sensitive](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/route/route_components.proto#route-routematch) must be true else config is NACKed
  • [Headers](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/route/route_components.proto#route-headermatcher)
Request routing to multiple clusters based on [weights](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/route/route_components.proto#route-weightedcluster) | [A28](https://github.com/grpc/proposal/blob/master/A28-xds-traffic-splitting-and-routing.md) | v1.31.0 | v1.31.0 | v1.31.0 | v1.3.0 | +Case insensitive prefix/full path matching:
  • [case_sensitive](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/route/route_components.proto#route-routematch) can be true or false
| | v1.34.0 | v1.34.0 | v1.34.0 | v1.3.0 | +Support for [xDS v3 APIs](https://www.envoyproxy.io/docs/envoy/latest/api-v3/api) | [A30](https://github.com/grpc/proposal/blob/master/A30-xds-v3.md) | v1.36.0 | v1.36.0 | v1.36.0 | v1.4.0 | +[Maximum Stream Duration](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#config-route-v3-routeaction-maxstreamduration):
  • Only max_stream_duration is supported.
| [A31](https://github.com/grpc/proposal/blob/master/A31-xds-timeout-support-and-config-selector.md) | v1.37.1 | v1.37.1 | v1.37.0 | v1.4.0 | +[Circuit Breaking](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/circuit_breaker.proto):
  • Only max_requests is supported.
| [A32](https://github.com/grpc/proposal/blob/master/A32-xds-circuit-breaking.md) | v1.37.1 (N/A for PHP) | v1.37.1 | v1.37.0 | v1.4.0 | +[Fault Injection](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/http/fault/v3/fault.proto):
Only the following fields are supported:
  • delay
  • abort
  • max_active_faults
  • headers
| [A33](https://github.com/grpc/proposal/blob/master/A33-Fault-Injection.md) | v1.37.1 | v1.37.1 | v1.37.0 | v1.4.0 | +[Client Status Discovery Service](https://github.com/envoyproxy/envoy/blob/main/api/envoy/service/status/v3/csds.proto) | [A40](https://github.com/grpc/proposal/blob/master/A40-csds-support.md) | v1.37.1 (C++)
v1.38.0 (Python) | v1.37.1 | v1.37.0 | | +[Ring hash](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/load_balancers#ring-hash) load balancing policy:
Only the following [policy specifiers](https://github.com/envoyproxy/envoy/blob/2443032526cf6e50d63d35770df9473dd0460fc0/api/envoy/config/route/v3/route_components.proto#L706) are supported:
  • header
  • filter_state with key `io.grpc.channel_id`
Only [`XX_HASH`](https://github.com/envoyproxy/envoy/blob/2443032526cf6e50d63d35770df9473dd0460fc0/api/envoy/config/cluster/v3/cluster.proto#L383) function is supported. | [A42](https://github.com/grpc/proposal/blob/master/A42-xds-ring-hash-lb-policy.md) | v1.40.0
(C++ and Python) | v1.40.1 | 1.41.0 | | +[Retry](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#envoy-v3-api-msg-config-route-v3-retrypolicy):
Only the following fields are supported:
  • retry_on for the following conditions: cancelled, deadline-exceeded, internal, resource-exhausted, and unavailable.
  • num_retries
  • retry_back_off
| [A44](https://github.com/grpc/proposal/blob/master/A44-xds-retry.md) | v1.40.0
(C++ and Python) | v1.40.1 | v1.41.0 | | diff --git a/doc/health-checking.md b/doc/health-checking.md new file mode 100644 index 00000000..0d866e3f --- /dev/null +++ b/doc/health-checking.md @@ -0,0 +1,78 @@ +GRPC Health Checking Protocol +================================ + +Health checks are used to probe whether the server is able to handle rpcs. The +client-to-server health checking can happen from point to point or via some +control system. A server may choose to reply “unhealthy” because it +is not ready to take requests, is shutting down, or for some other reason. +The client can act accordingly if the response is not received within some time +window or the response says unhealthy in it. + + +A GRPC service is used as the health checking mechanism for both simple +client-to-server scenario and other control systems such as load-balancing. +Being a high +level service provides some benefits. Firstly, since it is a GRPC service +itself, doing a health check is in the same format as a normal rpc. Secondly, +it has rich semantics such as per-service health status. Thirdly, as a GRPC +service, it is able to reuse all the existing billing, quota infrastructure, etc, +and thus the server has full control over the access of the health checking +service. + +## Service Definition + +The server should export a service defined in the following proto: + +``` +syntax = "proto3"; + +package grpc.health.v1; + +message HealthCheckRequest { + string service = 1; +} + +message HealthCheckResponse { + enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + SERVICE_UNKNOWN = 3; // Used only by the Watch method. + } + ServingStatus status = 1; +} + +service Health { + rpc Check(HealthCheckRequest) returns (HealthCheckResponse); + + rpc Watch(HealthCheckRequest) returns (stream HealthCheckResponse); +} +``` + +A client can query the server’s health status by calling the `Check` method, and +a deadline should be set on the rpc.
The client can optionally set the service +name it wants to query for health status. The suggested format of service name +is `package_names.ServiceName`, such as `grpc.health.v1.Health`. + +The server should register all the services manually and set +the individual status, including an empty service name and its status. For each +request received, if the service name can be found in the registry, +a response must be sent back with an `OK` status and the status field should be +set to `SERVING` or `NOT_SERVING` accordingly. If the service name is not +registered, the server returns a `NOT_FOUND` GRPC status. + +The server should use an empty string as the key for server's +overall health status, so that a client not interested in a specific service can +query the server's status with an empty request. The server can just do exact +matching of the service name without support of any kind of wildcard matching. +However, the service owner has the freedom to implement more complicated +matching semantics that both the client and server agree upon. + +A client can declare the server as unhealthy if the rpc is not finished after +some amount of time. The client should be able to handle the case where server +does not have the Health service. + +A client can call the `Watch` method to perform a streaming health-check. +The server will immediately send back a message indicating the current +serving status. It will then subsequently send a new message whenever +the service's serving status changes. diff --git a/doc/http-grpc-status-mapping.md b/doc/http-grpc-status-mapping.md new file mode 100644 index 00000000..928fe544 --- /dev/null +++ b/doc/http-grpc-status-mapping.md @@ -0,0 +1,30 @@ +# HTTP to gRPC Status Code Mapping + +Since intermediaries are a common part of HTTP infrastructure some responses to +gRPC requests may be received that do not include the grpc-status header. 
In +some cases mapping error codes from an intermediary allows the gRPC client to +behave more appropriately to the error situation without overloading the +semantics of either error code. + +This table is to be used _only_ for clients that received a response that did +not include grpc-status. If grpc-status was provided, it _must_ be used. Servers +_must not_ use this table to determine an HTTP status code to use; the mappings +are neither symmetric nor 1-to-1. + +| HTTP Status Code | gRPC Status Code | +|----------------------------|--------------------| +| 400 Bad Request | INTERNAL | +| 401 Unauthorized | UNAUTHENTICATED | +| 403 Forbidden | PERMISSION\_DENIED | +| 404 Not Found | UNIMPLEMENTED | +| 429 Too Many Requests | UNAVAILABLE | +| 502 Bad Gateway | UNAVAILABLE | +| 503 Service Unavailable | UNAVAILABLE | +| 504 Gateway Timeout | UNAVAILABLE | +| _All other codes_ | UNKNOWN | + +Technically, 1xx should have the entire header skipped and a subsequent header +be read. See RFC 7540 §8.1. + +200 is UNKNOWN because there should be a grpc-status in case of truly OK +response. diff --git a/doc/http2-interop-test-descriptions.md b/doc/http2-interop-test-descriptions.md new file mode 100644 index 00000000..c596f7c1 --- /dev/null +++ b/doc/http2-interop-test-descriptions.md @@ -0,0 +1,271 @@ +Negative HTTP/2 Interop Test Case Descriptions +======================================= + +Client and server use +[test.proto](../src/proto/grpc/testing/test.proto). + +Server +------ +The code for the custom http2 server can be found +[here](https://github.com/grpc/grpc/tree/master/test/http2_test). +It is responsible for handling requests and sending responses, and also for +fulfilling the behavior of each particular test case. + +Server should accept these arguments: +* --port=PORT + * The port the server will run on. For example, "8080" +* --test_case=TESTCASE + * The name of the test case to execute. 
For example, "goaway" + +Client +------ + +Clients implement test cases that test certain functionality. Each client is +provided the test case it is expected to run as a command-line parameter. Names +should be lowercase and without spaces. + +Clients should accept these arguments: +* --server_host=HOSTNAME + * The server host to connect to. For example, "localhost" or "127.0.0.1" +* --server_port=PORT + * The server port to connect to. For example, "8080" +* --test_case=TESTCASE + * The name of the test case to execute. For example, "goaway" + +Note +----- + +Note that the server and client must be invoked with the same test case or else +the test will be meaningless. For convenience, we provide a shell script wrapper +that invokes both server and client at the same time, with the same test_case. +This is the preferred way to run these tests. + +## Test Cases + +### goaway + +This test verifies that the client correctly responds to a goaway sent by the +server. The client should handle the goaway by switching to a new stream without +the user application having to do a thing. + +Client Procedure: + 1. Client sends two UnaryCall requests (and sleeps for 1 second in-between). + TODO: resolve [9300](https://github.com/grpc/grpc/issues/9300) and remove the 1 second sleep + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* Both calls are successful. +* Response payload body is 314159 bytes in size. + +Server Procedure: + 1. Server sends a GOAWAY after receiving the first UnaryCall. + +Server asserts: +* Two different connections were used from the client. + +### rst_after_header + +This test verifies that the client fails correctly when the server sends a +RST_STREAM immediately after sending headers to the client. + +Procedure: + 1. Client sends UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* Call was not successful. 
+ +Server Procedure: + 1. Server sends a RST_STREAM with error code 0 after sending headers to the client. + +*At the moment the error code and message returned are not standardized throughout all +languages. Those checks will be added once all client languages behave the same way. [#9142](https://github.com/grpc/grpc/issues/9142) is in flight.* + +### rst_during_data + +This test verifies that the client fails "correctly" when the server sends a +RST_STREAM halfway through sending data to the client. + +Procedure: + 1. Client sends UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* Call was not successful. + +Server Procedure: + 1. Server sends a RST_STREAM with error code 0 after sending half of + the requested data to the client. + +### rst_after_data + +This test verifies that the client fails "correctly" when the server sends a +RST_STREAM after sending all of the data to the client. + +Procedure: + 1. Client sends UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* Call was not successful. + +Server Procedure: + 1. Server sends a RST_STREAM with error code 0 after sending all of the + data to the client. + +*Certain client languages allow the data to be accessed even though a RST_STREAM +was encountered. Once all client languages behave this way, checks will be added on +the incoming data.* + +### ping + +This test verifies that the client correctly acknowledges all pings it gets from the +server. + +Procedure: + 1. Client sends UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* call was successful. +* response payload body is 314159 bytes in size. + +Server Procedure: + 1. Server tracks the number of outstanding pings (i.e. +1 when it sends a ping, and -1 + when it receives an ack from the client). + 2. 
Server sends pings before and after sending headers, also before and after sending data. + +Server Asserts: +* Number of outstanding pings is 0 when the connection is lost. + +### max_streams + +This test verifies that the client observes the MAX_CONCURRENT_STREAMS limit set by the server. + +Client Procedure: + 1. Client sends initial UnaryCall to allow the server to update its MAX_CONCURRENT_STREAMS settings. + 2. Client concurrently sends 10 UnaryCalls. + +Client Asserts: +* All UnaryCalls were successful, and had the correct type and payload size. + +Server Procedure: + 1. Sets MAX_CONCURRENT_STREAMS to one after the connection is made. + +*The assertion that the MAX_CONCURRENT_STREAMS limit is upheld occurs in the http2 library we used.* + +### data_frame_padding + +This test verifies that the client can correctly receive padded http2 data +frames. It also stresses the client's flow control (there is a high chance +that the sender will deadlock if the client's flow control logic doesn't +correctly account for padding). + +Client Procedure: +(Note this is the same procedure as in the "large_unary" gRPC interop tests. +Clients should use their "large_unary" gRPC interop test implementations.) +Procedure: + 1. Client calls UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* call was successful +* response payload body is 314159 bytes in size +* clients are free to assert that the response payload body contents are zero + and comparing the entire response message against a golden response + +Server Procedure: + 1. Reply to the client's request with a `SimpleResponse`, with a payload + body length of `SimpleRequest.response_size`. But send it across specific + http2 data frames as follows: + * Each http2 data frame contains a 5 byte payload and 255 bytes of padding. + + * Note the 5 byte payload and 255 byte padding are partly arbitrary, + and other numbers are also ok. 
With 255 bytes of padding for each 5 bytes of + payload containing actual gRPC message, the 300KB response size will + multiply into around 15 megabytes of flow control debt, which should stress + flow control accounting. + +### no_df_padding_sanity_test + +This test verifies that the client can correctly receive a series of small +data frames. Note that this test is intentionally a slight variation of +"data_frame_padding", with the only difference being that this test doesn't use data +frame padding when the response is sent. This test is primarily meant to +prove correctness of the http2 server implementation and highlight failures +of the "data_frame_padding" test. + +Client Procedure: +(Note this is the same procedure as in the "large_unary" gRPC interop tests. +Clients should use their "large_unary" gRPC interop test implementations.) +Procedure: + 1. Client calls UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* call was successful +* response payload body is 314159 bytes in size +* clients are free to assert that the response payload body contents are zero + and comparing the entire response message against a golden response + +Server Procedure: + 1. Reply to the client's request with a `SimpleResponse`, with a payload + body length of `SimpleRequest.response_size`. But send it across series of + http2 data frames that contain 5 bytes of "payload" and zero bytes of + "padding" (the padding flags on the data frames should not be set). diff --git a/doc/internationalization.md b/doc/internationalization.md new file mode 100644 index 00000000..6ff5894c --- /dev/null +++ b/doc/internationalization.md @@ -0,0 +1,45 @@ +gRPC Internationalization +========================= + +As a universal RPC framework, gRPC needs to be fully usable within/across different international environments. +This document describes gRPC API and behavior specifics when used in a non-english environment. 
+ +## API Concepts + +While some API elements need to be able to represent non-english content, some are intentionally left as ASCII-only +for simplicity & performance reasons. + +### Method name (in RPC Invocation) +Method names are ASCII-only and may only contain characters allowed by HTTP/2 text header values. That should not +be very limiting as most gRPC services will use protobuf which only allows method names from an even more restricted ASCII subset. +Also, handling method names is a very hot code path so any additional encoding/decoding step is to be avoided. + +Recommended representation in language-specific APIs: string type. + +### Host name (in RPC Invocation) +Host names are punycode encoded, but the user is responsible for providing the punycode-encoded string if she wishes to use an internationalized host name. + +Recommended representation in language-specific APIs: string/unicode string. + +NOTE: overriding host name when invoking RPCs is only supported by C-core based gRPC implementations. + +### Status detail/message (accompanies RPC status code) + +Status messages are expected to contain national-alphabet characters. +Allowed values are unicode strings (content will be percent-encoded on the wire). + +Recommended representation in language-specific APIs: unicode string. + +### Metadata key +Allowed values are defined by HTTP/2 standard (metadata keys are represented as HTTP/2 header/trailer names). + +Recommended representation in language-specific APIs: string. + +### Metadata value (text-valued metadata) +Allowed values are defined by HTTP/2 standard (metadata values are represented as HTTP/2 header/trailer text values). + +Recommended representation in language-specific APIs: string. 
+ +### Channel target (in channel creation) + +TBD diff --git a/doc/interop-test-descriptions.md b/doc/interop-test-descriptions.md new file mode 100644 index 00000000..46a1ca7a --- /dev/null +++ b/doc/interop-test-descriptions.md @@ -0,0 +1,1280 @@ +Interoperability Test Case Descriptions +======================================= + +Client and server use +[test.proto](../src/proto/grpc/testing/test.proto) +and the [gRPC over HTTP/2 v2 protocol](./PROTOCOL-HTTP2.md). + +Client +------ + +Clients implement test cases that test certain functionality. Each client is +provided the test case it is expected to run as a command-line parameter. Names +should be lowercase and without spaces. + +Clients should accept these arguments: +* --server_host=HOSTNAME + * The server host to connect to. For example, "localhost" or "127.0.0.1" +* --server_host_override=HOSTNAME + * The server host to claim to be connecting to, for use in TLS and HTTP/2 + :authority header. If unspecified, the value of --server_host will be + used +* --server_port=PORT + * The server port to connect to. For example, "8080" +* --test_case=TESTCASE + * The name of the test case to execute. For example, "empty_unary" +* --use_tls=BOOLEAN + * Whether to use a plaintext or encrypted connection +* --use_test_ca=BOOLEAN + * Whether to replace platform root CAs with + [ca.pem](https://github.com/grpc/grpc/blob/master/src/core/tsi/test_creds/ca.pem) + as the CA root +* --default_service_account=ACCOUNT_EMAIL + * Email of the GCE default service account. +* --oauth_scope=SCOPE + * OAuth scope. For example, "https://www.googleapis.com/auth/xapi.zoo" +* --service_account_key_file=PATH + * The path to the service account JSON key file generated from GCE developer + console. +* --service_config_json=SERVICE_CONFIG_JSON + * Disables service config lookups and sets the provided string as the + default service config. + +Clients must support TLS with ALPN. Clients must not disable certificate +checking.
+ +### empty_unary + +This test verifies that implementations support zero-size messages. Ideally, +client implementations would verify that the request and response were zero +bytes serialized, but this is generally prohibitive to perform, so is not +required. + +Server features: +* [EmptyCall][] + +Procedure: + 1. Client calls EmptyCall with the default Empty message + +Client asserts: +* call was successful +* response is non-null + +*It may be possible to use UnaryCall instead of EmptyCall, but it is harder to +ensure that the proto serialized to zero bytes.* + +### cacheable_unary + +This test verifies that gRPC requests marked as cacheable use GET verb instead +of POST, and that server sets appropriate cache control headers for the response +to be cached by a proxy. This test requires that the server is behind +a caching proxy. Use of current timestamp in the request prevents accidental +cache matches left over from previous tests. + +Server features: +* [CacheableUnaryCall][] + +Procedure: + 1. Client calls CacheableUnaryCall with `SimpleRequest` request with payload + set to current timestamp. Timestamp format is irrelevant, and resolution is + in nanoseconds. + Client adds a `x-user-ip` header with value `1.2.3.4` to the request. + This is done since some proxys such as GFE will not cache requests from + localhost. + Client marks the request as cacheable by setting the cacheable flag in the + request context. Longer term this should be driven by the method option + specified in the proto file itself. + 2. Client calls CacheableUnaryCall again immediately with the same request and + configuration as the previous call. + +Client asserts: +* Both calls were successful +* The payload body of both responses is the same. + +### large_unary + +This test verifies unary calls succeed in sending messages, and touches on flow +control (even if compression is enabled on the channel). + +Server features: +* [UnaryCall][] + +Procedure: + 1. 
Client calls UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* call was successful +* response payload body is 314159 bytes in size +* clients are free to assert that the response payload body contents are zero + and comparing the entire response message against a golden response + +### client_compressed_unary + +This test verifies the client can compress unary messages by sending two unary +calls, for compressed and uncompressed payloads. It also sends an initial +probing request to verify whether the server supports the [CompressedRequest][] +feature by checking if the probing call fails with an `INVALID_ARGUMENT` status. + +Server features: +* [UnaryCall][] +* [CompressedRequest][] + +Procedure: + 1. Client calls UnaryCall with the feature probe, an *uncompressed* message: + ``` + { + expect_compressed:{ + value: true + } + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + + 1. Client calls UnaryCall with the *compressed* message: + + ``` + { + expect_compressed:{ + value: true + } + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + + 1. Client calls UnaryCall with the *uncompressed* message: + + ``` + { + expect_compressed:{ + value: false + } + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + + Client asserts: + * First call failed with `INVALID_ARGUMENT` status. + * Subsequent calls were successful. + * Response payload body is 314159 bytes in size. + * Clients are free to assert that the response payload body contents are + zeros and comparing the entire response message against a golden response. + + +### server_compressed_unary + +This test verifies the server can compress unary messages. It sends two unary +requests, expecting the server's response to be compressed or not according to +the `response_compressed` boolean. 
+ +Whether compression was actually performed is determined by the compression bit +in the response's message flags. *Note that some languages may not have access +to the message flags, in which case the client will be unable to verify that +the `response_compressed` boolean is obeyed by the server*. + + +Server features: +* [UnaryCall][] +* [CompressedResponse][] + +Procedure: + 1. Client calls UnaryCall with `SimpleRequest`: + + ``` + { + response_compressed:{ + value: true + } + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + + ``` + { + response_compressed:{ + value: false + } + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + Client asserts: + * call was successful + * if supported by the implementation, when `response_compressed` is true, + the response MUST have the compressed message flag set. + * if supported by the implementation, when `response_compressed` is false, + the response MUST NOT have the compressed message flag set. + * response payload body is 314159 bytes in size in both cases. + * clients are free to assert that the response payload body contents are + zero and comparing the entire response message against a golden response + + +### client_streaming + +This test verifies that client-only streaming succeeds. + +Server features: +* [StreamingInputCall][] + +Procedure: + 1. Client calls StreamingInputCall + 2. Client sends: + + ``` + { + payload:{ + body: 27182 bytes of zeros + } + } + ``` + + 3. Client then sends: + + ``` + { + payload:{ + body: 8 bytes of zeros + } + } + ``` + + 4. Client then sends: + + ``` + { + payload:{ + body: 1828 bytes of zeros + } + } + ``` + + 5. Client then sends: + + ``` + { + payload:{ + body: 45904 bytes of zeros + } + } + ``` + + 6. 
Client half-closes + +Client asserts: +* call was successful +* response aggregated_payload_size is 74922 + + +### client_compressed_streaming + +This test verifies the client can compress requests on a per-message basis by +performing a two-request streaming call. It also sends an initial probing +request to verify whether the server supports the [CompressedRequest][] feature +by checking if the probing call fails with an `INVALID_ARGUMENT` status. + +Procedure: + 1. Client calls `StreamingInputCall` and sends the following feature-probing + *uncompressed* `StreamingInputCallRequest` message + + ``` + { + expect_compressed:{ + value: true + } + payload:{ + body: 27182 bytes of zeros + } + } + ``` + If the call does not fail with `INVALID_ARGUMENT`, the test fails. + Otherwise, we continue. + + 1. Client calls `StreamingInputCall` again, sending the *compressed* message + + ``` + { + expect_compressed:{ + value: true + } + payload:{ + body: 27182 bytes of zeros + } + } + ``` + + 1. And finally, the *uncompressed* message + ``` + { + expect_compressed:{ + value: false + } + payload:{ + body: 45904 bytes of zeros + } + } + ``` + + 1. Client half-closes + +Client asserts: +* First call fails with `INVALID_ARGUMENT`. +* Subsequent calls succeed. +* Response aggregated payload size is 73086. + + +### server_streaming + +This test verifies that server-only streaming succeeds. + +Server features: +* [StreamingOutputCall][] + +Procedure: + 1.
Client calls StreamingOutputCall with `StreamingOutputCallRequest`: + + ``` + { + response_parameters:{ + size: 31415 + } + response_parameters:{ + size: 9 + } + response_parameters:{ + size: 2653 + } + response_parameters:{ + size: 58979 + } + } + ``` + +Client asserts: +* call was successful +* exactly four responses +* response payload bodies are sized (in order): 31415, 9, 2653, 58979 +* clients are free to assert that the response payload body contents are zero + and comparing the entire response messages against golden responses + +### server_compressed_streaming + +This test verifies that the server can compress streaming messages and disable +compression on individual messages, expecting the server's response to be +compressed or not according to the `response_compressed` boolean. + +Whether compression was actually performed is determined by the compression bit +in the response's message flags. *Note that some languages may not have access +to the message flags, in which case the client will be unable to verify that the +`response_compressed` boolean is obeyed by the server*. + +Server features: +* [StreamingOutputCall][] +* [CompressedResponse][] + + +Procedure: + 1. Client calls StreamingOutputCall with `StreamingOutputCallRequest`: + + ``` + { + response_parameters:{ + compressed: { + value: true + } + size: 31415 + } + response_parameters:{ + compressed: { + value: false + } + size: 92653 + } + } + ``` + + Client asserts: + * call was successful + * exactly two responses + * if supported by the implementation, when `response_compressed` is false, + the response's messages MUST NOT have the compressed message flag set. + * if supported by the implementation, when `response_compressed` is true, + the response's messages MUST have the compressed message flag set. 
+ * response payload bodies are sized (in order): 31415, 92653 + * clients are free to assert that the response payload body contents are + zero and comparing the entire response messages against golden responses + +### ping_pong + +This test verifies that full duplex bidi is supported. + +Server features: +* [FullDuplexCall][] + +Procedure: + 1. Client calls FullDuplexCall with: + + ``` + { + response_parameters:{ + size: 31415 + } + payload:{ + body: 27182 bytes of zeros + } + } + ``` + + 2. After getting a reply, it sends: + + ``` + { + response_parameters:{ + size: 9 + } + payload:{ + body: 8 bytes of zeros + } + } + ``` + + 3. After getting a reply, it sends: + + ``` + { + response_parameters:{ + size: 2653 + } + payload:{ + body: 1828 bytes of zeros + } + } + ``` + + 4. After getting a reply, it sends: + + ``` + { + response_parameters:{ + size: 58979 + } + payload:{ + body: 45904 bytes of zeros + } + } + ``` + + 5. After getting a reply, client half-closes + +Client asserts: +* call was successful +* exactly four responses +* response payload bodies are sized (in order): 31415, 9, 2653, 58979 +* clients are free to assert that the response payload body contents are zero + and comparing the entire response messages against golden responses + +### empty_stream + +This test verifies that streams support having zero-messages in both +directions. + +Server features: +* [FullDuplexCall][] + +Procedure: + 1. Client calls FullDuplexCall and then half-closes + +Client asserts: +* call was successful +* exactly zero responses + +### compute_engine_creds + +This test is only for cloud-to-prod path. + +This test verifies unary calls succeed in sending messages while using Service +Credentials from GCE metadata server. The client instance needs to be created +with desired oauth scope. + +The test uses `--default_service_account` with GCE service account email and +`--oauth_scope` with the OAuth scope to use. 
For testing against +grpc-test.sandbox.googleapis.com, "https://www.googleapis.com/auth/xapi.zoo" +should +be passed in as `--oauth_scope`. + +Server features: +* [UnaryCall][] +* [Echo Authenticated Username][] +* [Echo OAuth Scope][] + +Procedure: + 1. Client configures channel to use GCECredentials + 2. Client calls UnaryCall on the channel with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + fill_username: true + fill_oauth_scope: true + } + ``` + +Client asserts: +* call was successful +* received SimpleResponse.username equals the value of + `--default_service_account` flag +* received SimpleResponse.oauth_scope is in `--oauth_scope` +* response payload body is 314159 bytes in size +* clients are free to assert that the response payload body contents are zero + and comparing the entire response message against a golden response + +### jwt_token_creds + +This test is only for cloud-to-prod path. + +This test verifies unary calls succeed in sending messages while using JWT +token (created by the project's key file) + +Test caller should set flag `--service_account_key_file` with the +path to json key file downloaded from +https://console.developers.google.com. Alternately, if using a +usable auth implementation, she may specify the file location in the environment +variable GOOGLE_APPLICATION_CREDENTIALS. + +Server features: +* [UnaryCall][] +* [Echo Authenticated Username][] +* [Echo OAuth Scope][] + +Procedure: + 1. Client configures the channel to use JWTTokenCredentials + 2. Client calls UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + fill_username: true + } + ``` + +Client asserts: +* call was successful +* received SimpleResponse.username is not empty and is in the json key file used +by the auth library. The client can optionally check the username matches the +email address in the key file or equals the value of `--default_service_account` +flag. 
+* response payload body is 314159 bytes in size +* clients are free to assert that the response payload body contents are zero + and comparing the entire response message against a golden response + +### oauth2_auth_token + +This test is only for cloud-to-prod path and some implementations may run +in GCE only. + +This test verifies unary calls succeed in sending messages using an OAuth2 token +that is obtained out of band. For the purpose of the test, the OAuth2 token is +actually obtained from a service account credentials or GCE credentials via the +language-specific authorization library. + +The difference between this test and the other auth tests is that it +first uses the authorization library to obtain an authorization token. + +The test +- uses the flag `--service_account_key_file` with the path to a json key file +downloaded from https://console.developers.google.com. Alternately, if using a +usable auth implementation, it may specify the file location in the environment +variable GOOGLE_APPLICATION_CREDENTIALS, *OR* if GCE credentials is used to +fetch the token, `--default_service_account` can be used to pass in GCE service +account email. +- uses the flag `--oauth_scope` for the oauth scope. For testing against +grpc-test.sandbox.googleapis.com, "https://www.googleapis.com/auth/xapi.zoo" +should be passed as the `--oauth_scope`. + +Server features: +* [UnaryCall][] +* [Echo Authenticated Username][] +* [Echo OAuth Scope][] + +Procedure: + 1. Client uses the auth library to obtain an authorization token + 2. Client configures the channel to use AccessTokenCredentials with the access + token obtained in step 1 + 3. Client calls UnaryCall with the following message + + ``` + { + fill_username: true + fill_oauth_scope: true + } + ``` + +Client asserts: +* call was successful +* received SimpleResponse.username is valid. 
Depending on whether a service +account key file or GCE credentials was used, client should check against the +json key file or GCE default service account email. +* received SimpleResponse.oauth_scope is in `--oauth_scope` + +### per_rpc_creds + +Similar to the other auth tests, this test is only for cloud-to-prod path. + +This test verifies unary calls succeed in sending messages using a JWT or a +service account credentials set on the RPC. + +The test +- uses the flag `--service_account_key_file` with the path to a json key file +downloaded from https://console.developers.google.com. Alternately, if using a +usable auth implementation, it may specify the file location in the environment +variable GOOGLE_APPLICATION_CREDENTIALS +- optionally uses the flag `--oauth_scope` for the oauth scope if implementer +wishes to use service account credential instead of JWT credential. For testing +against grpc-test.sandbox.googleapis.com, oauth scope +"https://www.googleapis.com/auth/xapi.zoo" should be used. + +Server features: +* [UnaryCall][] +* [Echo Authenticated Username][] +* [Echo OAuth Scope][] + +Procedure: + 1. Client configures the channel with just SSL credentials + 2. Client calls UnaryCall, setting per-call credentials to + JWTTokenCredentials. The request is the following message + + ``` + { + fill_username: true + } + ``` + +Client asserts: +* call was successful +* received SimpleResponse.username is not empty and is in the json key file used +by the auth library. The client can optionally check the username matches the +email address in the key file. + +### google_default_credentials + +Similar to the other auth tests, this test should only be run against prod +servers. Different from some of the other auth tests however, this test +may be also run from outside of GCP. + +This test verifies unary calls succeed when the client uses +GoogleDefaultCredentials. 
The path to a service account key file in the +GOOGLE_APPLICATION_CREDENTIALS environment variable may or may not be +provided by the test runner. For example, the test runner might set +this environment when outside of GCP but keep it unset when on GCP. + +The test uses `--default_service_account` with GCE service account email. + +Server features: +* [UnaryCall][] +* [Echo Authenticated Username][] + +Procedure: + 1. Client configures the channel to use GoogleDefaultCredentials + * Note: the term `GoogleDefaultCredentials` within the context + of this test description refers to an API which encapsulates + both "transport credentials" and "call credentials" and which + is capable of transport creds auto-selection (including ALTS). + Similar APIs involving only auto-selection of OAuth mechanisms + might work for this test but aren't the intended subjects. + 2. Client calls UnaryCall with: + + ``` + { + fill_username: true + } + ``` + +Client asserts: +* call was successful +* received SimpleResponse.username matches the value of + `--default_service_account` + +### compute_engine_channel_credentials + +Similar to the other auth tests, this test should only be run against prod +servers. Note that this test may only be ran on GCP. + +This test verifies unary calls succeed when the client uses +ComputeEngineChannelCredentials. All that is needed by the test environment +is for the client to be running on GCP. + +The test uses `--default_service_account` with GCE service account email. This +email must identify the default service account of the GCP VM that the test +is running on. + +Server features: +* [UnaryCall][] +* [Echo Authenticated Username][] + +Procedure: + 1. 
Client configures the channel to use ComputeEngineChannelCredentials + * Note: the term `ComputeEngineChannelCredentials` within the context + of this test description refers to an API which encapsulates + both "transport credentials" and "call credentials" and which + is capable of transport creds auto-selection (including ALTS). + The exact name of the API may vary per language. + 2. Client calls UnaryCall with: + + ``` + { + fill_username: true + } + ``` + +Client asserts: +* call was successful +* received SimpleResponse.username matches the value of + `--default_service_account` + +### custom_metadata + +This test verifies that custom metadata in either binary or ascii format can be +sent as initial-metadata by the client and as both initial- and trailing-metadata +by the server. + +Server features: +* [UnaryCall][] +* [FullDuplexCall][] +* [Echo Metadata][] + +Procedure: + 1. The client attaches custom metadata with the following keys and values: + + ``` + key: "x-grpc-test-echo-initial", value: "test_initial_metadata_value" + key: "x-grpc-test-echo-trailing-bin", value: 0xababab + ``` + + to a UnaryCall with request: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + + 2. The client attaches custom metadata with the following keys and values: + + ``` + key: "x-grpc-test-echo-initial", value: "test_initial_metadata_value" + key: "x-grpc-test-echo-trailing-bin", value: 0xababab + ``` + + to a FullDuplexCall with request: + + ``` + { + response_parameters:{ + size: 314159 + } + payload:{ + body: 271828 bytes of zeros + } + } + ``` + + and then half-closes + +Client asserts: +* call was successful +* metadata with key `"x-grpc-test-echo-initial"` and value + `"test_initial_metadata_value"` is received in the initial metadata for calls + in Procedure steps 1 and 2. +* metadata with key `"x-grpc-test-echo-trailing-bin"` and value `0xababab` is + received in the trailing metadata for calls in Procedure steps 1 and 2. 
+ + + +### status_code_and_message + +This test verifies unary calls succeed in sending messages, and propagate back +status code and message sent along with the messages. + +Server features: +* [UnaryCall][] +* [FullDuplexCall][] +* [Echo Status][] + +Procedure: + 1. Client calls UnaryCall with: + + ``` + { + response_status:{ + code: 2 + message: "test status message" + } + } + ``` + + 2. Client calls FullDuplexCall with: + + ``` + { + response_status:{ + code: 2 + message: "test status message" + } + } + ``` + + and then half-closes + + +Client asserts: +* received status code is the same as the sent code for both Procedure steps 1 + and 2 +* received status message is the same as the sent message for both Procedure + steps 1 and 2 + +### special_status_message + +This test verifies Unicode and whitespace is correctly processed in status +message. "\t" is horizontal tab. "\r" is carriage return. "\n" is line feed. + +Server features: +* [UnaryCall][] +* [Echo Status][] + +Procedure: + 1. Client calls UnaryCall with: + + ``` + { + response_status:{ + code: 2 + message: "\t\ntest with whitespace\r\nand Unicode BMP ☺ and non-BMP 😈\t\n" + } + } + ``` + +Client asserts: +* received status code is the same as the sent code for Procedure step 1 +* received status message is the same as the sent message for Procedure step 1, + including all whitespace characters + +### unimplemented_method + +This test verifies that calling an unimplemented RPC method returns the +UNIMPLEMENTED status code. + +Server features: +N/A + +Procedure: +* Client calls `grpc.testing.TestService/UnimplementedCall` with an empty + request (defined as `grpc.testing.Empty`): + + ``` + { + } + ``` + +Client asserts: +* received status code is 12 (UNIMPLEMENTED) + +### unimplemented_service + +This test verifies calling an unimplemented server returns the UNIMPLEMENTED +status code. 
+ +Server features: +N/A + +Procedure: +* Client calls `grpc.testing.UnimplementedService/UnimplementedCall` with an + empty request (defined as `grpc.testing.Empty`) + +Client asserts: +* received status code is 12 (UNIMPLEMENTED) + +### cancel_after_begin + +This test verifies that a request can be cancelled after metadata has been sent +but before payloads are sent. + +Server features: +* [StreamingInputCall][] + +Procedure: + 1. Client starts StreamingInputCall + 2. Client immediately cancels request + +Client asserts: +* Call completed with status CANCELLED + +### cancel_after_first_response + +This test verifies that a request can be cancelled after receiving a message +from the server. + +Server features: +* [FullDuplexCall][] + +Procedure: + 1. Client starts FullDuplexCall with + + ``` + { + response_parameters:{ + size: 31415 + } + payload:{ + body: 27182 bytes of zeros + } + } + ``` + + 2. After receiving a response, client cancels request + +Client asserts: +* Call completed with status CANCELLED + +### timeout_on_sleeping_server + +This test verifies that an RPC request whose lifetime exceeds its configured +timeout value will end with the DeadlineExceeded status. + +Server features: +* [FullDuplexCall][] + +Procedure: + 1. Client calls FullDuplexCall with the following request and sets its timeout + to 1ms + + ``` + { + payload:{ + body: 27182 bytes of zeros + } + } + ``` + + 2. Client waits + +Client asserts: +* Call completed with status DEADLINE_EXCEEDED. + +### concurrent_large_unary + +Status: TODO + +Client performs 1000 large_unary tests in parallel on the same channel. + +### Flow control. Pushback at client for large messages (TODO: fix name) + +Status: TODO + +This test verifies that a client sending faster than a server can drain sees +pushback (i.e., attempts to send succeed only after appropriate delays). + +#### rpc_soak + +The client performs many large_unary RPCs in sequence over the same channel. 
+The client records the latency and status of each RPC in some data structure. +If the test ever consumes `soak_overall_timeout_seconds` seconds and still hasn't +completed `soak_iterations` RPCs, then the test should discontinue sending RPCs +as soon as possible. After performing all RPCs, the test should examine +previously recorded RPC latency and status results in a second pass and fail if +either: + +a) not all `soak_iterations` RPCs were completed + +b) the sum of RPCs that either completed with a non-OK status or exceeded + `max_acceptable_per_rpc_latency_ms` exceeds `soak_max_failures` + +Implementations should use a timer with sub-millisecond precision to measure +latency. Also, implementations should avoid setting RPC deadlines and should +instead wait for each RPC to complete. Doing so provides more data for +debugging in case of failure. For example, if RPC deadlines are set to +`soak_per_iteration_max_acceptable_latency_ms` and one of the RPCs hits that +deadline, it's not clear if the RPC was late by a millisecond or a minute. + +This test must be configurable via a few different command line flags: + +* `soak_iterations`: Controls the number of RPCs to perform. This should + default to 10. + +* `soak_max_failures`: An inclusive upper limit on the number of RPC failures + that should be tolerated (i.e. after which the test process should + still exit 0). A failure is considered to be either a non-OK status or an RPC + whose latency exceeds `soak_per_iteration_max_acceptable_latency_ms`. This + should default to 0. + +* `soak_per_iteration_max_acceptable_latency_ms`: An upper limit on the latency + of a single RPC in order for that RPC to be considered successful. This + should default to 1000. + +* `soak_overall_timeout_seconds`: The overall number of seconds after which + the test should stop and fail if `soak_iterations` have not yet been + completed. This should default to + `soak_per_iteration_max_acceptable_latency_ms` * `soak_iterations`. 
+ +The following is optional but encouraged to improve debuggability: + +* Implementations should log the number of milliseconds that each RPC takes. + Additionally, implementations should use a histogram of RPC latencies + to log interesting latency percentiles at the end of the test (e.g. median, + 90th, and max latency percentiles). + +#### channel_soak + +Similar to rpc_soak, but this time each RPC is performed on a new channel. The +channel is created just before each RPC and is destroyed just after. + +This test is configured with the same command line flags that the rpc_soak test +is configured with, with only one semantic difference: when measuring an RPCs +latency to see if it exceeds `soak_per_iteration_max_acceptable_latency_ms` or +not, the creation of the channel should be included in that +latency measurement, but the teardown of that channel should **not** be +included in that latency measurement (channel teardown semantics differ widely +between languages). This latency measurement should also be the value that is +logged and recorded in the latency histogram. + +### Experimental Tests + +These tests are not yet standardized, and are not yet implemented in all +languages. Therefore they are not part of our interop matrix. + +#### long_lived_channel + +The client performs a number of large_unary RPCs over a single long-lived +channel with a fixed but configurable interval between each RPC. + +### TODO Tests + +#### High priority: + +Propagation of status code and message (yangg) + +Multiple thousand simultaneous calls on same Channel (ctiller) + +Metadata: client headers, server headers + trailers, binary+ascii + +#### Normal priority: + +Cancel before start (ctiller) + +Cancel after sent first message (ctiller) + +Cancel after received headers (ctiller) + +Timeout but completed before expire (zhaoq) + +Multiple thousand simultaneous calls timeout on same Channel (ctiller) + +#### Lower priority: + +Flow control. 
Pushback at client for large messages (abhishek) + +Flow control. Pushback at server for large messages (abhishek) + +Going over max concurrent streams doesn't fail (client controls itself) +(abhishek) + +RPC method not implemented (yangg) + +Multiple thousand simultaneous calls on different Channels (ctiller) + +Failed TLS hostname verification (ejona?) + +Large amount of headers to cause CONTINUATIONs; 63K of 'X's, all in one header. + +#### To prioritize: + +Start streaming RPC but don't send any requests, server responds + +### Postponed Tests + +Resilience to buggy servers: These tests would verify that a client application +isn't affected negatively by the responses put on the wire by a buggy server +(e.g. the client library won't make the application crash). + +Reconnect after transport failure + +Reconnect backoff + +Fuzz testing + + +Server +------ + +Servers implement various named features for clients to test with. Server +features are orthogonal. If a server implements a feature, it is always +available for clients. Names are simple descriptions for developer +communication and tracking. + +Servers should accept these arguments: + +* --port=PORT + + * The port to listen on. For example, "8080" + +* --use_tls=BOOLEAN + + * Whether to use a plaintext or encrypted connection + +Servers must support TLS with ALPN. They should use +[server1.pem](https://github.com/grpc/grpc/blob/master/src/core/tsi/test_creds/server1.pem) +for their certificate. + +### EmptyCall +[EmptyCall]: #emptycall + +Server implements EmptyCall which immediately returns the empty message. + +### UnaryCall +[UnaryCall]: #unarycall + +Server implements UnaryCall which immediately returns a SimpleResponse with a +payload body of size `SimpleRequest.response_size` bytes and type as appropriate +for the `SimpleRequest.response_type`. If the server does not support the +`response_type`, then it should fail the RPC with `INVALID_ARGUMENT`. 
+ +### CacheableUnaryCall +[CacheableUnaryCall]: #cacheableunarycall + +Server gets the default SimpleRequest proto as the request. The content of the +request is ignored. It returns the SimpleResponse proto with the payload set +to current timestamp. The timestamp is an integer representing current time +with nanosecond resolution. This integer is formatted as ASCII decimal in the +response. The format is not really important as long as the response payload +is different for each request. In addition it adds + 1. cache control headers such that the response can be cached by proxies in + the response path. Server should be behind a caching proxy for this test + to pass. Currently we set the max-age to 60 seconds. + +### CompressedResponse +[CompressedResponse]: #compressedresponse + +When the client sets `response_compressed` to true, the server's response is +sent back compressed. Note that `response_compressed` is present on both +`SimpleRequest` (unary) and `StreamingOutputCallRequest` (streaming). + +### CompressedRequest +[CompressedRequest]: #compressedrequest + +When the client sets `expect_compressed` to true, the server expects the client +request to be compressed. If it's not, it fails the RPC with `INVALID_ARGUMENT`. +Note that `response_compressed` is present on both `SimpleRequest` (unary) and +`StreamingOutputCallRequest` (streaming). + +### StreamingInputCall +[StreamingInputCall]: #streaminginputcall + +Server implements StreamingInputCall which upon half close immediately returns +a StreamingInputCallResponse where aggregated_payload_size is the sum of all +request payload bodies received. + +### StreamingOutputCall +[StreamingOutputCall]: #streamingoutputcall + +Server implements StreamingOutputCall by replying, in order, with one +StreamingOutputCallResponse for each ResponseParameters in +StreamingOutputCallRequest. 
Each StreamingOutputCallResponse should have a +payload body of size ResponseParameters.size bytes, as specified by its +respective ResponseParameters. After sending all responses, it closes with OK. + +### FullDuplexCall +[FullDuplexCall]: #fullduplexcall + +Server implements FullDuplexCall by replying, in order, with one +StreamingOutputCallResponse for each ResponseParameters in each +StreamingOutputCallRequest. Each StreamingOutputCallResponse should have a +payload body of size ResponseParameters.size bytes, as specified by its +respective ResponseParameters. After receiving half close and sending all +responses, it closes with OK. + +### Echo Status +[Echo Status]: #echo-status +When the client sends a response_status in the request payload, the server closes +the stream with the status code and message contained within said response_status. +The server will not process any further messages on the stream sent by the client. +This can be used by clients to verify correct handling of different status codes and +associated status messages end-to-end. + +### Echo Metadata +[Echo Metadata]: #echo-metadata +When the client sends metadata with the key `"x-grpc-test-echo-initial"` with its +request, the server sends back exactly this key and the corresponding value back to +the client as part of initial metadata. When the client sends metadata with the key +`"x-grpc-test-echo-trailing-bin"` with its request, the server sends back exactly this +key and the corresponding value back to the client as trailing metadata. + +### Observe ResponseParameters.interval_us +[Observe ResponseParameters.interval_us]: #observe-responseparametersinterval_us + +In StreamingOutputCall and FullDuplexCall, server delays sending a +StreamingOutputCallResponse by the ResponseParameters' `interval_us` for that +particular response, relative to the last response sent. That is, `interval_us` +acts like a sleep *before* sending the response and accumulates from one +response to the next. 
+ +Interaction with flow control is unspecified. + +### Echo Auth Information + +Status: Pending + +#### Echo Authenticated Username +[Echo Authenticated Username]: #echo-authenticated-username + +If a SimpleRequest has fill_username=true and that request was successfully +authenticated, then the SimpleResponse should have username filled with the +canonical form of the authenticated source. The canonical form is dependent on +the authentication method, but is likely to be a base 10 integer identifier or +an email address. + +#### Echo OAuth scope +[Echo OAuth Scope]: #echo-oauth-scope + +If a SimpleRequest has `fill_oauth_scope=true` and that request was successfully +authenticated via OAuth, then the SimpleResponse should have oauth_scope filled +with the scope of the method being invoked. + +Although a general server-side feature, most test servers won't implement this +feature. The TLS server `grpc-test.sandbox.googleapis.com:443` supports this +feature. It requires at least the OAuth scope +`https://www.googleapis.com/auth/xapi.zoo` for authentication to succeed. + +Discussion: + +Ideally, this would be communicated via metadata and not in the +request/response, but we want to use this test in code paths that don't yet +fully communicate metadata. diff --git a/doc/keepalive.md b/doc/keepalive.md new file mode 100644 index 00000000..f773bd87 --- /dev/null +++ b/doc/keepalive.md @@ -0,0 +1,54 @@ +# Keepalive User Guide for gRPC Core (and dependents) + +The keepalive ping is a way to check if a channel is currently working by sending HTTP2 pings over the transport. It is sent periodically, and if the ping is not acknowledged by the peer within a certain timeout period, the transport is disconnected. + +This guide documents the knobs within gRPC core to control the current behavior of the keepalive ping. 
+ +The keepalive ping in core is controlled by the following channel arguments - + +* **GRPC_ARG_KEEPALIVE_TIME_MS** + * This channel argument controls the period (in milliseconds) after which a keepalive ping is sent on the transport. +* **GRPC_ARG_KEEPALIVE_TIMEOUT_MS** + * This channel argument controls the amount of time (in milliseconds) the sender of the keepalive ping waits for an acknowledgement. If it does not receive an acknowledgment within this time, it will close the connection. +* **GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA** + * This channel argument controls the maximum number of pings that can be sent when there is no data/header frame to be sent. gRPC Core will not continue sending pings if we run over the limit. Setting it to 0 allows sending pings without such a restriction. (Note that this is an unfortunate setting that does not agree with [A8-client-side-keepalive.md](https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md). There should ideally be no such restriction on the keepalive ping and we plan to deprecate it in the future.) +* **GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS** + * This channel argument if set to 1 (0 : false; 1 : true), allows keepalive pings to be sent even if there are no calls in flight. + +On the server-side, the following additional channel arguments need to be configured - + +* **GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS** + * If there are no data/header frames being sent on the transport, this channel argument on the server side controls the minimum time (in milliseconds) that gRPC Core would expect between receiving successive pings. If the time between successive pings is less than this time, then the ping will be considered a bad ping from the peer. Such a ping counts as a ‘ping strike’. +On the client side, this does not have any effect. 
+* **GRPC_ARG_HTTP2_MAX_PING_STRIKES** + * This arg controls the maximum number of bad pings that the server will tolerate before sending an HTTP2 GOAWAY frame and closing the transport. Setting it to 0 allows the server to accept any number of bad pings. + +**IMPORTANT NOTE** - For keepalive to work properly and as intended, all of the above channel arguments should be configured appropriately. The client-side keepalive settings should also be in agreement with the server-side settings. If a client sends pings more often than the server is willing to accept, the connection will be terminated with a GOAWAY frame with "too_many_pings" as the debug data. + +### Default Values + +Channel Argument| Client|Server +----------------|-------|------ +GRPC_ARG_KEEPALIVE_TIME_MS|INT_MAX (disabled)|7200000 (2 hours) +GRPC_ARG_KEEPALIVE_TIMEOUT_MS|20000 (20 seconds)|20000 (20 seconds) +GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS|0 (false)|0 (false) +GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA|2|2 +GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS|N/A|300000 (5 minutes) +GRPC_ARG_HTTP2_MAX_PING_STRIKES|N/A|2 + +### FAQ +* When is the keepalive timer started? + * The keepalive timer is started when a transport is done connecting (after handshake). +* What happens when the keepalive timer fires? + * When the keepalive timer fires, gRPC Core will try to send a keepalive ping on the transport. This ping can be blocked if - + * there is no active call on that transport and `GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS` is false. + * the number of pings already sent on the transport without any data has already exceeded `GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA`. + * If a keepalive ping is not blocked and is sent on the transport, then the keepalive watchdog timer is started which will close the transport if the ping is not acknowledged before it fires. +* Why am I receiving a GOAWAY with error code `ENHANCE_YOUR_CALM`? 
+ * A server sends a GOAWAY with `ENHANCE_YOUR_CALM` if the client sends too many misbehaving pings as described in [A8-client-side-keepalive.md](https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md). Some scenarios where this can happen are - + * if a server has `GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS` set to false while the client has set this to true resulting in keepalive pings being sent even when there is no call in flight. + * if the client's `GRPC_ARG_KEEPALIVE_TIME_MS` setting is lower than the server's `GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS`. +* Why is my client not sending keepalive pings even after configuring `GRPC_ARG_KEEPALIVE_TIME_MS` and `GRPC_ARG_KEEPALIVE_TIMEOUT_MS`? + * This can happen in the following cases - + * There are no RPCs in flight and `GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS` has not been set to 1(defaults to 0). If we require the endpoint to be able to send pings even when there are no ongoing RPCs, `GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS` should be set to 1 as documented above. + * When there isn't any data/header being sent on the transport, gRPC clients restrict the number of pings to 2 by default. Setting `GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA` to 0 will remove this limit. diff --git a/doc/load-balancing.md b/doc/load-balancing.md new file mode 100644 index 00000000..743b779d --- /dev/null +++ b/doc/load-balancing.md @@ -0,0 +1,140 @@ +Load Balancing in gRPC +====================== + +# Scope + +This document explains the design for load balancing within gRPC. + +# Background + +Load-balancing within gRPC happens on a per-call basis, not a +per-connection basis. In other words, even if all requests come from a +single client, we still want them to be load-balanced across all servers. + +# Architecture + +## Overview + +The gRPC client supports an API that allows load balancing policies to +be implemented and plugged into gRPC. 
An LB policy is responsible for: +- receiving updated configuration and list of server addresses from the + resolver +- creating subchannels for the server addresses and managing their + connectivity behavior +- setting the overall [connectivity state](connectivity-semantics-and-api.md) + (usually computed by aggregating the connectivity states of its subchannels) + of the channel +- for each RPC sent on the channel, determining which subchannel to send + the RPC on + +There are a number of LB policies provided with gRPC. The most +notable ones are `pick_first` (the default), `round_robin`, and +`grpclb`. There are also a number of additional LB policies to support +[xDS](grpc_xds_features.md), although they are not currently configurable +directly. + +## Workflow + +Load-balancing policies fit into the gRPC client workflow in between +name resolution and the connection to the server. Here's how it all +works: + +![image](images/load-balancing.png) + +1. On startup, the gRPC client issues a [name resolution](naming.md) request + for the server name. The name will resolve to a list of IP addresses, + a [service config](service_config.md) that indicates which client-side + load-balancing policy to use (e.g., `round_robin` or `grpclb`) and + provides a configuration for that policy, and a set of attributes + (channel args in C-core). +2. The client instantiates the load balancing policy and passes it its + configuration from the service config, the list of IP addresses, and + the attributes. +3. The load balancing policy creates a set of subchannels for the IP + addresses of the servers (which might be different from the IP + addresses returned by the resolver; see below). It also watches the + subchannels' connectivity states and decides when each subchannel + should attempt to connect. +4. For each RPC sent, the load balancing policy decides which + subchannel (i.e., which server) the RPC should be sent to. + +See below for more information on `grpclb`. 
+ +## Load Balancing Policies + +### `pick_first` + +This is the default LB policy if the service config does not specify any +LB policy. It does not require any configuration. + +The `pick_first` policy takes a list of addresses from the resolver. It +attempts to connect to those addresses one at a time, in order, until it +finds one that is reachable. If none of the addresses are reachable, it +sets the channel's state to TRANSIENT_FAILURE while it attempts to +reconnect. Appropriate [backoff](connection-backoff.md) is applied for +repeated connection attempts. + +If it is able to connect to one of the addresses, it sets the channel's +state to READY, and then all RPCs sent on the channel will be sent to +that address. If the connection to that address is later broken, +the `pick_first` policy will put the channel into state IDLE, and it +will not attempt to reconnect until the application requests that it +does so (either via the channel's connectivity state API or by sending +an RPC). + +### `round_robin` + +This LB policy is selected via the service config. It does not require +any configuration. + +This policy takes a list of addresses from the resolver. It creates a +subchannel for each of those addresses and constantly monitors the +connectivity state of the subchannels. Whenever a subchannel becomes +disconnected, the `round_robin` policy will ask it to reconnect, with +appropriate connection [backoff](connection-backoff.md). + +The policy sets the channel's connectivity state by aggregating the +states of the subchannels: +- If any one subchannel is in READY state, the channel's state is READY. +- Otherwise, if there is any subchannel in state CONNECTING, the channel's + state is CONNECTING. +- Otherwise, if there is any subchannel in state IDLE, the channel's state is + IDLE. +- Otherwise, if all subchannels are in state TRANSIENT_FAILURE, the channel's + state is TRANSIENT_FAILURE. 
+ +Note that when a given subchannel reports TRANSIENT_FAILURE, it is +considered to still be in TRANSIENT_FAILURE until it successfully +reconnects and reports READY. In particular, we ignore the transition +from TRANSIENT_FAILURE to CONNECTING. + +When an RPC is sent on the channel, the `round_robin` policy will +iterate over all subchannels that are currently in READY state, sending +each successive RPC to the next successive subchannel in the list, +wrapping around to the start of the list when needed. + +### `grpclb` + +(This policy is deprecated. We recommend using [xDS](grpc_xds_features.md) +instead.) + +This LB policy was originally intended as gRPC's primary extensibility +mechanism for load balancing. The intent was that instead of adding new +LB policies directly in the client, the client could implement only +simple algorithms like `round_robin`, and any more complex algorithms +would be provided by a look-aside load balancer. + +The client relies on the load balancer to provide _load balancing +configuration_ and _the list of server addresses_ to which the client should +send requests. The balancer updates the server list as needed to balance +the load as well as handle server unavailability or health issues. The +load balancer will make any necessary complex decisions and inform the +client. The load balancer may communicate with the backend servers to +collect load and health information. + +The `grpclb` policy uses the addresses returned by the resolver (if any) +as fallback addresses, which are used when it loses contact with the +balancers. + +The `grpclb` policy gets the list of addresses of the balancers to talk to +via an attribute returned by the resolver. diff --git a/doc/naming.md b/doc/naming.md new file mode 100644 index 00000000..f975ffc2 --- /dev/null +++ b/doc/naming.md @@ -0,0 +1,88 @@ +# gRPC Name Resolution + +## Overview + +gRPC supports DNS as the default name-system. 
A number of alternative +name-systems are used in various deployments. We support an API that is +general enough to support a range of name-systems and the corresponding +syntax for names. The gRPC client library in various languages will +provide a plugin mechanism so resolvers for different name-systems can +be plugged in. + +## Detailed Design + +### Name Syntax + +A fully qualified, self contained name used for gRPC channel construction +uses URI syntax as defined in [RFC 3986](https://tools.ietf.org/html/rfc3986). + +The URI scheme indicates what resolver plugin to use. If no scheme +prefix is specified or the scheme is unknown, the `dns` scheme is used +by default. + +The URI path indicates the name to be resolved. + +Most gRPC implementations support the following URI schemes: + +- `dns:[//authority/]host[:port]` -- DNS (default) + - `host` is the host to resolve via DNS. + - `port` is the port to return for each address. If not specified, + 443 is used (but some implementations default to 80 for insecure + channels). + - `authority` indicates the DNS server to use, although this is only + supported by some implementations. (In C-core, the default DNS + resolver does not support this, but the c-ares based resolver + supports specifying this in the form "IP:port".) + +- `unix:path`, `unix://absolute_path` -- Unix domain sockets (Unix systems only) + - `path` indicates the location of the desired socket. + - In the first form, the path may be relative or absolute; in the + second form, the path must be absolute (i.e., there will actually be + three slashes, two prior to the path and another to begin the + absolute path). + +- `unix-abstract:abstract_path` -- Unix domain socket in abstract namespace (Unix systems only) + - `abstract_path` indicates a name in the abstract namespace. + - The name has no connection with filesystem pathnames. + - No permissions will apply to the socket - any process/user may access the socket. 
+ - The underlying implementation of Abstract sockets uses a null byte ('\0') + as the first character; the implementation will prepend this null. Do not include + the null in `abstract_path`. + - `abstract_path` cannot contain null bytes. + - TODO(https://github.com/grpc/grpc/issues/24638): Unix allows abstract socket names to contain null bytes, + but this is not supported by the gRPC C-core implementation. + +The following schemes are supported by the gRPC C-core implementation, +but may not be supported in other languages: + +- `ipv4:address[:port][,address[:port],...]` -- IPv4 addresses + - Can specify multiple comma-delimited addresses of the form `address[:port]`: + - `address` is the IPv4 address to use. + - `port` is the port to use. If not specified, 443 is used. + +- `ipv6:address[:port][,address[:port],...]` -- IPv6 addresses + - Can specify multiple comma-delimited addresses of the form `address[:port]`: + - `address` is the IPv6 address to use. To use with a `port` the `address` + must enclosed in literal square brackets (`[` and `]`). Example: + `ipv6:[2607:f8b0:400e:c00::ef]:443` or `ipv6:[::]:1234` + - `port` is the port to use. If not specified, 443 is used. + +In the future, additional schemes such as `etcd` could be added. + +### Resolver Plugins + +The gRPC client library will use the specified scheme to pick the right +resolver plugin and pass it the fully qualified name string. + +Resolvers should be able to contact the authority and get a resolution +that they return back to the gRPC client library. The returned contents +include: + +- A list of resolved addresses (both IP address and port). Each address + may have a set of arbitrary attributes (key/value pairs) associated with + it, which can be used to communicate information from the resolver to the + [load balancing](load-balancing.md) policy. +- A [service config](service_config.md). 
+ +The plugin API allows the resolvers to continuously watch an endpoint +and return updated resolutions as needed. diff --git a/doc/python/server_reflection.md b/doc/python/server_reflection.md new file mode 100644 index 00000000..5e963ad1 --- /dev/null +++ b/doc/python/server_reflection.md @@ -0,0 +1,66 @@ +# gRPC Python Server Reflection + +This document shows how to use gRPC Server Reflection in gRPC Python. +Please see [C++ Server Reflection Tutorial] for general information +and more examples of how to use server reflection. + +## Enable server reflection in Python servers + +gRPC Python Server Reflection is an add-on library. To use it, first install +the [grpcio-reflection] PyPI package into your project. + +Note that with Python you need to manually register the service +descriptors with the reflection service implementation when creating a server +(this isn't necessary with e.g. C++ or Java). +```python +# add the following import statement to use server reflection +from grpc_reflection.v1alpha import reflection +# ... +def serve(): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server) + # the reflection service will be aware of "Greeter" and "ServerReflection" services. + SERVICE_NAMES = ( + helloworld_pb2.DESCRIPTOR.services_by_name['Greeter'].full_name, + reflection.SERVICE_NAME, + ) + reflection.enable_server_reflection(SERVICE_NAMES, server) + server.add_insecure_port('[::]:50051') + server.start() +``` + +Please see [greeter_server_with_reflection.py] in the examples directory for the full +example, which extends the gRPC [Python `Greeter` example] with a reflection-enabled server.
+ +After starting the server, you can verify that the server reflection +is working properly by using the [`grpc_cli` command line tool]: + + ```sh + $ grpc_cli ls localhost:50051 + ``` + + output: + ```sh + grpc.reflection.v1alpha.ServerReflection + helloworld.Greeter + ``` + + For more examples and instructions on how to use the `grpc_cli` tool, + please refer to the [`grpc_cli` documentation] and the + [C++ Server Reflection Tutorial]. + +## Additional Resources + +The [Server Reflection Protocol] provides detailed +information about how server reflection works and describes the server reflection +protocol in detail. + + +[C++ Server Reflection Tutorial]: ../server_reflection_tutorial.md +[grpcio-reflection]: https://pypi.org/project/grpcio-reflection/ +[greeter_server_with_reflection.py]: https://github.com/grpc/grpc/blob/master/examples/python/helloworld/greeter_server_with_reflection.py +[Python `Greeter` example]: https://github.com/grpc/grpc/tree/master/examples/python/helloworld +[`grpc_cli` command line tool]: https://github.com/grpc/grpc/blob/master/doc/command_line_tool.md +[`grpc_cli` documentation]: ../command_line_tool.md +[C++ Server Reflection Tutorial]: ../server_reflection_tutorial.md +[Server Reflection Protocol]: ../server-reflection.md diff --git a/doc/security_audit.md b/doc/security_audit.md new file mode 100644 index 00000000..a7b935a2 --- /dev/null +++ b/doc/security_audit.md @@ -0,0 +1,70 @@ +# gRPC Security Audit + +A third-party security audit of the gRPC C++ stack was performed by [Cure53](https://cure53.de) in October 2019. The full report can be found [here](https://github.com/grpc/grpc/tree/master/doc/grpc_security_audit.pdf). + +# Addressing grpc_security_audit + +The following describes how the gRPC team has addressed or will address each of the security issues pointed out in the report. + +## GRP-01-001 DoS through uninitialized pointer dereference + +GRP-01-001 was fixed in version 1.24.0 and above with https://github.com/grpc/grpc/pull/20351.
The fix was also patched in version 1.23.1. + +## GRP-01-002 Refs to freed memory not automatically nulled +GRP-01-002 describes a programming pattern in gRPC Core where `gpr_free` is called and then the pointer is nulled afterwards. GRP-01-002 can be split into two concerns: 1) dangling pointer bugs and 2) the potential vulnerability of leveraging other bugs to access data through a freed pointer. + +Regarding 1), gRPC uses a suite of sanitizer tests (asan, tsan, etc) to detect and fix any memory-related bugs. gRPC is also in the process of moving to c++ and the standard library, enabling the use of smart pointers in Core and thus making it harder to generate memory-related bugs. There are also plans to remove `gpr_free` in general. + +Regarding 2), moving to smart pointers (in particular, unique_ptr) will help this issue as well. In addition, gRPC has continuous fuzzing tests to find and resolve security issues, and the pen test did not discover any concrete vulnerabilities in this area. + +Below is a list of alternatives that gRPC team considered. + + +### Alternative #1: Rewrite gpr_free to take void\*\* +One solution is to change the API of `gpr_free` so that it automatically nulls the given pointer after freeing it. + +``` +gpr_free (void** ptr) { + ... + *ptr = nullptr; +} +``` + +This defensive programming pattern would help protect gRPC from the potential exploits and latent dangling pointer bugs mentioned in the security report. + +However, performance would be a significant concern as we are now unconditionally adding a store to every gpr_free call, and there are potentially hundreds of these per RPC. At the RPC layer, this can add up to prohibitive costs. + +Maintainability is also an issue since this approach impacts use of `*const`. Member pointers that are set in the initialization list of a constructor and not changed thereafter can be declared `*const`. 
This is a useful compile-time check if the member is taking ownership of something that was passed in by argument or allocated through a helper function called by the constructor initializer list. If this thing needs to be `gpr_free`'d using the proposed syntax, it can no longer be `*const` and we lose these checks (or we have to const_cast it which is also error-prone). + +Another concern is readability - this `gpr_free` interface is less intuitive than the current one. + +Yet another concern is that the use of non-smart pointers doesn’t imply ownership - it doesn’t protect against spare copies of the same pointers. + +### Alternative #2: Add another gpr_free to the Core API +Adding an alternative `gpr_free` that nulls the given pointer is undesirable because we cannot enforce that we’re using this version of `gpr_free` everywhere we need to. It doesn’t solve the original problem because it doesn’t reduce the chance of programmer error. + +Like alternative #1, this solution doesn’t protect against spare copies of the same pointers and is subject to the same maintainability concerns. + +### Alternative #3: Rewrite gpr_free to take void\*& +``` +gpr_free (void*& ptr) { + ... + ptr = nullptr; +} +``` +This falls into the same pitfalls as solution #1 and furthermore is C89 non-compliant, which is a current requirement for `gpr_free`. Moreover, Google’s style guide discourages non-const reference parameters, so this is even less desirable than solution #1. + + +### Conclusion +Because of performance and maintainability concerns, GRP-01-002 will be addressed through the ongoing work to move gRPC Core to C++ and smart pointers and the future work of removing `gpr_free` in general. We will continue to leverage our sanitizer and fuzzing tests to help expose vulnerabilities. 
+ +## GRP-01-003 Calls to malloc suffer from potential integer overflows +The vulnerability, as defined by the report, is that calls to `gpr_malloc` in the C-core codebase may suffer from potential integer overflow in cases where we multiply the array element size by the size of the array. The penetration testers did not identify a concrete place where this occurred, but rather emphasized that the coding pattern itself had potential to lead to vulnerabilities. The report’s suggested solution for GRP-01-003 was to create a `calloc(size_t nmemb, size_t size)` wrapper that contains integer overflow checks. + +However, gRPC team firmly believes that gRPC Core should only use integer overflow checks in the places where they’re needed; for example, any place where remote input influences the input to `gpr_malloc` in an unverified way. This is because bounds-checking is very expensive at the RPC layer. + +Determining exactly where bounds-checking is needed requires an audit of tracing each `gpr_malloc` (or `gpr_realloc` or `gpr_zalloc`) call up the stack to determine if the sufficient bounds-checking was performed. This kind of audit, done manually, is fairly expensive engineer-wise. + +### Conclusion +GRP-01-003 will be addressed through leveraging gRPC Core fuzzer tests to actively identify and resolve any integer overflow issues. If any issues are identified, we may create a `gpr_safe_malloc(size_t nmemb, size_t size)` wrapper to consolidate bounds-checking in one place. This function will *not* zero out memory because of performance concerns, and so will not be a calloc-style wrapper. 
+ diff --git a/doc/server-reflection.md b/doc/server-reflection.md new file mode 100644 index 00000000..c0b86f06 --- /dev/null +++ b/doc/server-reflection.md @@ -0,0 +1,176 @@ +GRPC Server Reflection Protocol +=============================== + +This document describes server reflection as an optional extension for servers +to assist clients in runtime construction of requests without having stub +information precompiled into the client. + +The primary usecase for server reflection is to write (typically) command line +debugging tools for talking to a gRPC server. In particular, such a tool will +take in a method and a payload (in human readable text format), send it to the +server (typically in binary proto wire format), and then take the response and +decode it to text to present to the user. + +This broadly involves two problems: determining what formats (which protobuf +messages) a server’s method uses, and determining how to convert messages +between human readable format and the (likely binary) wire format. + +## Method reflection + +We want to be able to answer the following queries: + 1. What methods does a server export? + 2. For a particular method, how do we call it? +Specifically, what are the names of the methods, are those methods unary or +streaming, and what are the types of the argument and result? + +The first proposed version of the protocol is here: +https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto + +Note that a server is under no obligation to return a complete list of all +methods it supports. For example, a reverse proxy may support server reflection +for methods implemented directly on the proxy but not enumerate all methods +supported by its backends. + + +### Open questions on method reflection + * Consider how to extend this protocol to support non-protobuf methods.
+ +## Argument reflection +The second half of the problem is converting between the human readable +input/output of a debugging tool and the binary format understood by the +method. + +This is obviously dependent on protocol type. At one extreme, if both the +server and the debugging tool accept JSON, there may be no need for such a +conversion in the first place. At the opposite extreme, a server using a custom +binary format has no hope of being supported by a generic system. The +intermediate interesting common case is a server which speaks binary-proto and +a debugging client which speaks either ascii-proto or json-proto. + +One approach would be to require servers directly support human readable input. +In the future method reflection may be extended to document such support, +should it become widespread or standardized. + +## Protobuf descriptors + +A second would be for the server to export its +google::protobuf::DescriptorDatabase over the wire. This is very easy to +implement in C++, and Google implementations of a similar protocol already +exist in C++, Go, and Java. + +This protocol mostly returns FileDescriptorProtos, which are a proto encoding +of a parsed .proto file. It supports four queries: + 1. The FileDescriptorProto for a given file name + 2. The FileDescriptorProto for the file with a given symbol + 3. The FileDescriptorProto for the file with a given extension + 4. The list of known extension tag numbers of a given type + +These directly correspond to the methods of +google::protobuf::DescriptorDatabase. Note that this protocol includes support +for extensions, which have been removed from proto3 but are still in widespread +use in Google’s codebase. + +Because most usecases will require also requesting the transitive dependencies +of requested files, the queries will also return all transitive dependencies of +the returned file. 
Should interesting usecases for non-transitive queries turn +up later, we can easily extend the protocol to support them. + +### Reverse proxy traversal + +One potential issue with naive reverse proxies is that, while any individual +server will have a consistent and valid picture of the proto DB which is +sufficient to handle incoming requests, incompatibilities will arise if the +backend servers have a mix of builds. For example, if a given message is moved +from foo.proto to bar.proto, and the client requests foo.proto from an old +server and bar.proto from a new server, the resulting database will have a +double definition. + +To solve this problem, the protocol is structured as a bidirectional stream, +ensuring all related requests go to a single server. This has the additional +benefit that overlapping recursive requests don’t require sending a lot of +redundant information, because there is a single stream to maintain context +between queries. + +``` +package grpc.reflection.v1alpha; +message DescriptorDatabaseRequest { + string host = 1; + oneof message_request { + string files_for_file_name = 3; + string files_for_symbol_name = 4; + FileContainingExtensionRequest file_containing_extension = 5; + string list_all_extensions_of_type = 6; + } +} + +message FileContainingExtensionRequest { + string base_message = 1; + int64 extension_id = 2; +} + +message DescriptorDatabaseResponse { + string valid_host = 1; + DescriptorDatabaseRequest original_request = 2; + oneof message_response { + // These are proto2 type google.protobuf.FileDescriptorProto, but + // we avoid taking a dependency on descriptor.proto, which uses + // proto2 only features, by making them opaque + // bytes instead + repeated bytes fd_proto = 4; + ListAllExtensionsResponse extensions_response = 5; + // Notably includes error code 5, NOT FOUND + int32 error_code = 6; + } +} + +message ListAllExtensionsResponse { + string base_type_name; + repeated int64 extension_number; +} + +service 
ProtoDescriptorDatabase { + rpc DescriptorDatabaseInfo(stream DescriptorDatabaseRequest) returns (stream DescriptorDatabaseResponse); +} +``` + +Any given request must either result in an error code or an answer, usually in +the form of a series of FileDescriptorProtos with the requested file itself +and all previously unsent transitive imports of that file. Servers may track +which FileDescriptorProtos have been sent on a given stream, for a given value +of valid_host, and avoid sending them repeatedly for overlapping requests. + +| message_request message | Result | +| --------------------------- | ----------------------------------------------- | +| files_for_file_name | transitive closure of file name | +| files_for_symbol_name | transitive closure file containing symbol | +| file_containing_extension | transitive closure of file containing a given extension number of a given symbol | +| list_all_extensions_of_type | ListAllExtensionsResponse containing all known extension numbers of a given type | + +At some point it would make sense to additionally also support any.proto’s +format. Note that known any.proto messages can be queried by symbol using this +protocol even without any such support, by parsing the url and extracting the +symbol name from it. + +## Language specific implementation thoughts +All of the information needed to implement Proto reflection is available to the +code generator, but I’m not certain we actually generate this in every +language. If the proto implementation in the language doesn’t have something +like google::protobuf::DescriptorPool the grpc implementation for that language +will need to index those FileDescriptorProtos by file and symbol and imports. + +One issue is that some grpc implementations are very loosely coupled with +protobufs; in such implementations it probably makes sense to split apart these +reflection APIs so as not to take an additional proto dependency. 
+ +## Known Implementations + +Enabling server reflection differs from language to language. Here are links to docs relevant to +each language: + +- [Java](https://github.com/grpc/grpc-java/blob/master/documentation/server-reflection-tutorial.md#enable-server-reflection) +- [Go](https://github.com/grpc/grpc-go/blob/master/Documentation/server-reflection-tutorial.md#enable-server-reflection) +- [C++](https://grpc.io/grpc/cpp/md_doc_server_reflection_tutorial.html) +- [C#](https://github.com/grpc/grpc/blob/master/doc/csharp/server_reflection.md) +- [Python](https://github.com/grpc/grpc/blob/master/doc/python/server_reflection.md) +- Ruby: not yet implemented [#2567](https://github.com/grpc/grpc/issues/2567) +- Node: not yet implemented [#2568](https://github.com/grpc/grpc/issues/2568) diff --git a/doc/server_reflection_tutorial.md b/doc/server_reflection_tutorial.md new file mode 100644 index 00000000..ccad0605 --- /dev/null +++ b/doc/server_reflection_tutorial.md @@ -0,0 +1,189 @@ +# gRPC Server Reflection Tutorial + +gRPC Server Reflection provides information about publicly-accessible gRPC +services on a server, and assists clients at runtime to construct RPC +requests and responses without precompiled service information. It is used by +gRPC CLI, which can be used to introspect server protos and send/receive test +RPCs. + +## Enable Server Reflection + +### Enable server reflection in C++ servers + +C++ Server Reflection is an add-on library, `libgrpc++_reflection`. To enable C++ +server reflection, you can link this library to your server binary. + +Some platforms (e.g. Ubuntu 11.10 onwards) only link in libraries that directly +contain symbols used by the application. On these platforms, LD flag +`--no-as-needed` is needed for dynamic linking and `--whole-archive` is +needed for static linking. + +This [Makefile](../examples/cpp/helloworld/Makefile#L37-L45) demonstrates +enabling C++ server reflection on Linux and MacOS.
+ +## Test services using Server Reflection + +After enabling Server Reflection in a server application, you can use gRPC CLI +to test its services. + +Instructions on how to use gRPC CLI can be found at +[command_line_tool.md](command_line_tool.md), or using `grpc_cli help` command. + +Here we use `examples/cpp/helloworld` as an example to show the use of gRPC +Server Reflection and gRPC CLI. First, we need to build gRPC CLI and setup an +example server with Server Reflection enabled. + +- Setup an example server + + Server Reflection has already been enabled in the + [Makefile](../examples/cpp/helloworld/Makefile) of the helloworld example. We + can simply make it and run the greeter_server. + + ```sh + $ make -C examples/cpp/helloworld + $ examples/cpp/helloworld/greeter_server & + ``` + +- Build gRPC CLI + + ```sh + make grpc_cli + cd bins/opt + ``` + + gRPC CLI binary `grpc_cli` can be found at `bins/opt/` folder. This tool is + still new and does not have a `make install` target yet. + +### List services + +`grpc_cli ls` command lists services and methods exposed at a given port + +- List all the services exposed at a given port + + ```sh + $ grpc_cli ls localhost:50051 + ``` + + output: + ```sh + helloworld.Greeter + grpc.reflection.v1alpha.ServerReflection + ``` + +- List one service with details + + `grpc_cli ls` command inspects a service given its full name (in the format of + \.\). It can print information with a long listing format + when `-l` flag is set. This flag can be used to get more details about a + service. + + ```sh + $ grpc_cli ls localhost:50051 helloworld.Greeter -l + ``` + + output: + ```sh + filename: helloworld.proto + package: helloworld; + service Greeter { + rpc SayHello(helloworld.HelloRequest) returns (helloworld.HelloReply) {} + } + + ``` + +### List methods + +- List one method with details + + `grpc_cli ls` command also inspects a method given its full name (in the + format of \.\.\). 
+ + ```sh + $ grpc_cli ls localhost:50051 helloworld.Greeter.SayHello -l + ``` + + output: + ```sh + rpc SayHello(helloworld.HelloRequest) returns (helloworld.HelloReply) {} + ``` + +### Inspect message types + +We can use`grpc_cli type` command to inspect request/response types given the +full name of the type (in the format of \.\). + +- Get information about the request type + + ```sh + $ grpc_cli type localhost:50051 helloworld.HelloRequest + ``` + + output: + ```sh + message HelloRequest { + optional string name = 1; + } + ``` + +### Call a remote method + +We can send RPCs to a server and get responses using `grpc_cli call` command. + +- Call a unary method + + ```sh + $ grpc_cli call localhost:50051 SayHello "name: 'gRPC CLI'" + ``` + + output: + ```sh + message: "Hello gRPC CLI" + ``` + +## Use Server Reflection in a C++ client + +Server Reflection can be used by clients to get information about gRPC services +at runtime. We've provided a descriptor database called +[grpc::ProtoReflectionDescriptorDatabase](../test/cpp/util/proto_reflection_descriptor_database.h) +which implements the +[google::protobuf::DescriptorDatabase](https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.descriptor_database#DescriptorDatabase) +interface. It manages the communication between clients and reflection services +and the storage of received information. Clients can use it as using a local +descriptor database. + +- To use Server Reflection with grpc::ProtoReflectionDescriptorDatabase, first + initialize an instance with a grpc::Channel. + + ```c++ + std::shared_ptr channel = + grpc::CreateChannel(server_address, server_cred); + grpc::ProtoReflectionDescriptorDatabase reflection_db(channel); + ``` + +- Then use this instance to feed a + [google::protobuf::DescriptorPool](https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.descriptor#DescriptorPool). 
+ + ```c++ + google::protobuf::DescriptorPool desc_pool(&reflection_db); + ``` + +- Example usage of this descriptor pool + + * Get Service/method descriptors. + + ```c++ + const google::protobuf::ServiceDescriptor* service_desc = + desc_pool->FindServiceByName("helloworld.Greeter"); + const google::protobuf::MethodDescriptor* method_desc = + desc_pool->FindMethodByName("helloworld.Greeter.SayHello"); + ``` + + * Get message type descriptors and create messages dynamically. + + ```c++ + const google::protobuf::Descriptor* request_desc = + desc_pool->FindMessageTypeByName("helloworld.HelloRequest"); + google::protobuf::DynamicMessageFactory dmf; + google::protobuf::Message* request = dmf.GetPrototype(request_desc)->New(); + ``` + diff --git a/doc/server_side_auth.md b/doc/server_side_auth.md new file mode 100644 index 00000000..5905626f --- /dev/null +++ b/doc/server_side_auth.md @@ -0,0 +1,61 @@ +Server-side API for Authenticating Clients +========================================== + +NOTE: This document describes how server-side authentication works in C-core based gRPC implementations only. In gRPC Java and Go, server side authentication is handled differently. +NOTE2: `CallCredentials` class is only valid for secure channels in C-Core. So, for connections under insecure channels, features below might not be available. + +## AuthContext + +To perform server-side authentication, gRPC exposes the *authentication context* for each call. The context exposes important authentication-related information about the RPC such as the type of security/authentication type being used and the peer identity. + +The authentication context is structured as a multi-map of key-value pairs - the *auth properties*. In addition to that, for authenticated RPCs, the set of properties corresponding to a selected key will represent the verified identity of the caller - the *peer identity*. + +The contents of the *auth properties* are populated by an *auth interceptor*. 
The interceptor also chooses which property key will act as the peer identity (e.g. for client certificate authentication this property will be `"x509_common_name"` or `"x509_subject_alternative_name"`). + +WARNING: AuthContext is the only reliable source of truth when it comes to authenticating RPCs. Using any other call/context properties for authentication purposes is wrong and inherently unsafe. + +#### Example AuthContext contents + +For a secure channel using mutual TLS authentication with both client and server certificates (test certificates from this repository are used). + +Populated auth properties: +``` +"transport_security_type": "ssl" # connection is secured using TLS/SSL +"x509_common_name": "*.test.google.com" # from client's certificate +"x509_pem_cert": "-----BEGIN CERTIFICATE-----\n..." # client's PEM encoded certificate +"x509_subject_alternative_name": "*.test.google.fr" +"x509_subject_alternative_name": "waterzooi.test.google.be" +"x509_subject_alternative_name": "*.test.youtube.com" +"x509_subject_alternative_name": "192.168.1.3" +``` + +The peer identity is the set of all properties named `"x509_subject_alternative_name"`: +``` +peer_identity_property_name = "x509_subject_alternative_name" +``` + +## AuthProperty + +Auth properties are elements of the AuthContext. They have a name (a key of type string) and a value which can be a string or binary data. + +## Auth Interceptors + +Auth interceptors are gRPC components that populate contents of the auth context based on gRPC's internal state and/or call metadata. +gRPC comes with some basic "interceptors" already built-in. + +WARNING: While there is a public API that allows anyone to write their own custom interceptor, please think twice before using it.
+There are legitimate uses for custom interceptors but you should keep in mind that as auth interceptors essentially decide which RPCs are authenticated and which are not, their code is very sensitive from the security perspective and getting things wrong might have serious consequences. If unsure, we strongly recommend to rely on official & proven interceptors that come with gRPC. + +#### Available auth interceptors +- TLS/SSL certificate authentication (built into gRPC's security layer, automatically used whenever you use a secure connection) +- (coming soon) JWT auth token authentication +- more will be added over time + +## Status (by language) +C-core exposes low level API to access auth context contents and to implement an auth interceptor. +In C++, the auth interceptor API is exposed as `AuthMetadataProcessor`. + +A high level API to access AuthContext contents is available in these languages: +- C++ +- C# (implementation in-progress) +- other languages coming soon diff --git a/doc/service_config.md b/doc/service_config.md new file mode 100644 index 00000000..57f5e198 --- /dev/null +++ b/doc/service_config.md @@ -0,0 +1,117 @@ +Service Config in gRPC +====================== + +# Objective + +The service config is a mechanism that allows service owners to publish +parameters to be automatically used by all clients of their service. + +# Format + +The fields of the service config are defined by the +[`grpc.service_config.ServiceConfig` protocol buffer +message](https://github.com/grpc/grpc-proto/blob/master/grpc/service_config/service_config.proto). +Note that new fields may be added in the future as new functionality is +introduced. + +Internally, gRPC uses the service config in JSON form. The JSON +representation is the result of converting the protobuf form into JSON +using the normal [protobuf to JSON translation +rules](https://developers.google.com/protocol-buffers/docs/proto3#json). 
+In particular, this means: +- Field names are converted from `snake_case` to `camelCase`. +- Field values are converted as per the documented translation rules: + - Strings, 32-bit integers, and bools are converted into the + corresponding JSON types. + - 64-bit integers are converted into strings (e.g., `"251"`). + - The value of a repeated field will be represented as a JSON array. + - The value of a `google.protobuf.Duration` will be represented as a + string containing a decimal number of seconds (e.g., `"1.000340012s"`). + +For more details, see the protobuf docs linked above. + +Note that the JSON representation has one advantage over the protobuf +representation, which is that it is possible to encode configurations +for [LB policies](load-balancing.md) that are not known to gRPC. In +protobuf form, the `loadBalancingConfig` field contains a `oneof` +supporting only the built-in LB policies. However, in JSON form, the +field inside the `oneof` is encoded as a string that indicates the LB +policy name. In JSON form, that string can be any arbitrary value, not +just one of the supported policies inside of the `oneof`, so third-party +policies can be selected. + +# Architecture + +A service config is associated with a server name. The [name +resolver](naming.md) plugin, when asked to resolve a particular server +name, will return both the resolved addresses and the service config. + +The name resolver returns the service config to the gRPC client in JSON form. +Individual resolver implementations determine where and in what format the +service config is stored. If the resolver implementation obtains the +service config in protobuf form, it must convert it to JSON. +Alternatively, a resolver implementation may obtain the service config +already in JSON form, in which case it may return it directly. Or it +may construct the JSON dynamically from some other source data. 
+ +For details of how the DNS resolver plugin supports service configs, see +[gRFC A2: Service Config via +DNS](https://github.com/grpc/proposal/blob/master/A2-service-configs-in-dns.md). + +# Example + +Here is an example service config in protobuf form: + +``` +{ + // Use round_robin LB policy. + load_balancing_config: { round_robin: {} } + // This method config applies to method "foo/bar" and to all methods + // of service "baz". + method_config: { + name: { + service: "foo" + method: "bar" + } + name: { + service: "baz" + } + // Default timeout for matching methods. + timeout: { + seconds: 1 + nanos: 1 + } + } +} +``` + +Here is the same example service config in JSON form: + +``` +{ + "loadBalancingConfig": [ { "round_robin": {} } ], + "methodConfig": [ + { + "name": [ + { "service": "foo", "method": "bar" }, + { "service": "baz" } + ], + "timeout": "1.000000001s" + } + ] +} +``` + +# APIs + +The service config is used in the following APIs: + +- In the resolver API, used by resolver plugins to return the service + config to the gRPC client. +- In the gRPC client API, where users can query the channel to obtain + the service config associated with the channel (for debugging + purposes). +- In the gRPC client API, where users can set the service config + explicitly. This can be used to set the config in unit tests. It can + also be used to set the default config that will be used if the + resolver plugin does not return a service config. diff --git a/doc/ssl-performance.md b/doc/ssl-performance.md new file mode 100644 index 00000000..0ef32b30 --- /dev/null +++ b/doc/ssl-performance.md @@ -0,0 +1,44 @@ +# SSL in gRPC and performance + +The SSL requirement of gRPC isn't necessarily making it easy to integrate. The HTTP/2 protocol requires ALPN support, which is a fairly new handshake protocol only supported by recent implementations. 
+ +As a result, we've tried hard to provide a smooth experience to our users when compiling and distributing gRPC, but this may come at performance costs. More specifically, we will sometimes build the SSL library by disabling assembly code +(by setting the `OPENSSL_NO_ASM` option), which can impact performance by an order of magnitude when processing encrypted streams. + +## gRPC C++: Building from Source + +Build system | Condition | Platform | Uses assembly optimizations +---|---|---|--- +Makefile | with OpenSSL 1.0.2 development files | all | :heavy_check_mark: +Makefile | all other cases | all | :x: +Bazel | | Linux | :heavy_check_mark: +Bazel | | MacOS | :heavy_check_mark: +Bazel | | Windows | :x: +CMake | boringssl from submodule (default) | Linux or MacOS | :heavy_check_mark: +CMake | boringssl from submodule (default), generator=Ninja | Windows | :heavy_check_mark: +CMake | boringssl from submodule (default), generator=Visual Studio | Windows | :x: +CMake | pre-installed OpenSSL 1.0.2+ (`gRPC_SSL_PROVIDER=package`) | all | :heavy_check_mark: + +## Other Languages: Binary/Source Packages + +In addition, we are shipping packages for language implementations. These packages are source packages, but also have pre-built binaries being distributed. Building packages from source may give a different result in some cases. 
+ +Language | From source | Platform | Uses assembly optimizations +---|---|---|--- +C# | n/a | Linux, 64bit | :heavy_check_mark: +C# | n/a | Linux, 32bit | :x: +C# | n/a | MacOS | :heavy_check_mark: +C# | n/a | Windows | :heavy_check_mark: +Node.JS | n/a | Linux | :heavy_check_mark: +Node.JS | n/a | MacOS | :heavy_check_mark: +Node.JS | n/a | Windows | :x: +Electron | n/a | all | :heavy_check_mark: +ObjC | Yes | iOS | :x: +PHP | Yes | all | Same as the `Makefile` case from above +PHP | No | all | :x: +Python | n/a | Linux, 64bit | :heavy_check_mark: +Python | n/a | Linux, 32bit | :x: +Python | n/a | MacOS, 64bit | :heavy_check_mark: +Python | n/a | MacOS, 32bit | :x: +Python | n/a | Windows | :x: +Ruby | No | all | :x: diff --git a/doc/status_ordering.md b/doc/status_ordering.md new file mode 100644 index 00000000..d72235b7 --- /dev/null +++ b/doc/status_ordering.md @@ -0,0 +1,17 @@ +Ordering Status and Reads in the gRPC API +----------------------------------------- + +Rules for implementors: +1. Reads and Writes Must not succeed after Status has been delivered. +2. Status is only delivered after all buffered messages are read. +3. Reads May continue to succeed after a failing write. + However, once a write fails, all subsequent writes Must fail, + and similarly, once a read fails, all subsequent reads Must fail. +4. A non-OK status received from the server is not considered an error status. +5. When an error status is known to the library, if the user asks for status, + the library Should discard messages received in the library but not delivered + to the user and then deliver the status. If the user does not ask for status + but continues reading, the library Should deliver buffered messages before + delivering status. The library MAY choose to implement the stricter version + where errors cause all buffered messages to be dropped, but this is not a + requirement. 
diff --git a/doc/statuscodes.md b/doc/statuscodes.md new file mode 100644 index 00000000..af2774d6 --- /dev/null +++ b/doc/statuscodes.md @@ -0,0 +1,74 @@ +# Status codes and their use in gRPC + +gRPC uses a set of well defined status codes as part of the RPC API. These +statuses are defined as such: + +| Code | Number | Description | +|------------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| OK | 0 | Not an error; returned on success. | +| CANCELLED | 1 | The operation was cancelled, typically by the caller. | +| UNKNOWN | 2 | Unknown error. For example, this error may be returned when a `Status` value received from another address space belongs to an error space that is not known in this address space. Also errors raised by APIs that do not return enough error information may be converted to this error. | +| INVALID_ARGUMENT | 3 | The client specified an invalid argument. Note that this differs from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments that are problematic regardless of the state of the system (e.g., a malformed file name). | +| DEADLINE_EXCEEDED | 4 | The deadline expired before the operation could complete. For operations that change the state of the system, this error may be returned even if the operation has completed successfully. For example, a successful response from a server could have been delayed long | +| NOT_FOUND | 5 | Some requested entity (e.g., file or directory) was not found. Note to server developers: if a request is denied for an entire class of users, such as gradual feature rollout or undocumented allowlist, `NOT_FOUND` may be used. 
If a request is denied for some users within a class of users, such as user-based access control, `PERMISSION_DENIED` must be used. | +| ALREADY_EXISTS | 6 | The entity that a client attempted to create (e.g., file or directory) already exists. | +| PERMISSION_DENIED | 7 | The caller does not have permission to execute the specified operation. `PERMISSION_DENIED` must not be used for rejections caused by exhausting some resource (use `RESOURCE_EXHAUSTED` instead for those errors). `PERMISSION_DENIED` must not be used if the caller can not be identified (use `UNAUTHENTICATED` instead for those errors). This error code does not imply the request is valid or the requested entity exists or satisfies other pre-conditions. | +| RESOURCE_EXHAUSTED | 8 | Some resource has been exhausted, perhaps a per-user quota, or perhaps the entire file system is out of space. | +| FAILED_PRECONDITION | 9 | The operation was rejected because the system is not in a state required for the operation's execution. For example, the directory to be deleted is non-empty, an rmdir operation is applied to a non-directory, etc. Service implementors can use the following guidelines to decide between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: (a) Use `UNAVAILABLE` if the client can retry just the failing call. (b) Use `ABORTED` if the client should retry at a higher level (e.g., when a client-specified test-and-set fails, indicating the client should restart a read-modify-write sequence). (c) Use `FAILED_PRECONDITION` if the client should not retry until the system state has been explicitly fixed. E.g., if an "rmdir" fails because the directory is non-empty, `FAILED_PRECONDITION` should be returned since the client should not retry unless the files are deleted from the directory. | +| ABORTED | 10 | The operation was aborted, typically due to a concurrency issue such as a sequencer check failure or transaction abort. 
See the guidelines above for deciding between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`. | +| OUT_OF_RANGE | 11 | The operation was attempted past the valid range. E.g., seeking or reading past end-of-file. Unlike `INVALID_ARGUMENT`, this error indicates a problem that may be fixed if the system state changes. For example, a 32-bit file system will generate `INVALID_ARGUMENT` if asked to read at an offset that is not in the range [0,2^32-1], but it will generate `OUT_OF_RANGE` if asked to read from an offset past the current file size. There is a fair bit of overlap between `FAILED_PRECONDITION` and `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific error) when it applies so that callers who are iterating through a space can easily look for an `OUT_OF_RANGE` error to detect when they are done. | +| UNIMPLEMENTED | 12 | The operation is not implemented or is not supported/enabled in this service. | +| INTERNAL | 13 | Internal errors. This means that some invariants expected by the underlying system have been broken. This error code is reserved for serious errors. | +| UNAVAILABLE | 14 | The service is currently unavailable. This is most likely a transient condition, which can be corrected by retrying with a backoff. Note that it is not always safe to retry non-idempotent operations. | +| DATA_LOSS | 15 | Unrecoverable data loss or corruption. | +| UNAUTHENTICATED | 16 | The request does not have valid authentication credentials for the operation. | + +All RPCs started at a client return a `status` object composed of an integer +`code` and a string `message`. The server-side can choose the status it +returns for a given RPC. + +The gRPC client and server-side implementations may also generate and +return `status` on their own when errors happen. Only a subset of +the pre-defined status codes are generated by the gRPC libraries. 
This +allows applications to be sure that any other code it sees was actually +returned by the application (although it is also possible for the +server-side to return one of the codes generated by the gRPC libraries). + +The following table lists the codes that may be returned by the gRPC +libraries (on either the client-side or server-side) and summarizes the +situations in which they are generated. + +| Case | Code | Generated at Client or Server | +| ------------- |:-------------| :-----:| +| Client Application cancelled the request | CANCELLED | Both | +| Deadline expires before server returns status | DEADLINE_EXCEEDED | Both | +| Method not found at server | UNIMPLEMENTED | Server| +| Server shutting down | UNAVAILABLE | Server| +| Server side application throws an exception (or does something other than returning a Status code to terminate an RPC) | UNKNOWN | Server| +| No response received before Deadline expires. This may occur either when the client is unable to send the request to the server or when the server fails to respond in time. | DEADLINE_EXCEEDED | Both| +| Some data transmitted (e.g., request metadata written to TCP connection) before connection breaks | UNAVAILABLE | Client | +| Could not decompress, but compression algorithm supported (Client -> Server) | INTERNAL | Server | +| Could not decompress, but compression algorithm supported (Server -> Client) | INTERNAL | Client | +| Compression mechanism used by client not supported at server | UNIMPLEMENTED | Server | +| Server temporarily out of resources (e.g., Flow-control resource limits reached) | RESOURCE_EXHAUSTED | Server| +| Client does not have enough memory to hold the server response | RESOURCE_EXHAUSTED | Client | +| Flow-control protocol violation | INTERNAL | Both | +| Error parsing returned status | UNKNOWN | Client | +| Incorrect Auth metadata ( Credentials failed to get metadata, Incompatible credentials set on channel and call, Invalid host set in `:authority` metadata, etc.) 
| UNAUTHENTICATED | Both | +| Request cardinality violation (method requires exactly one request but client sent some other number of requests) | UNIMPLEMENTED | Server| +| Response cardinality violation (method requires exactly one response but server sent some other number of responses) | UNIMPLEMENTED | Client| +| Error parsing response proto | INTERNAL | Client| +| Error parsing request proto | INTERNAL | Server| +| Sent or received message was larger than configured limit | RESOURCE_EXHAUSTED | Both | +| Keepalive watchdog times out | UNAVAILABLE | Both | + +The following status codes are never generated by the library: +- INVALID_ARGUMENT +- NOT_FOUND +- ALREADY_EXISTS +- FAILED_PRECONDITION +- ABORTED +- OUT_OF_RANGE +- DATA_LOSS + +Applications that may wish to [retry](https://github.com/grpc/proposal/blob/master/A6-client-retries.md) failed RPCs must decide which status codes on which to retry. As shown in the table above, the gRPC library can generate the same status code for different cases. Server applications can also return those same status codes. Therefore, there is no fixed list of status codes on which it is appropriate to retry in all applications. As a result, individual applications must make their own determination as to which status codes should cause an RPC to be retried. diff --git a/doc/unit_testing.md b/doc/unit_testing.md new file mode 100644 index 00000000..72fb1f6f --- /dev/null +++ b/doc/unit_testing.md @@ -0,0 +1,175 @@ +# How to write unit tests for gRPC C client. + +tl;dr: [Example code](https://github.com/grpc/grpc/blob/master/test/cpp/end2end/mock_test.cc). + +To unit-test client-side logic via the synchronous API, gRPC provides a mocked Stub based on googletest(googlemock) that can be programmed upon and easily incorporated in the test code. 
+ +For instance, consider an EchoService like this: + + +```proto +service EchoTestService { + rpc Echo(EchoRequest) returns (EchoResponse); + rpc BidiStream(stream EchoRequest) returns (stream EchoResponse); +} +``` + +The code generated would look something like this: + +```c +class EchoTestService final { + public: + class StubInterface { + virtual ::grpc::Status Echo(::grpc::ClientContext* context, const ::grpc::testing::EchoRequest& request, ::grpc::testing::EchoResponse* response) = 0; + … + std::unique_ptr< ::grpc::ClientReaderWriterInterface< ::grpc::testing::EchoRequest, ::grpc::testing::EchoResponse>> BidiStream(::grpc::ClientContext* context) { + return std::unique_ptr< ::grpc::ClientReaderWriterInterface< ::grpc::testing::EchoRequest, ::grpc::testing::EchoResponse>>(BidiStreamRaw(context)); + } + … + private: + virtual ::grpc::ClientReaderWriterInterface< ::grpc::testing::EchoRequest, ::grpc::testing::EchoResponse>* BidiStreamRaw(::grpc::ClientContext* context) = 0; + … + } // End StubInterface +… +} // End EchoTestService +``` + + +If we mock the StubInterface and set expectations on the pure-virtual methods we can test client-side logic without having to make any rpcs. + +A mock for this StubInterface will look like this: + + +```c +class MockEchoTestServiceStub : public EchoTestService::StubInterface { + public: + MOCK_METHOD3(Echo, ::grpc::Status(::grpc::ClientContext* context, const ::grpc::testing::EchoRequest& request, ::grpc::testing::EchoResponse* response)); + MOCK_METHOD1(BidiStreamRaw, ::grpc::ClientReaderWriterInterface< ::grpc::testing::EchoRequest, ::grpc::testing::EchoResponse>*(::grpc::ClientContext* context)); +}; +``` + + +**Generating mock code:** + +Such a mock can be auto-generated by: + + + +1. Setting flag(generate_mock_code=true) on grpc plugin for protoc, or +1. Setting an attribute(generate_mocks) in your bazel rule. + +Protoc plugin flag: + +```sh +protoc -I . --grpc_out=generate_mock_code=true:. 
--plugin=protoc-gen-grpc=`which grpc_cpp_plugin` echo.proto +``` + +Bazel rule: + +```py +grpc_proto_library( + name = "echo_proto", + srcs = ["echo.proto"], + generate_mocks = True, +) +``` + + +By adding such a flag now a header file `echo_mock.grpc.pb.h` containing the mocked stub will also be generated. + +This header file can then be included in test files along with a gmock dependency. + +**Writing tests with mocked Stub.** + +Consider the following client a user might have: + +```c +class FakeClient { + public: + explicit FakeClient(EchoTestService::StubInterface* stub) : stub_(stub) {} + + void DoEcho() { + ClientContext context; + EchoRequest request; + EchoResponse response; + request.set_message("hello world"); + Status s = stub_->Echo(&context, request, &response); + EXPECT_EQ(request.message(), response.message()); + EXPECT_TRUE(s.ok()); + } + + void DoBidiStream() { + EchoRequest request; + EchoResponse response; + ClientContext context; + std::string msg("hello"); + + std::unique_ptr> + stream = stub_->BidiStream(&context); + + request.set_message(msg "0"); + EXPECT_TRUE(stream->Write(request)); + EXPECT_TRUE(stream->Read(&response)); + EXPECT_EQ(response.message(), request.message()); + + request.set_message(msg "1"); + EXPECT_TRUE(stream->Write(request)); + EXPECT_TRUE(stream->Read(&response)); + EXPECT_EQ(response.message(), request.message()); + + request.set_message(msg "2"); + EXPECT_TRUE(stream->Write(request)); + EXPECT_TRUE(stream->Read(&response)); + EXPECT_EQ(response.message(), request.message()); + + stream->WritesDone(); + EXPECT_FALSE(stream->Read(&response)); + + Status s = stream->Finish(); + EXPECT_TRUE(s.ok()); + } + + void ResetStub(EchoTestService::StubInterface* stub) { stub_ = stub; } + + private: + EchoTestService::StubInterface* stub_; +}; +``` + +A test could initialize this FakeClient with a mocked stub having set expectations on it: + +Unary RPC: + +```c +MockEchoTestServiceStub stub; +EchoResponse resp; 
+resp.set_message("hello world"); +EXPECT_CALL(stub, Echo(_,_,_)).Times(AtLeast(1)).WillOnce(DoAll(SetArgPointee<2>(resp), Return(Status::OK))); +FakeClient client(stub); +client.DoEcho(); +``` + +Streaming RPC: + +```c +ACTION_P(copy, msg) { + arg0->set_message(msg->message()); +} + + +auto rw = new MockClientReaderWriter(); +EchoRequest msg; +EXPECT_CALL(*rw, Write(_, _)).Times(3).WillRepeatedly(DoAll(SaveArg<0>(&msg), Return(true))); +EXPECT_CALL(*rw, Read(_)). + WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true))). + WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true))). + WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true))). + WillOnce(Return(false)); + +MockEchoTestServiceStub stub; +EXPECT_CALL(stub, BidiStreamRaw(_)).Times(AtLeast(1)).WillOnce(Return(rw)); + +FakeClient client(stub); +client.DoBidiStream(); +``` + diff --git a/doc/versioning.md b/doc/versioning.md new file mode 100644 index 00000000..3d6849c2 --- /dev/null +++ b/doc/versioning.md @@ -0,0 +1,45 @@ +# gRPC Versioning Guide + +## Versioning Overview + +All gRPC implementations use a three-part version number (`vX.Y.Z`) and follow [semantic versioning](https://semver.org/), which defines the semantics of major, minor and patch components of the version number. In addition to that, gRPC versions evolve according to these rules: +- **Major version bumps** only happen on rare occasions. In order to qualify for a major version bump, certain criteria described later in this document need to be met. Most importantly, a major version increase must not break wire compatibility with other gRPC implementations so that existing gRPC libraries remain fully interoperable. +- **Minor version bumps** happen approx. every 6 weeks as part of the normal release cycle as defined by the gRPC release process. A new release branch named vMAJOR.MINOR.PATCH is cut every 6 weeks based on the [release schedule](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md). 
+- **Patch version bump** corresponds to bugfixes done on release branch. + +There are a few situations where we don't adhere to the Semantic Versioning 2.0.0 strictly: +- A **minor** version will not necessarily add new functionality. This follows from the fact that we cut minor releases on a regular schedule, so we can't guarantee there will always be new features in each of the supported languages. +- Backward compatibility can be broken by a **minor** release if the API affected by the change was marked as EXPERIMENTAL upon its introduction. + +There are also a few extra rules regarding adding new gRPC implementations (e.g. adding support for a new language) +- New implementations start at v0.x.y version and until they reach 1.0, they are considered not ready for production workloads. Breaking API changes are allowed in the 0.x releases as the library is not considered stable yet. +- The "1.0" release has semantics of GA (generally available) and being production ready. Requirements to reach this milestone are at least these + - basic RPC features are feature complete and tested + - implementation is tested for interoperability with other languages + - Public API is declared stable +- Once a gRPC library reaches 1.0 (or higher version), the normal rules for versioning apply. + +## Policy for updating the major version number + +To avoid user confusion and simplify reasoning, the gRPC releases in different languages try to stay synchronized in terms of major and minor version (all languages follow the same release schedule). Nevertheless, because we also strictly follow semantic versioning, there are circumstances in which a gRPC implementation needs to break the version synchronicity and do a major version bump independently of other languages. 
+ +### Situations when it's ok to do a major version bump +- **change forced by the language ecosystem:** when the language itself or its standard libraries that we depend on make a breaking change (something which is out of our control), reacting with updating gRPC APIs may be the only adequate response. +- **voluntary change:** Even in non-forced situations, there might be circumstances in which a breaking API change makes sense and represents a net win, but as a rule of thumb breaking changes are very disruptive for users, cause user fragmentation and incur high maintenance costs. Therefore, breaking API changes should be very rare events that need to be considered with extreme care and the bar for accepting such changes is intentionally set very high. + Example scenarios where a breaking API change might be adequate: + - fixing a security problem which requires changes to API (need to consider the non-breaking alternatives first) + - the change leads to very significant gains to security, usability or development velocity. These gains need to be clearly documented and claims need to be supported by evidence (ideally by numbers). Costs to the ecosystem (impact on users, dev team etc.) need to be taken into account and the change still needs to be a net positive after subtracting the costs. + + All proposals to make a breaking change need to be documented as a gRFC document (in the grpc/proposal repository) that covers at least these areas: + - Description of the proposal including an explanation why the proposed change is one of the very rare events where a breaking change is introduced. + - Migration costs (= what does it mean for the users to migrate to the new API, what are the costs and risks associated with it) + - Pros of the change (what is gained and how) + - Cons of the change (e.g. 
user confusion, lost users and user trust, work needed, added maintenance costs) + - Plan for supporting users still using the old major version (in case migration to the new major version is not trivial or not everyone can migrate easily) + +Note that while major version bump allows changing APIs used by the users, it must not impact the interoperability of the implementation with other gRPC implementations and the previous major version released. That means that **no backward incompatible protocol changes are allowed**: old clients must continue interoperating correctly with new servers and new servers with old clients. + +### Situations that DON'T warrant a major version bump +- Because other languages do so. This is not a good enough reason because +doing a major version bump has high potential for disturbing and confusing the users of that language and fragmenting the user base and that is a bigger threat than having language implementations at different major version (provided the state is well documented). Having some languages at different major version seems to be unavoidable anyway (due to forced version bumps), unless we bump some languages artificially. +- "I don't like this API": In retrospect, some API decisions made in the past necessarily turn out more lucky than others, but without strong reasons that would be in favor of changing the API and without enough supporting evidence (see previous section), other strategy than making a breaking API change needs to be used. Possible options: Expand the API to make it useful again; mark API as deprecated while keeping its functionality and providing a new better API. 
diff --git a/doc/wait-for-ready.md b/doc/wait-for-ready.md new file mode 100644 index 00000000..c08f20c1 --- /dev/null +++ b/doc/wait-for-ready.md @@ -0,0 +1,14 @@ +gRPC Wait for Ready Semantics +============================= + +If an RPC is issued but the channel is in `TRANSIENT_FAILURE` or `SHUTDOWN` +states, the RPC is unable to be transmitted promptly. By default, gRPC +implementations SHOULD fail such RPCs immediately. This is known as "fail fast," +but usage of the term is historical. RPCs SHOULD NOT fail as a result of the +channel being in other states (`CONNECTING`, `READY`, or `IDLE`). + +gRPC implementations MAY provide a per-RPC option to not fail RPCs as a result +of the channel being in `TRANSIENT_FAILURE` state. Instead, the implementation +queues the RPCs until the channel is `READY`. This is known as "wait for ready." +The RPCs SHOULD still fail before `READY` if there are unrelated reasons, such +as the channel is `SHUTDOWN` or the RPC's deadline is reached. diff --git a/doc/workarounds.md b/doc/workarounds.md new file mode 100644 index 00000000..9b7f2f13 --- /dev/null +++ b/doc/workarounds.md @@ -0,0 +1,19 @@ +# gRPC Server Backward Compatibility Issues and Workarounds Management + +## Introduction +This document lists the workarounds implemented on gRPC servers for record and reference when users need to enable a certain workaround. + +## Workaround List + +### Cronet Compression + +**Workaround ID:** WORKAROUND\_ID\_CRONET\_COMPRESSION + +**Date added:** May 06, 2017 + +**Status:** Implemented in C core and C++ + +**Issue:** Before version v1.3.0-dev, gRPC iOS client's Cronet transport did not implement compression. However the clients still claim to support compression. As a result, a client fails to parse received message when the message is compressed. +The problem above was resolved in gRPC v1.3.0-dev. For backward compatibility, a server must forcingly disable compression for gRPC clients of version lower than or equal to v1.3.0-dev. 
+ +**Workaround Description:** Implemented as a server channel filter in C core. The filter identifies the version of peer client with incoming `user-agent` header of each call. If the client's gRPC version is lower than or equal to v1.3.x, a flag GRPC_WRITE_NO_COMPRESS is marked for all send_message ops which prevents compression of the messages to be sent out. diff --git a/doc/xds-test-descriptions.md b/doc/xds-test-descriptions.md new file mode 100644 index 00000000..79c874dd --- /dev/null +++ b/doc/xds-test-descriptions.md @@ -0,0 +1,737 @@ +# xDS (Load-Balancing) Interop Test Case Descriptions + +Client and server use [test.proto](../src/proto/grpc/testing/test.proto). + +## Server + +The code for the xDS test server can be found at: +[Java](https://github.com/grpc/grpc-java/blob/master/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestServer.java) (other language implementations are in progress). + +Server should accept these arguments: + +* --port=PORT + * The port the test server will run on. +* --maintenance_port=PORT + * The port for the maintenance server running health, channelz, and admin(CSDS) services. +* --secure_mode=BOOLEAN + * When set to true it uses XdsServerCredentials with the test server for security test cases. + In case of secure mode, port and maintenance_port should be different. + +## Client + +The base behavior of the xDS test client is to send a constant QPS of unary +messages and record the remote-peer distribution of the responses. Further, the +client must expose an implementation of the `LoadBalancerStatsService` gRPC +service to allow the test driver to validate the load balancing behavior for a +particular test case (see below for more details). + +The code for the xDS test client can be found at: +[Java](https://github.com/grpc/grpc-java/blob/master/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestClient.java) (other language implementations are in progress). 
+ +Clients should accept these arguments: + +* --fail_on_failed_rpcs=BOOL + * If true, the client should exit with a non-zero return code if any RPCs + fail after at least one RPC has succeeded, indicating a valid xDS config + was received. This accounts for any startup-related delays in receiving + an initial config from the load balancer. Default is false. +* --num_channels=CHANNELS + * The number of channels to create to the server. +* --qps=QPS + * The QPS per channel. +* --server=HOSTNAME:PORT + * The server host to connect to. For example, "localhost:8080" +* --stats_port=PORT + * The port for to expose the client's `LoadBalancerStatsService` + implementation. +* --rpc_timeout_sec=SEC + * The timeout to set on all outbound RPCs. Default is 20. +* --secure_mode=BOOLEAN + * When set to true it uses XdsChannelCredentials with the test client for security test cases. + +### XdsUpdateClientConfigureService + +The xDS test client's behavior can be dynamically changed in the middle of tests. +This is achieved by invoking the `XdsUpdateClientConfigureService` gRPC service +on the test client. This can be useful for tests requiring special client behaviors +that are not desirable at test initialization and client warmup. The service is +defined as: + +``` +message ClientConfigureRequest { + // Type of RPCs to send. + enum RpcType { + EMPTY_CALL = 0; + UNARY_CALL = 1; + } + + // Metadata to be attached for the given type of RPCs. + message Metadata { + RpcType type = 1; + string key = 2; + string value = 3; + } + + // The types of RPCs the client sends. + repeated RpcType types = 1; + // The collection of custom metadata to be attached to RPCs sent by the client. + repeated Metadata metadata = 2; + // The deadline to use, in seconds, for all RPCs. If unset or zero, the + // client will use the default from the command-line. 
+ int32 timeout_sec = 3; +} + +message ClientConfigureResponse {} + +service XdsUpdateClientConfigureService { + // Update the test client's configuration. + rpc Configure(ClientConfigureRequest) returns (ClientConfigureResponse); +} +``` + +The test client changes its behavior right after receiving the +`ClientConfigureRequest`. Currently it only supports configuring the type(s) +of RPCs sent by the test client, metadata attached to each type of RPCs, and the timeout. + +## Test Driver + +Note that, unlike our other interop tests, neither the client nor the server has +any notion of which of the following test scenarios is under test. Instead, a +separate test driver is responsible for configuring the load balancer and the +server backends, running the client, and then querying the client's +`LoadBalancerStatsService` to validate load balancer behavior for each of the +tests described below. + +## LoadBalancerStatsService + +The service is defined as: + +``` +message LoadBalancerStatsRequest { + // Request stats for the next num_rpcs sent by client. + int32 num_rpcs = 1; + // If num_rpcs have not completed within timeout_sec, return partial results. + int32 timeout_sec = 2; +} + +message LoadBalancerStatsResponse { + message RpcsByPeer { + // The number of completed RPCs for each peer. + map rpcs_by_peer = 1; + } + // The number of completed RPCs for each peer. + map rpcs_by_peer = 1; + // The number of RPCs that failed to record a remote peer. + int32 num_failures = 2; + map rpcs_by_method = 3; +} + +message LoadBalancerAccumulatedStatsRequest {} + +message LoadBalancerAccumulatedStatsResponse { + // The total number of RPCs have ever issued for each type. + // Deprecated: use stats_per_method.rpcs_started instead. + map num_rpcs_started_by_method = 1 [deprecated = true]; + // The total number of RPCs have ever completed successfully for each type. + // Deprecated: use stats_per_method.result instead.
+ map num_rpcs_succeeded_by_method = 2 [deprecated = true]; + // The total number of RPCs have ever failed for each type. + // Deprecated: use stats_per_method.result instead. + map num_rpcs_failed_by_method = 3 [deprecated = true]; + + message MethodStats { + // The number of RPCs that were started for this method. + int32 rpcs_started = 1; + + // The number of RPCs that completed with each status for this method. The + // key is the integral value of a google.rpc.Code; the value is the count. + map result = 2; + } + + // Per-method RPC statistics. The key is the RpcType in string form; e.g. + // 'EMPTY_CALL' or 'UNARY_CALL' + map stats_per_method = 4; +} + +service LoadBalancerStatsService { + // Gets the backend distribution for RPCs sent by a test client. + rpc GetClientStats(LoadBalancerStatsRequest) + returns (LoadBalancerStatsResponse) {} + // Gets the accumulated stats for RPCs sent by a test client. + rpc GetClientAccumulatedStats(LoadBalancerAccumulatedStatsRequest) + returns (LoadBalancerAccumulatedStatsResponse) {} +} +``` + +Note that the `LoadBalancerStatsResponse` contains the remote peer distribution +of the next `num_rpcs` *sent* by the client after receiving the +`LoadBalancerStatsRequest`. It is important that the remote peer distribution be +recorded for a block of consecutive outgoing RPCs, to validate the intended +distribution from the load balancer, rather than just looking at the next +`num_rpcs` responses received from backends, as different backends may respond +at different rates. + +## Test Cases + +### ping_pong + +This test verifies that every backend receives traffic. + +Client parameters: + +1. --num_channels=1 +1. --qps=100 +1. --fail_on_failed_rpc=true + +Load balancer configuration: + +1. 4 backends are created in a single managed instance group (MIG). + +Test driver asserts: + +1. 
All backends receive at least one RPC + +### round_robin + +This test verifies that RPCs are evenly routed according to an unweighted round +robin policy. + +Client parameters: + +1. --num_channels=1 +1. --qps=100 +1. --fail_on_failed_rpc=true + +Load balancer configuration: + +1. 4 backends are created in a single MIG. + +Test driver asserts that: + +1. Once all backends receive at least one RPC, the following 100 RPCs are + evenly distributed across the 4 backends. + +### backends_restart + +This test verifies that the load balancer will resume sending traffic to a set +of backends that is stopped and then resumed. + +Client parameters: + +1. --num_channels=1 +1. --qps=100 + +Load balancer configuration: + +1. 4 backends are created in a single MIG. + +Test driver asserts: + +1. All backends receive at least one RPC. + +The test driver records the peer distribution for a subsequent block of 100 RPCs +then stops the backends. + +Test driver asserts: + +1. No RPCs from the client are successful. + +The test driver resumes the backends. + +Test driver asserts: + +1. Once all backends receive at least one RPC, the distribution for a block of + 100 RPCs is the same as the distribution recorded prior to restart. + +### secondary_locality_gets_requests_on_primary_failure + +This test verifies that backends in a secondary locality receive traffic when +all backends in the primary locality fail. + +Client parameters: + +1. --num_channels=1 +1. --qps=100 + +Load balancer configuration: + +1. The primary MIG with 2 backends in the same zone as the client +1. The secondary MIG with 2 backends in a different zone + +Test driver asserts: + +1. All backends in the primary locality receive at least 1 RPC. +1. No backends in the secondary locality receive RPCs. + +The test driver stops the backends in the primary locality. + +Test driver asserts: + +1. All backends in the secondary locality receive at least 1 RPC. + +The test driver resumes the backends in the primary locality. 
+ +Test driver asserts: + +1. All backends in the primary locality receive at least 1 RPC. +1. No backends in the secondary locality receive RPCs. + +### secondary_locality_gets_no_requests_on_partial_primary_failure + +This test verifies that backends in a failover locality do not receive traffic +when at least one of the backends in the primary locality remains healthy. + +**Note:** Future TD features may change the expected behavior and require +changes to this test case. + +Client parameters: + +1. --num_channels=1 +1. --qps=100 + +Load balancer configuration: + +1. The primary MIG with 2 backends in the same zone as the client +1. The secondary MIG with 2 backends in a different zone + +Test driver asserts: + +1. All backends in the primary locality receive at least 1 RPC. +1. No backends in the secondary locality receive RPCs. + +The test driver stops one of the backends in the primary locality. + +Test driver asserts: + +1. All backends in the primary locality receive at least 1 RPC. +1. No backends in the secondary locality receive RPCs. + +### remove_instance_group + +This test verifies that a remaining instance group can successfully serve RPCs +after removal of another instance group in the same zone. + +Client parameters: + +1. --num_channels=1 +1. --qps=100 + +Load balancer configuration: + +1. Two MIGs with two backends each, using rate balancing mode. + +Test driver asserts: + +1. All backends receive at least one RPC. + +The test driver removes one MIG. + +Test driver asserts: + +1. All RPCs are directed to the two remaining backends (no RPC failures). + +### change_backend_service + +This test verifies that the backend service can be replaced and traffic routed +to the new backends. + +Client parameters: + +1. --num_channels=1 +1. --qps=100 +1. --fail_on_failed_rpc=true + +Load balancer configuration: + +1. One MIG with two backends + +Test driver asserts: + +1. All backends receive at least one RPC.
+ +The test driver creates a new backend service containing a MIG with two backends +and changes the TD URL map to point to this new backend service. + +Test driver asserts: + +1. All RPCs are directed to the new backend service. + +### traffic_splitting + +This test verifies that the traffic will be distributed between backend +services with the correct weights when route action is set to weighted +backend services. + +Client parameters: + +1. --num_channels=1 +1. --qps=100 + +Load balancer configuration: + +1. One MIG with one backend + +Assert: + +1. Once all backends receive at least one RPC, the following 1000 RPCs are +all sent to MIG_a. + +The test driver adds a new MIG with 1 backend, and changes the route action +to weighted backend services with {a: 20, b: 80}. + +Assert: + +1. Once all backends receive at least one RPC, the following 1000 RPCs are +distributed across the 2 backends as a: 20, b: 80. +### path_matching + +This test verifies that the traffic for a certain RPC can be routed to a +specific cluster based on the RPC path. + +Client parameters: + +1. –num_channels=1 +1. –qps=10 +1. –fail_on_failed_rpc=true +1. –rpc=“EmptyCall,UnaryCall” + +Load balancer configuration: + +1. 2 MIGs, each with 1 backend +1. routes + - “/”: MIG_default + +Assert: + +1. UnaryCall RPCs are sent to MIG_default +1. EmptyCall RPCs are sent to MIG_default + +The test driver changes route and asserts RPCs are sent to expected backends. **Note** that the default route `"/"` is always pointing to MIG_default, so all RPCs not matching the new route will be sent to MIG_default. 
+ +- {path: `/grpc.testing.TestService/EmptyCall`}: MIG_2 + - UnaryCall -> MIG_default + - EmptyCall -> MIG_2 + +- {prefix: `/grpc.testing.TestService/Unary`}: MIG_2 + - UnaryCall -> MIG_2 + - EmptyCall -> MIG_default + +- {prefix: `/grpc.testing.TestService/Unary`}: MIG_default & {path: `/grpc.testing.TestService/EmptyCall`}: MIG_2 + - UnaryCall -> MIG_default + - EmptyCall -> MIG_2 + +- {regex: `^\/.*\/UnaryCall$`}: MIG_2 + - UnaryCall -> MIG_2 + - EmptyCall -> MIG_default + +- {path: `/gRpC.tEsTinG.tEstseRvice/empTycaLl`, ignoreCase: `True`}: MIG_2 + - UnaryCall -> MIG_default + - EmptyCall -> MIG_2 + +### header_matching + +This test verifies that the traffic for a certain RPC can be routed to a +specific cluster based on the RPC header (metadata). + +Client parameters: + +1. –num_channels=1 +1. –qps=10 +1. –fail_on_failed_rpc=true +1. –rpc=“EmptyCall,UnaryCall” +1. –rpc=“EmptyCall:xds_md:exact_match” + +Load balancer configuration: + +1. 2 MIGs, each with 1 backend +1. routes + - “/”: MIG_default + +Assert: + +1. UnaryCall RPCs are sent to MIG_default +1. EmptyCall RPCs are sent to MIG_default + +The test driver changes route and asserts RPCs are sent to expected backends. **Note** that the default route `"/"` is always pointing to MIG_default, so all RPCs not matching the new route will be sent to MIG_default. 
+ +- {header `xds_md`, exact: `empty_ytpme`}: MIG_2 + - Unary -> MIG_default + - Empty -> MIG_2 + +- {header `xds_md`, prefix: `un`}: MIG_2 + - `un` is the prefix of metadata sent with UnaryCall + - Unary -> MIG_2 + - Empty -> MIG_default + +- {header `xds_md`, suffix: `me`}: MIG_2 + - `me` is the suffix of metadata sent with EmptyCall + - Unary -> MIG_default + - Empty to MIG_2 + +- {header `xds_md_numeric`, present: `True`}: MIG_2 + - Unary is sent with the metadata, so will be sent to alternative + - Unary -> MIG_2 + - Empty -> MIG_default + +- {header `xds_md`, exact: `unary_yranu`, invert: `True`}: MIG_2 + - Unary is sent with the metadata, so this will not match Unary, but will match Empty + - Unary -> MIG_default + - Empty to MIG_2 + +- {header `xds_md_numeric`, range `[100,200]`}: MIG_2 + - Unary is sent with the metadata in range + - Unary -> MIG_2 + - Empty -> MIG_default + +- {header `xds_md`, regex: `^em.*me$`}: MIG_2 + - EmptyCall is sent with the metadata + - Unary -> MIG_default + - Empty -> MIG_2 + +### gentle_failover + +This test verifies that traffic is partially diverted to a secondary locality +when > 50% of the instances in the primary locality are unhealthy. + +Client parameters: + +1. --num_channels=1 +1. --qps=100 + +Load balancer configuration: + +1. The primary MIG with 3 backends in the same zone as the client +1. The secondary MIG with 2 backends in a different zone + +Test driver asserts: + +1. All backends in the primary locality receive at least 1 RPC. +1. No backends in the secondary locality receive RPCs. + +The test driver stops 2 of 3 backends in the primary locality. + +Test driver asserts: + +1. All backends in the secondary locality receive at least 1 RPC. +1. The remaining backend in the primary locality receives at least 1 RPC. + +The test driver resumes the backends in the primary locality. + +Test driver asserts: + +1. All backends in the primary locality receive at least 1 RPC. +1. 
No backends in the secondary locality receive RPCs. + + +### load_based_failover + +This test verifies that traffic is partially diverted to a secondary locality +when the QPS is greater than the configured RPS in the primary locality. + +Client parameters: + +1. --num_channels=1 +1. --qps=100 + +Load balancer configuration: + +1. The primary MIG with 2 backends in the same zone as the client +1. The secondary MIG with 2 backends in a different zone + +Test driver asserts: + +1. All backends in the primary locality receive at least 1 RPC. +1. No backends in the secondary locality receive RPCs. + +The test driver sets `balancingMode` to `RATE`, and `maxRate` to 20 in the primary locality. + +Test driver asserts: + +1. All backends in the primary locality receive at least 1 RPC. +1. All backends in the secondary locality receive at least 1 RPC. + +The test driver sets `maxRate` to 120 in the primary locality. + +Test driver asserts: + +1. All backends in the primary locality receive at least 1 RPC. +1. No backends in the secondary locality receive RPCs. + + +### circuit_breaking + +This test verifies that the maximum number of outstanding requests is limited +by circuit breakers of the backend service. + +Client parameters: + +1. --num_channels=1 +1. --qps=100 + +Load balancer configuration: + +1. Two MIGs with each having two backends. + +The test driver configures the backend services with: + +1. path{"/grpc.testing.TestService/UnaryCall"}: MIG_1 +1. path{"/grpc.testing.TestService/EmptyCall"}: MIG_2 +1. MIG_1 circuit_breakers with max_requests = 500 +1. MIG_2 circuit breakers with max_requests = 1000 + +The test driver configures the test client to send both UnaryCall and EmptyCall, +with all RPCs keep-open. + +Assert: + +1. After reaching steady state, there are 500 UnaryCall RPCs in-flight +and 1000 EmptyCall RPCs in-flight. + +The test driver updates MIG_1's circuit breakers with max_request = 800. + +Test driver asserts: + +1.
After reaching steady state, there are 800 UnaryCall RPCs in-flight. + +### timeout + +This test verifies that traffic along a route with a `max_stream_duration` set +will cause timeouts on streams open longer than that duration. + +Client parameters: + +1. `--num_channels=1` +1. `--qps=100` + +Route Configuration: + +Two routes: + +1. Path match for `/grpc.testing.TestService/UnaryCall`, with a `route_action` + containing `max_stream_duration` of 3 seconds. +1. Default route containing no `max_stream_duration` setting. + +There are four sub-tests: + +1. `app_timeout_exceeded` + 1. Test client configured to send UnaryCall RPCs with a 1s application + timeout, and metadata of `rpc-behavior: sleep-2`. + 1. Test driver asserts client receives ~100% status `DEADLINE_EXCEEDED`. +1. `timeout_not_exceeded` + 1. Test client configured to send UnaryCall RPCs with the default + application timeout (20 seconds), and no metadata. + 1. Test driver asserts client receives ~100% status `OK`. +1. `timeout_exceeded` (executed with the below test case) +1. `timeout_different_route` + 1. Test client configured to send UnaryCall RPCs and EmptyCall RPCs with + the default application timeout (20 seconds), and metadata of + `rpc-behavior: sleep-4`. + 1. Test driver asserts client receives ~100% status `OK` for EmptyCall + and ~100% status `DEADLINE_EXCEEDED` for UnaryCall. + +### api_listener +The test case verifies a specific use case where it creates a second TD API +listener using the same name as the existing one and then deletes the old one. +The test driver verifies this is a safe way to update the API listener +configuration while keeping the existing name. + +Client parameters: + +1. --num_channels=1 +1. --qps=100 + +Load balancer configuration: + +1. One MIG with two backends. + +Assert: + +The test driver configuration steps: +1. The test driver creates the first set of forwarding rule + target proxy + +URL map with a test host name. +1.
Then the test driver creates a second set of forwarding rule + target proxy + +URL map with the same test host name. +1. The test driver deletes the first set of configurations in step 1. + +The test driver verifies, at each configuration step, the traffic is always able +to reach the designated hosts. + +### metadata_filter +This test case verifies that metadata filter configurations in URL map match +rule are effective at Traffic Director for routing selection against downstream +node metadata. + +Client parameters: + +1. --num_channels=1 +1. --qps=100 + +Load balancer configuration: + +1. Two MIGs in the same zone, each having two backends. + +There are four test sub-cases: +1. Test `MATCH_ALL` metadata filter criteria. +1. Test `MATCH_ANY` metadata filter criteria. +1. Test mixed `MATCH_ALL` and `MATCH_ANY` metadata filter criteria. +1. Test when multiple match rules with metadata filter all match. + +Assert: + +At each test sub-case described above, the test driver configures +and verifies: + +1. Set default URL map, and verify traffic goes to the original backend hosts. +1. Then patch URL map to update the match rule with metadata filter +configuration under test added. +1. Then it verifies traffic switches to alternate backend service hosts. + +This way, we test that TD correctly evaluates both matching and non-matching +configuration scenario. + +### forwarding_rule_port_match +This test verifies that request server uri port should match with the GCP +forwarding rule configuration port. + +Client parameters: + +1. --num_channels=1 +1. --qps=100 + +Load balancer configuration: + +1. One MIG with two backends. + +Assert: +1. The test driver configures matching port in the forwarding rule and in the +request server uri, then verifies traffic reaches backend service instances. +1. The test driver updates the forwarding rule to use a different port, then +verifies that the traffic stops going to those backend service instances. 
+ +### forwarding_rule_default_port +This test verifies that omitting port in the request server uri should only +match with the default port(80) configuration in the forwarding rule. +In addition, request server uri port should exactly match that in the URL map +host rule, as described in +[public doc](https://cloud.google.com/traffic-director/docs/proxyless-overview#proxyless-url-map). + +Client parameters: + +1. --num_channels=1 +1. --qps=100 + +Load balancer configuration: + +1. One MIG with two backends. + +Assert: + +Test driver configures and verifies: +1. No traffic goes to backends when configuring the target URI +`xds:///myservice`, the forwarding rule with port *x != 80*, the URL map +host rule `myservice::x`. +1. Traffic goes to backends when configuring the target URI `xds:///myservice`, +the forwarding rule port `80` and the URL map host rule `myservice`. +1. No traffic goes to backends when configuring the target URI +`xds:///myservice`, the forwarding rule port `80` and the host rule +`myservice::80`. diff --git a/etc/README.md b/etc/README.md new file mode 100644 index 00000000..87af44f7 --- /dev/null +++ b/etc/README.md @@ -0,0 +1,4 @@ +The roots.pem file is periodically generated from: +https://hg.mozilla.org/mozilla-central/raw-file/tip/security/nss/lib/ckfw/builtins/certdata.txt +using +https://github.com/agl/extract-nss-root-certs diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 00000000..49751c53 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,21 @@ +# Examples + +This directory contains examples for all the C-based gRPC implementations. Each +language subdirectory contains a Hello World example and more: + +* [Android](android) +* [C#](csharp) +* [C++](cpp) +* [Node.js](node) +* [Objective-C](objective-c/helloworld) +* [PHP](php) +* [Python](python/helloworld) +* [Ruby](ruby) + +For a complete list of supported languages, see [Supported languages][lang]. 
+ +For comprehensive documentation, including an [Introduction to gRPC][intro] and +tutorials that use this example code, visit [grpc.io](https://grpc.io). + +[intro]: https://grpc.io/docs/what-is-grpc/introduction +[lang]: https://grpc.io/docs/languages/ diff --git a/examples/android/binder/java/io/grpc/binder/cpp/README.md b/examples/android/binder/java/io/grpc/binder/cpp/README.md new file mode 100644 index 00000000..d0817c07 --- /dev/null +++ b/examples/android/binder/java/io/grpc/binder/cpp/README.md @@ -0,0 +1,21 @@ +# gRPC-core BinderTransport example apps + +WIP. + +## Build Instruction + +1. Install Android SDK and NDK. Currently we only support SDK version 30.0.3 and + NDK version 21.4.7075529 . Make sure you get these exact versions otherwise + Bazel might complain. + +2. Point environment variables to install locations of SDK and NDK + ``` + export ANDROID_HOME=$HOME/Android/Sdk/ + export ANDROID_NDK_HOME=$HOME/Android/Sdk/ndk/21.4.7075529 + ``` +3. `bazel build //examples/android/binder/java/io/grpc/binder/cpp/exampleclient:app` +4. `bazel build //examples/android/binder/java/io/grpc/binder/cpp/exampleserver:app` +5. `adb install + bazel-bin/examples/android/binder/java/io/grpc/binder/cpp/exampleclient/app.apk` +6. `adb install + bazel-bin/examples/android/binder/java/io/grpc/binder/cpp/exampleserver/app.apk` diff --git a/examples/android/helloworld/README.md b/examples/android/helloworld/README.md new file mode 100644 index 00000000..79acc026 --- /dev/null +++ b/examples/android/helloworld/README.md @@ -0,0 +1,24 @@ +gRPC on Android +============== + +Note: Building the protobuf dependency for Android requires +https://github.com/protocolbuffers/protobuf/pull/3878. This fix will be in the next +protobuf release, but until then must be manually patched in to +`third_party/protobuf` to build gRPC for Android. 
+ +PREREQUISITES +------------- + +- Android SDK +- Android NDK +- `protoc` and `grpc_cpp_plugin` binaries on the host system + +INSTALL +------- + +The example application can be built via Android Studio or on the command line +using `gradle`: + + ```sh + $ ./gradlew installDebug + ``` diff --git a/examples/cpp/README.md b/examples/cpp/README.md new file mode 100644 index 00000000..7c281de3 --- /dev/null +++ b/examples/cpp/README.md @@ -0,0 +1,13 @@ +# gRPC C++ Examples + +- **[Hello World][]!** Eager to run your first gRPC example? You'll find + instructions for building gRPC and running a simple "Hello World" app in [Quick Start][]. +- **[Route Guide][].** For a basic tutorial on gRPC see [gRPC Basics][]. + +For information about the other examples in this directory, see their respective +README files. + +[gRPC Basics]: https://grpc.io/docs/languages/cpp/basics +[Hello World]: helloworld +[Quick Start]: https://grpc.io/docs/languages/cpp/quickstart +[Route Guide]: route_guide diff --git a/examples/cpp/compression/README.md b/examples/cpp/compression/README.md new file mode 100644 index 00000000..6ba42498 --- /dev/null +++ b/examples/cpp/compression/README.md @@ -0,0 +1,84 @@ +# gRPC C++ Message Compression Tutorial + +### Prerequisite +Make sure you have run the [hello world example](../helloworld) or understood the basics of gRPC. We will not dive into the details that have been discussed in the hello world example. + +### Get the tutorial source code + +The example code for this and our other examples lives in the `examples` directory. 
Clone this repository at the [latest stable release tag](https://github.com/grpc/grpc/releases) to your local machine by running the following command: + + +```sh +$ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc +``` + +Change your current directory to examples/cpp/compression + +```sh +$ cd examples/cpp/compression/ +``` + +### Generating gRPC code + +To generate the client and server side interfaces: + +```sh +$ make helloworld.grpc.pb.cc helloworld.pb.cc +``` +Which internally invokes the proto-compiler as: + +```sh +$ protoc -I ../../protos/ --grpc_out=. --plugin=protoc-gen-grpc=grpc_cpp_plugin ../../protos/helloworld.proto +$ protoc -I ../../protos/ --cpp_out=. ../../protos/helloworld.proto +``` + +### Writing a client and a server + +The client and the server can be based on the hello world example. + +Additionally, we can configure the compression settings. + +In the client, set the default compression algorithm of the channel via the channel arg. + +```cpp + ChannelArguments args; + // Set the default compression algorithm for the channel. + args.SetCompressionAlgorithm(GRPC_COMPRESS_GZIP); + GreeterClient greeter(grpc::CreateCustomChannel( + "localhost:50051", grpc::InsecureChannelCredentials(), args)); +``` + +Each call's compression configuration can be overwritten by client context. + +```cpp + // Overwrite the call's compression algorithm to DEFLATE. + context.set_compression_algorithm(GRPC_COMPRESS_DEFLATE); +``` + +In the server, set the default compression algorithm via the server builder. + +```cpp + ServerBuilder builder; + // Set the default compression algorithm for the server. + builder.SetDefaultCompressionAlgorithm(GRPC_COMPRESS_GZIP); +``` + +Each call's compression configuration can be overwritten by server context. + +```cpp + // Overwrite the call's compression algorithm to DEFLATE. 
+ context->set_compression_algorithm(GRPC_COMPRESS_DEFLATE); +``` + +For a working example, refer to [greeter_client.cc](greeter_client.cc) and [greeter_server.cc](greeter_server.cc). + +Build and run the (compressing) client and the server by the following commands. + +```sh +make +./greeter_server +``` + +```sh +./greeter_client +``` diff --git a/examples/cpp/helloworld/README.md b/examples/cpp/helloworld/README.md new file mode 100644 index 00000000..a06cc8e3 --- /dev/null +++ b/examples/cpp/helloworld/README.md @@ -0,0 +1,6 @@ +# gRPC C++ Hello World Example + +You can find a complete set of instructions for building gRPC and running the +Hello World app in the [C++ Quick Start][]. + +[C++ Quick Start]: https://grpc.io/docs/languages/cpp/quickstart diff --git a/examples/cpp/load_balancing/README.md b/examples/cpp/load_balancing/README.md new file mode 100644 index 00000000..70c6c2a3 --- /dev/null +++ b/examples/cpp/load_balancing/README.md @@ -0,0 +1,65 @@ +# gRPC C++ Load Balancing Tutorial + +### Prerequisite +Make sure you have run the [hello world example](../helloworld) or understood the basics of gRPC. We will not dive into the details that have been discussed in the hello world example. + +### Get the tutorial source code + +The example code for this and our other examples lives in the `examples` directory. Clone this repository +at the [latest stable release tag](https://github.com/grpc/grpc/releases) to your local machine by running the following command: + + +```sh +$ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc +``` + +Change your current directory to examples/cpp/load_balancing + +```sh +$ cd examples/cpp/load_balancing/ +``` + +### Generating gRPC code + +To generate the client and server side interfaces: + +```sh +$ make helloworld.grpc.pb.cc helloworld.pb.cc +``` +Which internally invokes the proto-compiler as: + +```sh +$ protoc -I ../../protos/ --grpc_out=. 
--plugin=protoc-gen-grpc=grpc_cpp_plugin ../../protos/helloworld.proto +$ protoc -I ../../protos/ --cpp_out=. ../../protos/helloworld.proto +``` + +### Writing a client and a server + +The client and the server can be based on the hello world example. + +Additionally, we can configure the load balancing policy. (To see what load balancing policies are available, check out [this folder](https://github.com/grpc/grpc/tree/master/src/core/ext/filters/client_channel/lb_policy).) + +In the client, set the load balancing policy of the channel via the channel arg (to, for example, Round Robin). + +```cpp + ChannelArguments args; + // Set the load balancing policy for the channel. + args.SetLoadBalancingPolicyName("round_robin"); + GreeterClient greeter(grpc::CreateCustomChannel( + "localhost:50051", grpc::InsecureChannelCredentials(), args)); +``` + +For a working example, refer to [greeter_client.cc](greeter_client.cc) and [greeter_server.cc](greeter_server.cc). + +Build and run the client and the server with the following commands. + +```sh +make +./greeter_server +``` + +```sh +./greeter_client +``` + +(Note that the case in this example is trivial because there is only one server resolved from the name.) \ No newline at end of file diff --git a/examples/cpp/metadata/README.md b/examples/cpp/metadata/README.md new file mode 100644 index 00000000..334009bd --- /dev/null +++ b/examples/cpp/metadata/README.md @@ -0,0 +1,67 @@ +# Metadata Example + +## Overview + +This example shows you how to add custom headers on the client and server and +how to access them. + +Custom metadata must follow the "Custom-Metadata" format listed in +https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md, with the +exception of binary headers, which don't have to be base64 encoded. + +### Get the tutorial source code + The example code for this and our other examples lives in the `examples` directory. 
Clone this repository + at the [latest stable release tag](https://github.com/grpc/grpc/releases) to your local machine by running the following command: + ```sh +$ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc +``` + Change your current directory to examples/cpp/metadata + ```sh +$ cd examples/cpp/metadata +``` + +### Generating gRPC code + To generate the client and server side interfaces: + ```sh +$ make helloworld.grpc.pb.cc helloworld.pb.cc +``` +Which internally invokes the proto-compiler as: + ```sh +$ protoc -I ../../protos/ --grpc_out=. --plugin=protoc-gen-grpc=grpc_cpp_plugin ../../protos/helloworld.proto +$ protoc -I ../../protos/ --cpp_out=. ../../protos/helloworld.proto +``` +### Try it! +Build client and server: + +```sh +$ make +``` + +Run the server, which will listen on port 50051: + +```sh +$ ./greeter_server +``` + +Run the client (in a different terminal): + +```sh +$ ./greeter_client +``` + +If things go smoothly, you will see in the client terminal: + +"Client received initial metadata from server: initial metadata value" +"Client received trailing metadata from server: trailing metadata value" +"Client received message: Hello World" + + +And in the server terminal: + +"Header key: custom-bin , value: 01234567" +"Header key: custom-header , value: Custom Value" +"Header key: user-agent , value: grpc-c++/1.16.0-dev grpc-c/6.0.0-dev (linux; chttp2; gao)" + +We did not add the user-agent metadata as a custom header. This shows how +the gRPC framework adds some headers under the hood that may show up in the +metadata map. diff --git a/examples/cpp/route_guide/README.md b/examples/cpp/route_guide/README.md new file mode 100644 index 00000000..2b2376ca --- /dev/null +++ b/examples/cpp/route_guide/README.md @@ -0,0 +1,6 @@ +# gRPC Basics: C++ sample code + +The files in this folder are the samples used in [gRPC Basics: C++][], +a detailed tutorial for using gRPC in C++. 
+ +[gRPC Basics: C++]:https://grpc.io/docs/languages/cpp/basics diff --git a/examples/csharp/Helloworld/README.md b/examples/csharp/Helloworld/README.md new file mode 100644 index 00000000..8897d4fc --- /dev/null +++ b/examples/csharp/Helloworld/README.md @@ -0,0 +1,39 @@ +gRPC in 3 minutes (C#) +======================== + +BACKGROUND +------------- +This is a version of the helloworld example using the dotnet SDK +tools to compile [helloworld.proto][] in a common library, build the server +and the client, and run them. + +PREREQUISITES +------------- + +- The [.NET Core SDK 2.1+](https://www.microsoft.com/net/core) + +You can also build the solution `Greeter.sln` using Visual Studio 2017, +but it's not a requirement. + +BUILD AND RUN +------------- + +- Build and run the server + + ``` + > dotnet run -p GreeterServer + ``` + +- Build and run the client + + ``` + > dotnet run -p GreeterClient + ``` + +Tutorial +-------- + +You can find a more detailed tutorial about Grpc in [gRPC Basics: C#][] + +[helloworld.proto]:../../protos/helloworld.proto +[gRPC Basics: C#]:https://grpc.io/docs/languages/csharp/basics diff --git a/examples/csharp/HelloworldLegacyCsproj/README.md b/examples/csharp/HelloworldLegacyCsproj/README.md new file mode 100644 index 00000000..3f3889fe --- /dev/null +++ b/examples/csharp/HelloworldLegacyCsproj/README.md @@ -0,0 +1,74 @@ +gRPC in 3 minutes (C#) +======================== + +BACKGROUND +------------- +This is a different version of the helloworld example, using the "classic" .csproj +files, the only format supported by VS2013 (and older versions of mono). +You can still use gRPC with the classic .csproj files, but [using the new-style +.csproj projects](../Helloworld/README.md) (supported by VS2017 v15.3 and above, +and dotnet SDK) is recommended. 
+ +Example projects depend on the [Grpc](https://www.nuget.org/packages/Grpc/), +[Grpc.Tools](https://www.nuget.org/packages/Grpc.Tools/) +and [Google.Protobuf](https://www.nuget.org/packages/Google.Protobuf/) NuGet packages +which have been already added to the project for you. + +PREREQUISITES +------------- + +- Windows: .NET Framework 4.5+, Visual Studio 2013 or higher +- Linux: Mono 4+, MonoDevelop 5.9+ +- Mac OS X: Xamarin Studio 5.9+ + +BUILD +------- + +- Open solution `Greeter.sln` with Visual Studio, Monodevelop (on Linux) or Xamarin Studio (on Mac OS X) + +# Using Visual Studio + +* Select "Restore NuGet Packages" from the solution context menu. It is recommended + to close and re-open the solution after the packages have been restored from + Visual Studio. +* Build the solution. + +# Using Monodevelop or Xamarin Studio + +The NuGet add-in available for Xamarin Studio and Monodevelop IDEs is too old to +download all of the NuGet dependencies of gRPC. + +Using these IDEs, a workaround is as follows: +* Obtain a nuget executable for your platform and update it with + `nuget update -self`. +* Navigate to this directory and run `nuget restore`. +* Now that packages have been restored into their proper package folder, build the solution from your IDE. + +Try it! +------- + +- Run the server + + ``` + > cd GreeterServer/bin/Debug + > GreeterServer.exe + ``` + +- Run the client + + ``` + > cd GreeterClient/bin/Debug + > GreeterClient.exe + ``` + +You can also run the server and client directly from the IDE. + +On Linux or Mac, use `mono GreeterServer.exe` and `mono GreeterClient.exe` to run the server and client. 
+ +Tutorial +-------- + +You can find a more detailed tutorial in [gRPC Basics: C#][] + +[helloworld.proto]:../../protos/helloworld.proto +[gRPC Basics: C#]:https://grpc.io/docs/languages/csharp/basics diff --git a/examples/csharp/HelloworldUnity/README.md b/examples/csharp/HelloworldUnity/README.md new file mode 100644 index 00000000..ec489e94 --- /dev/null +++ b/examples/csharp/HelloworldUnity/README.md @@ -0,0 +1,19 @@ +gRPC C# on Unity +======================== + +EXPERIMENTAL ONLY +------------- +Support of the Unity platform is currently experimental. + +PREREQUISITES +------------- + +- Unity 2018.3.5f1 + +BUILD +------- + +- Follow instructions in https://github.com/grpc/grpc/tree/master/src/csharp/experimental#unity to obtain the grpc_csharp_unity.zip + that contains gRPC C# for Unity. Unzip it under `Assets/Plugins` directory. +- Open the `HelloworldUnity.sln` in Unity Editor. +- Build using Unity Editor. diff --git a/examples/csharp/HelloworldXamarin/README.md b/examples/csharp/HelloworldXamarin/README.md new file mode 100644 index 00000000..153a4f1b --- /dev/null +++ b/examples/csharp/HelloworldXamarin/README.md @@ -0,0 +1,32 @@ +gRPC C# on Xamarin +======================== + +EXPERIMENTAL ONLY +------------- +Support of the Xamarin platform is currently experimental. + +BACKGROUND +------------- +The example project supports `Xamarin.Android` and `Xamarin.iOS`. + +For this sample, we've already generated the server and client stubs from [helloworld.proto][]. + +PREREQUISITES +------------- + +- The latest version Visual Studio 2017 or Visual Studio for Mac with Xamarin support installed. + +BUILD +------- + +- Open the `HelloworldXamarin.sln` in Visual Studio (or Visual Studio for Mac) +- Build the solution (Build -> Build All) + +Try it! +------- + +You can deploy the example apps directly through Visual Studio IDE. +Deployments can target both Android and iOS (both support physical device +deployment as well as simulator). 
+ +[helloworld.proto]:../../protos/helloworld.proto diff --git a/examples/csharp/RouteGuide/README.md b/examples/csharp/RouteGuide/README.md new file mode 100644 index 00000000..bfdb9e14 --- /dev/null +++ b/examples/csharp/RouteGuide/README.md @@ -0,0 +1,6 @@ +# gRPC Basics: C# sample code + +The files in this folder are the samples used in [gRPC Basics: C#][], +a detailed tutorial for using gRPC in C#. + +[gRPC Basics: C#]:https://grpc.io/docs/languages/csharp/basics diff --git a/examples/csharp/Xds/README.md b/examples/csharp/Xds/README.md new file mode 100644 index 00000000..1d6370a6 --- /dev/null +++ b/examples/csharp/Xds/README.md @@ -0,0 +1,99 @@ +gRPC Hostname example (C#) +======================== + +BACKGROUND +------------- +This is a version of the helloworld example with a server whose response includes its hostname. It also supports health and reflection services. This makes it a good server to test infrastructure, such as XDS load balancing. + +PREREQUISITES +------------- + +- The [.NET Core SDK 2.1+](https://www.microsoft.com/net/core) + +You can also build the solution `Greeter.sln` using Visual Studio 2019, +but it's not a requirement. + +RUN THE EXAMPLE +------------- + +First, build and run the server, then verify the server is running and +check the server is behaving as expected (more on that below). 
+ +``` +cd GreeterServer +dotnet run +``` + +After configuring your xDS server to track the gRPC server we just started, +create a bootstrap file as described in [gRFC A27](https://github.com/grpc/proposal/blob/master/A27-xds-global-load-balancing.md): + +``` +{ + "xds_servers": [ + { + "server_uri": , + "channel_creds": [ + { + "type": , + "config": + } + ] + } + ], + "node": +} +``` + +Then point the `GRPC_XDS_BOOTSTRAP` environment variable at the bootstrap file: + +``` +export GRPC_XDS_BOOTSTRAP=/etc/xds-bootstrap.json +``` + +Finally, run your client: + +``` +cd GreeterClient +dotnet run --server xds-experimental:///my-backend +``` + +VERIFYING THE SERVER +------------- + +`grpcurl` can be used to test your server. If you don't have it, +install [`grpcurl`](https://github.com/fullstorydev/grpcurl/releases). This will allow +you to manually test the service. + +Exercise your server's application-layer service: + +```sh +> grpcurl --plaintext -d '{"name": "you"}' localhost:30051 +{ + "message": "Hello you from jtatt.muc.corp.google.com!" +} +``` + +Make sure that all of your server's services are available via reflection: + +```sh +> grpcurl --plaintext localhost:30051 list +grpc.health.v1.Health +grpc.reflection.v1alpha.ServerReflection +helloworld.Greeter +``` + +Make sure that your services are reporting healthy: + +```sh +> grpcurl --plaintext -d '{"service": "helloworld.Greeter"}' localhost:30051 +grpc.health.v1.Health/Check +{ + "status": "SERVING" +} + +> grpcurl --plaintext -d '{"service": ""}' localhost:30051 +grpc.health.v1.Health/Check +{ + "status": "SERVING" +} +``` diff --git a/examples/node/README.md b/examples/node/README.md new file mode 100644 index 00000000..b45488b8 --- /dev/null +++ b/examples/node/README.md @@ -0,0 +1,50 @@ +gRPC in 3 minutes (Node.js) +=========================== + +PREREQUISITES +------------- + +- `node`: This requires Node 0.12.x or greater. 
+ +INSTALL +------- + + ```sh + $ # Get the gRPC repository + $ export REPO_ROOT=grpc # REPO root can be any directory of your choice + $ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc $REPO_ROOT + $ cd $REPO_ROOT + + $ cd examples/node + $ npm install + ``` + +TRY IT! +------- + +There are two ways to generate the code needed to work with protocol buffers in Node.js - one approach uses [Protobuf.js](https://github.com/dcodeIO/ProtoBuf.js/) to dynamically generate the code at runtime, the other uses code statically generated using the protocol buffer compiler `protoc`. The examples behave identically, and either server can be used with either client. + + - Run the server + + ```sh + $ # from this directory + $ node ./dynamic_codegen/greeter_server.js & + $ # OR + $ node ./static_codegen/greeter_server.js & + ``` + + - Run the client + + ```sh + $ # from this directory + $ node ./dynamic_codegen/greeter_client.js + $ # OR + $ node ./static_codegen/greeter_client.js + ``` + +TUTORIAL +-------- +You can find a more detailed tutorial in [gRPC Basics: Node.js][] + +[Install gRPC Node]:../../src/node +[gRPC Basics: Node.js]:https://grpc.io/docs/languages/node/basics diff --git a/examples/node/dynamic_codegen/README.md b/examples/node/dynamic_codegen/README.md new file mode 100644 index 00000000..1a6ec17a --- /dev/null +++ b/examples/node/dynamic_codegen/README.md @@ -0,0 +1 @@ +This is the dynamic code generation variant of the Node examples. Code in these examples is generated at runtime using Protobuf.js. diff --git a/examples/node/dynamic_codegen/route_guide/README.md b/examples/node/dynamic_codegen/route_guide/README.md new file mode 100644 index 00000000..fcd14705 --- /dev/null +++ b/examples/node/dynamic_codegen/route_guide/README.md @@ -0,0 +1,5 @@ +# gRPC Basics: Node.js sample code + +The files in this folder are the samples used in [gRPC Basics: Node.js][], a detailed tutorial for using gRPC in Node.js. 
+ +[gRPC Basics: Node.js]:https://grpc.io/docs/languages/node/basics diff --git a/examples/node/static_codegen/README.md b/examples/node/static_codegen/README.md new file mode 100644 index 00000000..bc087ec4 --- /dev/null +++ b/examples/node/static_codegen/README.md @@ -0,0 +1,8 @@ +This is the static code generation variant of the Node examples. Code in these examples is pre-generated using protoc and the Node gRPC protoc plugin, and the generated code can be found in various `*_pb.js` files. The command line sequence for generating those files is as follows (assuming that `protoc` and `grpc_node_plugin` are present, and starting in the directory which contains this README.md file): + +```sh +cd ../../protos +npm install -g grpc-tools +grpc_tools_node_protoc --js_out=import_style=commonjs,binary:../node/static_codegen/ --grpc_out=grpc_js:../node/static_codegen helloworld.proto +grpc_tools_node_protoc --js_out=import_style=commonjs,binary:../node/static_codegen/route_guide/ --grpc_out=grpc_js:../node/static_codegen/route_guide/ route_guide.proto +``` diff --git a/examples/node/static_codegen/route_guide/README.md b/examples/node/static_codegen/route_guide/README.md new file mode 100644 index 00000000..fcd14705 --- /dev/null +++ b/examples/node/static_codegen/route_guide/README.md @@ -0,0 +1,5 @@ +# gRPC Basics: Node.js sample code + +The files in this folder are the samples used in [gRPC Basics: Node.js][], a detailed tutorial for using gRPC in Node.js. + +[gRPC Basics: Node.js]:https://grpc.io/docs/languages/node/basics diff --git a/examples/objective-c/auth_sample/README.md b/examples/objective-c/auth_sample/README.md new file mode 100644 index 00000000..6e90be39 --- /dev/null +++ b/examples/objective-c/auth_sample/README.md @@ -0,0 +1,3 @@ +# OAuth2 on gRPC: Objective-C + +This is the supporting code for the tutorial "[OAuth2 on gRPC: Objective-C](https://grpc.io/docs/languages/objective-c/oauth2)." 
diff --git a/examples/objective-c/helloworld/README.md b/examples/objective-c/helloworld/README.md new file mode 100644 index 00000000..9142de9c --- /dev/null +++ b/examples/objective-c/helloworld/README.md @@ -0,0 +1,107 @@ +# gRPC in 3 minutes (Objective-C) + +There are currently two ways to build projects with the gRPC Objective-C library: +* Cocoapods & Xcode +* Bazel (experimental) + +## Cocoapods + +## Installation + +To run this example you should have [Cocoapods](https://cocoapods.org/#install) installed, as well +as the relevant tools to generate the client library code (and a server in another language, for +testing). You can obtain the latter by following [these setup instructions](https://github.com/grpc/homebrew-grpc). + +### Hello Objective-C gRPC! + +Here's how to build and run the Objective-C implementation of the [Hello World](../../protos/helloworld.proto) +example used in [Getting started](https://github.com/grpc/grpc/tree/master/examples). + +The example code for this and our other examples lives in the `examples` directory. Clone +this repository at the [latest stable release tag](https://github.com/grpc/grpc/releases) to your local machine by running the following commands: + + +```sh +$ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc +$ cd grpc +$ git submodule update --init +``` + +Change your current directory to `examples/objective-c/helloworld` + +```sh +$ cd examples/objective-c/helloworld +``` + +#### Try it! +To try the sample app, we need a gRPC server running locally. Let's compile and run, for example, +the C++ server in this repository: + +```shell +$ pushd ../../cpp/helloworld +$ make +$ ./greeter_server & +$ popd +``` + +Now have Cocoapods generate and install the client library for our .proto files: + +```shell +$ pod install +``` + +(This might have to compile OpenSSL, which takes around 15 minutes if Cocoapods doesn't have it yet +on your computer's cache.) 
+ +Finally, open the Xcode workspace created by Cocoapods, and run the app. You can check the calling +code in `main.m` and see the results in Xcode's log console. + +The code sends a `HLWHelloRequest` containing the string "Objective-C" to a local server. The server +responds with a `HLWHelloResponse`, which contains a string that is then output to the log. + +## Bazel +### Installation +To run the examples in Bazel, you should have [Bazel](https://docs.bazel.build/versions/master/install-os-x.html) installed. + +### Hello Objective-C gRPC! +Here's how to build and run the Objective-C implementation of the [Hello World](helloworld) example. + +The code for the Hello World example and others live in the `examples` directory. Clone this repository to your local machine by running the following commands: +```shell +$ git clone --recursive https://github.com/grpc/grpc +``` + +Next, change your directory to `examples/objective-c` +```shell +$ cd grpc/examples/objective-c +``` + +Now build the Hello World project: +```shell +$ bazel build :HelloWorld +``` + +#### Try it! +To run the Hello World sample properly, we need a local server. Let's compile and run the corresponding C++ server: +```shell +$ bazel run //examples/cpp/helloworld:greeter_server +``` + +To run the sample, you need to know the available simulator runtimes in your machine. You could either list the available runtimes yourself by running: +```shell +$ xcrun simctl list +``` +Or just try running the app and it will let you know what is available in the error messages: +```shell +$ bazel run :HelloWorld +``` +Note that running this command will build the project even if it is not built beforehand. + +Finally, launch the app with one of the available runtimes: +```shell +$ bazel run :HelloWorld --ios_simulator_version='' --ios_simulator_device='' +``` + +## Tutorial + +You can find a more detailed tutorial in [gRPC Basics: Objective-C](https://grpc.io/docs/languages/objective-c/basics). 
diff --git a/examples/objective-c/helloworld_macos/README.md b/examples/objective-c/helloworld_macos/README.md new file mode 100644 index 00000000..295701b5 --- /dev/null +++ b/examples/objective-c/helloworld_macos/README.md @@ -0,0 +1,6 @@ +# gRPC Objective-C Mac OS Hello World Example + +A hello world example app on Mac OS. Note that Mac OS is not a first class supported platform of gRPC +Objective-C library. This example is only for reference. + +Refer to [Hello World Example](../helloworld) for instructions on installation and running. diff --git a/examples/objective-c/route_guide/README.md b/examples/objective-c/route_guide/README.md new file mode 100644 index 00000000..4cc57ceb --- /dev/null +++ b/examples/objective-c/route_guide/README.md @@ -0,0 +1,4 @@ +# gRPC Basics: Objective-C + +This is the supporting code for the tutorial "[gRPC Basics: Objective-C](https://grpc.io/docs/languages/objective-c/basics)." + diff --git a/examples/php/README.md b/examples/php/README.md new file mode 100644 index 00000000..e027d327 --- /dev/null +++ b/examples/php/README.md @@ -0,0 +1,9 @@ +# gRPC PHP examples + +This directory contains the sources for the following PHP examples: + +- [Quick start][] +- [Basics tutorial][] + +[Quick start]: https://grpc.io/docs/languages/php/quickstart/ +[Basics tutorial]: https://grpc.io/docs/languages/php/basics/ diff --git a/examples/php/echo/README.md b/examples/php/echo/README.md new file mode 100644 index 00000000..c34ea468 --- /dev/null +++ b/examples/php/echo/README.md @@ -0,0 +1,86 @@ + +# gRPC PHP End-to-End Examples + +This page shows a number of ways to create a PHP gRPC client and connect with +a gRPC backend service. + + +## Run the Server + +For all the following examples, we use a simple gRPC server, written in Node. 
+ +```sh +$ git clone https://github.com/grpc/grpc-web +$ cd grpc-web +$ docker-compose build common node-server +$ docker run -d -p 9090:9090 --name node-server grpcweb/node-server +``` + + +## Install the gRPC PECL extension + +All the following commands are assumed to be run from this current directory. + +```sh +$ cd grpc/examples/php/echo +``` + + +In order to build a PHP gRPC client, we need to install the `grpc` extension +first. + +```sh +$ docker build -t grpc-php/base -f ./base.Dockerfile . +``` + + +## CLI + + +Let's first build a simple CLI gRPC client: + +```sh +$ docker build -t grpc-php/echo-client -f ./cli.Dockerfile . +$ docker run -it --rm --link node-server:node-server grpc-php/echo-client +$ php client.php +``` + + + +## Apache + + +Now let's see how the gRPC PHP client can run with Apache: + +```sh +$ docker build -t grpc-php/apache -f ./apache.Dockerfile . +$ docker run -it --rm --link node-server:node-server -p 80:80 grpc-php/apache +``` + +Open the browser to `http://localhost`. + + + +## Nginx + FPM + + +We can also try running PHP-FPM and put Nginx in front of it. + + +The PHP-FPM part: + +```sh +$ docker build -t grpc-php/fpm -f ./fpm.Dockerfile . +$ docker run -it --rm --link node-server:node-server -p 9000:9000 \ + --name fpm grpc-php/fpm +``` + +The Nginx part: + +```sh +$ docker run -it --rm -v $(pwd)/nginx.conf:/etc/nginx/conf.d/default.conf:ro \ + --link fpm:fpm -p 80:80 nginx:1.17.4 +``` + + +Open the browser to `http://localhost`. diff --git a/examples/php/route_guide/README.md b/examples/php/route_guide/README.md new file mode 100644 index 00000000..e5abfbba --- /dev/null +++ b/examples/php/route_guide/README.md @@ -0,0 +1,6 @@ +# gRPC Basics: PHP sample code + +The files in this folder are the samples used in [gRPC Basics: PHP][], +a detailed tutorial for using gRPC in PHP. 
+ +[gRPC Basics: PHP]:https://grpc.io/docs/languages/php/basics diff --git a/examples/protos/README.md b/examples/protos/README.md new file mode 100644 index 00000000..48df7c89 --- /dev/null +++ b/examples/protos/README.md @@ -0,0 +1,8 @@ +# Example protos + +## Contents + +- [helloworld.proto] + - The simple example used in the overview. +- [route_guide.proto] + - An example service described in detail in the tutorial. diff --git a/examples/python/README.md b/examples/python/README.md new file mode 100644 index 00000000..be57d897 --- /dev/null +++ b/examples/python/README.md @@ -0,0 +1 @@ +[This code's documentation lives on the grpc.io site.](https://grpc.io/docs/languages/python/quickstart) diff --git a/examples/python/async_streaming/README.md b/examples/python/async_streaming/README.md new file mode 100644 index 00000000..4dac53b0 --- /dev/null +++ b/examples/python/async_streaming/README.md @@ -0,0 +1,50 @@ +# gRPC Python Non-Blocking Streaming RPC Client Example + +The goal of this example is to demonstrate how to handle streaming responses +without blocking the current thread. Effectively, this can be achieved by +converting the gRPC Python streaming API into callback-based. + +In this example, the RPC service `Phone` simulates the life cycle of virtual +phone calls. It requires one thread to handle the phone-call session state +changes, and another thread to process the audio stream. In this case, the +normal blocking style API could not fulfill the need easily. Hence, we should +asynchronously execute the streaming RPC. 
+ +## Steps to run this example + +Start the server in one session +``` +python3 server.py +``` + +Start the client in another session +``` +python3 client.py +``` + +## Example Output +``` +$ python3 server.py +INFO:root:Server serving at [::]:50051 +INFO:root:Received a phone call request for number [1415926535] +INFO:root:Created a call session [{ + "sessionId": "0", + "media": "https://link.to.audio.resources" +}] +INFO:root:Call finished [1415926535] +INFO:root:Call session cleaned [{ + "sessionId": "0", + "media": "https://link.to.audio.resources" +}] +``` + +``` +$ python3 client.py +INFO:root:Waiting for peer to connect [1415926535]... +INFO:root:Call toward [1415926535] enters [NEW] state +INFO:root:Call toward [1415926535] enters [ACTIVE] state +INFO:root:Consuming audio resource [https://link.to.audio.resources] +INFO:root:Call toward [1415926535] enters [ENDED] state +INFO:root:Audio session finished [https://link.to.audio.resources] +INFO:root:Call finished! +``` diff --git a/examples/python/auth/README.md b/examples/python/auth/README.md new file mode 100644 index 00000000..2fd044b8 --- /dev/null +++ b/examples/python/auth/README.md @@ -0,0 +1,112 @@ +# Authentication Extension Example in gRPC Python + +## Check Our Guide First + +For most common usage of authentication in gRPC Python, please see our +[Authentication](https://grpc.io/docs/guides/auth/) guide's Python section. The +Guide includes following scenarios: + +1. Server SSL credential setup +2. Client SSL credential setup +3. Authenticate with Google using a JWT +4. Authenticate with Google using an Oauth2 token + +Also, the guide talks about gRPC specific credential types. + +### Channel credentials + +Channel credentials are attached to a `Channel` object, the most common use case +are SSL credentials. + +### Call credentials + +Call credentials are attached to a `Call` object (corresponding to an RPC). 
+Under the hood, the call credentials is a function that takes in information of +the RPC and modify metadata through callback. + +## About This Example + +This example focuses on extending gRPC authentication mechanism: +1) Customize authentication plugin; +2) Composite client side credentials; +3) Validation through interceptor on server side. + +## AuthMetadataPlugin: Manipulate metadata for each call + +Unlike TLS/SSL based authentication, the authentication extension in gRPC Python +lives at a much higher level of networking. It relies on the transmission of +metadata (HTTP Header) between client and server, instead of alternating the +transport protocol. + +gRPC Python provides a way to intercept an RPC and append authentication related +metadata through +[`AuthMetadataPlugin`](https://grpc.github.io/grpc/python/grpc.html#grpc.AuthMetadataPlugin). +Those in need of a custom authentication method may simply provide a concrete +implementation of the following interface: + +```Python +class AuthMetadataPlugin: + """A specification for custom authentication.""" + + def __call__(self, context, callback): + """Implements authentication by passing metadata to a callback. + + Implementations of this method must not block. + + Args: + context: An AuthMetadataContext providing information on the RPC that + the plugin is being called to authenticate. + callback: An AuthMetadataPluginCallback to be invoked either + synchronously or asynchronously. + """ +``` + +Then pass the instance of the concrete implementation to +`grpc.metadata_call_credentials` function to be converted into a +`CallCredentials` object. Please NOTE that it is possible to pass a Python +function object directly, but we recommend to inherit from the base class to +ensure implementation correctness. + + +```Python +def metadata_call_credentials(metadata_plugin, name=None): + """Construct CallCredentials from an AuthMetadataPlugin. 
+ + Args: + metadata_plugin: An AuthMetadataPlugin to use for authentication. + name: An optional name for the plugin. + + Returns: + A CallCredentials. + """ +``` + +The `CallCredentials` object can be passed directly into an RPC like: + +```Python +call_credentials = grpc.metadata_call_credentials(my_foo_plugin) +stub.FooRpc(request, credentials=call_credentials) +``` + +Or you can use `ChannelCredentials` and `CallCredentials` at the same time by +combining them: + +```Python +channel_credentials = ... +call_credentials = ... +composite_credentials = grpc.composite_channel_credentials( + channel_credentials, + call_credentials) +channel = grpc.secure_channel(server_address, composite_credentials) +``` + +It is also possible to apply multiple `CallCredentials` to a single RPC: + +```Python +call_credentials_foo = ... +call_credentials_bar = ... +call_credentials = grpc.composite_call_credentials( + call_credentials_foo, + call_credentials_bar) +stub.FooRpc(request, credentials=call_credentials) +``` diff --git a/examples/python/cancellation/README.md b/examples/python/cancellation/README.md new file mode 100644 index 00000000..87d0c76d --- /dev/null +++ b/examples/python/cancellation/README.md @@ -0,0 +1,127 @@ +### Cancellation + +In the example, we implement a silly algorithm. We search for bytestrings whose +hashes are similar to a given search string. For example, say we're looking for +the string "doctor". Our algorithm may return `JrqhZVkTDoctYrUlXDbL6pfYQHU=` or +`RC9/7mlM3ldy4TdoctOc6WzYbO4=`. This is a brute force algorithm, so the server +performing the search must be conscious of the resources it allows to each client +and each client must be conscientious of the resources it demands of the server. + +In particular, we ensure that client processes cancel the stream explicitly +before terminating and we ensure that server processes cancel RPCs that have gone on longer +than a certain number of iterations. 
+ +#### Cancellation on the Client Side + +A client may cancel an RPC for several reasons. Perhaps the data it requested +has been made irrelevant. Perhaps you, as the client, want to be a good citizen +of the server and are conserving compute resources. + +##### Cancelling a Server-Side Unary RPC from the Client + +The default RPC methods on a stub will simply return the result of an RPC. + +```python +>>> stub = hash_name_pb2_grpc.HashFinderStub(channel) +>>> stub.Find(hash_name_pb2.HashNameRequest(desired_name=name)) + +``` + +But you may use the `future()` method to receive an instance of `grpc.Future`. +This interface allows you to wait on a response with a timeout, add a callback +to be executed when the RPC completes, or to cancel the RPC before it has +completed. + +In the example, we use this interface to cancel our in-progress RPC when the +user interrupts the process with ctrl-c. + +```python +stub = hash_name_pb2_grpc.HashFinderStub(channel) +future = stub.Find.future(hash_name_pb2.HashNameRequest(desired_name=name)) +def cancel_request(unused_signum, unused_frame): + future.cancel() + sys.exit(0) +signal.signal(signal.SIGINT, cancel_request) + +result = future.result() +print(result) +``` + +We also call `sys.exit(0)` to terminate the process. If we do not do this, then +`future.result()` will throw an `RpcError`. Alternatively, you may catch this +exception. + + +##### Cancelling a Server-Side Streaming RPC from the Client + +Cancelling a Server-side streaming RPC is even simpler from the perspective of +the gRPC API. The default stub method is already an instance of `grpc.Future`, +so the methods outlined above still apply. It is also a generator, so we may +iterate over it to yield the results of our RPC. 
+ +```python +stub = hash_name_pb2_grpc.HashFinderStub(channel) +result_generator = stub.FindRange(hash_name_pb2.HashNameRequest(desired_name=name)) +def cancel_request(unused_signum, unused_frame): + result_generator.cancel() + sys.exit(0) +signal.signal(signal.SIGINT, cancel_request) +for result in result_generator: + print(result) +``` + +We also call `sys.exit(0)` here to terminate the process. Alternatively, you may +catch the `RpcError` raised by the for loop upon cancellation. + + +#### Cancellation on the Server Side + +A server is responsible for cancellation in two ways. It must respond in some way +when a client initiates a cancellation, otherwise long-running computations +could continue indefinitely. + +It may also decide to cancel the RPC for its own reasons. In our example, the +server can be configured to cancel an RPC after a certain number of hashes has +been computed in order to conserve compute resources. + +##### Responding to Cancellations from a Servicer Thread + +It's important to remember that a gRPC Python server is backed by a thread pool +with a fixed size. When an RPC is cancelled, the library does *not* terminate +your servicer thread. It is your responsibility as the application author to +ensure that your servicer thread terminates soon after the RPC has been +cancelled. + +In this example, we use the `ServicerContext.add_callback` method to set a +`threading.Event` object when the RPC is terminated. We pass this `Event` object +down through our hashing algorithm and ensure to check that the RPC is still +ongoing before each iteration. + +```python +stop_event = threading.Event() +def on_rpc_done(): + # Regain servicer thread. + stop_event.set() +context.add_callback(on_rpc_done) +secret = _find_secret(stop_event) +``` + +##### Initiating a Cancellation on the Server Side + +Initiating a cancellation from the server side is simpler. Just call +`ServicerContext.cancel()`. 
+ +In our example, we ensure that no single client is monopolizing the server by +cancelling after a configurable number of hashes have been checked. + +```python +try: + for candidate in secret_generator: + yield candidate +except ResourceLimitExceededError: + print("Cancelling RPC due to exhausted resources.") + context.cancel() +``` + +In this type of situation, you may also consider returning a more specific error +using the [`grpcio-status`](https://pypi.org/project/grpcio-status/) package. diff --git a/examples/python/compression/README.md b/examples/python/compression/README.md new file mode 100644 index 00000000..c719bba0 --- /dev/null +++ b/examples/python/compression/README.md @@ -0,0 +1,58 @@ +## Compression with gRPC Python + +gRPC offers lossless compression options in order to decrease the number of bits +transferred over the wire. Three levels of compression are available: + + - `grpc.Compression.NoCompression` - No compression is applied to the payload. (default) + - `grpc.Compression.Deflate` - The "Deflate" algorithm is applied to the payload. + - `grpc.Compression.Gzip` - The Gzip algorithm is applied to the payload. + +The default option on both clients and servers is `grpc.Compression.NoCompression`. + +See [the gRPC Compression Spec](https://github.com/grpc/grpc/blob/master/doc/compression.md) +for more information. + +### Client Side Compression + +Compression may be set at two levels on the client side. + +#### At the channel level + +```python +with grpc.insecure_channel('foo.bar:1234', compression=grpc.Compression.Gzip) as channel: + use_channel(channel) +``` + +#### At the call level + +Setting the compression method at the call level will override any settings on +the channel level. 
+ +```python +stub = helloworld_pb2_grpc.GreeterStub(channel) +response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'), + compression=grpc.Compression.Deflate) +``` + + +### Server Side Compression + +Additionally, compression may be set at two levels on the server side. + +#### On the entire server + +```python +server = grpc.server(futures.ThreadPoolExecutor(), + compression=grpc.Compression.Gzip) +``` + +#### For an individual RPC + +```python +def SayHello(self, request, context): + context.set_response_compression(grpc.Compression.NoCompression) + return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name) +``` + +Setting the compression method for an individual RPC will override any setting +supplied at server creation time. diff --git a/examples/python/data_transmission/README.cn.md b/examples/python/data_transmission/README.cn.md new file mode 100644 index 00000000..ecffa5d3 --- /dev/null +++ b/examples/python/data_transmission/README.cn.md @@ -0,0 +1,36 @@ +## Data transmission demo for using gRPC in Python + +在Python中使用gRPC时, 进行数据传输的四种方式 [官方指南]() + +- #### 一元模式 + + 在一次调用中, 客户端只能向服务器传输一次请求数据, 服务器也只能返回一次响应 + + `client.py: simple_method` + + `server.py: SimpleMethod` + +- #### 客户端流模式 + + 在一次调用中, 客户端可以多次向服务器传输数据, 但是服务器只能返回一次响应 + + `client.py: client_streaming_method ` + + `server.py: ClientStreamingMethod` + +- #### 服务端流模式 + + 在一次调用中, 客户端只能向服务器传输一次请求数据, 但是服务器可以多次返回响应 + + `client.py: server_streaming_method` + + `server.py: ServerStreamingMethod` + +- #### 双向流模式 + + 在一次调用中, 客户端和服务器都可以向对方多次收发数据 + + `client.py: bidirectional_streaming_method` + + `server.py: BidirectionalStreamingMethod` + diff --git a/examples/python/data_transmission/README.en.md b/examples/python/data_transmission/README.en.md new file mode 100644 index 00000000..b9359434 --- /dev/null +++ b/examples/python/data_transmission/README.en.md @@ -0,0 +1,37 @@ +## Data transmission demo for using gRPC in Python + +Four ways of data transmission when gRPC is used in Python. 
[Official Guide]() + +- #### unary-unary + + In a single call, the client can only send a request once, and the server can only respond once. + + `client.py: simple_method` + + `server.py: SimpleMethod` + +- #### stream-unary + + In a single call, the client can transfer data to the server an arbitrary number of times, but the server can only return a response once. + + `client.py: client_streaming_method` + + `server.py: ClientStreamingMethod` + +- #### unary-stream + + In a single call, the client can only transmit data to the server at one time, but the server can return the response many times. + + `client.py: server_streaming_method` + + `server.py: ServerStreamingMethod` + +- #### stream-stream + + In a single call, both client and server can send and receive data + to each other multiple times. + + `client.py: bidirectional_streaming_method` + + `server.py: BidirectionalStreamingMethod` + diff --git a/examples/python/debug/README.md b/examples/python/debug/README.md new file mode 100644 index 00000000..ceed31ef --- /dev/null +++ b/examples/python/debug/README.md @@ -0,0 +1,68 @@ +# gRPC Python Debug Example + +This example demonstrates the usage of Channelz. For a better-looking website, +the [gdebug](https://github.com/grpc/grpc-experiments/tree/master/gdebug) uses +gRPC-Web protocol and will serve all useful information in web pages. + +## Channelz: Live Channel Tracing + +Channelz is a channel tracing feature. It will track statistics like how many +messages have been sent, how many of them failed, and what the connected +sockets are. Since it is implemented in C-Core and has low overhead, it is +recommended to turn it on for production services. See the [Channelz design +doc](https://github.com/grpc/proposal/blob/master/A14-channelz.md). + +## How to enable tracing log +The tracing log generation might have larger overhead, especially when you try +to trace transport. It would result in replicating the traffic loads.
However, +it is still the most powerful tool when you need to dive in. + +### The Most Verbose Tracing Log + +Specify environment variables, then run your application: + +``` +GRPC_VERBOSITY=debug +GRPC_TRACE=all +``` + +For more granularity, please see +[environment_variables](https://github.com/grpc/grpc/blob/master/doc/environment_variables.md). + +### Debug Transport Protocol + +``` +GRPC_VERBOSITY=debug +GRPC_TRACE=tcp,http,secure_endpoint,transport_security +``` + +### Debug Connection Behavior + +``` +GRPC_VERBOSITY=debug +GRPC_TRACE=call_error,connectivity_state,pick_first,round_robin,glb +``` + +## How to debug your application? + +`pdb` is a debugging tool that is available for Python interpreters natively. +You can set breakpoints, and execute commands while the application is stopped. + +The simplest usage is to add a single line in the place you want to inspect: +`import pdb; pdb.set_trace()`. When the interpreter sees this line, it will pop up +an interactive command line interface for you to inspect the application state. + +For more detailed usage, see https://docs.python.org/3/library/pdb.html. + +**Caveat**: gRPC Python uses C-Extension under-the-hood, so `pdb` may not be +able to trace through the whole stack. + +## gRPC Command Line Tool + +`grpc_cli` is a handy tool to interact with a gRPC backend easily. Imagine you can +inspect what services a server provides without writing any code, and make +gRPC calls just like `curl`.
+ +The installation guide: https://github.com/grpc/grpc/blob/master/doc/command_line_tool.md#code-location +The usage guide: https://github.com/grpc/grpc/blob/master/doc/command_line_tool.md#usage +The source code: https://github.com/grpc/grpc/blob/master/test/cpp/util/grpc_cli.cc diff --git a/examples/python/errors/README.md b/examples/python/errors/README.md new file mode 100644 index 00000000..2cfc26a9 --- /dev/null +++ b/examples/python/errors/README.md @@ -0,0 +1,107 @@ +# gRPC Python Error Handling Example + +The goal of this example is sending an error status from the server that is more complicated than a code and detail string. + +The definition for an RPC method in proto files contains a request message and a response message. There are many error states that can be shared across RPC methods (e.g. stack trace, insufficient quota). Using a different path to handle errors will make the code more maintainable. + +Ideally, the final status of an RPC should be described in the trailing headers of HTTP2, and gRPC Python provides helper functions in the `grpcio-status` package to assist the packing and unpacking of error status. + + +### Requirement +``` +grpcio>=1.18.0 +grpcio-status>=1.18.0 +googleapis-common-protos>=1.5.5 +``` + + +### Error Detail Proto + +You may provide any custom proto message as error detail in your implementation. Here are protos defined by the Google Cloud Library Team: + +* [code.proto](https://github.com/googleapis/api-common-protos/blob/87185dfffad4afa5a33a8c153f0e1ea53b4f85dc/google/rpc/code.proto) contains the definition of RPC error codes. +* [error_details.proto](https://github.com/googleapis/api-common-protos/blob/87185dfffad4afa5a33a8c153f0e1ea53b4f85dc/google/rpc/error_details.proto) contains definitions of common error details.
+ + +### Definition of Status Proto + +Here is the definition of Status proto. For full text, please see [status.proto](https://github.com/googleapis/api-common-protos/blob/87185dfffad4afa5a33a8c153f0e1ea53b4f85dc/google/rpc/status.proto). + +```proto +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. It is used by +// [gRPC](https://github.com/grpc). The error model is designed to be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error message, +// and error details. The error code should be an enum value of +// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The +// error message should be a developer-facing English message that helps +// developers *understand* and *resolve* the error. If a localized user-facing +// error message is needed, put the localized message in the error details or +// localize it in the client. The optional error details may contain arbitrary +// information about the error. There is a predefined set of error detail types +// in the package `google.rpc` that can be used for common error conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error model, but it +// is not necessarily the actual wire format. When the `Status` message is +// exposed in different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety of +// environments, either with or without APIs, to provide a +// consistent developer experience across different environments. 
+// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the client, +// it may embed the `Status` in the normal response to indicate the partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step may +// have a `Status` message for error reporting. +// +// - Batch operations. If a client uses batch request and batch response, the +// `Status` message should be used directly inside batch response, one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous operation +// results in its response, the status of those operations should be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message `Status` could +// be used directly after any stripping needed for security/privacy reasons. +message Status { + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + int32 code = 1; + + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + string message = 2; + + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. 
+ repeated google.protobuf.Any details = 3; +} +``` + + +### Usage of Well-Known-Proto `Any` + +Please check [ProtoBuf Document: Any](https://developers.google.com/protocol-buffers/docs/reference/python-generated#any) + +```Python +any_message.Pack(message) +any_message.Unpack(message) +assert any_message.Is(message.DESCRIPTOR) +``` diff --git a/examples/python/helloworld/README.md b/examples/python/helloworld/README.md new file mode 100644 index 00000000..be57d897 --- /dev/null +++ b/examples/python/helloworld/README.md @@ -0,0 +1 @@ +[This code's documentation lives on the grpc.io site.](https://grpc.io/docs/languages/python/quickstart) diff --git a/examples/python/metadata/README.md b/examples/python/metadata/README.md new file mode 100644 index 00000000..26ffa44f --- /dev/null +++ b/examples/python/metadata/README.md @@ -0,0 +1,6 @@ +An example showing how to add custom HTTP2 headers (or [metadata](https://grpc.io/grpc/python/glossary.html) in gRPC glossary) + +HTTP2 supports initial headers and trailing headers, which gRPC utilizes both of them ([learn more](https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md)). + +More complete documentation lives at [grpc.io](https://grpc.io/docs/languages/python/basics). +For API reference please see [API](https://grpc.io/grpc/python/grpc.html). diff --git a/examples/python/multiplex/README.md b/examples/python/multiplex/README.md new file mode 100644 index 00000000..8afdb4df --- /dev/null +++ b/examples/python/multiplex/README.md @@ -0,0 +1,3 @@ +An example showing two stubs sharing a channel and two servicers sharing a server. + +More complete documentation lives at [grpc.io](https://grpc.io/docs/languages/python/basics). 
diff --git a/examples/python/multiprocessing/README.md b/examples/python/multiprocessing/README.md new file mode 100644 index 00000000..5dce50ad --- /dev/null +++ b/examples/python/multiprocessing/README.md @@ -0,0 +1,74 @@ +## Multiprocessing with gRPC Python + +Multiprocessing allows application developers to sidestep the Python global +interpreter lock and achieve true parallelism on multicore systems. +Unfortunately, using multiprocessing and gRPC Python is not yet as simple as +instantiating your server with a `futures.ProcessPoolExecutor`. + +The library is implemented as a C extension, maintaining much of the state that +drives the system in native code. As such, upon calling +[`fork`](http://man7.org/linux/man-pages/man2/fork.2.html), any threads in a +critical section may leave the state of the gRPC library invalid in the child +process. See this [excellent research +paper](https://www.microsoft.com/en-us/research/uploads/prod/2019/04/fork-hotos19.pdf) +for a thorough discussion of the topic. + +Calling `fork` without `exec` in your process *is* supported +before any gRPC servers have been instantiated. Application developers can +take advantage of this to parallelize their CPU-intensive operations. + +## Calculating Prime Numbers with Multiple Processes + +This example calculates the first 10,000 prime numbers as an RPC. We instantiate +one server per subprocess, balancing requests between the servers using the +[`SO_REUSEPORT`](https://lwn.net/Articles/542629/) socket option. + +```python +_PROCESS_COUNT = multiprocessing.cpu_count() +``` + +On the server side, we detect the number of CPUs available on the system and +spawn exactly that many child processes. If we spin up fewer, we won't be taking +full advantage of the hardware resources available. 
+ +## Running the Example + +To run the server, +[ensure `bazel` is installed](https://docs.bazel.build/versions/master/install.html) +and run: + +``` +bazel run //examples/python/multiprocessing:server & +``` + +Note the address at which the server is running. For example, + +``` +... +[PID 107153] Binding to '[::]:33915' +[PID 107507] Starting new server. +[PID 107508] Starting new server. +... +``` + +Note that several servers have been started, each with its own PID. + +Now, start the client by running + +``` +bazel run //examples/python/multiprocessing:client -- [SERVER_ADDRESS] +``` + +For example, + +``` +bazel run //examples/python/multiprocessing:client -- [::]:33915 +``` + +Alternatively, generate code using the following and then run the client and server +directly: + +```sh +cd examples/python/multiprocessing +python -m grpc_tools.protoc -I . prime.proto --python_out=. --grpc_python_out=. +``` diff --git a/examples/python/retry/README.md b/examples/python/retry/README.md new file mode 100644 index 00000000..75cdb35b --- /dev/null +++ b/examples/python/retry/README.md @@ -0,0 +1,48 @@ +# Retry Example in gRPC Python + +## Prerequisite + +* grpcio >= 1.39.0 +* grpcio-tools >= 1.39.0 + +## Running the example + +In terminal 1, start the flaky server: + +```sh +python3 flaky_server.py +``` + +In terminal 2, start the retry clients: + +```sh +python3 retry_client.py +# Or +python3 async_retry_client.py +``` + +## Expected results + +The client RPC will succeed, even with the server injecting multiple errors.
Here is an example server log: + +```sh +$ python3 flaky_server.py +INFO:root:Starting flaky server on [::]:50051 +INFO:root:Injecting error to RPC from ipv6:[::1]:54471 +INFO:root:Successfully responding to RPC from ipv6:[::1]:54473 +INFO:root:Injecting error to RPC from ipv6:[::1]:54491 +INFO:root:Injecting error to RPC from ipv6:[::1]:54581 +INFO:root:Injecting error to RPC from ipv6:[::1]:54581 +INFO:root:Injecting error to RPC from ipv6:[::1]:54581 +INFO:root:Injecting error to RPC from ipv6:[::1]:54581 +INFO:root:Successfully responding to RPC from ipv6:[::1]:54581 +INFO:root:Injecting error to RPC from ipv6:[::1]:55474 +INFO:root:Injecting error to RPC from ipv6:[::1]:55474 +INFO:root:Injecting error to RPC from ipv6:[::1]:55474 +INFO:root:Injecting error to RPC from ipv6:[::1]:55474 +INFO:root:Successfully responding to RPC from ipv6:[::1]:55474 +INFO:root:Injecting error to RPC from ipv6:[::1]:55533 +INFO:root:Injecting error to RPC from ipv6:[::1]:55533 +INFO:root:Injecting error to RPC from ipv6:[::1]:55533 +INFO:root:Successfully responding to RPC from ipv6:[::1]:55533 +``` diff --git a/examples/python/route_guide/README.md b/examples/python/route_guide/README.md new file mode 100644 index 00000000..6f7a2f0f --- /dev/null +++ b/examples/python/route_guide/README.md @@ -0,0 +1 @@ +[This code's documentation lives on the grpc.io site.](https://grpc.io/docs/languages/python/basics) diff --git a/examples/python/wait_for_ready/README.md b/examples/python/wait_for_ready/README.md new file mode 100644 index 00000000..6e873d22 --- /dev/null +++ b/examples/python/wait_for_ready/README.md @@ -0,0 +1,32 @@ +# gRPC Python Example for Wait-for-ready + +The default behavior of an RPC is to fail instantly if the server is not ready yet. This example demonstrates how to change that behavior. + + +### Definition of 'wait-for-ready' semantics +> If an RPC is issued but the channel is in TRANSIENT_FAILURE or SHUTDOWN states, the RPC is unable to be transmitted promptly. 
By default, gRPC implementations SHOULD fail such RPCs immediately. This is known as "fail fast," but the usage of the term is historical. RPCs SHOULD NOT fail as a result of the channel being in other states (CONNECTING, READY, or IDLE). +> +> gRPC implementations MAY provide a per-RPC option to not fail RPCs as a result of the channel being in TRANSIENT_FAILURE state. Instead, the implementation queues the RPCs until the channel is READY. This is known as "wait for ready." The RPCs SHOULD still fail before READY if there are unrelated reasons, such as the channel is SHUTDOWN or the RPC's deadline is reached. +> +> From https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md + + +### Use cases for 'wait-for-ready' + +When developers spin up gRPC clients and servers at the same time, it is very likely that the first couple of RPC calls will fail due to unavailability of the server. If developers fail to prepare for this situation, the result can be catastrophic. But with 'wait-for-ready' semantics, developers can initialize the client and server in any order, which is especially useful in testing. + +Also, developers may ensure the server is up before starting the client. But in some cases, a transient network failure may result in a temporary unavailability of the server. With 'wait-for-ready' semantics, those RPC calls will automatically wait until the server is ready to accept incoming requests. + + +### DEMO Snippets + +```Python +# Per RPC level +stub = ...Stub(...) + +stub.important_transaction_1(..., wait_for_ready=True) +stub.unimportant_transaction_2(...) +stub.important_transaction_3(..., wait_for_ready=True) +stub.unimportant_transaction_4(...) +# The unimportant transactions can be status report, or health check, etc.
+``` diff --git a/examples/python/xds/README.md b/examples/python/xds/README.md new file mode 100644 index 00000000..cba8c069 --- /dev/null +++ b/examples/python/xds/README.md @@ -0,0 +1,123 @@ +gRPC Hostname Example +===================== + +The hostname example is a Hello World server whose response includes its +hostname. It also supports health and reflection services. This makes it a good +server to test infrastructure, like load balancing. This example depends on a +gRPC version of 1.28.1 or newer. + +### Run the Server + +1. Navigate to this directory: + +```sh +cd grpc/examples/python/xds +``` + +2. Run the server + +```sh +virtualenv venv -p python3 +source venv/bin/activate +pip install -r requirements.txt +python server.py +``` + +### Run the Client + +1. Set up xDS configuration. + +After configuring your xDS server to track the gRPC server we just started, +create a bootstrap file as described in [gRFC A27](https://github.com/grpc/proposal/blob/master/A27-xds-global-load-balancing.md): + +``` +{ + "xds_servers": [ + { + "server_uri": , + "channel_creds": [ + { + "type": , + "config": + } + ] + } + ], + "node": +} +``` + +2. Point the `GRPC_XDS_BOOTSTRAP` environment variable at the bootstrap file: + +``` +export GRPC_XDS_BOOTSTRAP=/etc/xds-bootstrap.json +``` + +3. Run the client: + +``` +python client.py xds:///my-backend +``` + +### Verifying Configuration with a CLI Tool + +Alternatively, `grpcurl` can be used to verify your server. If you don't have it, +install [`grpcurl`](https://github.com/fullstorydev/grpcurl/releases). This will allow +you to manually test the service. + +Be sure to set up the bootstrap file and `GRPC_XDS_BOOTSTRAP` as in the previous +section. + +1. Verify the server's application-layer service: + +```sh +> grpcurl --plaintext -d '{"name": "you"}' localhost:50051 +{ + "message": "Hello you from rbell.svl.corp.google.com!" +} +``` + +2.
Verify that all services are available via reflection: + +```sh +> grpcurl --plaintext localhost:50051 list +grpc.health.v1.Health +grpc.reflection.v1alpha.ServerReflection +helloworld.Greeter +``` + +3. Verify that all services are reporting healthy: + +```sh +> grpcurl --plaintext -d '{"service": "helloworld.Greeter"}' localhost:50051 +grpc.health.v1.Health/Check +{ + "status": "SERVING" +} + +> grpcurl --plaintext -d '{"service": ""}' localhost:50051 +grpc.health.v1.Health/Check +{ + "status": "SERVING" +} +``` + +### Running with Proxyless Security + +#### Run the Server with Secure Credentials + +Add the `--secure true` flag to the invocation outlined above. + +```sh +python server.py --secure true +``` + +#### Run the Client with Secure Credentials + +Add the `--secure true` flag to the invocation outlined above. + +3. Run the client: + +``` +python client.py xds:///my-backend --secure true +``` diff --git a/examples/ruby/README.md b/examples/ruby/README.md new file mode 100644 index 00000000..2a3dec33 --- /dev/null +++ b/examples/ruby/README.md @@ -0,0 +1,63 @@ +gRPC in 3 minutes (Ruby) +======================== + +BACKGROUND +------------- +For this sample, we've already generated the server and client stubs from [helloworld.proto][] + +PREREQUISITES +------------- + +- Ruby 2.x +This requires Ruby 2.x, as the gRPC API surface uses keyword args. +If you don't have that installed locally, you can use [RVM][] to use Ruby 2.x for testing without upgrading the version of Ruby on your whole system. +RVM is also useful if you don't have the necessary privileges to update your system's Ruby. 
+ + ```sh + $ # RVM installation as specified at https://rvm.io/rvm/install + $ gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 + $ \curl -sSL https://get.rvm.io | bash -s stable --ruby=ruby-2 + $ + $ # follow the instructions to ensure that you're using the latest stable version of Ruby + $ # and that the rvm command is installed + ``` +- *N.B.* Make sure you run `source $HOME/.rvm/scripts/rvm` as instructed to complete the set-up of RVM. + +INSTALL +------- +- [Install gRPC Ruby][] + +- Use bundler to install the example package's dependencies + + ```sh + $ # from this directory + $ gem install bundler # if you don't already have bundler available + $ bundle install + ``` + +Try it! +------- + +- Run the server + + ```sh + $ # from this directory + $ bundle exec ./greeter_server.rb & + ``` + +- Run the client + + ```sh + $ # from this directory + $ bundle exec ./greeter_client.rb + ``` + +Tutorial +-------- + +You can find a more detailed tutorial in [gRPC Basics: Ruby][] + +[helloworld.proto]:../protos/helloworld.proto +[RVM]:https://www.rvm.io/ +[Install gRPC ruby]:../../src/ruby#installation +[gRPC Basics: Ruby]:https://grpc.io/docs/languages/ruby/basics diff --git a/examples/ruby/errors_and_cancellation/README.md b/examples/ruby/errors_and_cancellation/README.md new file mode 100644 index 00000000..661bd847 --- /dev/null +++ b/examples/ruby/errors_and_cancellation/README.md @@ -0,0 +1,25 @@ +# Errors and Cancellation code samples for grpc-ruby + +The examples in this directory show use of grpc errors. + +On the server side, errors are returned from service +implementations by raising a certain `GRPC::BadStatus` exception. + +On the client side, GRPC errors get raised when either: + * the call completes (unary and client-streaming call types) + * the response `Enumerable` is iterated through (server-streaming and + bidi call types).
+ +## To run the examples here: + +Start the server: + +``` +> ruby error_examples_server.rb +``` + +Then run the client: + +``` +> ruby error_examples_client.rb +``` diff --git a/examples/ruby/route_guide/README.md b/examples/ruby/route_guide/README.md new file mode 100644 index 00000000..83501d4b --- /dev/null +++ b/examples/ruby/route_guide/README.md @@ -0,0 +1,6 @@ +# gRPC Basics: Ruby sample code + +The files in this folder are the samples used in [gRPC Basics: Ruby][], +a detailed tutorial for using gRPC in Ruby. + +[gRPC Basics: Ruby]:https://grpc.io/docs/languages/ruby/basics diff --git a/examples/ruby/without_protobuf/README.md b/examples/ruby/without_protobuf/README.md new file mode 100644 index 00000000..f9611648 --- /dev/null +++ b/examples/ruby/without_protobuf/README.md @@ -0,0 +1,6 @@ +gRPC (Ruby) without protobuf +======================== + +This directory contains a simple example of using gRPC without protobuf. + +This is mainly intended to show basic usage of the GRPC::GenericService module diff --git a/include/grpc++/README.md b/include/grpc++/README.md new file mode 100644 index 00000000..5842eb3a --- /dev/null +++ b/include/grpc++/README.md @@ -0,0 +1,7 @@ +# include/grpc++ + +This was the original directory name for all C++ header files but it +conflicted with the naming scheme required for some build systems. It +is superseded by `include/grpcpp` but the old directory structure is +still present to avoid breaking code that used the old include files. +All new include files are only in `include/grpcpp`. diff --git a/include/grpc/event_engine/README.md b/include/grpc/event_engine/README.md new file mode 100644 index 00000000..b2d4fef8 --- /dev/null +++ b/include/grpc/event_engine/README.md @@ -0,0 +1,38 @@ +# gRPC EventEngine + +An EventEngine handles all cross-platform I/O, task execution, and DNS +resolution for gRPC. 
A default, cross-platform implementation is provided with +gRPC, but part of the intent here is to provide an interface for external +integrators to bring their own functionality. This allows for integration with +external event loops, siloing I/O and task execution between channels or +servers, and other custom integrations that were previously unsupported. + +*WARNING*: This is experimental code and is subject to change. + +## High level expectations of an EventEngine implementation + +### Provide their own I/O threads +EventEngines are expected to internally create whatever threads are required to +perform I/O and execute callbacks. For example, an EventEngine implementation +may want to spawn separate thread pools for polling and callback execution. + +### Provisioning data buffers via Slice allocation +At a high level, gRPC provides a `ResourceQuota` system that allows gRPC to +reclaim memory and degrade gracefully when memory reaches application-defined +thresholds. To enable this feature, the memory allocation of read/write buffers +within an EventEngine must be acquired in the form of Slices from +SliceAllocators. This is covered more fully in the gRFC and code. + +### Documenting expectations around callback execution +Some callbacks may be expensive to run. EventEngines should decide on and +document whether callback execution might block polling operations. This way, +application developers can plan accordingly (e.g., run their expensive callbacks +on a separate thread if necessary). + +### Handling concurrent usage +Assume that gRPC may use an EventEngine concurrently across multiple threads. + +## TODO: documentation + +* Example usage +* Link to gRFC diff --git a/include/grpc/impl/codegen/README.md b/include/grpc/impl/codegen/README.md new file mode 100644 index 00000000..1bafd001 --- /dev/null +++ b/include/grpc/impl/codegen/README.md @@ -0,0 +1,22 @@ +# Welcome to `include/grpc/impl/codegen` + +## Why is this directory here?
+ +This directory exists so that generated C++ code can include selected files upon +which it depends without having to depend on the entire gRPC C++ library. This +directory thus exists to support `include/grpcpp/impl/codegen`. This constraint +is particularly relevant for users of bazel, particularly if they use the +multi-lingual `proto_library` target type. Generated code that uses this target +only depends on the gRPC C++ targets associated with these header files, not the +entire gRPC C++ codebase since that would make the build time of these types of +targets excessively large (particularly when they are not even C++ specific). + +## What should user code do? + +User code should *not* include anything from this directory. Only generated code +and gRPC library code should include contents from this directory. C++ user code +should instead include contents from the main `grpcpp` directory or its +accessible subcomponents like `grpcpp/support`. It is possible that we may +remove this directory altogether if the motivations for its existence are no +longer strong enough (e.g., if the gRPC C++ library no longer has a need for an +`impl/codegen` directory of its own). diff --git a/include/grpcpp/impl/README.md b/include/grpcpp/impl/README.md new file mode 100644 index 00000000..612150ca --- /dev/null +++ b/include/grpcpp/impl/README.md @@ -0,0 +1,4 @@ +**The APIs in this directory are not stable!** + +This directory contains header files that need to be installed but are not part +of the public API. Users should not use these headers directly. diff --git a/include/grpcpp/impl/codegen/README.md b/include/grpcpp/impl/codegen/README.md new file mode 100644 index 00000000..ade9d054 --- /dev/null +++ b/include/grpcpp/impl/codegen/README.md @@ -0,0 +1,21 @@ +# Welcome to `include/grpcpp/impl/codegen` + +## Why is this directory here? 
+ +This directory exists so that generated code can include selected files upon +which it depends without having to depend on the entire gRPC C++ library. This +is particularly relevant for users of bazel, particularly if they use the +multi-lingual `proto_library` target type. Generated code that uses this target +only depends on the gRPC C++ targets associated with these header files, not the +entire gRPC C++ codebase since that would make the build time of these types of +targets excessively large (particularly when they are not even C++ specific). + +## What should user code do? + +User code should *not* include anything from this directory. Only generated code +and gRPC library code should include contents from this directory. User code +should instead include contents from the main `grpcpp` directory or its +accessible subcomponents like `grpcpp/support`. It is possible that we may +remove this directory altogether if the motivations for its existence are no +longer strong enough (e.g., if most users migrate away from the `proto_library` +target type or if the additional overhead of depending on gRPC C++ is not high). diff --git a/src/android/test/interop/README.md b/src/android/test/interop/README.md new file mode 100644 index 00000000..e399dab0 --- /dev/null +++ b/src/android/test/interop/README.md @@ -0,0 +1,37 @@ +gRPC on Android +============== + +Note: Building the protobuf dependency for Android requires +https://github.com/protocolbuffers/protobuf/pull/3878. This fix will be in the next +protobuf release, but until then must be manually patched in to +`third_party/protobuf` to build gRPC for Android. 
+ +PREREQUISITES +------------- + +- Android SDK +- Android NDK +- `protoc` and `grpc_cpp_plugin` binaries on the host system + +INSTALL +------- + +The example application can be built via Android Studio or on the command line +using `gradle`: + + ```sh + $ ./gradlew installDebug + ``` + +INSTRUMENTATION TESTS +--------------------- + +The instrumentation tests can be run via the following `gradle` command. This +requires an emulator already running on your computer. + +``` +$ ./gradlew connectedAndroidTest \ + -Pandroid.testInstrumentationRunnerArguments.server_host=grpc-test.sandbox.googleapis.com \ + -Pandroid.testInstrumentationRunnerArguments.server_port=443 \ + -Pandroid.testInstrumentationRunnerArguments.use_tls=true +``` diff --git a/src/compiler/README.md b/src/compiler/README.md new file mode 100644 index 00000000..d5684af7 --- /dev/null +++ b/src/compiler/README.md @@ -0,0 +1,4 @@ +# Overview + +This directory contains source code for gRPC protocol buffer compiler (*protoc*) plugins. Along with `protoc`, +these plugins are used to generate gRPC client and server stubs from `.proto` files. diff --git a/src/core/README.md b/src/core/README.md new file mode 100644 index 00000000..d0f77c53 --- /dev/null +++ b/src/core/README.md @@ -0,0 +1,6 @@ +# gRPC core library + +This shared library provides all of gRPC's core functionality through a low +level API. gRPC libraries for the other languages supported in this repo, are +built on top of this shared core library. + diff --git a/src/core/ext/README.md b/src/core/ext/README.md new file mode 100644 index 00000000..0812b208 --- /dev/null +++ b/src/core/ext/README.md @@ -0,0 +1,5 @@ +Optional plugins for gRPC Core: Modules in this directory extend gRPC Core in +useful ways. All optional code belongs here. + +NOTE: The movement of code between lib and ext is an ongoing effort, so this +directory currently contains too much of the core library. 
diff --git a/src/core/ext/filters/client_channel/README.md b/src/core/ext/filters/client_channel/README.md new file mode 100644 index 00000000..ffb09fd3 --- /dev/null +++ b/src/core/ext/filters/client_channel/README.md @@ -0,0 +1,49 @@ +Client Configuration Support for GRPC +===================================== + +This library provides high level configuration machinery to construct client +channels and load balance between them. + +Each `grpc_channel` is created with a `Resolver`. It is the resolver's duty +to resolve a name into a set of arguments for the channel. Such arguments +might include: + +- a list of (ip, port) addresses to connect to +- a load balancing policy to decide which server to send a request to +- a set of filters to mutate outgoing requests (say, by adding metadata) + +The resolver provides this data as a stream of `grpc_channel_args` objects to +the channel. We represent arguments as a stream so that they can be changed +by the resolver during execution, by reacting to external events (such as +new service configuration data being pushed to some store). + + +Load Balancing +-------------- + +Load balancing configuration is provided by a `LoadBalancingPolicy` object. + +The primary job of the load balancing policies is to pick a target server +given only the initial metadata for a request. It does this by providing +a `ConnectedSubchannel` object to the owning channel. + + +Sub-Channels +------------ + +A sub-channel provides a connection to a server for a client channel. It has a +connectivity state like a regular channel, and so can be connected or +disconnected. This connectivity state can be used to inform load balancing +decisions (for example, by avoiding disconnected backends). + +Configured sub-channels are fully setup to participate in the grpc data plane. +Their behavior is specified by a set of grpc channel filters defined at their +construction. 
To customize this behavior, transports build +`ClientChannelFactory` objects, which customize construction arguments for +concrete subchannel instances. + + +Naming for GRPC +=============== + +See [/doc/naming.md](gRPC name resolution). diff --git a/src/core/ext/filters/client_channel/resolver/README.md b/src/core/ext/filters/client_channel/resolver/README.md new file mode 100644 index 00000000..b0e234e9 --- /dev/null +++ b/src/core/ext/filters/client_channel/resolver/README.md @@ -0,0 +1,4 @@ +# Resolver + +Implementations of various name resolution schemes. +See the [naming spec](/doc/naming.md). diff --git a/src/core/ext/filters/client_channel/resolver/binder/README.md b/src/core/ext/filters/client_channel/resolver/binder/README.md new file mode 100644 index 00000000..b6029676 --- /dev/null +++ b/src/core/ext/filters/client_channel/resolver/binder/README.md @@ -0,0 +1,9 @@ +Support for resolving the scheme used by binder transport implementation. + +The URI's authority is required to be empty. + +The path is used as the identifiers of endpoint binder objects and the length +limit of the identifier is the same as unix socket length limit. + +The length limit of the path should at least be 100 characters long. This is +guaranteed by `static_assert` in the implementation. 
diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/README.md b/src/core/ext/filters/client_channel/resolver/dns/native/README.md new file mode 100644 index 00000000..695de47b --- /dev/null +++ b/src/core/ext/filters/client_channel/resolver/dns/native/README.md @@ -0,0 +1,2 @@ +dns: scheme name resolution, using getaddrbyname +(or other OS specific implementation) diff --git a/src/core/ext/filters/client_channel/resolver/sockaddr/README.md b/src/core/ext/filters/client_channel/resolver/sockaddr/README.md new file mode 100644 index 00000000..e307ba88 --- /dev/null +++ b/src/core/ext/filters/client_channel/resolver/sockaddr/README.md @@ -0,0 +1 @@ +Support for resolving ipv4:, ipv6:, unix: schemes diff --git a/src/core/ext/transport/README.md b/src/core/ext/transport/README.md new file mode 100644 index 00000000..22905687 --- /dev/null +++ b/src/core/ext/transport/README.md @@ -0,0 +1 @@ +Transports for gRPC diff --git a/src/core/ext/transport/binder/README.md b/src/core/ext/transport/binder/README.md new file mode 100644 index 00000000..2a797d3b --- /dev/null +++ b/src/core/ext/transport/binder/README.md @@ -0,0 +1,6 @@ +# Binder transport for cross process IPC on Android + +Under construction. 
+ +This transport implements +[BinderChannel for native cross-process communication on Android](https://github.com/grpc/proposal/blob/master/L73-java-binderchannel.md) diff --git a/src/core/ext/transport/chttp2/README.md b/src/core/ext/transport/chttp2/README.md new file mode 100644 index 00000000..8880a474 --- /dev/null +++ b/src/core/ext/transport/chttp2/README.md @@ -0,0 +1 @@ +CHTTP2 - gRPC's implementation of a HTTP2 based transport diff --git a/src/core/ext/transport/chttp2/client/insecure/README.md b/src/core/ext/transport/chttp2/client/insecure/README.md new file mode 100644 index 00000000..fa114633 --- /dev/null +++ b/src/core/ext/transport/chttp2/client/insecure/README.md @@ -0,0 +1 @@ +Plugin for creating insecure channels using chttp2 diff --git a/src/core/ext/transport/chttp2/client/secure/README.md b/src/core/ext/transport/chttp2/client/secure/README.md new file mode 100644 index 00000000..405a86e5 --- /dev/null +++ b/src/core/ext/transport/chttp2/client/secure/README.md @@ -0,0 +1 @@ +Plugin for creating secure channels using chttp2 diff --git a/src/core/ext/transport/chttp2/server/insecure/README.md b/src/core/ext/transport/chttp2/server/insecure/README.md new file mode 100644 index 00000000..fc0bc14e --- /dev/null +++ b/src/core/ext/transport/chttp2/server/insecure/README.md @@ -0,0 +1 @@ +Plugin for creating insecure servers using chttp2 diff --git a/src/core/ext/transport/chttp2/server/secure/README.md b/src/core/ext/transport/chttp2/server/secure/README.md new file mode 100644 index 00000000..6bda696a --- /dev/null +++ b/src/core/ext/transport/chttp2/server/secure/README.md @@ -0,0 +1 @@ +Plugin for creating secure servers using chttp2 diff --git a/src/core/ext/transport/chttp2/transport/README.md b/src/core/ext/transport/chttp2/transport/README.md new file mode 100644 index 00000000..4684e587 --- /dev/null +++ b/src/core/ext/transport/chttp2/transport/README.md @@ -0,0 +1,4 @@ +chttp2 transport plugin - implements grpc over http2 + +Used by 
chttp2/{client,server}/{insecure,secure} plugins to implement most of +their functionality diff --git a/src/core/lib/README.md b/src/core/lib/README.md new file mode 100644 index 00000000..69b6bce2 --- /dev/null +++ b/src/core/lib/README.md @@ -0,0 +1,6 @@ +Required elements of gRPC Core: Each module in this directory is required to +build gRPC. If it's possible to envisage a configuration where code is not +required, then that code belongs in ext/ instead. + +NOTE: The movement of code between lib and ext is an ongoing effort, so this +directory currently contains too much of the core library. diff --git a/src/core/lib/channel/README.md b/src/core/lib/channel/README.md new file mode 100644 index 00000000..2dfcfe6e --- /dev/null +++ b/src/core/lib/channel/README.md @@ -0,0 +1,4 @@ +# Channel + +Provides channel/call stack implementation, and implementation of common filters +for that implementation. diff --git a/src/core/lib/gpr/README.md b/src/core/lib/gpr/README.md new file mode 100644 index 00000000..21fb0c79 --- /dev/null +++ b/src/core/lib/gpr/README.md @@ -0,0 +1,8 @@ +# GPR - Google Portable Runtime for C + +The files in this directory contain basic utility code and platform +abstractions for C code. None of this code is gRPC-specific; anything +here may also be useful for other open source projects written in C. + +Note that this is one of the few places in src/core where we allow +the use of portability macros. diff --git a/src/core/lib/gprpp/README.md b/src/core/lib/gprpp/README.md new file mode 100644 index 00000000..f5b87481 --- /dev/null +++ b/src/core/lib/gprpp/README.md @@ -0,0 +1,8 @@ +# GPR++ - Google Portable Runtime for C++ + +The files in this directory contain various utility code for C++ code. +None of this code is gRPC-specific; anything here may also be useful +for other open source projects written in C++. + +Note that this is one of the few places in src/core where we allow +the use of portability macros. 
diff --git a/src/core/lib/iomgr/README.md b/src/core/lib/iomgr/README.md new file mode 100644 index 00000000..9b22b76c --- /dev/null +++ b/src/core/lib/iomgr/README.md @@ -0,0 +1,6 @@ +# iomgr + +Platform abstractions for I/O (mostly network). + +Provides abstractions over TCP/UDP I/O, file loading, polling, and concurrency +management for various operating systems. diff --git a/src/core/lib/surface/README.md b/src/core/lib/surface/README.md new file mode 100644 index 00000000..74cbd711 --- /dev/null +++ b/src/core/lib/surface/README.md @@ -0,0 +1,4 @@ +# Surface + +Surface provides the bulk of the gRPC Core public API, and translates it into +calls against core components. diff --git a/src/core/lib/transport/README.md b/src/core/lib/transport/README.md new file mode 100644 index 00000000..e7e135ed --- /dev/null +++ b/src/core/lib/transport/README.md @@ -0,0 +1,7 @@ +# Transport + +Common implementation details for gRPC Transports. + +Transports multiplex messages across some single connection. In ext/ there are +implementations atop [a custom http2 implementation](/src/core/ext/transport/chttp2/README.md) +and atop [cronet](/src/core/ext/transport/cronet/README.md). diff --git a/src/core/tsi/README.md b/src/core/tsi/README.md new file mode 100644 index 00000000..3ca3c1ef --- /dev/null +++ b/src/core/tsi/README.md @@ -0,0 +1,2 @@ +# Transport Security Interface +An abstraction library over crypto and auth modules (typically OpenSSL) diff --git a/src/cpp/README.md b/src/cpp/README.md new file mode 100644 index 00000000..5c1c40ac --- /dev/null +++ b/src/cpp/README.md @@ -0,0 +1,225 @@ +# gRPC C++ + +This directory contains the C++ implementation of gRPC. + +# To start using gRPC C++ + +This section describes how to add gRPC as a dependency to your C++ project. + +In the C++ world, there's no universally accepted standard for managing project dependencies. +Therefore, gRPC supports several major build systems, which should satisfy most users. 
+ +## Supported Platforms + +* Officially Supported: These platforms are officially supported. We test our + code on these platform and have automated continuous integration tests for + them. + +* Best Effort: We do not have continous integration tests for these, but we are + fairly confident that gRPC C++ would work on them. We will make our best + effort to support them, and we welcome patches for such platforms, but we + might need to declare bankruptcy on some issues. + +* Community Supported: These platforms are supported by contributions from the + open source community, there is no official support for them. Breakages on + these platforms may go unnoticed, and the community is responsible for all + maintenance. Unmaintained code for these platforms may be deleted. + +| Operating System | Architectures | Versions | Support Level | +|------------------|---------------|----------|---------------| +| Linux - Debian, Ubuntu, CentOS | x86, x64 | clang 4+, GCC 4.9+ | Officially Supported | +| Windows 10+ | x86, x64 | Visual Studio 2015+ | Officially Supported | +| MacOS | x86, x64 | XCode 7.2+ | Officially Supported | +| Linux - Others | x86, x64 | clang 4+, GCC 4.9+ | Best Effort | +| Linux | ARM | | Best Effort | +| iOS | | | Best Effort | +| Android | | | Best Effort | +| Asylo | | | Best Effort | +| FreeBSD | | | Community Supported | +| OpenBSD | | | Community Supported | +| AIX | | | Community Supported | +| Solaris | | | Community Supported | +| NaCL | | | Community Supported | +| Fuchsia | | | Community Supported | + +## Bazel + +Bazel is the primary build system used by the core gRPC development team. Bazel +provides fast builds and it easily handles dependencies that support bazel. + +To add gRPC as a dependency in bazel: +1. determine commit SHA for the grpc release you want to use +2. 
Use the [http_archive](https://docs.bazel.build/versions/master/repo/http.html#http_archive) bazel rule to include gRPC source + ``` + http_archive( + name = "com_github_grpc_grpc", + urls = [ + "https://github.com/grpc/grpc/archive/YOUR_GRPC_COMMIT_SHA.tar.gz", + ], + strip_prefix = "grpc-YOUR_GRPC_COMMIT_SHA", + ) + load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps") + grpc_deps() + load("@com_github_grpc_grpc//bazel:grpc_extra_deps.bzl", "grpc_extra_deps") + grpc_extra_deps() + ``` + +## CMake + +`cmake` is your best option if you cannot use bazel. It supports building on Linux, +MacOS and Windows (official support) but also has a good chance of working on +other platforms (no promises!). `cmake` has good support for crosscompiling and +can be used for targeting the Android platform. + +To build gRPC C++ from source, follow the [BUILDING guide](../../BUILDING.md). + +### find_package + +The canonical way to discover dependencies in CMake is the +[`find_package` command](https://cmake.org/cmake/help/latest/command/find_package.html). + +```cmake +find_package(gRPC CONFIG REQUIRED) +add_executable(my_exe my_exe.cc) +target_link_libraries(my_exe gRPC::grpc++) +``` +[Full example](../../examples/cpp/helloworld/CMakeLists.txt) + +`find_package` can only find software that has already been installed on your +system. In practice that means you'll need to install gRPC using cmake first. +gRPC's cmake support provides the option to install gRPC either system-wide +(not recommended) or under a directory prefix in a way that you can later +easily use it with the `find_package(gRPC CONFIG REQUIRED)` command. + +The following sections describe strategies to automatically build gRPC +as part of your project. + +### FetchContent +If you are using CMake v3.11 or newer you should use CMake's +[FetchContent module](https://cmake.org/cmake/help/latest/module/FetchContent.html). 
+The first time you run CMake in a given build directory, FetchContent will +clone the gRPC repository and its submodules. `FetchContent_MakeAvailable()` +also sets up an `add_subdirectory()` rule for you. This causes gRPC to be +built as part of your project. + +```cmake +cmake_minimum_required(VERSION 3.15) +project(my_project) + +include(FetchContent) +FetchContent_Declare( + gRPC + GIT_REPOSITORY https://github.com/grpc/grpc + GIT_TAG RELEASE_TAG_HERE # e.g v1.28.0 +) +set(FETCHCONTENT_QUIET OFF) +FetchContent_MakeAvailable(gRPC) + +add_executable(my_exe my_exe.cc) +target_link_libraries(my_exe grpc++) +``` + +Note that you need to +[install the prerequisites](../../BUILDING.md#pre-requisites) +before building gRPC. + +### git submodule +If you cannot use FetchContent, another approach is to add the gRPC source tree +to your project as a +[git submodule](https://git-scm.com/book/en/v2/Git-Tools-Submodules). +You can then add it to your CMake project with `add_subdirectory()`. +[Example](../../examples/cpp/helloworld/CMakeLists.txt) + +### Support system-installed gRPC + +If your project builds gRPC you should still consider the case where a user +wants to build your software using a previously installed gRPC. Here's a +code snippet showing how this is typically done. + +```cmake +option(USE_SYSTEM_GRPC "Use system installed gRPC" OFF) +if(USE_SYSTEM_GRPC) + # Find system-installed gRPC + find_package(gRPC CONFIG REQUIRED) +else() + # Build gRPC using FetchContent or add_subdirectory +endif() +``` + +[Full example](../../examples/cpp/helloworld/CMakeLists.txt) + +## pkg-config + +If your project does not use CMake (e.g. you're using `make` directly), you can +first install gRPC C++ using CMake, and have your non-CMake project rely on the +`pkgconfig` files which are provided by gRPC installation. 
+[Example](../../test/distrib/cpp/run_distrib_test_cmake_pkgconfig.sh) + +**Note for CentOS 7 users** + +CentOS-7 ships with `pkg-config` 0.27.1, which has a +[bug](https://bugs.freedesktop.org/show_bug.cgi?id=54716) that can make +invocations take extremely long to complete. If you plan to use `pkg-config`, +you'll want to upgrade it to something newer. + +## make (deprecated) + +The default choice for building on UNIX based systems used to be `make`, but we are no longer recommending it. +You should use `bazel` or `cmake` instead. + +To install gRPC for C++ on your system using `make`, follow the [Building gRPC C++](../../BUILDING.md) +instructions to build from source and then install locally using `make install`. +This also installs the protocol buffer compiler `protoc` (if you don't have it already), +and the C++ gRPC plugin for `protoc`. + +WARNING: After installing with `make install` there is no easy way to uninstall, which can cause issues +if you later want to remove the grpc and/or protobuf installation or upgrade to a newer version. + +## Packaging systems + +We do not officially support any packaging system for C++, but there are some community-maintained packages that are kept up-to-date +and are known to work well. More contributions and support for popular packaging systems are welcome! + +### Install using vcpkg package +gRPC is available using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager: + +``` +# install vcpkg package manager on your system using the official instructions +git clone https://github.com/Microsoft/vcpkg.git +cd vcpkg + +# Bootstrap on Linux: +./bootstrap-vcpkg.sh +# Bootstrap on Windows instead: +# ./bootstrap-vcpkg.bat + +./vcpkg integrate install + +# install gRPC using vcpkg package manager +./vcpkg install grpc +``` + +The gRPC port in vcpkg is kept up to date by Microsoft team members and community contributors. 
If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository. + + +## Examples & Additional Documentation + +You can find out how to build and run our simplest gRPC C++ example in our +[C++ quick start](../../examples/cpp). + +For more detailed documentation on using gRPC in C++ , see our main +documentation site at [grpc.io](https://grpc.io), specifically: + +* [Overview](https://grpc.io/docs): An introduction to gRPC with a simple + Hello World example in all our supported languages, including C++. +* [gRPC Basics - C++](https://grpc.io/docs/languages/cpp/basics): + A tutorial that steps you through creating a simple gRPC C++ example + application. +* [Asynchronous Basics - C++](https://grpc.io/docs/languages/cpp/async): + A tutorial that shows you how to use gRPC C++'s asynchronous/non-blocking + APIs. + + +# To start developing gRPC C++ + +For instructions on how to build gRPC C++ from source, follow the [Building gRPC C++](../../BUILDING.md) instructions. diff --git a/src/csharp/BUILD-INTEGRATION.md b/src/csharp/BUILD-INTEGRATION.md new file mode 100644 index 00000000..b8c30fd2 --- /dev/null +++ b/src/csharp/BUILD-INTEGRATION.md @@ -0,0 +1,411 @@ +Protocol Buffers/gRPC Codegen Integration Into .NET Build +================================================= + +With Grpc.Tools package version 1.17 we made it easier to compile .proto files +in your project using the `dotnet build` command, Visual Studio, or command-line +MSBuild. You need to configure the .csproj project according to the way you want +to integrate Protocol Buffer files into your build. + +There is also a Reference section at the end of the file. + +Common scenarios +---------------- + +### I just want to compile .proto files into my library + +This is the approach taken by the examples in the `csharp/examples` directory. 
+Protoc output files (for example, `Helloworld.cs` and `HelloworldGrpc.cs` +compiled from `helloworld.proto`) are placed among *object* and other temporary +files of your project, and automatically provided as inputs to the C# compiler. +As with other automatically generated .cs files, they are included in the source +and symbols NuGet package, if you build one. + +Simply reference your .proto files in a `` item group. The following +example will add all .proto files in a project and all its subdirectories +(excluding special directories such as `bin` and `obj`): + +```xml + + + +``` + +You must add a reference to the NuGet packages Grpc.Tools and Grpc (the latter +is a meta-package, in turn referencing Grpc.Core and Google.Protobuf packages). +It is **very important** to mark Grpc.Tools as a development-only dependency, so +that the *users* of your library do not fetch the tools package: + +* "dotnet SDK" .csproj (Visual Studio, `dotnet new`): Add an attribute + `PrivateAssets="All"` to the Grpc.Tools package reference. See an example in the + [Greeter.csproj](../../examples/csharp/Helloworld/Greeter/Greeter.csproj#L10) + example project in this repository. If adding a package reference in Visual + Studio, edit the project file and add this attribute. [This is a bug in NuGet + client](https://github.com/NuGet/Home/issues/4125). + + * "Classic" .csproj with `packages.config` (Visual Studio, Mono): This is + handled automatically by NuGet. See the attribute added by Visual Studio to the + [packages.config](../../examples/csharp/HelloworldLegacyCsproj/Greeter/packages.config#L6) + file in the HelloworldLegacyCsproj/Greeter example. + +If building a NuGet package from your library with the nuget command line tool +from a .nuspec file, then the spec file may (and probably should) reference the +Grpc metapackage, but **do not add a reference to Grpc.Tools** to it. 
"dotnet SDK" +projects handle this automatically when called from `dotnet pack` by excluding +any packages with private assets, such as thus marked Grpc.Tools. + +#### Per-file options that can be set in Visual Studio + +For a "dotnet SDK" project, you have more control of some frequently used options. +**You may need to open and close Visual Studio** for this form to appear in the +properties window after adding a reference to Grpc.Tools package (we do not know +whether this is a bug or by design, but it looks like a bug): + +![Properties in an SDK project](doc/integration.md-fig.2-sdk.png) + +You can also change options of multiple files at once by selecting them in the +Project Explorer together. + +For a "classic" project, you can only add .proto files with all options set to +default (if you find it necessary to modify these options, then hand-edit the +.csproj file). Click on the "show all files" button, add files to project, then +change file type of the .proto files to "Protobuf" in the Properties window +drop-down. This menu item will appear after you import the Grpc.Tools package: + +![Properties in a classic project](doc/integration.md-fig.1-classic.png) + +See the Reference section at end of this file for options that can be set +per-file by modifying the source .csproj directly. + +#### My .proto files are in a directory outside the project + +Refer to the example files +[RouteGuide.csproj](../../examples/csharp/RouteGuide/RouteGuide/RouteGuide.csproj#L58-L60) +and [Greeter.csproj](../../examples/csharp/Helloworld/Greeter/Greeter.csproj#L11) +in this repository. For the files to show up in Visual Studio properly, add a +`Link` attribute with just a filename to the `` item. This will be the +display name of the file. In the `Include` attribute, specify the complete path +to file. A relative path is based off the project directory. + +Or, if using Visual Studio, add files _as links_ from outside directory. 
In the +Add Files dialog, there is a little [down arrow near the Open +button](https://stackoverflow.com/a/9770061). Click on it, and choose "Add as +link". If you do not select this option, Visual Studio will copy files to the +project directory instead. + +#### My .proto files have same filename in different folders + +Starting from Grpc.Tools version 2.31, protocol buffers compilation preserves original folder structure for generated files. Eg. + +- `../ProjectFolder/Protos/v2/http.proto` +- `../ProjectFolder/Protos/v3/http.proto` + +Will result in: + +- `../ProjectFolder/obj/CONFIGURATION/FRAMEWORK/Protos/v2/Greet.cs` +- `../ProjectFolder/obj/CONFIGURATION/FRAMEWORK/Protos/v2/GreetGrpc.cs` +- `../ProjectFolder/obj/CONFIGURATION/FRAMEWORK/Protos/v3/Greet.cs` +- `../ProjectFolder/obj/CONFIGURATION/FRAMEWORK/Protos/v3/GreetGrpc.cs` + +This feature resolves problems we have faced in large projects. Moreover, There is now also a project-wide new option Protobuf_ProtoRoot to define the fallback ProtoRoot. If the ProtoRoot is set, this also reduces the amount of problems that lead to duplicates. Eg. + +```xml + + + +``` + +Before Grpc.Tools version 2.31 all .proto files were compiled into `obj` directory, flattening relative paths. For proto files with duplicated names it cause following errors `NETSDK1022 Duplicate 'Compile' items were included. [...]` or `MSB3105 [...] Duplicate items are not supported by the "Sources" parameter`. The workaround for this problem was introducing relative paths in your `obj` folder, by manipulating output path. Eg. + +```xml + + + + +``` + +__Note, this was a workaround approach, we recommend updating Grpc.Tools to the latest version.__ + +### I just want to generate proto and gRPC C# sources from my .proto files (no C# compile) + +Suppose you want to place generated files right beside each respective source +.proto file. 
Create a .csproj library file in the common root of your .proto +tree, and add a reference to Grpc.Tools package (this works in Windows too, `$` +below stands for a command prompt in either platform): + +``` +/myproject/myprotofiles$ dotnet new classlib + . . . + Restoring packages for /myproject/myprotofiles/myprotofiles.csproj... + . . . +/myproject/myprotofiles$ rm *.cs <-- remove all *.cs files from template; +C:\myproject\myprotofiles> del *.cs /y <-- on Windows, use the del command instead. +/myproject/myprotofiles$ dotnet add package Grpc.Tools +``` + +(the latter command also accepts an optional `--version X.Y` switch for a +specific version of package, should you need one). Next open the generated +.csproj file in a text editor. + +Since you are not building a package, you may not worry about adding +`PrivateAssets="All"` attribute, but it will not hurt, in case you are +repurposing the project at some time later. The important part is (1) tell the +gRPC tools to select the whole directory of files; (2) order placement of each +output besides its source, and (3) not compile the generated .cs files. Add the +following stanza under the `` xml node: + +```xml + + + +``` + +The `Include` tells the build system to recursively examine project directory +and its subdirectories (`**`) include all files matching the wildcard `*.proto`. +You can instead selectively include your files or selectively exclude files from +the glob pattern; [MSBuild documentation explains +that](https://docs.microsoft.com/visualstudio/msbuild/msbuild-items). The +`OutputDir="%(RelativeDir)"` orders the output directory for each .cs file be +same as the corresponding .proto directory. Finally, `CompileOutputs="false"` +prevents compiling the generated files into an assembly. + +Note that an empty assembly is still generated, but you should ignore it. As +with any build system, it is used to detect out-of-date dependencies and +recompile them. 
+ +#### I am getting a warning about a missing expected file! + +When we are preparing compile, there is no way to know whether a given proto +file will produce a *Grpc.cs output or not. If the proto file has a `service` +clause, it will; otherwise, it won't, but the build script cannot know that in +advance. When we are treating generated .cs files as temporary, this is ok, but +when generating them for you, creating empty files is probably not. You need to +tell the compiler which files should be compiled with gRPC services, and which +only contain protobuffer message definitions. + +One option is just ignore the warning. Another is quench it by setting the +property `Protobuf_NoWarnMissingExpected` to `true`: + +```xml + + true + +``` + +For a small to medium projects this is sufficient. But because of a missing +output dependency, the corresponding .proto file will be recompiled on every +build. If your project is large, or if other large builds depend on generated +files, and are also needlessly recompiled, you'll want to prevent these rebuilds +when files have not in fact changed, as follows: + +##### Explicitly tell protoc for which files it should use the gRPC plugin + +You need to set the `Protobuf` item property `GrpcServices` to `None` for those +.proto inputs which do not have a `service` declared (or, optionally, those +which do but you do not want a service/client stub for). The default value for +the `GrpcServices` is `Both` (both client and server stub are generated). This +is easy enough to do with glob patterns if your files are laid out in +directories according to their service use, for example: + +```xml + + + + +``` + +In this sample, all .proto files are compiled with `GrpcServices="None"`, except +for .proto files in subdirectories on any tree level named `hello/` and `bye`, +which will take `GrpcServices="Both"` Note the use of the `Update` attribute +instead of `Include`. 
If you write `Include` by mistake, the files will be added +to compile *twice*, once with, and once without GrpcServices. Pay attention not +to do that! + +Another example would be the use of globbing if your service .proto files are +named according to a pattern, for example `*_services.proto`. In this case, The +`Update` attribute can be written as `Update="**/*_service.proto"`, to set the +attribute `GrpcServices="Both"` only on these files. + +But what if no patterns work, and you cannot sort a large set of .proto file +into those containing a service and those not? As a last resort, + +##### Force creating empty .cs files for missing outputs. + +Naturally, this results in a dirtier compiler output tree, but you may clean it +using other ways (for example, by not copying zero-length .cs files to their +final destination). Remember, though, that the files are still important to keep +in their output locations to prevent needless recompilation. You may force +generating empty files by setting the property `Protobuf_TouchMissingExpected` +to `true`: + +```xml + + true + +``` + +#### But I do not use gRPC at all, I need only protobuffer messages compiled + +Set `GrpcServices="None"` on all proto files: + +```xml + + + +``` + +#### That's good so far, but I do not want the `bin` and `obj` directories in my tree + +You may create the project in a subdirectory of the root of your files, such as, +for example, `.build`. In this case, you want to refer to the proto files +relative to that `.build/` directory as + +```xml + + + +``` + +Pay attention to the `ProtoRoot` property. It needs to be set to the directory +where `import` declarations in the .proto files are looking for files, since the +project root is no longer the same as the proto root. 
+ +Alternatively, you may place the project in a directory *above* your proto root, +and refer to the files with a subdirectory name: + +```xml + + + +``` + +### Alas, this all is nice, but my scenario is more complex, -OR- +### I'll investigate that when I have time. I just want to run protoc as I did before. + +One option is examine our [.targets and .props files](Grpc.Tools/build/) and see +if you can create your own build sequence from the provided targets so that it +fits your needs. Also please open an issue (and tag @kkm000 in it!) with your +scenario. We'll try to support it if it appears general enough. + +But if you just want to run `protoc` using MsBuild `` task, as you +probably did before the version 1.17 of Grpc.Tools, we have a few build +variables that point to resolved names of tools and common protoc imports. +You'll have to roll your own dependency checking (or go with a full +recompilation each time, if that works for you), but at the very least each +version of the Tools package will point to the correct location of the files, +and resolve the compiler and plugin executables appropriate for the host system. +These property variables are: + +* `Protobuf_ProtocFullPath` points to the full path and filename of protoc executable, e. g., + "C:\Users\kkm\.nuget\packages\grpc.tools\1.17.0\build\native\bin\windows\protoc.exe". + +* `gRPC_PluginFullPath` points to the full path and filename of gRPC plugin, such as + "C:\Users\kkm\.nuget\packages\grpc.tools\1.17.0\build\native\bin\windows\grpc_csharp_plugin.exe" + +* `Protobuf_StandardImportsPath` points to the standard proto import directory, for example, + "C:\Users\kkm\.nuget\packages\grpc.tools\1.17.0\build\native\include". This is + the directory where a declaration such as `import "google/protobuf/wrappers.proto";` + in a proto file would find its target. 
+ +Use MSBuild property expansion syntax `$(VariableName)` in your protoc command +line to substitute these variables, for instance, + +```xml + + + $(Protobuf_ProtocFullPath) --plugin=protoc-gen-grpc=$(gRPC_PluginFullPath) -I $(Protobuf_StandardImportsPath) ....rest of your command.... + + + + +``` + +Also make sure *not* to include any file names to the `Protobuf` item +collection, otherwise they will be compiled by default. If, by any chance, you +used that name for your build scripting, you must rename it. + +### What about C++ projects? + +This is in the works. Currently, the same variables as above are set to point to +the protoc binary, C++ gRPC plugin and the standard imports, but nothing else. +Do not use the `Protobuf` item collection name so that your project remains +future-proof. We'll use it for C++ projects too. + +Reference +--------- + +### Protobuf item metadata reference + +The following metadata are recognized on the `` items. + +| Name | Default | Value | Synopsis | +|----------------|-----------|----------------------|----------------------------------| +| Access | `public` | `public`, `internal` | Generated class access | +| AdditionalProtocArguments | | arbitrary cmdline arguments | Extra command line flags passed to `protoc` command | +| ProtoCompile | `true` | `true`, `false` | Pass files to protoc? | +| ProtoRoot | See notes | A directory | Common root for set of files | +| CompileOutputs | `true` | `true`, `false` | C#-compile generated files? 
| +| OutputDir | See notes | A directory | Directory for generated C# files with protobuf messages | +| OutputOptions | | arbitrary options | Extra options passed to C# codegen as `--csharp_opt=opt1,opt2` | +| GrpcOutputDir | See notes | A directory | Directory for generated gRPC stubs | +| GrpcOutputOptions | | arbitrary options | Extra options passed to gRPC codegen as `--grpc_opt=opt1,opt2` | +| GrpcServices | `both` | `none`, `client`, `server`, `both` | Generated gRPC stubs | + +__Notes__ + +* __ProtoRoot__ +For files _inside_ the project cone, `ProtoRoot` is set by default to the +project directory. For every file _outside_ of the project directory, the value +is set to this file's containing directory name, individually per file. If you +include a subtree of proto files that lies outside of the project directory, you +need to set this metadatum. There is an example in this file above. The path in +this variable is relative to the project directory. + +* __OutputDir__ +The default value for this metadatum is the value of the property +`Protobuf_OutputPath`. This property, in turn, unless you set it in your +project, will be set to the value of the standard MSBuild property +`IntermediateOutputPath`, which points to the location of compilation object +outputs, such as "obj/Release/netstandard1.5/". The path in this property is +considered relative to the project directory. + +* __GrpcOutputDir__ +Unless explicitly set, will follow `OutputDir` for any given file. + +* __Access__ +Sets generated class access on _both_ generated message and gRPC stub classes. + +* __AdditionalProtocArguments__ +Pass additional commandline arguments to the `protoc` command being invoked. +Normally this option should not be used, but it exists for scenarios when you need to pass +otherwise unsupported (e.g. experimental) flags to protocol buffer compiler. + +* __GrpcOutputOptions__ +Pass additional options to the `grpc_csharp_plugin` in form of the `--grpc_opt` flag. 
+Normally this option should not be used as its values are already controlled by "Access" +and "GrpcServices" metadata, but it might be useful in situations where you want +to explicitly pass some otherwise unsupported (e.g. experimental) options to the +`grpc_csharp_plugin`. + +`grpc_csharp_plugin` command line options +--------- + +Under the hood, the `Grpc.Tools` build integration invokes the `protoc` and `grpc_csharp_plugin` binaries +to perform code generation. Here is an overview of the available `grpc_csharp_plugin` options: + +| Name | Default | Synopsis | +|---------------- |-----------|----------------------------------------------------------| +| no_client | off | Don't generate the client stub | +| no_server | off | Don't generate the server-side stub | +| internal_access | off | Generate classes with "internal" visibility | + +Note that the protocol buffer compiler has a special commandline syntax for plugin options. +Example: +``` +protoc --plugin=protoc-gen-grpc=grpc_csharp_plugin --csharp_out=OUT_DIR \ + --grpc_out=OUT_DIR --grpc_opt=internal_access,no_server \ + -I INCLUDE_DIR foo.proto +``` diff --git a/src/csharp/README.md b/src/csharp/README.md new file mode 100644 index 00000000..19bb9926 --- /dev/null +++ b/src/csharp/README.md @@ -0,0 +1,125 @@ +[![Nuget](https://img.shields.io/nuget/v/Grpc.svg)](http://www.nuget.org/packages/Grpc/) +gRPC C# +======= + +A C# implementation of gRPC based on the native gRPC Core library. + +The implementation in this directory is the original implementation of gRPC for C# (i.e. `Grpc.Core` nuget package) +and it is currently in maintenance mode. We plan to deprecate it in the future +in favor of the [grpc-dotnet](https://github.com/grpc/grpc-dotnet) implementation. +See [blogpost](https://grpc.io/blog/grpc-csharp-future/) for more details. + +The following documentation is for the original gRPC C# implementation only (the `Grpc.Core` nuget package). 
+ +SUPPORTED PLATFORMS +------------------ + +- [.NET Core](https://dotnet.github.io/) on Linux, Windows and Mac OS X +- .NET Framework 4.5+ (Windows) +- Mono 4+ on Linux, Windows and Mac OS X + +PREREQUISITES +-------------- + +When using gRPC C# under .NET Core you only need to [install .NET Core](https://www.microsoft.com/net/core). + +In addition to that, you can also use gRPC C# with these runtimes / IDEs +- Windows: .NET Framework 4.5+, Visual Studio 2013 or newer, Visual Studio Code +- Linux: Mono 4+, Visual Studio Code +- Mac OS X: Mono 4+, Visual Studio Code, Visual Studio for Mac + +HOW TO USE +-------------- + +**Windows, Linux, Mac OS X** + +- Open Visual Studio and start a new project/solution (alternatively, you can create a new project from command line with `dotnet` SDK) + +- Add the [Grpc](https://www.nuget.org/packages/Grpc/) NuGet package as a dependency (Project options -> Manage NuGet Packages). + +- To be able to generate code from Protocol Buffer (`.proto`) file definitions, add the [Grpc.Tools](https://www.nuget.org/packages/Grpc.Tools/) NuGet package which provides [code generation integrated into your build](BUILD-INTEGRATION.md). + +**Xamarin.Android and Xamarin.iOS (Experimental only)** + +See [Experimentally supported platforms](experimental) for instructions. + +**Unity (Experimental only)** + +See [Experimentally supported platforms](experimental) for instructions. + +NUGET DEVELOPMENT FEED (NIGHTLY BUILDS) +-------------- + +In production, you should use officially released stable packages available on http://nuget.org, but if you want to test the newest upstream bug fixes and features early, you can use the development nuget feed where new nuget builds are uploaded nightly. 
+ +Feed URL (NuGet v2): https://grpc.jfrog.io/grpc/api/nuget/grpc-nuget-dev + +Feed URL (NuGet v3): https://grpc.jfrog.io/grpc/api/nuget/v3/grpc-nuget-dev + +The same development nuget packages and packages for other languages can also be found at https://packages.grpc.io/ + +BUILD FROM SOURCE +----------------- + +You only need to go through these steps if you are planning to develop gRPC C#. +If you are a user of gRPC C#, go to Usage section above. + +**Prerequisites for contributors** + +- [dotnet SDK](https://www.microsoft.com/net/core) +- [Mono 4+](https://www.mono-project.com/) (only needed for Linux and MacOS) +- Prerequisites mentioned in [BUILDING.md](../../BUILDING.md#pre-requisites) + to be able to compile the native code. + +**Windows, Linux or Mac OS X** + +- The easiest way to build is using the `run_tests.py` script that will take care of building the `grpc_csharp_ext` native library. + + ``` + # NOTE: make sure all necessary git submodules with dependencies + # are available by running "git submodule update --init" + + # from the gRPC repository root + $ python tools/run_tests/run_tests.py -l csharp -c dbg --build_only + ``` + +- Use Visual Studio 2017 (on Windows) to open the solution `Grpc.sln` or use Visual Studio Code with C# extension (on Linux and Mac). gRPC C# code has been migrated to + dotnet SDK `.csproj` projects that are much simpler to maintain, but are not yet supported by Xamarin Studio or Monodevelop (the NuGet packages still + support both `net45` and `netstandard` and can be used in all IDEs). + +RUNNING TESTS +------------- + +gRPC C# is using NUnit as the testing framework. + +Under Visual Studio, make sure NUnit test adapter is installed (under "Extensions and Updates"). +Then you should be able to run all the tests using Test Explorer. + +gRPC team uses a Python script to facilitate running tests for +different languages. 
+ +``` +# from the gRPC repository root +$ python tools/run_tests/run_tests.py -l csharp -c dbg +``` + +DOCUMENTATION +------------- +- [.NET Build Integration](BUILD-INTEGRATION.md) +- [API Reference][] +- [Helloworld Example][] +- [RouteGuide Tutorial][] + +PERFORMANCE +----------- + +For best gRPC C# performance, use [.NET Core](https://dotnet.github.io/) and the Server GC mode `"System.GC.Server": true` for your applications. + +THE NATIVE DEPENDENCY +--------------- + +Internally, gRPC C# uses a native library written in C (gRPC C core) and invokes its functionality via P/Invoke. The fact that a native library is used should be fully transparent to the users and just installing the `Grpc.Core` NuGet package is the only step needed to use gRPC C# on all supported platforms. + +[API Reference]: https://grpc.io/grpc/csharp/api/Grpc.Core.html +[Helloworld Example]: ../../examples/csharp/Helloworld +[RouteGuide Tutorial]: https://grpc.io/docs/languages/csharp/basics diff --git a/src/csharp/docfx/README.md b/src/csharp/docfx/README.md new file mode 100644 index 00000000..427f457e --- /dev/null +++ b/src/csharp/docfx/README.md @@ -0,0 +1,28 @@ +DocFX-generated C# API Reference +-------------------------------- + +## Generating docs manually (on Windows) + +Install docfx based on instructions here: https://github.com/dotnet/docfx + +``` +# generate docfx documentation into ./html directory +$ docfx +``` + +## Release process: script for regenerating the docs automatically + +After each gRPC C# release, the docs need to be regenerated +and updated on the grpc.io site. The automated script will +re-generate the docs (using dockerized docfx installation) +and make everything ready for creating a PR to update the docs. + +``` +# 1. Run the script on Linux with docker installed +$ ./generate_reference_docs.sh + +# 2. Enter the git repo with updated "gh-pages" branch +$ cd grpc-gh-pages + +# 3. 
Review the changes and create a pull request +``` diff --git a/src/csharp/experimental/README.md b/src/csharp/experimental/README.md new file mode 100644 index 00000000..f0c9e5c8 --- /dev/null +++ b/src/csharp/experimental/README.md @@ -0,0 +1,103 @@ +This directory contains useful resources for getting gRPC C# to work on +platforms that are not yet fully supported. + +# Xamarin + +gRPC C# now has experimental support for Xamarin. +See [HelloworldXamarin](/examples/csharp/HelloworldXamarin) for an example how to use it. + +Starting from gRPC C# 2.34.x: in addition to the regular `Grpc.Core` dependency, you will also +need to add `Grpc.Core.Xamarin` dependency to your project (which has the mobile-specific builds of c# native extension library). +The `Grpc.Core` and `Grpc.Core.Xamarin` package versions must always match exactly for things to work. +Also note that the `Grpc.Core.Xamarin` needs to be added to your `*.Android` and `*.iOS` projects +in order for the native library bindings to be registered correctly (see https://github.com/grpc/grpc/issues/16250). + +What's currently supported: + +Xamarin.Android +- supported API level: Kitkat 4.4+ (= API level 19) +- supported ABIs: `armeabi-v7a` (vast majority of Android devices out there), + `arm64-v8a` (some newer Android devices), `x86` (for emulator) + +Xamarin.iOS +- supported architectures: armv7, arm64 (iPhone 6+) and x86_64 (iPhone simulator) + +# Unity + +gRPC C# now has experimental support for Unity. Please try using gRPC with +Unity and provide feedback! + +How to test gRPC in a Unity project + +1. Create a Unity project that targets .NET 4.x Equivalent (Edit -> Project Settings -> Player -> Configuration -> Scripting Runtime Version). gRPC uses APIs that are only available in .NET4.5+ so this is a requirement. + +2. Download the latest development build of `grpc_unity_package.VERSION.zip` from + [daily builds](https://packages.grpc.io/) + +3. 
Extract the `.zip` file in the `Assets` directory in your Unity project + +4. Unity IDE will pick up all the bundled files and add them to project automatically. + You should be able to use gRPC and Protobuf in your scripts from now on. + +5. (optional) Extra steps for iOS, see below + +What's currently bundled in the `grpc_unity_package` +- Grpc.Core and its dependencies +- Google.Protobuf +- Precompiled native libraries for Linux, MacOS, Windows, Android and iOS. + +Please note that `Grpc.Core` is now in maintenance mode (see [The future of gRPC in C# belongs to grpc-dotnet](https://grpc.io/blog/grpc-csharp-future/)). There is a plan to support Unity in `Grpc.Net.Client`, which depends on Unity's .NET 5 or .NET 6 support. See [this issue](https://github.com/grpc/grpc-dotnet/issues/1309) for more information. + +## Building for iOS + +To build a Unity app on iOS, there are extra steps to do to make it work: + +1. Add a `Assets/link.xml` asset file to your Unity project with the following content: + ```xml + + + + + + + + ``` + If you don't, you might encounter the following error: `System.IO.FileNotFoundException: Error loading native library. Not found in any of the possible locations:` with a list of paths that point to the `libgrpc_csharp_ext.x64.dylib` file. +2. Due to the growing build size, bitcode has been disabled for the gRPC library. You must disable it in your XCode project as well. +3. Add the `libz` framework. 
+ +Steps 2 and 3 can be automated by adding the following `Assets/Scripts/BuildIos.cs` script in your Unity project, and attaching it to a Unity game object: + +```cs +#if UNITY_EDITOR && UNITY_IOS +using System.IO; +using UnityEngine; +using UnityEditor; +using UnityEditor.Callbacks; +using UnityEditor.iOS.Xcode; + +public class BuildIos +{ + [PostProcessBuild] + public static void OnPostProcessBuild(BuildTarget target, string path) + { + var projectPath = PBXProject.GetPBXProjectPath(path); + var project = new PBXProject(); + project.ReadFromString(File.ReadAllText(projectPath)); +#if UNITY_2019_3_OR_NEWER + var targetGuid = project.GetUnityFrameworkTargetGuid(); +#else + var targetGuid = project.TargetGuidByName(PBXProject.GetUnityTargetName()); +#endif + + // libz.tbd for grpc ios build + project.AddFrameworkToProject(targetGuid, "libz.tbd", false); + + // bitcode is disabled for libgrpc_csharp_ext, so need to disable it for the whole project + project.SetBuildProperty(targetGuid, "ENABLE_BITCODE", "NO"); + + File.WriteAllText(projectPath, project.WriteToString()); + } +} +#endif +``` diff --git a/src/csharp/keys/README.md b/src/csharp/keys/README.md new file mode 100644 index 00000000..968749aa --- /dev/null +++ b/src/csharp/keys/README.md @@ -0,0 +1,9 @@ +Contents +-------- + +- Grpc.public.snk: + Public key to verify strong name of gRPC assemblies. +- Grpc.snk: + Signing key to provide strong name of gRPC assemblies. + As per https://msdn.microsoft.com/en-us/library/wd40t7ad(v=vs.110).aspx + signing key should be checked into the repository. \ No newline at end of file diff --git a/src/csharp/unitypackage/README.md b/src/csharp/unitypackage/README.md new file mode 100644 index 00000000..aa8ffb2e --- /dev/null +++ b/src/csharp/unitypackage/README.md @@ -0,0 +1,13 @@ +# Scripts for building gRPC C# package for Unity + +Scripts in this directory are of no interest for end users. 
They are part +of internal tooling to automate building of the gRPC C# package for Unity. + +- `unitypackage_skeleton` - preconfigured `.meta` files for the unity package + layout. The actual assemblies and native libraries will be added into + this hierarchy while building the package. + Note: The `.meta` files were created by the Unity IDE by manually adding the assemblies/native libraries + to a Unity project and configuring their target platform/architecture in the UI (these settings get recorded in + `.meta` files). The `.meta` format is not very well documented and there seems to be no easy way to generate them + automatically. + \ No newline at end of file diff --git a/src/objective-c/GRPCClient/README.md b/src/objective-c/GRPCClient/README.md new file mode 100644 index 00000000..9b87f031 --- /dev/null +++ b/src/objective-c/GRPCClient/README.md @@ -0,0 +1,4 @@ +This is a generic gRPC client for Objective-C on iOS. + +If you're trying to get started with the library or with gRPC, you should first +read GRPCCall.h. diff --git a/src/objective-c/NetworkTransitionBehavior.md b/src/objective-c/NetworkTransitionBehavior.md new file mode 100644 index 00000000..bf129216 --- /dev/null +++ b/src/objective-c/NetworkTransitionBehavior.md @@ -0,0 +1,92 @@ + +# gRPC iOS Network Transition Behaviors +Network connectivity on an iOS device may transition between cellular, WIFI, or +no network connectivity. This document describes how these network changes +should be handled by gRPC and current known issues. + +## Expected Network Transition Behaviors +The expected gRPC iOS channel and network transition behaviors are: +* Channel connection to a particular host is established at the time of + starting the first call to the channel and remains connected for future calls + to the same host. +* If the underlying connection to the remote host is broken, the channel is + disconnected and enters TRANSIENT\_FAILURE state. 
+* A channel is broken if the channel connection is no longer viable. This + happens when + * The network interface is no longer available, e.g. WiFi or cellular + interface is turned off or goes offline, airplane mode turned on, etc; + * The underlying TCP connection is no longer valid, e.g. WiFi connects to + another hotspot, cellular data switched from LTE to 4G, etc; + * A network interface more preferable by the OS is valid, e.g. WiFi gets + connected when the channel is already connected via cellular. +* A channel in TRANSIENT\_FAILURE state attempts reconnection on start of the + next call to the same host, but only after a certain backoff period (see + corresponding + [doc](https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md)). + During the backoff period, any call to the same host will wait until the + first of the following events occur: + * Connection succeeded; calls will be made using this channel; + * Connection failed; calls will be failed and return UNAVAILABLE status code; + * The call's deadline is reached; the call will fail and return + DEADLINE\_EXCEEDED status code. + The length of backoff period of a channel is reset whenever a connection + attempt is successful. + +## Implementations +### gRPC iOS with TCP Sockets +gRPC's default implementation is to use TCP sockets for networking. It turns +out that although Apple supports this type of usage, it is [not recommended by +Apple](https://developer.apple.com/library/archive/documentation/NetworkingInternetWeb/Conceptual/NetworkingOverview/SocketsAndStreams/SocketsAndStreams.html) +and has some issues described below. + +#### Issues with TCP Sockets +The TCP sockets on iOS is flawed in that it does not reflect the viability of +the channel connection. 
Particularly, we observed the following issues when +using TCP sockets: +* When a TCP socket connection is established on cellular data and WiFi + becomes available, the TCP socket neither return an error event nor continue + sending/receiving data on it, but still accepts write on it. +* A TCP socket does not report certain events that happen in the + background. When a TCP connection breaks in the background for the reason + like WiFi connects to another hotspot, the socket neither return an error nor + continue sending/receiving data on it, but still accepts write on it. +In both situations, the user will see the call freeze for an extended period of +time before the TCP socket times out. + +#### gRPC iOS library's resolution to TCP socket issues +We introduced +[`ConnectivityMonitor`](https://developer.apple.com/library/archive/documentation/NetworkingInternetWeb/Conceptual/NetworkingOverview/SocketsAndStreams/SocketsAndStreams.html) +in gRPC iOS library v0.14.0 to alleviate these issues in TCP sockets, +which changes the network transition behaviors a bit. + +We classify network connectivity state of the device into three categories +based on flags obtained from `SCNetworkReachability` API: + +| Reachable | ConnectionRequired | IsWWAN | **Category** | +|:---------:|:------------------:|:------:|:------------:| +| 0 | X | X | None | +| X | 1 | X | None | +| 1 | 0 | 0 | WiFi | +| 1 | 0 | 1 | Cellular | + +Whenever there is a transition of network between two of these categories, all +previously existing channels are assumed to be broken and are actively +destroyed. If there is an unfinished call, the call should return with status +code `UNAVAILABLE`. + +`ConnectivityMonitor` is able to detect the scenario of the first issue above +and actively destroy the channels. However, the second issue is not resolvable. +To solve that issue the best solution is to switch to CFStream implementation +which eliminates all of them. 
+ +### gRPC iOS with CFStream +gRPC iOS with CFStream implementation (introduced in v1.13.0) uses Apple's +networking API to make connections. It resolves the issues with TCP sockets +mentioned above. Users are recommended to use this implementation rather than +TCP socket implementation. The detailed behavior of streams in CFStream is not +documented by Apple, but our experiments show that it accords to the expected +behaviors. With CFStream implementation, an event is always received when the +underlying connection is no longer viable. For more detailed information and +usages of CFStream implementation, refer to the +[user guide](https://github.com/grpc/grpc/blob/master/src/objective-c/README-CFSTREAM.md). + diff --git a/src/objective-c/README-CFSTREAM.md b/src/objective-c/README-CFSTREAM.md new file mode 100644 index 00000000..94a43ff3 --- /dev/null +++ b/src/objective-c/README-CFSTREAM.md @@ -0,0 +1,40 @@ +[![Cocoapods](https://img.shields.io/cocoapods/v/gRPC.svg)](https://cocoapods.org/pods/gRPC) +# gRPC Objective-C with CFStream + +gRPC now provides the option to use Apple's CFStream API (rather than TCP +sockets) for networking. Using CFStream resolves a bunch of network connectivity transition issues +(see the [doc](https://github.com/grpc/grpc/blob/master/src/objective-c/NetworkTransitionBehavior.md) +for more information). + +CFStream integration is now in experimental state. You will need explicit opt-in to use it to get +the benefits of resolving the issues above. We expect to make CFStream the default networking +interface that gRPC uses when it is ready for production. + +As of v1.21.0, CFStream integration is now the default networking stack being used by gRPC +Objective-C on iOS layer. You get to use it automatically without special configuration needed. See +below on how to disable CFStream in case of problem. + +As of v1.23.0, CFStream is enabled by default on iOS for all wrapped languages. 
See below on how to +disable CFStream in case of a problem. + +## Usage +If you use gRPC on iOS, CFStream is on automatically. If you use it on other +platforms, you can turn it on with macro `GRPC_CFSTREAM=1` for the pod 'gRPC-Core' and 'gRPC'. In +case of a problem, if you want to disable CFStream on iOS, you can set the environment variable +"grpc\_cfstream=0". + +## Caveats +It is known to us that the CFStream API has a bug (FB6162039) which will cause gRPC's CFStream +networking layer to stall occasionally. The issue mostly occurs on MacOS systems (including iOS +simulators on MacOS); iOS may be affected too but we have not seen the issue there. gRPC provides a +workaround to this problem with an alternative poller based on CFRunLoop. The poller can be enabled +by setting the environment variable `GRPC_CFSTREAM_RUN_LOOP=1`. Note that the poller is a client side +only poller that does not support running a server on it. That means if an app opts in to the +CFRunLoop-based poller, the app cannot host a gRPC server (gRPC Objective-C does not support running +a server but other languages running on iOS do support it). + +## Notes + +- Currently we do not support platforms other than iOS, although it is likely that this integration + can run on MacOS targets with Apple's compiler. +- Let us know if you meet any issues by filing an issue and pinging @stanleycheung. diff --git a/src/objective-c/README.md b/src/objective-c/README.md new file mode 100644 index 00000000..c5d52ec0 --- /dev/null +++ b/src/objective-c/README.md @@ -0,0 +1,253 @@ +[![Cocoapods](https://img.shields.io/cocoapods/v/gRPC.svg)](https://cocoapods.org/pods/gRPC) +# gRPC for Objective-C +The gRPC Objective-C library provides an Objective-C API for users to make gRPC calls on iOS or OS X +platforms. Currently, the minimum supported iOS version is 9.0 and OS X version is 10.10 (Yosemite). 
+ +While gRPC doesn't require the use of an IDL to describe the API of services, using one simplifies +usage and adds some interoperability guarantees. Here we use [Protocol Buffers][], and provide a +plugin for the Protobuf Compiler (_protoc_) to generate client libraries to communicate with gRPC +services. + +- [Write your API declaration in proto format](#write-protos) +- [Integrate a proto library in your project](#cocoapods) +- [Use the generated library in your code](#use) +- [Use gRPC without Protobuf](#no-proto) +- [Alternatives to the steps above](#alternatives) + - [Install protoc with the gRPC plugin](#install) + - [Install protoc and the gRPC plugin without using Homebrew](#no-homebrew) + - [Integrate the generated gRPC library without using Cocoapods](#no-cocoapods) + + +## Write your API declaration in proto format + +For this you can consult the [Protocol Buffers][]' official documentation, or learn from a quick +example [here](https://github.com/grpc/grpc/tree/master/examples#defining-a-service). + + +## Integrate a proto library in your project + +Install [Cocoapods](https://cocoapods.org/#install). + +You need to create a Podspec file for your proto library. You may simply copy the following example +to the directory where your `.proto` files are located, updating the name, version and license as +necessary. You also need to set the `pods_root` variable to the correct value, depending on where +you place this podspec relative to your Podfile. + +```ruby +Pod::Spec.new do |s| + s.name = '' + s.version = '0.0.1' + s.license = '...' + s.authors = { '' => '' } + s.homepage = '...' + s.summary = '...' + s.source = { :git => 'https://github.com/...' } + + s.ios.deployment_target = '9.0' + s.osx.deployment_target = '10.10' + + # Base directory where the .proto files are. + src = '.' + + # We'll use protoc with the gRPC plugin. 
+ s.dependency '!ProtoCompiler-gRPCPlugin', '~> 1.0' + + # Pods directory corresponding to this app's Podfile, relative to the location of this podspec. + pods_root = '/Pods' + + # Path where Cocoapods downloads protoc and the gRPC plugin. + protoc_dir = "#{pods_root}/!ProtoCompiler" + protoc = "#{protoc_dir}/protoc" + plugin = "#{pods_root}/!ProtoCompiler-gRPCPlugin/grpc_objective_c_plugin" + + # Directory where you want the generated files to be placed. This is an example. + dir = "#{pods_root}/#{s.name}" + + # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. + # You can run this command manually if you later change your protos and need to regenerate. + # Alternatively, you can advance the version of this podspec and run `pod update`. + s.prepare_command = <<-CMD + mkdir -p #{dir} + #{protoc} \ + --plugin=protoc-gen-grpc=#{plugin} \ + --objc_out=#{dir} \ + --grpc_out=#{dir} \ + -I #{src} \ + -I #{protoc_dir} \ + #{src}/*.proto + CMD + + # The --objc_out plugin generates a pair of .pbobjc.h/.pbobjc.m files for each .proto file. + s.subspec 'Messages' do |ms| + ms.source_files = "#{dir}/*.pbobjc.{h,m}" + ms.header_mappings_dir = dir + ms.requires_arc = false + # The generated files depend on the protobuf runtime. + ms.dependency 'Protobuf' + end + + # The --objcgrpc_out plugin generates a pair of .pbrpc.h/.pbrpc.m files for each .proto file with + # a service defined. + s.subspec 'Services' do |ss| + ss.source_files = "#{dir}/*.pbrpc.{h,m}" + ss.header_mappings_dir = dir + ss.requires_arc = true + # The generated files depend on the gRPC runtime, and on the files generated by `--objc_out`. 
+ ss.dependency 'gRPC-ProtoRPC' + ss.dependency "#{s.name}/Messages" + end + + s.pod_target_xcconfig = { + # This is needed by all pods that depend on Protobuf: + 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) GPB_USE_PROTOBUF_FRAMEWORK_IMPORTS=1', + # This is needed by all pods that depend on gRPC-RxLibrary: + 'CLANG_ALLOW_NON_MODULAR_INCLUDES_IN_FRAMEWORK_MODULES' => 'YES', + } +end +``` + +The file should be named `.podspec`. + +Note: If your proto files are in a directory hierarchy, you might want to adjust the _globs_ used in +the sample Podspec above. For example, you could use: + +```ruby + s.prepare_command = <<-CMD + ... + `find . -name *.proto -print | xargs` + CMD + ... + ms.source_files = "#{dir}/*.pbobjc.{h,m}", "#{dir}/**/*.pbobjc.{h,m}" + ... + ss.source_files = "#{dir}/*.pbrpc.{h,m}", "#{dir}/**/*.pbrpc.{h,m}" +``` + +Once your library has a Podspec, Cocoapods can install it into any XCode project. For that, go into +your project's directory and create a Podfile by running: + +```sh +pod init +``` + +Next add a line to your Podfile to refer to your library's Podspec. Use `:path` as described +[here](https://guides.cocoapods.org/using/the-podfile.html#using-the-files-from-a-folder-local-to-the-machine): + +```ruby +pod '', :path => 'path/to/the/directory/of/your/podspec' +``` + +You can look at this [example Podfile][]. + +Finally, in your project's directory, run: + +```sh +pod install +``` + + +## Use the generated library in your code + +Please check the [example apps][] for examples of how to use a generated gRPC library. + + +## Use gRPC without Protobuf + +This [tests file](https://github.com/grpc/grpc/tree/master/src/objective-c/tests/GRPCClientTests.m) +shows how to use the generic gRPC Objective-C client without generated protobuf files. 
+ + +## Alternatives to the steps above + + +### Install _protoc_ with the gRPC plugin + +Although it's not recommended (because it can lead to hard-to-solve version conflicts), it is +sometimes more convenient to install _protoc_ and the gRPC plugin on your development machine, +instead of letting Cocoapods download the appropriate versions for you. To do so, on Mac OS X or +later, install [homebrew][]. + +Then run the following command to install _protoc_ and the gRPC _protoc_ plugin: +```sh +$ curl -fsSL https://goo.gl/getgrpc | bash - +``` +This will download and run the [gRPC install script][]. + + +### Install _protoc_ and the gRPC plugin without using Homebrew + +First install v3 of the Protocol Buffers compiler (_protoc_), by cloning +[its Git repository](https://github.com/protocolbuffers/protobuf) and following these +[installation instructions](https://github.com/protocolbuffers/protobuf#c-installation---unix) +(the ones titled C++; don't miss the note for Mac users). + +Then clone this repository and execute the following commands from the root directory where it was +cloned. + +Compile the gRPC plugins for _protoc_: +```sh +make grpc_objective_c_plugin +``` + +Create a symbolic link to the compiled plugin binary somewhere in your `$PATH`: +```sh +ln -s `pwd`/bins/opt/grpc_objective_c_plugin /usr/local/bin/protoc-gen-objcgrpc +``` +(Notice that the name of the created link must begin with "`protoc-gen-`" for _protoc_ to recognize +it as a plugin). + +If you don't want to create the symbolic link, you can alternatively copy the binary (with the +appropriate name). Or you might prefer instead to specify the plugin's path as a flag when invoking +_protoc_, in which case no system modification nor renaming is necessary. + + +### Integrate the generated gRPC library without using Cocoapods + +You need to compile the generated `.pbobjc.*` files (the enums and messages) without ARC support, +and the generated `.pbrpc.*` files (the services) with ARC support. 
The generated code depends on
+v0.12+ of the Objective-C gRPC runtime library and v3.0.0-alpha-4+ of the Objective-C Protobuf
+runtime library.
+
+These libraries need to be integrated into your project as described in their respective Podspec
+files:
+
+* [Podspec](https://github.com/grpc/grpc/blob/master/gRPC.podspec) for the Objective-C gRPC runtime
+library. This can be tedious to configure manually.
+* [Podspec](https://github.com/protocolbuffers/protobuf/blob/master/Protobuf.podspec) for the
+Objective-C Protobuf runtime library.
+
+[Protocol Buffers]:https://developers.google.com/protocol-buffers/
+[homebrew]:http://brew.sh
+[gRPC install script]:https://raw.githubusercontent.com/grpc/homebrew-grpc/master/scripts/install
+[example Podfile]:https://github.com/grpc/grpc/blob/master/examples/objective-c/helloworld/Podfile
+[example apps]: https://github.com/grpc/grpc/tree/master/examples/objective-c
+
+## Use gRPC with OpenSSL
+gRPC uses BoringSSL as its dependency, which is a fork of OpenSSL and exports a number of symbols
+that are the same as OpenSSL. gRPC avoids conflicts of these symbols by renaming BoringSSL symbols.
+
+If you need gRPC to use OpenSSL instead of BoringSSL (e.g. for the benefit of reducing the binary
+size of your product), you need to make a local `gRPC-Core` podspec and tweak it accordingly:
+- Copy the version of `/gRPC-Core.podspec` you wish to use from Github into the repository of your
+  app;
+- In your `Podfile`, add the following line:
+```
+pod `gRPC-Core`, :podspec => "." # assuming gRPC-Core.podspec is in the same directory as your Podfile
+```
+- Remove [the
+  macro](https://github.com/grpc/grpc/blob/b24b212ee585d376c618235905757b2445ac6461/gRPC-Core.podspec#L186)
+  `GRPC_SHADOW_BORINGSSL_SYMBOLS` to disable symbol renaming;
+- Substitute the `BoringSSL-GRPC`
+  [dependency](https://github.com/grpc/grpc/blob/b24b212ee585d376c618235905757b2445ac6461/gRPC-Core.podspec#L184)
+  to whatever pod of OpenSSL your other libraries use.
+ +These steps should allow gRPC to use OpenSSL and drop BoringSSL dependency. If you see any issue, +file an issue to us. + +## Upgrade issue with BoringSSL +If you were using an old version of gRPC (<= v1.14) which depended on pod `BoringSSL` rather than +`BoringSSL-GRPC` and meet issue with the library like: +``` +ld: framework not found openssl +``` +updating `-framework openssl` in Other Linker Flags to `-framework openssl_grpc` in your project +may resolve this issue (see [#16821](https://github.com/grpc/grpc/issues/16821)). diff --git a/src/objective-c/RxLibrary/README.md b/src/objective-c/RxLibrary/README.md new file mode 100644 index 00000000..88e90723 --- /dev/null +++ b/src/objective-c/RxLibrary/README.md @@ -0,0 +1,8 @@ +This is a generic Reactive Extensions library for Objective-C, created to ease +the implementation of the gRPC Objective-C runtime. + +It has no dependencies on gRPC nor other libraries, and should eventually be +moved under its own GitHub project. + +If you're trying to get started on the library, you might want to first read +GRXWriter.h and then GRXWriteable.h. diff --git a/src/objective-c/examples/Sample/README.md b/src/objective-c/examples/Sample/README.md new file mode 100644 index 00000000..716241bb --- /dev/null +++ b/src/objective-c/examples/Sample/README.md @@ -0,0 +1 @@ +This sample app requires the use of Cocoapods. After installing Cocoapods, run `pod install` in this directory to recreate its dependencies. (This will compile OpenSSL, which takes some time). diff --git a/src/objective-c/tests/Connectivity/README.md b/src/objective-c/tests/Connectivity/README.md new file mode 100644 index 00000000..907821e8 --- /dev/null +++ b/src/objective-c/tests/Connectivity/README.md @@ -0,0 +1,14 @@ +This app can be used to manually test gRPC under changing network conditions. + +It makes RPCs in a loop, logging when the request is sent and the response is received. 
+
+To test on the simulator, run `pod install`, open the workspace created by Cocoapods, and run the
+app on an iOS device. Once running, tap a few times on each of the two buttons to make a few unary and streaming
+calls. Then disable/enable different network interfaces (WiFi, cellular) on your device.
+
+The expected behavior is that the pending streaming calls fail immediately with error UNAVAILABLE.
+Moreover, when the network comes back, new calls have the same behavior.
+
+```
+2016-06-29 16:51:29.443 ConnectivityTestingApp[73129:3567949] Sending request.
+```
diff --git a/src/php/README.md b/src/php/README.md
new file mode 100644
index 00000000..757b7e5e
--- /dev/null
+++ b/src/php/README.md
@@ -0,0 +1,405 @@
+
+# Overview
+
+This directory contains source code for PHP implementation of gRPC layered on
+shared C library. The same installation guides with more examples and
+tutorials can be seen at [grpc.io](https://grpc.io/docs/languages/php/quickstart).
+gRPC PHP installation instructions for Google Cloud Platform are in
+[cloud.google.com](https://cloud.google.com/php/grpc).
+
+## Environment
+
+### Prerequisites
+
+* `php`: version 7.0 or above (PHP 5.x support is deprecated from Sep 2020).
+* `pecl`
+* `composer`
+* `phpunit` (optional)
+
+
+## Install the _grpc_ extension
+
+There are two ways to install the `grpc` extension.
+* Via `pecl`
+* Build from source
+
+### Install from PECL
+
+```sh
+$ [sudo] pecl install grpc
+```
+
+or specific version
+
+```sh
+$ [sudo] pecl install grpc-1.30.0
+```
+
+Please make sure your `gcc` version satisfies the minimum requirement as
+specified [here](https://grpc.io/docs/languages/#official-support).
+
+
+### Install on Windows
+
+You can download the pre-compiled `grpc.dll` extension from the PECL
+[website](https://pecl.php.net/package/grpc).
+
+### Build from source
+
+Clone this repository at the [latest stable release tag](https://github.com/grpc/grpc/releases).
+ +```sh +$ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc +$ cd grpc +``` + +#### Build the gRPC C core library + +```sh +$ git submodule update --init +$ EXTRA_DEFINES=GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK make +``` + +#### Build and install the `grpc` extension + +Compile the `grpc` extension from source + +```sh +$ grpc_root="$(pwd)" +$ cd src/php/ext/grpc +$ phpize +$ GRPC_LIB_SUBDIR=libs/opt ./configure --enable-grpc="${grpc_root}" +$ make +$ [sudo] make install +``` + +This will compile and install the `grpc` extension into the +standard PHP extension directory. You should be able to run +the [unit tests](#unit-tests), with the `grpc` extension installed. + + +### Update php.ini + +After installing the `grpc` extension, make sure you add this line to your +`php.ini` file, depending on where your PHP installation is, to enable the +`grpc` extension. + +```sh +extension=grpc.so +``` + +## Composer package + +In addition to the `grpc` extension, you will need to install the `grpc/grpc` +composer package as well. Add this to your project's `composer.json` file. + +```json + "require": { + "grpc/grpc": "~1.30.0" + } +``` + +To run tests with generated stub code from `.proto` files, you will also +need the `composer` and `protoc` binaries. You can find out how to get these +below. + +## Protocol Buffers + +gRPC PHP supports +[protocol buffers](https://developers.google.com/protocol-buffers) +out-of-the-box. You will need the following things to get started: + +* `protoc`: the protobuf compiler binary to generate PHP classes for your +messages and service definition. +* `grpc_php_plugin`: a plugin for `protoc` to generate the service stub +classes. +* `protobuf.so`: the `protobuf` extension runtime library. + +### `protoc` compiler + +If you don't have it already, you need to install the protobuf compiler +`protoc`, version 3.5.0+ (the newer the better) for the current gRPC version. 
+If you have already installed it, make sure the protobuf version is compatible with the
+grpc version you installed. If you build grpc.so from the source, you can check
+the version of grpc inside the package.xml file.
+
+The compatibility between the grpc and protobuf version is listed in the table
+below:
+
+grpc | protobuf | grpc | protobuf | grpc | protobuf
+--- | --- | --- | --- | --- | ---
+v1.0.0 | 3.0.0(GA) | v1.12.0 | 3.5.2 | v1.22.0 | 3.8.0
+v1.0.1 | 3.0.2 | v1.13.1 | 3.5.2 | v1.23.1 | 3.8.0
+v1.1.0 | 3.1.0 | v1.14.2 | 3.5.2 | v1.24.0 | 3.8.0
+v1.2.0 | 3.2.0 | v1.15.1 | 3.6.1 | v1.25.0 | 3.8.0
+v1.2.0 | 3.2.0 | v1.16.1 | 3.6.1 | v1.26.0 | 3.8.0
+v1.3.4 | 3.3.0 | v1.17.2 | 3.6.1 | v1.27.3 | 3.11.2
+v1.3.5 | 3.2.0 | v1.18.0 | 3.6.1 | v1.28.1 | 3.11.2
+v1.4.0 | 3.3.0 | v1.19.1 | 3.6.1 | v1.29.0 | 3.11.2
+v1.6.0 | 3.4.0 | v1.20.1 | 3.7.0 | v1.30.0 | 3.12.2
+v1.8.0 | 3.5.0 | v1.21.3 | 3.7.0
+
+If `protoc` hasn't been installed, you can download the `protoc` binary from
+the protocol buffers
+[Github repository](https://github.com/protocolbuffers/protobuf/releases).
+Then unzip this file and update the environment variable `PATH` to include the
+path to the protoc binary file.
+
+If you really must compile `protoc` from source, you can run the following
+commands, but this is risky because there is no easy way to uninstall /
+upgrade to a newer release.
+
+```sh
+$ cd grpc/third_party/protobuf
+$ ./autogen.sh && ./configure && make
+$ [sudo] make install
+```
+
+### `grpc_php_plugin` protoc plugin
+
+You need the `grpc_php_plugin` to generate the PHP client stub classes. This
+plugin works with the main `protoc` binary to generate classes that you can
+import into your project.
+
+You can build `grpc_php_plugin` with `cmake`:
+
+```sh
+$ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc
+$ cd grpc
+$ git submodule update --init
+$ mkdir -p cmake/build
+$ cd cmake/build
+$ cmake ../..
+$ make protoc grpc_php_plugin +``` + +The commands above will make `protoc` and `grpc_php_plugin` available +in `cmake/build/third_party/protobuf/protoc` and `cmake/build/grpc_php_plugin`. + +Alternatively, you can also build the `grpc_php_plugin` with `bazel`: + +```sh +$ bazel build @com_google_protobuf//:protoc +$ bazel build src/compiler:grpc_php_plugin +``` + +The `protoc` binary will be found in +`bazel-bin/external/com_google_protobuf/protoc`. +The `grpc_php_plugin` binary will be found in +`bazel-bin/src/compiler/grpc_php_plugin`. + +Plugin may use the new feature of the new protobuf version, thus please also +make sure that the protobuf version installed is compatible with the grpc +version you build this plugin. + +### `protobuf` runtime library + +There are two `protobuf` runtime libraries to choose from. They are identical +in terms of APIs offered. The C implementation provides better performance, +while the native implementation is easier to install. + +#### C implementation (for better performance) + +Install the `protobuf` extension from PECL: + +``` sh +$ [sudo] pecl install protobuf +``` +or specific version + +``` sh +$ [sudo] pecl install protobuf-3.12.2 +``` + +And add this to your `php.ini` file: + +```sh +extension=protobuf.so +``` + +#### PHP implementation (for easier installation) + +Or require the `google/protobuf` composer package. Add this to your +`composer.json` file: + +```json + "require": { + "google/protobuf": "~v3.12.2" + } +``` + +### Generate PHP classes from your service definition + +With all the above done, now you can define your message and service definition +in a `.proto` file and generate the corresponding PHP classes, which you can +import into your project, with a command similar to the following: + +``` +$ protoc -I=. echo.proto --php_out=. --grpc_out=. 
\ +--plugin=protoc-gen-grpc= +``` + +## Unit Tests + +You will need the source code to run tests + +```sh +$ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc +$ cd grpc +$ git submodule update --init +``` + +Run unit tests + +```sh +$ cd grpc/src/php +$ ./bin/run_tests.sh +``` + +## Generated Code Tests + +This section specifies the prerequisites for running the generated code tests, +as well as how to run the tests themselves. + +### Composer + +Install the runtime dependencies via `composer install`. + +```sh +$ cd grpc/src/php +$ composer install +``` + + +### Client Stub + +The generate client stub classes have already been generated from `.proto` files +by the `./bin/generate_proto_php.sh` script. + +### Run test server + +Run a local server serving the `Math` +[service](https://github.com/grpc/grpc/blob/master/src/proto/math/math.proto#L42). + +```sh +$ cd grpc/src/php/tests/generated_code +$ npm install +$ node math_server.js +``` + +### Run test client + +Run the generated code tests + +```sh +$ cd grpc/src/php +$ ./bin/run_gen_code_test.sh +``` + +## Apache, PHP-FPM and Nginx + +For more information on how you can run the `grpc` library with Apache, +PHP-FPM and Nginx, you can check out +[this guide](https://github.com/grpc/grpc/tree/master/examples/php/echo). +There you will find a series of Docker images where you can quickly run an +end-to-end example. 
+ +## Misc Config Options + +### SSL credentials + +Here's how you can specify SSL credentials when creating your PHP client: + +```php +$client = new Helloworld\GreeterClient('localhost:50051', [ + 'credentials' => Grpc\ChannelCredentials::createSsl( + file_get_contents('')) +]); +``` + +### pcntl_fork() support + +To make sure the `grpc` extension works with `pcntl_fork()` and related +functions, add the following lines to your `php.ini` file: + +``` +grpc.enable_fork_support = 1 +grpc.poll_strategy = epoll1 +``` + +### Tracing and Logging + +To turn on gRPC tracing, add the following lines to your `php.ini` file. For +all possible values of the `grpc.grpc.trace` option, please check +[this doc](https://github.com/grpc/grpc/blob/master/doc/environment_variables.md). + +``` +grpc.grpc_verbosity=debug +grpc.grpc_trace=all,-polling,-polling_api,-pollable_refcount,-timer,-timer_check +grpc.log_filename=/var/log/grpc.log +``` + +> Make sure the log file above is writable, by doing the following: +> ``` +> $ sudo touch /var/log/grpc.log +> $ sudo chmod 666 /var/log/grpc.log +> ``` +> Note: The log file does grow pretty quickly depending on how much logs are +> being printed out. Make sure you have other mechanisms (perhaps another +> cronjob) to zero out the log file from time to time, +> e.g. `cp /dev/null /var/log/grpc.log`, or turn these off when logs or tracing +> are not necessary for debugging purposes. 
+ +### User agent string + +You can customize the user agent string for your gRPC PHP client by specifying +this `grpc.primary_user_agent` option when constructing your PHP client: + +```php +$client = new Helloworld\GreeterClient('localhost:50051', [ + 'credentials' => Grpc\ChannelCredentials::createInsecure(), + 'grpc.primary_user_agent' => 'my-user-agent-identifier', +]); +``` + +### Maximum message size + +To change the default maximum message size, specify this +`grpc.max_receive_message_length` option when constructing your PHP client: + +```php +$client = new Helloworld\GreeterClient('localhost:50051', [ + 'credentials' => Grpc\ChannelCredentials::createInsecure(), + 'grpc.max_receive_message_length' => 8*1024*1024, +]); +``` + +### Compression + +You can customize the compression behavior on the client side, by specifying the following options when constructing your PHP client. + +```php +$client = new Helloworld\GreeterClient('localhost:50051', [ + 'credentials' => Grpc\ChannelCredentials::createInsecure(), + 'grpc.default_compression_algorithm' => 2, + 'grpc.default_compression_level' => 2, +]); +``` + +Possible values for `grpc.default_compression_algorithm`: + +``` +0: No compression +1: Compress with DEFLATE algorithm +2: Compress with GZIP algorithm +3: Stream compression with GZIP algorithm +``` + +Possible values for `grpc.default_compression_level`: + +``` +0: None +1: Low level +2: Medium level +3: High level +``` diff --git a/src/php/docker/README.md b/src/php/docker/README.md new file mode 100644 index 00000000..26741d26 --- /dev/null +++ b/src/php/docker/README.md @@ -0,0 +1,182 @@ + +# Docker Images for Testing + +This directory contains a number of docker images to assist testing the +[gRPC PECL extension](http://pecl.php.net/package/grpc) against various +different PHP environments. 
+ + +## Build and Run Tests +```sh +$ cd grpc +``` + +To build all docker images: +```sh +$ ./src/php/bin/build_all_docker_images.sh +``` + +Or to only build some selected images +```sh +$ ./src/php/bin/build_all_docker_images.sh grpc-ext php-src +``` + +Or to only print out individual `docker build` commands +```sh +$ ./src/php/bin/build_all_docker_images.sh --cmds +``` + +To run all tests: +```sh +$ ./src/php/bin/run_all_docker_images.sh +``` + +Or to only run some selected images +```sh +$ ./src/php/bin/run_all_docker_images.sh grpc-ext php-src +``` + +Or to only print out individual `docker run` commands +```sh +$ ./src/php/bin/run_all_docker_images.sh --cmds +``` + +## Build and Run Specified Image +### `grpc-ext` +This image builds the full `grpc` PECL extension (effectively the current +release candidate), installs it against the current PHP version, and runs the +unit tests. + +Build `grpc-ext` docker image: +```sh +$ cd grpc +$ docker build -t grpc-php/grpc-ext -f ./src/php/docker/grpc-ext/Dockerfile . +``` + +Run image: +```sh +$ docker run -it --rm grpc-php/grpc-ext +``` + +### `grpc-src` + +This image builds the `grpc` PECL extension in a 'thin' way, only containing +the gRPC extension source files. The gRPC C Core library is expected to be +installed separately and dynamically linked. The extension is installed +against the current PHP version. + +This also allows us to compile our `grpc` extension with some additional +configure options, like `--enable-tests`, which allows some additional unit +tests to be run. + +Build `grpc-src` docker image: +```sh +$ cd grpc +$ docker build -t grpc-php/grpc-src -f ./src/php/docker/grpc-src/Dockerfile . +``` + +Run image: +```sh +$ docker run -it --rm grpc-php/grpc-src +``` + +### `alpine` + +This image builds the `grpc` extension against the current PHP version in an +Alpine-Linux base image. 
+ +Build `alpine` docker image: +```sh +$ cd grpc +$ docker build -t grpc-php/alpine -f ./src/php/docker/alpine/Dockerfile . +``` + +Run image: +```sh +$ docker run -it --rm grpc-php/alpine +``` +### `centos7` + +This image builds the `grpc` extension against the GCC version in Centos7 base image. The default version of gcc in centos7 is gcc-4.8.5. Run `scl enable devtoolset-7 bash` command to enable gcc-7.3.1. + +Build `centos7` docker image: +```sh +$ cd grpc +$ docker build -t grpc-gcc7/centos -f ./src/php/docker/centos7/Dockerfile . +``` + +Run image: +```sh +$ docker run -it --rm grpc-gcc7/centos +``` + +### `php-src` + +Instead of using a general purpose base docker image provided by PHP, here we +compile PHP itself from +[source](https://github.com/php/php-src). This will allow us to change some +`configure` options, like `--enable-debug`. Then we proceed to build the full +`grpc` PECL extension and run the unit tests. + +Build `php-src` docker image: +```sh +$ cd grpc +$ docker build -t grpc-php/php-src -f ./src/php/docker/php-src/Dockerfile . +``` + +Run image: +```sh +$ docker run -it --rm grpc-php/php-src +``` + +### `php-zts` + +This image builds the `grpc` extension against the current PHP version with ZTS +enabled. + +Build `php-zts` docker image: +```sh +$ cd grpc +$ docker build -t grpc-php/php-zts -f ./src/php/docker/php-zts/Dockerfile . +``` + +Run image: +```sh +$ docker run -it --rm grpc-php/php-zts +``` + +### `php-future` + +This image builds the `grpc` extension against the next future PHP version +currently in alpha, beta or release candidate stage. + +Build `php-future` docker image: +```sh +$ cd grpc +$ docker build -t grpc-php/php-future -f ./src/php/docker/php-future/Dockerfile . +``` + +Run image: +```sh +$ docker run -it --rm grpc-php/php-future +``` +### `php5` + +NOTE: PHP 5.x has reached the end-of-life state and is no longer supported. 
+ + +### `fork-support` + +This image tests `pcntl_fork()` support and makes sure scripts using +`pcntl_fork()` don't freeze or crash. + +Build `grpc-ext` docker image: +```sh +$ cd grpc +$ docker build -t grpc-php/fork-support -f ./src/php/docker/fork-support/Dockerfile . +``` + +Run image: +```sh +$ docker run -it --rm grpc-php/fork-support +``` \ No newline at end of file diff --git a/src/python/grpcio_tests/tests/unit/credentials/README.md b/src/python/grpcio_tests/tests/unit/credentials/README.md new file mode 100644 index 00000000..100b43c1 --- /dev/null +++ b/src/python/grpcio_tests/tests/unit/credentials/README.md @@ -0,0 +1,15 @@ +These are test keys *NOT* to be used in production. + +The `certificate_hierarchy_1` and `certificate_hierarchy_2` contain +two disjoint but similarly organized certificate hierarchies. Each +contains: + +* The respective root CA cert in `certs/ca.cert.pem` + +* The intermediate CA cert in + `intermediate/certs/intermediate.cert.pem`, signed by the root CA + +* A client cert and a server cert--both signed by the intermediate + CA--in `intermediate/certs/client.cert.pem` and + `intermediate/certs/localhost-1.cert.pem`; the corresponding keys + are in `intermediate/private` diff --git a/src/ruby/CHANGELOG.md b/src/ruby/CHANGELOG.md new file mode 100644 index 00000000..8ec6e3cf --- /dev/null +++ b/src/ruby/CHANGELOG.md @@ -0,0 +1,11 @@ +## 0.6.1 (2015-04-14) + +### Changes + +* Begins this ChangeLog ([@tbetbetbe][]) +* Updates to version 0.4 of googleauth. ([@tbetbetbe][]) +* Switch the extension to use the call API. 
([@tbetbetbe][]) +* Refactor the C extension to avoid identifiers used by ruby ([@yugui][]) + +[@tbetbetbe]: https://github.com/tbetbetbe +[@yugui]: https://github.com/yugui diff --git a/src/ruby/README.md b/src/ruby/README.md new file mode 100644 index 00000000..a98f6f03 --- /dev/null +++ b/src/ruby/README.md @@ -0,0 +1,83 @@ +[![Gem](https://img.shields.io/gem/v/grpc.svg)](https://rubygems.org/gems/grpc/) +gRPC Ruby +========= + +A Ruby implementation of gRPC. + +PREREQUISITES +------------- + +- Ruby 2.x. The gRPC API uses keyword args. + +INSTALLATION +--------------- + +**Linux and Mac OS X:** + +```sh +gem install grpc +``` + +If using a Gemfile and you wish to pull from a git repository or GitHub, make sure to tell bundler to retrieve submodules: +``` +gem 'grpc', github: 'grpc/grpc', submodules: true +``` + +BUILD FROM SOURCE +--------------------- +- Clone this repository + +- Init submodules + +```sh +git submodule update --init +``` + +- Install Ruby 2.x. Consider doing this with [RVM](http://rvm.io), it's a nice way of controlling + the exact ruby version that's used. +```sh +$ command curl -sSL https://rvm.io/mpapis.asc | gpg --import - +$ \curl -sSL https://get.rvm.io | bash -s stable --ruby=ruby-2 +$ +$ # follow the instructions to ensure that your're using the latest stable version of Ruby +$ # and that the rvm command is installed +``` +- Make sure your run `source $HOME/.rvm/scripts/rvm` as instructed to complete the set up of RVM + +- Install [bundler](http://bundler.io/) +``` +$ gem install bundler +``` + +- Finally, build and install the gRPC gem locally. +```sh +$ # from this directory +$ bundle install # creates the ruby bundle, including building the grpc extension +$ rake # runs the unit tests, see rake -T for other options +``` + +DOCUMENTATION +------------- +- rubydoc for the gRPC gem is available online at [rubydoc][]. 
+- the gRPC Ruby reference documentation is available online at [grpc.io][] + +CONTENTS +-------- +Directory structure is the layout for [ruby extensions][] +- ext: the gRPC ruby extension +- lib: the entrypoint gRPC ruby library to be used in a 'require' statement +- spec: Rspec unittests +- bin: example gRPC clients and servers, e.g, + + ```ruby + stub = Math::Math::Stub.new('my.test.math.server.com:8080', :this_channel_is_insecure) + req = Math::DivArgs.new(dividend: 7, divisor: 3) + GRPC.logger.info("div(7/3): req=#{req.inspect}") + resp = stub.div(req) + GRPC.logger.info("Answer: #{resp.inspect}") + ``` + +[ruby extensions]:http://guides.rubygems.org/gems-with-extensions/ +[rubydoc]: http://www.rubydoc.info/gems/grpc +[grpc.io]: https://grpc.io/docs/languages/ruby/quickstart +[Debian jessie-backports]:http://backports.debian.org/Instructions/ diff --git a/src/ruby/end2end/README.md b/src/ruby/end2end/README.md new file mode 100644 index 00000000..ea5ab6d4 --- /dev/null +++ b/src/ruby/end2end/README.md @@ -0,0 +1,18 @@ +This directory contains some grpc-ruby end to end tests. + +Each test here involves two files: a "driver" and a "client". For example, +the "channel_closing" test involves channel_closing_driver.rb +and channel_closing_client.rb. + +Typically, the "driver" will start up a simple "echo" server, and then +spawn a client. It gives the client the address of the "echo" server as +well as an address to listen on for control rpcs. Depending on the test, the +client usually starts up a "ClientControl" grpc server for the driver to +interact with (the driver can tell the client process to do strange things at +different times, depending on the test). + +So far these tests are mostly useful for testing process-shutdown related +situations, since the client's run in separate processes. + +These tests are invoked through the "tools/run_tests/run_tests.py" script (the +Rakefile doesn't start these). 
diff --git a/src/ruby/pb/README.md b/src/ruby/pb/README.md new file mode 100644 index 00000000..49327fe3 --- /dev/null +++ b/src/ruby/pb/README.md @@ -0,0 +1,42 @@ +Protocol Buffers +================ + +This folder contains protocol buffers provided with gRPC ruby, and the generated +code to them. + +PREREQUISITES +------------- + +The code is generated using the protoc (> 3.0.0.alpha.1) and the +grpc_ruby_plugin. These must be installed to regenerate the IDL defined +classes, but that's not necessary just to use them. + +health_check/v1 +-------------------- + +This package defines the surface of a simple health check service that gRPC +servers may choose to implement, and provides an implementation for it. To +re-generate the surface. + +```bash +$ # (from this directory) +$ protoc -I ../../proto ../../proto/grpc/health/v1/health.proto \ + --grpc_out=. \ + --ruby_out=. \ + --plugin=protoc-gen-grpc=`which grpc_ruby_plugin` +``` + +test +---- + +This package defines the surface of the gRPC interop test service and client +To re-generate the surface, it's necessary to have checked-out versions of +the grpc interop test proto, e.g, by having the full gRPC repository. E.g, + +```bash +$ # (from this directory within the grpc repo) +$ protoc -I../../.. ../../../test/proto/{messages,test,empty}.proto \ + --grpc_out=. \ + --ruby_out=. \ + --plugin=protoc-gen-grpc=`which grpc_ruby_plugin` +``` diff --git a/src/ruby/tools/README.md b/src/ruby/tools/README.md new file mode 100644 index 00000000..e43f223c --- /dev/null +++ b/src/ruby/tools/README.md @@ -0,0 +1,12 @@ +# Ruby gRPC Tools + +This package distributes protoc and the Ruby gRPC protoc plugin for Windows, Linux, and Mac. + +Before this package is published, the following directories should be filled with the corresponding `protoc` and `grpc_ruby_plugin` executables. 
+ + - `bin/x86-linux` + - `bin/x86_64-linux` + - `bin/x86-macos` + - `bin/x86_64-macos` + - `bin/x86-windows` + - `bin/x86_64-windows` diff --git a/summerofcode/2016/siddharth_shukla.md b/summerofcode/2016/siddharth_shukla.md new file mode 100644 index 00000000..d753d8b8 --- /dev/null +++ b/summerofcode/2016/siddharth_shukla.md @@ -0,0 +1,65 @@ +Project Overview +================ +The project, titled 'GRPC Python compatibility support', involved +collaborating with the GRPC team to improve the library compatibility +for the GRPC Python library. + +Python is, originally, a specification for a programming language. This +specification has been implemented differently in different +implementations of the [language specification](https://docs.python.org/3/reference/). + +A small, and by no means exhaustive, list of some major python implementations +is: + +- [CPython](https://www.python.org/): The reference implementation +- [Jython](http://www.jython.org/): Python implemented in Java +- [Python for .NET](http://pythonnet.sourceforge.net/): CPython implementation that enables .NET library usage +- [IronPython](http://ironpython.net/): Python implemented in .NET +- [PyPy](http://pypy.org/): Python implemented completely in Python +- [Stackless](https://bitbucket.org/stackless-dev/stackless/wiki/Home): Replaces the dependency for the C call stack with it's own stack + +The development in this project revolved around +introducing changes to the codebase that enable support for latest +stable as well as development releases of the reference implementation +(CPython) of the Python programming language namely `Python 3.4`, +`Python 3.5`,and `Python 3.6` as well as the stable releases of the +PyPy implementation. Special changes were required to enable PyPy +support because PyPy has a non-deterministic garbage collector that does +not rely on reference counting unlike the CPython garbage collector. 
+ +The changes to the codebase involved changes to the library code as well +as changes to the tests and scripts in the test infrastructure which +resulted in both the library as well as the testing infrastructure being +Python 3.x and PyPy compatible. + +The list of merged commits, as of 22.08.2016 23:59 CEST, is summarized +here for the perusal of those interested: + +- [Enable py35 and py36 testing](https://github.com/grpc/grpc/commit/c478214e475e103c5cdf477f0adc18bba2c03903) +- [Make testing toolchain python 3.x compliant](https://github.com/grpc/grpc/commit/0589e533cd65a2ca9e0e610cc1b284d016986572) +- [Add .idea folder to .gitignore](https://github.com/grpc/grpc/commit/365ef40947e22b5438a63f123679ae9a5474c47c) +- [Fix the ThreadPoolExecutor: max_workers can't be 0](https://github.com/grpc/grpc/commit/de84d566b8fad6808e5263a25a17fa231cb5713c) +- [Add PyPy to testing toolchain](https://github.com/grpc/grpc/commit/2135a1b557f8b992186d5317cb767ac4dbcdfe5c) +- [Switch init/shutdown: lib-wide -> per-object](https://github.com/grpc/grpc/commit/9eedb4ffd74aed8d246a07f8007960b2bc167f55) +- [Skip test run if running with pypy](https://github.com/grpc/grpc/commit/f0f58e68738abbc317f7f449c5104f7fbbff26bd) + +The list of unmerged pull requests is as follows: + +- [Add PyPy 5.3.1 to dockerfile and template](https://github.com/grpc/grpc/pull/7763) +- [remove skipIf from TypeSmokeTest (issue 7672)](https://github.com/grpc/grpc/pull/7831) + +The list of tasks that have pending unsubmitted pull requests is as follows: + +- Modify run_tests.py to enable testing of new languages without + affecting old branches. 
+ + +Project Details +=============== +- Title: GRPC Python compatibility support +- Student: [Siddharth Shukla](https://github.com/thunderboltsid) +- Mentors: [Nathaniel Manista](https://github.com/nathanielmanistaatgoogle), [Masood Malekghassemi](https://github.com/soltanmm) +- Duration: May 23 - August 23 +- Hat tip: [Ken Payson](https://github.com/kpayson64), [Jan Tattermusch](https://github.com/jtattermusch), [Nicolas Noble](https://github.com/nicolasnoble) + + diff --git a/summerofcode/2018/naresh.md b/summerofcode/2018/naresh.md new file mode 100644 index 00000000..d471bff5 --- /dev/null +++ b/summerofcode/2018/naresh.md @@ -0,0 +1,191 @@ +# Project overview + +## Title + +Enable Building of gRPC Python with Bazel + +## Overview + +gRPC Python currently has a constellation of scripts written to build the +project, but it has a lot of limitations in terms of speed and maintainability. +[Bazel](https://bazel.build/) is the open-sourced variant of Google's internal +system, Blaze, which is an ideal replacement for building such projects in a +fast and declarative fashion. But Bazel in itself is still in active +development, especially in terms of Python (amongst a few other languages). + +The project aimed to fill this gap and build gRPC Python with Bazel. + +[Project page](https://summerofcode.withgoogle.com/projects/#6482576244473856) + +[Link to proposal](https://storage.googleapis.com/summerofcode-prod.appspot.com/gsoc/core_project/doc/5316764725411840_1522049732_Naresh_Ramesh_-_GSoC_proposal.pdf) + +## Thoughts and challenges + +### State of Bazel for Python + +Although previously speculated, the project didn't require any contributions +directly to [bazelbuild/bazel](https://github.com/bazelbuild/bazel). The Bazel +rules for Python are currently being separated out into their own repo at +[bazelbuild/rules_python](https://github.com/bazelbuild/rules_python/). 
+ +Bazel is [still very much in active development for +Python](https://groups.google.com/forum/#!topic/bazel-sig-python/iQjV9sfSufw) +though. There's still challenges when it comes to building for Python 2 vs 3. +Using pip packages is still in experimental. Bazel Python support is currently +distributed across these two repositories and is yet to begin migration to one +place (which will be +[bazelbuild/rules_python](https://github.com/bazelbuild/rules_python/)). + +Bazel's roadmap for Python is publicly available [here as a Google +doc](https://docs.google.com/document/d/1A6J3j3y1SQ0HliS86_mZBnB5UeBe7vExWL2Ryd_EONI/edit). + +### Cross collaboration between projects + +Cross contribution surprisingly came up because of building protobuf sources +for Python, which is still not natively supported by Bazel. An existing +repository, [pubref/rules_protobuf](https://github.com/pubref/rules_protobuf), +which was maintained by an independent maintainer (i.e. not a part of Bazel) +helped solve this problem, but had [one major blocking +issue](https://github.com/pubref/rules_protobuf/issues/233) and could not be +resolved at the source. But [a solution to the +issue](https://github.com/pubref/rules_protobuf/pull/196) was proposed by user +dududko, which was not merged because of failing golang tests but worked well +for Python. Hence, a fork of this repo was made and is to be used with gRPC +until the solution can be merged back at the source. + +### Building Cython code + +Building Cython code is still not supported by Bazel, but the team at +[cython/cython](https://github.com/cython/cython) have added support for Bazel +on their side. The way it works is by including Cython as a third-party Bazel +dependency and using custom Bazel rules for building our Cython code using the +binary within the dependency. + +### Packaging Python code using Bazel + +pip and PyPI still remain the de-facto standard for distributing Python +packages. 
Although Bazel is pretty versatile and is amazing for its +reproducible and incremental build capabilities, these can still only be used +by the contributors and developers for building and testing the gRPC code. But +there's no way yet to build Python packages for distribution. + +### Building gRPC Python with Bazel on Kokoro (internal CI) + +Integration with the internal CI was one of the areas that highlighted how +simple Bazel can be to use. gRPC was already using a dockerized Bazel setup to +build some of its core code (but not as the primary build setup). Adding a new +job on the internal CI ended up being as simple as creating a new shell script +to install the required dependencies (which were python-dev and Bazel) and a +new configuration file which pointed to the subdirectory (src/python) under +which to look for targets and run the tests accordingly. + +### Handling imports in Python code + +When writing Python packages, imports in nested modules are typically made +relative to the package root. But because of the way Bazel works, these paths +wouldn't make sense from the Workspace root. So, the folks at Bazel have added +a nifty `imports` parameter to all the Python rules which lets us specify for +each target, which path to consider as the root. This parameter allows for +relative paths like `imports = ["../",]`. + +### Fetching Python headers for Cython code to use + +Cython code makes use of `Python.h`, which pulls in the Python API for C +extension modules to use, but its location depends on the Python version and +operating system the code is building on. To make this easier, the folks at +Tensorflow wrote [repository rules for Python +autoconfiguration](https://github.com/tensorflow/tensorflow/tree/e447ae4759317156d31a9421290716f0ffbffcd8/third_party/py). +This has been [adapted with some +modifications](https://github.com/grpc/grpc/pull/15992) for use in gRPC Python +as well. 
+ +## How to use + +All the Bazel tests for gRPC Python can be run using a single command: + +```bash +bazel test --spawn_strategy=standalone --genrule_strategy=standalone //src/python/... +``` + +If any specific test is to be run, like say `LoggingPoolTest` (which is present +in +`src/python/grpcio_tests/tests/unit/framework/foundation/_logging_pool_test.py`), +the command to run would be: + +```bash +bazel test --spawn_strategy=standalone --genrule_strategy=standalone //src/python/grpcio_tests/tests/unit/framework/foundation:logging_pool_test +``` + +where, `logging_pool_test` is the name of the Bazel target for this test. + +Similarly, to run a particular method, use: + +```bash +bazel test --spawn_strategy=standalone --genrule_strategy=standalone //src/python/grpcio_tests/tests/unit/_rpc_test --test_arg=RPCTest.testUnrecognizedMethod +``` + +## Useful Bazel flags + +- Use `bazel build` with a `-s` flag to see the logs being printed out to + standard output while building. +- Similarly, use `bazel test` with a `--test_output=streamed` to see the + test logs while testing. Something to know while using this flag is that all + tests will be run locally, without sharding, one at a time. 
+ +## Contributions + +### Related to the project + +- [435c6f8](https://github.com/grpc/grpc/commit/435c6f8d1e53783ec049b3482445813afd8bc514) + Update grpc_gevent cython files to include .pxi +- [74426fd](https://github.com/grpc/grpc/commit/74426fd2164c51d6754732ebe372133c19ba718c) + Add gevent_util.h to grpc_base_c Bazel target +- [b6518af](https://github.com/grpc/grpc/commit/b6518afdd610f0115b42aee1ffc71520c6b0d6b1) + Upgrade Bazel to 0.15.0 +- [ebcf04d](https://github.com/grpc/grpc/commit/ebcf04d075333c42979536c5dd2091d363f67e5a) + Kokoro setup for building gRPC Python with Bazel +- [3af1aaa](https://github.com/grpc/grpc/commit/3af1aaadabf49bc6274711a11f81627c0f351a9a) + Basic setup to build gRPC Python with Bazel +- [11f199e](https://github.com/grpc/grpc/commit/11f199e34dc416a2bd8b56391b242a867bedade4) + Workspace changes to build gRPC Python with Bazel +- [848fd9d](https://github.com/grpc/grpc/commit/848fd9d75f6df10f00e8328ff052c0237b3002ab) + Minimal Bazel BUILD files for grpcio Python + +### Other contibutions + +- [89ce16b](https://github.com/grpc/grpc/commit/89ce16b6daaad4caeb1c9ba670c6c4b62ea1a93c) + Update Dockerfiles for python artifacts to use latest git version +- [32f7c48](https://github.com/grpc/grpc/commit/32f7c48dad71cac7af652bf994ab1dde3ddb0607) + Revert removals from python artifact dockerfiles +- [712eb9f](https://github.com/grpc/grpc/commit/712eb9ff91cde66af94e8381ec01ad512ed6d03c) + Make logging after success in jobset more apparent +- [c6e4372](https://github.com/grpc/grpc/commit/c6e4372f8a93bb0eb996b5f202465785422290f2) + Create README for gRPC Python reflection package +- [2e113ca](https://github.com/grpc/grpc/commit/2e113ca6b2cc31aa8a9687d40ee1bd759381654f) + Update logging in Python to use module-level logger + +### Pending PRs + +- BUILD files for all tests in + [tests.json](https://github.com/ghostwriternr/grpc/blob/70c8a58b2918a5369905e5a203d7ce7897b6207e/src/python/grpcio_tests/tests/tests.json). 
+- BUILD files for gRPC testing, gRPC health checking, gRPC reflection. +- (Yet to complete) BUILD files for grpcio_tools. One test depends on this. + +## Known issues + +- [grpc/grpc #16336](https://github.com/grpc/grpc/issues/16336) RuntimeError + for `_reconnect_test` Python unit test with Bazel +- Some tests in Bazel pass despite throwing an exception. Example: + `testAbortedStreamStream` in + `src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py`. +- [#14557](https://github.com/grpc/grpc/pull/14557) introduced a minor bug + where the module level loggers don't initialize a default logging handler. +- Sanity test doesn't make sense in the context of Bazel, and thus fails. +- There are some issues with Python2 vs Python3. Specifically, + - On some machines, “cygrpc.so: undefined symbol: _Py_FalseStruct” error + shows up. This is because of incorrect Python version being used to build + Cython. + - Some external packages like enum34 throw errors when used with Python 3 and + some extra packages are currently installed as Python version in current + build scripts. For now, the extra packages are added to a + `requirements.bazel.txt` file in the repository root. diff --git a/summerofcode/ideas.md b/summerofcode/ideas.md new file mode 100644 index 00000000..de59be82 --- /dev/null +++ b/summerofcode/ideas.md @@ -0,0 +1,36 @@ +# gRPC Summer of Code Project Ideas + +Hello students! + +We want gRPC to be the universal remote procedure call protocol for all +computing platforms and paradigms, so while these are our ideas of what we +think would make good projects for the summer, we're eager to hear your ideas +and proposals as well. +[Try us out](https://github.com/grpc/grpc/blob/master/CONTRIBUTING.md) and get +to know the gRPC code and team! 
+ +**Required skills for all projects:** git version control, collaborative +software development on github.com, and software development in at least one +of gRPC's ten languages on at least one of Linux, macOS, and Windows. + +------------------------------------- + +gRPC Core: + +1. Implement ["early OK" semantics](https://github.com/grpc/grpc/issues/7032). The gRPC wire protocol allows servers to complete an RPC with OK status without having processed all requests ever sent to the client; it's the gRPC Core that currently restricts applications from so behaving. This behavioral gap in the gRPC Core should be filled in. + * **Required skills:** C programming language, C++ programming language. + * **Likely mentors:** [Nathaniel Manista](https://github.com/nathanielmanistaatgoogle), [Nicolas Noble](https://github.com/nicolasnoble). + +1. [Make channel-connectivity-watching cancellable](https://github.com/grpc/grpc/issues/3064). Anything worth waiting for is worth cancelling. The fact that channel connectivity is currently poll-based means that clean shutdown of gRPC channels can take as long as the poll interval. No one should have to wait two hundred milliseconds to garbage-collect an object. + * **Required skills:** C programming language, C++ programming language, Python programming language. + * **Likely mentors:** [Nathaniel Manista](https://github.com/nathanielmanistaatgoogle), [Vijay Pai](https://github.com/vjpai). + +gRPC Python: + +1. Support static type-checking of both gRPC Python itself and of code that uses gRPC Python. No one likes dynamic typing and Python is finally outgrowing it! There are probably errors in the implementation of gRPC Python that [pytype](https://github.com/google/pytype) or [mypy](http://mypy-lang.org/) could detect. There are certainly errors in other code that uses gRPC Python that they could detect. + * **Required skills:** Python programming language, open source development across multiple repositories and projects. 
+ * **Likely mentors:** [Nathaniel Manista](https://github.com/nathanielmanistaatgoogle), [Kailash Sethuraman](https://github.com/hsaliak). + +1. [Enable building of gRPC Python with Bazel](https://github.com/grpc/grpc/issues/8079). Bazel is the designated replacement for our constellation of crufty build scripts, but it's still under active development itself. Up for a challenge? gRPC Python could easily be the most complex codebase to be built with Bazel. + * **Required skills:** Python programming language, Bazel toolchain, Cython, open source development across multiple repositories and projects. + * **Likely mentors:** [Nathaniel Manista](https://github.com/nathanielmanistaatgoogle). diff --git a/templates/README.md b/templates/README.md new file mode 100644 index 00000000..9c4626f5 --- /dev/null +++ b/templates/README.md @@ -0,0 +1,133 @@ +# Regenerating project files + +Prerequisites +- `python` +- `pip install mako` (the template processor) +- `pip install pyyaml` (to read the yaml files) +- `go` (required by boringssl dependency) + +``` +# Regenerate the projects files (and other generated files) using templates +tools/buildgen/generate_projects.sh +``` + +# Quick justification + +We've approached the problem of the build system from a lot of different +angles. The main issue was that there isn't a single build system that +was going to single handedly cover all of our usage cases. + +So instead we decided to work the following way: + +* A `build.yaml` file at the root is the source of truth for listing all the +targets and files needed to build grpc and its tests, as well as a basic system +for dependency description. + +* Most of the build systems supported by gRPC (e.g. Makefile, cmake, XCode) have a template defined in this directory. The templates use the information from the `build.yaml` file to generate the project files specific to a given build system. 
+ +This way we can maintain as many project system as we see fit, without having +to manually maintain them when we add or remove new code to the repository. +Only the structure of the project file is relevant to the template. The actual +list of source code and targets isn't. + +# Structure of `build.yaml` + +The `build.yaml` file has the following structure: + +``` +settings: # global settings, such as version number + ... +filegroups: # groups of files that are automatically expanded + ... +libs: # list of libraries to build + ... +targets: # list of targets to build + ... +``` + +The `filegroups` are helpful to re-use a subset of files in multiple targets. +One `filegroups` entry has the following structure: + +``` +- name: "arbitrary string", # the name of the filegroup + public_headers: # list of public headers defined in that filegroup + - ... + headers: # list of headers defined in that filegroup + - ... + src: # list of source files defined in that filegroup + - ... +``` + +The `libs` collection contains the list of all the libraries we describe. Some may be +helper libraries for the tests. Some may be installable libraries. Some may be +helper libraries for installable binaries. + +The `targets` array contains the list of all the binary targets we describe. Some may +be installable binaries. + +One `libs` or `targets` entry has the following structure (see below for +details): + +``` +name: "arbitrary string", # the name of the library +build: "build type", # in which situation we want that library to be + # built and potentially installed (see below). 
+language: "...", # the language tag; "c" or "c++" +public_headers: # list of public headers to install +headers: # list of headers used by that target +src: # list of files to compile +baselib: boolean, # this is a low-level library that has system + # dependencies +filegroups: # list of filegroups to merge to that project + # note that this will be expanded automatically +deps: # list of libraries this target depends on +dll: "..." # see below. +``` + +## The `"build"` tag + +Currently, the "`build`" tag has these meanings: + +* `"all"`: library to build on `"make all"`, and install on the system. +* `"protoc"`: a protoc plugin to build on `"make all"` and install on the system. +* `"private"`: a library to only build for tests. +* `"test"`: a test binary to run on `"make test"`. +* `"tool"`: a binary to be built upon `"make tools"`. + +All of the targets should always be present in the generated project file, if +possible and applicable. But the build tag is what should group the targets +together in a single build command. + +## The `"baselib"` boolean + +This means this is a library that will provide most of the features for gRPC. +In particular, if we're locally building OpenSSL, protobuf or zlib, then we +should merge OpenSSL, protobuf or zlib inside that library. That effect depends +on the `"language"` tag. OpenSSL and zlib are for `"c"` libraries, while +protobuf is for `"c++"` ones. + +# The template system + +We're currently using the [mako templates](http://www.makotemplates.org/) +renderer. That choice enables us to simply render text files without dragging +with us a lot of other features. Feel free to explore the current templates +in that directory. + +## The renderer engine + +As mentioned, the renderer is using [mako templates](http://www.makotemplates.org/), +but some glue is needed to process all of that. See the [buildgen folder](../tools/buildgen) +for more details. 
We're mainly loading the build.json file, and massaging it, +in order to get the list of properties we need, into a Python dictionary, that +is then passed to the template while rendering it. + +## The plugins + +The file build.json itself isn't passed straight to the template files. It is +first processed and modified by a few plugins. For example, the version +expander is [a plugin](../tools/buildgen/plugins/expand_version.py). + +The structure of a plugin is simple. The plugin must define the function +`mako_plugin` that takes a Python dictionary. That dictionary represents the +current state of the build.json contents. The plugin can alter it to whatever +feature it needs to add. diff --git a/test/core/event_engine/test_suite/README.md b/test/core/event_engine/test_suite/README.md new file mode 100644 index 00000000..8931a0cf --- /dev/null +++ b/test/core/event_engine/test_suite/README.md @@ -0,0 +1,35 @@ +A reusable test suite for EventEngine implementations. + +To exercise a custom EventEngine, simply link against `:event_engine_test_suite` +and provide a testing `main` function that sets a custom EventEngine factory: + +``` +#include "path/to/my_custom_event_engine.h" +#include "src/core/event_engine/test_suite/event_engine_test.h" + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + SetEventEngineFactory( + []() { return absl::make_unique<MyCustomEventEngine>(); }); + auto result = RUN_ALL_TESTS(); + return result; +} +``` + +And add a target to the `BUILD` file: + +``` +grpc_cc_test( + name = "my_custom_event_engine_test", + srcs = ["test_suite/my_custom_event_engine_test.cc"], + external_deps = [ + "gtest", + ], + language = "C++", + uses_polling = False, + deps = [ + ":event_engine_test_suite", + "//:grpc", + ], +) +``` diff --git a/test/cpp/README-iOS.md b/test/cpp/README-iOS.md new file mode 100644 index 00000000..89893108 --- /dev/null +++ b/test/cpp/README-iOS.md @@ -0,0 +1,52 @@ +## C++ tests on iOS + 
+[GTMGoogleTestRunner](https://github.com/google/google-toolbox-for-mac/blob/master/UnitTesting/GTMGoogleTestRunner.mm) is used to convert googletest cases to XCTest that can be run on iOS. GTMGoogleTestRunner doesn't execute the `main` function, so we can't have any test logic in `main`. +However, it's ok to call `::testing::InitGoogleTest` in `main`, as `GTMGoogleTestRunner` [calls InitGoogleTest](https://github.com/google/google-toolbox-for-mac/blob/master/UnitTesting/GTMGoogleTestRunner.mm#L151). +`grpc::testing::TestEnvironment` can also be called from `main`, as it does some test initialization (install crash handler, seed RNG) that's not strictly required to run testcases on iOS. + + +## Porting existing C++ tests to run on iOS + +Please follow these guidelines when porting tests to run on iOS: + +- Tests need to use the googletest framework +- Any setup/teardown code in `main` needs to be moved to `SetUpTestCase`/`TearDownTestCase`, and `TEST` needs to be changed to `TEST_F`. +- [Death tests](https://github.com/google/googletest/blob/master/googletest/docs/advanced.md#death-tests) are not supported on iOS, so use the `*_IF_SUPPORTED()` macros to ensure that your code compiles on iOS. 
+ +For example, the following test +```c++ +TEST(MyTest, TestOne) { + ASSERT_DEATH(ThisShouldDie(), ""); +} + +int main(int argc, char** argv) { + grpc::testing::TestEnvironment env(argc, argv); + ::testing::InitGoogleTest(&argc, argv); + grpc_init(); + return RUN_ALL_TESTS(); + grpc_shutdown(); +} +``` + +should be changed to +```c++ +class MyTest : public ::testing::Test { + protected: + static void SetUpTestCase() { grpc_init(); } + static void TearDownTestCase() { grpc_shutdown(); } +}; + +TEST_F(MyTest, TestOne) { + ASSERT_DEATH_IF_SUPPORTED(ThisShouldDie(), ""); +} + +int main(int argc, char** argv) { + grpc::testing::TestEnvironment env(argc, argv); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} +``` + +## Limitations + +Due to a [limitation](https://github.com/google/google-toolbox-for-mac/blob/master/UnitTesting/GTMGoogleTestRunner.mm#L48-L56) in GTMGoogleTestRunner, `SetUpTestCase`/`TeardownTestCase` will be called before/after *every* individual test case, similar to `SetUp`/`TearDown`. diff --git a/third_party/ABSEIL_MANUAL.md b/third_party/ABSEIL_MANUAL.md new file mode 100644 index 00000000..793e45d3 --- /dev/null +++ b/third_party/ABSEIL_MANUAL.md @@ -0,0 +1,31 @@ +# Abseil in gRPC + +This document explains how to use Abseil throughout gRPC. Note that this isn't +supposed to explain general usage of Abseil. + +## The version of Abseil + +gRPC intends to use the LTS versions of Abseil only because it simplifies +dependency management. Abseil is being distributed via package distribution +systems such as vcpkg and cocoapods. If gRPC depends on the certain version +that aren't registered, gRPC in that system cannot get the right version of +Abseil when being built, resulting in a build failure. +Therefore, gRPC will use the LTS version only, preferably the latest one. 
+ +## Libraries that are not ready to use + +Most Abseil libraries are okay to use but there are some exceptions +because they don't yet work well on some of our test machinery or the +platforms it supports. The following is a list of targets that are NOT +ready to use. + +- `absl/synchronization:*`: Blocked by b/186685878. +- `absl/random`: [WIP](https://github.com/grpc/grpc/pull/23346). + +## Implementation only + +You can use Abseil in gRPC Core and gRPC C++. But you cannot use it in +the public interface of gRPC C++ because i) it doesn't guarantee no breaking +API changes like gRPC C++ does and ii) it may make users change their build +system to address Abseil. + \ No newline at end of file diff --git a/third_party/README.md b/third_party/README.md new file mode 100644 index 00000000..19ab6d42 --- /dev/null +++ b/third_party/README.md @@ -0,0 +1,142 @@ +# Third-party libraries + +gRPC depends on several third-party libraries; their source code is available +(usually as a git submodule) in this directory. + +## Guidelines on updating submodules + +- IMPORTANT: whenever possible, try to only update to a stable release of a library (= not to master / random commit). Depending on unreleased revisions + makes gRPC installation harder for users, as it forces them to always build the dependency from source and prevents them from using more + convenient installation channels (linux packages, package managers etc.) + +- bazel BUILD uses a different dependency model - whenever updating a submodule, also update the revision in `grpc_deps.bzl` so that bazel and + non-bazel builds stay in sync (this is actually enforced by a sanity check in some cases) + +## Considerations when adding a new third-party dependency + +- gRPC C++ needs to stay buildable/installable even if the submodules are not present (e.g. the tar.gz archive with gRPC doesn't contain the submodules), + assuming that the dependencies are already installed. 
This is a requirement for being able to provide a reasonable install process (e.g. using cmake) + and to support package managers for gRPC C++. + +- Adding a new dependency is a lot of work (both for us and for the users). + We currently support multiple build systems (BAZEL, cmake, make, ...) so adding a new dependency usually requires updates in multiple build systems + (often not trivial). The installation process also needs to continue to work (we do have distrib tests to test many of the possible installation scenarios, + but they are not perfect). Adding a new dependency also usually affects the installation instructions that need to be updated. + Also keep in mind that adding a new dependency can be quite disruptive + for the users and community - it means that all users will need to update their projects accordingly (for C++ projects often non-trivial) and + the community-provided C++ packages (e.g. vcpkg) will need to be updated as well. + +## Checklist for adding a new third-party dependency + +**READ THIS BEFORE YOU ADD A NEW DEPENDENCY** + +- [ ] Make sure you understand the hidden costs of adding a dependency (see section above) and that you understand the complexities of updating the build files. Maintenance of the build files isn't for free, so expect to be involved in maintenance tasks, cleanup and support (e.g resolving user bugs) of the build files in the future. + +- [ ] Once your change is ready, start an [adhoc run of artifact - packages - distribtests flow](https://fusion.corp.google.com/projectanalysis/summary/KOKORO/prod%3Agrpc%2Fcore%2Fexperimental%2Fgrpc_build_artifacts_multiplatform) and make sure everything passes (for technical reasons, not all the distribtests can run on each PR automatically). + +- [ ] Check the impact of the new dependency on the size of our distribution packages (compare BEFORE and AFTER) and post the comparison on your PR (it should not be approved without checking the impact sizes of packages first). 
The package sizes AFTER can be obtained from the adhoc package build from bullet point above. + +## Instructions for updating dependencies + +Usually the process is + +1. update the submodule to selected commit (see guidance above) +2. update the dependency in `grpc_deps.bzl` to the same commit +3. update `tools/run_tests/sanity/check_submodules.sh` to make the sanity test pass +4. (when needed) run `tools/buildgen/generate_projects.sh` to regenerate the generated files +5. populate the bazel download mirror by running `bazel/update_mirror.sh` + +Updating some dependencies requires extra care. + +### Updating third_party/abseil-cpp + +- Two additional steps should be done before running `generate_projects.sh` above. + - Running `src/abseil-cpp/preprocessed_builds.yaml.gen.py`. + - Updating `abseil_version =` scripts in `templates/gRPC-C++.podspec.template` and + `templates/gRPC-Core.podspec.template`. +- You can see an example of previous [upgrade](https://github.com/grpc/grpc/pull/24270). + +### Updating third_party/boringssl-with-bazel + +- Update the `third_party/boringssl-with-bazel` submodule to the latest [`main-with-bazel`](https://github.com/google/boringssl/tree/main-with-bazel) branch +``` +git submodule update --init # just to start in a clean state +cd third_party/boringssl-with-bazel +git fetch origin # fetch what's new in the boringssl repository +git checkout origin/main-with-bazel # checkout the current state of main-with-bazel branch in the boringssl repo +# Note the latest commit SHA on main-with-bazel branch +cd ../.. 
# go back to grpc repo root +git status # will show that there are new commits in third_party/boringssl-with-bazel +git add third_party/boringssl-with-bazel # we actually want to update the changes to the submodule +git commit -m "update submodule boringssl-with-bazel with origin/main-with-bazel" # commit +``` + +- Update boringssl dependency in `bazel/grpc_deps.bzl` to the same commit SHA as main-with-bazel branch + - Update `http_archive(name = "boringssl",` section by updating the sha in `strip_prefix` and `urls` fields. + - Also, set `sha256` field to "" as the existing value is not valid. This will be added later once we know what that value is. + +- Update `tools/run_tests/sanity/check_submodules.sh` with the same commit + +- Commit these changes `git commit -m "update boringssl dependency to main-with-bazel commit SHA"` + +- Run `tools/buildgen/generate_projects.sh` to regenerate the generated files + - Because `sha256` in `bazel/grpc_deps.bzl` was left empty, you will get a DEBUG msg like this one: + ``` + Rule 'boringssl' indicated that a canonical reproducible form can be obtained by modifying arguments sha256 = "SHA value" + ``` + - Commit the regenerated files `git commit -m "regenerate files"` + - Update `bazel/grpc_deps.bzl` with the SHA value shown in the above debug msg. Commit again `git commit -m "Updated sha256"` + +- Run `tools/distrib/generate_boringssl_prefix_header.sh` + - Commit again `git commit -m "generate boringssl prefix headers"` + +- Increment the boringssl podspec version number in + `templates/src/objective-c/BoringSSL-GRPC.podspec.template` and `templates/gRPC-Core.podspec.template`. + [example](https://github.com/grpc/grpc/pull/21527/commits/9d4411842f02f167209887f1f3d2b9ab5d14931a) + - Commit again `git commit -m "Increment podspec version"` + +- Run `tools/buildgen/generate_projects.sh` (yes, again) + - Commit again `git commit -m "Second regeneration"` + +- Create a PR with all the above commits. 
+ +- Run `bazel/update_mirror.sh` to update GCS mirror. + +### Updating third_party/protobuf + +See http://go/grpc-third-party-protobuf-update-instructions (internal only) + +### Updating third_party/envoy-api + +Apart from the above steps, please perform the following two steps to generate the Python `xds-protos` package: + +1. Bump the version in the `tools/distrib/python/xds_protos/setup.py`; +2. Run `tools/distrib/python/xds_protos/build_validate_upload.sh` to upload the built wheel. + +### Updating third_party/upb + +Since upb is vendored in the gRPC repo, you cannot use submodule to update it. Please follow the steps below. + +1. Update third_party/upb directory by running + `git subtree pull --squash --prefix=third_party/upb https://github.com/protocolbuffers/upb.git master` +2. Update the dependency in `grpc_deps.bzl` to the same commit +3. Populate the bazel download mirror by running `bazel/update_mirror.sh` +4. Update `src/upb/gen_build_yaml.py` for newly added or removed upb files +5. Run `tools/buildgen/generate_projects.sh` to regenerate the generated files +6. Run `tools/codegen/core/gen_upb_api.sh` to regenerate upb files. + If you see breaking changes here, you may want to import upb into Google3 along with gRPC. + +### Updating third_party/xxhash + +TODO(https://github.com/Cyan4973/xxHash/issues/548): revisit LICENSE +instructions if upstream splits library and CLI. 
+ +The upstream xxhash repository contains a bunch of files that we don't want, so +we employ a rather manual update flow to pick up just the bits we care about: + +``` +git remote add xxhash https://github.com/Cyan4973/xxHash.git +git fetch xxhash +git show xxhash/dev:xxhash.h > third_party/xxhash/xxhash.h +git show xxhash/dev:LICENSE | sed -nE '/^-+$/q;p' > third_party/xxhash/LICENSE +``` diff --git a/third_party/toolchains/README.md b/third_party/toolchains/README.md new file mode 100644 index 00000000..4a268256 --- /dev/null +++ b/third_party/toolchains/README.md @@ -0,0 +1,5 @@ +# Windows RBE toolchain configuration + +- bazel_0.26.0_rbe_windows: The autogenerated toolchain for windows RBE. See go/rbe-windows-user-guide + +- rbe_win2019: The windows dockerfile to rebuild the docker image we use on RBE workers. See go/rbe-windows-user-guide \ No newline at end of file diff --git a/third_party/upb/CONTRIBUTING.md b/third_party/upb/CONTRIBUTING.md new file mode 100644 index 00000000..2f866b4e --- /dev/null +++ b/third_party/upb/CONTRIBUTING.md @@ -0,0 +1,7 @@ +## Signing the CLA + +Please sign the [Google Contributor License Agreement +(CLA)](https://cla.developers.google.com/) +before sending pull requests. For any code changes to be +accepted, the CLA must be signed. It's a quick process, I +promise! diff --git a/third_party/upb/DESIGN.md b/third_party/upb/DESIGN.md new file mode 100644 index 00000000..a7a8a284 --- /dev/null +++ b/third_party/upb/DESIGN.md @@ -0,0 +1,72 @@ + +μpb Design +---------- + +μpb has the following design goals: + +- C89 compatible. +- small code size (both for the core library and generated messages). +- fast performance (hundreds of MB/s). +- idiomatic for C programs. +- easy to wrap in high-level languages (Python, Ruby, Lua, etc) with + good performance and all standard protobuf features. +- hands-off about memory management, allowing for easy integration + with existing VMs and/or garbage collectors. 
+- offers binary ABI compatibility between apps, generated messages, and + the core library (doesn't require re-generating messages or recompiling + your application when the core library changes). +- provides all features that users expect from a protobuf library + (generated messages in C, reflection, text format, etc.). +- layered, so the core is small and doesn't require descriptors. +- tidy about symbol references, so that any messages or features that + aren't used by a C program can have their code GC'd by the linker. +- possible to use protobuf binary format without leaking message/field + names into the binary. + +μpb accomplishes these goals by keeping a very small core that does not contain +descriptors. We need some way of knowing what fields are in each message and +where they live, but instead of descriptors, we keep a small/lightweight summary +of the .proto file. We call this a `upb_msglayout`. It contains the bare +minimum of what we need to know to parse and serialize protobuf binary format +into our internal representation for messages, `upb_msg`. + +The core then contains functions to parse/serialize a message, given a `upb_msg*` +and a `const upb_msglayout*`. + +This approach is similar to [nanopb](https://github.com/nanopb/nanopb) which +also compiles message definitions to a compact, internal representation without +names. However nanopb does not aim to be a fully-featured library, and has no +support for text format, JSON, or descriptors. μpb is unique in that it has a +small core similar to nanopb (though not quite as small), but also offers a +full-featured protobuf library for applications that want reflection, text +format, JSON format, etc. + +Without descriptors, the core doesn't have access to field names, so it cannot +parse/serialize to protobuf text format or JSON. Instead this functionality +lives in separate modules that depend on the module implementing descriptors. 
+With the descriptor module we can parse/serialize binary descriptors and +validate that they follow all the rules of protobuf schemas. + +To provide binary compatibility, we version the structs that generated messages +use to create a `upb_msglayout*`. The current initializers are +`upb_msglayout_msginit_v1`, `upb_msglayout_fieldinit_v1`, etc. Then +`upb_msglayout*` uses these as its internal representation. If upb changes its +internal representation for a `upb_msglayout*`, it will also include code to +convert the old representation to the new representation. This will use some +more memory/CPU at runtime to convert between the two, but apps that statically +link μpb will never need to worry about this. + +TODO +---- + +1. revise our generated code until it is in a state where we feel comfortable + committing to API/ABI stability for it. In particular there is an open + question of whether non-ABI-compatible field accesses should have a + fastpath different from the ABI-compatible field access. +1. Add missing features (maps, extensions, unknown fields). +1. Flesh out C++ wrappers. +1. *(lower-priority)*: revise all of the existing encoders/decoders and + handlers. We probably will want to keep handlers, since they let us decouple + encoders/decoders from `upb_msg`, but we need to simplify all of that a LOT. + Likely we will want to make handlers only per-message instead of per-field, + except for variable-length fields. 
diff --git a/third_party/upb/README.md b/third_party/upb/README.md new file mode 100644 index 00000000..91147eaf --- /dev/null +++ b/third_party/upb/README.md @@ -0,0 +1,124 @@ + +# μpb - a small protobuf implementation in C + +|Platform|Build Status| +|--------|------------| +|macOS|[![Build Status](https://storage.googleapis.com/upb-kokoro-results/status-badge/macos.png)](https://fusion.corp.google.com/projectanalysis/summary/KOKORO/prod%3Aupb%2Fmacos%2Fcontinuous)| +|ubuntu|[![Build Status](https://storage.googleapis.com/upb-kokoro-results/status-badge/ubuntu.png)](https://fusion.corp.google.com/projectanalysis/summary/KOKORO/prod%3Aupb%2Fubuntu%2Fcontinuous)| + +μpb (often written 'upb') is a small protobuf implementation written in C. + +upb generates a C API for creating, parsing, and serializing messages +as declared in `.proto` files. upb is heavily arena-based: all +messages always live in an arena (note: the arena can live in stack or +static memory if desired). Here is a simple example: + +```c +#include "conformance/conformance.upb.h" + +void foo(const char* data, size_t size) { + upb_arena *arena; + + /* Generated message type. */ + conformance_ConformanceRequest *request; + conformance_ConformanceResponse *response; + + arena = upb_arena_new(); + request = conformance_ConformanceRequest_parse(data, size, arena); + response = conformance_ConformanceResponse_new(arena); + + switch (conformance_ConformanceRequest_payload_case(request)) { + case conformance_ConformanceRequest_payload_protobuf_payload: { + upb_strview payload = conformance_ConformanceRequest_protobuf_payload(request); + // ... 
+ break; + } + + case conformance_ConformanceRequest_payload_NOT_SET: + fprintf(stderr, "conformance_upb: Request didn't have payload.\n"); + break; + + default: { + static const char msg[] = "Unsupported input format."; + conformance_ConformanceResponse_set_skipped( + response, upb_strview_make(msg, sizeof(msg))); + break; + } + } + + /* Frees all messages on the arena. */ + upb_arena_free(arena); +} +``` + +API and ABI are both subject to change! Please do not distribute +as a shared library for this reason (for now at least). + +## Using upb in your project + +Currently only Bazel is supported (CMake support is partial and incomplete +but full CMake support is an eventual goal). + +To use upb in your Bazel project, first add upb to your `WORKSPACE` file, +either as a `git_repository()` or as a `new_local_repository()` with a +Git Submodule. (For an example, see `examples/bazel/ in this repo). + +```python +# Add this to your WORKSPACE file. +load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") + +git_repository( + name = "upb", + remote = "https://github.com/protocolbuffers/upb.git", + commit = "d16bf99ac4658793748cda3251226059892b3b7b", +) + +load("@upb//bazel:workspace_deps.bzl", "upb_deps") + +upb_deps() +``` + +Then in your BUILD file you can add `upb_proto_library()` rules that +generate code for a corresponding `proto_library()` rule. For +example: + +```python +# Add this to your BUILD file. +load("@upb//bazel:upb_proto_library.bzl", "upb_proto_library") + +proto_library( + name = "foo_proto", + srcs = ["foo.proto"], +) + +upb_proto_library( + name = "foo_upbproto", + deps = [":foo_proto"], +) + +cc_binary( + name = "test_binary", + srcs = ["test_binary.c"], + deps = [":foo_upbproto"], +) +``` + +Then in your `.c` file you can #include the generated header: + +```c +#include "foo.upb.h" + +/* Insert code that uses generated types. */ +``` + +## Lua bindings + +This repo has some Lua bindings for the core library. 
These are +experimental and very incomplete. These are currently included in +order to validate that the C API is suitable for wrapping. As the +project matures these Lua bindings may become publicly available. + +## Contact + +Author: Josh Haberman ([jhaberman@gmail.com](mailto:jhaberman@gmail.com), +[haberman@google.com](mailto:haberman@google.com)) diff --git a/third_party/upb/cmake/README.md b/third_party/upb/cmake/README.md new file mode 100644 index 00000000..211a054a --- /dev/null +++ b/third_party/upb/cmake/README.md @@ -0,0 +1,23 @@ + +# upb CMake build (EXPERIMENTAL) + +upb's CMake support is experimental. The core library builds successfully +under CMake, and this is verified by the Bazel tests in this directory. +However there is no support for building the upb compiler or for generating +.upb.c/upb.h files. This means upb's CMake support is incomplete at best, +unless your application is intended to be purely reflective. + +If you find this CMake setup useful in its current state, please consider +filing an issue so we know. If you have suggestions for how it could be +more useful (and particularly if you can contribute some code for it) +please feel free to file an issue for that too. Do keep in mind that upb +does not currently provide any ABI stability, so we want to avoid providing +a shared library. + +The CMakeLists.txt is generated from the Bazel BUILD files using the Python +scripts in this directory. We want to avoid having two separate sources of +truth that both need to be updated when a file is added or removed. + +This directory also contains some generated files that would be created +on the fly during a Bazel build. These are automaticaly kept in sync by +the Bazel test `//cmake:test_generated_files`. diff --git a/tools/README.md b/tools/README.md new file mode 100644 index 00000000..3cef6181 --- /dev/null +++ b/tools/README.md @@ -0,0 +1,23 @@ +buildgen: Template renderer for our build system. 
+ +distrib: Scripts to distribute language-specific packages and other distribution-related helper scripts. + +dockerfile: Docker files to test gRPC. + +doxygen: gRPC C/C++ documentation generation via Doxygen. + +gce: Scripts to help setup testing infrastructure on GCE. + +gcp: Helper scripts for interacting with various services on GCP (like Google +container engine, BigQuery etc) + +internal_ci: Support for running tests on an internal CI platform. + +interop_matrix: Scripts to build, upload, and run gRPC clients in docker with various language/runtimes. + +jenkins: Support for running tests on Jenkins. + +run_tests: Scripts to run gRPC tests in parallel. + +run_tests/performance: See the [README](./run_tests/performance/README.md) for +more notes on the performance tests. diff --git a/tools/http2_interop/README.md b/tools/http2_interop/README.md new file mode 100644 index 00000000..21688f09 --- /dev/null +++ b/tools/http2_interop/README.md @@ -0,0 +1,9 @@ +HTTP/2 Interop Tests +==== + +This is a suite of tests that check a server to see if it plays nicely with other HTTP/2 clients. To run, just type: + +`go test -spec :1234` + +Where ":1234" is the ip:port of a running server. + diff --git a/tools/internal_ci/README.md b/tools/internal_ci/README.md new file mode 100644 index 00000000..fdf70774 --- /dev/null +++ b/tools/internal_ci/README.md @@ -0,0 +1,7 @@ +# Kokoro CI job configurations and testing scripts + +gRPC uses a continuous integration tool called "Kokoro" (a.k.a "internal CI") +for running majority of its open source tests. +This directory contains the external part of kokoro test job configurations +(the actual job definitions live in an internal repository) and the shell +scripts that act as entry points to execute the actual tests. 
diff --git a/tools/interop_matrix/README.md b/tools/interop_matrix/README.md new file mode 100644 index 00000000..20913f56 --- /dev/null +++ b/tools/interop_matrix/README.md @@ -0,0 +1,53 @@ +# Overview + +This directory contains scripts that facilitate building and running gRPC interoperability tests for combinations of language/runtimes (known as matrix). + +The setup builds gRPC docker images for each language/runtime and upload it to Google Container Registry (GCR). These images, encapsulating gRPC stack +from specific releases/tag, are used to test version compatibility between gRPC release versions. + +## Step-by-step instructions for adding a GCR image for a new release for compatibility test +We have continuous nightly test setup to test gRPC backward compatibility between old clients and latest server. When a gRPC developer creates a new gRPC release, s/he is also responsible to add the just-released gRPC client to the nightly test. The steps are: +- Add (or update) an entry in `./client_matrix.py` file to reference the github tag for the release. +- Build new client docker image(s). For example, for C and wrapper languages release `v1.9.9`, do + - `tools/interop_matrix/create_matrix_images.py --git_checkout --release=v1.9.9 --upload_images --language cxx csharp python ruby php` +- Verify that the new docker image was built successfully and uploaded to GCR. For example, + - `gcloud container images list --repository gcr.io/grpc-testing` lists available images. + - `gcloud container images list-tags gcr.io/grpc-testing/grpc_interop_java` should show an image entry with tag `v1.9.9`. + - images can also be viewed in https://pantheon.corp.google.com/gcr/images/grpc-testing?project=grpc-testing +- Verify the just-created docker client image would pass backward compatibility test (it should). 
For example, + - `gcloud docker -- pull gcr.io/grpc-testing/grpc_interop_java:v1.9.9` followed by + - `docker_image=gcr.io/grpc-testing/grpc_interop_java:v1.9.9 tools/interop_matrix/testcases/java__master` +- Commit the change and create a PR to upstream/master. +- Trigger an adhoc run of interop matrix tests: https://fusion.corp.google.com/projectanalysis/summary/KOKORO/prod:grpc%2Fcore%2Fexperimental%2Flinux%2Fgrpc_interop_matrix_adhoc +- Once tests pass, request a PR review. +- (Optional) clean up the tmp directory to where grpc source is cloned at `/export/hda3/tmp/grpc_matrix/`. +For more details on each step, refer to sections below. + +## Instructions for adding new language/runtimes +- Create new `Dockerfile.template`, `build_interop.sh.template` for the language/runtime under `template/tools/dockerfile/`. +- Run `tools/buildgen/generate_projects.sh` to create corresponding files under `tools/dockerfile/`. +- Add language/runtimes to `client_matrix.py` following existing language/runtimes examples. +- Run `tools/interop_matrix/create_matrix_images.py` which will build (and upload) images to GCR. + +## Instructions for creating new test cases +- Create test cases by running `LANG= [RELEASE=] ./create_testcases.sh`. For example, + - `LANG=go ./create_testcases.sh` will generate `./testcases/go__master`, which is also a functional bash script. + - `LANG=go KEEP_IMAGE=1 ./create_testcases.sh` will generate `./testcases/go__master` and keep the local docker image so it can be invoked simply via `./testcases/go__master`. Note: remove local docker images manually afterwards with `docker rmi `. +- Stage and commit the generated test case file `./testcases/__`. + +## Instructions for running test cases against GCR images +- Run `tools/interop_matrix/run_interop_matrix_tests.py`. Useful options: + - `--release` specifies a git release tag. Defaults to `--release=all`. Make sure the GCR images with the tag have been created using `create_matrix_images.py` above. 
+ - `--language` specifies a language. Defaults to `--language=all`. + For example, to test all languages for all gRPC releases across all runtimes, do `tools/interop_matrix/run_interop_matrix_tests.py --release=all`. +- The output for all the test cases is recorded in a junit style xml file (defaults to 'report.xml'). + +## Instructions for running test cases against a GCR image manually +- Download docker image from GCR. For example: `gcloud docker -- pull gcr.io/grpc-testing/grpc_interop_go1.8:v1.16.0`. +- Run test cases by specifying `docker_image` variable inline with the test case script created above. +For example: + - `docker_image=gcr.io/grpc-testing/grpc_interop_go1.8:v1.16.0 ./testcases/go__master` will run go__master test cases against `go1.8` with gRPC release `v1.16.0` docker image in GCR. + +Note: +- File paths starting with `tools/` or `template/` are relative to the grpc repo root dir. File paths starting with `./` are relative to current directory (`tools/interop_matrix`). +- Creating and referencing images in GCR require read and write permission to Google Container Registry path gcr.io/grpc-testing. diff --git a/tools/interop_matrix/patches/README.md b/tools/interop_matrix/patches/README.md new file mode 100644 index 00000000..0c0893f6 --- /dev/null +++ b/tools/interop_matrix/patches/README.md @@ -0,0 +1,38 @@ +# Patches to grpc repo tags for the backwards compatibility interop tests + +This directory has patch files that can be applied to different tags +of the grpc git repo in order to run the interop tests for a specific +language based on that tag. + +For example, because the ruby interop tests do not run on the v1.0.1 tag out +of the box, but we still want to test compatibility of the 1.0.1 ruby release +with other versions, we can apply a patch to the v1.0.1 tag from this directory +that makes the necessary changes that are needed to run the ruby interop tests +from that tag.
We can then use that patch to build the docker image for the +ruby v1.0.1 interop tests. + +## How to add a new patch to this directory + +Patch files in this directory are meant to be applied to a git tag +with a `git apply` command. + +1. Under the `patches` directory, create a new subdirectory +titled `_` for the git tag being modified. + +2. `git checkout ` + +3. Make necessary modifications to the git repo at that tag. + +4. + +``` +git diff > ~/git_repo.patch +git checkout +cp ~/git_repo.patch tools/interop_matrix/patches/_/ +``` + +5. Edit the `LANGUAGE_RELEASE_MATRIX` in `client_matrix.py` for your language/tag +and add a `'patch': [,....]` entry to its `dictionary`. + +After doing this, the interop image creation script can apply that patch to the +tag with `git apply` before uploading to the test image repo. diff --git a/tools/profiling/microbenchmarks/README.md b/tools/profiling/microbenchmarks/README.md new file mode 100644 index 00000000..035888ee --- /dev/null +++ b/tools/profiling/microbenchmarks/README.md @@ -0,0 +1,4 @@ +Microbenchmarks +==== + +This directory contains helper scripts for the microbenchmark suites. diff --git a/tools/profiling/microbenchmarks/bm_diff/README.md b/tools/profiling/microbenchmarks/bm_diff/README.md new file mode 100644 index 00000000..caa47702 --- /dev/null +++ b/tools/profiling/microbenchmarks/bm_diff/README.md @@ -0,0 +1,116 @@ +The bm_diff Family +==== + +This family of python scripts can be incredibly useful for fast iteration over +different performance tweaks. The tools allow you to save performance data from +a baseline commit, then quickly compare data from your working branch to that +baseline data to see if you have made any performance wins. + +The tools operate with three concrete steps, which can be invoked separately, +or all together via the driver script, bm_main.py.
This readme will describe +the typical workflow for these scripts, then it will include sections on the +details of every script for advanced usage. + +## Normal Workflow + +Let's say you are working on a performance optimization for grpc_error. You have +made some significant changes and want to see some data. From your branch, run +(ensure everything is committed first): + +`tools/profiling/microbenchmarks/bm_diff/bm_main.py -b bm_error -l 5 -d master` + +This will build the `bm_error` binary on your branch, and then it will checkout +master and build it there too. It will then run these benchmarks 5 times each. +Lastly it will compute the statistically significant performance differences +between the two branches. This should show the nice performance wins your +changes have made. + +If you have already invoked bm_main with `-d master`, you should instead use +`-o` for subsequent runs. This allows the script to skip re-building and +re-running the unchanged master branch. For example: + +`tools/profiling/microbenchmarks/bm_diff/bm_main.py -b bm_error -l 5 -o` + +This will only build and run `bm_error` on your branch. It will then compare +the output to the saved runs from master. + +## Advanced Workflow + +If you have a deeper knowledge of these scripts, you can use them to do more +fine tuned benchmark comparisons. For example, you could build, run, and save +the benchmark output from two different base branches. Then you could diff both +of these baselines against your working branch to see how the different metrics +change. The rest of this doc goes over the details of what each of the +individual modules accomplishes. + +## bm_build.py + +This script builds the benchmarks. It takes in a name parameter, and will +store the binaries based on that. Both `opt` and `counter` configurations +will be used. The `opt` is used to get cpu_time and real_time, and the +`counters` build is used to track other metrics like allocs, atomic adds, +etc etc etc.
+ +For example, if you were to invoke (we assume everything is run from the +root of the repo): + +`tools/profiling/microbenchmarks/bm_diff/bm_build.py -b bm_error -n baseline` + +then the microbenchmark binaries will show up under +`bm_diff_baseline/{opt,counters}/bm_error` + +## bm_run.py + +This script runs the benchmarks. It takes a name parameter that must match the +name that was passed to `bm_build.py`. The script then runs the benchmark +multiple times (default is 20, can be toggled via the loops parameter). The +output is saved as `....json` + +For example, if you were to run: + +`tools/profiling/microbenchmarks/bm_diff/bm_run.py -b bm_error -b baseline -l 5` + +Then an example output file would be `bm_error.opt.baseline.0.json` + +## bm_diff.py + +This script takes in the output from two benchmark runs, computes the diff +between them, and prints any significant improvements or regressions. It takes +in two name parameters, old and new. These must have previously been built and +run. + +For example, assuming you had already built and run a 'baseline' microbenchmark +from master, and then you also built and ran a 'current' microbenchmark from +the branch you were working on, you could invoke: + +`tools/profiling/microbenchmarks/bm_diff/bm_diff.py -b bm_error -o baseline -n current -l 5` + +This would output the percent difference between your branch and master. + +## bm_main.py + +This is the driver script. It uses the previous three modules and does +everything for you. You pass in the benchmarks to be run, the number of loops, +number of CPUs to use, and the commit to compare to. Then the script will: +* Build the benchmarks at head, then checkout the branch to compare to and + build the benchmarks there +* Run both sets of microbenchmarks +* Run bm_diff.py to compare the two, outputs the difference. 
+ +For example, one might run: + +`tools/profiling/microbenchmarks/bm_diff/bm_main.py -b bm_error -l 5 -d master` + +This would compare the current branch's error benchmarks to master. + +This script is invoked by our infrastructure on every PR to protect against +regressions and demonstrate performance wins. + +However, if you are iterating over different performance tweaks quickly, it is +unnecessary to build and run the baseline commit every time. That is why we +provide a different flag in case you are sure that the baseline benchmark has +already been built and run. In that case use the --old flag to pass in the name +of the baseline. This will only build and run the current branch. For example: + +`tools/profiling/microbenchmarks/bm_diff/bm_main.py -b bm_error -l 5 -o old` + diff --git a/tools/remote_build/README.md b/tools/remote_build/README.md new file mode 100644 index 00000000..849ceb4f --- /dev/null +++ b/tools/remote_build/README.md @@ -0,0 +1,71 @@ +# Running Remote Builds with bazel + +This allows you to spawn gRPC C/C++ remote build and tests from your workstation with +configuration that's very similar to what's used by our CI Kokoro. + +Note that this will only work for gRPC team members (it requires access to the +remote build and execution cluster), others will need to rely on local test runs +and tests run by Kokoro CI. + + +## Prerequisites + +- See [Installing Bazel](https://docs.bazel.build/versions/master/install.html) for instructions how to install bazel on your system. + +- Setup application default credentials for running remote builds by following the ["Set credentials" section](https://cloud.google.com/remote-build-execution/docs/results-ui/getting-started-results-ui). 
(Note: for the ResultStore UI upload to work, you'll need a special kind of application default credentials, so if the build event upload doesn't work, doublecheck the instructions) + + +## Running remote build manually from dev workstation + +IMPORTANT: The OS from which you run the bazel command needs to always match your desired build & execution platform. If you want to run tests on linux, you need to run bazel from a linux machine, to execute tests on windows you need to be on windows etc. If you don't follow this guideline, the build might still appear like it's working, but you'll get nonsensical results (e.g. will be test configured as if on mac, but actually running on linux). + +### Linux + +For `opt` or `dbg` run this command: +``` +# manual run of bazel tests remotely on Foundry +bazel --bazelrc=tools/remote_build/manual.bazelrc test --config=opt //test/... +``` + +This also works for sanitizer runs (`asan`, `msan`, `tsan`, `ubsan`): +``` +# manual run of bazel tests remotely on Foundry with given sanitizer +bazel --bazelrc=tools/remote_build/manual.bazelrc test --config=asan //test/... +``` + +### Windows + +``` +# manual run of bazel tests remotely on RBE Windows (must be run from Windows machine) +bazel --bazelrc=tools/remote_build/windows.bazelrc test --config=windows_opt //test/... +``` + +NOTE: Unlike on Linux and Mac, the bazel version won't get autoselected for you, +so check that you're using the [right bazel version](https://github.com/grpc/grpc/blob/master/tools/bazel). + +### MacOS + +There is no such thing as Mac RBE cluster, so a real remote build on Macs is currently impossible. +The following setup will build and run test on you local mac machine, but will give +you the RBE-like look & feel (e.g. a results link will be generated and some extra configuration will +be used). 
+ +``` +# manual run of bazel tests on Mac (must be run from Mac machine) +# NOTE: it's not really a "remote execution", but uploads results to ResultStore +bazel --bazelrc=tools/remote_build/mac.bazelrc test --config=opt //test/... +``` + +NOTE: Because this is essentially a local run, you'll need to run start port server first (`tools/run_tests/start_port_server.py`) + +## Running local builds with bazel + +On all platforms, you can generally still use bazel builds & tests locally without any extra settings, but you might need to +start port server first (`tools/run_tests/start_port_server.py`) to be able to run the tests locally. + +E.g.: `bazel test --config=opt //test/...` + +## Bazel command line options + +Available command line options can be found in +[Bazel command line reference](https://docs.bazel.build/versions/master/command-line-reference.html) diff --git a/tools/run_tests/README.md b/tools/run_tests/README.md new file mode 100644 index 00000000..cab917ef --- /dev/null +++ b/tools/run_tests/README.md @@ -0,0 +1,53 @@ +# Overview + +This directory contains scripts that facilitate building and running tests. We are using python scripts as entrypoint for our +tests because that gives us the opportunity to run tests using the same commandline regardless of the platform you are using. + +# Unit tests (run_tests.py) + +Builds gRPC in given language and runs unit tests. Use `tools/run_tests/run_tests.py --help` for more help. + +###### Example +`tools/run_tests/run_tests.py -l csharp -c dbg` + +###### Useful options (among many others) +- `--use_docker` Builds a docker container containing all the prerequisites for given language and runs the tests under that container. +- `--build_only` Only build, do not run the tests. + +Note: If you get an error such as `ImportError: No module named httplib2`, then you may be missing some Python modules. Install the module listed in the error and try again. + +Note: some tests may be flaky. 
Check the "Issues" tab for known flakes and other issues. + +The full suite of unit tests will take many minutes to run. + +# Interop tests (run_interop_tests.py) + +Runs tests for cross-platform/cross-language interoperability. For more details, see [Interop tests descriptions](/doc/interop-test-descriptions.md) +The script is also capable of running interop tests for grpc-java and grpc-go, using sources checked out alongside the ones of the grpc repository. + +###### Example +`tools/run_tests/run_interop_tests.py -l csharp -s c++ --use_docker` (run interop tests with C# client and C++ server) + +Note: if you see an error like `no space left on device` when running the +interop tests using Docker, make sure that Docker is building the image files in +a location with sufficient disk space. + +# Performance benchmarks (run_performance_tests.py) + +Runs predefined benchmark scenarios for given languages. Besides the simple configuration of running all the scenarios locally, +the script also supports orchestrating test runs with client and server running on different machines and uploading the results +to BigQuery. + +###### Example +`tools/run_tests/run_performance_tests.py -l c++ node` + +###### Useful options +- `--regex` use regex to select particular scenarios to run. + +# Artifacts & Packages (task_runner.py) + +A generalized framework for running predefined tasks based on their labels. We use this to building binary artifacts & distrib packages and testing them) + +###### Example +`tools/run_tests/task_runner.py -f python artifact linux x64` (build tasks with labels `python`, `artifact`, `linux`, and `x64`) + diff --git a/tools/run_tests/performance/README.md b/tools/run_tests/performance/README.md new file mode 100644 index 00000000..a912302f --- /dev/null +++ b/tools/run_tests/performance/README.md @@ -0,0 +1,462 @@ +# Overview of performance test suite + +For design of the tests, see https://grpc.io/docs/guides/benchmarking. 
+ +This document contains documentation of on how to run gRPC end-to-end benchmarks +using the gRPC OSS benchmarks framework (recommended) or how to run them +manually (for experts only). + +## Approach 1: Use gRPC OSS benchmarks framework (Recommended) + +### gRPC OSS benchmarks + +The scripts in this section generate LoadTest configurations for the GKE-based +gRPC OSS benchmarks framework. This framework is stored in a separate +repository, [grpc/test-infra]. + +These scripts, together with tools defined in [grpc/test-infra], are used in the +continuous integration setup defined in [grpc_e2e_performance_gke.sh] and +[grpc_e2e_performance_v2.sh]. + +#### Generating scenarios + +The benchmarks framework uses the same test scenarios as the legacy one. The +script [scenario_config_exporter.py](./scenario_config_exporter.py) can be used +to export these scenarios to files, and also to count and analyze existing +scenarios. + +The language(s) and category of the scenarios are of particular importance to +the tests. Continuous runs will typically run tests in the `scalable` category. + +The following example counts scenarios in the `scalable` category: + +``` +$ ./tools/run_tests/performance/scenario_config_exporter.py --count_scenarios --category=scalable +Scenario count for all languages (category: scalable): +Count Language Client Server Categories + 77 c++ scalable + 19 python_asyncio scalable + 16 java scalable + 12 go scalable + 12 node node scalable + 12 node_purejs node scalable + 9 csharp scalable + 7 python scalable + 5 ruby scalable + 4 csharp c++ scalable + 4 php7 c++ scalable + 4 php7_protobuf_c c++ scalable + 3 python_asyncio c++ scalable + 2 ruby c++ scalable + 2 python c++ scalable + 1 csharp c++ scalable + + 189 total scenarios (category: scalable) +``` + +Client and server languages are only set for cross-language scenarios, where the +client or server language do not match the scenario language. 
+ +#### Generating load test configurations + +The benchmarks framework uses LoadTest resources configured by YAML files. Each +LoadTest resource specifies a driver, a server, and one or more clients to run +the test. Each test runs one scenario. The scenario configuration is embedded in +the LoadTest configuration. Example configurations for various languages can be +found here: + +https://github.com/grpc/test-infra/tree/master/config/samples + +The script [loadtest_config.py](./loadtest_config.py) generates LoadTest +configurations for tests running a set of scenarios. The configurations are +written in multipart YAML format, either to a file or to stdout. Each +configuration contains a single embedded scenario. + +The LoadTest configurations are generated from a template. Any configuration can +be used as a template, as long as it contains the languages required by the set +of scenarios we intend to run (for instance, if we are generating configurations +to run go scenarios, the template must contain a go client and a go server; if +we are generating configurations for cross-language scenarios that need a go +client and a C++ server, the template must also contain a C++ server; and the +same for all other languages). + +The LoadTests specified in the script output all have unique names and can be +run by applying the test to a cluster running the LoadTest controller with +`kubectl apply`: + +``` +$ kubectl apply -f loadtest_config.yaml +``` + +> Note: The most common way of running tests generated by this script is to use +> a _test runner_. For details, see [running tests](#running-tests). + +A basic template for generating tests in various languages can be found here: +[loadtest_template_basic_all_languages.yaml](./templates/loadtest_template_basic_all_languages.yaml). 
+The following example generates configurations for go and Java tests using this
+template, including tests against C++ clients and servers, and running each test
+twice:
+
+```
+$ ./tools/run_tests/performance/loadtest_config.py -l go -l java \
+ -t ./tools/run_tests/performance/templates/loadtest_template_basic_all_languages.yaml \
+ -s client_pool=workers-8core -s driver_pool=drivers \
+ -s server_pool=workers-8core \
+ -s big_query_table=e2e_benchmarks.experimental_results \
+ -s timeout_seconds=3600 --category=scalable \
+ -d --allow_client_language=c++ --allow_server_language=c++ \
+ --runs_per_test=2 -o ./loadtest.yaml
+```
+
+The script `loadtest_config.py` takes the following options:
+
+- `-l`, `--language`
Language to benchmark. May be repeated. +- `-t`, `--template`
Template file. A template is a configuration file that
+ may contain multiple client and server configurations, and may also include
+ substitution keys.
+- `-s`, `--substitution` Substitution keys, in the format `key=value`. These
+ keys are substituted while processing the template. Environment variables that
+ are set by the load test controller at runtime are ignored by default
+ (`DRIVER_PORT`, `KILL_AFTER`, `POD_TIMEOUT`). The user can override this
+ behavior by specifying these variables as keys.
+- `-p`, `--prefix`
Test names consist of a prefix joined with a uuid with a
+ dash. Test names are stored in `metadata.name`. The prefix is also added as
+ the `prefix` label in `metadata.labels`. The prefix defaults to the user name
+ if not set.
+- `-u`, `--uniquifier_element`
Uniquifier elements may be passed to the test + to make the test name unique. This option may be repeated to add multiple + elements. The uniquifier elements (plus a date string and a run index, if + applicable) are joined with a dash to form a _uniquifier_. The test name uuid + is derived from the scenario name and the uniquifier. The uniquifier is also + added as the `uniquifier` annotation in `metadata.annotations`. +- `-d`
This option is a shorthand for the addition of a date string as a + uniquifier element. +- `-a`, `--annotation`
Metadata annotation to be stored in + `metadata.annotations`, in the form key=value. May be repeated. +- `-r`, `--regex`
Regex to select scenarios to run. Each scenario is + embedded in a LoadTest configuration containing a client and server of the + language(s) required for the test. Defaults to `.*`, i.e., select all + scenarios. +- `--category`
Select scenarios of a specified _category_, or of all + categories. Defaults to `all`. Continuous runs typically run tests in the + `scalable` category. +- `--allow_client_language`
Allows cross-language scenarios where the client + is of a specified language, different from the scenario language. This is + typically `c++`. This flag may be repeated. +- `--allow_server_language`
Allows cross-language scenarios where the server + is of a specified language, different from the scenario language. This is + typically `node` or `c++`. This flag may be repeated. +- `--instances_per_client`
This option generates multiple instances of the + clients for each test. The instances are named with the name of the client + combined with an index (or only an index, if no name is specified). If the + template specifies more than one client for a given language, it must also + specify unique names for each client. In the most common case, the template + contains only one unnamed client for each language, and the instances will be + named `0`, `1`, ... +- `--runs_per_test`
This option specifies that each test should be repeated + `n` times, where `n` is the value of the flag. If `n` > 1, the index of each + test run is added as a uniquifier element for that run. +- `-o`, `--output`
Output file name. The LoadTest configurations are added + to this file, in multipart YAML format. Output is streamed to `sys.stdout` if + not set. + +The script adds labels and annotations to the metadata of each LoadTest +configuration: + +The following labels are added to `metadata.labels`: + +- `language`
The language of the LoadTest scenario. +- `prefix`
The prefix used in `metadata.name`. + +The following annotations are added to `metadata.annotations`: + +- `scenario`
The name of the LoadTest scenario. +- `uniquifier`
The uniquifier used to generate the LoadTest name, including + the run index if applicable. + +[Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) +can be used in selectors in resource queries. Adding the prefix, in particular, +allows the user (or an automation script) to select the resources started from a +given run of the config generator. + +[Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) +contain additional information that is available to the user (or an automation +script) but is not indexed and cannot be used to select objects. Scenario name +and uniquifier are added to provide the elements of the LoadTest name uuid in +human-readable form. Additional annotations may be added later for automation. + +#### Concatenating load test configurations + +The LoadTest configuration generator can process multiple languages at a time, +assuming that they are supported by the template. The convenience script +[loadtest_concat_yaml.py](./loadtest_concat_yaml.py) is provided to concatenate +several YAML files into one, so configurations generated by multiple generator +invocations can be concatenated into one and run with a single command. The +script can be invoked as follows: + +``` +$ loadtest_concat_yaml.py -i infile1.yaml infile2.yaml -o outfile.yaml +``` + +#### Generating load test examples + +The script [loadtest_examples.sh](./loadtest_examples.sh) is provided to +generate example load test configurations in all supported languages. This +script takes only one argument, which is the output directory where the +configurations will be created. The script produces a set of basic +configurations, as well as a set of template configurations intended to be used +with prebuilt images. + +The [examples](https://github.com/grpc/test-infra/tree/master/config/samples) in +the repository [grpc/test-infra] are generated by this script. 
+ +#### Generating configuration templates + +The script [loadtest_template.py](./loadtest_template.py) generates a load test +configuration template from a set of load test configurations. The source files +may be load test configurations or load test configuration templates. The +generated template supports all languages supported in any of the input +configurations or templates. + +The example template in +[loadtest_template_basic_template_all_languages.yaml](./templates/loadtest_template_basic_all_languages.yaml) +was generated from the example configurations in [grpc/test-infra] by the +following command: + +``` +$ ./tools/run_tests/performance/loadtest_template.py \ + -i ../test-infra/config/samples/*_example_loadtest.yaml \ + --inject_client_pool --inject_server_pool \ + --inject_big_query_table --inject_timeout_seconds \ + -o ./tools/run_tests/performance/templates/loadtest_template_basic_all_languages.yaml \ + --name basic_all_languages +``` + +The example template with prebuilt images in +[loadtest_template_prebuilt_all_languages.yaml](./templates/loadtest_template_prebuilt_all_languages.yaml) +was generated by the following command: + +``` +$ ./tools/run_tests/performance/loadtest_template.py \ + -i ../test-infra/config/samples/templates/*_example_loadtest_with_prebuilt_workers.yaml \ + --inject_client_pool --inject_driver_image --inject_driver_pool \ + --inject_server_pool --inject_big_query_table --inject_timeout_seconds \ + -o ./tools/run_tests/performance/templates/loadtest_template_prebuilt_all_languages.yaml \ + --name prebuilt_all_languages +``` + +The script `loadtest_template.py` takes the following options: + +- `-i`, `--inputs`
Space-separated list of the names of input files + containing LoadTest configurations. May be repeated. +- `-o`, `--output`
Output file name. Outputs to `sys.stdout` if not set. +- `--inject_client_pool`
If this option is set, the pool attribute of all + clients in `spec.clients` is set to `${client_pool}`, for later substitution. +- `--inject_driver_image`
If this option is set, the image attribute of the + driver(s) in `spec.drivers` is set to `${driver_image}`, for later + substitution. +- `--inject_driver_pool`
If this attribute is set, the pool attribute of the + driver(s) is set to `${driver_pool}`, for later substitution. +- `--inject_server_pool`
If this option is set, the pool attribute of all + servers in `spec.servers` is set to `${server_pool}`, for later substitution. +- `--inject_big_query_table`
If this option is set, + spec.results.bigQueryTable is set to `${big_query_table}`. +- `--inject_timeout_seconds`
If this option is set, `spec.timeoutSeconds` is + set to `${timeout_seconds}`. +- `--inject_ttl_seconds`
If this option is set, `spec.ttlSeconds` is set to + `${ttl_seconds}`. +- `-n`, `--name`
Name to be set in `metadata.name`. +- `-a`, `--annotation`
Metadata annotation to be stored in + `metadata.annotations`, in the form key=value. May be repeated. + +The options that inject substitution keys are the most useful for template +reuse. When running tests on different node pools, it becomes necessary to set +the pool, and usually also to store the data on a different table. When running +as part of a larger collection of tests, it may also be necessary to adjust test +timeout and time-to-live, to ensure that all tests have time to complete. + +The template name is replaced again by `loadtest_config.py`, and so is set only +as a human-readable memo. + +Annotations, on the other hand, are passed on to the test configurations, and +may be set to values or to substitution keys in themselves, allowing future +automation scripts to process the tests generated from these configurations in +different ways. + +#### Running tests + +Collections of tests generated by `loadtest_config.py` are intended to be run +with a test runner. The code for the test runner is stored in a separate +repository, [grpc/test-infra]. + +The test runner applies the tests to the cluster, and monitors the tests for +completion while they are running. The test runner can also be set up to run +collections of tests in parallel on separate node pools, and to limit the number +of tests running in parallel on each pool. + +For more information, see the +[tools README](https://github.com/grpc/test-infra/blob/master/tools/README.md) +in [grpc/test-infra]. + +For usage examples, see the continuous integration setup defined in +[grpc_e2e_performance_gke.sh] and [grpc_e2e_performance_v2.sh]. 
+ +[grpc/test-infra]: https://github.com/grpc/test-infra +[grpc_e2e_performance_gke.sh]: ../../internal_ci/linux/grpc_e2e_performance_gke.sh +[grpc_e2e_performance_v2.sh]: ../../internal_ci/linux/grpc_e2e_performance_v2.sh + +## Approach 2: Running benchmarks locally via legacy tooling (still useful sometimes) + +This approach is much more involved than using the gRPC OSS benchmarks framework +(see above), but can still be useful for hands-on low-level experiments +(especially when you know what you are doing). + +### Prerequisites for running benchmarks manually: + +In general the benchmark workers and driver build scripts expect +[linux_performance_worker_init.sh](../../gce/linux_performance_worker_init.sh) +to have been ran already. + +### To run benchmarks locally: + +- From the grpc repo root, start the + [run_performance_tests.py](../run_performance_tests.py) runner script. + +### On remote machines, to start the driver and workers manually: + +The [run_performance_test.py](../run_performance_tests.py) top-level runner +script can also be used with remote machines, but for e.g., profiling the +server, it might be useful to run workers manually. + +1. You'll need a "driver" and separate "worker" machines. For example, you might + use one GCE "driver" machine and 3 other GCE "worker" machines that are in + the same zone. + +2. Connect to each worker machine and start up a benchmark worker with a + "driver_port". + +- For example, to start the grpc-go benchmark worker: + [grpc-go worker main.go](https://github.com/grpc/grpc-go/blob/master/benchmark/worker/main.go) + --driver_port + +#### Commands to start workers in different languages: + +- Note that these commands are what the top-level + [run_performance_test.py](../run_performance_tests.py) script uses to build + and run different workers through the + [build_performance.sh](./build_performance.sh) script and "run worker" scripts + (such as the [run_worker_java.sh](./run_worker_java.sh)). 
+ +##### Running benchmark workers for C-core wrapped languages (C++, Python, C#, Node, Ruby): + +- These are more simple since they all live in the main grpc repo. + +``` +$ cd +$ tools/run_tests/performance/build_performance.sh +$ tools/run_tests/performance/run_worker_.sh +``` + +- Note that there is one "run_worker" script per language, e.g., + [run_worker_csharp.sh](./run_worker_csharp.sh) for c#. + +##### Running benchmark workers for gRPC-Java: + +- You'll need the [grpc-java](https://github.com/grpc/grpc-java) repo. + +``` +$ cd +$ ./gradlew -PskipCodegen=true -PskipAndroid=true :grpc-benchmarks:installDist +$ benchmarks/build/install/grpc-benchmarks/bin/benchmark_worker --driver_port +``` + +##### Running benchmark workers for gRPC-Go: + +- You'll need the [grpc-go repo](https://github.com/grpc/grpc-go) + +``` +$ cd /benchmark/worker && go install +$ # if profiling, it might be helpful to turn off inlining by building with "-gcflags=-l" +$ $GOPATH/bin/worker --driver_port +``` + +#### Build the driver: + +- Connect to the driver machine (if using a remote driver) and from the grpc + repo root: + +``` +$ tools/run_tests/performance/build_performance.sh +``` + +#### Run the driver: + +1. Get the 'scenario_json' relevant for the scenario to run. Note that "scenario + json" configs are generated from [scenario_config.py](./scenario_config.py). + The [driver](../../../test/cpp/qps/qps_json_driver.cc) takes a list of these + configs as a json string of the form: `{scenario: }` + in its `--scenarios_json` command argument. One quick way to get a valid json + string to pass to the driver is by running the + [run_performance_tests.py](./run_performance_tests.py) locally and copying + the logged scenario json command arg. + +2. From the grpc repo root: + +- Set `QPS_WORKERS` environment variable to a comma separated list of worker + machines. 
Note that the driver will start the "benchmark server" on the first + entry in the list, and the rest will be told to run as clients against the + benchmark server. + +Example running and profiling of go benchmark server: + +``` +$ export QPS_WORKERS=:<10000>,,10000,:10000 +$ bins/opt/qps_json_driver --scenario_json='' +``` + +### Example profiling commands + +While running the benchmark, a profiler can be attached to the server. + +Example to count syscalls in grpc-go server during a benchmark: + +- Connect to server machine and run: + +``` +$ netstat -tulpn | grep # to get pid of worker +$ perf stat -p -e syscalls:sys_enter_write # stop after test complete +``` + +Example memory profile of grpc-go server, with `go tools pprof`: + +- After a run is done on the server, see its alloc profile with: + +``` +$ go tool pprof --text --alloc_space http://localhost:/debug/heap +``` + +### Configuration environment variables: + +- QPS_WORKER_CHANNEL_CONNECT_TIMEOUT + + Consuming process: qps_worker + + Type: integer (number of seconds) + + This can be used to configure the amount of time that benchmark clients wait + for channels to the benchmark server to become ready. This is useful in + certain benchmark environments in which the server can take a long time to + become ready. Note: if setting this to a high value, then the scenario config + under test should probably also have a large "warmup_seconds". + +- QPS_WORKERS + + Consuming process: qps_json_driver + + Type: comma separated list of host:port + + Set this to a comma separated list of QPS worker processes/machines. Each + scenario in a scenario config has specifies a certain number of servers, + `num_servers`, and the driver will start "benchmark servers"'s on the first + `num_server` `host:port` pairs in the comma separated list. The rest will be + told to run as clients against the benchmark server. 
diff --git a/tools/run_tests/xds_k8s_test_driver/README.md b/tools/run_tests/xds_k8s_test_driver/README.md new file mode 100644 index 00000000..f64ea376 --- /dev/null +++ b/tools/run_tests/xds_k8s_test_driver/README.md @@ -0,0 +1,416 @@ +# xDS Kubernetes Interop Tests + +Proxyless Security Mesh Interop Tests executed on Kubernetes. + +### Experimental +Work in progress. Internal APIs may and will change. Please refrain from making +changes to this codebase at the moment. + +### Stabilization roadmap +- [ ] Replace retrying with tenacity +- [x] Generate namespace for each test to prevent resource name conflicts and + allow running tests in parallel +- [x] Security: run server and client in separate namespaces +- [ ] Make framework.infrastructure.gcp resources [first-class + citizen](https://en.wikipedia.org/wiki/First-class_citizen), support + simpler CRUD +- [x] Security: manage `roles/iam.workloadIdentityUser` role grant lifecycle for + dynamically-named namespaces +- [ ] Restructure `framework.test_app` and `framework.xds_k8s*` into a module + containing xDS-interop-specific logic +- [ ] Address inline TODOs in code +- [x] Improve README.md documentation, explain helpers in bin/ folder + +## Installation + +#### Requirements +1. Python v3.6+ +2. [Google Cloud SDK](https://cloud.google.com/sdk/docs/install) +3. `kubectl` + +`kubectl` can be installed via `gcloud components install kubectl`, or system package manager: https://kubernetes.io/docs/tasks/tools/#kubectl + +#### Configure GKE cluster +This is an example outlining minimal requirements to run `tests.baseline_test`. +For more details, and for the setup for security tests, see +["Setting up Traffic Director service security with proxyless gRPC"](https://cloud.google.com/traffic-director/docs/security-proxyless-setup) + user guide. + +Update gloud sdk: +```shell +gcloud -q components update +``` + +Pre-populate environment variables for convenience. 
To find project id, refer to
+[Identifying projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects).
+```shell
+export PROJECT_ID="your-project-id"
+export PROJECT_NUMBER=$(gcloud projects describe "${PROJECT_ID}" --format="value(projectNumber)")
+# Compute Engine default service account
+export GCE_SA="${PROJECT_NUMBER}-compute@developer.gserviceaccount.com"
+# The prefix to name GCP resources used by the framework
+export RESOURCE_PREFIX="xds-k8s-interop-tests"
+
+# The name of your cluster, e.g. xds-k8s-test-cluster
+export CLUSTER_NAME="${RESOURCE_PREFIX}-cluster"
+# The zone of your cluster, e.g. us-central1-a
+export ZONE="us-central1-a"
+# Dedicated GCP Service Account to use with workload identity.
+export WORKLOAD_SA_NAME="${RESOURCE_PREFIX}"
+export WORKLOAD_SA_EMAIL="${WORKLOAD_SA_NAME}@${PROJECT_ID}.iam.gserviceaccount.com"
+```
+
+##### Create the cluster
+Minimal requirements: [VPC-native](https://cloud.google.com/traffic-director/docs/security-proxyless-setup)
+cluster with [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) enabled.
+```shell
+gcloud container clusters create "${CLUSTER_NAME}" \
+ --scopes=cloud-platform \
+ --zone="${ZONE}" \
+ --enable-ip-alias \
+ --workload-pool="${PROJECT_ID}.svc.id.goog" \
+ --enable-workload-certificates \
+ --workload-metadata=GKE_METADATA \
+ --tags=allow-health-checks
+```
+
+For security tests you also need to create CAs and configure the cluster to use those CAs
+as described
+[here](https://cloud.google.com/traffic-director/docs/security-proxyless-setup#configure-cas).
+
+##### Create the firewall rule
+Allow [health checking mechanisms](https://cloud.google.com/traffic-director/docs/set-up-proxyless-gke#creating_the_health_check_firewall_rule_and_backend_service)
+to query the workloads' health.
+This step can be skipped if the driver is executed with `--ensure_firewall`.
+```shell +gcloud compute firewall-rules create "${RESOURCE_PREFIX}-allow-health-checks" \ + --network=default --action=allow --direction=INGRESS \ + --source-ranges="35.191.0.0/16,130.211.0.0/22" \ + --target-tags=allow-health-checks \ + --rules=tcp:8080-8100 +``` + +##### Setup GCP Service Account + +Create dedicated GCP Service Account to use +with [workload identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity). + +```shell +gcloud iam service-accounts create "${WORKLOAD_SA_NAME}" \ + --display-name="xDS K8S Interop Tests Workload Identity Service Account" +``` + +Enable the service account to [access the Traffic Director API](https://cloud.google.com/traffic-director/docs/prepare-for-envoy-setup#enable-service-account). +```shell +gcloud projects add-iam-policy-binding "${PROJECT_ID}" \ + --member="serviceAccount:${WORKLOAD_SA_EMAIL}" \ + --role="roles/trafficdirector.client" +``` + +##### Allow access to images +The test framework needs read access to the client and server images and the bootstrap +generator image. You may have these images in your project but if you want to use these +from the grpc-testing project you will have to grant the necessary access to these images +using https://cloud.google.com/container-registry/docs/access-control#grant or a +gsutil command. For example, to grant access to images stored in `grpc-testing` project GCR, run: + +```sh +gsutil iam ch "serviceAccount:${GCE_SA}:objectViewer" gs://artifacts.grpc-testing.appspot.com/ +``` + +##### Allow test driver to configure workload identity automatically +Test driver will automatically grant `roles/iam.workloadIdentityUser` to +allow the Kubernetes service account to impersonate the dedicated GCP workload +service account (corresponds to the step 5 +of [Authenticating to Google Cloud](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#authenticating_to)). 
+This action requires the test framework to have `iam.serviceAccounts.create` +permission on the project. + +If you're running test framework locally, and you have `roles/owner` to your +project, **you can skip this step**. +If you're configuring the test framework to run on a CI: use `roles/owner` +account once to allow test framework to grant `roles/iam.workloadIdentityUser`. + +```shell +# Assuming CI is using Compute Engine default service account. +gcloud projects add-iam-policy-binding "${PROJECT_ID}" \ + --member="serviceAccount:${GCE_SA}" \ + --role="roles/iam.serviceAccountAdmin" \ + --condition-from-file=<(cat <<-END +--- +title: allow_workload_identity_only +description: Restrict serviceAccountAdmin to granting role iam.workloadIdentityUser +expression: |- + api.getAttribute('iam.googleapis.com/modifiedGrantsByRole', []) + .hasOnly(['roles/iam.workloadIdentityUser']) +END +) +``` + +##### Configure GKE cluster access +```shell +# Unless you're using GCP VM with preconfigured Application Default Credentials, acquire them for your user +gcloud auth application-default login + +# Configuring GKE cluster access for kubectl +gcloud container clusters get-credentials "your_gke_cluster_name" --zone "your_gke_cluster_zone" + +# Save generated kube context name +export KUBE_CONTEXT="$(kubectl config current-context)" +``` + +#### Install python dependencies + +```shell +# Create python virtual environment +python3.6 -m venv venv + +# Activate virtual environment +. ./venv/bin/activate + +# Install requirements +pip install -r requirements.txt + +# Generate protos +python -m grpc_tools.protoc --proto_path=../../../ \ + --python_out=. --grpc_python_out=. \ + src/proto/grpc/testing/empty.proto \ + src/proto/grpc/testing/messages.proto \ + src/proto/grpc/testing/test.proto +``` + +# Basic usage + +### xDS Baseline Tests + +Test suite meant to confirm that basic xDS features work as expected. 
Executing +it before other test suites will help to identify whether test failure related +to specific features under test, or caused by unrelated infrastructure +disturbances. + +The client and server images are created based on Git commit hashes, but not +every single one of them. It is triggered nightly and per-release. For example, +the commit we are using below (`d22f93e1ade22a1e026b57210f6fc21f7a3ca0cf`) comes +from branch `v1.37.x` in `grpc-java` repo. + +```shell +# Help +python -m tests.baseline_test --help +python -m tests.baseline_test --helpfull + +# Run on grpc-testing cluster +python -m tests.baseline_test \ + --flagfile="config/grpc-testing.cfg" \ + --kube_context="${KUBE_CONTEXT}" \ + --server_image="gcr.io/grpc-testing/xds-interop/java-server:d22f93e1ade22a1e026b57210f6fc21f7a3ca0cf" \ + --client_image="gcr.io/grpc-testing/xds-interop/java-client:d22f93e1ade22a1e026b57210f6fc21f7a3ca0cf" +``` + +### xDS Security Tests +```shell +# Help +python -m tests.security_test --help +python -m tests.security_test --helpfull + +# Run on grpc-testing cluster +python -m tests.security_test \ + --flagfile="config/grpc-testing.cfg" \ + --kube_context="${KUBE_CONTEXT}" \ + --server_image="gcr.io/grpc-testing/xds-interop/java-server:d22f93e1ade22a1e026b57210f6fc21f7a3ca0cf" \ + --client_image="gcr.io/grpc-testing/xds-interop/java-client:d22f93e1ade22a1e026b57210f6fc21f7a3ca0cf" +``` + +## Local development +This test driver allows running tests locally against remote GKE clusters, right +from your dev environment. You need: + +1. Follow [installation](#installation) instructions +2. Authenticated `gcloud` +3. `kubectl` context (see [Configure GKE cluster access](#configure-gke-cluster-access)) +4. Run tests with `--debug_use_port_forwarding` argument. The test driver + will automatically start and stop port forwarding using + `kubectl` subprocesses. (experimental) + +### Making changes to the driver +1. 
Install additional dev packages: `pip install -r requirements-dev.txt` +2. Use `./bin/yapf.sh` and `./bin/isort.sh` helpers to auto-format code. + +### Setup test configuration + +There are many arguments to be passed into the test run. You can save the +arguments to a config file ("flagfile") for your development environment. +Use [`config/local-dev.cfg.example`](https://github.com/grpc/grpc/blob/master/tools/run_tests/xds_k8s_test_driver/config/local-dev.cfg.example) +as a starting point: + +```shell +cp config/local-dev.cfg.example config/local-dev.cfg +``` + +Learn more about flagfiles in [abseil documentation](https://abseil.io/docs/python/guides/flags#a-note-about---flagfile). + +### Helper scripts +You can use interop xds-k8s [`bin/`](https://github.com/grpc/grpc/tree/master/tools/run_tests/xds_k8s_test_driver/bin) +scripts to configure TD, start k8s instances step-by-step, and keep them alive +for as long as you need. + +* To run helper scripts using local config: + * `python -m bin.script_name --flagfile=config/local-dev.cfg` + * `./run.sh bin/script_name.py` automatically appends the flagfile +* Use `--help` to see script-specific argument +* Use `--helpfull` to see all available argument + +#### Overview +```shell +# Helper tool to configure Traffic Director with different security options +python -m bin.run_td_setup --help + +# Helper tools to run the test server, client (with or without security) +python -m bin.run_test_server --help +python -m bin.run_test_client --help + +# Helper tool to verify different security configurations via channelz +python -m bin.run_channelz --help +``` + +#### `./run.sh` helper +Use `./run.sh` to execute helper scripts and tests with `config/local-dev.cfg`. 
+ +```sh +USAGE: ./run.sh script_path [arguments] + script_path: path to python script to execute, relative to driver root folder + arguments ...: arguments passed to program in sys.argv + +ENVIRONMENT: + XDS_K8S_CONFIG: file path to the config flagfile, relative to + driver root folder. Default: config/local-dev.cfg + Will be appended as --flagfile="config_absolute_path" argument + XDS_K8S_DRIVER_VENV_DIR: the path to python virtual environment directory + Default: $XDS_K8S_DRIVER_DIR/venv +DESCRIPTION: +This tool performs the following: +1) Ensures python virtual env installed and activated +2) Exports test driver root in PYTHONPATH +3) Automatically appends --flagfile="\$XDS_K8S_CONFIG" argument + +EXAMPLES: +./run.sh bin/run_td_setup.py --help +./run.sh bin/run_td_setup.py --helpfull +XDS_K8S_CONFIG=./path-to-flagfile.cfg ./run.sh bin/run_td_setup.py --resource_suffix=override-suffix +./run.sh tests/baseline_test.py +./run.sh tests/security_test.py --verbosity=1 --logger_levels=__main__:DEBUG,framework:DEBUG +./run.sh tests/security_test.py SecurityTest.test_mtls --nocheck_local_certs +``` + +### Regular workflow +```shell +# Setup Traffic Director +./run.sh bin/run_td_setup.py + +# Start test server +./run.sh bin/run_test_server.py + +# Add test server to the backend service +./run.sh bin/run_td_setup.py --cmd=backends-add + +# Start test client +./run.sh bin/run_test_client.py +``` + +### Secure workflow +```shell +# Setup Traffic Director in mtls. 
See --help for all options +./run.sh bin/run_td_setup.py --security=mtls + +# Start test server in a secure mode +./run.sh bin/run_test_server.py --secure + +# Add test server to the backend service +./run.sh bin/run_td_setup.py --cmd=backends-add + +# Start test client in a secure more --secure +./run.sh bin/run_test_client.py --secure +``` + +### Sending RPCs +#### Start port forwarding +```shell +# Client: all services always on port 8079 +kubectl port-forward deployment.apps/psm-grpc-client 8079 + +# Server regular mode: all grpc services on port 8080 +kubectl port-forward deployment.apps/psm-grpc-server 8080 +# OR +# Server secure mode: TestServiceImpl is on 8080, +kubectl port-forward deployment.apps/psm-grpc-server 8080 +# everything else (channelz, healthcheck, CSDS) on 8081 +kubectl port-forward deployment.apps/psm-grpc-server 8081 +``` + +#### Send RPCs with grpccurl +```shell +# 8081 if security enabled +export SERVER_ADMIN_PORT=8080 + +# List server services using reflection +grpcurl --plaintext 127.0.0.1:$SERVER_ADMIN_PORT list +# List client services using reflection +grpcurl --plaintext 127.0.0.1:8079 list + +# List channels via channelz +grpcurl --plaintext 127.0.0.1:$SERVER_ADMIN_PORT grpc.channelz.v1.Channelz.GetTopChannels +grpcurl --plaintext 127.0.0.1:8079 grpc.channelz.v1.Channelz.GetTopChannels + +# Send GetClientStats to the client +grpcurl --plaintext -d '{"num_rpcs": 10, "timeout_sec": 30}' 127.0.0.1:8079 \ + grpc.testing.LoadBalancerStatsService.GetClientStats +``` + +### Cleanup +* First, make sure to stop port forwarding, if any +* Run `./bin/cleanup.sh` + +##### Partial cleanup +You can run commands below to stop/start, create/delete resources however you want. +Generally, it's better to remove resources in the opposite order of their creation. 
+ +Cleanup regular resources: +```shell +# Cleanup TD resources +./run.sh bin/run_td_setup.py --cmd=cleanup +# Stop test client +./run.sh bin/run_test_client.py --cmd=cleanup +# Stop test server, and remove the namespace +./run.sh bin/run_test_server.py --cmd=cleanup --cleanup_namespace +``` + +Cleanup regular and security-specific resources: +```shell +# Cleanup TD resources, with security +./run.sh bin/run_td_setup.py --cmd=cleanup --security=mtls +# Stop test client (secure) +./run.sh bin/run_test_client.py --cmd=cleanup --secure +# Stop test server (secure), and remove the namespace +./run.sh bin/run_test_server.py --cmd=cleanup --cleanup_namespace --secure +``` + +In addition, here's some other helpful partial cleanup commands: +```shell +# Remove all backends from the backend services +./run.sh bin/run_td_setup.py --cmd=backends-cleanup + +# Stop the server, but keep the namespace +./run.sh bin/run_test_server.py --cmd=cleanup --nocleanup_namespace +``` + +### Known errors +#### Error forwarding port +If you stopped a test with `ctrl+c`, while using `--debug_use_port_forwarding`, +you might see an error like this: + +> `framework.infrastructure.k8s.PortForwardingError: Error forwarding port, unexpected output Unable to listen on port 8081: Listeners failed to create with the following errors: [unable to create listener: Error listen tcp4 127.0.0.1:8081: bind: address already in use]` + +Unless you're running `kubectl port-forward` manually, it's likely that `ctrl+c` +interrupted python before it could clean up subprocesses. + +You can do `ps aux | grep port-forward` and then kill the processes by id, +or with `killall kubectl`