From 31d0611d0ffb982414974adaf6940afd1863fa95 Mon Sep 17 00:00:00 2001 From: "tianqian.zyf" <445188383@qq.com> Date: Thu, 27 Sep 2018 22:56:57 +0800 Subject: [PATCH] ci: Add spell checker to ci (#4439) Correct commonly misspelled English words Signed-off-by: zyfjeff --- .circleci/config.yml | 1 + CONTRIBUTING.md | 2 +- api/envoy/api/v2/listener/listener.proto | 2 +- .../filter/http/jwt_authn/v2alpha/README.md | 2 +- .../v2/http_connection_manager.proto | 2 +- api/envoy/config/rbac/v2alpha/rbac.proto | 4 +- bazel/PPROF.md | 2 +- bazel/get_workspace_status | 2 +- bazel/repositories.bzl | 2 +- ci/README.md | 8 +- ci/do_ci.sh | 12 ++- ci/do_circle_ci.sh | 4 +- ci/run_envoy_docker.sh | 2 +- .../grpc_json_transcoder_filter.rst | 2 +- docs/root/configuration/secret.rst | 2 +- .../http_connection_management.rst | 2 +- docs/root/operations/admin.rst | 4 +- include/envoy/buffer/buffer.h | 2 +- include/envoy/http/filter.h | 2 +- include/envoy/network/connection.h | 2 +- include/envoy/network/transport_socket.h | 2 +- include/envoy/router/router.h | 2 +- include/envoy/server/hot_restart.h | 2 +- .../common/access_log/access_log_formatter.h | 4 +- source/common/common/perf_annotation.h | 4 +- source/common/common/utility.h | 2 +- source/common/event/dispatcher_impl.cc | 2 +- source/common/filesystem/filesystem_impl.h | 2 +- source/common/http/codec_client.h | 2 +- source/common/http/conn_manager_config.h | 2 +- source/common/http/conn_manager_impl.cc | 4 +- source/common/http/header_map_impl.h | 2 +- source/common/http/http1/codec_impl.h | 4 +- source/common/http/http2/codec_impl.cc | 4 +- source/common/http/http2/codec_impl.h | 6 +- source/common/http/utility.cc | 10 +-- .../addr_family_aware_socket_option_impl.cc | 2 +- source/common/network/connection_impl.cc | 4 +- source/common/network/connection_impl.h | 2 +- source/common/network/dns_impl.cc | 2 +- .../common/singleton/threadsafe_singleton.h | 2 +- source/common/stats/thread_local_store.cc | 2 +- source/common/stats/thread_local_store.h | 4 +- source/common/upstream/edf_scheduler.h | 2 +- source/common/upstream/health_checker_impl.h | 2 +- source/common/upstream/load_balancer_impl.h | 4 +- source/common/upstream/upstream_impl.h | 4 +- .../filters/http/common/jwks_fetcher.h | 4 +- .../filters/http/ext_authz/ext_authz.h | 2 +- .../filters/http/health_check/health_check.cc | 2 +- .../filters/http/jwt_authn/extractor.h | 2 +- .../extensions/filters/http/lua/lua_filter.h | 2 +- .../filters/http/squash/squash_filter.h | 12 +-- .../listener/tls_inspector/tls_inspector.cc | 2 +- .../thrift_proxy/compact_protocol_impl.cc | 2 +- .../network/thrift_proxy/conn_manager.h | 2 +- .../filters/network/thrift_proxy/protocol.h | 24 +++--- source/server/hot_restart_impl.h | 2 +- source/server/http/admin.cc | 2 +- source/server/options_impl.cc | 2 +- .../zlib_decompressor_impl_test.cc | 4 +- .../common/filesystem/filesystem_impl_test.cc | 6 +- test/common/http/http2/codec_impl_fuzz.proto | 2 +- test/common/http/http2/codec_impl_test.cc | 2 +- ...dr_family_aware_socket_option_impl_test.cc | 2 +- test/common/network/connection_impl_test.cc | 4 +- test/common/network/dns_impl_test.cc | 2 +- test/common/tcp_proxy/tcp_proxy_test.cc | 2 +- .../upstream/load_balancer_impl_test.cc | 2 +- test/common/upstream/ring_hash_lb_test.cc | 2 +- test/common/upstream/upstream_impl_test.cc | 2 +- .../json_transcoder_filter_test.cc | 2 +- .../filters/http/gzip/gzip_filter_test.cc | 4 +- .../http/jwt_authn/all_verifier_test.cc | 2 +- .../http/jwt_authn/group_verifier_test.cc | 4 +- 
.../client_ssl_auth/client_ssl_auth_test.cc | 2 +- .../network/ext_authz/ext_authz_test.cc | 2 +- .../compact_protocol_impl_test.cc | 2 +- .../network/thrift_proxy/conn_manager_test.cc | 2 +- .../twitter_protocol_impl_test.cc | 2 +- test/integration/autonomous_upstream.h | 2 +- test/integration/http_integration.cc | 4 +- test/main.cc | 2 +- test/run_envoy_bazel_coverage.sh | 2 +- test/server/guarddog_impl_test.cc | 2 +- test/test_common/environment.cc | 2 +- tools/check_spelling.sh | 82 +++++++++++++++++++ tools/deprecate_version/deprecate_version.py | 2 +- tools/envoy_collect/envoy_collect.py | 4 +- tools/socket_passing.py | 4 +- tools/spelling_skip_files.txt | 1 + tools/spelling_whitelist_words.txt | 5 ++ 92 files changed, 236 insertions(+), 133 deletions(-) create mode 100755 tools/check_spelling.sh create mode 100644 tools/spelling_skip_files.txt create mode 100644 tools/spelling_whitelist_words.txt diff --git a/.circleci/config.yml b/.circleci/config.yml index b097fc1ac647..e7007f4d6ea2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -114,6 +114,7 @@ jobs: - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - checkout - run: ci/do_circle_ci.sh check_format + - run: ci/do_circle_ci.sh check_spelling build_image: docker: - image: circleci/python:2.7 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 70c5c666ab8c..30f364d11205 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -212,7 +212,7 @@ git rebase -i HEAD^^ git push origin -f ``` -Note, that in general rewriting history in this way is a hinderance to the review process and this +Note, that in general rewriting history in this way is a hindrance to the review process and this should only be done to correct a DCO mistake. ## Triggering CI re-run without making changes diff --git a/api/envoy/api/v2/listener/listener.proto b/api/envoy/api/v2/listener/listener.proto index 1e8015dbb244..d72de02c458c 100644 --- a/api/envoy/api/v2/listener/listener.proto +++ b/api/envoy/api/v2/listener/listener.proto @@ -61,7 +61,7 @@ message Filter { // 4. Transport protocol. // 5. Application protocols (e.g. ALPN for TLS protocol). // -// For criterias that allow ranges or wildcards, the most specific value in any +// For criteria that allow ranges or wildcards, the most specific value in any // of the configured filter chains that matches the incoming connection is going // to be used (e.g. for SNI ``www.example.com`` the most specific match would be // ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md b/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md index d7aac1ad98ef..9d083389a5ae 100644 --- a/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md @@ -26,6 +26,6 @@ The next default location is in the query parameter as: If a custom location is desired, `from_headers` or `from_params` can be used to specify custom locations to extract JWT. -## HTTP header to pass sucessfully verified JWT +## HTTP header to pass successfully verified JWT If a JWT is valid, its payload will be passed to the backend in a new HTTP header specified in `forward_payload_header` field. Its value is base64 encoded JWT payload in JSON. 
diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 7e089d8ea80a..4c8c93acce67 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -219,7 +219,7 @@ message HttpConnectionManager { bool unix_sockets = 1; } - // Configures what network addresses are considered internal for stats and header sanitazion + // Configures what network addresses are considered internal for stats and header sanitation // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more // information about internal/external addresses. diff --git a/api/envoy/config/rbac/v2alpha/rbac.proto b/api/envoy/config/rbac/v2alpha/rbac.proto index c5d8f1d827fb..d7431eb0e154 100644 --- a/api/envoy/config/rbac/v2alpha/rbac.proto +++ b/api/envoy/config/rbac/v2alpha/rbac.proto @@ -108,7 +108,7 @@ message Permission { // When any is set, it matches any action. bool any = 3 [(validate.rules).bool.const = true]; - // A header (or psuedo-header such as :path or :method) on the incoming HTTP request. Only + // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. envoy.api.v2.route.HeaderMatcher header = 4; @@ -166,7 +166,7 @@ message Principal { // A CIDR block that describes the downstream IP. envoy.api.v2.core.CidrRange source_ip = 5; - // A header (or psuedo-header such as :path or :method) on the incoming HTTP request. Only + // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. envoy.api.v2.route.HeaderMatcher header = 6; diff --git a/bazel/PPROF.md b/bazel/PPROF.md index 09e7982f2d87..edaf791af07a 100644 --- a/bazel/PPROF.md +++ b/bazel/PPROF.md @@ -83,7 +83,7 @@ instantiation of `MainCommonBase::MainCommonBase`: Once these changes have been made in your working directory, it might make sense to save the diff as a patch (`git diff > file`), which can then be quickly -applied/unapplied for testing and commiting. (`git apply`, `git apply -R`) +applied/unapplied for testing and committing. (`git apply`, `git apply -R`) Build the binary using bazel, and run the binary without any environment variables: diff --git a/bazel/get_workspace_status b/bazel/get_workspace_status index 8f83d55cbf78..82bb7593c7f3 100755 --- a/bazel/get_workspace_status +++ b/bazel/get_workspace_status @@ -37,7 +37,7 @@ fi echo "BUILD_SCM_REVISION ${git_rev}" echo "STABLE_BUILD_SCM_REVISION ${git_rev}" -# Check whether there are any uncommited changes +# Check whether there are any uncommitted changes git diff-index --quiet HEAD -- if [[ $? == 0 ]]; then diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 176b499da27b..b910ee18df17 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -195,7 +195,7 @@ def _python_deps(): build_file = "@envoy//bazel/external:twitter_common_finagle_thrift.BUILD", ) -# Bazel native C++ dependencies. For the depedencies that doesn't provide autoconf/automake builds. +# Bazel native C++ dependencies. For the dependencies that doesn't provide autoconf/automake builds. 
def _cc_deps(): _repository_impl("grpc_httpjson_transcoding") native.bind( diff --git a/ci/README.md b/ci/README.md index e5b0a93f4c3e..cc9e144f518c 100644 --- a/ci/README.md +++ b/ci/README.md @@ -87,9 +87,11 @@ The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` targets are: * `bazel.release.server_only` — build Envoy static binary under `-c opt` with gcc. * `bazel.coverage` — build and run tests under `-c dbg` with gcc, generating coverage information in `$ENVOY_DOCKER_BUILD_DIR/envoy/generated/coverage/coverage.html`. * `bazel.coverity` — build Envoy static binary and run Coverity Scan static analysis. -* `bazel.tsan` — build and run tests under `-c dbg --config=clang-tsan` with clang. -* `check_format`— run `clang-format` and `buildifier` on entire source tree. -* `fix_format`— run and enforce `clang-format` and `buildifier` on entire source tree. +* `bazel.tsan` — build and run tests under `-c dbg --config=clang-tsan` with clang-6.0. +* `check_format`— run `clang-format-6.0` and `buildifier` on entire source tree. +* `fix_format`— run and enforce `clang-format-6.0` and `buildifier` on entire source tree. +* `check_spelling`— run `misspell` on entire project. +* `fix_spelling`— run and enforce `misspell` on entire project. * `docs`— build documentation tree in `generated/docs`. # Testing changes to the build image as a developer diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 97b289272e53..5c52e62bf0c1 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -5,7 +5,7 @@ set -e build_setup_args="" -if [[ "$1" == "fix_format" || "$1" == "check_format" ]]; then +if [[ "$1" == "fix_format" || "$1" == "check_format" || "$1" == "check_spelling" || "$1" == "fix_spelling" ]]; then build_setup_args="-nofetch" fi @@ -202,6 +202,16 @@ elif [[ "$1" == "check_format" ]]; then echo "check_format..." ./tools/check_format.py check exit 0 +elif [[ "$1" == "check_spelling" ]]; then + cd "${ENVOY_SRCDIR}" + echo "check_spelling..." + ./tools/check_spelling.sh check + exit 0 +elif [[ "$1" == "fix_spelling" ]]; then + cd "${ENVOY_SRCDIR}" + echo "fix_spelling..." + ./tools/check_spelling.sh fix + exit 0 elif [[ "$1" == "docs" ]]; then echo "generating docs..." docs/build.sh diff --git a/ci/do_circle_ci.sh b/ci/do_circle_ci.sh index 0d198f282f15..5dcdca04231b 100755 --- a/ci/do_circle_ci.sh +++ b/ci/do_circle_ci.sh @@ -4,7 +4,9 @@ set -e # bazel uses jgit internally and the default circle-ci .gitconfig says to # convert https://github.com to ssh://git@github.com, which jgit does not support. -mv ~/.gitconfig ~/.gitconfig_save +if [[ -e ~/.gitconfig ]]; then + mv ~/.gitconfig ~/.gitconfig_save +fi export ENVOY_SRCDIR="$(pwd)" diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index 3fd04a01e283..9d5587ae70d3 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -4,7 +4,7 @@ set -e . ci/envoy_build_sha.sh -# We run as root and later drop permissions. This is requried to setup the USER +# We run as root and later drop permissions. This is required to setup the USER # in useradd below, which is need for correct Python execution in the Docker # environment.
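A note on exercising the new targets: the `./ci/run_envoy_docker.sh './ci/do_ci.sh ...'` wrapper documented in ci/README.md above drives both modes. The direct `misspell` calls below are only an illustrative sketch, not the contents of the new tools/check_spelling.sh (its 82 lines are not shown in this excerpt); the `-error` and `-w` flags are assumed from the upstream misspell CLI, and the real script also consults tools/spelling_skip_files.txt and tools/spelling_whitelist_words.txt.

```bash
# Run the new spelling targets inside the CI build image, mirroring check_format/fix_format.
./ci/run_envoy_docker.sh './ci/do_ci.sh check_spelling'
./ci/run_envoy_docker.sh './ci/do_ci.sh fix_spelling'

# Illustrative sketch only (not the actual tools/check_spelling.sh): the kind of
# commands a misspell-based wrapper could issue for its "check" and "fix" modes.
git ls-files | xargs misspell -error   # check: report misspellings, non-zero exit on hits
git ls-files | xargs misspell -w       # fix: rewrite files in place with corrections
```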
USER=root diff --git a/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst b/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst index cfeb5dd61789..83025e2d9731 100644 --- a/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst +++ b/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst @@ -74,7 +74,7 @@ Sending arbitrary content ------------------------- By default, when transcoding occurs, gRPC-JSON encodes the message output of a gRPC service method into -JSON and sets the HTTP response `Content-Type` header to `application/json`. To send abritrary content, +JSON and sets the HTTP response `Content-Type` header to `application/json`. To send arbitrary content, a gRPC service method can use `google.api.HttpBody `_ as its output message type. The implementation needs to set diff --git a/docs/root/configuration/secret.rst b/docs/root/configuration/secret.rst index 6e4eae8b05f5..c1d8566ebf08 100644 --- a/docs/root/configuration/secret.rst +++ b/docs/root/configuration/secret.rst @@ -82,7 +82,7 @@ In this example, certificates are specified in the bootstrap static_resource, th Examples two: SDS server ------------------------ -This example shows how to configurate secrets fetched from remote SDS servers: +This example shows how to configure secrets fetched from remote SDS servers: .. code-block:: yaml diff --git a/docs/root/intro/arch_overview/http_connection_management.rst b/docs/root/intro/arch_overview/http_connection_management.rst index 40415481322f..67793f94258f 100644 --- a/docs/root/intro/arch_overview/http_connection_management.rst +++ b/docs/root/intro/arch_overview/http_connection_management.rst @@ -65,4 +65,4 @@ Various configurable timeouts apply to an HTTP connection and its constituent st ` granularity. * Stream-level :ref:`per-route gRPC max timeout `: this bounds the upstream timeout and allows - the timeout to be overriden via the *grpc-timeout* request header. + the timeout to be overridden via the *grpc-timeout* request header. diff --git a/docs/root/operations/admin.rst b/docs/root/operations/admin.rst index c7a3bfcd6cf4..f0df50307c1d 100644 --- a/docs/root/operations/admin.rst +++ b/docs/root/operations/admin.rst @@ -217,7 +217,7 @@ The fields are: that contains "supported_quantiles" which lists the quantiles supported and an array of computed_quantiles that has the computed quantile for each histogram. - If a histogram is not updated during an interval, the ouput will have null for all the quantiles. + If a histogram is not updated during an interval, the output will have null for all the quantiles. Example histogram output: @@ -338,7 +338,7 @@ The fields are: `text/event-stream `_ format, as expected by the Hystrix dashboard. - If invoked from a browser or a terminal, the response will be shown as a continous stream, + If invoked from a browser or a terminal, the response will be shown as a continuous stream, sent in intervals defined by the :ref:`Bootstrap ` :ref:`stats_flush_interval ` diff --git a/include/envoy/buffer/buffer.h b/include/envoy/buffer/buffer.h index 0f6bfacf5455..ba60b2b5fab4 100644 --- a/include/envoy/buffer/buffer.h +++ b/include/envoy/buffer/buffer.h @@ -176,7 +176,7 @@ class Instance { virtual uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) PURE; /** - * Search for an occurence of a buffer within the larger buffer. + * Search for an occurrence of a buffer within the larger buffer. * @param data supplies the data to search for. 
* @param size supplies the length of the data to search for. * @param start supplies the starting index to search from. diff --git a/include/envoy/http/filter.h b/include/envoy/http/filter.h index 32a4087a949b..c0e484ca75ae 100644 --- a/include/envoy/http/filter.h +++ b/include/envoy/http/filter.h @@ -480,7 +480,7 @@ class StreamEncoderFilter : public StreamFilterBase { /** * Called with trailers to be encoded, implicitly ending the stream. - * @param trailers supplies the trailes to be encoded. + * @param trailers supplies the trailers to be encoded. */ virtual FilterTrailersStatus encodeTrailers(HeaderMap& trailers) PURE; diff --git a/include/envoy/network/connection.h b/include/envoy/network/connection.h index 503f458c099a..899dc5fe7c4f 100644 --- a/include/envoy/network/connection.h +++ b/include/envoy/network/connection.h @@ -129,7 +129,7 @@ class Connection : public Event::DeferredDeletable, public FilterManager { /** * @return std::string the next protocol to use as selected by network level negotiation. (E.g., - * ALPN). If network level negotation is not supported by the connection or no protocol + * ALPN). If network level negotiation is not supported by the connection or no protocol * has been negotiated the empty string is returned. */ virtual std::string nextProtocol() const PURE; diff --git a/include/envoy/network/transport_socket.h b/include/envoy/network/transport_socket.h index 59172fc1115a..a4eb7d64938d 100644 --- a/include/envoy/network/transport_socket.h +++ b/include/envoy/network/transport_socket.h @@ -92,7 +92,7 @@ class TransportSocket { /** * @return std::string the protocol to use as selected by network level negotiation. (E.g., ALPN). - * If network level negotation is not supported by the connection or no protocol + * If network level negotiation is not supported by the connection or no protocol * has been negotiated the empty string is returned. */ virtual std::string protocol() const PURE; diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index b59aab1a7218..e7442dba4f4a 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -305,7 +305,7 @@ class RouteSpecificFilterConfig { typedef std::shared_ptr RouteSpecificFilterConfigConstSharedPtr; /** - * Virtual host defintion. + * Virtual host definition. */ class VirtualHost { public: diff --git a/include/envoy/server/hot_restart.h b/include/envoy/server/hot_restart.h index b246b84219e1..458187668e2c 100644 --- a/include/envoy/server/hot_restart.h +++ b/include/envoy/server/hot_restart.h @@ -76,7 +76,7 @@ class HotRestart { virtual void shutdown() PURE; /** - * Return the hot restart compatability version so that operations code can decide whether to + * Return the hot restart compatibility version so that operations code can decide whether to * perform a full or hot restart. */ virtual std::string version() PURE; diff --git a/source/common/access_log/access_log_formatter.h b/source/common/access_log/access_log_formatter.h index b6cb0a8a2775..adaa88afb916 100644 --- a/source/common/access_log/access_log_formatter.h +++ b/source/common/access_log/access_log_formatter.h @@ -36,7 +36,7 @@ class AccessLogFormatParser { /** * General parse command utility. Will parse token from start position. Token is expected to end * with ')'. An optional ":max_length" may be specified after the closing ')' char. Token may - * contain multiple values separated by "seperator" string. First value will be populated in + * contain multiple values separated by "separator" string. 
First value will be populated in * "main" and any additional sub values will be set in the vector "subitems". For example token * of: "com.test.my_filter:test_object:inner_key):100" with separator of ":" will set the * following: @@ -46,7 +46,7 @@ class AccessLogFormatParser { * * @param token the token to parse * @param start the index to start parsing from - * @param seperator seperator between values + * @param separator separator between values * @param main the first value * @param sub_items any additional values * @param max_length optional max_length will be populated if specified diff --git a/source/common/common/perf_annotation.h b/source/common/common/perf_annotation.h index 9b0fbcb227e7..9ef83e0755a4 100644 --- a/source/common/common/perf_annotation.h +++ b/source/common/common/perf_annotation.h @@ -15,7 +15,7 @@ // bazel --define=perf_annotation=enabled ... // or, in individual .cc files: // #define ENVOY_PERF_ANNOTATION -// In the absense of such directives, the support classes are built and tested. +// In the absence of such directives, the support classes are built and tested. // However, the macros for instrumenting code for performance analysis will expand // to nothing. // @@ -59,7 +59,7 @@ #define PERF_CLEAR() Envoy::PerfAnnotationContext::clear() /** - * Controls whether performacne collection and reporting is thread safe. For now, + * Controls whether performance collection and reporting is thread safe. For now, * leaving this enabled for predictability across multiiple applications, on the assumption * that an uncontended mutex lock has vanishingly small cost. In the future we may try * to make this system thread-unsafe if mutex contention disturbs the metrics. diff --git a/source/common/common/utility.h b/source/common/common/utility.h index f8cd45af9227..b19c3802bc4d 100644 --- a/source/common/common/utility.h +++ b/source/common/common/utility.h @@ -273,7 +273,7 @@ class StringUtil { /** * Crop characters from a string view starting at the first character of the matched - * delimiter string view until the begining of the source string view. + * delimiter string view until the beginning of the source string view. * @param source supplies the string view to be processed. * @param delimiter supplies the string view that delimits the starting point for deletion. * @return sub-string of the string view if any. diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc index 533c16fbb3ea..844e7ff6c89b 100644 --- a/source/common/event/dispatcher_impl.cc +++ b/source/common/event/dispatcher_impl.cc @@ -154,7 +154,7 @@ void DispatcherImpl::run(RunType type) { // Flush all post callbacks before we run the event loop. We do this because there are post // callbacks that have to get run before the initial event loop starts running. libevent does - // not gaurantee that events are run in any particular order. So even if we post() and call + // not guarantee that events are run in any particular order. So even if we post() and call // event_base_once() before some other event, the other event might get called first. runPostCallbacks(); diff --git a/source/common/filesystem/filesystem_impl.h b/source/common/filesystem/filesystem_impl.h index 2ce5d9090d86..0d1bb7a4116f 100644 --- a/source/common/filesystem/filesystem_impl.h +++ b/source/common/filesystem/filesystem_impl.h @@ -120,7 +120,7 @@ class FileImpl : public File { // not get interleaved by multiple processes writing to // the same file during hot-restart.
Thread::MutexBasicLockable flush_lock_; // This lock is used to prevent simulataneous flushes from - // the flush thread and a syncronous flush. This protects + // the flush thread and a synchronous flush. This protects // concurrent access to the about_to_write_buffer_, fd_, // and all other data used during flushing and file // re-opening. diff --git a/source/common/http/codec_client.h b/source/common/http/codec_client.h index b5db01abe3fb..98956b273aa5 100644 --- a/source/common/http/codec_client.h +++ b/source/common/http/codec_client.h @@ -79,7 +79,7 @@ class CodecClient : Logger::Loggable, uint64_t id() { return connection_->id(); } /** - * @return size_t the number of oustanding requests that have not completed or been reset. + * @return size_t the number of outstanding requests that have not completed or been reset. */ size_t numActiveRequests() { return active_requests_.size(); } diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h index 2be2aac148fc..e10875ecd807 100644 --- a/source/common/http/conn_manager_config.h +++ b/source/common/http/conn_manager_config.h @@ -198,7 +198,7 @@ class ConnectionManagerConfig { virtual DateProvider& dateProvider() PURE; /** - * @return the time in milliseconds the connection manager will wait betwen issuing a "shutdown + * @return the time in milliseconds the connection manager will wait between issuing a "shutdown * notice" to the time it will issue a full GOAWAY and not accept any new streams. */ virtual std::chrono::milliseconds drainTimeout() PURE; diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index e1cf11f18437..966be2858a38 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -543,7 +543,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, // The protocol may have shifted in the HTTP/1.0 case so reset it. request_info_.protocol(protocol); if (!connection_manager_.config_.http1Settings().accept_http_10_) { - // Send "Upgrade Required" if HTTP/1.0 support is not explictly configured on. + // Send "Upgrade Required" if HTTP/1.0 support is not explicitly configured on. sendLocalReply(false, Code::UpgradeRequired, "", nullptr, is_head_request_); return; } else { @@ -671,7 +671,7 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, decodeHeaders(nullptr, *request_headers_, end_stream); - // Reset it here for both global and overriden cases. + // Reset it here for both global and overridden cases. resetIdleTimer(); } diff --git a/source/common/http/header_map_impl.h b/source/common/http/header_map_impl.h index fd2ae9570e7e..75d16eaa30e5 100644 --- a/source/common/http/header_map_impl.h +++ b/source/common/http/header_map_impl.h @@ -138,7 +138,7 @@ class HeaderMapImpl : public HeaderMap, NonCopyable { * * Note: the internal iterators held in fields make this unsafe to copy and move, since the * reference to end() is not preserved across a move (see Notes in - * https://en.cppreference.com/w/cpp/container/list/list). The NonCopyable will supress both copy + * https://en.cppreference.com/w/cpp/container/list/list). The NonCopyable will suppress both copy * and move constructors/assignment. * TODO(htuch): Maybe we want this to movable one day; for now, our header map moves happen on * HeaderMapPtr, so the performance impact should not be evident. 
diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 6f8cafd2c971..c3ac78cab143 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -183,7 +183,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggablehd.type == NGHTTP2_HEADERS, ""); RELEASE_ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || frame->headers.cat == NGHTTP2_HCAT_HEADERS, @@ -803,7 +803,7 @@ int ClientConnectionImpl::onBeginHeaders(const nghttp2_frame* frame) { int ClientConnectionImpl::onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) { - // The client code explicitly does not currently suport push promise. + // The client code explicitly does not currently support push promise. ASSERT(frame->hd.type == NGHTTP2_HEADERS); ASSERT(frame->headers.cat == NGHTTP2_HCAT_RESPONSE || frame->headers.cat == NGHTTP2_HCAT_HEADERS); return saveHeader(frame, std::move(name), std::move(value)); diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index e5786a6670bc..6ea99059d390 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -88,7 +88,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::LoggablerunHighWatermarkCallbacks(); @@ -178,8 +178,8 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggablevalue().c_str(), xff_header->value().size()); - static const std::string seperator(","); + static const std::string separator(","); // Ignore the last num_to_skip addresses at the end of XFF. for (uint32_t i = 0; i < num_to_skip; i++) { - std::string::size_type last_comma = xff_string.rfind(seperator); + std::string::size_type last_comma = xff_string.rfind(separator); if (last_comma == std::string::npos) { return {nullptr, false}; } @@ -319,9 +319,9 @@ Utility::getLastAddressFromXFF(const Http::HeaderMap& request_headers, uint32_t } // The text after the last remaining comma, or the entirety of the string if there // is no comma, is the requested IP address. - std::string::size_type last_comma = xff_string.rfind(seperator); - if (last_comma != std::string::npos && last_comma + seperator.size() < xff_string.size()) { - xff_string = xff_string.substr(last_comma + seperator.size()); + std::string::size_type last_comma = xff_string.rfind(separator); + if (last_comma != std::string::npos && last_comma + separator.size() < xff_string.size()) { + xff_string = xff_string.substr(last_comma + separator.size()); } // Ignore the whitespace, since they are allowed in HTTP lists (see RFC7239#section-7.1). diff --git a/source/common/network/addr_family_aware_socket_option_impl.cc b/source/common/network/addr_family_aware_socket_option_impl.cc index 953baf599613..872a5304230d 100644 --- a/source/common/network/addr_family_aware_socket_option_impl.cc +++ b/source/common/network/addr_family_aware_socket_option_impl.cc @@ -47,7 +47,7 @@ bool AddrFamilyAwareSocketOptionImpl::setIpSocketOption( return ipv4_option->setOption(socket, state); } - // If the FD is v6, we first try the IPv6 variant if the platfrom supports it and fallback to the + // If the FD is v6, we first try the IPv6 variant if the platform supports it and fallback to the // IPv4 variant otherwise. 
ASSERT(ip->version() == Network::Address::IpVersion::v6); if (ipv6_option->isSupported()) { diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index d5eed98838ca..e89a00bace5d 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -280,7 +280,7 @@ void ConnectionImpl::readDisable(bool disable) { // When we disable reads, we still allow for early close notifications (the equivalent of // EPOLLRDHUP for an epoll backend). For backends that support it, this allows us to apply // back pressure at the kernel layer, but still get timely notification of a FIN. Note that - // we are not gaurenteed to get notified, so even if the remote has closed, we may not know + // we are not guaranteed to get notified, so even if the remote has closed, we may not know // until we try to write. Further note that currently we optionally don't correctly handle half // closed TCP connections in the sense that we assume that a remote FIN means the remote intends a // full close. @@ -293,7 +293,7 @@ void ConnectionImpl::readDisable(bool disable) { read_enabled_ = false; // If half-close semantics are enabled, we never want early close notifications; we - // always want to read all avaiable data, even if the other side has closed. + // always want to read all available data, even if the other side has closed. if (detect_early_close_ && !enable_half_close_) { file_event_->setEnabled(Event::FileReadyType::Write | Event::FileReadyType::Closed); } else { diff --git a/source/common/network/connection_impl.h b/source/common/network/connection_impl.h index 1d00a42f8a54..7f1fb01b9bc4 100644 --- a/source/common/network/connection_impl.h +++ b/source/common/network/connection_impl.h @@ -34,7 +34,7 @@ class ConnectionImplUtility { * @param previous_total supplies the previous final total buffer size. previous_total will be * updated to new_total when the call is complete. * @param stat_total supplies the counter to increment with the delta. - * @param stat_current supplies the guage that should be updated with the delta of previous_total + * @param stat_current supplies the gauge that should be updated with the delta of previous_total * and new_total. */ static void updateBufferStats(uint64_t delta, uint64_t new_total, uint64_t& previous_total, diff --git a/source/common/network/dns_impl.cc b/source/common/network/dns_impl.cc index ba8ae9369183..9456230a00ba 100644 --- a/source/common/network/dns_impl.cc +++ b/source/common/network/dns_impl.cc @@ -193,7 +193,7 @@ void DnsResolverImpl::onAresSocketStateChange(int fd, int read, int write) { ActiveDnsQuery* DnsResolverImpl::resolve(const std::string& dns_name, DnsLookupFamily dns_lookup_family, ResolveCb callback) { // TODO(hennna): Add DNS caching which will allow testing the edge case of a - // failed intial call to getHostbyName followed by a synchronous IPv4 + // failed initial call to getHostbyName followed by a synchronous IPv4 // resolution. std::unique_ptr pending_resolution( new PendingResolution(callback, dispatcher_, channel_, dns_name)); diff --git a/source/common/singleton/threadsafe_singleton.h b/source/common/singleton/threadsafe_singleton.h index 2786a195f185..fbd3af0a28c3 100644 --- a/source/common/singleton/threadsafe_singleton.h +++ b/source/common/singleton/threadsafe_singleton.h @@ -11,7 +11,7 @@ namespace Envoy { * accessible but can not be marked as const. All functions in the singleton class * *must* be threadsafe. 
* - * Note that there is heavy resistence in Envoy to adding this type of singleton + * Note that there is heavy resistance in Envoy to adding this type of singleton * if data will persist with state changes across tests, as it becomes difficult * to write clean unit tests if a state change in one test will persist into * another test. Be wary of using it. A example of acceptable usage is OsSyscallsImpl, diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index dcf96f6b9239..a099810e8d3b 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -171,7 +171,7 @@ absl::string_view ThreadLocalStoreImpl::truncateStatNameIfNeeded(absl::string_vi // Note that the heap-allocator does not truncate itself; we have to // truncate here if we are using heap-allocation as a fallback due to an - // exahusted shared-memory block + // exhausted shared-memory block if (name.size() > max_length) { ENVOY_LOG_MISC( warn, diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index e5f00cdeeaca..a179e081ef62 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -145,7 +145,7 @@ class TlsScope : public Scope { * - When a scope is destroyed, a cache flush operation is run on all threads to flush any cached * data owned by the destroyed scope. * - Scopes use a unique incrementing ID for the cache key. This ensures that if a new scope is - * created at the same address as a recently deleted scope, cache references will not accidently + * created at the same address as a recently deleted scope, cache references will not accidentally * reference the old scope which may be about to be cache flushed. * - Since it's possible to have overlapping scopes, we de-dup stats when counters() or gauges() is * called since these are very uncommon operations. @@ -162,7 +162,7 @@ class TlsScope : public Scope { * If there is one it will write to it, otherwise creates new one and writes to it. * During the flush process the following sequence is followed. * - The main thread starts the flush process by posting a message to every worker which tells the - * worker to swap its "active" histogram with its "backup" histogram. This is acheived via a call + * worker to swap its "active" histogram with its "backup" histogram. This is achieved via a call * to "beginMerge" method. * - Each TLS histogram has 2 histograms it makes use of, swapping back and forth. It manages a * current_active index via which it writes to the correct histogram. diff --git a/source/common/upstream/edf_scheduler.h b/source/common/upstream/edf_scheduler.h index d9ef0c267829..6515f4a8d5f3 100644 --- a/source/common/upstream/edf_scheduler.h +++ b/source/common/upstream/edf_scheduler.h @@ -88,7 +88,7 @@ template class EdfScheduler { }; // Current time in EDF scheduler. - // TOOD(htuch): Is it worth the small extra complexity to use integer time for performance + // TODO(htuch): Is it worth the small extra complexity to use integer time for performance // reasons? 
double current_time_{}; // Offset used during addition to break ties when entries have the same weight but should reflect diff --git a/source/common/upstream/health_checker_impl.h b/source/common/upstream/health_checker_impl.h index 0db4bfd70a3f..dc86aceb04eb 100644 --- a/source/common/upstream/health_checker_impl.h +++ b/source/common/upstream/health_checker_impl.h @@ -174,7 +174,7 @@ class ProdHttpHealthCheckerImpl : public HttpHealthCheckerImpl { * binary block can be of arbitrary length and is just concatenated together when sent. * * On the receive side, "fuzzy" matching is performed such that each binary block must be found, - * and in the order specified, but not necessarly contiguous. Thus, in the example above, + * and in the order specified, but not necessarily contiguous. Thus, in the example above, * "FFFFFFFF" could be inserted in the response between "EEEEEEEE" and "01000000" and the check * would still pass. */ diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index 7d165e9d31aa..e7a75b6d05f3 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -115,7 +115,7 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase { // When deciding which hosts to use on an LB decision, we need to know how to index into the // priority_set. This priority_set cursor is used by ZoneAwareLoadBalancerBase subclasses, e.g. - // RoundRobinLoadBalancer, to index into auxillary data structures specific to the LB for + // RoundRobinLoadBalancer, to index into auxiliary data structures specific to the LB for // a given host set selection. struct HostsSource { enum class SourceType { @@ -245,7 +245,7 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase { * with 1 / weight deadline, we will achieve the desired pick frequency for weighted RR in a given * interval. Naive implementations of weighted RR are either O(n) pick time or O(m * n) memory use, * where m is the weight range. We also explicitly check for the unweighted special case and use a - * simple index to acheive O(1) scheduling in that case. + * simple index to achieve O(1) scheduling in that case. * TODO(htuch): We use EDF at Google, but the EDF scheduler may be overkill if we don't want to * support large ranges of weights or arbitrary precision floating weights, we could construct an * explicit schedule, since m will be a small constant factor in O(m * n). This diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 94ec01f84637..1a148de617e2 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -577,7 +577,7 @@ class PriorityStateManager : protected Logger::Loggable { // priority is specified by locality_lb_endpoint.priority()). // // The specified health_checker_flag is used to set the registered-host's health-flag when the - // lb_endpoint health status is unhealty, draining or timeout. + // lb_endpoint health status is unhealthy, draining or timeout. void registerHostForPriority(const std::string& hostname, Network::Address::InstanceConstSharedPtr address, @@ -649,7 +649,7 @@ class BaseDynamicClusterImpl : public ClusterImplBase { * @param hosts_added_to_current_priority will be populated with hosts added to the priority. * @param hosts_removed_from_current_priority will be populated with hosts removed from the * priority.
- * @param updated_hosts is used to aggregate the new state of all hosts accross priority, and will + * @param updated_hosts is used to aggregate the new state of all hosts across priority, and will * be updated with the hosts that remain in this priority after the update. * @return whether the hosts for the priority changed. */ diff --git a/source/extensions/filters/http/common/jwks_fetcher.h b/source/extensions/filters/http/common/jwks_fetcher.h index c76a76c8948b..ec948bbc4df2 100644 --- a/source/extensions/filters/http/common/jwks_fetcher.h +++ b/source/extensions/filters/http/common/jwks_fetcher.h @@ -24,9 +24,9 @@ class JwksFetcher { class JwksReceiver { public: enum class Failure { - /* A network error occured causing JWKS retrieval failure. */ + /* A network error occurred causing JWKS retrieval failure. */ Network, - /* A failure occured when trying to parse the retrieved JWKS data. */ + /* A failure occurred when trying to parse the retrieved JWKS data. */ InvalidJwks, }; diff --git a/source/extensions/filters/http/ext_authz/ext_authz.h b/source/extensions/filters/http/ext_authz/ext_authz.h index 67834be1c0ed..1da15b926946 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.h +++ b/source/extensions/filters/http/ext_authz/ext_authz.h @@ -145,7 +145,7 @@ class Filter : public Logger::Loggable, // the filter chain should stop. Otherwise the filter chain can continue to the next filter. enum class FilterReturn { ContinueDecoding, StopDecoding }; void initiateCall(const Http::HeaderMap& headers); - Http::HeaderMapPtr getHeaderMap(const Filters::Common::ExtAuthz::ResponsePtr& reponse); + Http::HeaderMapPtr getHeaderMap(const Filters::Common::ExtAuthz::ResponsePtr& response); FilterConfigSharedPtr config_; Filters::Common::ExtAuthz::ClientPtr client_; Http::StreamDecoderFilterCallbacks* callbacks_{}; diff --git a/source/extensions/filters/http/health_check/health_check.cc b/source/extensions/filters/http/health_check/health_check.cc index 426f1f283912..ec55410d2a38 100644 --- a/source/extensions/filters/http/health_check/health_check.cc +++ b/source/extensions/filters/http/health_check/health_check.cc @@ -118,7 +118,7 @@ void HealthCheckFilter::onComplete() { const auto& stats = cluster->info()->stats(); const uint64_t membership_total = stats.membership_total_.value(); if (membership_total == 0) { - // If the cluster exists but is empty, consider the service unhealty unless + // If the cluster exists but is empty, consider the service unhealthy unless // the specified minimum percent healthy for the cluster happens to be zero. if (min_healthy_percentage == 0.0) { continue; diff --git a/source/extensions/filters/http/jwt_authn/extractor.h b/source/extensions/filters/http/jwt_authn/extractor.h index 314a5bf721ff..a25705cfad99 100644 --- a/source/extensions/filters/http/jwt_authn/extractor.h +++ b/source/extensions/filters/http/jwt_authn/extractor.h @@ -15,7 +15,7 @@ namespace HttpFilters { namespace JwtAuthn { /** - * JwtLocation stores following token infomation: + * JwtLocation stores following token information: * * * extracted token string, * * the location where the JWT is extracted from, diff --git a/source/extensions/filters/http/lua/lua_filter.h b/source/extensions/filters/http/lua/lua_filter.h index bd306801606a..8616ad0a8712 100644 --- a/source/extensions/filters/http/lua/lua_filter.h +++ b/source/extensions/filters/http/lua/lua_filter.h @@ -71,7 +71,7 @@ class FilterCallbacks { /** * @return RequestInfo::RequestInfo& the current request info handle. 
This handle is mutable to - * accomodate write API e.g. setDynamicMetadata(). + * accommodate write API e.g. setDynamicMetadata(). */ virtual RequestInfo::RequestInfo& requestInfo() PURE; diff --git a/source/extensions/filters/http/squash/squash_filter.h b/source/extensions/filters/http/squash/squash_filter.h index 0361be1015fd..08c81053a574 100644 --- a/source/extensions/filters/http/squash/squash_filter.h +++ b/source/extensions/filters/http/squash/squash_filter.h @@ -26,14 +26,14 @@ class SquashFilterConfig : protected Logger::Loggable { const std::chrono::milliseconds& requestTimeout() { return request_timeout_; } private: - // Get the attachment body, and returns a JSON representations with envrionment variables + // Get the attachment body, and returns a JSON representations with environment variables // interpolated. static std::string getAttachment(const ProtobufWkt::Struct& attachment_template); - // Recursively interpolates envrionment variables inline in the struct. + // Recursively interpolates environment variables inline in the struct. static void updateTemplateInStruct(ProtobufWkt::Struct& attachment_template); - // Recursively interpolates envrionment variables inline in the value. + // Recursively interpolates environment variables inline in the value. static void updateTemplateInValue(ProtobufWkt::Value& curvalue); - // Interpolates envrionment variables in a string, and returns the new interpolated string. + // Interpolates environment variables in a string, and returns the new interpolated string. static std::string replaceEnv(const std::string& attachment_template); // The name of the squash server cluster. @@ -47,7 +47,7 @@ class SquashFilterConfig : protected Logger::Loggable { // The timeout for individual requests to the squash server. std::chrono::milliseconds request_timeout_; - // Defines the pattern for interpolating envrionment variables in to the attachment. + // Defines the pattern for interpolating environment variables in to the attachment. const static std::regex ENV_REGEX; }; @@ -130,7 +130,7 @@ class SquashFilter : public Http::StreamDecoderFilter, const static std::string SERVER_AUTHORITY; // The state of a debug attachment object when a debugger is successfully attached. const static std::string ATTACHED_STATE; - // The state of a debug attachment object when an error has occured. + // The state of a debug attachment object when an error has occurred. const static std::string ERROR_STATE; }; diff --git a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc index 6e56e2012ca1..434ded8e4b45 100644 --- a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc +++ b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc @@ -139,7 +139,7 @@ void Filter::onRead() { // even if previous data has not been read, which is always the case due to MSG_PEEK. When // the TlsInspector completes and passes the socket along, a new FileEvent is created for the // socket, so that new event is immediately signalled as readable because it is new and the socket - // is readable, even though no new events have ocurred. + // is readable, even though no new events have occurred. // // TODO(ggreenway): write an integration test to ensure the events work as expected on all // platforms. 
diff --git a/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc b/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc index 9fa04b37485f..b6cd3f05d3b7 100644 --- a/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/compact_protocol_impl.cc @@ -235,7 +235,7 @@ bool CompactProtocolImpl::readListBegin(Buffer::Instance& buffer, FieldType& ele } if (s < 0) { - throw EnvoyException(fmt::format("negative compact procotol list/set size {}", s)); + throw EnvoyException(fmt::format("negative compact protocol list/set size {}", s)); } sz = static_cast(s); diff --git a/source/extensions/filters/network/thrift_proxy/conn_manager.h b/source/extensions/filters/network/thrift_proxy/conn_manager.h index 26f9bacca852..42fa7f3fda36 100644 --- a/source/extensions/filters/network/thrift_proxy/conn_manager.h +++ b/source/extensions/filters/network/thrift_proxy/conn_manager.h @@ -241,7 +241,7 @@ class ConnectionManager : public Network::ReadFilter, void continueDecoding(); void dispatch(); - void sendLocalReply(MessageMetadata& metadata, const DirectResponse& reponse, bool end_stream); + void sendLocalReply(MessageMetadata& metadata, const DirectResponse& response, bool end_stream); void doDeferredRpcDestroy(ActiveRpc& rpc); void resetAllRpcs(bool local_reset); diff --git a/source/extensions/filters/network/thrift_proxy/protocol.h b/source/extensions/filters/network/thrift_proxy/protocol.h index 043ee93b345a..50cc877be752 100644 --- a/source/extensions/filters/network/thrift_proxy/protocol.h +++ b/source/extensions/filters/network/thrift_proxy/protocol.h @@ -59,7 +59,7 @@ class Protocol { * from the buffer. * @param buffer the buffer to read from * @param metadata MessageMetadata to be updated with name, message type, and sequence id. - * @return true if a message header was sucessfully read, false if more data is required + * @return true if a message header was successfully read, false if more data is required * @throw EnvoyException if the data is not a valid message header */ virtual bool readMessageBegin(Buffer::Instance& buffer, MessageMetadata& metadata) PURE; @@ -68,7 +68,7 @@ class Protocol { * Reads the end of a Thrift protocol message from the buffer. If successful, the message footer * is removed from the buffer. * @param buffer the buffer to read from - * @return true if a message footer was sucessfully read, false if more data is required + * @return true if a message footer was successfully read, false if more data is required * @throw EnvoyException if the data is not a valid message footer */ virtual bool readMessageEnd(Buffer::Instance& buffer) PURE; @@ -78,7 +78,7 @@ class Protocol { * value from the struct header. If successful, the struct header is removed from the buffer. * @param buffer the buffer to read from * @param name updated with the struct name on success only - * @return true if a struct header was sucessfully read, false if more data is required + * @return true if a struct header was successfully read, false if more data is required * @throw EnvoyException if the data is not a valid struct header */ virtual bool readStructBegin(Buffer::Instance& buffer, std::string& name) PURE; @@ -87,7 +87,7 @@ class Protocol { * Reads the end of a Thrift struct from the buffer. If successful, the struct footer is removed * from the buffer. 
* @param buffer the buffer to read from - * @return true if a struct footer was sucessfully read, false if more data is required + * @return true if a struct footer was successfully read, false if more data is required * @throw EnvoyException if the data is not a valid struct footer */ virtual bool readStructEnd(Buffer::Instance& buffer) PURE; @@ -100,7 +100,7 @@ class Protocol { * @param name updated with the field name on success only * @param field_type updated with the FieldType on success only * @param field_id updated with the field ID on success only - * @return true if a field header was sucessfully read, false if more data is required + * @return true if a field header was successfully read, false if more data is required * @throw EnvoyException if the data is not a valid field header */ virtual bool readFieldBegin(Buffer::Instance& buffer, std::string& name, FieldType& field_type, @@ -110,7 +110,7 @@ class Protocol { * Reads the end of a Thrift struct field from the buffer. If successful, the field footer is * removed from the buffer. * @param buffer the buffer to read from - * @return true if a field footer was sucessfully read, false if more data is required + * @return true if a field footer was successfully read, false if more data is required * @throw EnvoyException if the data is not a valid field footer */ virtual bool readFieldEnd(Buffer::Instance& buffer) PURE; @@ -123,7 +123,7 @@ class Protocol { * @param key_type updated with map key FieldType on success only * @param value_type updated with map value FieldType on success only * @param size updated with the number of key-value pairs in the map on success only - * @return true if a map header was sucessfully read, false if more data is required + * @return true if a map header was successfully read, false if more data is required * @throw EnvoyException if the data is not a valid map header */ virtual bool readMapBegin(Buffer::Instance& buffer, FieldType& key_type, FieldType& value_type, @@ -133,7 +133,7 @@ class Protocol { * Reads the end of a Thrift map from the buffer. If successful, the map footer is removed from * the buffer. * @param buffer the buffer to read from - * @return true if a map footer was sucessfully read, false if more data is required + * @return true if a map footer was successfully read, false if more data is required * @throw EnvoyException if the data is not a valid map footer */ virtual bool readMapEnd(Buffer::Instance& buffer) PURE; @@ -145,7 +145,7 @@ class Protocol { * @param buffer the buffer to read from * @param elem_type updated with list element FieldType on success only * @param size updated with the number of list members on success only - * @return true if a list header was sucessfully read, false if more data is required + * @return true if a list header was successfully read, false if more data is required * @throw EnvoyException if the data is not a valid list header */ virtual bool readListBegin(Buffer::Instance& buffer, FieldType& elem_type, uint32_t& size) PURE; @@ -154,7 +154,7 @@ class Protocol { * Reads the end of a Thrift list from the buffer. If successful, the list footer is removed from * the buffer. 
* @param buffer the buffer to read from - * @return true if a list footer was sucessfully read, false if more data is required + * @return true if a list footer was successfully read, false if more data is required * @throw EnvoyException if the data is not a valid list footer */ virtual bool readListEnd(Buffer::Instance& buffer) PURE; @@ -166,7 +166,7 @@ class Protocol { * @param buffer the buffer to read from * @param elem_type updated with set element FieldType on success only * @param size updated with the number of set members on success only - * @return true if a set header was sucessfully read, false if more data is required + * @return true if a set header was successfully read, false if more data is required * @throw EnvoyException if the data is not a valid set header */ virtual bool readSetBegin(Buffer::Instance& buffer, FieldType& elem_type, uint32_t& size) PURE; @@ -175,7 +175,7 @@ class Protocol { * Reads the end of a Thrift set from the buffer. If successful, the set footer is removed from * the buffer. * @param buffer the buffer to read from - * @return true if a set footer was sucessfully read, false if more data is required + * @return true if a set footer was successfully read, false if more data is required * @throw EnvoyException if the data is not a valid set footer */ virtual bool readSetEnd(Buffer::Instance& buffer) PURE; diff --git a/source/server/hot_restart_impl.h b/source/server/hot_restart_impl.h index 848093eb19ff..fef14834af81 100644 --- a/source/server/hot_restart_impl.h +++ b/source/server/hot_restart_impl.h @@ -41,7 +41,7 @@ class SharedMemory { }; // Due to the flexible-array-length of stats_set_data_, c-style allocation - // and initialization are neccessary. + // and initialization are necessary. SharedMemory() = delete; ~SharedMemory() = delete; diff --git a/source/server/http/admin.cc b/source/server/http/admin.cc index ed0499d6fdc4..d24129b77fff 100644 --- a/source/server/http/admin.cc +++ b/source/server/http/admin.cc @@ -569,7 +569,7 @@ Http::Code AdminImpl::handlerStats(absl::string_view url, Http::HeaderMap& respo for (auto stat : all_stats) { response.add(fmt::format("{}: {}\n", stat.first, stat.second)); } - // TOOD(ramaraochavali): See the comment in ThreadLocalStoreImpl::histograms() for why we use a + // TODO(ramaraochavali): See the comment in ThreadLocalStoreImpl::histograms() for why we use a // multimap here. This makes sure that duplicate histograms get output. When shared storage is // implemented this can be switched back to a normal map. 
std::multimap all_histograms; diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index 08227524cd5c..1b68c862f25b 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -89,7 +89,7 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv, TCLAP::ValueArg restart_epoch("", "restart-epoch", "hot restart epoch #", false, 0, "uint32_t", cmd); TCLAP::SwitchArg hot_restart_version_option("", "hot-restart-version", - "hot restart compatability version", cmd); + "hot restart compatibility version", cmd); TCLAP::ValueArg service_cluster("", "service-cluster", "Cluster name", false, "", "string", cmd); TCLAP::ValueArg service_node("", "service-node", "Node name", false, "", "string", diff --git a/test/common/decompressor/zlib_decompressor_impl_test.cc b/test/common/decompressor/zlib_decompressor_impl_test.cc index a3f608e149f3..388ee63f6ec1 100644 --- a/test/common/decompressor/zlib_decompressor_impl_test.cc +++ b/test/common/decompressor/zlib_decompressor_impl_test.cc @@ -66,10 +66,10 @@ class ZlibDecompressorImplDeathTest : public ZlibDecompressorImplTest { static void unitializedDecompressorTestHelper() { Buffer::OwnedImpl input_buffer; - Buffer::OwnedImpl ouput_buffer; + Buffer::OwnedImpl output_buffer; ZlibDecompressorImpl decompressor; TestUtility::feedBufferWithRandomCharacters(input_buffer, 100); - decompressor.decompress(input_buffer, ouput_buffer); + decompressor.decompress(input_buffer, output_buffer); } }; diff --git a/test/common/filesystem/filesystem_impl_test.cc b/test/common/filesystem/filesystem_impl_test.cc index e4a8f5cf354e..2a1c706dce47 100644 --- a/test/common/filesystem/filesystem_impl_test.cc +++ b/test/common/filesystem/filesystem_impl_test.cc @@ -89,8 +89,8 @@ TEST(FileSystemImpl, fileReadToEndDoesNotExist) { TEST(FilesystemImpl, CanonicalPathSuccess) { EXPECT_EQ("/", Filesystem::canonicalPath("//")); } TEST(FilesystemImpl, CanonicalPathFail) { - EXPECT_THROW_WITH_MESSAGE(Filesystem::canonicalPath("/_some_non_existant_file"), EnvoyException, - "Unable to determine canonical path for /_some_non_existant_file"); + EXPECT_THROW_WITH_MESSAGE(Filesystem::canonicalPath("/_some_non_existent_file"), EnvoyException, + "Unable to determine canonical path for /_some_non_existent_file"); } TEST(FilesystemImpl, IllegalPath) { @@ -101,7 +101,7 @@ TEST(FilesystemImpl, IllegalPath) { EXPECT_TRUE(Filesystem::illegalPath("/proc/")); EXPECT_TRUE(Filesystem::illegalPath("/sys")); EXPECT_TRUE(Filesystem::illegalPath("/sys/")); - EXPECT_TRUE(Filesystem::illegalPath("/_some_non_existant_file")); + EXPECT_TRUE(Filesystem::illegalPath("/_some_non_existent_file")); } TEST(FileSystemImpl, flushToLogFilePeriodically) { diff --git a/test/common/http/http2/codec_impl_fuzz.proto b/test/common/http/http2/codec_impl_fuzz.proto index 9b7a3a9a8387..4cec131bff51 100644 --- a/test/common/http/http2/codec_impl_fuzz.proto +++ b/test/common/http/http2/codec_impl_fuzz.proto @@ -36,7 +36,7 @@ message StreamAction { message MutateAction { // Buffer index. uint32 buffer = 1; - // Offset withing buffer. + // Offset within buffer. uint32 offset = 2; // Value to set (only lower byte is significant). 
uint32 value = 3;
diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc
index 186bb097f9f4..9b757d64320a 100644
--- a/test/common/http/http2/codec_impl_test.cc
+++ b/test/common/http/http2/codec_impl_test.cc
@@ -705,7 +705,7 @@ INSTANTIATE_TEST_CASE_P(Http2CodecImplFlowControlTest, Http2CodecImplFlowControl
                         ::testing::Combine(HTTP2SETTINGS_SMALL_WINDOW_COMBINE,
                                            HTTP2SETTINGS_SMALL_WINDOW_COMBINE));
 
-// we seperate default/edge cases here to avoid combinatorial explosion
+// we separate default/edge cases here to avoid combinatorial explosion
 #define HTTP2SETTINGS_DEFAULT_COMBINE \
   ::testing::Combine(::testing::Values(Http2Settings::DEFAULT_HPACK_TABLE_SIZE), \
                      ::testing::Values(Http2Settings::DEFAULT_MAX_CONCURRENT_STREAMS), \
diff --git a/test/common/network/addr_family_aware_socket_option_impl_test.cc b/test/common/network/addr_family_aware_socket_option_impl_test.cc
index 3c6c16a0d9c9..ddf9292bd644 100644
--- a/test/common/network/addr_family_aware_socket_option_impl_test.cc
+++ b/test/common/network/addr_family_aware_socket_option_impl_test.cc
@@ -61,7 +61,7 @@ TEST_F(AddrFamilyAwareSocketOptionImplTest, V6EmptyOptionNames) {
 }
 
 // If a platform suppports IPv4 and IPv6 socket option variants for an IPv4 address, we apply the
-// IPv4 varient
+// IPv4 variant
 TEST_F(AddrFamilyAwareSocketOptionImplTest, V4IgnoreV6) {
   Address::Ipv4Instance address("1.2.3.4", 5678);
   const int fd = address.socket(Address::SocketType::Stream);
diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc
index 94bd7fcaa64a..24a21b8f45a7 100644
--- a/test/common/network/connection_impl_test.cc
+++ b/test/common/network/connection_impl_test.cc
@@ -235,7 +235,7 @@ TEST_P(ConnectionImplTest, CloseDuringConnectCallback) {
 TEST_P(ConnectionImplTest, ImmediateConnectError) {
   dispatcher_.reset(new Event::DispatcherImpl(time_system_));
 
-  // Using a broadcast/multicast address as the connection destiantion address causes an
+  // Using a broadcast/multicast address as the connection destination address causes an
   // immediate error return from connect().
   Address::InstanceConstSharedPtr broadcast_address;
   if (socket_.localAddress()->ip()->version() == Address::IpVersion::v4) {
@@ -745,7 +745,7 @@ TEST_P(ConnectionImplTest, WatermarkFuzzing) {
   for (int i = 0; i < 50; ++i) {
     // The bytes to read this loop.
     int bytes_to_write = rand.random() % 20 + 1;
-    // The bytes buffered at the begining of this loop.
+    // The bytes buffered at the beginning of this loop.
     bytes_buffered = new_bytes_buffered;
     // Bytes to flush upstream.
     int bytes_to_flush = std::min(rand.random() % 30 + 1, bytes_to_write + bytes_buffered);
diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc
index a5e43c737edd..7e3e8929133a 100644
--- a/test/common/network/dns_impl_test.cc
+++ b/test/common/network/dns_impl_test.cc
@@ -164,7 +164,7 @@ class TestDnsServerQuery {
       int answer_size = ips != nullptr ? ips->size() : 0;
       answer_size += !encodedCname.empty() ? 1 : 0;
 
-      // The response begins with the intial part of the request
+      // The response begins with the initial part of the request
       // (including the question section).
const size_t response_base_len = HFIXEDSZ + name_len + QFIXEDSZ; unsigned char response_base[response_base_len]; diff --git a/test/common/tcp_proxy/tcp_proxy_test.cc b/test/common/tcp_proxy/tcp_proxy_test.cc index 83e1c36eb94e..48451429e3b0 100644 --- a/test/common/tcp_proxy/tcp_proxy_test.cc +++ b/test/common/tcp_proxy/tcp_proxy_test.cc @@ -242,7 +242,7 @@ TEST(ConfigTest, Routes) { } { - // hit the route with all criterias present + // hit the route with all criteria present NiceMock connection; connection.local_address_ = std::make_shared("10.0.0.0", 10000); connection.remote_address_ = diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 19ea7e8e3e0f..6111ef75576a 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -25,7 +25,7 @@ namespace Upstream { class LoadBalancerTestBase : public ::testing::TestWithParam { protected: - // Run all tests aginst both priority 0 and priority 1 host sets, to ensure + // Run all tests against both priority 0 and priority 1 host sets, to ensure // all the load balancers have equivalent functonality for failover host sets. MockHostSet& hostSet() { return GetParam() ? host_set_ : failover_host_set_; } diff --git a/test/common/upstream/ring_hash_lb_test.cc b/test/common/upstream/ring_hash_lb_test.cc index fd852caa2b58..9b4fabcac2c3 100644 --- a/test/common/upstream/ring_hash_lb_test.cc +++ b/test/common/upstream/ring_hash_lb_test.cc @@ -42,7 +42,7 @@ class RingHashLoadBalancerTest : public ::testing::TestWithParam { lb_->initialize(); } - // Run all tests aginst both priority 0 and priority 1 host sets, to ensure + // Run all tests against both priority 0 and priority 1 host sets, to ensure // all the load balancers have equivalent functonality for failover host sets. MockHostSet& hostSet() { return GetParam() ? host_set_ : failover_host_set_; } diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 608d012216e5..e916cee942b5 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -390,7 +390,7 @@ TEST(StrictDnsClusterImplTest, HostRemovalActiveHealthSkipped) { resolver.dns_callback_(TestUtility::makeDnsResponse({"127.0.0.1", "127.0.0.2"})); // Verify that both endpoints are initially marked with FAILED_ACTIVE_HC, then - // clear the flag to simulate that these endpoints have been sucessfully health + // clear the flag to simulate that these endpoints have been successfully health // checked. { const auto& hosts = cluster.prioritySet().hostSetsPerPriority()[0]->hosts(); diff --git a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc index 6e8edc96cf95..7e8839609038 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter_test.cc @@ -616,7 +616,7 @@ TEST_F(GrpcJsonTranscoderFilterTest, TranscodingUnaryWithHttpBodyAsOutputAndSpli EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_.encodeData(response_data_first_part, false)); - // Finaly, since half of the response data buffer is moved already, here we can send the rest + // Finally, since half of the response data buffer is moved already, here we can send the rest // of it to the next data encoding step. 
EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_.encodeData(*response_data, false)); diff --git a/test/extensions/filters/http/gzip/gzip_filter_test.cc b/test/extensions/filters/http/gzip/gzip_filter_test.cc index 170aa143f790..0057e528dff6 100644 --- a/test/extensions/filters/http/gzip/gzip_filter_test.cc +++ b/test/extensions/filters/http/gzip/gzip_filter_test.cc @@ -175,14 +175,14 @@ TEST_F(GzipFilterTest, hasCacheControlNoTransform) { } } -// Verifies that compression is skipped when cache-control header has no-tranform value. +// Verifies that compression is skipped when cache-control header has no-transform value. TEST_F(GzipFilterTest, hasCacheControlNoTransformNoCompression) { doRequest({{":method", "get"}, {"accept-encoding", "gzip;q=0, deflate"}}, true); doResponseNoCompression( {{":method", "get"}, {"content-length", "256"}, {"cache-control", "no-transform"}}); } -// Verifies that compression is NOT skipped when cache-control header does NOT have no-tranform +// Verifies that compression is NOT skipped when cache-control header does NOT have no-transform // value. TEST_F(GzipFilterTest, hasCacheControlNoTransformCompression) { doRequest({{":method", "get"}, {"accept-encoding", "gzip, deflate"}}, true); diff --git a/test/extensions/filters/http/jwt_authn/all_verifier_test.cc b/test/extensions/filters/http/jwt_authn/all_verifier_test.cc index 96a8c9b43c7c..4bac5581a7b5 100644 --- a/test/extensions/filters/http/jwt_authn/all_verifier_test.cc +++ b/test/extensions/filters/http/jwt_authn/all_verifier_test.cc @@ -35,7 +35,7 @@ class AllVerifierTest : public ::testing::Test { MockVerifierCallbacks mock_cb_; }; -// tests rule that is just match no requries. +// tests rule that is just match no requires. TEST_F(AllVerifierTest, TestAllAllow) { proto_config_.mutable_rules(0)->clear_requires(); createVerifier(); diff --git a/test/extensions/filters/http/jwt_authn/group_verifier_test.cc b/test/extensions/filters/http/jwt_authn/group_verifier_test.cc index 38e86d81c727..44172c1ec9e2 100644 --- a/test/extensions/filters/http/jwt_authn/group_verifier_test.cc +++ b/test/extensions/filters/http/jwt_authn/group_verifier_test.cc @@ -307,7 +307,7 @@ TEST_F(GroupVerifierTest, TestRequiresAnyLastAuthOk) { EXPECT_FALSE(headers.has("other-auth-userinfo")); } -// Test requires any with both auth returning error. Requires any returns the error last recieved +// Test requires any with both auth returning error. Requires any returns the error last received // back to the caller. TEST_F(GroupVerifierTest, TestRequiresAnyAllAuthFailed) { MessageUtil::loadFromYaml(RequiresAnyConfig, proto_config_); @@ -385,7 +385,7 @@ TEST_F(GroupVerifierTest, TestAnyInAllBothInRequireAnyFailed) { } // Test contains a requires any which in turn has 2 requires all. Mock auths simulate JWKs cache -// hits and inline return of errors. Requires any returns the error last recieved back to the +// hits and inline return of errors. Requires any returns the error last received back to the // caller. 
TEST_F(GroupVerifierTest, TestAllInAnyBothRequireAllFailed) { MessageUtil::loadFromYaml(AnyWithAll, proto_config_); diff --git a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc index 516fb4998856..1adf9665b57c 100644 --- a/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc +++ b/test/extensions/filters/network/client_ssl_auth/client_ssl_auth_test.cc @@ -141,7 +141,7 @@ TEST_F(ClientSslAuthFilterTest, NoSsl) { setup(); Buffer::OwnedImpl dummy("hello"); - // Check no SSL case, mulitple iterations. + // Check no SSL case, multiple iterations. EXPECT_CALL(filter_callbacks_.connection_, ssl()).WillOnce(Return(nullptr)); EXPECT_EQ(Network::FilterStatus::Continue, instance_->onNewConnection()); EXPECT_EQ(Network::FilterStatus::Continue, instance_->onData(dummy, false)); diff --git a/test/extensions/filters/network/ext_authz/ext_authz_test.cc b/test/extensions/filters/network/ext_authz/ext_authz_test.cc index db8599cd0f09..39501f210697 100644 --- a/test/extensions/filters/network/ext_authz/ext_authz_test.cc +++ b/test/extensions/filters/network/ext_authz/ext_authz_test.cc @@ -204,7 +204,7 @@ TEST_F(ExtAuthzFilterTest, FailOpen) { TEST_F(ExtAuthzFilterTest, FailClose) { InSequence s; - // Explicitily set the failure_mode_allow to false. + // Explicitly set the failure_mode_allow to false. config_->setFailModeAllow(false); EXPECT_CALL(filter_callbacks_.connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); diff --git a/test/extensions/filters/network/thrift_proxy/compact_protocol_impl_test.cc b/test/extensions/filters/network/thrift_proxy/compact_protocol_impl_test.cc index 17ec70009c7d..70367c719caf 100644 --- a/test/extensions/filters/network/thrift_proxy/compact_protocol_impl_test.cc +++ b/test/extensions/filters/network/thrift_proxy/compact_protocol_impl_test.cc @@ -618,7 +618,7 @@ TEST_F(CompactProtocolTest, ReadListBegin) { addSeq(buffer, {0xFF, 0xFF, 0xFF, 0xFF, 0x1F}); // -1 EXPECT_THROW_WITH_MESSAGE(proto.readListBegin(buffer, elem_type, size), EnvoyException, - "negative compact procotol list/set size -1"); + "negative compact protocol list/set size -1"); EXPECT_EQ(elem_type, FieldType::String); EXPECT_EQ(size, 1); EXPECT_EQ(buffer.length(), 6); diff --git a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc index 4504397cef0d..7883a9cd43ea 100644 --- a/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/thrift_proxy/conn_manager_test.cc @@ -1114,7 +1114,7 @@ TEST_F(ThriftConnectionManagerTest, OnDataResumesWithNextFilterOnTransportEnd) { EXPECT_EQ(1U, store_.gauge("test.request_active").value()); } -// Tests multiple filters where one invokes sendLocalReply with a succesful reply. +// Tests multiple filters where one invokes sendLocalReply with a successful reply. 
TEST_F(ThriftConnectionManagerTest, OnDataWithFilterSendsLocalReply) { auto* filter = new NiceMock(); custom_filter_.reset(filter); diff --git a/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc b/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc index 46d0cf501341..5b1b77b9ae78 100644 --- a/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc +++ b/test/extensions/filters/network/thrift_proxy/twitter_protocol_impl_test.cc @@ -200,7 +200,7 @@ TEST_F(TwitterProtocolTest, Type) { EXPECT_EQ(proto.type(), ProtocolType::Twitter); } -// Tests readMessageBegin with insufficent data. +// Tests readMessageBegin with insufficient data. TEST_F(TwitterProtocolTest, ReadMessageBeginInsufficientData) { TwitterProtocolImpl proto; Buffer::OwnedImpl buffer; diff --git a/test/integration/autonomous_upstream.h b/test/integration/autonomous_upstream.h index fde036d21474..47d72486f5ba 100644 --- a/test/integration/autonomous_upstream.h +++ b/test/integration/autonomous_upstream.h @@ -6,7 +6,7 @@ class AutonomousUpstream; // A stream which automatically responds when the downstream request is // completely read. By default the response is 200: OK with 10 bytes of -// payload. This behavior can be overriden with custom request headers defined below. +// payload. This behavior can be overridden with custom request headers defined below. class AutonomousStream : public FakeStream { public: // The number of response bytes to send. Payload is randomized. diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index e0c99c249e3d..2d0a5ed81ebe 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -245,7 +245,7 @@ IntegrationStreamDecoderPtr HttpIntegrationTest::sendRequestAndWaitForResponse( response = codec_client_->makeHeaderOnlyRequest(request_headers); } waitForNextUpstreamRequest(); - // Send response headers, and end_stream if there is no respone body. + // Send response headers, and end_stream if there is no response body. upstream_request_->encodeHeaders(response_headers, response_size == 0); // Send any response data, with end_stream true. if (response_size) { @@ -1007,7 +1007,7 @@ void HttpIntegrationTest::testHittingEncoderFilterLimit() { codec_client_->sendData(*downstream_request, data, true); waitForNextUpstreamRequest(); - // Send the respone headers. + // Send the response headers. upstream_request_->encodeHeaders(Http::TestHeaderMapImpl{{":status", "200"}}, false); // Now send an overly large response body. At some point, too much data will diff --git a/test/main.cc b/test/main.cc index 34d2254070e8..9716712087d6 100644 --- a/test/main.cc +++ b/test/main.cc @@ -32,7 +32,7 @@ int main(int argc, char** argv) { // Select whether to test only for IPv4, IPv6, or both. The default is to // test for both. Options are {"v4only", "v6only", "all"}. Set // ENVOY_IP_TEST_VERSIONS to "v4only" if the system currently does not support IPv6 network - // operations. Similary set ENVOY_IP_TEST_VERSIONS to "v6only" if IPv4 has already been + // operations. Similarly set ENVOY_IP_TEST_VERSIONS to "v6only" if IPv4 has already been // phased out of network operations. Set to "all" (or don't set) if testing both // v4 and v6 addresses is desired. This feature is in progress and will be rolled out to all tests // in upcoming PRs. 
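For reference, a minimal sketch of how the ENVOY_IP_TEST_VERSIONS selection described in the test/main.cc comment above might be exercised locally. The bazel flag usage and the test target named here are illustrative assumptions, not something this patch adds:

  # Run a test suite with IPv6 cases disabled, e.g. on an IPv4-only host.
  # The target is only an example; any Envoy test target would do.
  bazel test //test/common/network:connection_impl_test \
    --test_env=ENVOY_IP_TEST_VERSIONS=v4only

  # Or export the variable when invoking a built test binary directly.
  ENVOY_IP_TEST_VERSIONS=v6only ./bazel-bin/test/common/network/connection_impl_test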
diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh
index a336d6949e44..73098b8b89d3 100755
--- a/test/run_envoy_bazel_coverage.sh
+++ b/test/run_envoy_bazel_coverage.sh
@@ -10,7 +10,7 @@ set -e
 [[ -z "${WORKSPACE}" ]] && WORKSPACE=envoy
 [[ -z "${VALIDATE_COVERAGE}" ]] && VALIDATE_COVERAGE=true
 
-# This is the target that will be run to generate coverage data. It can be overriden by consumer
+# This is the target that will be run to generate coverage data. It can be overridden by consumer
 # projects that want to run coverage on a different/combined target.
 [[ -z "${COVERAGE_TARGET}" ]] && COVERAGE_TARGET="//test/coverage:coverage_tests"
 
diff --git a/test/server/guarddog_impl_test.cc b/test/server/guarddog_impl_test.cc
index 6d7a4a9eb92c..b603c1c5772d 100644
--- a/test/server/guarddog_impl_test.cc
+++ b/test/server/guarddog_impl_test.cc
@@ -29,7 +29,7 @@ class GuardDogTestBase : public testing::Test {
 
 /**
  * Death test caveat: Because of the way we die gcov doesn't receive coverage
- * information from the forked process that is checked for succesful death.
+ * information from the forked process that is checked for successful death.
  * This means that the lines dealing with the calls to PANIC are not seen as
 * green in the coverage report. However, rest assured from the results of the
 * test: these lines are in fact covered.
diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc
index 222ab7fb1c0d..f641bf3c40fc 100644
--- a/test/test_common/environment.cc
+++ b/test/test_common/environment.cc
@@ -231,7 +231,7 @@ std::string TestEnvironment::temporaryFileSubstitute(const std::string& path,
   const std::string json_path = TestEnvironment::runfilesPath(path);
   std::string out_json_string = readFileToStringForTest(json_path);
 
-  // Substitude params.
+  // Substitute params.
   for (auto it : param_map) {
     const std::regex param_regex("\\{\\{ " + it.first + " \\}\\}");
     out_json_string = std::regex_replace(out_json_string, param_regex, it.second);
diff --git a/tools/check_spelling.sh b/tools/check_spelling.sh
new file mode 100755
index 000000000000..7fa31c5b314d
--- /dev/null
+++ b/tools/check_spelling.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+# Checks the source tree for commonly misspelled English words
+# check_spelling.sh
+
+# Why choose misspell?
+# https://github.com/client9/misspell#what-are-other-misspelling-correctors-and-whats-wrong-with-them
+
+set -u
+set -e
+
+VERSION="0.3.4"
+LINUX_MISSPELL_SHA="34d489dbc5ddb4dfd6d3cfac9fde8660e6c37e6c"
+MAC_MISSPELL_SHA="f2607e2297b9e8af562e384c38045033375c7433"
+TMP_DIR="/tmp"
+OS=""
+
+MISSPELL_ARGS="-error -o stderr"
+
+if [[ "$#" -lt 1 ]]; then
+  echo "Usage: $0 check|fix"
+  exit -1
+fi
+
+if [[ "$1" == "fix" ]]; then
+  MISSPELL_ARGS="-w"
+fi
+
+if [[ "$(uname)" == "Darwin" ]]; then
+  OS="mac"
+elif [[ "$(uname)" == "Linux" ]]; then
+  OS="linux"
+else
+  echo "Currently only macOS and Linux are supported"
+  exit 1
+fi
+
+SCRIPTPATH=$( cd "$(dirname "$0")" ; pwd -P )
+ROOTDIR="${SCRIPTPATH}/.."
+cd "$ROOTDIR"
+
+BIN_FILENAME="misspell_"${VERSION}"_"${OS}"_64bit.tar.gz"
+# Install tools we need
+if [[ ! -e "${TMP_DIR}/misspell" ]]; then
+  if ! 
wget https://github.com/client9/misspell/releases/download/v"${VERSION}"/"${BIN_FILENAME}" \
+    -O "${TMP_DIR}/${BIN_FILENAME}" --no-verbose --tries=3 -o "${TMP_DIR}/wget.log"; then
+    cat "${TMP_DIR}/wget.log"
+    exit -1
+  fi
+  tar -xvf "${TMP_DIR}/${BIN_FILENAME}" -C "${TMP_DIR}" &> /dev/null
+fi
+
+ACTUAL_SHA=""
+EXPECT_SHA=""
+
+if [[ "${OS}" == "linux" ]]; then
+  ACTUAL_SHA=$(sha1sum "${TMP_DIR}"/misspell|cut -d' ' -f1)
+  EXPECT_SHA="${LINUX_MISSPELL_SHA}"
+else
+  ACTUAL_SHA=$(shasum -a 1 "${TMP_DIR}"/misspell|cut -d' ' -f1)
+  EXPECT_SHA="${MAC_MISSPELL_SHA}"
+fi
+
+if [[ ! ${ACTUAL_SHA} == ${EXPECT_SHA} ]]; then
+  echo "Expected shasum is ${EXPECT_SHA}, but actual shasum is ${ACTUAL_SHA}"
+  exit 1
+fi
+
+chmod +x "${TMP_DIR}/misspell"
+
+# Spell checking
+# All the files to skip are defined in tools/spelling_skip_files.txt
+SPELLING_SKIP_FILES="${ROOTDIR}/tools/spelling_skip_files.txt"
+
+# All the whitelisted words are defined in tools/spelling_whitelist_words.txt
+SPELLING_WHITELIST_WORDS_FILE="${ROOTDIR}/tools/spelling_whitelist_words.txt"
+
+WHITELIST_WORDS=$(echo -n $(cat "${SPELLING_WHITELIST_WORDS_FILE}" | \
+  grep -v "^#"|grep -v "^$") | tr ' ' ',')
+SKIP_FILES=$(echo $(cat "${SPELLING_SKIP_FILES}") | sed "s| | -e |g")
+git ls-files | grep -v -e "${SKIP_FILES}" | xargs "${TMP_DIR}/misspell" -i \
+  "${WHITELIST_WORDS}" ${MISSPELL_ARGS}
diff --git a/tools/deprecate_version/deprecate_version.py b/tools/deprecate_version/deprecate_version.py
index e0ef621d74d6..99b7565f8957 100644
--- a/tools/deprecate_version/deprecate_version.py
+++ b/tools/deprecate_version/deprecate_version.py
@@ -3,7 +3,7 @@
 #
 #   sh tools/deprecate_version/deprecate_version.sh
 #
-# Direct usage (not recomended):
+# Direct usage (not recommended):
 #
 #   python tools/deprecate_version/deprecate_version.py
 #
diff --git a/tools/envoy_collect/envoy_collect.py b/tools/envoy_collect/envoy_collect.py
index b268651aafdd..e8e4ca93f90d 100755
--- a/tools/envoy_collect/envoy_collect.py
+++ b/tools/envoy_collect/envoy_collect.py
@@ -121,7 +121,7 @@ def run_envoy(envoy_shcmd_args, envoy_log_path, admin_address_path,
     print(envoy_shcmd)
 
   # Some process setup stuff to ensure the child process gets cleaned up properly if the
-  # collector dies and doesn't get its signals implicity.
+  # collector dies and doesn't get its signals implicitly.
   def envoy_preexec_fn():
     os.setpgrp()
     libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
@@ -247,7 +247,7 @@ def envoy_collect(parse_result, unknown_args):
   parser.add_argument(
       '--log-level',
       '-l',
-      help='Envoy log level. This will be overriden when invoking Envoy.')
+      help='Envoy log level. This will be overridden when invoking Envoy.')
   # envoy_collect specific args.
   parser.add_argument(
       '--performance',
diff --git a/tools/socket_passing.py b/tools/socket_passing.py
index cdbbcf0a9d99..eae1b3751cd3 100755
--- a/tools/socket_passing.py
+++ b/tools/socket_passing.py
@@ -1,11 +1,11 @@
 #!/usr/bin/env python2.7
 
 # This tool is a helper script that queries the admin address for all listener
-# addresses after envoy startup. (The admin adress is written out to a file by
+# addresses after envoy startup. (The admin address is written out to a file by
 # setting the -a flag in the envoy binary.) The script then outputs a new json
 # config file with updated listener addresses. This script is currently called
 # in the hot restart integration test to update listener addresses bound to
-# port 0 in the intial json config file.
+# port 0 in the initial json config file.
from collections import OrderedDict
diff --git a/tools/spelling_skip_files.txt b/tools/spelling_skip_files.txt
new file mode 100644
index 000000000000..6ddcc10646b8
--- /dev/null
+++ b/tools/spelling_skip_files.txt
@@ -0,0 +1 @@
+OWNERS.md
diff --git a/tools/spelling_whitelist_words.txt b/tools/spelling_whitelist_words.txt
new file mode 100644
index 000000000000..7594b1c61fe4
--- /dev/null
+++ b/tools/spelling_whitelist_words.txt
@@ -0,0 +1,5 @@
+# One word per line; these words are not spell checked.
+# You can add a comment next to each word to explain why it is not spell checked.
+
+# bazel keywords in bazel/cc_configure.bzl
+overriden
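For reference, a brief usage sketch of the spell-check tooling added above. The invocations mirror the "Usage: $0 check|fix" help text in tools/check_spelling.sh; the script changes into the repository root itself, so it can be run from anywhere inside an Envoy checkout. These sample commands are illustrative and are not part of the patch:

  # Report commonly misspelled words; the -error flag makes misspell exit
  # non-zero on any hit, so this can gate CI.
  ./tools/check_spelling.sh check

  # Rewrite misspelled words in place (passes -w to misspell).
  ./tools/check_spelling.sh fix

Words that should never be flagged can be added, one per line, to tools/spelling_whitelist_words.txt, and entire files can be excluded from checking via tools/spelling_skip_files.txt.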