diff --git a/src/core/telemetry/instrument.cc b/src/core/telemetry/instrument.cc
index 64f019ea67b7b..3d3ca3a84bd5b 100644
--- a/src/core/telemetry/instrument.cc
+++ b/src/core/telemetry/instrument.cc
@@ -43,6 +43,7 @@
 #include "absl/functional/function_ref.h"
 #include "absl/hash/hash.h"
 #include "absl/log/log.h"
+#include "absl/memory/memory.h"
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_join.h"
 #include "absl/strings/string_view.h"
@@ -75,7 +76,7 @@ InstrumentLabel::InstrumentLabel(absl::string_view label) {
   auto* current_value = labels[i].load(std::memory_order_acquire);
   while (current_value == nullptr) {
     if (label_copy == nullptr) {
-      label_copy.reset(new std::string(label));
+      label_copy = absl::make_unique<std::string>(label);
     }
     if (!labels[i].compare_exchange_weak(current_value, label_copy.get(),
                                          std::memory_order_acq_rel)) {
diff --git a/src/core/telemetry/instrument.h b/src/core/telemetry/instrument.h
index d3720c1c3a82f..cb1a99c7537d7 100644
--- a/src/core/telemetry/instrument.h
+++ b/src/core/telemetry/instrument.h
@@ -232,7 +232,7 @@ class InstrumentLabel {
   InstrumentLabel() : index_(kSentinelIndex) {}
   explicit InstrumentLabel(absl::string_view label);

-  InstrumentLabel(const char* label)
+  explicit InstrumentLabel(const char* label)
       : InstrumentLabel(absl::string_view(label)) {}

   static InstrumentLabel FromIndex(uint8_t index) {
@@ -634,7 +634,7 @@ class QueryableDomain {
  protected:
   QueryableDomain(std::string name, InstrumentLabelList label_names,
                   size_t map_shards_size)
-      : label_names_(std::move(label_names)),
+      : label_names_(label_names),
         map_shards_size_(label_names_.empty() ? 1 : map_shards_size),
         map_shards_(std::make_unique(map_shards_size_)),
         name_(std::move(name)) {}
@@ -881,7 +881,6 @@ class MetricsQuery {
   void Apply(InstrumentLabelList label_names, absl::FunctionRef fn,
              MetricsSink& sink) const;

- private:
   void ApplyLabelChecks(InstrumentLabelList label_names,
                         absl::FunctionRef fn,
                         MetricsSink& sink) const;
@@ -1162,7 +1161,7 @@ class InstrumentDomain {

  protected:
   template <typename... Label>
-  static const FixedInstrumentLabelList MakeLabels(
+  static FixedInstrumentLabelList MakeLabels(
       Label... labels) {
     InstrumentLabel l[] = {InstrumentLabel(labels)...};
     for (size_t i = 0; i < sizeof...(Label); ++i) {
diff --git a/test/core/telemetry/instrument_test.cc b/test/core/telemetry/instrument_test.cc
index bf326ef3eb3dd..0a9176296960f 100644
--- a/test/core/telemetry/instrument_test.cc
+++ b/test/core/telemetry/instrument_test.cc
@@ -1057,7 +1057,7 @@ TEST_F(InstrumentLabelTest, CopyAndMove) {
   EXPECT_EQ(label1, label2);
   EXPECT_EQ(label1.index(), label2.index());

-  InstrumentLabel label3(std::move(label1));
+  InstrumentLabel label3(label1);
   EXPECT_EQ(label2, label3);
   EXPECT_EQ(label2.index(), label3.index());

@@ -1067,7 +1067,7 @@ TEST_F(InstrumentLabelTest, CopyAndMove) {
   EXPECT_EQ(label2.index(), label4.index());

   InstrumentLabel label5("baz");
-  label5 = std::move(label2);
+  label5 = label2;
   EXPECT_EQ(label3, label5);
   EXPECT_EQ(label3.index(), label5.index());
 }
@@ -1114,7 +1114,7 @@ TEST_F(InstrumentLabelListTest, CopyAndMove) {
   EXPECT_EQ(list2[0].label(), "foo");
   EXPECT_EQ(list2[1].label(), "bar");

-  InstrumentLabelList list3(std::move(list1));
+  InstrumentLabelList list3(list1);
   EXPECT_EQ(list3.size(), 2);
   EXPECT_EQ(list3[0].label(), "foo");
   EXPECT_EQ(list3[1].label(), "bar");
@@ -1126,7 +1126,7 @@ TEST_F(InstrumentLabelListTest, CopyAndMove) {
   EXPECT_EQ(list4[1].label(), "bar");

   InstrumentLabelList list5;
-  list5 = std::move(list2);
+  list5 = list2;
   EXPECT_EQ(list5.size(), 2);
   EXPECT_EQ(list5[0].label(), "foo");
   EXPECT_EQ(list5[1].label(), "bar");
diff --git a/test/cpp/interop/client.cc b/test/cpp/interop/client.cc
index b0ea426f66fc8..b26463b7b2d41 100644
--- a/test/cpp/interop/client.cc
+++ b/test/cpp/interop/client.cc
@@ -229,7 +229,8 @@ int main(int argc, char** argv) {
         new grpc::testing::MetadataAndStatusLoggerInterceptorFactory());
   }
   if (test_case == "max_concurrent_streams_connection_scaling") {
-    arguments.SetServiceConfigJSON("{\"connectionScaling\":{\"maxConnectionsPerSubchannel\": 2}}");
+    arguments.SetServiceConfigJSON(
+        "{\"connectionScaling\":{\"maxConnectionsPerSubchannel\": 2}}");
   } else {
     std::string service_config_json =
         absl::GetFlag(FLAGS_service_config_json);
diff --git a/test/cpp/interop/interop_client.cc b/test/cpp/interop/interop_client.cc
index 043e97747bf56..9cf926d156477 100644
--- a/test/cpp/interop/interop_client.cc
+++ b/test/cpp/interop/interop_client.cc
@@ -1378,13 +1378,13 @@ bool InteropClient::DoMcsConnectionScaling() {
     LOG(ERROR) << "DoMcsConnectionScaling(): stream1->Write() failed.";
     return TransientFailureOrAbort();
   }
-  
+
   if (!stream1->Read(&response1)) {
     LOG(ERROR) << "DoMcsConnectionScaling(): stream1->Read() failed.";
     return TransientFailureOrAbort();
   }
   std::string clientSocketAddressInCall1 = response1.payload().body();
-  GRPC_CHECK(clientSocketAddressInCall1.length() > 0);
+  GRPC_CHECK(!clientSocketAddressInCall1.empty());

   VLOG(2) << "Sending Mcs connection scaling streaming rpc2 ...";

@@ -1402,7 +1402,7 @@ bool InteropClient::DoMcsConnectionScaling() {
   if (!stream2->Read(&response2)) {
     LOG(ERROR) << "DoMcsConnectionScaling(): stream2->Read() failed.";
     return TransientFailureOrAbort();
-  } 
+  }
   std::string clientSocketAddressInCall2 = response2.payload().body();

   // The same connection should have been used for both streams.
@@ -1424,7 +1424,7 @@ bool InteropClient::DoMcsConnectionScaling() {
   if (!stream3->Read(&response3)) {
     LOG(ERROR) << "DoMcsConnectionScaling(): stream3->Read() failed.";
     return TransientFailureOrAbort();
-  } 
+  }
   std::string clientSocketAddressInCall3 = response3.payload().body();

   // A new connection should have been used for the 3rd stream.
diff --git a/tools/run_tests/python_utils/dockerjob.py b/tools/run_tests/python_utils/dockerjob.py
index 8324b1b652043..9ece97c5b52bd 100755
--- a/tools/run_tests/python_utils/dockerjob.py
+++ b/tools/run_tests/python_utils/dockerjob.py
@@ -155,7 +155,9 @@ class DockerJob:
     """Encapsulates a job"""

     def __init__(self, spec):
-        print('DockerJob:__init__ called with spec ' + spec.shortname, flush=True)
+        print(
+            "DockerJob:__init__ called with spec " + spec.shortname, flush=True
+        )
         self._spec = spec
         self._job = jobset.Job(
             spec, newline_on_success=True, travis=True, add_env={}
diff --git a/tools/run_tests/python_utils/jobset.py b/tools/run_tests/python_utils/jobset.py
index 2f6375844c6b9..2a5e6a8222b89 100755
--- a/tools/run_tests/python_utils/jobset.py
+++ b/tools/run_tests/python_utils/jobset.py
@@ -280,7 +280,13 @@ class Job(object):
     def __init__(
         self, spec, newline_on_success, travis, add_env, quiet_success=False
     ):
-        print('Job called for spec ' + spec.shortname + ' quite_success: ' + str(quiet_success), flush=True)
+        print(
+            "Job called for spec "
+            + spec.shortname
+            + " quiet_success: "
+            + str(quiet_success),
+            flush=True,
+        )
         self._spec = spec
         self._newline_on_success = newline_on_success
         self._travis = travis
@@ -311,7 +317,12 @@ def start(self):
         # error during the creation of temporary file. By using
         # NamedTemporaryFile, we defer the removal of file and directory.
         self._logfile = tempfile.NamedTemporaryFile(delete=False)
-        print('Job ' + self._spec.shortname + ' started with log file ' + self._logfile.name)
+        print(
+            "Job "
+            + self._spec.shortname
+            + " started with log file "
+            + self._logfile.name
+        )
         env = dict(os.environ)
         env.update(self._spec.environ)
         env.update(self._add_env)
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 7d3f3b2b28272..18a4d485e62c0 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -1033,7 +1033,7 @@ def cloud_to_cloud_jobspec(
     docker_image=None,
     transport_security="tls",
     manual_cmd_log=None,
-    add_env={}
+    add_env={},
 ):
     """Creates jobspec for cloud-to-cloud interop test"""
     interop_only_options = [
@@ -1131,7 +1131,11 @@ def cloud_to_cloud_jobspec(


 def server_jobspec(
-    language, docker_image, transport_security="tls", manual_cmd_log=None, set_max_concurrent_streams_limit=False
+    language,
+    docker_image,
+    transport_security="tls",
+    manual_cmd_log=None,
+    set_max_concurrent_streams_limit=False,
 ):
     """Create jobspec for running a server"""
     container_name = dockerjob.random_name(
@@ -1715,27 +1719,27 @@ def aggregate_http2_results(stdout):
         (server_host, server_port) = server[1].split(":")
         server_addresses[server_name] = (server_host, server_port)

-# for server_name, server_address in list(server_addresses.items()):
-#     (server_host, server_port) = server_address
-#     server_language = _LANGUAGES.get(server_name, None)
-#     skip_server = []  # test cases unimplemented by server
-#     if server_language:
-#         skip_server = server_language.unimplemented_test_cases_server()
-#     for language in languages:
-#         for test_case in _TEST_CASES:
-#             if not test_case in language.unimplemented_test_cases():
-#                 if not test_case in skip_server:
-#                     test_job = cloud_to_cloud_jobspec(
-#                         language,
-#                         test_case,
-#                         server_name,
-#                         server_host,
-#                         server_port,
-#                         docker_image=docker_images.get(str(language)),
-#                         transport_security=args.transport_security,
-#                         manual_cmd_log=client_manual_cmd_log,
-#                     )
-#                     jobs.append(test_job)
+    # for server_name, server_address in list(server_addresses.items()):
+    #     (server_host, server_port) = server_address
+    #     server_language = _LANGUAGES.get(server_name, None)
+    #     skip_server = []  # test cases unimplemented by server
+    #     if server_language:
+    #         skip_server = server_language.unimplemented_test_cases_server()
+    #     for language in languages:
+    #         for test_case in _TEST_CASES:
+    #             if not test_case in language.unimplemented_test_cases():
+    #                 if not test_case in skip_server:
+    #                     test_job = cloud_to_cloud_jobspec(
+    #                         language,
+    #                         test_case,
+    #                         server_name,
+    #                         server_host,
+    #                         server_port,
+    #                         docker_image=docker_images.get(str(language)),
+    #                         transport_security=args.transport_security,
+    #                         manual_cmd_log=client_manual_cmd_log,
+    #                     )
+    #                     jobs.append(test_job)

     if args.http2_interop:
         for test_case in _HTTP2_TEST_CASES:
@@ -1809,7 +1813,9 @@ def aggregate_http2_results(stdout):

     if args.max_concurrent_streams_connection_scaling:
         if not args.use_docker:
-            print('MCS connection scaling test can only be run with --use-docker')
+            print(
+                "MCS connection scaling test can only be run with --use-docker"
+            )
         else:
             languages_for_mcs_cs = set(
                 _LANGUAGES[l]
@@ -1817,35 +1823,41 @@ def aggregate_http2_results(stdout):
                 if "all" in args.language or l in args.language
             )
             if len(languages_for_mcs_cs) > 0:
-                print('Using java for MCS connection scaling server ignoring any args for server languages')
+                print(
+                    "Using java for MCS connection scaling server, ignoring any args for server languages"
+                )
                 mcs_server_jobspec = server_jobspec(
-                    _LANGUAGES['java'],
-                    docker_images.get('java'),
+                    _LANGUAGES["java"],
+                    docker_images.get("java"),
                     args.transport_security,
                     manual_cmd_log=server_manual_cmd_log,
                     set_max_concurrent_streams_limit=True,
                 )
                 mcs_server_job = dockerjob.DockerJob(mcs_server_jobspec)
-                
+
                 for language in languages_for_mcs_cs:
                     test_job = cloud_to_cloud_jobspec(
                         language,
-                        'max_concurrent_streams_connection_scaling',
-                        'java-mcs',
-                        'localhost',
+                        "max_concurrent_streams_connection_scaling",
+                        "java-mcs",
+                        "localhost",
                         mcs_server_job.mapped_port(_DEFAULT_SERVER_PORT),
                         docker_image=docker_images.get(str(language)),
                         transport_security=args.transport_security,
                         manual_cmd_log=client_manual_cmd_log,
-                        add_env={'GRPC_EXPERIMENTAL_MAX_CONCURRENT_STREAMS_CONNECTION_SCALING': 'true',
-                                 'GRPC_EXPERIMENTS': 'subchannel_connection_scaling',
-                                 'GRPC_VERBOSITY': 'debug',
-                                 'GRPC_TRACE': 'all'},
+                        add_env={
+                            "GRPC_EXPERIMENTAL_MAX_CONCURRENT_STREAMS_CONNECTION_SCALING": "true",
+                            "GRPC_EXPERIMENTS": "subchannel_connection_scaling",
+                            "GRPC_VERBOSITY": "debug",
+                            "GRPC_TRACE": "all",
+                        },
                     )
                     jobs.append(test_job)
             else:
-                print('MCS connection scaling tests will be skipped since none of the supported client languages for MCS connection scaling testcases was specified')
-
+                print(
+                    "MCS connection scaling tests will be skipped since none of the supported client languages for MCS connection scaling test cases was specified"
+                )
+
     if not jobs:
         print("No jobs to run.")
         for image in docker_images.values():
@@ -1856,7 +1868,10 @@ def aggregate_http2_results(stdout):
         print("All tests will skipped --manual_run option is active.")

     if args.verbose:
-        print(str(len(jobs)) + " jobs to run: \n%s\n" % "\n".join(str(job) for job in jobs))
+        print(
+            str(len(jobs))
+            + " jobs to run: \n%s\n" % "\n".join(str(job) for job in jobs)
+        )

     num_failures, resultset = jobset.run(
         jobs,
@@ -1864,7 +1879,7 @@ def aggregate_http2_results(stdout):
         maxjobs=args.jobs,
         skip_jobs=args.manual_run,
     )
-    print('num_failures from jobset.run: ' + str(num_failures))
+    print("num_failures from jobset.run: " + str(num_failures))
     if args.bq_result_table and resultset:
         upload_interop_results_to_bq(resultset, args.bq_result_table)
     if num_failures:
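
Note on the instrument.cc hunk above: the `compare_exchange_weak` loop that the new `absl::make_unique<std::string>` call sits in is a lazy, lock-free publication pattern: each caller allocates at most one candidate string, and losers of the race free theirs. A minimal standalone sketch of that pattern, assuming a single atomic slot (the `slot` variable and the `InternLabel` name are illustrative, not the real instrument.cc API):

    #include <atomic>
    #include <memory>
    #include <string>

    #include "absl/memory/memory.h"
    #include "absl/strings/string_view.h"

    // One interned-label slot; published strings are intentionally never freed.
    std::atomic<std::string*> slot{nullptr};

    const std::string* InternLabel(absl::string_view label) {
      std::string* current = slot.load(std::memory_order_acquire);
      std::unique_ptr<std::string> copy;
      while (current == nullptr) {
        if (copy == nullptr) {
          // Allocate at most once per caller, as the patched line does.
          copy = absl::make_unique<std::string>(label);
        }
        if (slot.compare_exchange_weak(current, copy.get(),
                                       std::memory_order_acq_rel)) {
          return copy.release();  // won the race: the slot now owns the string
        }
        // Lost the race (or failed spuriously): compare_exchange_weak reloaded
        // `current`, so the loop exits once a winner has been published.
      }
      return current;  // another thread won; `copy` is freed by unique_ptr
    }

Holding the candidate in a `unique_ptr` is what keeps the loop leak-free: exactly one thread releases ownership into the slot, and every other thread's allocation is reclaimed automatically.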