From 30dc4c5824950830223f4379f4d3e15730b9eff7 Mon Sep 17 00:00:00 2001
From: "David B. Tucker"
Date: Mon, 15 Oct 2018 14:48:41 -0400
Subject: [PATCH 1/3] Add tests for Kubernetes watch reconnection.

---
 test/fake_http_server.cc    | 26 ++++++++++++++++-
 test/fake_http_server.h     |  4 ++-
 test/kubernetes_unittest.cc | 58 +++++++++++++++++++++++++++++++++----
 3 files changed, 80 insertions(+), 8 deletions(-)

diff --git a/test/fake_http_server.cc b/test/fake_http_server.cc
index 1797d164..85c836f5 100644
--- a/test/fake_http_server.cc
+++ b/test/fake_http_server.cc
@@ -89,11 +89,18 @@ void FakeServer::SendStreamResponse(const std::string& path,
   stream_it->second.SendToAllQueues(response);
 }
 
-void FakeServer::TerminateAllStreams() {
+bool FakeServer::TerminateAllStreams(time::seconds timeout) {
   // Send sentinel (empty string) to all queues.
   for (auto& s : handler_.path_streams) {
     s.second.SendToAllQueues("");
   }
+
+  // Block until all queues removed.
+  bool success = true;
+  for (auto& s : handler_.path_streams) {
+    success = success && s.second.WaitUntilNoWatchers(timeout);
+  }
+  return success;
 }
 
 void FakeServer::Handler::operator()(Server::request const &request,
@@ -116,6 +123,7 @@ void FakeServer::Handler::operator()(Server::request const &request,
     while (true) {
       std::string s = stream.GetNextResponse(&my_queue);
       if (s.empty()) {
+        stream.RemoveQueue(&my_queue);
         break;
       }
       connection->write(s);
@@ -195,6 +203,15 @@ void FakeServer::Handler::Stream::AddQueue(std::queue<std::string>* queue) {
   cv_.notify_all();
 }
 
+void FakeServer::Handler::Stream::RemoveQueue(std::queue<std::string>* queue) {
+  {
+    std::lock_guard<std::mutex> lk(mutex_);
+    queues_.erase(std::remove(queues_.begin(), queues_.end(), queue),
+                  queues_.end());
+  }
+  cv_.notify_all();
+}
+
 bool FakeServer::Handler::Stream::WaitForOneWatcher(time::seconds timeout) {
   std::unique_lock<std::mutex> queues_lock(mutex_);
   return cv_.wait_for(queues_lock,
@@ -202,6 +219,13 @@ bool FakeServer::Handler::Stream::WaitForOneWatcher(time::seconds timeout) {
                       timeout,
                       [this]{ return queues_.size() > 0; });
 }
 
+bool FakeServer::Handler::Stream::WaitUntilNoWatchers(time::seconds timeout) {
+  std::unique_lock<std::mutex> queues_lock(mutex_);
+  return cv_.wait_for(queues_lock,
+                      timeout,
+                      [this]{ return queues_.empty(); });
+}
+
 void FakeServer::Handler::Stream::SendToAllQueues(const std::string& response) {
   {
     std::lock_guard<std::mutex> lk(mutex_);
diff --git a/test/fake_http_server.h b/test/fake_http_server.h
index a1d4e020..b185b674 100644
--- a/test/fake_http_server.h
+++ b/test/fake_http_server.h
@@ -76,7 +76,7 @@ class FakeServer {
   void SendStreamResponse(const std::string& path,
                           const std::string& response);
   // Closes all open streams on the server.
-  void TerminateAllStreams();
+  bool TerminateAllStreams(time::seconds timeout);
 
  private:
   struct Handler;
@@ -95,7 +95,9 @@ class FakeServer {
   class Stream {
    public:
     void AddQueue(std::queue<std::string>* queue);
+    void RemoveQueue(std::queue<std::string>* queue);
     bool WaitForOneWatcher(time::seconds timeout);
+    bool WaitUntilNoWatchers(time::seconds timeout);
     void SendToAllQueues(const std::string& response);
     std::string GetNextResponse(std::queue<std::string>* queue);
 
diff --git a/test/kubernetes_unittest.cc b/test/kubernetes_unittest.cc
index da07b331..bdde1f46 100644
--- a/test/kubernetes_unittest.cc
+++ b/test/kubernetes_unittest.cc
@@ -1198,10 +1198,11 @@ TEST_F(KubernetesTestFakeServer, MetadataQuery) {
   EXPECT_EQ(pod_metadata->ToString(), m[3].metadata().metadata->ToString());
 }
 
-class KubernetesTestFakeServerOneWatchRetry
+class KubernetesTestFakeServerConfigurable
     : public KubernetesTestFakeServer {
  protected:
   virtual bool ClusterLevel() = 0;
+  virtual int WatchRetries() = 0;
   std::unique_ptr<Configuration> CreateConfig() override {
     return std::unique_ptr<Configuration>(
       new Configuration(std::istringstream(
@@ -1215,22 +1216,32 @@ class KubernetesTestFakeServerOneWatchRetry
         "KubernetesEndpointHost: " + server->GetUrl() + "\n"
         "KubernetesNodeName: TestNodeName\n"
         "MetadataIngestionRawContentVersion: TestVersion\n"
-        "KubernetesUpdaterWatchConnectionRetries: 1\n"
+        "KubernetesUpdaterWatchConnectionRetries: "
+          + std::to_string(WatchRetries()) + "\n"
        "KubernetesUseWatch: true\n"
     )));
   }
 };
 
 class KubernetesTestFakeServerOneWatchRetryNodeLevelMetadata
-    : public KubernetesTestFakeServerOneWatchRetry {
+    : public KubernetesTestFakeServerConfigurable {
  protected:
   bool ClusterLevel() override { return false; }
+  int WatchRetries() override { return 1; }
 };
 
 class KubernetesTestFakeServerOneWatchRetryClusterLevelMetadata
-    : public KubernetesTestFakeServerOneWatchRetry {
+    : public KubernetesTestFakeServerConfigurable {
  protected:
   bool ClusterLevel() override { return true; }
+  int WatchRetries() override { return 1; }
+};
+
+class KubernetesTestFakeServerThreeWatchRetriesNodeLevelMetadata
+    : public KubernetesTestFakeServerConfigurable {
+ protected:
+  bool ClusterLevel() override { return false; }
+  int WatchRetries() override { return 3; }
 };
 
 namespace {
 
 // Polls store until collected_at for resource is newer than
@@ -1870,7 +1881,7 @@ TEST_F(KubernetesTestFakeServerOneWatchRetryNodeLevelMetadata,
   TestPods(*server, store, pods_watch_path);
 
   // Terminate the hanging GETs on the server so that the updater will finish.
-  server->TerminateAllStreams();
+  EXPECT_TRUE(server->TerminateAllStreams(time::seconds(3)));
 }
 
 TEST_F(KubernetesTestFakeServerOneWatchRetryClusterLevelMetadata,
@@ -1902,7 +1913,42 @@ TEST_F(KubernetesTestFakeServerOneWatchRetryClusterLevelMetadata,
       *server, store, services_watch_path, endpoints_watch_path);
 
   // Terminate the hanging GETs on the server so that the updater will finish.
-  server->TerminateAllStreams();
+  EXPECT_TRUE(server->TerminateAllStreams(time::seconds(3)));
+}
+
+TEST_F(KubernetesTestFakeServerThreeWatchRetriesNodeLevelMetadata,
+       KubernetesUpdaterReconnection) {
+  const std::string nodes_watch_path =
+      "/api/v1/watch/nodes/TestNodeName?watch=true";
+  const std::string pods_watch_path =
+      "/api/v1/pods?fieldSelector=spec.nodeName%3DTestNodeName&watch=true";
+
+  // Create a fake server representing the Kubernetes master.
+  server->SetResponse("/api/v1/nodes?limit=1", "{}");
+  server->SetResponse("/api/v1/pods?limit=1", "{}");
+  server->AllowStream(nodes_watch_path);
+  server->AllowStream(pods_watch_path);
+
+  MetadataStore store(*config);
+  KubernetesUpdater updater(*config, /*health_checker=*/nullptr, &store);
+  updater.Start();
+
+  // Step 1: Wait for initial connection from watchers, then terminate
+  // all streams.
+  server->WaitForOneStreamWatcher(nodes_watch_path, time::seconds(3));
+  server->WaitForOneStreamWatcher(pods_watch_path, time::seconds(3));
+  EXPECT_TRUE(server->TerminateAllStreams(time::seconds(3)));
+
+  // Step 2: Wait for watchers to reconnect, then terminate again.
+  server->WaitForOneStreamWatcher(nodes_watch_path, time::seconds(3));
+  server->WaitForOneStreamWatcher(pods_watch_path, time::seconds(3));
+  EXPECT_TRUE(server->TerminateAllStreams(time::seconds(3)));
+
+  // Step 3: Wait for the final reconnection (the configuration specifies
+  // 3 retries), then terminate.
+  server->WaitForOneStreamWatcher(nodes_watch_path, time::seconds(3));
+  server->WaitForOneStreamWatcher(pods_watch_path, time::seconds(3));
+  EXPECT_TRUE(server->TerminateAllStreams(time::seconds(3)));
+}
 
 }  // namespace google

From c7aa0373156158794ed3165dae82aadf586fd99b Mon Sep 17 00:00:00 2001
From: "David B. Tucker"
Date: Tue, 16 Oct 2018 00:36:45 -0400
Subject: [PATCH 2/3] For Kubernetes watch, add tests of backoff and
 reconnection on failures.

---
 src/kubernetes.cc           | 37 +++++++++++++++++-----
 src/kubernetes.h            |  6 ++++
 src/time.h                  | 23 ++++++++++++++
 test/Makefile               | 22 ++++++++++---
 test/kubernetes_unittest.cc | 63 +++++++++++++++++++++++++++++++++++++
 5 files changed, 138 insertions(+), 13 deletions(-)

diff --git a/src/kubernetes.cc b/src/kubernetes.cc
index bc707e0b..390ebe34 100644
--- a/src/kubernetes.cc
+++ b/src/kubernetes.cc
@@ -78,8 +78,14 @@ class KubernetesReader::NonRetriableError
 
 KubernetesReader::KubernetesReader(const Configuration& config,
                                    HealthChecker* health_checker)
+  : KubernetesReader(config, health_checker, SleeperImpl::New()) {}
+
+KubernetesReader::KubernetesReader(const Configuration& config,
+                                   HealthChecker* health_checker,
+                                   std::unique_ptr<Sleeper> sleeper)
   : config_(config), environment_(config), health_checker_(health_checker),
-    service_account_directory_(kServiceAccountDirectory) {}
+    service_account_directory_(kServiceAccountDirectory),
+    sleeper_(std::move(sleeper)) {}
 
 std::string KubernetesReader::SecretPath(const std::string& secret) const {
   return service_account_directory_ + "/" + secret;
@@ -837,21 +843,29 @@ void KubernetesReader::WatchMaster(
       if (verbose) {
         LOG(INFO) << "WatchMaster(" << name << ") completed " << body(response);
       }
+      if (status(response) >= 300) {
+        throw boost::system::system_error(
+            boost::system::errc::make_error_code(
+                boost::system::errc::not_connected),
+            format::Substitute("Server responded with '{{message}}' ({{code}})",
+                               {{"message", status_message(response)},
+                                {"code", format::str(status(response))}}));
+      }
       // Connection closed without an error; reset failure count.
       failures = 0;
     } catch (const boost::system::system_error& e) {
       LOG(ERROR) << "Failed to query " << endpoint << ": " << e.what();
-      ++failures;
-      if (failures > config_.KubernetesUpdaterWatchMaxConnectionFailures()) {
+      if (failures >= config_.KubernetesUpdaterWatchMaxConnectionFailures()) {
         LOG(ERROR) << "WatchMaster(" << name << "): Exiting after "
                    << failures << " failures";
         throw QueryException(endpoint + " -> " + e.what());
       }
+      ++failures;
       double backoff = fmin(pow(1.5, failures), 30);
       if (verbose) {
         LOG(INFO) << "Backing off for " << backoff << " seconds";
       }
-      std::this_thread::sleep_for(time::seconds(backoff));
+      sleeper_->SleepFor(backoff);
     }
   }
 }
@@ -1310,10 +1324,17 @@ void KubernetesReader::WatchEndpoints(
 KubernetesUpdater::KubernetesUpdater(const Configuration& config,
                                      HealthChecker* health_checker,
                                      MetadataStore* store)
-  : reader_(config, health_checker), PollingMetadataUpdater(
-      config, store, "KubernetesUpdater",
-      config.KubernetesUpdaterIntervalSeconds(),
-      [=]() { return reader_.MetadataQuery(); }) { }
+  : KubernetesUpdater(config, health_checker, store, SleeperImpl::New()) {}
+
+KubernetesUpdater::KubernetesUpdater(const Configuration& config,
+                                     HealthChecker* health_checker,
+                                     MetadataStore* store,
+                                     std::unique_ptr<Sleeper> sleeper)
+  : reader_(config, health_checker, std::move(sleeper)),
+    PollingMetadataUpdater(
+        config, store, "KubernetesUpdater",
+        config.KubernetesUpdaterIntervalSeconds(),
+        [=]() { return reader_.MetadataQuery(); }) { }
 
 void KubernetesUpdater::ValidateDynamicConfiguration() const
     throw(ConfigurationValidationError) {
diff --git a/src/kubernetes.h b/src/kubernetes.h
index a50c5c21..6e219bb7 100644
--- a/src/kubernetes.h
+++ b/src/kubernetes.h
@@ -40,6 +40,9 @@ class KubernetesReader {
  public:
   KubernetesReader(const Configuration& config,
                    HealthChecker* health_checker);
+  KubernetesReader(const Configuration& config,
+                   HealthChecker* health_checker,
+                   std::unique_ptr<Sleeper> sleeper);
 
   // A Kubernetes metadata query function.
   std::vector<MetadataUpdater::ResourceMetadata> MetadataQuery() const;
@@ -218,12 +221,15 @@ class KubernetesReader {
   HealthChecker* health_checker_;
   Environment environment_;
   std::string service_account_directory_;
+  std::unique_ptr<Sleeper> sleeper_;
 };
 
 class KubernetesUpdater : public PollingMetadataUpdater {
  public:
   KubernetesUpdater(const Configuration& config, HealthChecker* health_checker,
                     MetadataStore* store);
+  KubernetesUpdater(const Configuration& config, HealthChecker* health_checker,
+                    MetadataStore* store, std::unique_ptr<Sleeper> sleeper);
 
   ~KubernetesUpdater() {
     if (node_watch_thread_.joinable()) {
       node_watch_thread_.join();
diff --git a/src/time.h b/src/time.h
index 5b2b888a..5577552a 100644
--- a/src/time.h
+++ b/src/time.h
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 
 #include "logging.h"
 
@@ -146,6 +147,28 @@ class ExpirationImpl : public Expiration {
   typename Clock::time_point token_expiration_;
 };
 
+// Abstract class representing an object that can sleep.
+// Used for mocking out std::this_thread::sleep_for() in tests.
+class Sleeper {
+ public:
+  virtual ~Sleeper() = default;
+
+  // Sleeps for the given number of seconds.
+  virtual void SleepFor(double seconds) = 0;
+};
+
+// Implementation of the Sleeper interface using std::this_thread::sleep_for().
+class SleeperImpl : public Sleeper {
+ public:
+  static std::unique_ptr<SleeperImpl> New() {
+    return std::unique_ptr<SleeperImpl>(new SleeperImpl());
+  }
+
+  void SleepFor(double seconds) override {
+    std::this_thread::sleep_for(time::seconds(seconds));
+  }
+};
+
 }
 
 #endif  // TIME_H_
diff --git a/test/Makefile b/test/Makefile
index 32ce7417..38352e56 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -20,6 +20,10 @@ GTEST_HEADERS=$(GTEST_DIR)/include/gtest/*.h \
               $(GTEST_DIR)/include/gtest/internal/*.h
 GTEST_SRCS_=$(GTEST_SOURCEDIR)/*.cc $(GTEST_SOURCEDIR)/*.h $(GTEST_HEADERS)
 GMOCK_DIR=$(LIBDIR)/googletest/googlemock
+GMOCK_SOURCEDIR=$(GMOCK_DIR)/src
+GMOCK_HEADERS=$(GMOCK_DIR)/include/gmock/*.h \
+              $(GMOCK_DIR)/include/gmock/internal/*.h
+GMOCK_SRCS_=$(GMOCK_SOURCEDIR)/*.cc $(GMOCK_SOURCEDIR)/*.h $(GMOCK_HEADERS)
 
 # TODO: Factor out the common variables.
 CPP_NETLIB_DIR=$(LIBDIR)/cpp-netlib
@@ -85,6 +89,7 @@ UTIL_SOURCES=$(TEST_DIR)/fake_clock.cc $(TEST_DIR)/fake_http_server.cc
 UTIL_OBJS=$(UTIL_SOURCES:$(TEST_DIR)/%.cc=%.o)
 
 GTEST_LIB=gtest_lib.a
+GMOCK_LIB=gmock_lib.a
 
 all: $(SRC_DIR)/build-cpp-netlib $(SRC_DIR)/build-yaml-cpp $(TESTS)
 
@@ -97,14 +102,14 @@ test: $(TESTS)
 	done
 
 clean:
-	$(RM) $(TESTS) $(GTEST_LIB) $(TEST_OBJS) $(UTIL_OBJS)
+	$(RM) $(TESTS) $(GTEST_LIB) $(GMOCK_LIB) $(TEST_OBJS) $(UTIL_OBJS)
 
 purge: clean
 	$(RM) init-submodules
-	(cd .. && git submodule deinit -f $(GTEST_MODULE:../%=%))
+	(cd .. && git submodule deinit -f $(GTEST_MODULE:../%=%) && git submodule deinit -f $(GMOCK_MODULE:../%=%))
 
 init-submodules:
-	(cd .. && git submodule update --init $(GTEST_MODULE:../%=%))
+	(cd .. && git submodule update --init $(GTEST_MODULE:../%=%) && git submodule update --init $(GMOCK_MODULE:../%=%))
 	touch init-submodules
 
 $(SRC_DIR)/init-submodules:
@@ -121,21 +126,28 @@ $(SRC_DIR)/%.o: $(SRC_DIR)/build-cpp-netlib $(SRC_DIR)/build-yaml-cpp $(SRC_DIR)
 	cd $(SRC_DIR) && $(MAKE) $(@:$(SRC_DIR)/%=%)
 
 $(GTEST_SOURCEDIR)/gtest-all.cc $(GTEST_SOURCEDIR)/gtest_main.cc: init-submodules
+$(GMOCK_SOURCEDIR)/gmock-all.cc $(GMOCK_SOURCEDIR)/gmock_main.cc: init-submodules
 
 gtest-all.o: $(GTEST_SOURCEDIR)/gtest-all.cc
 	$(CXX) -c $(CPPFLAGS) -I$(GTEST_DIR) $(CXXFLAGS) -o $@ $^
+gmock-all.o: $(GMOCK_SOURCEDIR)/gmock-all.cc
+	$(CXX) -c $(CPPFLAGS) -I$(GMOCK_DIR) $(CXXFLAGS) -o $@ $^
 gtest_main.o: $(GTEST_SOURCEDIR)/gtest_main.cc
 	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -c -o $@ $^
+gmock_main.o: $(GMOCK_SOURCEDIR)/gmock_main.cc
+	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -c -o $@ $^
 
 $(GTEST_LIB): gtest-all.o gtest_main.o
 	$(AR) $(ARFLAGS) $@ $^
+$(GMOCK_LIB): gmock-all.o gmock_main.o
+	$(AR) $(ARFLAGS) $@ $^
 
-$(TESTS): $(GTEST_LIB) $(CPP_NETLIB_LIBS) $(YAML_CPP_LIBS)
+$(TESTS): $(GTEST_LIB) $(GMOCK_LIB) $(CPP_NETLIB_LIBS) $(YAML_CPP_LIBS)
 
 # All unittest objects depend on GTEST_LIB.
 # Some headers need CPP_NETLIB_LIBS and YAML_CPP_LIBS.
-$(TESTS:%=%.o): $(GTEST_LIB) $(CPP_NETLIB_LIBS) $(YAML_CPP_LIBS)
+$(TESTS:%=%.o): $(GTEST_LIB) $(GMOCK_LIB) $(CPP_NETLIB_LIBS) $(YAML_CPP_LIBS)
 
 api_server_unittest: api_server_unittest.o $(SRC_DIR)/api_server.o $(SRC_DIR)/configuration.o $(SRC_DIR)/store.o $(SRC_DIR)/json.o $(SRC_DIR)/resource.o $(SRC_DIR)/logging.o $(SRC_DIR)/time.o $(SRC_DIR)/health_checker.o
 	$(CXX) $(LDFLAGS) $^ $(LDLIBS) -o $@
diff --git a/test/kubernetes_unittest.cc b/test/kubernetes_unittest.cc
index bdde1f46..fe463583 100644
--- a/test/kubernetes_unittest.cc
+++ b/test/kubernetes_unittest.cc
@@ -20,6 +20,7 @@
 #include "../src/updater.h"
 #include "environment_util.h"
 #include "fake_http_server.h"
+#include "gmock/gmock.h"
 #include "gtest/gtest.h"
 #include "temp_file.h"
 
@@ -1202,6 +1203,7 @@ class KubernetesTestFakeServerConfigurable
     : public KubernetesTestFakeServer {
  protected:
   virtual bool ClusterLevel() = 0;
+  virtual int WatchMaxFailures() = 0;
   virtual int WatchRetries() = 0;
   std::unique_ptr<Configuration> CreateConfig() override {
     return std::unique_ptr<Configuration>(
       new Configuration(std::istringstream(
@@ -1218,6 +1220,8 @@ class KubernetesTestFakeServerConfigurable
         "MetadataIngestionRawContentVersion: TestVersion\n"
         "KubernetesUpdaterWatchConnectionRetries: "
           + std::to_string(WatchRetries()) + "\n"
+        "KubernetesUpdaterWatchMaxConnectionFailures: "
+          + std::to_string(WatchMaxFailures()) + "\n"
         "KubernetesUseWatch: true\n"
     )));
   }
@@ -1227,6 +1231,7 @@ class KubernetesTestFakeServerOneWatchRetryNodeLevelMetadata
     : public KubernetesTestFakeServerConfigurable {
  protected:
   bool ClusterLevel() override { return false; }
+  int WatchMaxFailures() override { return 15; }
   int WatchRetries() override { return 1; }
 };
 
@@ -1234,6 +1239,7 @@ class KubernetesTestFakeServerOneWatchRetryClusterLevelMetadata
     : public KubernetesTestFakeServerConfigurable {
  protected:
   bool ClusterLevel() override { return true; }
+  int WatchMaxFailures() override { return 15; }
   int WatchRetries() override { return 1; }
 };
 
@@ -1241,9 +1247,18 @@ class KubernetesTestFakeServerThreeWatchRetriesNodeLevelMetadata
     : public KubernetesTestFakeServerConfigurable {
  protected:
   bool ClusterLevel() override { return false; }
+  int WatchMaxFailures() override { return 15; }
   int WatchRetries() override { return 3; }
 };
 
+class KubernetesTestFakeServerMaxThreeFailuresNodeLevelMetadata
+    : public KubernetesTestFakeServerConfigurable {
+ protected:
+  bool ClusterLevel() override { return false; }
+  int WatchMaxFailures() override { return 3; }
+  int WatchRetries() override { /* infinite retries */ return 0; }
+};
+
 namespace {
 
 // Polls store until collected_at for resource is newer than
@@ -1951,4 +1966,52 @@ TEST_F(KubernetesTestFakeServerThreeWatchRetriesNodeLevelMetadata,
   EXPECT_TRUE(server->TerminateAllStreams(time::seconds(3)));
 }
 
+namespace {
+class MockSleeper : public Sleeper {
+ public:
+  MOCK_METHOD1(SleepFor, void(double seconds));
+};
+
+// Polls health_checker until it has at least one unhealthy component.
+// Returns false if no unhealthy components found after 3 seconds
+// (polling every 100 millis).
+bool WaitForUnhealthyComponents(const HealthChecker& health_checker) {
+  for (int i = 0; i < 30; i++) {
+    if (health_checker.UnhealthyComponents().size() > 0) {
+      return true;
+    }
+    std::this_thread::sleep_for(std::chrono::milliseconds(100));
+  }
+  return false;
+}
+}
+
+TEST_F(KubernetesTestFakeServerMaxThreeFailuresNodeLevelMetadata,
+       KubernetesUpdaterBackoffOnFailures) {
+  // Create a fake server representing the Kubernetes master, but
+  // don't register streams for the nodes or pods watch paths, so they
+  // will return 404s.
+  //
+  // This will cause the updater to back off 3 times and then give up.
+  server->SetResponse("/api/v1/nodes?limit=1", "{}");
+  server->SetResponse("/api/v1/pods?limit=1", "{}");
+
+  MetadataStore store(*config);
+  HealthChecker health_checker(*config, store);
+  // Note: These expectations don't check the order in which the
+  // SleepFor() calls are invoked. Each is called twice, once each
+  // for the nodes & pods watchers.
+  auto sleeper = std::unique_ptr<MockSleeper>(new MockSleeper());
+  EXPECT_CALL(*sleeper, SleepFor(1.5)).Times(2);
+  EXPECT_CALL(*sleeper, SleepFor(2.25)).Times(2);
+  EXPECT_CALL(*sleeper, SleepFor(3.375)).Times(2);
+  KubernetesUpdater updater(*config, &health_checker, &store, std::move(sleeper));
+  updater.Start();
+
+  EXPECT_TRUE(WaitForUnhealthyComponents(health_checker));
+  EXPECT_THAT(health_checker.UnhealthyComponents(),
+              ::testing::UnorderedElementsAre("kubernetes_node_thread",
+                                              "kubernetes_pod_thread"));
+}
+
 }  // namespace google

From 2e293d4606aef1e3c98278eb806d68529a9d66e8 Mon Sep 17 00:00:00 2001
From: "David B. Tucker"
Date: Fri, 7 Dec 2018 11:57:35 -0500
Subject: [PATCH 3/3] Address comments.

---
 test/Makefile               | 19 +++++++------------
 test/kubernetes_unittest.cc | 28 +++++++++++++++-------------
 2 files changed, 22 insertions(+), 25 deletions(-)

diff --git a/test/Makefile b/test/Makefile
index 38352e56..d4ae3c39 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -89,7 +89,6 @@ UTIL_SOURCES=$(TEST_DIR)/fake_clock.cc $(TEST_DIR)/fake_http_server.cc
 UTIL_OBJS=$(UTIL_SOURCES:$(TEST_DIR)/%.cc=%.o)
 
 GTEST_LIB=gtest_lib.a
-GMOCK_LIB=gmock_lib.a
 
 all: $(SRC_DIR)/build-cpp-netlib $(SRC_DIR)/build-yaml-cpp $(TESTS)
 
@@ -102,14 +101,14 @@ test: $(TESTS)
 	done
 
 clean:
-	$(RM) $(TESTS) $(GTEST_LIB) $(GMOCK_LIB) $(TEST_OBJS) $(UTIL_OBJS)
+	$(RM) $(TESTS) $(GTEST_LIB) $(TEST_OBJS) $(UTIL_OBJS)
 
 purge: clean
 	$(RM) init-submodules
-	(cd .. && git submodule deinit -f $(GTEST_MODULE:../%=%) && git submodule deinit -f $(GMOCK_MODULE:../%=%))
+	(cd .. && git submodule deinit -f $(GTEST_MODULE:../%=%))
 
 init-submodules:
-	(cd .. && git submodule update --init $(GTEST_MODULE:../%=%) && git submodule update --init $(GMOCK_MODULE:../%=%))
+	(cd .. && git submodule update --init $(GTEST_MODULE:../%=%))
 	touch init-submodules
 
 $(SRC_DIR)/init-submodules:
@@ -125,7 +124,7 @@ $(YAML_CPP_LIBS): $(SRC_DIR)/build-yaml-cpp
 $(SRC_DIR)/%.o: $(SRC_DIR)/build-cpp-netlib $(SRC_DIR)/build-yaml-cpp $(SRC_DIR)/%.cc
 	cd $(SRC_DIR) && $(MAKE) $(@:$(SRC_DIR)/%=%)
 
-$(GTEST_SOURCEDIR)/gtest-all.cc $(GTEST_SOURCEDIR)/gtest_main.cc: init-submodules
+$(GTEST_SOURCEDIR)/gtest-all.cc: init-submodules
 $(GMOCK_SOURCEDIR)/gmock-all.cc $(GMOCK_SOURCEDIR)/gmock_main.cc: init-submodules
 
 gtest-all.o: $(GTEST_SOURCEDIR)/gtest-all.cc
@@ -133,21 +132,17 @@ gtest-all.o: $(GTEST_SOURCEDIR)/gtest-all.cc
 gmock-all.o: $(GMOCK_SOURCEDIR)/gmock-all.cc
 	$(CXX) -c $(CPPFLAGS) -I$(GMOCK_DIR) $(CXXFLAGS) -o $@ $^
-gtest_main.o: $(GTEST_SOURCEDIR)/gtest_main.cc
-	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -c -o $@ $^
 gmock_main.o: $(GMOCK_SOURCEDIR)/gmock_main.cc
 	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -c -o $@ $^
 
-$(GTEST_LIB): gtest-all.o gtest_main.o
-	$(AR) $(ARFLAGS) $@ $^
-$(GMOCK_LIB): gmock-all.o gmock_main.o
+$(GTEST_LIB): gtest-all.o gmock-all.o gmock_main.o
 	$(AR) $(ARFLAGS) $@ $^
 
-$(TESTS): $(GTEST_LIB) $(GMOCK_LIB) $(CPP_NETLIB_LIBS) $(YAML_CPP_LIBS)
+$(TESTS): $(GTEST_LIB) $(CPP_NETLIB_LIBS) $(YAML_CPP_LIBS)
 
 # All unittest objects depend on GTEST_LIB.
 # Some headers need CPP_NETLIB_LIBS and YAML_CPP_LIBS.
-$(TESTS:%=%.o): $(GTEST_LIB) $(GMOCK_LIB) $(CPP_NETLIB_LIBS) $(YAML_CPP_LIBS)
+$(TESTS:%=%.o): $(GTEST_LIB) $(CPP_NETLIB_LIBS) $(YAML_CPP_LIBS)
 
 api_server_unittest: api_server_unittest.o $(SRC_DIR)/api_server.o $(SRC_DIR)/configuration.o $(SRC_DIR)/store.o $(SRC_DIR)/json.o $(SRC_DIR)/resource.o $(SRC_DIR)/logging.o $(SRC_DIR)/time.o $(SRC_DIR)/health_checker.o
 	$(CXX) $(LDFLAGS) $^ $(LDLIBS) -o $@
diff --git a/test/kubernetes_unittest.cc b/test/kubernetes_unittest.cc
index fe463583..2f6408f5 100644
--- a/test/kubernetes_unittest.cc
+++ b/test/kubernetes_unittest.cc
@@ -1262,12 +1262,12 @@ class KubernetesTestFakeServerMaxThreeFailuresNodeLevelMetadata
 namespace {
 
 // Polls store until collected_at for resource is newer than
-// last_timestamp. Returns false if newer timestamp not found after 3
-// seconds (polling every 100 millis).
+// last_timestamp. Returns false if a newer timestamp is not found after
+// at least 3 seconds (polling every 100 millis or more).
 bool WaitForNewerCollectionTimestamp(const MetadataStore& store,
                                      const MonitoredResource& resource,
                                      Timestamp last_timestamp) {
-  for (int i = 0; i < 30; i++){
+  for (int i = 0; i < 30; i++) {
     const auto metadata_map = store.GetMetadataMap();
     const auto m = metadata_map.find(resource);
     if (m != metadata_map.end() && m->second.collected_at > last_timestamp) {
@@ -1872,6 +1872,11 @@ void TestServicesAndEndpoints(testing::FakeServer& server, MetadataStore& store,
   }
 }
 
+void InitHealthyServer(testing::FakeServer* server) {
+  server->SetResponse("/api/v1/nodes?limit=1", "{}");
+  server->SetResponse("/api/v1/pods?limit=1", "{}");
+}
+
 }  // namespace
 
 TEST_F(KubernetesTestFakeServerOneWatchRetryNodeLevelMetadata,
@@ -1882,8 +1887,7 @@ TEST_F(KubernetesTestFakeServerOneWatchRetryNodeLevelMetadata,
       "/api/v1/pods?fieldSelector=spec.nodeName%3DTestNodeName&watch=true";
 
   // Create a fake server representing the Kubernetes master.
-  server->SetResponse("/api/v1/nodes?limit=1", "{}");
-  server->SetResponse("/api/v1/pods?limit=1", "{}");
+  InitHealthyServer(server.get());
   server->AllowStream(nodes_watch_path);
   server->AllowStream(pods_watch_path);
 
@@ -1910,8 +1914,7 @@ TEST_F(KubernetesTestFakeServerOneWatchRetryClusterLevelMetadata,
   const std::string endpoints_watch_path =
       "/api/v1/watch/endpoints/?watch=true";
 
-  server->SetResponse("/api/v1/nodes?limit=1", "{}");
-  server->SetResponse("/api/v1/pods?limit=1", "{}");
+  InitHealthyServer(server.get());
   server->AllowStream(nodes_watch_path);
   server->AllowStream(pods_watch_path);
   server->AllowStream(services_watch_path);
@@ -1939,8 +1942,7 @@ TEST_F(KubernetesTestFakeServerThreeWatchRetriesNodeLevelMetadata,
       "/api/v1/pods?fieldSelector=spec.nodeName%3DTestNodeName&watch=true";
 
   // Create a fake server representing the Kubernetes master.
-  server->SetResponse("/api/v1/nodes?limit=1", "{}");
-  server->SetResponse("/api/v1/pods?limit=1", "{}");
+  InitHealthyServer(server.get());
   server->AllowStream(nodes_watch_path);
   server->AllowStream(pods_watch_path);
 
@@ -1975,7 +1977,8 @@ class MockSleeper : public Sleeper {
 // Polls health_checker until it has at least one unhealthy component.
 // Returns false if no unhealthy components found after 3 seconds
 // (polling every 100 millis).
-bool WaitForUnhealthyComponents(const HealthChecker& health_checker) {
+bool WaitForUnhealthyComponentsAtLeast3Seconds(
+    const HealthChecker& health_checker) {
   for (int i = 0; i < 30; i++) {
     if (health_checker.UnhealthyComponents().size() > 0) {
       return true;
     }
     std::this_thread::sleep_for(std::chrono::milliseconds(100));
@@ -1993,8 +1996,7 @@ TEST_F(KubernetesTestFakeServerMaxThreeFailuresNodeLevelMetadata,
   // will return 404s.
   //
   // This will cause the updater to back off 3 times and then give up.
-  server->SetResponse("/api/v1/nodes?limit=1", "{}");
-  server->SetResponse("/api/v1/pods?limit=1", "{}");
+  InitHealthyServer(server.get());
 
   MetadataStore store(*config);
   HealthChecker health_checker(*config, store);
@@ -2008,7 +2010,7 @@ TEST_F(KubernetesTestFakeServerMaxThreeFailuresNodeLevelMetadata,
   KubernetesUpdater updater(*config, &health_checker, &store, std::move(sleeper));
   updater.Start();
 
-  EXPECT_TRUE(WaitForUnhealthyComponents(health_checker));
+  EXPECT_TRUE(WaitForUnhealthyComponentsAtLeast3Seconds(health_checker));
   EXPECT_THAT(health_checker.UnhealthyComponents(),
               ::testing::UnorderedElementsAre("kubernetes_node_thread",
                                               "kubernetes_pod_thread"));