reconciler_test.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpointslice

import (
	"context"
	"fmt"
	"reflect"
	"strings"
	"testing"
	"time"

	dto "github.com/prometheus/client_model/go"
	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1beta1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	corelisters "k8s.io/client-go/listers/core/v1"
	k8stesting "k8s.io/client-go/testing"
	compmetrics "k8s.io/component-base/metrics"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/endpointslice/metrics"
	utilpointer "k8s.io/utils/pointer"
)
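
// defaultMaxEndpointsPerSlice matches the default value of the controller's
// --max-endpoints-per-slice flag (100 endpoints per slice).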
var defaultMaxEndpointsPerSlice = int32(100)

// Even when there are no pods, we want to have a placeholder slice for each service
func TestReconcileEmpty(t *testing.T) {
	client := newClientset()
	setupMetrics()
	namespace := "test"
	svc, _ := newServiceAndEndpointMeta("foo", namespace)

	r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, defaultMaxEndpointsPerSlice)
	reconcileHelper(t, r, &svc, []*corev1.Pod{}, []*discovery.EndpointSlice{}, time.Now())
	expectActions(t, client.Actions(), 1, "create", "endpointslices")

	slices := fetchEndpointSlices(t, client, namespace)
	assert.Len(t, slices, 1, "Expected 1 endpoint slice")
	assert.Regexp(t, "^"+svc.Name, slices[0].Name)
	assert.Equal(t, svc.Name, slices[0].Labels[discovery.LabelServiceName])
	assert.EqualValues(t, []discovery.EndpointPort{}, slices[0].Ports)
	assert.EqualValues(t, []discovery.Endpoint{}, slices[0].Endpoints)
	expectMetrics(t, expectedMetrics{desiredSlices: 1, actualSlices: 1, desiredEndpoints: 0, addedPerSync: 0, removedPerSync: 0, numCreated: 1, numUpdated: 0, numDeleted: 0})
}

// Given a single pod matching a service selector and no existing endpoint slices,
// a slice should be created
func TestReconcile1Pod(t *testing.T) {
	namespace := "test"
	ipv6Family := corev1.IPv6Protocol

	svcv4, _ := newServiceAndEndpointMeta("foo", namespace)
	svcv6, _ := newServiceAndEndpointMeta("foo", namespace)
	svcv6.Spec.IPFamily = &ipv6Family
	svcv6ClusterIP, _ := newServiceAndEndpointMeta("foo", namespace)
	svcv6ClusterIP.Spec.ClusterIP = "1234::5678:0000:0000:9abc:def1"

	pod1 := newPod(1, namespace, true, 1)
	pod1.Status.PodIPs = []corev1.PodIP{{IP: "1.2.3.4"}, {IP: "1234::5678:0000:0000:9abc:def0"}}
	pod1.Spec.Hostname = "example-hostname"
	node1 := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: pod1.Spec.NodeName,
			Labels: map[string]string{
				"topology.kubernetes.io/zone":   "us-central1-a",
				"topology.kubernetes.io/region": "us-central1",
			},
		},
	}

	testCases := map[string]struct {
		service             corev1.Service
		expectedAddressType discovery.AddressType
		expectedEndpoint    discovery.Endpoint
	}{
		"ipv4": {
			service:             svcv4,
			expectedAddressType: discovery.AddressTypeIPv4,
			expectedEndpoint: discovery.Endpoint{
				Addresses:  []string{"1.2.3.4"},
				Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
				Topology: map[string]string{
					"kubernetes.io/hostname":        "node-1",
					"topology.kubernetes.io/zone":   "us-central1-a",
					"topology.kubernetes.io/region": "us-central1",
				},
				TargetRef: &corev1.ObjectReference{
					Kind:      "Pod",
					Namespace: namespace,
					Name:      "pod1",
				},
			},
		},
		"ipv6": {
			service:             svcv6,
			expectedAddressType: discovery.AddressTypeIPv6,
			expectedEndpoint: discovery.Endpoint{
				Addresses:  []string{"1234::5678:0000:0000:9abc:def0"},
				Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
				Topology: map[string]string{
					"kubernetes.io/hostname":        "node-1",
					"topology.kubernetes.io/zone":   "us-central1-a",
					"topology.kubernetes.io/region": "us-central1",
				},
				TargetRef: &corev1.ObjectReference{
					Kind:      "Pod",
					Namespace: namespace,
					Name:      "pod1",
				},
			},
		},
		"ipv6-clusterip": {
			service:             svcv6ClusterIP,
			expectedAddressType: discovery.AddressTypeIPv6,
			expectedEndpoint: discovery.Endpoint{
				Addresses:  []string{"1234::5678:0000:0000:9abc:def0"},
				Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
				Topology: map[string]string{
					"kubernetes.io/hostname":        "node-1",
					"topology.kubernetes.io/zone":   "us-central1-a",
					"topology.kubernetes.io/region": "us-central1",
				},
				TargetRef: &corev1.ObjectReference{
					Kind:      "Pod",
					Namespace: namespace,
					Name:      "pod1",
				},
			},
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			client := newClientset()
			setupMetrics()
			triggerTime := time.Now()
			r := newReconciler(client, []*corev1.Node{node1}, defaultMaxEndpointsPerSlice)
			reconcileHelper(t, r, &testCase.service, []*corev1.Pod{pod1}, []*discovery.EndpointSlice{}, triggerTime)

			if len(client.Actions()) != 1 {
				t.Errorf("Expected 1 clientset action, got %d", len(client.Actions()))
			}

			slices := fetchEndpointSlices(t, client, namespace)
			if len(slices) != 1 {
				t.Fatalf("Expected 1 EndpointSlice, got %d", len(slices))
			}

			slice := slices[0]
			if !strings.HasPrefix(slice.Name, testCase.service.Name) {
				t.Errorf("Expected EndpointSlice name to start with %s, got %s", testCase.service.Name, slice.Name)
			}
			if slice.Labels[discovery.LabelServiceName] != testCase.service.Name {
				t.Errorf("Expected EndpointSlice to have label set with %s value, got %s", testCase.service.Name, slice.Labels[discovery.LabelServiceName])
			}
			if slice.AddressType != testCase.expectedAddressType {
				t.Errorf("Expected EndpointSlice address type to be %s, got %s", testCase.expectedAddressType, slice.AddressType)
			}
			if slice.Annotations[corev1.EndpointsLastChangeTriggerTime] != triggerTime.Format(time.RFC3339Nano) {
				t.Errorf("Expected EndpointSlice trigger time annotation to be %s, got %s", triggerTime.Format(time.RFC3339Nano), slice.Annotations[corev1.EndpointsLastChangeTriggerTime])
			}
			if len(slice.Endpoints) != 1 {
				t.Fatalf("Expected 1 Endpoint, got %d", len(slice.Endpoints))
			}

			endpoint := slice.Endpoints[0]
			if !reflect.DeepEqual(endpoint, testCase.expectedEndpoint) {
				t.Errorf("Expected endpoint: %+v, got: %+v", testCase.expectedEndpoint, endpoint)
			}
			expectMetrics(t, expectedMetrics{desiredSlices: 1, actualSlices: 1, desiredEndpoints: 1, addedPerSync: 1, removedPerSync: 0, numCreated: 1, numUpdated: 0, numDeleted: 0})
		})
	}
}

// given an existing endpoint slice and no pods matching the service, the existing
// slice should be updated to a placeholder (not deleted)
func TestReconcile1EndpointSlice(t *testing.T) {
	client := newClientset()
	setupMetrics()
	namespace := "test"
	svc, endpointMeta := newServiceAndEndpointMeta("foo", namespace)
	endpointSlice1 := newEmptyEndpointSlice(1, namespace, endpointMeta, svc)

	_, createErr := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice1, metav1.CreateOptions{})
	assert.Nil(t, createErr, "Expected no error creating endpoint slice")

	numActionsBefore := len(client.Actions())
	r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, defaultMaxEndpointsPerSlice)
	reconcileHelper(t, r, &svc, []*corev1.Pod{}, []*discovery.EndpointSlice{endpointSlice1}, time.Now())
	assert.Len(t, client.Actions(), numActionsBefore+1, "Expected 1 additional clientset action")

	actions := client.Actions()
	assert.True(t, actions[numActionsBefore].Matches("update", "endpointslices"), "Action should be update endpoint slice")

	slices := fetchEndpointSlices(t, client, namespace)
	assert.Len(t, slices, 1, "Expected 1 endpoint slice")
	assert.Regexp(t, "^"+svc.Name, slices[0].Name)
	assert.Equal(t, svc.Name, slices[0].Labels[discovery.LabelServiceName])
	assert.EqualValues(t, []discovery.EndpointPort{}, slices[0].Ports)
	assert.EqualValues(t, []discovery.Endpoint{}, slices[0].Endpoints)
	expectMetrics(t, expectedMetrics{desiredSlices: 1, actualSlices: 1, desiredEndpoints: 0, addedPerSync: 0, removedPerSync: 0, numCreated: 0, numUpdated: 1, numDeleted: 0})
}

// when a Service has PublishNotReadyAddresses set to true, corresponding
// Endpoints should be considered ready, even if the backing Pod is not.
func TestReconcile1EndpointSlicePublishNotReadyAddresses(t *testing.T) {
	client := newClientset()
	namespace := "test"
	svc, _ := newServiceAndEndpointMeta("foo", namespace)
	svc.Spec.PublishNotReadyAddresses = true

	// start with 50 pods, 1/3 not ready
	pods := []*corev1.Pod{}
	for i := 0; i < 50; i++ {
		ready := !(i%3 == 0)
		pods = append(pods, newPod(i, namespace, ready, 1))
	}

	r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, defaultMaxEndpointsPerSlice)
	reconcileHelper(t, r, &svc, pods, []*discovery.EndpointSlice{}, time.Now())

	// Only 1 action, an EndpointSlice create
	assert.Len(t, client.Actions(), 1, "Expected 1 clientset action")
	expectActions(t, client.Actions(), 1, "create", "endpointslices")

	// every endpoint should be marked ready, even those backed by unready pods
	endpointSlices := fetchEndpointSlices(t, client, namespace)
	for _, endpointSlice := range endpointSlices {
		for i, endpoint := range endpointSlice.Endpoints {
			if !*endpoint.Conditions.Ready {
				t.Errorf("Expected endpoints[%d] to be ready", i)
			}
		}
	}

	// all 50 endpoints fit within a single slice
	expectUnorderedSlicesWithLengths(t, endpointSlices, []int{50})
}

// a simple use case with 250 pods matching a service and no existing slices
// reconcile should create 3 slices, completely filling 2 of them
func TestReconcileManyPods(t *testing.T) {
	client := newClientset()
	setupMetrics()
	namespace := "test"
	svc, _ := newServiceAndEndpointMeta("foo", namespace)

	// start with 250 pods
	pods := []*corev1.Pod{}
	for i := 0; i < 250; i++ {
		ready := !(i%3 == 0)
		pods = append(pods, newPod(i, namespace, ready, 1))
	}

	r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, defaultMaxEndpointsPerSlice)
	reconcileHelper(t, r, &svc, pods, []*discovery.EndpointSlice{}, time.Now())

	// This is an ideal scenario where only 3 actions are required, and they're all creates
	assert.Len(t, client.Actions(), 3, "Expected 3 clientset actions")
	expectActions(t, client.Actions(), 3, "create", "endpointslices")

	// Two endpoint slices should be completely full, the remainder should be in another one
	expectUnorderedSlicesWithLengths(t, fetchEndpointSlices(t, client, namespace), []int{100, 100, 50})
	expectMetrics(t, expectedMetrics{desiredSlices: 3, actualSlices: 3, desiredEndpoints: 250, addedPerSync: 250, removedPerSync: 0, numCreated: 3, numUpdated: 0, numDeleted: 0})
}

// now with preexisting slices, we have 250 pods matching a service
// the first endpoint slice contains 62 endpoints, all desired
// the second endpoint slice contains 61 endpoints, all desired
// that leaves 127 to add
// to minimize writes, our strategy is to create new slices for multiples of 100
// that leaves 27 to drop in an existing slice
// dropping them in the first slice will result in the slice being closest to full
// this approach requires 1 update + 1 create instead of 2 updates + 1 create
func TestReconcileEndpointSlicesSomePreexisting(t *testing.T) {
	client := newClientset()
	setupMetrics()
	namespace := "test"
	svc, endpointMeta := newServiceAndEndpointMeta("foo", namespace)

	// start with 250 pods
	pods := []*corev1.Pod{}
	for i := 0; i < 250; i++ {
		ready := !(i%3 == 0)
		pods = append(pods, newPod(i, namespace, ready, 1))
	}

	// have approximately 1/4 in first slice
	endpointSlice1 := newEmptyEndpointSlice(1, namespace, endpointMeta, svc)
	for i := 1; i < len(pods)-4; i += 4 {
		endpointSlice1.Endpoints = append(endpointSlice1.Endpoints, podToEndpoint(pods[i], &corev1.Node{}, &svc))
	}

	// have approximately 1/4 in second slice
	endpointSlice2 := newEmptyEndpointSlice(2, namespace, endpointMeta, svc)
	for i := 3; i < len(pods)-4; i += 4 {
		endpointSlice2.Endpoints = append(endpointSlice2.Endpoints, podToEndpoint(pods[i], &corev1.Node{}, &svc))
	}

	existingSlices := []*discovery.EndpointSlice{endpointSlice1, endpointSlice2}
	cmc := newCacheMutationCheck(existingSlices)
	createEndpointSlices(t, client, namespace, existingSlices)

	numActionsBefore := len(client.Actions())
	r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, defaultMaxEndpointsPerSlice)
	reconcileHelper(t, r, &svc, pods, existingSlices, time.Now())

	actions := client.Actions()
	assert.Equal(t, numActionsBefore+2, len(actions), "Expected 2 additional client actions as part of reconcile")
	assert.True(t, actions[numActionsBefore].Matches("create", "endpointslices"), "First action should be create endpoint slice")
	assert.True(t, actions[numActionsBefore+1].Matches("update", "endpointslices"), "Second action should be update endpoint slice")

	// 1 new slice (0->100) + 1 updated slice (62->89)
	expectUnorderedSlicesWithLengths(t, fetchEndpointSlices(t, client, namespace), []int{89, 61, 100})
	expectMetrics(t, expectedMetrics{desiredSlices: 3, actualSlices: 3, desiredEndpoints: 250, addedPerSync: 127, removedPerSync: 0, numCreated: 1, numUpdated: 1, numDeleted: 0})

	// ensure cache mutation has not occurred
	cmc.Check(t)
}

// now with preexisting slices, we have 300 pods matching a service
// this scenario will show some less ideal allocation
// the first endpoint slice contains 74 endpoints, all desired
// the second endpoint slice contains 74 endpoints, all desired
// that leaves 152 to add
// to minimize writes, our strategy is to create new slices for multiples of 100
// that leaves 52 to drop in an existing slice
// those 52 could fit if split across the 2 existing slices
// to minimize writes though, reconcile creates a new slice with those 52 endpoints
// this approach requires 2 creates instead of 2 updates + 1 create
func TestReconcileEndpointSlicesSomePreexistingWorseAllocation(t *testing.T) {
	client := newClientset()
	setupMetrics()
	namespace := "test"
	svc, endpointMeta := newServiceAndEndpointMeta("foo", namespace)

	// start with 300 pods
	pods := []*corev1.Pod{}
	for i := 0; i < 300; i++ {
		ready := !(i%3 == 0)
		pods = append(pods, newPod(i, namespace, ready, 1))
	}

	// have approximately 1/4 in first slice
	endpointSlice1 := newEmptyEndpointSlice(1, namespace, endpointMeta, svc)
	for i := 1; i < len(pods)-4; i += 4 {
		endpointSlice1.Endpoints = append(endpointSlice1.Endpoints, podToEndpoint(pods[i], &corev1.Node{}, &svc))
	}

	// have approximately 1/4 in second slice
	endpointSlice2 := newEmptyEndpointSlice(2, namespace, endpointMeta, svc)
	for i := 3; i < len(pods)-4; i += 4 {
		endpointSlice2.Endpoints = append(endpointSlice2.Endpoints, podToEndpoint(pods[i], &corev1.Node{}, &svc))
	}

	existingSlices := []*discovery.EndpointSlice{endpointSlice1, endpointSlice2}
	cmc := newCacheMutationCheck(existingSlices)
	createEndpointSlices(t, client, namespace, existingSlices)

	numActionsBefore := len(client.Actions())
	r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, defaultMaxEndpointsPerSlice)
	reconcileHelper(t, r, &svc, pods, existingSlices, time.Now())

	actions := client.Actions()
	assert.Equal(t, numActionsBefore+2, len(actions), "Expected 2 additional client actions as part of reconcile")
	expectActions(t, client.Actions(), 2, "create", "endpointslices")

	// 2 new slices (100, 52) in addition to existing slices (74, 74)
	expectUnorderedSlicesWithLengths(t, fetchEndpointSlices(t, client, namespace), []int{74, 74, 100, 52})
	expectMetrics(t, expectedMetrics{desiredSlices: 3, actualSlices: 4, desiredEndpoints: 300, addedPerSync: 152, removedPerSync: 0, numCreated: 2, numUpdated: 0, numDeleted: 0})

	// ensure cache mutation has not occurred
	cmc.Check(t)
}

// In some cases, such as a service port change, all slices for that service will require a change
// This test ensures that we are updating those slices and not calling create + delete for each
func TestReconcileEndpointSlicesUpdating(t *testing.T) {
	client := newClientset()
	namespace := "test"
	svc, _ := newServiceAndEndpointMeta("foo", namespace)

	// start with 250 pods
	pods := []*corev1.Pod{}
	for i := 0; i < 250; i++ {
		ready := !(i%3 == 0)
		pods = append(pods, newPod(i, namespace, ready, 1))
	}

	r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, defaultMaxEndpointsPerSlice)
	reconcileHelper(t, r, &svc, pods, []*discovery.EndpointSlice{}, time.Now())
	numActionsExpected := 3
	assert.Len(t, client.Actions(), numActionsExpected, "Expected 3 clientset actions")

	slices := fetchEndpointSlices(t, client, namespace)
	numActionsExpected++ // fetchEndpointSlices records a list action on the fake client
	expectUnorderedSlicesWithLengths(t, slices, []int{100, 100, 50})

	svc.Spec.Ports[0].TargetPort.IntVal = 81
	reconcileHelper(t, r, &svc, pods, []*discovery.EndpointSlice{&slices[0], &slices[1], &slices[2]}, time.Now())
	numActionsExpected += 3
	assert.Len(t, client.Actions(), numActionsExpected, "Expected 3 additional clientset actions")
	expectActions(t, client.Actions(), 3, "update", "endpointslices")

	expectUnorderedSlicesWithLengths(t, fetchEndpointSlices(t, client, namespace), []int{100, 100, 50})
}

// In this test, we start with 10 slices that only have 30 endpoints each
// An initial reconcile makes no changes (as desired to limit writes)
// When we change a service port, all slices will need to be updated in some way
// reconcile repacks the endpoints into 3 slices, and deletes the extras
func TestReconcileEndpointSlicesRecycling(t *testing.T) {
	client := newClientset()
	setupMetrics()
	namespace := "test"
	svc, endpointMeta := newServiceAndEndpointMeta("foo", namespace)

	// start with 300 pods
	pods := []*corev1.Pod{}
	for i := 0; i < 300; i++ {
		ready := !(i%3 == 0)
		pods = append(pods, newPod(i, namespace, ready, 1))
	}

	// generate 10 existing slices with 30 pods/endpoints each
	existingSlices := []*discovery.EndpointSlice{}
	for i, pod := range pods {
		sliceNum := i / 30
		if i%30 == 0 {
			existingSlices = append(existingSlices, newEmptyEndpointSlice(sliceNum, namespace, endpointMeta, svc))
		}
		existingSlices[sliceNum].Endpoints = append(existingSlices[sliceNum].Endpoints, podToEndpoint(pod, &corev1.Node{}, &svc))
	}

	cmc := newCacheMutationCheck(existingSlices)
	createEndpointSlices(t, client, namespace, existingSlices)

	numActionsBefore := len(client.Actions())
	r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, defaultMaxEndpointsPerSlice)
	reconcileHelper(t, r, &svc, pods, existingSlices, time.Now())

	// initial reconcile should be a no-op; all pods are accounted for in slices, so no repacking should be done
	assert.Equal(t, numActionsBefore+0, len(client.Actions()), "Expected 0 additional client actions as part of reconcile")

	// changing a service port should require all slices to be updated, time for a repack
	svc.Spec.Ports[0].TargetPort.IntVal = 81
	reconcileHelper(t, r, &svc, pods, existingSlices, time.Now())

	// this should reflect 3 updates + 7 deletes
	assert.Equal(t, numActionsBefore+10, len(client.Actions()), "Expected 10 additional client actions as part of reconcile")

	// thanks to recycling, we get a free repack of endpoints, resulting in 3 full slices instead of 10 mostly empty slices
	expectUnorderedSlicesWithLengths(t, fetchEndpointSlices(t, client, namespace), []int{100, 100, 100})
	expectMetrics(t, expectedMetrics{desiredSlices: 3, actualSlices: 3, desiredEndpoints: 300, addedPerSync: 300, removedPerSync: 0, numCreated: 0, numUpdated: 3, numDeleted: 7})

	// ensure cache mutation has not occurred
	cmc.Check(t)
}

// In this test, we want to verify that endpoints are added to a slice that will
// be closest to full after the operation, even when slices are already marked
// for update.
func TestReconcileEndpointSlicesUpdatePacking(t *testing.T) {
	client := newClientset()
	setupMetrics()
	namespace := "test"
	svc, endpointMeta := newServiceAndEndpointMeta("foo", namespace)

	existingSlices := []*discovery.EndpointSlice{}
	pods := []*corev1.Pod{}

	slice1 := newEmptyEndpointSlice(1, namespace, endpointMeta, svc)
	for i := 0; i < 80; i++ {
		pod := newPod(i, namespace, true, 1)
		slice1.Endpoints = append(slice1.Endpoints, podToEndpoint(pod, &corev1.Node{}, &svc))
		pods = append(pods, pod)
	}
	existingSlices = append(existingSlices, slice1)

	slice2 := newEmptyEndpointSlice(2, namespace, endpointMeta, svc)
	for i := 100; i < 120; i++ {
		pod := newPod(i, namespace, true, 1)
		slice2.Endpoints = append(slice2.Endpoints, podToEndpoint(pod, &corev1.Node{}, &svc))
		pods = append(pods, pod)
	}
	existingSlices = append(existingSlices, slice2)

	cmc := newCacheMutationCheck(existingSlices)
	createEndpointSlices(t, client, namespace, existingSlices)

	// ensure that endpoints in each slice will be marked for update.
	for i, pod := range pods {
		if i%10 == 0 {
			pod.Status.Conditions = []corev1.PodCondition{{
				Type:   corev1.PodReady,
				Status: corev1.ConditionFalse,
			}}
		}
	}

	// add a few additional endpoints - no more than could fit in either slice.
	for i := 200; i < 215; i++ {
		pods = append(pods, newPod(i, namespace, true, 1))
	}

	r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, defaultMaxEndpointsPerSlice)
	reconcileHelper(t, r, &svc, pods, existingSlices, time.Now())

	// ensure that both endpoint slices have been updated
	expectActions(t, client.Actions(), 2, "update", "endpointslices")
	expectMetrics(t, expectedMetrics{desiredSlices: 2, actualSlices: 2, desiredEndpoints: 115, addedPerSync: 15, removedPerSync: 0, numCreated: 0, numUpdated: 2, numDeleted: 0})

	// additional pods should get added to fuller slice
	expectUnorderedSlicesWithLengths(t, fetchEndpointSlices(t, client, namespace), []int{95, 20})

	// ensure cache mutation has not occurred
	cmc.Check(t)
}

// In this test, we want to verify that old EndpointSlices with a deprecated IP
// address type will be replaced with a newer IPv4 type.
func TestReconcileEndpointSlicesReplaceDeprecated(t *testing.T) {
	client := newClientset()
	setupMetrics()
	namespace := "test"

	svc, endpointMeta := newServiceAndEndpointMeta("foo", namespace)
	endpointMeta.AddressType = discovery.AddressTypeIP

	existingSlices := []*discovery.EndpointSlice{}
	pods := []*corev1.Pod{}

	slice1 := newEmptyEndpointSlice(1, namespace, endpointMeta, svc)
	for i := 0; i < 80; i++ {
		pod := newPod(i, namespace, true, 1)
		slice1.Endpoints = append(slice1.Endpoints, podToEndpoint(pod, &corev1.Node{}, &corev1.Service{Spec: corev1.ServiceSpec{}}))
		pods = append(pods, pod)
	}
	existingSlices = append(existingSlices, slice1)

	slice2 := newEmptyEndpointSlice(2, namespace, endpointMeta, svc)
	for i := 100; i < 150; i++ {
		pod := newPod(i, namespace, true, 1)
		slice2.Endpoints = append(slice2.Endpoints, podToEndpoint(pod, &corev1.Node{}, &corev1.Service{Spec: corev1.ServiceSpec{}}))
		pods = append(pods, pod)
	}
	existingSlices = append(existingSlices, slice2)

	createEndpointSlices(t, client, namespace, existingSlices)

	cmc := newCacheMutationCheck(existingSlices)
	r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, defaultMaxEndpointsPerSlice)
	reconcileHelper(t, r, &svc, pods, existingSlices, time.Now())

	// ensure that both original endpoint slices have been deleted
	expectActions(t, client.Actions(), 2, "delete", "endpointslices")

	endpointSlices := fetchEndpointSlices(t, client, namespace)

	// since this involved replacing both EndpointSlices, the result should be
	// perfectly packed.
	expectUnorderedSlicesWithLengths(t, endpointSlices, []int{100, 30})

	for _, endpointSlice := range endpointSlices {
		if endpointSlice.AddressType != discovery.AddressTypeIPv4 {
			t.Errorf("Expected address type to be IPv4, got %s", endpointSlice.AddressType)
		}
	}

	// ensure cache mutation has not occurred
	cmc.Check(t)
}

// Named ports can map to different port numbers on different pods.
// This test ensures that EndpointSlices are grouped correctly in that case.
func TestReconcileEndpointSlicesNamedPorts(t *testing.T) {
	client := newClientset()
	setupMetrics()
	namespace := "test"

	portNameIntStr := intstr.IntOrString{
		Type:   intstr.String,
		StrVal: "http",
	}

	svc := corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "named-port-example", Namespace: namespace},
		Spec: corev1.ServiceSpec{
			Ports: []corev1.ServicePort{{
				TargetPort: portNameIntStr,
				Protocol:   corev1.ProtocolTCP,
			}},
			Selector: map[string]string{"foo": "bar"},
		},
	}

	// start with 300 pods
	pods := []*corev1.Pod{}
	for i := 0; i < 300; i++ {
		ready := !(i%3 == 0)
		portOffset := i % 5
		pod := newPod(i, namespace, ready, 1)
		pod.Spec.Containers[0].Ports = []corev1.ContainerPort{{
			Name:          portNameIntStr.StrVal,
			ContainerPort: int32(8080 + portOffset),
			Protocol:      corev1.ProtocolTCP,
		}}
		pods = append(pods, pod)
	}

	r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, defaultMaxEndpointsPerSlice)
	reconcileHelper(t, r, &svc, pods, []*discovery.EndpointSlice{}, time.Now())

	// reconcile should create 5 endpoint slices
	assert.Equal(t, 5, len(client.Actions()), "Expected 5 client actions as part of reconcile")
	expectActions(t, client.Actions(), 5, "create", "endpointslices")
	expectMetrics(t, expectedMetrics{desiredSlices: 5, actualSlices: 5, desiredEndpoints: 300, addedPerSync: 300, removedPerSync: 0, numCreated: 5, numUpdated: 0, numDeleted: 0})

	fetchedSlices := fetchEndpointSlices(t, client, namespace)

	// each slice should have 60 endpoints to match 5 unique variations of named port mapping
	expectUnorderedSlicesWithLengths(t, fetchedSlices, []int{60, 60, 60, 60, 60})

	// generate data structures for expected slice ports and address types
	protoTCP := corev1.ProtocolTCP
	expectedSlices := []discovery.EndpointSlice{}
	for i := range fetchedSlices {
		expectedSlices = append(expectedSlices, discovery.EndpointSlice{
			Ports: []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(""),
				Protocol: &protoTCP,
				Port:     utilpointer.Int32Ptr(int32(8080 + i)),
			}},
			AddressType: discovery.AddressTypeIPv4,
		})
	}

	// slices fetched should match expected address type and ports
	expectUnorderedSlicesWithTopLevelAttrs(t, fetchedSlices, expectedSlices)
}

// This test ensures that the maxEndpointsPerSlice configuration results in the
// expected distribution of endpoints among slices.
func TestReconcileMaxEndpointsPerSlice(t *testing.T) {
	namespace := "test"
	svc, _ := newServiceAndEndpointMeta("foo", namespace)

	// start with 250 pods
	pods := []*corev1.Pod{}
	for i := 0; i < 250; i++ {
		ready := !(i%3 == 0)
		pods = append(pods, newPod(i, namespace, ready, 1))
	}

	testCases := []struct {
		maxEndpointsPerSlice int32
		expectedSliceLengths []int
		expectedMetricValues expectedMetrics
	}{
		{
			maxEndpointsPerSlice: int32(50),
			expectedSliceLengths: []int{50, 50, 50, 50, 50},
			expectedMetricValues: expectedMetrics{desiredSlices: 5, actualSlices: 5, desiredEndpoints: 250, addedPerSync: 250, numCreated: 5},
		}, {
			maxEndpointsPerSlice: int32(80),
			expectedSliceLengths: []int{80, 80, 80, 10},
			expectedMetricValues: expectedMetrics{desiredSlices: 4, actualSlices: 4, desiredEndpoints: 250, addedPerSync: 250, numCreated: 4},
		}, {
			maxEndpointsPerSlice: int32(150),
			expectedSliceLengths: []int{150, 100},
			expectedMetricValues: expectedMetrics{desiredSlices: 2, actualSlices: 2, desiredEndpoints: 250, addedPerSync: 250, numCreated: 2},
		}, {
			maxEndpointsPerSlice: int32(250),
			expectedSliceLengths: []int{250},
			expectedMetricValues: expectedMetrics{desiredSlices: 1, actualSlices: 1, desiredEndpoints: 250, addedPerSync: 250, numCreated: 1},
		}, {
			maxEndpointsPerSlice: int32(500),
			expectedSliceLengths: []int{250},
			expectedMetricValues: expectedMetrics{desiredSlices: 1, actualSlices: 1, desiredEndpoints: 250, addedPerSync: 250, numCreated: 1},
		},
	}

	for _, testCase := range testCases {
		t.Run(fmt.Sprintf("maxEndpointsPerSlice: %d", testCase.maxEndpointsPerSlice), func(t *testing.T) {
			client := newClientset()
			setupMetrics()
			r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, testCase.maxEndpointsPerSlice)
			reconcileHelper(t, r, &svc, pods, []*discovery.EndpointSlice{}, time.Now())
			expectUnorderedSlicesWithLengths(t, fetchEndpointSlices(t, client, namespace), testCase.expectedSliceLengths)
			expectMetrics(t, testCase.expectedMetricValues)
		})
	}
}
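
// TestReconcileEndpointSlicesMetrics verifies that endpoints added and removed
// across consecutive syncs are reflected in the reconcile metrics.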
func TestReconcileEndpointSlicesMetrics(t *testing.T) {
	client := newClientset()
	setupMetrics()
	namespace := "test"
	svc, _ := newServiceAndEndpointMeta("foo", namespace)

	// start with 20 pods
	pods := []*corev1.Pod{}
	for i := 0; i < 20; i++ {
		pods = append(pods, newPod(i, namespace, true, 1))
	}

	r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, defaultMaxEndpointsPerSlice)
	reconcileHelper(t, r, &svc, pods, []*discovery.EndpointSlice{}, time.Now())

	actions := client.Actions()
	assert.Equal(t, 1, len(actions), "Expected 1 client action as part of reconcile")
	assert.True(t, actions[0].Matches("create", "endpointslices"), "First action should be create endpoint slice")
	expectMetrics(t, expectedMetrics{desiredSlices: 1, actualSlices: 1, desiredEndpoints: 20, addedPerSync: 20, removedPerSync: 0, numCreated: 1, numUpdated: 0, numDeleted: 0})

	fetchedSlices := fetchEndpointSlices(t, client, namespace)
	reconcileHelper(t, r, &svc, pods[0:10], []*discovery.EndpointSlice{&fetchedSlices[0]}, time.Now())
	expectMetrics(t, expectedMetrics{desiredSlices: 1, actualSlices: 1, desiredEndpoints: 10, addedPerSync: 20, removedPerSync: 10, numCreated: 1, numUpdated: 1, numDeleted: 0})
}

// Test Helpers
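
// newReconciler returns a reconciler backed by the fake clientset, with a node
// lister pre-populated with the given nodes.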
func newReconciler(client *fake.Clientset, nodes []*corev1.Node, maxEndpointsPerSlice int32) *reconciler {
	informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
	nodeInformer := informerFactory.Core().V1().Nodes()
	indexer := nodeInformer.Informer().GetIndexer()
	for _, node := range nodes {
		indexer.Add(node)
	}

	return &reconciler{
		client:               client,
		nodeLister:           corelisters.NewNodeLister(indexer),
		maxEndpointsPerSlice: maxEndpointsPerSlice,
		endpointSliceTracker: newEndpointSliceTracker(),
		metricsCache:         metrics.NewCache(maxEndpointsPerSlice),
	}
}

// expectUnorderedSlicesWithLengths ensures that endpoint slices exist with the
// desired set of lengths, in any order.
func expectUnorderedSlicesWithLengths(t *testing.T, endpointSlices []discovery.EndpointSlice, expectedLengths []int) {
	assert.Len(t, endpointSlices, len(expectedLengths), "Expected %d endpoint slices", len(expectedLengths))

	lengthsWithNoMatch := []int{}
	desiredLengths := expectedLengths
	actualLengths := []int{}
	for _, endpointSlice := range endpointSlices {
		actualLen := len(endpointSlice.Endpoints)
		actualLengths = append(actualLengths, actualLen)
		matchFound := false
		for i := 0; i < len(desiredLengths); i++ {
			if desiredLengths[i] == actualLen {
				matchFound = true
				desiredLengths = append(desiredLengths[:i], desiredLengths[i+1:]...)
				break
			}
		}

		if !matchFound {
			lengthsWithNoMatch = append(lengthsWithNoMatch, actualLen)
		}
	}

	if len(lengthsWithNoMatch) > 0 || len(desiredLengths) > 0 {
		t.Errorf("Actual slice lengths (%v) don't match expected (%v)", actualLengths, expectedLengths)
	}
}

// expectUnorderedSlicesWithTopLevelAttrs ensures that endpoint slices exist
// with the desired set of ports and address types, in any order.
func expectUnorderedSlicesWithTopLevelAttrs(t *testing.T, endpointSlices []discovery.EndpointSlice, expectedSlices []discovery.EndpointSlice) {
	t.Helper()
	assert.Len(t, endpointSlices, len(expectedSlices), "Expected %d endpoint slices", len(expectedSlices))

	slicesWithNoMatch := []discovery.EndpointSlice{}
	for _, endpointSlice := range endpointSlices {
		matchFound := false
		for i := 0; i < len(expectedSlices); i++ {
			if portsAndAddressTypeEqual(expectedSlices[i], endpointSlice) {
				matchFound = true
				expectedSlices = append(expectedSlices[:i], expectedSlices[i+1:]...)
				break
			}
		}

		if !matchFound {
			slicesWithNoMatch = append(slicesWithNoMatch, endpointSlice)
		}
	}

	assert.Len(t, slicesWithNoMatch, 0, "EndpointSlice(s) found without matching attributes")
	assert.Len(t, expectedSlices, 0, "Expected slice(s) not found in EndpointSlices")
}
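
// expectActions asserts that the last num actions recorded by the fake
// clientset match the given verb and resource.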
func expectActions(t *testing.T, actions []k8stesting.Action, num int, verb, resource string) {
	t.Helper()
	for i := 0; i < num; i++ {
		relativePos := len(actions) - i - 1
		assert.Equal(t, verb, actions[relativePos].GetVerb(), "Expected action -%d verb to be %s", i, verb)
		assert.Equal(t, resource, actions[relativePos].GetResource().Resource, "Expected action -%d resource to be %s", i, resource)
	}
}
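
// portsAndAddressTypeEqual returns true if both EndpointSlices have
// semantically equal Ports and AddressType.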
func portsAndAddressTypeEqual(slice1, slice2 discovery.EndpointSlice) bool {
	return apiequality.Semantic.DeepEqual(slice1.Ports, slice2.Ports) && apiequality.Semantic.DeepEqual(slice1.AddressType, slice2.AddressType)
}
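
// createEndpointSlices creates the given EndpointSlices via the fake
// clientset, failing the test on any error.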
func createEndpointSlices(t *testing.T, client *fake.Clientset, namespace string, endpointSlices []*discovery.EndpointSlice) {
	t.Helper()
	for _, endpointSlice := range endpointSlices {
		_, err := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Expected no error creating Endpoint Slice, got: %v", err)
		}
	}
}
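
// fetchEndpointSlices lists all EndpointSlices in the namespace from the fake
// clientset, failing the test on any error.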
func fetchEndpointSlices(t *testing.T, client *fake.Clientset, namespace string) []discovery.EndpointSlice {
	t.Helper()
	fetchedSlices, err := client.DiscoveryV1beta1().EndpointSlices(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		t.Fatalf("Expected no error fetching Endpoint Slices, got: %v", err)
		return []discovery.EndpointSlice{}
	}
	return fetchedSlices.Items
}
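
// reconcileHelper runs a single reconcile, failing the test on any error.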
func reconcileHelper(t *testing.T, r *reconciler, service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time) {
	t.Helper()
	err := r.reconcile(service, pods, existingSlices, triggerTime)
	if err != nil {
		t.Fatalf("Expected no error reconciling Endpoint Slices, got: %v", err)
	}
}

// Metrics helpers
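
// expectedMetrics holds the metric values a test expects to observe after a
// reconcile.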
type expectedMetrics struct {
	desiredSlices    int
	actualSlices     int
	desiredEndpoints int
	addedPerSync     int
	removedPerSync   int
	numCreated       int
	numUpdated       int
	numDeleted       int
}

// expectMetrics compares the currently recorded endpoint slice metrics against
// the expected values in em.
func expectMetrics(t *testing.T, em expectedMetrics) {
	t.Helper()

	actualDesiredSlices := getGaugeMetricValue(t, metrics.DesiredEndpointSlices.WithLabelValues())
	if actualDesiredSlices != float64(em.desiredSlices) {
		t.Errorf("Expected desiredEndpointSlices to be %d, got %v", em.desiredSlices, actualDesiredSlices)
	}

	actualNumSlices := getGaugeMetricValue(t, metrics.NumEndpointSlices.WithLabelValues())
	if actualNumSlices != float64(em.actualSlices) {
		t.Errorf("Expected numEndpointSlices to be %d, got %v", em.actualSlices, actualNumSlices)
	}

	actualEndpointsDesired := getGaugeMetricValue(t, metrics.EndpointsDesired.WithLabelValues())
	if actualEndpointsDesired != float64(em.desiredEndpoints) {
		t.Errorf("Expected desiredEndpoints to be %d, got %v", em.desiredEndpoints, actualEndpointsDesired)
	}

	actualAddedPerSync := getHistogramMetricValue(t, metrics.EndpointsAddedPerSync.WithLabelValues())
	if actualAddedPerSync != float64(em.addedPerSync) {
		t.Errorf("Expected endpointsAddedPerSync to be %d, got %v", em.addedPerSync, actualAddedPerSync)
	}

	actualRemovedPerSync := getHistogramMetricValue(t, metrics.EndpointsRemovedPerSync.WithLabelValues())
	if actualRemovedPerSync != float64(em.removedPerSync) {
		t.Errorf("Expected endpointsRemovedPerSync to be %d, got %v", em.removedPerSync, actualRemovedPerSync)
	}

	actualCreated := getCounterMetricValue(t, metrics.EndpointSliceChanges.WithLabelValues("create"))
	if actualCreated != float64(em.numCreated) {
		t.Errorf("Expected endpointSliceChangesCreated to be %d, got %v", em.numCreated, actualCreated)
	}

	actualUpdated := getCounterMetricValue(t, metrics.EndpointSliceChanges.WithLabelValues("update"))
	if actualUpdated != float64(em.numUpdated) {
		t.Errorf("Expected endpointSliceChangesUpdated to be %d, got %v", em.numUpdated, actualUpdated)
	}

	actualDeleted := getCounterMetricValue(t, metrics.EndpointSliceChanges.WithLabelValues("delete"))
	if actualDeleted != float64(em.numDeleted) {
		t.Errorf("Expected endpointSliceChangesDeleted to be %d, got %v", em.numDeleted, actualDeleted)
	}
}
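
// setupMetrics registers the endpoint slice metrics and resets any values
// recorded by previous tests.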
func setupMetrics() {
	metrics.RegisterMetrics()
	metrics.NumEndpointSlices.Delete(map[string]string{})
	metrics.DesiredEndpointSlices.Delete(map[string]string{})
	metrics.EndpointsDesired.Delete(map[string]string{})
	metrics.EndpointsAddedPerSync.Delete(map[string]string{})
	metrics.EndpointsRemovedPerSync.Delete(map[string]string{})
	metrics.EndpointSliceChanges.Delete(map[string]string{"operation": "create"})
	metrics.EndpointSliceChanges.Delete(map[string]string{"operation": "update"})
	metrics.EndpointSliceChanges.Delete(map[string]string{"operation": "delete"})
}
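
// getGaugeMetricValue reads the current value of a gauge metric by writing it
// to a dto.Metric proto.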
func getGaugeMetricValue(t *testing.T, metric compmetrics.GaugeMetric) float64 {
	t.Helper()
	metricProto := &dto.Metric{}
	if err := metric.Write(metricProto); err != nil {
		t.Errorf("Error writing metric: %v", err)
	}
	return metricProto.Gauge.GetValue()
}
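
// getCounterMetricValue reads the current value of a counter metric.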
func getCounterMetricValue(t *testing.T, metric compmetrics.CounterMetric) float64 {
	t.Helper()
	metricProto := &dto.Metric{}
	if err := metric.(compmetrics.Metric).Write(metricProto); err != nil {
		t.Errorf("Error writing metric: %v", err)
	}
	return metricProto.Counter.GetValue()
}
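
// getHistogramMetricValue returns the sample sum of a histogram metric.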
func getHistogramMetricValue(t *testing.T, metric compmetrics.ObserverMetric) float64 {
	t.Helper()
	metricProto := &dto.Metric{}
	if err := metric.(compmetrics.Metric).Write(metricProto); err != nil {
		t.Errorf("Error writing metric: %v", err)
	}
	return metricProto.Histogram.GetSampleSum()
}