endpointslice_controller_test.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpointslice

import (
	"context"
	"fmt"
	"reflect"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/controller"
	endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
	utilpointer "k8s.io/utils/pointer"
)
// Most of the tests related to EndpointSlice allocation can be found in reconciler_test.go.
// Tests here primarily focus on unique controller functionality before the reconciler begins.

var alwaysReady = func() bool { return true }
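
// endpointSliceController embeds the real Controller and exposes the informer
// stores so tests can seed objects directly without running the informers.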
type endpointSliceController struct {
	*Controller
	endpointSliceStore cache.Store
	nodeStore          cache.Store
	podStore           cache.Store
	serviceStore       cache.Store
}
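
// newController returns a fake clientset along with an endpointSliceController
// whose node store is pre-populated with the given node names and whose cache
// sync checks always report ready.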
func newController(nodeNames []string) (*fake.Clientset, *endpointSliceController) {
	client := newClientset()
	informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
	nodeInformer := informerFactory.Core().V1().Nodes()
	indexer := nodeInformer.Informer().GetIndexer()
	for _, nodeName := range nodeNames {
		indexer.Add(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}})
	}

	esController := NewController(
		informerFactory.Core().V1().Pods(),
		informerFactory.Core().V1().Services(),
		nodeInformer,
		informerFactory.Discovery().V1beta1().EndpointSlices(),
		int32(100),
		client)

	esController.nodesSynced = alwaysReady
	esController.podsSynced = alwaysReady
	esController.servicesSynced = alwaysReady
	esController.endpointSlicesSynced = alwaysReady

	return client, &endpointSliceController{
		esController,
		informerFactory.Discovery().V1beta1().EndpointSlices().Informer().GetStore(),
		informerFactory.Core().V1().Nodes().Informer().GetStore(),
		informerFactory.Core().V1().Pods().Informer().GetStore(),
		informerFactory.Core().V1().Services().Informer().GetStore(),
	}
}
// Ensure SyncService for a service with no selector results in no action.
func TestSyncServiceNoSelector(t *testing.T) {
	ns := metav1.NamespaceDefault
	serviceName := "testing-1"
	client, esController := newController([]string{"node-1"})
	esController.serviceStore.Add(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{TargetPort: intstr.FromInt(80)}},
		},
	})

	err := esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
	assert.Nil(t, err)
	assert.Len(t, client.Actions(), 0)
}
// Ensure SyncService for a service with a selector but no pods results in a
// placeholder EndpointSlice.
func TestSyncServiceWithSelector(t *testing.T) {
	ns := metav1.NamespaceDefault
	serviceName := "testing-1"
	client, esController := newController([]string{"node-1"})
	standardSyncService(t, esController, ns, serviceName, "true")
	expectActions(t, client.Actions(), 1, "create", "endpointslices")

	sliceList, err := client.DiscoveryV1beta1().EndpointSlices(ns).List(context.TODO(), metav1.ListOptions{})
	assert.Nil(t, err, "Expected no error fetching endpoint slices")
	assert.Len(t, sliceList.Items, 1, "Expected 1 endpoint slice")
	slice := sliceList.Items[0]
	assert.Regexp(t, "^"+serviceName, slice.Name)
	assert.Equal(t, serviceName, slice.Labels[discovery.LabelServiceName])
	assert.EqualValues(t, []discovery.EndpointPort{}, slice.Ports)
	assert.EqualValues(t, []discovery.Endpoint{}, slice.Endpoints)
	assert.NotEmpty(t, slice.Annotations["endpoints.kubernetes.io/last-change-trigger-time"])
}
// Ensure SyncService gracefully handles a missing service. This test also
// populates another existing service to ensure the clean up process doesn't
// remove too much.
func TestSyncServiceMissing(t *testing.T) {
	namespace := metav1.NamespaceDefault
	client, esController := newController([]string{"node-1"})

	// Build up existing service
	existingServiceName := "stillthere"
	existingServiceKey := endpointutil.ServiceKey{Name: existingServiceName, Namespace: namespace}
	esController.triggerTimeTracker.ServiceStates[existingServiceKey] = endpointutil.ServiceState{}
	esController.serviceStore.Add(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: existingServiceName, Namespace: namespace},
		Spec: v1.ServiceSpec{
			Ports:    []v1.ServicePort{{TargetPort: intstr.FromInt(80)}},
			Selector: map[string]string{"foo": "bar"},
		},
	})

	// Add missing service to triggerTimeTracker to ensure the reference is cleaned up
	missingServiceName := "notthere"
	missingServiceKey := endpointutil.ServiceKey{Name: missingServiceName, Namespace: namespace}
	esController.triggerTimeTracker.ServiceStates[missingServiceKey] = endpointutil.ServiceState{}

	err := esController.syncService(fmt.Sprintf("%s/%s", namespace, missingServiceName))

	// nil should be returned when the service doesn't exist
	assert.Nil(t, err, "Expected no error syncing service")

	// That should mean no client actions were performed
	assert.Len(t, client.Actions(), 0)

	// TriggerTimeTracker should have removed the reference to the missing service
	assert.NotContains(t, esController.triggerTimeTracker.ServiceStates, missingServiceKey)

	// TriggerTimeTracker should have left the reference to the existing service
	assert.Contains(t, esController.triggerTimeTracker.ServiceStates, existingServiceKey)
}
// Ensure SyncService correctly selects Pods.
func TestSyncServicePodSelection(t *testing.T) {
	client, esController := newController([]string{"node-1"})
	ns := metav1.NamespaceDefault

	pod1 := newPod(1, ns, true, 0)
	esController.podStore.Add(pod1)

	// ensure this pod will not match the selector
	pod2 := newPod(2, ns, true, 0)
	pod2.Labels["foo"] = "boo"
	esController.podStore.Add(pod2)

	standardSyncService(t, esController, ns, "testing-1", "true")
	expectActions(t, client.Actions(), 1, "create", "endpointslices")

	// an endpoint slice should be created; it should only reference pod1 (not pod2)
	slices, err := client.DiscoveryV1beta1().EndpointSlices(ns).List(context.TODO(), metav1.ListOptions{})
	assert.Nil(t, err, "Expected no error fetching endpoint slices")
	assert.Len(t, slices.Items, 1, "Expected 1 endpoint slice")
	slice := slices.Items[0]
	assert.Len(t, slice.Endpoints, 1, "Expected 1 endpoint in first slice")
	assert.NotEmpty(t, slice.Annotations["endpoints.kubernetes.io/last-change-trigger-time"])
	endpoint := slice.Endpoints[0]
	assert.EqualValues(t, &v1.ObjectReference{Kind: "Pod", Namespace: ns, Name: pod1.Name}, endpoint.TargetRef)
}
// Ensure SyncService correctly selects and labels EndpointSlices.
func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) {
	client, esController := newController([]string{"node-1"})
	ns := metav1.NamespaceDefault
	serviceName := "testing-1"

	// 5 slices, 3 with matching labels for our service
	endpointSlices := []*discovery.EndpointSlice{{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "matching-1",
			Namespace: ns,
			Labels: map[string]string{
				discovery.LabelServiceName: serviceName,
				discovery.LabelManagedBy:   controllerName,
			},
		},
		AddressType: discovery.AddressTypeIPv4,
	}, {
		ObjectMeta: metav1.ObjectMeta{
			Name:      "matching-2",
			Namespace: ns,
			Labels: map[string]string{
				discovery.LabelServiceName: serviceName,
				discovery.LabelManagedBy:   controllerName,
			},
		},
		AddressType: discovery.AddressTypeIPv4,
	}, {
		ObjectMeta: metav1.ObjectMeta{
			Name:      "partially-matching-1",
			Namespace: ns,
			Labels: map[string]string{
				discovery.LabelServiceName: serviceName,
			},
		},
		AddressType: discovery.AddressTypeIPv4,
	}, {
		ObjectMeta: metav1.ObjectMeta{
			Name:      "not-matching-1",
			Namespace: ns,
			Labels: map[string]string{
				discovery.LabelServiceName: "something-else",
				discovery.LabelManagedBy:   controllerName,
			},
		},
		AddressType: discovery.AddressTypeIPv4,
	}, {
		ObjectMeta: metav1.ObjectMeta{
			Name:      "not-matching-2",
			Namespace: ns,
			Labels: map[string]string{
				discovery.LabelServiceName: serviceName,
				discovery.LabelManagedBy:   "something-else",
			},
		},
		AddressType: discovery.AddressTypeIPv4,
	}}

	cmc := newCacheMutationCheck(endpointSlices)

	// need to add them to both store and fake clientset
	for _, endpointSlice := range endpointSlices {
		err := esController.endpointSliceStore.Add(endpointSlice)
		if err != nil {
			t.Fatalf("Expected no error adding EndpointSlice: %v", err)
		}
		_, err = client.DiscoveryV1beta1().EndpointSlices(ns).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Expected no error creating EndpointSlice: %v", err)
		}
	}

	// +1 for extra action involved in Service creation before syncService call.
	numActionsBefore := len(client.Actions()) + 1
	standardSyncService(t, esController, ns, serviceName, "false")

	if len(client.Actions()) != numActionsBefore+2 {
		t.Errorf("Expected 2 more actions, got %d", len(client.Actions())-numActionsBefore)
	}

	// only 2 slices should match, 2 should be deleted, 1 should be updated as a placeholder
	expectAction(t, client.Actions(), numActionsBefore, "update", "endpointslices")
	expectAction(t, client.Actions(), numActionsBefore+1, "delete", "endpointslices")

	// ensure cache mutation has not occurred
	cmc.Check(t)
}
// Ensure SyncService handles a variety of protocols and IPs appropriately.
func TestSyncServiceFull(t *testing.T) {
	client, esController := newController([]string{"node-1"})
	namespace := metav1.NamespaceDefault
	serviceName := "all-the-protocols"
	ipv6Family := v1.IPv6Protocol

	pod1 := newPod(1, namespace, true, 0)
	pod1.Status.PodIPs = []v1.PodIP{{IP: "1.2.3.4"}}
	esController.podStore.Add(pod1)

	pod2 := newPod(2, namespace, true, 0)
	pod2.Status.PodIPs = []v1.PodIP{{IP: "1.2.3.5"}, {IP: "1234::5678:0000:0000:9abc:def0"}}
	esController.podStore.Add(pod2)

	// create service with all protocols and multiple ports
	serviceCreateTime := time.Now()
	service := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:              serviceName,
			Namespace:         namespace,
			CreationTimestamp: metav1.NewTime(serviceCreateTime),
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{
				{Name: "tcp-example", TargetPort: intstr.FromInt(80), Protocol: v1.ProtocolTCP},
				{Name: "udp-example", TargetPort: intstr.FromInt(161), Protocol: v1.ProtocolUDP},
				{Name: "sctp-example", TargetPort: intstr.FromInt(3456), Protocol: v1.ProtocolSCTP},
			},
			Selector: map[string]string{"foo": "bar"},
			IPFamily: &ipv6Family,
		},
	}
	esController.serviceStore.Add(service)
	_, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
	assert.Nil(t, err, "Expected no error creating service")

	// run through full sync service loop
	err = esController.syncService(fmt.Sprintf("%s/%s", namespace, serviceName))
	assert.Nil(t, err)

	// last action should be to create endpoint slice
	expectActions(t, client.Actions(), 1, "create", "endpointslices")
	sliceList, err := client.DiscoveryV1beta1().EndpointSlices(namespace).List(context.TODO(), metav1.ListOptions{})
	assert.Nil(t, err, "Expected no error fetching endpoint slices")
	assert.Len(t, sliceList.Items, 1, "Expected 1 endpoint slice")

	// ensure all attributes of endpoint slice match expected state
	slice := sliceList.Items[0]
	assert.Len(t, slice.Endpoints, 1, "Expected 1 endpoint in first slice")
	assert.Equal(t, serviceCreateTime.Format(time.RFC3339Nano), slice.Annotations["endpoints.kubernetes.io/last-change-trigger-time"])
	assert.EqualValues(t, []discovery.EndpointPort{{
		Name:     utilpointer.StringPtr("sctp-example"),
		Protocol: protoPtr(v1.ProtocolSCTP),
		Port:     utilpointer.Int32Ptr(int32(3456)),
	}, {
		Name:     utilpointer.StringPtr("udp-example"),
		Protocol: protoPtr(v1.ProtocolUDP),
		Port:     utilpointer.Int32Ptr(int32(161)),
	}, {
		Name:     utilpointer.StringPtr("tcp-example"),
		Protocol: protoPtr(v1.ProtocolTCP),
		Port:     utilpointer.Int32Ptr(int32(80)),
	}}, slice.Ports)
	assert.ElementsMatch(t, []discovery.Endpoint{{
		Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
		Addresses:  []string{"1234::5678:0000:0000:9abc:def0"},
		TargetRef:  &v1.ObjectReference{Kind: "Pod", Namespace: namespace, Name: pod2.Name},
		Topology:   map[string]string{"kubernetes.io/hostname": "node-1"},
	}}, slice.Endpoints)
}
// Test helpers
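
// standardSyncService creates a Service via createService and then runs
// syncService for it, failing the test if the sync returns an error.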
func standardSyncService(t *testing.T, esController *endpointSliceController, namespace, serviceName, managedBySetup string) {
	t.Helper()
	createService(t, esController, namespace, serviceName, managedBySetup)

	err := esController.syncService(fmt.Sprintf("%s/%s", namespace, serviceName))
	assert.Nil(t, err, "Expected no error syncing service")
}
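
// createService builds a Service with a selector, adds it to the service
// store, and creates it through the fake clientset.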
func createService(t *testing.T, esController *endpointSliceController, namespace, serviceName, managedBySetup string) *v1.Service {
	t.Helper()
	service := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:              serviceName,
			Namespace:         namespace,
			CreationTimestamp: metav1.NewTime(time.Now()),
		},
		Spec: v1.ServiceSpec{
			Ports:    []v1.ServicePort{{TargetPort: intstr.FromInt(80)}},
			Selector: map[string]string{"foo": "bar"},
		},
	}
	esController.serviceStore.Add(service)
	_, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
	assert.Nil(t, err, "Expected no error creating service")
	return service
}
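
// expectAction asserts that the action at the given index in the actions
// slice has the expected verb and resource.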
func expectAction(t *testing.T, actions []k8stesting.Action, index int, verb, resource string) {
	t.Helper()
	if len(actions) <= index {
		t.Fatalf("Expected at least %d actions, got %d", index+1, len(actions))
	}

	action := actions[index]
	if action.GetVerb() != verb {
		t.Errorf("Expected action %d verb to be %s, got %s", index, verb, action.GetVerb())
	}

	if action.GetResource().Resource != resource {
		t.Errorf("Expected action %d resource to be %s, got %s", index, resource, action.GetResource().Resource)
	}
}
// protoPtr takes a Protocol and returns a pointer to it.
func protoPtr(proto v1.Protocol) *v1.Protocol {
	return &proto
}
// cacheMutationCheck helps ensure that cached objects have not been changed
// in any way throughout a test run.
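//
// Typical usage, mirroring TestSyncServiceEndpointSliceLabelSelection above:
// snapshot the cached objects before the code under test runs, then verify
// afterwards that nothing mutated them in place:
//
//	cmc := newCacheMutationCheck(endpointSlices)
//	// ... exercise the controller ...
//	cmc.Check(t)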
type cacheMutationCheck struct {
	objects []cacheObject
}

// cacheObject stores a reference to an original object as well as a deep copy
// of that object to track any mutations in the original object.
type cacheObject struct {
	original runtime.Object
	deepCopy runtime.Object
}
// newCacheMutationCheck initializes a cacheMutationCheck with EndpointSlices.
func newCacheMutationCheck(endpointSlices []*discovery.EndpointSlice) cacheMutationCheck {
	cmc := cacheMutationCheck{}
	for _, endpointSlice := range endpointSlices {
		cmc.Add(endpointSlice)
	}
	return cmc
}
// Add appends a runtime.Object and a deep copy of that object into the
// cacheMutationCheck.
func (cmc *cacheMutationCheck) Add(o runtime.Object) {
	cmc.objects = append(cmc.objects, cacheObject{
		original: o,
		deepCopy: o.DeepCopyObject(),
	})
}
// Check verifies that no objects in the cacheMutationCheck have been mutated.
func (cmc *cacheMutationCheck) Check(t *testing.T) {
	for _, o := range cmc.objects {
		if !reflect.DeepEqual(o.original, o.deepCopy) {
			// Cached objects can't be safely mutated and instead should be deep
			// copied before being changed in any way.
			t.Errorf("Cached object was unexpectedly mutated. Original: %+v, Mutated: %+v", o.deepCopy, o.original)
		}
	}
}