service.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network

import (
	"bytes"
	"errors"
	"fmt"
	"math/rand"
	"net"
	"strconv"
	"strings"
	"time"

	compute "google.golang.org/api/compute/v1"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	cloudprovider "k8s.io/cloud-provider"
	"k8s.io/kubernetes/pkg/controller/endpoint"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
	imageutils "k8s.io/kubernetes/test/utils/image"
	gcecloud "k8s.io/legacy-cloud-providers/gce"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)
const (
	defaultServeHostnameServicePort = 80
	defaultServeHostnameServiceName = "svc-hostname"
)

var (
	defaultServeHostnameService = v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: defaultServeHostnameServiceName,
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{
				Port:       int32(defaultServeHostnameServicePort),
				TargetPort: intstr.FromInt(9376),
				Protocol:   v1.ProtocolTCP,
			}},
			Selector: map[string]string{
				"name": defaultServeHostnameServiceName,
			},
		},
	}
)
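// getServeHostnameService returns a fresh copy of defaultServeHostnameService
// with the name and selector rewritten, so each test can create an independent
// Service backed by its own pods.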
func getServeHostnameService(name string) *v1.Service {
	svc := defaultServeHostnameService.DeepCopy()
	svc.ObjectMeta.Name = name
	svc.Spec.Selector["name"] = name
	return svc
}
var _ = SIGDescribe("Services", func() {
	f := framework.NewDefaultFramework("services")

	var cs clientset.Interface
	serviceLBNames := []string{}
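	// serviceLBNames records the names of the cloud load balancers created by
	// the tests below, so AfterEach can release them even when a test fails.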
	ginkgo.BeforeEach(func() {
		cs = f.ClientSet
	})

	ginkgo.AfterEach(func() {
		if ginkgo.CurrentGinkgoTestDescription().Failed {
			framework.DescribeSvc(f.Namespace.Name)
		}
		for _, lb := range serviceLBNames {
			e2elog.Logf("cleaning load balancer resource for %s", lb)
			framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
		}
		// reset serviceLBNames
		serviceLBNames = []string{}
	})
	// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.

	/*
		Release : v1.9
		Testname: Kubernetes Service
		Description: By default, when a Kubernetes cluster is running, there MUST be a 'kubernetes' service running in the cluster.
	*/
	framework.ConformanceIt("should provide secure master service ", func() {
		_, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
		framework.ExpectNoError(err, "failed to fetch the service object for the service named kubernetes")
	})
	/*
		Release : v1.9
		Testname: Service, endpoints
		Description: Create a service with no backing Pods; the service MUST come up and report an empty set of endpoints. Add a Pod to the service; the endpoints MUST list every port the Pod exposes. Add a second Pod; the endpoints MUST list the ports exposed by both Pods. Delete the first Pod; the endpoints MUST be validated to show only the ports of the remaining Pod. Delete the second Pod; the service's endpoints MUST be empty again.
	*/
	framework.ConformanceIt("should serve a basic endpoint from pods ", func() {
		serviceName := "endpoint-test2"
		ns := f.Namespace.Name
		jig := framework.NewServiceTestJig(cs, serviceName)
		labels := map[string]string{
			"foo": "bar",
			"baz": "blah",
		}

		ginkgo.By("creating service " + serviceName + " in namespace " + ns)
		defer func() {
			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
			framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
		}()
		ports := []v1.ServicePort{{
			Port:       80,
			TargetPort: intstr.FromInt(80),
		}}
		_, err := jig.CreateServiceWithServicePort(labels, ns, ports)
		framework.ExpectNoError(err, "failed to create service with ServicePorts in namespace: %s", ns)
		err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

		names := map[string]bool{}
		defer func() {
			for name := range names {
				err := cs.CoreV1().Pods(ns).Delete(name, nil)
				framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
			}
		}()

		name1 := "pod1"
		name2 := "pod2"

		framework.CreatePodOrFail(cs, ns, name1, labels, []v1.ContainerPort{{ContainerPort: 80}})
		names[name1] = true
		err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{name1: {80}})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

		framework.CreatePodOrFail(cs, ns, name2, labels, []v1.ContainerPort{{ContainerPort: 80}})
		names[name2] = true
		err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{name1: {80}, name2: {80}})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

		framework.DeletePodOrFail(cs, ns, name1)
		delete(names, name1)
		err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{name2: {80}})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

		framework.DeletePodOrFail(cs, ns, name2)
		delete(names, name2)
		err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)
	})
	/*
		Release : v1.9
		Testname: Service, endpoints with multiple ports
		Description: Create a service with two ports and no backing Pods yet. The service MUST come up and report an empty set of endpoints. Add a Pod serving the first port; the service MUST list one endpoint for that Pod on that port. Add another Pod serving the second port; the service MUST list both endpoints. Delete the first Pod; the service MUST list only the endpoint for the second Pod. Delete the second Pod; the service MUST again have an empty set of endpoints.
	*/
	framework.ConformanceIt("should serve multiport endpoints from pods ", func() {
		// repacking functionality is intentionally not tested here - it's better to test it in an integration test.
		serviceName := "multi-endpoint-test"
		ns := f.Namespace.Name
		jig := framework.NewServiceTestJig(cs, serviceName)

		defer func() {
			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
			framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
		}()

		labels := map[string]string{"foo": "bar"}
		svc1port := "svc1"
		svc2port := "svc2"

		ginkgo.By("creating service " + serviceName + " in namespace " + ns)
		ports := []v1.ServicePort{
			{
				Name:       "portname1",
				Port:       80,
				TargetPort: intstr.FromString(svc1port),
			},
			{
				Name:       "portname2",
				Port:       81,
				TargetPort: intstr.FromString(svc2port),
			},
		}
		_, err := jig.CreateServiceWithServicePort(labels, ns, ports)
		framework.ExpectNoError(err, "failed to create service with ServicePorts in namespace: %s", ns)

		port1 := 100
		port2 := 101
		err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

		names := map[string]bool{}
		defer func() {
			for name := range names {
				err := cs.CoreV1().Pods(ns).Delete(name, nil)
				framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
			}
		}()

		containerPorts1 := []v1.ContainerPort{
			{
				Name:          svc1port,
				ContainerPort: int32(port1),
			},
		}
		containerPorts2 := []v1.ContainerPort{
			{
				Name:          svc2port,
				ContainerPort: int32(port2),
			},
		}

		podname1 := "pod1"
		podname2 := "pod2"

		framework.CreatePodOrFail(cs, ns, podname1, labels, containerPorts1)
		names[podname1] = true
		err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{podname1: {port1}})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

		framework.CreatePodOrFail(cs, ns, podname2, labels, containerPorts2)
		names[podname2] = true
		err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{podname1: {port1}, podname2: {port2}})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

		framework.DeletePodOrFail(cs, ns, podname1)
		delete(names, podname1)
		err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{podname2: {port2}})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

		framework.DeletePodOrFail(cs, ns, podname2)
		delete(names, podname2)
		err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)
	})
	ginkgo.It("should preserve source pod IP for traffic thru service cluster IP", func() {
		// This behavior is not supported if kube-proxy is in "userspace" mode,
		// so we check the kube-proxy mode and skip this test if that's the case.
		if proxyMode, err := framework.ProxyMode(f); err == nil {
			if proxyMode == "userspace" {
				framework.Skipf("The test doesn't work with kube-proxy in userspace mode")
			}
		} else {
			e2elog.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err)
		}

		serviceName := "sourceip-test"
		ns := f.Namespace.Name

		ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
		jig := framework.NewServiceTestJig(cs, serviceName)
		servicePort := 8080
		tcpService := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort))
		jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)
		defer func() {
			e2elog.Logf("Cleaning up the sourceip test service")
			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
			framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
		}()
		serviceIP := tcpService.Spec.ClusterIP
		e2elog.Logf("sourceip-test cluster ip: %s", serviceIP)

		ginkgo.By("Picking multiple nodes")
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		if len(nodes.Items) == 1 {
			framework.Skipf("The test requires two Ready nodes on %s, but found just one.", framework.TestContext.Provider)
		}
		node1 := nodes.Items[0]
		node2 := nodes.Items[1]

		ginkgo.By("Creating a webserver pod to be part of the TCP service which echoes back the source ip")
		serverPodName := "echoserver-sourceip"
		jig.LaunchEchoserverPodOnNode(f, node1.Name, serverPodName)
		defer func() {
			e2elog.Logf("Cleaning up the echo server pod")
			err := cs.CoreV1().Pods(ns).Delete(serverPodName, nil)
			framework.ExpectNoError(err, "failed to delete pod: %s on node: %s", serverPodName, node1.Name)
		}()

		// Waiting for service to expose endpoint.
		err := e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{serverPodName: {servicePort}})
		framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

		ginkgo.By("Retrieving sourceip from a pod on the same node")
		sourceIP1, execPodIP1 := execSourceipTest(f, cs, ns, node1.Name, serviceIP, servicePort)
		ginkgo.By("Verifying the preserved source ip")
		gomega.Expect(sourceIP1).To(gomega.Equal(execPodIP1))

		ginkgo.By("Retrieving sourceip from a pod on a different node")
		sourceIP2, execPodIP2 := execSourceipTest(f, cs, ns, node2.Name, serviceIP, servicePort)
		ginkgo.By("Verifying the preserved source ip")
		gomega.Expect(sourceIP2).To(gomega.Equal(execPodIP2))
	})
	ginkgo.It("should be able to up and down services", func() {
		// TODO: use the ServiceTestJig here
		// this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
		framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
		// this test does not work if the Node does not support SSH Key
		framework.SkipUnlessSSHKeyPresent()

		ns := f.Namespace.Name
		numPods, servicePort := 3, defaultServeHostnameServicePort

		ginkgo.By("creating service1 in namespace " + ns)
		podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service1"), ns, numPods)
		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns)
		ginkgo.By("creating service2 in namespace " + ns)
		podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service2"), ns, numPods)
		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns)

		hosts, err := e2essh.NodeSSHHosts(cs)
		framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
		if len(hosts) == 0 {
			framework.Failf("No ssh-able nodes")
		}
		host := hosts[0]
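		// All reachability checks below run from this single node over SSH; a
		// ClusterIP is expected to be reachable from any node in the cluster.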
		ginkgo.By("verifying service1 is up")
		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
		ginkgo.By("verifying service2 is up")
		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))

		// Stop service 1 and make sure it is gone.
		ginkgo.By("stopping service1")
		framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service1"))

		ginkgo.By("verifying service1 is not up")
		framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svc1IP, servicePort))
		ginkgo.By("verifying service2 is still up")
		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))

		// Start another service and verify both are up.
		ginkgo.By("creating service3 in namespace " + ns)
		podNames3, svc3IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service3"), ns, numPods)
		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc3IP, ns)

		if svc2IP == svc3IP {
			framework.Failf("service IPs conflict: %v", svc2IP)
		}

		ginkgo.By("verifying service2 is still up")
		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
		ginkgo.By("verifying service3 is up")
		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames3, svc3IP, servicePort))
	})
	ginkgo.It("should work after restarting kube-proxy [Disruptive]", func() {
		// TODO: use the ServiceTestJig here
		framework.SkipUnlessProviderIs("gce", "gke")

		ns := f.Namespace.Name
		numPods, servicePort := 3, defaultServeHostnameServicePort

		svc1 := "service1"
		svc2 := "service2"

		defer func() {
			framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc1))
		}()
		podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods)
		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns)

		defer func() {
			framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc2))
		}()
		podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods)
		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns)

		if svc1IP == svc2IP {
			framework.Failf("VIPs conflict: %v", svc1IP)
		}

		hosts, err := e2essh.NodeSSHHosts(cs)
		framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
		if len(hosts) == 0 {
			framework.Failf("No ssh-able nodes")
		}
		host := hosts[0]

		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))

		ginkgo.By(fmt.Sprintf("Restarting kube-proxy on %v", host))
		if err := framework.RestartKubeProxy(host); err != nil {
			framework.Failf("error restarting kube-proxy: %v", err)
		}
		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))

		ginkgo.By("Removing iptable rules")
		result, err := e2essh.SSH(`
			sudo iptables -t nat -F KUBE-SERVICES || true;
			sudo iptables -t nat -F KUBE-PORTALS-HOST || true;
			sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, framework.TestContext.Provider)
		if err != nil || result.Code != 0 {
			e2essh.LogResult(result)
			framework.Failf("couldn't remove iptable rules: %v", err)
		}
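		// kube-proxy's periodic resync should reinstall the flushed NAT rules,
		// so both services are expected to become reachable again.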
		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
	})
	ginkgo.It("should work after restarting apiserver [Disruptive]", func() {
		// TODO: use the ServiceTestJig here
		framework.SkipUnlessProviderIs("gce", "gke")

		ns := f.Namespace.Name
		numPods, servicePort := 3, 80

		defer func() {
			framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service1"))
		}()
		podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service1"), ns, numPods)
		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns)

		hosts, err := e2essh.NodeSSHHosts(cs)
		framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
		if len(hosts) == 0 {
			framework.Failf("No ssh-able nodes")
		}
		host := hosts[0]

		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))

		// Restart apiserver
		ginkgo.By("Restarting apiserver")
		if err := framework.RestartApiserver(cs); err != nil {
			framework.Failf("error restarting apiserver: %v", err)
		}
		ginkgo.By("Waiting for apiserver to come up by polling /healthz")
		if err := framework.WaitForApiserverUp(cs); err != nil {
			framework.Failf("error while waiting for apiserver up: %v", err)
		}
		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))

		// Create a new service and check if it's not reusing IP.
		defer func() {
			framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service2"))
		}()
		podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service2"), ns, numPods)
		framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns)

		if svc1IP == svc2IP {
			framework.Failf("VIPs conflict: %v", svc1IP)
		}
		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
	})
	// TODO: Run this test against the userspace proxy and nodes
	// configured with a default deny firewall to validate that the
	// proxy whitelists NodePort traffic.
	ginkgo.It("should be able to create a functioning NodePort service", func() {
		serviceName := "nodeport-test"
		ns := f.Namespace.Name

		jig := framework.NewServiceTestJig(cs, serviceName)
		nodeIP := framework.PickNodeIP(jig.Client) // for later

		ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
		service := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeNodePort
		})
		jig.SanityCheckService(service, v1.ServiceTypeNodePort)
		nodePort := int(service.Spec.Ports[0].NodePort)

		ginkgo.By("creating pod to be part of service " + serviceName)
		jig.RunOrFail(ns, nil)

		ginkgo.By("hitting the pod through the service's NodePort")
		jig.TestReachableHTTP(nodeIP, nodePort, framework.KubeProxyLagTimeout)

		ginkgo.By("verifying the node port is locked")
		hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec")
		// Even if the node-ip:node-port check above passed, this hostexec pod
		// might fall on a node with a laggy kube-proxy.
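		// The loop below polls `ss` for up to 300 seconds for a LISTEN socket on
		// the allocated NodePort; kube-proxy reserves each NodePort by holding a
		// listening socket open on the node.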
		cmd := fmt.Sprintf(`for i in $(seq 1 300); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 1; done; exit 1`, nodePort)
		stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
		if err != nil {
			framework.Failf("expected node port %d to be in use, stdout: %v. err: %v", nodePort, stdout, err)
		}
	})
	// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
	ginkgo.It("should be able to change the type and ports of a service [Slow] [DisabledForLargeClusters]", func() {
		// requires cloud load-balancer support
		framework.SkipUnlessProviderIs("gce", "gke", "aws")

		loadBalancerSupportsUDP := !framework.ProviderIs("aws")

		loadBalancerLagTimeout := framework.LoadBalancerLagTimeoutDefault
		if framework.ProviderIs("aws") {
			loadBalancerLagTimeout = framework.LoadBalancerLagTimeoutAWS
		}
		loadBalancerCreateTimeout := framework.LoadBalancerCreateTimeoutDefault
		if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > framework.LargeClusterMinNodesNumber {
			loadBalancerCreateTimeout = framework.LoadBalancerCreateTimeoutLarge
		}

		// This test is more monolithic than we'd like because LB turnup can be
		// very slow, so we lumped all the tests into one LB lifecycle.

		serviceName := "mutability-test"
		ns1 := f.Namespace.Name // LB1 in ns1 on TCP
		e2elog.Logf("namespace for TCP test: %s", ns1)

		ginkgo.By("creating a second namespace")
		namespacePtr, err := f.CreateNamespace("services", nil)
		framework.ExpectNoError(err, "failed to create namespace")
		ns2 := namespacePtr.Name // LB2 in ns2 on UDP
		e2elog.Logf("namespace for UDP test: %s", ns2)

		jig := framework.NewServiceTestJig(cs, serviceName)
		nodeIP := framework.PickNodeIP(jig.Client) // for later

		// Test TCP and UDP Services. Services with the same name in different
		// namespaces should get different node ports and load balancers.

		ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1)
		tcpService := jig.CreateTCPServiceOrFail(ns1, nil)
		jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)

		ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2)
		udpService := jig.CreateUDPServiceOrFail(ns2, nil)
		jig.SanityCheckService(udpService, v1.ServiceTypeClusterIP)

		ginkgo.By("verifying that TCP and UDP use the same port")
		if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port {
			framework.Failf("expected to use the same port for TCP and UDP")
		}
		svcPort := int(tcpService.Spec.Ports[0].Port)
		e2elog.Logf("service port (TCP and UDP): %d", svcPort)

		ginkgo.By("creating a pod to be part of the TCP service " + serviceName)
		jig.RunOrFail(ns1, nil)

		ginkgo.By("creating a pod to be part of the UDP service " + serviceName)
		jig.RunOrFail(ns2, nil)

		// Change the services to NodePort.

		ginkgo.By("changing the TCP service to type=NodePort")
		tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) {
			s.Spec.Type = v1.ServiceTypeNodePort
		})
		jig.SanityCheckService(tcpService, v1.ServiceTypeNodePort)
		tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
		e2elog.Logf("TCP node port: %d", tcpNodePort)

		ginkgo.By("changing the UDP service to type=NodePort")
		udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) {
			s.Spec.Type = v1.ServiceTypeNodePort
		})
		jig.SanityCheckService(udpService, v1.ServiceTypeNodePort)
		udpNodePort := int(udpService.Spec.Ports[0].NodePort)
		e2elog.Logf("UDP node port: %d", udpNodePort)

		ginkgo.By("hitting the TCP service's NodePort")
		jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout)

		ginkgo.By("hitting the UDP service's NodePort")
		jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout)

		// Change the services to LoadBalancer.

		// Here we test that LoadBalancers can receive static IP addresses. This isn't
		// necessary, but is an additional feature this monolithic test checks.
		requestedIP := ""
		staticIPName := ""
		if framework.ProviderIs("gce", "gke") {
			ginkgo.By("creating a static load balancer IP")
			staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunID)
			gceCloud, err := gce.GetGCECloud()
			framework.ExpectNoError(err, "failed to get GCE cloud provider")

			err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region())
			defer func() {
				if staticIPName != "" {
					// Release GCE static IP - this is not kube-managed and will not be automatically released.
					if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
						e2elog.Logf("failed to release static IP %s: %v", staticIPName, err)
					}
				}
			}()
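			// Note: the reservation error is checked only after the cleanup defer
			// above is registered, so a partially created address is still released.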
			framework.ExpectNoError(err, "failed to create region address: %s", staticIPName)

			reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region())
			framework.ExpectNoError(err, "failed to get region address: %s", staticIPName)

			requestedIP = reservedAddr.Address
			e2elog.Logf("Allocated static load balancer IP: %s", requestedIP)
		}

		ginkgo.By("changing the TCP service to type=LoadBalancer")
		tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) {
			s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable
			s.Spec.Type = v1.ServiceTypeLoadBalancer
		})

		if loadBalancerSupportsUDP {
			ginkgo.By("changing the UDP service to type=LoadBalancer")
			udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) {
				s.Spec.Type = v1.ServiceTypeLoadBalancer
			})
		}
		serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(tcpService))
		if loadBalancerSupportsUDP {
			serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(udpService))
		}

		ginkgo.By("waiting for the TCP service to have a load balancer")
		// Wait for the load balancer to be created asynchronously
		tcpService = jig.WaitForLoadBalancerOrFail(ns1, tcpService.Name, loadBalancerCreateTimeout)
		jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
		if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
			framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
		}
		if requestedIP != "" && framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP {
			framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
		}
		tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
		e2elog.Logf("TCP load balancer: %s", tcpIngressIP)

		if framework.ProviderIs("gce", "gke") {
			// Do this as early as possible, which overrides the `defer` above.
			// This is mostly out of fear of leaking the IP in a timeout case
			// (as of this writing we're not 100% sure where the leaks are
			// coming from, so this is first-aid rather than surgery).
			ginkgo.By("demoting the static IP to ephemeral")
			if staticIPName != "" {
				gceCloud, err := gce.GetGCECloud()
				framework.ExpectNoError(err, "failed to get GCE cloud provider")
				// Deleting it after it is attached "demotes" it to an
				// ephemeral IP, which can be auto-released.
				if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
					framework.Failf("failed to release static IP %s: %v", staticIPName, err)
				}
				staticIPName = ""
			}
		}

		var udpIngressIP string
		if loadBalancerSupportsUDP {
			ginkgo.By("waiting for the UDP service to have a load balancer")
			// 2nd one should be faster since they ran in parallel.
			udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name, loadBalancerCreateTimeout)
			jig.SanityCheckService(udpService, v1.ServiceTypeLoadBalancer)
			if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
				framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
			}
			udpIngressIP = framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
			e2elog.Logf("UDP load balancer: %s", udpIngressIP)

			ginkgo.By("verifying that TCP and UDP use different load balancers")
			if tcpIngressIP == udpIngressIP {
				framework.Failf("Load balancers are not different: %s", framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
			}
		}

		ginkgo.By("hitting the TCP service's NodePort")
		jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout)

		ginkgo.By("hitting the UDP service's NodePort")
		jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout)

		ginkgo.By("hitting the TCP service's LoadBalancer")
		jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)

		if loadBalancerSupportsUDP {
			ginkgo.By("hitting the UDP service's LoadBalancer")
			jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
		}

		// Change the services' node ports.

		ginkgo.By("changing the TCP service's NodePort")
		tcpService = jig.ChangeServiceNodePortOrFail(ns1, tcpService.Name, tcpNodePort)
		jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
		tcpNodePortOld := tcpNodePort
		tcpNodePort = int(tcpService.Spec.Ports[0].NodePort)
		if tcpNodePort == tcpNodePortOld {
			framework.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort)
		}
		if framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
			framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
		}
		e2elog.Logf("TCP node port: %d", tcpNodePort)

		ginkgo.By("changing the UDP service's NodePort")
		udpService = jig.ChangeServiceNodePortOrFail(ns2, udpService.Name, udpNodePort)
		if loadBalancerSupportsUDP {
			jig.SanityCheckService(udpService, v1.ServiceTypeLoadBalancer)
		} else {
			jig.SanityCheckService(udpService, v1.ServiceTypeNodePort)
		}
		udpNodePortOld := udpNodePort
		udpNodePort = int(udpService.Spec.Ports[0].NodePort)
		if udpNodePort == udpNodePortOld {
			framework.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort)
		}
		if loadBalancerSupportsUDP && framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
			framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
		}
		e2elog.Logf("UDP node port: %d", udpNodePort)

		ginkgo.By("hitting the TCP service's new NodePort")
		jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout)

		ginkgo.By("hitting the UDP service's new NodePort")
		jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout)

		ginkgo.By("checking the old TCP NodePort is closed")
		jig.TestNotReachableHTTP(nodeIP, tcpNodePortOld, framework.KubeProxyLagTimeout)

		ginkgo.By("checking the old UDP NodePort is closed")
		jig.TestNotReachableUDP(nodeIP, udpNodePortOld, framework.KubeProxyLagTimeout)

		ginkgo.By("hitting the TCP service's LoadBalancer")
		jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)

		if loadBalancerSupportsUDP {
			ginkgo.By("hitting the UDP service's LoadBalancer")
			jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
		}
		// Change the services' main ports.

		ginkgo.By("changing the TCP service's port")
		tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) {
			s.Spec.Ports[0].Port++
		})
		jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
		svcPortOld := svcPort
		svcPort = int(tcpService.Spec.Ports[0].Port)
		if svcPort == svcPortOld {
			framework.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort)
		}
		if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
			framework.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort)
		}
		if framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
			framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
		}

		ginkgo.By("changing the UDP service's port")
		udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) {
			s.Spec.Ports[0].Port++
		})
		if loadBalancerSupportsUDP {
			jig.SanityCheckService(udpService, v1.ServiceTypeLoadBalancer)
		} else {
			jig.SanityCheckService(udpService, v1.ServiceTypeNodePort)
		}
		if int(udpService.Spec.Ports[0].Port) != svcPort {
			framework.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port)
		}
		if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
			framework.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort)
		}
		if loadBalancerSupportsUDP && framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
			framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
		}
		e2elog.Logf("service port (TCP and UDP): %d", svcPort)

		ginkgo.By("hitting the TCP service's NodePort")
		jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout)

		ginkgo.By("hitting the UDP service's NodePort")
		jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout)

		ginkgo.By("hitting the TCP service's LoadBalancer")
		jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)

		if loadBalancerSupportsUDP {
			ginkgo.By("hitting the UDP service's LoadBalancer")
			jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
		}

		ginkgo.By("Scaling the pods to 0")
		jig.Scale(ns1, 0)
		jig.Scale(ns2, 0)

		ginkgo.By("looking for ICMP REJECT on the TCP service's NodePort")
		jig.TestRejectedHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout)

		ginkgo.By("looking for ICMP REJECT on the UDP service's NodePort")
		jig.TestRejectedUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout)

		ginkgo.By("looking for ICMP REJECT on the TCP service's LoadBalancer")
		jig.TestRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)

		if loadBalancerSupportsUDP {
			ginkgo.By("looking for ICMP REJECT on the UDP service's LoadBalancer")
			jig.TestRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
		}

		ginkgo.By("Scaling the pods to 1")
		jig.Scale(ns1, 1)
		jig.Scale(ns2, 1)

		ginkgo.By("hitting the TCP service's NodePort")
		jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout)

		ginkgo.By("hitting the UDP service's NodePort")
		jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout)

		ginkgo.By("hitting the TCP service's LoadBalancer")
		jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)

		if loadBalancerSupportsUDP {
			ginkgo.By("hitting the UDP service's LoadBalancer")
			jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
		}

		// Change the services back to ClusterIP.

		ginkgo.By("changing TCP service back to type=ClusterIP")
		tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) {
			s.Spec.Type = v1.ServiceTypeClusterIP
			s.Spec.Ports[0].NodePort = 0
		})
		// Wait for the load balancer to be destroyed asynchronously
		tcpService = jig.WaitForLoadBalancerDestroyOrFail(ns1, tcpService.Name, tcpIngressIP, svcPort, loadBalancerCreateTimeout)
		jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)

		ginkgo.By("changing UDP service back to type=ClusterIP")
		udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) {
			s.Spec.Type = v1.ServiceTypeClusterIP
			s.Spec.Ports[0].NodePort = 0
		})
		if loadBalancerSupportsUDP {
			// Wait for the load balancer to be destroyed asynchronously
			udpService = jig.WaitForLoadBalancerDestroyOrFail(ns2, udpService.Name, udpIngressIP, svcPort, loadBalancerCreateTimeout)
			jig.SanityCheckService(udpService, v1.ServiceTypeClusterIP)
		}

		ginkgo.By("checking the TCP NodePort is closed")
		jig.TestNotReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout)

		ginkgo.By("checking the UDP NodePort is closed")
		jig.TestNotReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout)

		ginkgo.By("checking the TCP LoadBalancer is closed")
		jig.TestNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)

		if loadBalancerSupportsUDP {
			ginkgo.By("checking the UDP LoadBalancer is closed")
			jig.TestNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
		}
	})
	ginkgo.It("should be able to update NodePorts with two same port numbers but different protocols", func() {
		serviceName := "nodeport-update-service"
		ns := f.Namespace.Name
		jig := framework.NewServiceTestJig(cs, serviceName)

		ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
		tcpService := jig.CreateTCPServiceOrFail(ns, nil)
		defer func() {
			e2elog.Logf("Cleaning up the updating NodePorts test service")
			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
			framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
		}()
		jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)
		svcPort := int(tcpService.Spec.Ports[0].Port)
		e2elog.Logf("service port TCP: %d", svcPort)

		// Change the services to NodePort and add a UDP port.
		ginkgo.By("changing the TCP service to type=NodePort and add a UDP port")
		newService := jig.UpdateServiceOrFail(ns, tcpService.Name, func(s *v1.Service) {
			s.Spec.Type = v1.ServiceTypeNodePort
			s.Spec.Ports = []v1.ServicePort{
				{
					Name:     "tcp-port",
					Port:     80,
					Protocol: v1.ProtocolTCP,
				},
				{
					Name:     "udp-port",
					Port:     80,
					Protocol: v1.ProtocolUDP,
				},
			}
		})
		jig.SanityCheckService(newService, v1.ServiceTypeNodePort)
		if len(newService.Spec.Ports) != 2 {
			framework.Failf("new service should have two Ports")
		}
		for _, port := range newService.Spec.Ports {
			if port.NodePort == 0 {
				framework.Failf("new service failed to allocate NodePort for Port %s", port.Name)
			}
			e2elog.Logf("new service allocates NodePort %d for Port %s", port.NodePort, port.Name)
		}
	})
	ginkgo.It("should be able to change the type from ExternalName to ClusterIP", func() {
		serviceName := "externalname-service"
		ns := f.Namespace.Name
		jig := framework.NewServiceTestJig(cs, serviceName)

		ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns)
		externalNameService := jig.CreateExternalNameServiceOrFail(ns, nil)
		defer func() {
			e2elog.Logf("Cleaning up the ExternalName to ClusterIP test service")
			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
			framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
		}()
		jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName)

		ginkgo.By("changing the ExternalName service to type=ClusterIP")
		clusterIPService := jig.UpdateServiceOrFail(ns, externalNameService.Name, func(s *v1.Service) {
			s.Spec.Type = v1.ServiceTypeClusterIP
			s.Spec.ExternalName = ""
			s.Spec.Ports = []v1.ServicePort{
				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
			}
		})
		jig.SanityCheckService(clusterIPService, v1.ServiceTypeClusterIP)
	})
	ginkgo.It("should be able to change the type from ExternalName to NodePort", func() {
		serviceName := "externalname-service"
		ns := f.Namespace.Name
		jig := framework.NewServiceTestJig(cs, serviceName)

		ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns)
		externalNameService := jig.CreateExternalNameServiceOrFail(ns, nil)
		defer func() {
			e2elog.Logf("Cleaning up the ExternalName to NodePort test service")
			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
			framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
		}()
		jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName)

		ginkgo.By("changing the ExternalName service to type=NodePort")
		nodePortService := jig.UpdateServiceOrFail(ns, externalNameService.Name, func(s *v1.Service) {
			s.Spec.Type = v1.ServiceTypeNodePort
			s.Spec.ExternalName = ""
			s.Spec.Ports = []v1.ServicePort{
				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
			}
		})
		jig.SanityCheckService(nodePortService, v1.ServiceTypeNodePort)
	})
	ginkgo.It("should be able to change the type from ClusterIP to ExternalName", func() {
		serviceName := "clusterip-service"
		ns := f.Namespace.Name
		jig := framework.NewServiceTestJig(cs, serviceName)

		ginkgo.By("creating a service " + serviceName + " with the type=ClusterIP in namespace " + ns)
		clusterIPService := jig.CreateTCPServiceOrFail(ns, nil)
		defer func() {
			e2elog.Logf("Cleaning up the ClusterIP to ExternalName test service")
			err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
			framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
		}()
		jig.SanityCheckService(clusterIPService, v1.ServiceTypeClusterIP)

		ginkgo.By("changing the ClusterIP service to type=ExternalName")
		externalNameService := jig.UpdateServiceOrFail(ns, clusterIPService.Name, func(s *v1.Service) {
			s.Spec.Type = v1.ServiceTypeExternalName
			s.Spec.ExternalName = "foo.example.com"
			s.Spec.ClusterIP = ""
		})
		jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName)
	})
  839. ginkgo.It("should be able to change the type from NodePort to ExternalName", func() {
  840. serviceName := "nodeport-service"
  841. ns := f.Namespace.Name
  842. jig := framework.NewServiceTestJig(cs, serviceName)
  843. ginkgo.By("creating a service " + serviceName + " with the type=NodePort in namespace " + ns)
  844. nodePortService := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
  845. svc.Spec.Type = v1.ServiceTypeNodePort
  846. })
  847. defer func() {
  848. e2elog.Logf("Cleaning up the NodePort to ExternalName test service")
  849. err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
  850. framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
  851. }()
  852. jig.SanityCheckService(nodePortService, v1.ServiceTypeNodePort)
  853. ginkgo.By("changing the NodePort service to type=ExternalName")
  854. externalNameService := jig.UpdateServiceOrFail(ns, nodePortService.Name, func(s *v1.Service) {
  855. s.Spec.Type = v1.ServiceTypeExternalName
  856. s.Spec.ExternalName = "foo.example.com"
  857. s.Spec.ClusterIP = ""
  858. s.Spec.Ports[0].NodePort = 0
  859. })
  860. jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName)
  861. })
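
	// The apiserver is expected to allocate a single NodePort shared by the TCP
	// and UDP variants of the same port number on one service.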
  862. ginkgo.It("should use same NodePort with same port but different protocols", func() {
  863. serviceName := "nodeports"
  864. ns := f.Namespace.Name
  865. t := framework.NewServerTest(cs, ns, serviceName)
  866. defer func() {
  867. defer ginkgo.GinkgoRecover()
  868. errs := t.Cleanup()
  869. if len(errs) != 0 {
  870. framework.Failf("errors in cleanup: %v", errs)
  871. }
  872. }()
  873. ginkgo.By("creating service " + serviceName + " with same NodePort but different protocols in namespace " + ns)
  874. service := &v1.Service{
  875. ObjectMeta: metav1.ObjectMeta{
  876. Name: t.ServiceName,
  877. Namespace: t.Namespace,
  878. },
  879. Spec: v1.ServiceSpec{
  880. Selector: t.Labels,
  881. Type: v1.ServiceTypeNodePort,
  882. Ports: []v1.ServicePort{
  883. {
  884. Name: "tcp-port",
  885. Port: 53,
  886. Protocol: v1.ProtocolTCP,
  887. },
  888. {
  889. Name: "udp-port",
  890. Port: 53,
  891. Protocol: v1.ProtocolUDP,
  892. },
  893. },
  894. },
  895. }
  896. result, err := t.CreateService(service)
  897. framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
  898. if len(result.Spec.Ports) != 2 {
  899. framework.Failf("got unexpected len(Spec.Ports) for new service: %v", result)
  900. }
  901. if result.Spec.Ports[0].NodePort != result.Spec.Ports[1].NodePort {
  902. framework.Failf("should use same NodePort for new service: %v", result)
  903. }
  904. })
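
	// A second service asking for a NodePort that is already allocated must be
	// rejected until the first service releases the port.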
  905. ginkgo.It("should prevent NodePort collisions", func() {
  906. // TODO: use the ServiceTestJig here
  907. baseName := "nodeport-collision-"
  908. serviceName1 := baseName + "1"
  909. serviceName2 := baseName + "2"
  910. ns := f.Namespace.Name
  911. t := framework.NewServerTest(cs, ns, serviceName1)
  912. defer func() {
  913. defer ginkgo.GinkgoRecover()
  914. errs := t.Cleanup()
  915. if len(errs) != 0 {
  916. framework.Failf("errors in cleanup: %v", errs)
  917. }
  918. }()
  919. ginkgo.By("creating service " + serviceName1 + " with type NodePort in namespace " + ns)
  920. service := t.BuildServiceSpec()
  921. service.Spec.Type = v1.ServiceTypeNodePort
  922. result, err := t.CreateService(service)
  923. framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName1, ns)
  924. if result.Spec.Type != v1.ServiceTypeNodePort {
  925. framework.Failf("got unexpected Spec.Type for new service: %v", result)
  926. }
  927. if len(result.Spec.Ports) != 1 {
  928. framework.Failf("got unexpected len(Spec.Ports) for new service: %v", result)
  929. }
  930. port := result.Spec.Ports[0]
  931. if port.NodePort == 0 {
  932. framework.Failf("got unexpected Spec.Ports[0].NodePort for new service: %v", result)
  933. }
  934. ginkgo.By("creating service " + serviceName2 + " with conflicting NodePort")
  935. service2 := t.BuildServiceSpec()
  936. service2.Name = serviceName2
  937. service2.Spec.Type = v1.ServiceTypeNodePort
  938. service2.Spec.Ports[0].NodePort = port.NodePort
  939. result2, err := t.CreateService(service2)
  940. if err == nil {
  941. framework.Failf("Created service with conflicting NodePort: %v", result2)
  942. }
  943. expectedErr := fmt.Sprintf("%d.*port is already allocated", port.NodePort)
  944. gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr))
  945. ginkgo.By("deleting service " + serviceName1 + " to release NodePort")
  946. err = t.DeleteService(serviceName1)
  947. framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName1, ns)
  948. ginkgo.By("creating service " + serviceName2 + " with no-longer-conflicting NodePort")
  949. _, err = t.CreateService(service2)
  950. framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName1, ns)
  951. })
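
	// NodePorts must fall within framework.ServiceNodePortRange; both updates
	// and creates outside that range should be rejected by the apiserver.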
  952. ginkgo.It("should check NodePort out-of-range", func() {
  953. // TODO: use the ServiceTestJig here
  954. serviceName := "nodeport-range-test"
  955. ns := f.Namespace.Name
  956. t := framework.NewServerTest(cs, ns, serviceName)
  957. defer func() {
  958. defer ginkgo.GinkgoRecover()
  959. errs := t.Cleanup()
  960. if len(errs) != 0 {
  961. framework.Failf("errors in cleanup: %v", errs)
  962. }
  963. }()
  964. service := t.BuildServiceSpec()
  965. service.Spec.Type = v1.ServiceTypeNodePort
  966. ginkgo.By("creating service " + serviceName + " with type NodePort in namespace " + ns)
  967. service, err := t.CreateService(service)
  968. framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
  969. if service.Spec.Type != v1.ServiceTypeNodePort {
  970. framework.Failf("got unexpected Spec.Type for new service: %v", service)
  971. }
  972. if len(service.Spec.Ports) != 1 {
  973. framework.Failf("got unexpected len(Spec.Ports) for new service: %v", service)
  974. }
  975. port := service.Spec.Ports[0]
  976. if port.NodePort == 0 {
  977. framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
  978. }
  979. if !framework.ServiceNodePortRange.Contains(int(port.NodePort)) {
  980. framework.Failf("got unexpected (out-of-range) port for new service: %v", service)
  981. }
  982. outOfRangeNodePort := 0
  983. rand.Seed(time.Now().UnixNano())
  984. for {
  985. outOfRangeNodePort = 1 + rand.Intn(65535)
  986. if !framework.ServiceNodePortRange.Contains(outOfRangeNodePort) {
  987. break
  988. }
  989. }
  990. ginkgo.By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort))
  991. result, err := framework.UpdateService(cs, ns, serviceName, func(s *v1.Service) {
  992. s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort)
  993. })
  994. if err == nil {
  995. framework.Failf("failed to prevent update of service with out-of-range NodePort: %v", result)
  996. }
  997. expectedErr := fmt.Sprintf("%d.*port is not in the valid range", outOfRangeNodePort)
  998. gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr))
  999. ginkgo.By("deleting original service " + serviceName)
  1000. err = t.DeleteService(serviceName)
  1001. framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
  1002. ginkgo.By(fmt.Sprintf("creating service "+serviceName+" with out-of-range NodePort %d", outOfRangeNodePort))
  1003. service = t.BuildServiceSpec()
  1004. service.Spec.Type = v1.ServiceTypeNodePort
  1005. service.Spec.Ports[0].NodePort = int32(outOfRangeNodePort)
  1006. service, err = t.CreateService(service)
  1007. if err == nil {
  1008. framework.Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service)
  1009. }
  1010. gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr))
  1011. })
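
	// Once a NodePort service is deleted, nothing should be listening on the
	// port anymore and a new service should be able to reuse it.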
  1012. ginkgo.It("should release NodePorts on delete", func() {
  1013. // TODO: use the ServiceTestJig here
  1014. serviceName := "nodeport-reuse"
  1015. ns := f.Namespace.Name
  1016. t := framework.NewServerTest(cs, ns, serviceName)
  1017. defer func() {
  1018. defer ginkgo.GinkgoRecover()
  1019. errs := t.Cleanup()
  1020. if len(errs) != 0 {
  1021. framework.Failf("errors in cleanup: %v", errs)
  1022. }
  1023. }()
  1024. service := t.BuildServiceSpec()
  1025. service.Spec.Type = v1.ServiceTypeNodePort
  1026. ginkgo.By("creating service " + serviceName + " with type NodePort in namespace " + ns)
  1027. service, err := t.CreateService(service)
  1028. framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
  1029. if service.Spec.Type != v1.ServiceTypeNodePort {
  1030. framework.Failf("got unexpected Spec.Type for new service: %v", service)
  1031. }
  1032. if len(service.Spec.Ports) != 1 {
  1033. framework.Failf("got unexpected len(Spec.Ports) for new service: %v", service)
  1034. }
  1035. port := service.Spec.Ports[0]
  1036. if port.NodePort == 0 {
  1037. framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
  1038. }
  1039. if !framework.ServiceNodePortRange.Contains(int(port.NodePort)) {
  1040. framework.Failf("got unexpected (out-of-range) port for new service: %v", service)
  1041. }
  1042. nodePort := port.NodePort
  1043. ginkgo.By("deleting original service " + serviceName)
  1044. err = t.DeleteService(serviceName)
  1045. framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
  1046. hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec")
  1047. cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort)
  1048. var stdout string
  1049. if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) {
  1050. var err error
  1051. stdout, err = framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
  1052. if err != nil {
  1053. e2elog.Logf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout)
  1054. return false, nil
  1055. }
  1056. return true, nil
  1057. }); pollErr != nil {
  1058. framework.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, framework.KubeProxyLagTimeout, stdout)
  1059. }
  1060. ginkgo.By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort))
  1061. service = t.BuildServiceSpec()
  1062. service.Spec.Type = v1.ServiceTypeNodePort
  1063. service.Spec.Ports[0].NodePort = nodePort
  1064. service, err = t.CreateService(service)
  1065. framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
  1066. })
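
	// With the tolerate-unready annotation set, the service should publish
	// endpoints even for pods that never become ready, including pods that are
	// slowly terminating.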
  1067. ginkgo.It("should create endpoints for unready pods", func() {
  1068. serviceName := "tolerate-unready"
  1069. ns := f.Namespace.Name
  1070. t := framework.NewServerTest(cs, ns, serviceName)
  1071. defer func() {
  1072. defer ginkgo.GinkgoRecover()
  1073. errs := t.Cleanup()
  1074. if len(errs) != 0 {
  1075. framework.Failf("errors in cleanup: %v", errs)
  1076. }
  1077. }()
  1078. t.Name = "slow-terminating-unready-pod"
  1079. t.Image = imageutils.GetE2EImage(imageutils.Netexec)
  1080. port := 80
  1081. terminateSeconds := int64(600)
  1082. service := &v1.Service{
  1083. ObjectMeta: metav1.ObjectMeta{
  1084. Name: t.ServiceName,
  1085. Namespace: t.Namespace,
  1086. Annotations: map[string]string{endpoint.TolerateUnreadyEndpointsAnnotation: "true"},
  1087. },
  1088. Spec: v1.ServiceSpec{
  1089. Selector: t.Labels,
  1090. Ports: []v1.ServicePort{{
  1091. Name: "http",
  1092. Port: int32(port),
  1093. TargetPort: intstr.FromInt(port),
  1094. }},
  1095. },
  1096. }
  1097. rcSpec := framework.RcByNameContainer(t.Name, 1, t.Image, t.Labels, v1.Container{
  1098. Args: []string{fmt.Sprintf("--http-port=%d", port)},
  1099. Name: t.Name,
  1100. Image: t.Image,
  1101. Ports: []v1.ContainerPort{{ContainerPort: int32(port), Protocol: v1.ProtocolTCP}},
  1102. ReadinessProbe: &v1.Probe{
  1103. Handler: v1.Handler{
  1104. Exec: &v1.ExecAction{
  1105. Command: []string{"/bin/false"},
  1106. },
  1107. },
  1108. },
  1109. Lifecycle: &v1.Lifecycle{
  1110. PreStop: &v1.Handler{
  1111. Exec: &v1.ExecAction{
  1112. Command: []string{"/bin/sleep", fmt.Sprintf("%d", terminateSeconds)},
  1113. },
  1114. },
  1115. },
  1116. }, nil)
  1117. rcSpec.Spec.Template.Spec.TerminationGracePeriodSeconds = &terminateSeconds
  1118. ginkgo.By(fmt.Sprintf("creating RC %v with selectors %v", rcSpec.Name, rcSpec.Spec.Selector))
  1119. _, err := t.CreateRC(rcSpec)
  1120. framework.ExpectNoError(err)
  1121. ginkgo.By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector))
  1122. _, err = t.CreateService(service)
  1123. framework.ExpectNoError(err)
  1124. ginkgo.By("Verifying pods for RC " + t.Name)
  1125. framework.ExpectNoError(framework.VerifyPods(t.Client, t.Namespace, t.Name, false, 1))
  1126. svcName := fmt.Sprintf("%v.%v.svc.%v", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
  1127. ginkgo.By("Waiting for endpoints of Service with DNS name " + svcName)
  1128. execPodName := framework.CreateExecPodOrFail(f.ClientSet, f.Namespace.Name, "execpod-", nil)
  1129. cmd := fmt.Sprintf("wget -qO- http://%s:%d/", svcName, port)
  1130. var stdout string
  1131. if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) {
  1132. var err error
  1133. stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd)
  1134. if err != nil {
  1135. e2elog.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
  1136. return false, nil
  1137. }
  1138. return true, nil
  1139. }); pollErr != nil {
  1140. framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout)
  1141. }
  1142. ginkgo.By("Scaling down replication controller to zero")
  1143. framework.ScaleRC(f.ClientSet, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false)
  1144. ginkgo.By("Update service to not tolerate unready services")
  1145. _, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) {
  1146. s.ObjectMeta.Annotations[endpoint.TolerateUnreadyEndpointsAnnotation] = "false"
  1147. })
  1148. framework.ExpectNoError(err)
  1149. ginkgo.By("Check if pod is unreachable")
  1150. cmd = fmt.Sprintf("wget -qO- -T 2 http://%s:%d/; test \"$?\" -eq \"1\"", svcName, port)
  1151. if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) {
  1152. var err error
  1153. stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd)
  1154. if err != nil {
  1155. e2elog.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
  1156. return false, nil
  1157. }
  1158. return true, nil
  1159. }); pollErr != nil {
  1160. framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout)
  1161. }
  1162. ginkgo.By("Update service to tolerate unready services again")
  1163. _, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) {
  1164. s.ObjectMeta.Annotations[endpoint.TolerateUnreadyEndpointsAnnotation] = "true"
  1165. })
  1166. framework.ExpectNoError(err)
  1167. ginkgo.By("Check if terminating pod is available through service")
  1168. cmd = fmt.Sprintf("wget -qO- http://%s:%d/", svcName, port)
  1169. if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) {
  1170. var err error
  1171. stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd)
  1172. if err != nil {
  1173. e2elog.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
  1174. return false, nil
  1175. }
  1176. return true, nil
  1177. }); pollErr != nil {
  1178. framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout)
  1179. }
  1180. ginkgo.By("Remove pods immediately")
  1181. label := labels.SelectorFromSet(labels.Set(t.Labels))
  1182. options := metav1.ListOptions{LabelSelector: label.String()}
  1183. podClient := t.Client.CoreV1().Pods(f.Namespace.Name)
  1184. pods, err := podClient.List(options)
  1185. if err != nil {
  1186. e2elog.Logf("warning: error retrieving pods: %s", err)
  1187. } else {
  1188. for _, pod := range pods.Items {
  1189. var gracePeriodSeconds int64 = 0
  1190. err := podClient.Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds})
  1191. if err != nil {
  1192. e2elog.Logf("warning: error force deleting pod '%s': %s", pod.Name, err)
  1193. }
  1194. }
  1195. }
  1196. })
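
	// LoadBalancerSourceRanges acts as a firewall: only clients whose source IP
	// falls inside the configured CIDRs should be able to reach the LB.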
  1197. ginkgo.It("should only allow access from service loadbalancer source ranges [Slow]", func() {
  1198. // this feature currently supported only on GCE/GKE/AWS
  1199. framework.SkipUnlessProviderIs("gce", "gke", "aws")
  1200. loadBalancerLagTimeout := framework.LoadBalancerLagTimeoutDefault
  1201. if framework.ProviderIs("aws") {
  1202. loadBalancerLagTimeout = framework.LoadBalancerLagTimeoutAWS
  1203. }
  1204. loadBalancerCreateTimeout := framework.LoadBalancerCreateTimeoutDefault
  1205. if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > framework.LargeClusterMinNodesNumber {
  1206. loadBalancerCreateTimeout = framework.LoadBalancerCreateTimeoutLarge
  1207. }
  1208. namespace := f.Namespace.Name
  1209. serviceName := "lb-sourcerange"
  1210. jig := framework.NewServiceTestJig(cs, serviceName)
  1211. ginkgo.By("Prepare allow source ips")
  1212. // prepare the exec pods
  1213. // acceptPod are allowed to access the loadbalancer
  1214. acceptPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-accept", nil)
  1215. dropPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-drop", nil)
  1216. acceptPod, err := cs.CoreV1().Pods(namespace).Get(acceptPodName, metav1.GetOptions{})
  1217. framework.ExpectNoError(err, "failed to fetch pod: %s in namespace: %s", acceptPodName, namespace)
  1218. dropPod, err := cs.CoreV1().Pods(namespace).Get(dropPodName, metav1.GetOptions{})
  1219. framework.ExpectNoError(err, "failed to fetch pod: %s in namespace: %s", dropPodName, namespace)
  1220. ginkgo.By("creating a pod to be part of the service " + serviceName)
  1221. // This container is an nginx container listening on port 80
  1222. // See kubernetes/contrib/ingress/echoheaders/nginx.conf for content of response
  1223. jig.RunOrFail(namespace, nil)
  1224. // Create loadbalancer service with source range from node[0] and podAccept
  1225. svc := jig.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
  1226. svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1227. svc.Spec.LoadBalancerSourceRanges = []string{acceptPod.Status.PodIP + "/32"}
  1228. })
  1229. // Clean up loadbalancer service
  1230. defer func() {
  1231. jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
  1232. svc.Spec.Type = v1.ServiceTypeNodePort
  1233. svc.Spec.LoadBalancerSourceRanges = nil
  1234. })
  1235. err = cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)
  1236. framework.ExpectNoError(err)
  1237. }()
  1238. svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, loadBalancerCreateTimeout)
  1239. jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
  1240. // timeout when we haven't just created the load balancer
  1241. normalReachabilityTimeout := 2 * time.Minute
  1242. ginkgo.By("check reachability from different sources")
  1243. svcIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
  1244. // Wait longer as this is our first request after creation. We can't check using a separate method,
  1245. // because the LB should only be reachable from the "accept" pod
  1246. framework.CheckReachabilityFromPod(true, loadBalancerLagTimeout, namespace, acceptPodName, svcIP)
  1247. framework.CheckReachabilityFromPod(false, normalReachabilityTimeout, namespace, dropPodName, svcIP)
  1248. ginkgo.By("Update service LoadBalancerSourceRange and check reachability")
  1249. jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
  1250. // only allow access from dropPod
  1251. svc.Spec.LoadBalancerSourceRanges = []string{dropPod.Status.PodIP + "/32"}
  1252. })
  1253. framework.CheckReachabilityFromPod(false, normalReachabilityTimeout, namespace, acceptPodName, svcIP)
  1254. framework.CheckReachabilityFromPod(true, normalReachabilityTimeout, namespace, dropPodName, svcIP)
  1255. ginkgo.By("Delete LoadBalancerSourceRange field and check reachability")
  1256. jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
  1257. svc.Spec.LoadBalancerSourceRanges = nil
  1258. })
  1259. framework.CheckReachabilityFromPod(true, normalReachabilityTimeout, namespace, acceptPodName, svcIP)
  1260. framework.CheckReachabilityFromPod(true, normalReachabilityTimeout, namespace, dropPodName, svcIP)
  1261. })
	// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
	ginkgo.It("should be able to create an internal type load balancer [Slow] [DisabledForLargeClusters]", func() {
		framework.SkipUnlessProviderIs("azure", "gke", "gce")

		createTimeout := framework.LoadBalancerCreateTimeoutDefault
		if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > framework.LargeClusterMinNodesNumber {
			createTimeout = framework.LoadBalancerCreateTimeoutLarge
		}

		pollInterval := framework.Poll * 10

		namespace := f.Namespace.Name
		serviceName := "lb-internal"
		jig := framework.NewServiceTestJig(cs, serviceName)

		ginkgo.By("creating pod to be part of service " + serviceName)
		jig.RunOrFail(namespace, nil)

		enableILB, disableILB := framework.EnableAndDisableInternalLB()

		isInternalEndpoint := func(lbIngress *v1.LoadBalancerIngress) bool {
			ingressEndpoint := framework.GetIngressPoint(lbIngress)
			// Needs update for providers using hostname as endpoint.
			return strings.HasPrefix(ingressEndpoint, "10.")
		}

		ginkgo.By("creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled")
		svc := jig.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			enableILB(svc)
		})
		svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, createTimeout)
		jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
		lbIngress := &svc.Status.LoadBalancer.Ingress[0]
		svcPort := int(svc.Spec.Ports[0].Port)
		// should have an internal IP.
		gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeTrue())

		// ILBs are not accessible from the test orchestrator, so it's necessary to use
		// a pod to test the service.
		ginkgo.By("hitting the internal load balancer from pod")
		e2elog.Logf("creating pod with host network")
		hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec")

		e2elog.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName)
		tcpIngressIP := framework.GetIngressPoint(lbIngress)
		if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
			cmd := fmt.Sprintf(`curl -m 5 'http://%v:%v/echo?msg=hello'`, tcpIngressIP, svcPort)
			stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
			if err != nil {
				e2elog.Logf("error curling; stdout: %v. err: %v", stdout, err)
				return false, nil
			}

			if !strings.Contains(stdout, "hello") {
				e2elog.Logf("Expected output to contain 'hello', got %q; retrying...", stdout)
				return false, nil
			}

			e2elog.Logf("Successful curl; stdout: %v", stdout)
			return true, nil
		}); pollErr != nil {
			framework.Failf("Failed to hit ILB IP, err: %v", pollErr)
		}

		ginkgo.By("switching to external type LoadBalancer")
		svc = jig.UpdateServiceOrFail(namespace, serviceName, func(svc *v1.Service) {
			disableILB(svc)
		})
		e2elog.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName)
		if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
			svc, err := jig.Client.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			lbIngress = &svc.Status.LoadBalancer.Ingress[0]
			return !isInternalEndpoint(lbIngress), nil
		}); pollErr != nil {
			framework.Failf("Loadbalancer IP not changed to external.")
		}
		// should have an external IP.
		jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
		gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeFalse())

		ginkgo.By("hitting the external load balancer")
		e2elog.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName)
		tcpIngressIP = framework.GetIngressPoint(lbIngress)
		jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault)

		// GCE cannot test a specific IP because the test may not own it. This cloud specific condition
		// will be removed when GCP supports similar functionality.
		if framework.ProviderIs("azure") {
			ginkgo.By("switching back to internal type LoadBalancer, with static IP specified.")
			internalStaticIP := "10.240.11.11"
			svc = jig.UpdateServiceOrFail(namespace, serviceName, func(svc *v1.Service) {
				svc.Spec.LoadBalancerIP = internalStaticIP
				enableILB(svc)
			})
			e2elog.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName)
			if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
				svc, err := jig.Client.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{})
				if err != nil {
					return false, err
				}
				lbIngress = &svc.Status.LoadBalancer.Ingress[0]
				return isInternalEndpoint(lbIngress), nil
			}); pollErr != nil {
				framework.Failf("Loadbalancer IP not changed to internal.")
			}
			// should have the given static internal IP.
			jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
			gomega.Expect(framework.GetIngressPoint(lbIngress)).To(gomega.Equal(internalStaticIP))
		}

		ginkgo.By("switching to ClusterIP type to destroy loadbalancer")
		jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, createTimeout)
	})
	// This test creates a load balancer and makes sure its health check interval
	// equals gceHcCheckIntervalSeconds. The interval is then changed to something
	// else to verify that it gets reconciled.
	ginkgo.It("should reconcile LB health check interval [Slow][Serial]", func() {
		const gceHcCheckIntervalSeconds = int64(8)
		// This test is for clusters on GCE.
		// (It restarts kube-controller-manager, which we don't support on GKE)
		framework.SkipUnlessProviderIs("gce")
		clusterID, err := gce.GetClusterID(cs)
		if err != nil {
			framework.Failf("framework.GetClusterID(cs) = _, %v; want nil", err)
		}
		gceCloud, err := gce.GetGCECloud()
		if err != nil {
			framework.Failf("framework.GetGCECloud() = _, %v; want nil", err)
		}

		namespace := f.Namespace.Name
		serviceName := "lb-hc-int"
		jig := framework.NewServiceTestJig(cs, serviceName)

		ginkgo.By("create load balancer service")
		svc := jig.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
		})

		// Clean up loadbalancer service
		defer func() {
			jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
				svc.Spec.Type = v1.ServiceTypeNodePort
			})
			err = cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)
			framework.ExpectNoError(err)
		}()

		svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, framework.LoadBalancerCreateTimeoutDefault)

		hcName := gcecloud.MakeNodesHealthCheckName(clusterID)
		hc, err := gceCloud.GetHTTPHealthCheck(hcName)
		if err != nil {
			framework.Failf("gceCloud.GetHTTPHealthCheck(%q) = _, %v; want nil", hcName, err)
		}
		gomega.Expect(hc.CheckIntervalSec).To(gomega.Equal(gceHcCheckIntervalSeconds))

		ginkgo.By("modify the health check interval")
		hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1
		if err = gceCloud.UpdateHTTPHealthCheck(hc); err != nil {
			framework.Failf("gceCloud.UpdateHTTPHealthCheck(%#v) = %v; want nil", hc, err)
		}

		ginkgo.By("restart kube-controller-manager")
		if err := framework.RestartControllerManager(); err != nil {
			framework.Failf("framework.RestartControllerManager() = %v; want nil", err)
		}
		if err := framework.WaitForControllerManagerUp(); err != nil {
			framework.Failf("framework.WaitForControllerManagerUp() = %v; want nil", err)
		}

		ginkgo.By("health check should be reconciled")
		pollInterval := framework.Poll * 10
		if pollErr := wait.PollImmediate(pollInterval, framework.LoadBalancerCreateTimeoutDefault, func() (bool, error) {
			hc, err := gceCloud.GetHTTPHealthCheck(hcName)
			if err != nil {
				e2elog.Logf("Failed to get HTTPHealthCheck(%q): %v", hcName, err)
				return false, err
			}
			e2elog.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec)
			return hc.CheckIntervalSec == gceHcCheckIntervalSeconds, nil
		}); pollErr != nil {
			framework.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds)
		}
	})
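
	// Session affinity (ClientIP) coverage for ClusterIP, NodePort and
	// LoadBalancer services, both with a fixed setting and with a transition.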
  1429. ginkgo.It("should have session affinity work for service with type clusterIP", func() {
  1430. svc := getServeHostnameService("service")
  1431. svc.Spec.Type = v1.ServiceTypeClusterIP
  1432. execAffinityTestForNonLBService(f, cs, svc)
  1433. })
  1434. ginkgo.It("should be able to switch session affinity for service with type clusterIP", func() {
  1435. svc := getServeHostnameService("service")
  1436. svc.Spec.Type = v1.ServiceTypeClusterIP
  1437. execAffinityTestForNonLBServiceWithTransition(f, cs, svc)
  1438. })
  1439. ginkgo.It("should have session affinity work for NodePort service", func() {
  1440. svc := getServeHostnameService("service")
  1441. svc.Spec.Type = v1.ServiceTypeNodePort
  1442. execAffinityTestForNonLBService(f, cs, svc)
  1443. })
  1444. ginkgo.It("should be able to switch session affinity for NodePort service", func() {
  1445. svc := getServeHostnameService("service")
  1446. svc.Spec.Type = v1.ServiceTypeNodePort
  1447. execAffinityTestForNonLBServiceWithTransition(f, cs, svc)
  1448. })
  1449. // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
  1450. ginkgo.It("should have session affinity work for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() {
  1451. // L4 load balancer affinity `ClientIP` is not supported on AWS ELB.
  1452. framework.SkipIfProviderIs("aws")
  1453. svc := getServeHostnameService("service")
  1454. svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1455. svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
  1456. execAffinityTestForLBService(f, cs, svc)
  1457. })
  1458. // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
  1459. ginkgo.It("should be able to switch session affinity for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() {
  1460. // L4 load balancer affinity `ClientIP` is not supported on AWS ELB.
  1461. framework.SkipIfProviderIs("aws")
  1462. svc := getServeHostnameService("service")
  1463. svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1464. svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
  1465. execAffinityTestForLBServiceWithTransition(f, cs, svc)
  1466. })
  1467. // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
  1468. ginkgo.It("should have session affinity work for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() {
  1469. // L4 load balancer affinity `ClientIP` is not supported on AWS ELB.
  1470. framework.SkipIfProviderIs("aws")
  1471. svc := getServeHostnameService("service")
  1472. svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1473. svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
  1474. execAffinityTestForLBService(f, cs, svc)
  1475. })
  1476. // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
  1477. ginkgo.It("should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() {
  1478. // L4 load balancer affinity `ClientIP` is not supported on AWS ELB.
  1479. framework.SkipIfProviderIs("aws")
  1480. svc := getServeHostnameService("service")
  1481. svc.Spec.Type = v1.ServiceTypeLoadBalancer
  1482. svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
  1483. execAffinityTestForLBServiceWithTransition(f, cs, svc)
  1484. })
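
	// Services carrying the service.kubernetes.io/service-proxy-name label are
	// expected to be ignored by kube-proxy, so toggling the label should toggle
	// reachability of the service.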
  1485. ginkgo.It("should implement service.kubernetes.io/service-proxy-name", func() {
  1486. // this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
  1487. framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
  1488. // this test does not work if the Node does not support SSH Key
  1489. framework.SkipUnlessSSHKeyPresent()
  1490. ns := f.Namespace.Name
  1491. numPods, servicePort := 3, defaultServeHostnameServicePort
  1492. serviceProxyNameLabels := map[string]string{"service.kubernetes.io/service-proxy-name": "foo-bar"}
  1493. // We will create 2 services to test creating services in both states and also dynamic updates
  1494. // svcDisabled: Created with the label, will always be disabled. We create this early and
  1495. // test again late to make sure it never becomes available.
  1496. // svcToggled: Created without the label then the label is toggled verifying reachability at each step.
  1497. ginkgo.By("creating service-disabled in namespace " + ns)
  1498. svcDisabled := getServeHostnameService("service-disabled")
  1499. svcDisabled.ObjectMeta.Labels = serviceProxyNameLabels
  1500. _, svcDisabledIP, err := framework.StartServeHostnameService(cs, svcDisabled, ns, numPods)
  1501. framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svcDisabledIP, ns)
  1502. ginkgo.By("creating service in namespace " + ns)
  1503. svcToggled := getServeHostnameService("service")
  1504. podToggledNames, svcToggledIP, err := framework.StartServeHostnameService(cs, svcToggled, ns, numPods)
  1505. framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svcToggledIP, ns)
  1506. jig := framework.NewServiceTestJig(cs, svcToggled.ObjectMeta.Name)
  1507. hosts, err := e2essh.NodeSSHHosts(cs)
  1508. framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
  1509. if len(hosts) == 0 {
  1510. framework.Failf("No ssh-able nodes")
  1511. }
  1512. host := hosts[0]
  1513. ginkgo.By("verifying service is up")
  1514. framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort))
  1515. ginkgo.By("verifying service-disabled is not up")
  1516. framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort))
  1517. ginkgo.By("adding service-proxy-name label")
  1518. jig.UpdateServiceOrFail(ns, svcToggled.ObjectMeta.Name, func(svc *v1.Service) {
  1519. svc.ObjectMeta.Labels = serviceProxyNameLabels
  1520. })
  1521. ginkgo.By("verifying service is not up")
  1522. framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svcToggledIP, servicePort))
  1523. ginkgo.By("removing service-proxy-name annotation")
  1524. jig.UpdateServiceOrFail(ns, svcToggled.ObjectMeta.Name, func(svc *v1.Service) {
  1525. svc.ObjectMeta.Labels = nil
  1526. })
  1527. ginkgo.By("verifying service is up")
  1528. framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort))
  1529. ginkgo.By("verifying service-disabled is still not up")
  1530. framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort))
  1531. })
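
	// A service that has no ready endpoints should actively refuse connections
	// (connection refused) instead of letting them time out.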
  1532. ginkgo.It("should be rejected when no endpoints exist", func() {
  1533. namespace := f.Namespace.Name
  1534. serviceName := "no-pods"
  1535. jig := framework.NewServiceTestJig(cs, serviceName)
  1536. nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests)
  1537. labels := map[string]string{
  1538. "nopods": "nopods",
  1539. }
  1540. port := 80
  1541. ports := []v1.ServicePort{{
  1542. Port: int32(port),
  1543. TargetPort: intstr.FromInt(80),
  1544. }}
  1545. ginkgo.By("creating a service with no endpoints")
  1546. _, err := jig.CreateServiceWithServicePort(labels, namespace, ports)
  1547. if err != nil {
  1548. framework.Failf("ginkgo.Failed to create service: %v", err)
  1549. }
  1550. nodeName := nodes.Items[0].Name
  1551. podName := "execpod-noendpoints"
  1552. ginkgo.By(fmt.Sprintf("creating %v on node %v", podName, nodeName))
  1553. execPodName := framework.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) {
  1554. pod.Spec.NodeName = nodeName
  1555. })
  1556. execPod, err := f.ClientSet.CoreV1().Pods(namespace).Get(execPodName, metav1.GetOptions{})
  1557. framework.ExpectNoError(err)
  1558. serviceAddress := net.JoinHostPort(serviceName, strconv.Itoa(port))
  1559. e2elog.Logf("waiting up to %v wget %v", framework.KubeProxyEndpointLagTimeout, serviceAddress)
  1560. cmd := fmt.Sprintf(`wget -T 3 -qO- %v`, serviceAddress)
  1561. ginkgo.By(fmt.Sprintf("hitting service %v from pod %v on node %v", serviceAddress, podName, nodeName))
  1562. expectedErr := "connection refused"
  1563. if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyEndpointLagTimeout, func() (bool, error) {
  1564. _, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
  1565. if err != nil {
  1566. if strings.Contains(strings.ToLower(err.Error()), expectedErr) {
  1567. e2elog.Logf("error contained '%s', as expected: %s", expectedErr, err.Error())
  1568. return true, nil
  1569. }
  1570. e2elog.Logf("error didn't contain '%s', keep trying: %s", expectedErr, err.Error())
  1571. return false, nil
  1572. }
  1573. return true, errors.New("expected wget call to fail")
  1574. }); pollErr != nil {
  1575. framework.ExpectNoError(pollErr)
  1576. }
  1577. })
	// This test verifies that the service load balancer cleanup finalizer can be
	// removed when the feature gate isn't enabled on the cluster. This ensures
	// that downgrading from a higher-version cluster will not break LoadBalancer
	// type services.
	ginkgo.It("should remove load balancer cleanup finalizer when service is deleted [Slow]", func() {
		jig := framework.NewServiceTestJig(cs, "lb-remove-finalizer")

		ginkgo.By("Create load balancer service")
		svc := jig.CreateTCPServiceOrFail(f.Namespace.Name, func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
		})

		defer func() {
			waitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
		}()

		ginkgo.By("Wait for load balancer to serve traffic")
		svc = jig.WaitForLoadBalancerOrFail(svc.Namespace, svc.Name, framework.GetServiceLoadBalancerCreationTimeout(cs))

		ginkgo.By("Manually add load balancer cleanup finalizer to service")
		svc.Finalizers = append(svc.Finalizers, "service.kubernetes.io/load-balancer-cleanup")
		if _, err := cs.CoreV1().Services(svc.Namespace).Update(svc); err != nil {
			framework.Failf("Failed to add finalizer to service %s/%s: %v", svc.Namespace, svc.Name, err)
		}
	})

	// This test verifies that the service load balancer cleanup finalizer is
	// properly handled during the service lifecycle:
	// 1. Create service with type=LoadBalancer. Finalizer should be added.
	// 2. Update service to type=ClusterIP. Finalizer should be removed.
	// 3. Update service to type=LoadBalancer. Finalizer should be added.
	// 4. Delete service with type=LoadBalancer. Finalizer should be removed.
	ginkgo.It("should handle load balancer cleanup finalizer for service [Slow] [Feature:ServiceFinalizer]", func() {
		jig := framework.NewServiceTestJig(cs, "lb-finalizer")

		ginkgo.By("Create load balancer service")
		svc := jig.CreateTCPServiceOrFail(f.Namespace.Name, func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
		})

		defer func() {
			waitForServiceDeletedWithFinalizer(cs, svc.Namespace, svc.Name)
		}()

		ginkgo.By("Wait for load balancer to serve traffic")
		svc = jig.WaitForLoadBalancerOrFail(svc.Namespace, svc.Name, framework.GetServiceLoadBalancerCreationTimeout(cs))

		ginkgo.By("Check if finalizer is present on service with type=LoadBalancer")
		waitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, true)

		ginkgo.By("Check if finalizer is removed on service after changed to type=ClusterIP")
		jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, framework.GetServiceLoadBalancerCreationTimeout(cs))
		waitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, false)

		ginkgo.By("Check if finalizer is added back to service after changed to type=LoadBalancer")
		jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeLoadBalancer, framework.GetServiceLoadBalancerCreationTimeout(cs))
		waitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, true)
	})
})
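
// waitForServiceDeletedWithFinalizer deletes the given service and then polls
// until it is gone, logging any finalizers that are still blocking deletion.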
func waitForServiceDeletedWithFinalizer(cs clientset.Interface, namespace, name string) {
	ginkgo.By("Delete service with finalizer")
	if err := cs.CoreV1().Services(namespace).Delete(name, nil); err != nil {
		framework.Failf("Failed to delete service %s/%s: %v", namespace, name, err)
	}

	ginkgo.By("Wait for service to disappear")
	if pollErr := wait.PollImmediate(framework.LoadBalancerPollInterval, framework.GetServiceLoadBalancerCreationTimeout(cs), func() (bool, error) {
		svc, err := cs.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			if apierrors.IsNotFound(err) {
				e2elog.Logf("Service %s/%s is gone.", namespace, name)
				return true, nil
			}
			return false, err
		}
		e2elog.Logf("Service %s/%s still exists with finalizers: %v", namespace, name, svc.Finalizers)
		return false, nil
	}); pollErr != nil {
		framework.Failf("Failed to wait for service to disappear: %v", pollErr)
	}
}
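
// waitForServiceUpdatedWithFinalizer polls the given service until the
// presence of the load-balancer-cleanup finalizer matches hasFinalizer, or
// times out.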
func waitForServiceUpdatedWithFinalizer(cs clientset.Interface, namespace, name string, hasFinalizer bool) {
	ginkgo.By(fmt.Sprintf("Wait for service to hasFinalizer=%t", hasFinalizer))
	if pollErr := wait.PollImmediate(framework.LoadBalancerPollInterval, framework.GetServiceLoadBalancerCreationTimeout(cs), func() (bool, error) {
		svc, err := cs.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		foundFinalizer := false
		for _, finalizer := range svc.Finalizers {
			if finalizer == "service.kubernetes.io/load-balancer-cleanup" {
				foundFinalizer = true
			}
		}
		if foundFinalizer != hasFinalizer {
			e2elog.Logf("Service %s/%s hasFinalizer=%t, want %t", namespace, name, foundFinalizer, hasFinalizer)
			return false, nil
		}
		return true, nil
	}); pollErr != nil {
		framework.Failf("Failed to wait for service to hasFinalizer=%t: %v", hasFinalizer, pollErr)
	}
}
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed.
var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
	f := framework.NewDefaultFramework("esipp")
	loadBalancerCreateTimeout := framework.LoadBalancerCreateTimeoutDefault

	var cs clientset.Interface
	serviceLBNames := []string{}

	ginkgo.BeforeEach(func() {
		// requires cloud load-balancer support - this feature currently supported only on GCE/GKE
		framework.SkipUnlessProviderIs("gce", "gke")

		cs = f.ClientSet
		if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > framework.LargeClusterMinNodesNumber {
			loadBalancerCreateTimeout = framework.LoadBalancerCreateTimeoutLarge
		}
	})

	ginkgo.AfterEach(func() {
		if ginkgo.CurrentGinkgoTestDescription().Failed {
			framework.DescribeSvc(f.Namespace.Name)
		}
		for _, lb := range serviceLBNames {
			e2elog.Logf("cleaning load balancer resource for %s", lb)
			framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
		}
		// reset serviceLBNames
		serviceLBNames = []string{}
	})
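
	// With externalTrafficPolicy=Local, traffic entering through the LB VIP
	// should reach the backend with the original client source IP intact.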
  1694. ginkgo.It("should work for type=LoadBalancer", func() {
  1695. namespace := f.Namespace.Name
  1696. serviceName := "external-local"
  1697. jig := framework.NewServiceTestJig(cs, serviceName)
  1698. svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil)
  1699. serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
  1700. healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
  1701. if healthCheckNodePort == 0 {
  1702. framework.Failf("Service HealthCheck NodePort was not allocated")
  1703. }
  1704. defer func() {
  1705. jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
  1706. // Make sure we didn't leak the health check node port.
  1707. threshold := 2
  1708. for _, ips := range jig.GetEndpointNodes(svc) {
  1709. err := jig.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", framework.KubeProxyEndpointLagTimeout, false, threshold)
  1710. framework.ExpectNoError(err)
  1711. }
  1712. err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)
  1713. framework.ExpectNoError(err)
  1714. }()
  1715. svcTCPPort := int(svc.Spec.Ports[0].Port)
  1716. ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
  1717. ginkgo.By("reading clientIP using the TCP service's service port via its external VIP")
  1718. content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, "/clientip")
  1719. clientIP := content.String()
  1720. e2elog.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP)
  1721. ginkgo.By("checking if Source IP is preserved")
  1722. if strings.HasPrefix(clientIP, "10.") {
  1723. framework.Failf("Source IP was NOT preserved")
  1724. }
  1725. })
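
	// The same source-IP guarantee should hold when hitting the NodePort
	// directly on nodes that host a local endpoint.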
  1726. ginkgo.It("should work for type=NodePort", func() {
  1727. namespace := f.Namespace.Name
  1728. serviceName := "external-local"
  1729. jig := framework.NewServiceTestJig(cs, serviceName)
  1730. svc := jig.CreateOnlyLocalNodePortService(namespace, serviceName, true)
  1731. defer func() {
  1732. err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)
  1733. framework.ExpectNoError(err)
  1734. }()
  1735. tcpNodePort := int(svc.Spec.Ports[0].NodePort)
  1736. endpointsNodeMap := jig.GetEndpointNodes(svc)
  1737. path := "/clientip"
  1738. for nodeName, nodeIPs := range endpointsNodeMap {
  1739. nodeIP := nodeIPs[0]
  1740. ginkgo.By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v%v%v", nodeName, nodeIP, tcpNodePort, path))
  1741. content := jig.GetHTTPContent(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout, path)
  1742. clientIP := content.String()
  1743. e2elog.Logf("ClientIP detected by target pod using NodePort is %s", clientIP)
  1744. if strings.HasPrefix(clientIP, "10.") {
  1745. framework.Failf("Source IP was NOT preserved")
  1746. }
  1747. }
  1748. })
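
	// Only nodes that actually host an endpoint should answer the health check
	// served on the service's HealthCheckNodePort.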
  1749. ginkgo.It("should only target nodes with endpoints", func() {
  1750. namespace := f.Namespace.Name
  1751. serviceName := "external-local"
  1752. jig := framework.NewServiceTestJig(cs, serviceName)
  1753. nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests)
  1754. svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, false,
  1755. func(svc *v1.Service) {
  1756. // Change service port to avoid collision with opened hostPorts
  1757. // in other tests that run in parallel.
  1758. if len(svc.Spec.Ports) != 0 {
  1759. svc.Spec.Ports[0].TargetPort = intstr.FromInt(int(svc.Spec.Ports[0].Port))
  1760. svc.Spec.Ports[0].Port = 8081
  1761. }
  1762. })
  1763. serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
  1764. defer func() {
  1765. jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
  1766. err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)
  1767. framework.ExpectNoError(err)
  1768. }()
  1769. healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
  1770. if healthCheckNodePort == 0 {
  1771. framework.Failf("Service HealthCheck NodePort was not allocated")
  1772. }
  1773. ips := framework.CollectAddresses(nodes, v1.NodeExternalIP)
  1774. ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
  1775. svcTCPPort := int(svc.Spec.Ports[0].Port)
  1776. threshold := 2
  1777. path := "/healthz"
  1778. for i := 0; i < len(nodes.Items); i++ {
  1779. endpointNodeName := nodes.Items[i].Name
  1780. ginkgo.By("creating a pod to be part of the service " + serviceName + " on node " + endpointNodeName)
  1781. jig.RunOrFail(namespace, func(rc *v1.ReplicationController) {
  1782. rc.Name = serviceName
  1783. if endpointNodeName != "" {
  1784. rc.Spec.Template.Spec.NodeName = endpointNodeName
  1785. }
  1786. })
  1787. ginkgo.By(fmt.Sprintf("waiting for service endpoint on node %v", endpointNodeName))
  1788. jig.WaitForEndpointOnNode(namespace, serviceName, endpointNodeName)
  1789. // HealthCheck should pass only on the node where num(endpoints) > 0
  1790. // All other nodes should fail the healthcheck on the service healthCheckNodePort
  1791. for n, publicIP := range ips {
  1792. // Make sure the loadbalancer picked up the health check change.
  1793. // Confirm traffic can reach backend through LB before checking healthcheck nodeport.
  1794. jig.TestReachableHTTP(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout)
  1795. expectedSuccess := nodes.Items[n].Name == endpointNodeName
  1796. port := strconv.Itoa(healthCheckNodePort)
  1797. ipPort := net.JoinHostPort(publicIP, port)
  1798. e2elog.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess)
  1799. err := jig.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, framework.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)
  1800. framework.ExpectNoError(err)
  1801. }
  1802. framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName))
  1803. }
  1804. })
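
	// Pod-to-LB traffic: the backend should observe the client pod IP as the
	// source when externalTrafficPolicy=Local.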
  1805. ginkgo.It("should work from pods", func() {
  1806. namespace := f.Namespace.Name
  1807. serviceName := "external-local"
  1808. jig := framework.NewServiceTestJig(cs, serviceName)
  1809. nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests)
  1810. svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil)
  1811. serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
  1812. defer func() {
  1813. jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
  1814. err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)
  1815. framework.ExpectNoError(err)
  1816. }()
  1817. ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
  1818. port := strconv.Itoa(int(svc.Spec.Ports[0].Port))
  1819. ipPort := net.JoinHostPort(ingressIP, port)
  1820. path := fmt.Sprintf("%s/clientip", ipPort)
  1821. nodeName := nodes.Items[0].Name
  1822. podName := "execpod-sourceip"
  1823. ginkgo.By(fmt.Sprintf("Creating %v on node %v", podName, nodeName))
  1824. execPodName := framework.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) {
  1825. pod.Spec.NodeName = nodeName
  1826. })
  1827. defer func() {
  1828. err := cs.CoreV1().Pods(namespace).Delete(execPodName, nil)
  1829. framework.ExpectNoError(err, "failed to delete pod: %s", execPodName)
  1830. }()
  1831. execPod, err := f.ClientSet.CoreV1().Pods(namespace).Get(execPodName, metav1.GetOptions{})
  1832. framework.ExpectNoError(err)
  1833. e2elog.Logf("Waiting up to %v wget %v", framework.KubeProxyLagTimeout, path)
  1834. cmd := fmt.Sprintf(`wget -T 30 -qO- %v`, path)
  1835. var srcIP string
  1836. ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, podName, nodeName))
  1837. if pollErr := wait.PollImmediate(framework.Poll, framework.LoadBalancerCreateTimeoutDefault, func() (bool, error) {
  1838. stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
  1839. if err != nil {
  1840. e2elog.Logf("got err: %v, retry until timeout", err)
  1841. return false, nil
  1842. }
  1843. srcIP = strings.TrimSpace(strings.Split(stdout, ":")[0])
  1844. return srcIP == execPod.Status.PodIP, nil
  1845. }); pollErr != nil {
  1846. framework.Failf("Source IP not preserved from %v, expected '%v' got '%v'", podName, execPod.Status.PodIP, srcIP)
  1847. }
  1848. })
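
	// Flipping ExternalTrafficPolicy between Local and Cluster should allocate
	// and release the HealthCheckNodePort and change whether the client source
	// IP is preserved.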
  1849. ginkgo.It("should handle updates to ExternalTrafficPolicy field", func() {
  1850. namespace := f.Namespace.Name
  1851. serviceName := "external-local"
  1852. jig := framework.NewServiceTestJig(cs, serviceName)
  1853. nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests)
  1854. if len(nodes.Items) < 2 {
  1855. framework.Failf("Need at least 2 nodes to verify source ip from a node without endpoint")
  1856. }
  1857. svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil)
  1858. serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
  1859. defer func() {
  1860. jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
  1861. err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)
  1862. framework.ExpectNoError(err)
  1863. }()
  1864. // save the health check node port because it disappears when ESIPP is turned off.
  1865. healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
  1866. ginkgo.By("turning ESIPP off")
  1867. svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
  1868. svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
  1869. })
  1870. if svc.Spec.HealthCheckNodePort > 0 {
  1871. framework.Failf("Service HealthCheck NodePort still present")
  1872. }
  1873. endpointNodeMap := jig.GetEndpointNodes(svc)
  1874. noEndpointNodeMap := map[string][]string{}
  1875. for _, n := range nodes.Items {
  1876. if _, ok := endpointNodeMap[n.Name]; ok {
  1877. continue
  1878. }
  1879. noEndpointNodeMap[n.Name] = framework.GetNodeAddresses(&n, v1.NodeExternalIP)
  1880. }
  1881. svcTCPPort := int(svc.Spec.Ports[0].Port)
  1882. svcNodePort := int(svc.Spec.Ports[0].NodePort)
  1883. ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
  1884. path := "/clientip"
  1885. ginkgo.By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap))
  1886. for nodeName, nodeIPs := range noEndpointNodeMap {
  1887. ginkgo.By(fmt.Sprintf("Checking %v (%v:%v%v) proxies to endpoints on another node", nodeName, nodeIPs[0], svcNodePort, path))
  1888. jig.GetHTTPContent(nodeIPs[0], svcNodePort, framework.KubeProxyLagTimeout, path)
  1889. }
  1890. for nodeName, nodeIPs := range endpointNodeMap {
  1891. ginkgo.By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIPs[0]))
  1892. var body bytes.Buffer
  1893. pollfn := func() (bool, error) {
  1894. result := framework.PokeHTTP(nodeIPs[0], healthCheckNodePort, "/healthz", nil)
  1895. if result.Code == 0 {
  1896. return true, nil
  1897. }
  1898. body.Reset()
  1899. body.Write(result.Body)
  1900. return false, nil
  1901. }
  1902. if pollErr := wait.PollImmediate(framework.Poll, framework.ServiceTestTimeout, pollfn); pollErr != nil {
  1903. framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s",
  1904. nodeName, healthCheckNodePort, body.String())
  1905. }
  1906. }
		// Poll till kube-proxy re-adds the MASQUERADE rule on the node.
		ginkgo.By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP))
		var clientIP string
		pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) {
			content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, "/clientip")
			clientIP = content.String()
			if strings.HasPrefix(clientIP, "10.") {
				return true, nil
			}
			return false, nil
		})
		if pollErr != nil {
			framework.Failf("Source IP WAS preserved even after ESIPP turned off. Got %v, expected a ten-dot cluster ip.", clientIP)
		}
		// TODO: We need to attempt to create another service with the previously
		// allocated healthcheck nodePort. If the health check nodePort has been
		// freed, the new service creation will succeed, upon which we cleanup.
		// If the health check nodePort has NOT been freed, the new service
		// creation will fail.

		ginkgo.By("setting ExternalTrafficPolicy back to OnlyLocal")
		svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			// Request the same healthCheckNodePort as before, to test the user-requested allocation path.
			svc.Spec.HealthCheckNodePort = int32(healthCheckNodePort)
		})
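		// With the Local policy restored, the load balancer should once again
		// report the caller's own (non-cluster) address at /clientip.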
		pollErr = wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) {
			content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, path)
			clientIP = content.String()
			ginkgo.By(fmt.Sprintf("Endpoint %v:%v%v returned client ip %v", ingressIP, svcTCPPort, path, clientIP))
			if !strings.HasPrefix(clientIP, "10.") {
				return true, nil
			}
			return false, nil
		})
		if pollErr != nil {
			framework.Failf("Source IP (%v) is not the client IP even after ESIPP was turned on; expected a public IP.", clientIP)
		}
	})
})
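// execSourceipTest schedules an exec pod on the given node, queries the
// service from inside it, and returns the exec pod's IP together with the
// client_address the backend reported.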
func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeName, serviceIP string, servicePort int) (string, string) {
	e2elog.Logf("Creating an exec pod on node %v", nodeName)
	execPodName := framework.CreateExecPodOrFail(f.ClientSet, ns, fmt.Sprintf("execpod-sourceip-%s", nodeName), func(pod *v1.Pod) {
		pod.Spec.NodeName = nodeName
	})
	defer func() {
		e2elog.Logf("Cleaning up the exec pod")
		err := c.CoreV1().Pods(ns).Delete(execPodName, nil)
		framework.ExpectNoError(err, "failed to delete pod: %s", execPodName)
	}()
	execPod, err := f.ClientSet.CoreV1().Pods(ns).Get(execPodName, metav1.GetOptions{})
	framework.ExpectNoError(err)

	var stdout string
	serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
	timeout := 2 * time.Minute
	e2elog.Logf("Waiting up to %v for wget %s", timeout, serviceIPPort)
	cmd := fmt.Sprintf(`wget -T 30 -qO- %s | grep client_address`, serviceIPPort)
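	// Retry until the service responds with usable output or the timeout
	// expires; transient failures are expected while endpoints come up.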
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
		stdout, err = framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
		if err != nil {
			e2elog.Logf("got err: %v, retry until timeout", err)
			continue
		}
		// Need to check the output because wget -q might omit the error.
		if strings.TrimSpace(stdout) == "" {
			e2elog.Logf("got empty stdout, retry until timeout")
			continue
		}
		break
	}
	framework.ExpectNoError(err)

	// The stdout returned from RunHostCmd seems to come with "\n", so TrimSpace is needed.
	// Desired stdout in this format: client_address=x.x.x.x
	outputs := strings.Split(strings.TrimSpace(stdout), "=")
	if len(outputs) != 2 {
		// Fail the test if the output format is unexpected.
		framework.Failf("exec pod returned unexpected stdout format: [%v]\n", stdout)
	}
	return execPod.Status.PodIP, outputs[1]
}
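// execAffinityTestForNonLBServiceWithTransition runs the affinity test for a
// non-load-balancer service, including the affinity off/on transition.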
func execAffinityTestForNonLBServiceWithTransition(f *framework.Framework, cs clientset.Interface, svc *v1.Service) {
	execAffinityTestForNonLBServiceWithOptionalTransition(f, cs, svc, true)
}
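// execAffinityTestForNonLBService runs the affinity test for a
// non-load-balancer service without the transition step.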
func execAffinityTestForNonLBService(f *framework.Framework, cs clientset.Interface, svc *v1.Service) {
	execAffinityTestForNonLBServiceWithOptionalTransition(f, cs, svc, false)
}

// execAffinityTestForNonLBServiceWithOptionalTransition is a helper function that wraps
// the affinity test logic for non-load-balancer services. Session affinity is
// enabled when the service is created. If isTransitionTest is true, session
// affinity is switched off and back on, and the test verifies that the service
// converges to a stable affinity state.
func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) {
	ns := f.Namespace.Name
	numPods, servicePort, serviceName := 3, defaultServeHostnameServicePort, svc.ObjectMeta.Name
	ginkgo.By("creating service in namespace " + ns)
	serviceType := svc.Spec.Type
	svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
	_, _, err := framework.StartServeHostnameService(cs, svc, ns, numPods)
	framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns)
	defer func() {
		framework.StopServeHostnameService(cs, ns, serviceName)
	}()
	jig := framework.NewServiceTestJig(cs, serviceName)
	svc, err = jig.Client.CoreV1().Services(ns).Get(serviceName, metav1.GetOptions{})
	framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns)

	var svcIP string
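	// For NodePort services, reach the service through a node's internal IP and
	// the allocated node port; otherwise use the cluster IP and service port.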
	if serviceType == v1.ServiceTypeNodePort {
		nodes := framework.GetReadySchedulableNodesOrDie(cs)
		addrs := framework.CollectAddresses(nodes, v1.NodeInternalIP)
		gomega.Expect(len(addrs)).To(gomega.BeNumerically(">", 0), "failed to get Node internal IP")
		svcIP = addrs[0]
		servicePort = int(svc.Spec.Ports[0].NodePort)
	} else {
		svcIP = svc.Spec.ClusterIP
	}
	execPodName := framework.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil)
	defer func() {
		e2elog.Logf("Cleaning up the exec pod")
		err := cs.CoreV1().Pods(ns).Delete(execPodName, nil)
		framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPodName, ns)
	}()
	execPod, err := cs.CoreV1().Pods(ns).Get(execPodName, metav1.GetOptions{})
	framework.ExpectNoError(err, "failed to fetch pod: %s in namespace: %s", execPodName, ns)

	if !isTransitionTest {
		gomega.Expect(framework.CheckAffinity(jig, execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
	}
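	// For the transition test, turn affinity off and expect requests to spread
	// across endpoints, then turn it back on and expect stickiness to return.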
	if isTransitionTest {
		svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
			svc.Spec.SessionAffinity = v1.ServiceAffinityNone
		})
		gomega.Expect(framework.CheckAffinity(jig, execPod, svcIP, servicePort, false)).To(gomega.BeTrue())
		svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
		})
		gomega.Expect(framework.CheckAffinity(jig, execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
	}
}
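// execAffinityTestForLBServiceWithTransition runs the affinity test for a
// load balancer service, including the affinity off/on transition.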
func execAffinityTestForLBServiceWithTransition(f *framework.Framework, cs clientset.Interface, svc *v1.Service) {
	execAffinityTestForLBServiceWithOptionalTransition(f, cs, svc, true)
}
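// execAffinityTestForLBService runs the affinity test for a load balancer
// service without the transition step.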
func execAffinityTestForLBService(f *framework.Framework, cs clientset.Interface, svc *v1.Service) {
	execAffinityTestForLBServiceWithOptionalTransition(f, cs, svc, false)
}

// execAffinityTestForLBServiceWithOptionalTransition is a helper function that wraps
// the affinity test logic for load balancer services, similar to
// execAffinityTestForNonLBServiceWithOptionalTransition.
func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) {
	numPods, ns, serviceName := 3, f.Namespace.Name, svc.ObjectMeta.Name
	ginkgo.By("creating service in namespace " + ns)
	svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
	_, _, err := framework.StartServeHostnameService(cs, svc, ns, numPods)
	framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns)
	jig := framework.NewServiceTestJig(cs, serviceName)
	ginkgo.By("waiting for loadbalancer for service " + ns + "/" + serviceName)
	svc = jig.WaitForLoadBalancerOrFail(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault)
	jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
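	// On teardown, log pod/node placement to help debug affinity failures, then
	// delete the service and release the cloud load balancer resources.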
	defer func() {
		podNodePairs, err := framework.PodNodePairs(cs, ns)
		e2elog.Logf("[pod,node] pairs: %+v; err: %v", podNodePairs, err)
		framework.StopServeHostnameService(cs, ns, serviceName)
		lb := cloudprovider.DefaultLoadBalancerName(svc)
		e2elog.Logf("cleaning load balancer resource for %s", lb)
		framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
	}()
	ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
	port := int(svc.Spec.Ports[0].Port)

	if !isTransitionTest {
		gomega.Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true)).To(gomega.BeTrue())
	}
	if isTransitionTest {
		svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
			svc.Spec.SessionAffinity = v1.ServiceAffinityNone
		})
		gomega.Expect(framework.CheckAffinity(jig, nil, ingressIP, port, false)).To(gomega.BeTrue())
		svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
		})
		gomega.Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true)).To(gomega.BeTrue())
	}
}