// networking.go
  1. /*
  2. Copyright 2014 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package network
  14. import (
  15. "context"
  16. "fmt"
  17. "net/http"
  18. "strconv"
  19. "strings"
  20. v1 "k8s.io/api/core/v1"
  21. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  22. "k8s.io/apimachinery/pkg/util/sets"
  23. utilwait "k8s.io/apimachinery/pkg/util/wait"
  24. "k8s.io/kubernetes/pkg/master/ports"
  25. "k8s.io/kubernetes/test/e2e/framework"
  26. e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
  27. e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
  28. e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
  29. e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
  30. "github.com/onsi/ginkgo"
  31. )
  32. // checkConnectivityToHost launches a pod to test connectivity to the specified
  33. // host. An error will be returned if the host is not reachable from the pod.
  34. //
  35. // An empty nodeName will use the schedule to choose where the pod is executed.
  36. func checkConnectivityToHost(f *framework.Framework, nodeName, podName, host string, port, timeout int) error {
  37. contName := fmt.Sprintf("%s-container", podName)
  38. command := []string{
  39. "nc",
  40. "-vz",
  41. "-w", strconv.Itoa(timeout),
  42. host,
  43. strconv.Itoa(port),
  44. }
  45. pod := &v1.Pod{
  46. ObjectMeta: metav1.ObjectMeta{
  47. Name: podName,
  48. },
  49. Spec: v1.PodSpec{
  50. Containers: []v1.Container{
  51. {
  52. Name: contName,
  53. Image: framework.AgnHostImage,
  54. Command: command,
  55. },
  56. },
  57. NodeName: nodeName,
  58. RestartPolicy: v1.RestartPolicyNever,
  59. },
  60. }
  61. podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
  62. _, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{})
  63. if err != nil {
  64. return err
  65. }
  66. err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name)
  67. if err != nil {
  68. logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, contName)
  69. if logErr != nil {
  70. framework.Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr)
  71. } else {
  72. framework.Logf("pod %s/%s logs:\n%s", f.Namespace.Name, pod.Name, logs)
  73. }
  74. }
  75. return err
  76. }
  77. var _ = SIGDescribe("Networking", func() {
  78. var svcname = "nettest"
  79. f := framework.NewDefaultFramework(svcname)
  80. ginkgo.BeforeEach(func() {
  81. // Assert basic external connectivity.
  82. // Since this is not really a test of kubernetes in any way, we
  83. // leave it as a pre-test assertion, rather than a Ginko test.
  84. ginkgo.By("Executing a successful http request from the external internet")
  85. resp, err := http.Get("http://google.com")
  86. if err != nil {
  87. framework.Failf("Unable to connect/talk to the internet: %v", err)
  88. }
  89. if resp.StatusCode != http.StatusOK {
  90. framework.Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp)
  91. }
  92. })
  93. ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv4]", func() {
  94. ginkgo.By("Running container which tries to connect to 8.8.8.8")
  95. framework.ExpectNoError(
  96. checkConnectivityToHost(f, "", "connectivity-test", "8.8.8.8", 53, 30))
  97. })
  98. ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv6][Experimental][LinuxOnly]", func() {
  99. // IPv6 is not supported on Windows.
  100. e2eskipper.SkipIfNodeOSDistroIs("windows")
  101. ginkgo.By("Running container which tries to connect to 2001:4860:4860::8888")
  102. framework.ExpectNoError(
  103. checkConnectivityToHost(f, "", "connectivity-test", "2001:4860:4860::8888", 53, 30))
  104. })
  105. // First test because it has no dependencies on variables created later on.
  106. ginkgo.It("should provide unchanging, static URL paths for kubernetes api services", func() {
  107. tests := []struct {
  108. path string
  109. }{
  110. {path: "/healthz"},
  111. {path: "/api"},
  112. {path: "/apis"},
  113. {path: "/metrics"},
  114. {path: "/openapi/v2"},
  115. {path: "/version"},
  116. // TODO: test proxy links here
  117. }
  118. if !framework.ProviderIs("gke", "skeleton") {
  119. tests = append(tests, struct{ path string }{path: "/logs"})
  120. }
  121. for _, test := range tests {
  122. ginkgo.By(fmt.Sprintf("testing: %s", test.path))
  123. data, err := f.ClientSet.CoreV1().RESTClient().Get().
  124. AbsPath(test.path).
  125. DoRaw(context.TODO())
  126. if err != nil {
  127. framework.Failf("ginkgo.Failed: %v\nBody: %s", err, string(data))
  128. }
  129. }
  130. })
  131. ginkgo.It("should check kube-proxy urls", func() {
  132. // TODO: this is overkill we just need the host networking pod
  133. // to hit kube-proxy urls.
  134. config := e2enetwork.NewNetworkingTestConfig(f, true)
  135. ginkgo.By("checking kube-proxy URLs")
  136. config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "200 OK")
  137. // Verify /healthz returns the proper content.
  138. config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "lastUpdated")
  139. // Verify /proxyMode returns http status code 200.
  140. config.GetSelfURLStatusCode(ports.ProxyStatusPort, "/proxyMode", "200")
  141. })
  142. ginkgo.Describe("Granular Checks: Services", func() {
  143. ginkgo.It("should function for pod-Service: http", func() {
  144. config := e2enetwork.NewNetworkingTestConfig(f, false)
  145. ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
  146. config.DialFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
  147. ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeHTTPPort))
  148. config.DialFromTestContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
  149. })
  150. ginkgo.It("should function for pod-Service: udp", func() {
  151. config := e2enetwork.NewNetworkingTestConfig(f, false)
  152. ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
  153. config.DialFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
  154. ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeUDPPort))
  155. config.DialFromTestContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
  156. })
  157. ginkgo.It("should function for node-Service: http", func() {
  158. config := e2enetwork.NewNetworkingTestConfig(f, true)
  159. ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterHTTPPort))
  160. config.DialFromNode("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
  161. ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort))
  162. config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
  163. })
  164. ginkgo.It("should function for node-Service: udp", func() {
  165. config := e2enetwork.NewNetworkingTestConfig(f, true)
  166. ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterUDPPort))
  167. config.DialFromNode("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
  168. ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort))
  169. config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
  170. })
  171. ginkgo.It("should function for endpoint-Service: http", func() {
  172. config := e2enetwork.NewNetworkingTestConfig(f, false)
  173. ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
  174. config.DialFromEndpointContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
  175. ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHTTPPort))
  176. config.DialFromEndpointContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
  177. })
  178. ginkgo.It("should function for endpoint-Service: udp", func() {
  179. config := e2enetwork.NewNetworkingTestConfig(f, false)
  180. ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
  181. config.DialFromEndpointContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
  182. ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeUDPPort))
  183. config.DialFromEndpointContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
  184. })
  185. ginkgo.It("should update endpoints: http", func() {
  186. config := e2enetwork.NewNetworkingTestConfig(f, false)
  187. ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
  188. config.DialFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
  189. config.DeleteNetProxyPod()
  190. ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
  191. config.DialFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
  192. })
  193. ginkgo.It("should update endpoints: udp", func() {
  194. config := e2enetwork.NewNetworkingTestConfig(f, false)
  195. ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
  196. config.DialFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
  197. config.DeleteNetProxyPod()
  198. ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
  199. config.DialFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
  200. })
  201. // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
  202. ginkgo.It("should update nodePort: http [Slow]", func() {
  203. config := e2enetwork.NewNetworkingTestConfig(f, true)
  204. ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort))
  205. config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
  206. config.DeleteNodePortService()
  207. ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort))
  208. config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, config.MaxTries, sets.NewString())
  209. })
  210. // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
  211. ginkgo.It("should update nodePort: udp [Slow]", func() {
  212. config := e2enetwork.NewNetworkingTestConfig(f, true)
  213. ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort))
  214. config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
  215. config.DeleteNodePortService()
  216. ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort))
  217. config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, config.MaxTries, sets.NewString())
  218. })
  219. // [LinuxOnly]: Windows does not support session affinity.
  220. ginkgo.It("should function for client IP based session affinity: http [LinuxOnly]", func() {
  221. config := e2enetwork.NewNetworkingTestConfig(f, false)
  222. ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterHTTPPort))
  223. // Check if number of endpoints returned are exactly one.
  224. eps, err := config.GetEndpointsFromTestContainer("http", config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterHTTPPort, e2enetwork.SessionAffinityChecks)
  225. if err != nil {
  226. framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
  227. }
  228. if len(eps) == 0 {
  229. framework.Failf("Unexpected no endpoints return")
  230. }
  231. if len(eps) > 1 {
  232. framework.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps)
  233. }
  234. })
  235. // [LinuxOnly]: Windows does not support session affinity.
  236. ginkgo.It("should function for client IP based session affinity: udp [LinuxOnly]", func() {
  237. config := e2enetwork.NewNetworkingTestConfig(f, false)
  238. ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterUDPPort))
  239. // Check if number of endpoints returned are exactly one.
  240. eps, err := config.GetEndpointsFromTestContainer("udp", config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterUDPPort, e2enetwork.SessionAffinityChecks)
  241. if err != nil {
  242. framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
  243. }
  244. if len(eps) == 0 {
  245. framework.Failf("Unexpected no endpoints return")
  246. }
  247. if len(eps) > 1 {
  248. framework.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps)
  249. }
  250. })
  251. ginkgo.It("should be able to handle large requests: http", func() {
  252. config := e2enetwork.NewNetworkingTestConfig(f, false)
  253. ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
  254. message := strings.Repeat("42", 1000)
  255. config.DialEchoFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message)
  256. })
  257. ginkgo.It("should be able to handle large requests: udp", func() {
  258. config := e2enetwork.NewNetworkingTestConfig(f, false)
  259. ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
  260. message := "n" + strings.Repeat("o", 1999)
  261. config.DialEchoFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, message)
  262. })
  263. })
  264. ginkgo.It("should recreate its iptables rules if they are deleted [Disruptive]", func() {
  265. e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
  266. e2eskipper.SkipUnlessSSHKeyPresent()
  267. hosts, err := e2essh.NodeSSHHosts(f.ClientSet)
  268. framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
  269. if len(hosts) == 0 {
  270. framework.Failf("No ssh-able nodes")
  271. }
  272. host := hosts[0]
  273. ns := f.Namespace.Name
  274. numPods, servicePort := 3, defaultServeHostnameServicePort
  275. svc := "iptables-flush-test"
  276. defer func() {
  277. framework.ExpectNoError(StopServeHostnameService(f.ClientSet, ns, svc))
  278. }()
  279. podNames, svcIP, err := StartServeHostnameService(f.ClientSet, getServeHostnameService(svc), ns, numPods)
  280. framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc, ns)
  281. // Ideally we want to reload the system firewall, but we don't necessarily
  282. // know how to do that on this system ("firewall-cmd --reload"? "systemctl
  283. // restart iptables"?). So instead we just manually delete all "KUBE-"
  284. // chains.
  285. ginkgo.By("dumping iptables rules on a node")
  286. result, err := e2essh.SSH("sudo iptables-save", host, framework.TestContext.Provider)
  287. if err != nil || result.Code != 0 {
  288. e2essh.LogResult(result)
  289. framework.Failf("couldn't dump iptable rules: %v", err)
  290. }
  291. // All the commands that delete rules have to come before all the commands
  292. // that delete chains, since the chains can't be deleted while there are
  293. // still rules referencing them.
  294. var deleteRuleCmds, deleteChainCmds []string
  295. table := ""
  296. for _, line := range strings.Split(result.Stdout, "\n") {
  297. if strings.HasPrefix(line, "*") {
  298. table = line[1:]
  299. } else if table == "" {
  300. continue
  301. }
  302. // Delete jumps from non-KUBE chains to KUBE chains
  303. if !strings.HasPrefix(line, "-A KUBE-") && strings.Contains(line, "-j KUBE-") {
  304. deleteRuleCmds = append(deleteRuleCmds, fmt.Sprintf("sudo iptables -t %s -D %s || true", table, line[3:]))
  305. }
  306. // Flush and delete all KUBE chains
  307. if strings.HasPrefix(line, ":KUBE-") {
  308. chain := strings.Split(line, " ")[0][1:]
  309. deleteRuleCmds = append(deleteRuleCmds, fmt.Sprintf("sudo iptables -t %s -F %s || true", table, chain))
  310. deleteChainCmds = append(deleteChainCmds, fmt.Sprintf("sudo iptables -t %s -X %s || true", table, chain))
  311. }
  312. }
  313. cmd := strings.Join(append(deleteRuleCmds, deleteChainCmds...), "\n")
  314. ginkgo.By("deleting all KUBE-* iptables chains")
  315. result, err = e2essh.SSH(cmd, host, framework.TestContext.Provider)
  316. if err != nil || result.Code != 0 {
  317. e2essh.LogResult(result)
  318. framework.Failf("couldn't delete iptable rules: %v", err)
  319. }
  320. ginkgo.By("verifying that kube-proxy rules are eventually recreated")
  321. framework.ExpectNoError(verifyServeHostnameServiceUp(f.ClientSet, ns, host, podNames, svcIP, servicePort))
  322. ginkgo.By("verifying that kubelet rules are eventually recreated")
  323. err = utilwait.PollImmediate(framework.Poll, framework.RestartNodeReadyAgainTimeout, func() (bool, error) {
  324. result, err = e2essh.SSH("sudo iptables-save -t nat", host, framework.TestContext.Provider)
  325. if err != nil || result.Code != 0 {
  326. e2essh.LogResult(result)
  327. return false, err
  328. }
  329. if strings.Contains(result.Stdout, "\n-A KUBE-MARK-DROP ") {
  330. return true, nil
  331. }
  332. return false, nil
  333. })
  334. framework.ExpectNoError(err, "kubelet did not recreate its iptables rules")
  335. })
  336. })