volumes.go 5.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157
  1. /*
  2. Copyright 2016 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. /*
  14. * This test checks that various VolumeSources are working.
  15. *
  16. * There are two ways to test the volumes:
  17. * 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
  18. * The test creates a server pod, exporting simple 'index.html' file.
  19. * Then it uses appropriate VolumeSource to import this file into a client pod
  20. * and checks that the pod can see the file. It does so by importing the file
  21. * into web server root and loading the index.html from it.
  22. *
  23. * These tests work only when privileged containers are allowed, exporting
  24. * various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
  25. * other privileged magic in the server pod.
  26. *
  27. * Note that the server containers are for testing purposes only and should not
  28. * be used in production.
  29. *
  30. * 2) With server outside of Kubernetes (Cinder, ...)
  31. * Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
  32. * the tested Kubernetes cluster. The test itself creates a new volume,
  33. * and checks that Kubernetes can use it as a volume.
  34. */
  35. // GlusterFS test is duplicated from test/e2e/volumes.go. Any changes made there
  36. // should be duplicated here
  37. package common
import (
	"context"

	"github.com/onsi/ginkgo"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/framework/volume"
)
  47. // These tests need privileged containers, which are disabled by default. Run
  48. // tests with "--ginkgo.focus=[Feature:Volumes]"
  49. var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
  50. f := framework.NewDefaultFramework("gcp-volume")
  51. // note that namespace deletion is handled by delete-namespace flag
  52. // filled in BeforeEach
  53. var namespace *v1.Namespace
  54. var c clientset.Interface
  55. ginkgo.BeforeEach(func() {
  56. e2eskipper.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom")
  57. namespace = f.Namespace
  58. c = f.ClientSet
  59. })
  60. ////////////////////////////////////////////////////////////////////////
  61. // NFS
  62. ////////////////////////////////////////////////////////////////////////
  63. ginkgo.Describe("NFSv4", func() {
  64. ginkgo.It("should be mountable for NFSv4", func() {
  65. config, _, serverIP := volume.NewNFSServer(c, namespace.Name, []string{})
  66. defer volume.TestServerCleanup(f, config)
  67. tests := []volume.Test{
  68. {
  69. Volume: v1.VolumeSource{
  70. NFS: &v1.NFSVolumeSource{
  71. Server: serverIP,
  72. Path: "/",
  73. ReadOnly: true,
  74. },
  75. },
  76. File: "index.html",
  77. ExpectedContent: "Hello from NFS!",
  78. },
  79. }
  80. // Must match content of test/images/volumes-tester/nfs/index.html
  81. volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
  82. })
  83. })
  84. ginkgo.Describe("NFSv3", func() {
  85. ginkgo.It("should be mountable for NFSv3", func() {
  86. config, _, serverIP := volume.NewNFSServer(c, namespace.Name, []string{})
  87. defer volume.TestServerCleanup(f, config)
  88. tests := []volume.Test{
  89. {
  90. Volume: v1.VolumeSource{
  91. NFS: &v1.NFSVolumeSource{
  92. Server: serverIP,
  93. Path: "/exports",
  94. ReadOnly: true,
  95. },
  96. },
  97. File: "index.html",
  98. ExpectedContent: "Hello from NFS!",
  99. },
  100. }
  101. // Must match content of test/images/volume-tester/nfs/index.html
  102. volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
  103. })
  104. })
  105. ////////////////////////////////////////////////////////////////////////
  106. // Gluster
  107. ////////////////////////////////////////////////////////////////////////
  108. ginkgo.Describe("GlusterFS", func() {
  109. ginkgo.It("should be mountable", func() {
  110. // create gluster server and endpoints
  111. config, _, _ := volume.NewGlusterfsServer(c, namespace.Name)
  112. name := config.Prefix + "-server"
  113. defer func() {
  114. volume.TestServerCleanup(f, config)
  115. err := c.CoreV1().Endpoints(namespace.Name).Delete(context.TODO(), name, nil)
  116. framework.ExpectNoError(err, "defer: Gluster delete endpoints failed")
  117. }()
  118. tests := []volume.Test{
  119. {
  120. Volume: v1.VolumeSource{
  121. Glusterfs: &v1.GlusterfsVolumeSource{
  122. EndpointsName: name,
  123. // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
  124. Path: "test_vol",
  125. ReadOnly: true,
  126. },
  127. },
  128. File: "index.html",
  129. // Must match content of test/images/volumes-tester/gluster/index.html
  130. ExpectedContent: "Hello from GlusterFS!",
  131. },
  132. }
  133. volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
  134. })
  135. })
  136. })