fixtures.go
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
 * This test checks that various VolumeSources are working.
 *
 * There are two ways how to test the volumes:
 * 1) With a containerized server (NFS, Ceph, Gluster, iSCSI, ...)
 * The test creates a server pod, exporting a simple 'index.html' file.
 * Then it uses the appropriate VolumeSource to import this file into a client pod
 * and checks that the pod can see the file. It does so by importing the file
 * into the web server root and loading the index.html from it.
 *
 * These tests work only when privileged containers are allowed; exporting
 * various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
 * other privileged magic in the server pod.
 *
 * Note that the server containers are for testing purposes only and should not
 * be used in production.
 *
 * 2) With a server outside of Kubernetes (Cinder, ...)
 * An appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
 * the tested Kubernetes cluster. The test itself creates a new volume
 * and checks that Kubernetes can use it as a volume.
 */
package volume

import (
	"context"
	"fmt"
	"path/filepath"
	"strconv"
	"time"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

const (
	// Kb is byte size of kilobyte
	Kb int64 = 1000
	// Mb is byte size of megabyte
	Mb int64 = 1000 * Kb
	// Gb is byte size of gigabyte
	Gb int64 = 1000 * Mb
	// Tb is byte size of terabyte
	Tb int64 = 1000 * Gb
	// KiB is byte size of kibibyte
	KiB int64 = 1024
	// MiB is byte size of mebibyte
	MiB int64 = 1024 * KiB
	// GiB is byte size of gibibyte
	GiB int64 = 1024 * MiB
	// TiB is byte size of tebibyte
	TiB int64 = 1024 * GiB

	// VolumeServerPodStartupTimeout is a waiting period for the volume server (Ceph, ...) to initialize itself.
	VolumeServerPodStartupTimeout = 3 * time.Minute

	// PodCleanupTimeout is a waiting period for the pod to be cleaned up and unmount its volumes so we
	// don't tear down containers with an NFS/Ceph/Gluster server too early.
	PodCleanupTimeout = 20 * time.Second
)

// SizeRange encapsulates a range of sizes specified as minimum and maximum quantity strings.
// Both values are optional.
// If a size is not set, there is assumed to be no limitation: a very small size (e.g. 1Ki)
// may be used as Min and a considerably big size (e.g. 10Ei) as Max, which makes it possible
// to calculate the intersection of given intervals (if it exists).
type SizeRange struct {
	// Max quantity specified as a string including units. E.g. "3Gi".
	// If the Max size is unset, it will be assigned a default valid maximum size of 10Ei,
	// which is defined in test/e2e/storage/testsuites/base.go
	Max string
	// Min quantity specified as a string including units. E.g. "1Gi".
	// If the Min size is unset, it will be assigned a default valid minimum size of 1Ki,
	// which is defined in test/e2e/storage/testsuites/base.go
	Min string
}
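
// Illustrative sketch (not part of the framework): a caller could check that a
// SizeRange describes a non-empty interval by parsing both bounds with
// k8s.io/apimachinery/pkg/api/resource; the variable names here are examples only.
//
//	requested := SizeRange{Min: "1Gi", Max: "10Gi"}
//	minQuantity := resource.MustParse(requested.Min)
//	maxQuantity := resource.MustParse(requested.Max)
//	if minQuantity.Cmp(maxQuantity) > 0 {
//		// empty interval: no size can satisfy both bounds
//	}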

// TestConfig is a struct for configuration of one test. The test consists of:
// - server pod - runs serverImage, exports ports[]
// - client pod - does not need any special configuration
type TestConfig struct {
	Namespace string
	// Prefix of all pods. Typically the test name.
	Prefix string
	// Name of container image for the server pod.
	ServerImage string
	// Ports to export from the server pod. TCP only.
	ServerPorts []int
	// Commands to run in the container image.
	ServerCmds []string
	// Arguments to pass to the container image.
	ServerArgs []string
	// Volumes needed to be mounted to the server container from the host
	// map <host (source) path> -> <container (dst.) path>
	// if <host (source) path> is empty, mount a tmpfs emptydir
	ServerVolumes map[string]string
	// Message to wait for before starting clients
	ServerReadyMessage string
	// Use HostNetwork for the server
	ServerHostNetwork bool
	// Wait for the pod to terminate successfully
	// False indicates that the pod is long running
	WaitForCompletion bool
	// ClientNodeSelection restricts where the client pod runs. Default is any node.
	ClientNodeSelection e2epod.NodeSelection
}

// Test contains a volume to mount into a client pod and its
// expected content.
type Test struct {
	Volume v1.VolumeSource
	Mode   v1.PersistentVolumeMode
	// Name of file to read/write in FileSystem mode
	File            string
	ExpectedContent string
}
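
// Illustrative sketch (not part of the framework): a Test for an NFS export can be
// built from the server IP returned by NewNFSServer; all literal values below are
// examples only.
//
//	nfsTest := Test{
//		Volume: v1.VolumeSource{
//			NFS: &v1.NFSVolumeSource{
//				Server: serverIP,
//				Path:   "/",
//			},
//		},
//		Mode:            v1.PersistentVolumeFilesystem,
//		File:            "index.html",
//		ExpectedContent: "Hello from NFS!",
//	}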

// NewNFSServer is an NFS-specific wrapper for CreateStorageServer.
func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, ip string) {
	config = TestConfig{
		Namespace:          namespace,
		Prefix:             "nfs",
		ServerImage:        imageutils.GetE2EImage(imageutils.VolumeNFSServer),
		ServerPorts:        []int{2049},
		ServerVolumes:      map[string]string{"": "/exports"},
		ServerReadyMessage: "NFS started",
	}
	if len(args) > 0 {
		config.ServerArgs = args
	}
	pod, ip = CreateStorageServer(cs, config)
	return config, pod, ip
}
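
// Illustrative sketch (not part of the framework): a typical flow inside a Ginkgo
// spec, assuming f is the spec's *framework.Framework and nfsTest is a Test such as
// the one sketched above; names and values are examples only.
//
//	config, _, serverIP := NewNFSServer(f.ClientSet, f.Namespace.Name, nil)
//	defer TestServerCleanup(f, config)
//	tests := []Test{nfsTest} // e.g. the Test sketched above, built from serverIP
//	// Write the expected content through an injector pod, then read it back
//	// from a separate client pod.
//	InjectContent(f, config, nil, "", tests)
//	TestVolumeClient(f, config, nil, "", tests)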

// NewGlusterfsServer is a GlusterFS-specific wrapper for CreateStorageServer. Also creates the gluster endpoints object.
func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestConfig, pod *v1.Pod, ip string) {
	config = TestConfig{
		Namespace:   namespace,
		Prefix:      "gluster",
		ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
		ServerPorts: []int{24007, 24008, 49152},
	}
	pod, ip = CreateStorageServer(cs, config)

	ginkgo.By("creating Gluster endpoints")
	endpoints := &v1.Endpoints{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Endpoints",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: config.Prefix + "-server",
		},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{
					{
						IP: ip,
					},
				},
				Ports: []v1.EndpointPort{
					{
						Name:     "gluster",
						Port:     24007,
						Protocol: v1.ProtocolTCP,
					},
				},
			},
		},
	}
	_, err := cs.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{})
	framework.ExpectNoError(err, "failed to create endpoints for Gluster server")

	return config, pod, ip
}

// CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in,
// and a pod pointer and ip address string are returned.
// Note: Expect() is called so no error is returned.
func CreateStorageServer(cs clientset.Interface, config TestConfig) (pod *v1.Pod, ip string) {
	pod = startVolumeServer(cs, config)
	gomega.Expect(pod).NotTo(gomega.BeNil(), "storage server pod should not be nil")
	ip = pod.Status.PodIP
	gomega.Expect(len(ip)).NotTo(gomega.BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
	framework.Logf("%s server pod IP address: %s", config.Prefix, ip)
	return pod, ip
}

// startVolumeServer starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create an appropriate VolumeSource.
func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
	podClient := client.CoreV1().Pods(config.Namespace)

	portCount := len(config.ServerPorts)
	serverPodPorts := make([]v1.ContainerPort, portCount)
	for i := 0; i < portCount; i++ {
		portName := fmt.Sprintf("%s-%d", config.Prefix, i)

		serverPodPorts[i] = v1.ContainerPort{
			Name:          portName,
			ContainerPort: int32(config.ServerPorts[i]),
			Protocol:      v1.ProtocolTCP,
		}
	}

	volumeCount := len(config.ServerVolumes)
	volumes := make([]v1.Volume, volumeCount)
	mounts := make([]v1.VolumeMount, volumeCount)

	i := 0
	for src, dst := range config.ServerVolumes {
		mountName := fmt.Sprintf("path%d", i)
		volumes[i].Name = mountName
		if src == "" {
			volumes[i].VolumeSource.EmptyDir = &v1.EmptyDirVolumeSource{}
		} else {
			volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
				Path: src,
			}
		}

		mounts[i].Name = mountName
		mounts[i].ReadOnly = false
		mounts[i].MountPath = dst

		i++
	}

	serverPodName := fmt.Sprintf("%s-server", config.Prefix)
	ginkgo.By(fmt.Sprint("creating ", serverPodName, " pod"))
	privileged := new(bool)
	*privileged = true

	restartPolicy := v1.RestartPolicyAlways
	if config.WaitForCompletion {
		restartPolicy = v1.RestartPolicyNever
	}
	serverPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: serverPodName,
			Labels: map[string]string{
				"role": serverPodName,
			},
		},
		Spec: v1.PodSpec{
			HostNetwork: config.ServerHostNetwork,
			Containers: []v1.Container{
				{
					Name:  serverPodName,
					Image: config.ServerImage,
					SecurityContext: &v1.SecurityContext{
						Privileged: privileged,
					},
					Command:      config.ServerCmds,
					Args:         config.ServerArgs,
					Ports:        serverPodPorts,
					VolumeMounts: mounts,
				},
			},
			Volumes:       volumes,
			RestartPolicy: restartPolicy,
		},
	}

	var pod *v1.Pod
	serverPod, err := podClient.Create(context.TODO(), serverPod, metav1.CreateOptions{})
	// ok if the server pod already exists. TODO: make this controllable by callers
	if err != nil {
		if apierrors.IsAlreadyExists(err) {
			framework.Logf("Ignore \"already-exists\" error, re-get pod...")
			ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
			serverPod, err = podClient.Get(context.TODO(), serverPodName, metav1.GetOptions{})
			framework.ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
			pod = serverPod
		} else {
			framework.ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err)
		}
	}
	if config.WaitForCompletion {
		framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace))
		framework.ExpectNoError(podClient.Delete(context.TODO(), serverPod.Name, nil))
	} else {
		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, serverPod))
		if pod == nil {
			ginkgo.By(fmt.Sprintf("locating the %q server pod", serverPodName))
			pod, err = podClient.Get(context.TODO(), serverPodName, metav1.GetOptions{})
			framework.ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
		}
	}
	if config.ServerReadyMessage != "" {
		_, err := framework.LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
		framework.ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
	}
	return pod
}

// TestServerCleanup cleans up the server pod.
func TestServerCleanup(f *framework.Framework, config TestConfig) {
	ginkgo.By(fmt.Sprint("cleaning the environment after ", config.Prefix))
	defer ginkgo.GinkgoRecover()

	if config.ServerImage == "" {
		return
	}

	err := e2epod.DeletePodWithWaitByName(f.ClientSet, config.Prefix+"-server", config.Namespace)
	gomega.Expect(err).To(gomega.BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
}

func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test) (*v1.Pod, error) {
	ginkgo.By(fmt.Sprint("starting ", config.Prefix, "-", podSuffix))
	var gracePeriod int64 = 1
	var command string

	if !framework.NodeOSDistroIs("windows") {
		command = "while true ; do sleep 2; done "
	} else {
		command = "while(1) {sleep 2}"
	}
	seLinuxOptions := &v1.SELinuxOptions{Level: "s0:c0,c1"}
	clientPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: config.Prefix + "-" + podSuffix,
			Labels: map[string]string{
				"role": config.Prefix + "-" + podSuffix,
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:       config.Prefix + "-" + podSuffix,
					Image:      GetTestImage(framework.BusyBoxImage),
					WorkingDir: "/opt",
					// An imperative and easily debuggable container which reads/writes vol contents for
					// us to scan in the tests or by eye.
					// We expect that /opt is empty in the minimal containers which we use in this test.
					Command:      GenerateScriptCmd(command),
					VolumeMounts: []v1.VolumeMount{},
				},
			},
			TerminationGracePeriodSeconds: &gracePeriod,
			SecurityContext:               GeneratePodSecurityContext(fsGroup, seLinuxOptions),
			Volumes:                       []v1.Volume{},
		},
	}
	e2epod.SetNodeSelection(clientPod, config.ClientNodeSelection)

	for i, test := range tests {
		volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)

		// We need to make the container privileged when SELinux is enabled on the
		// host, so the test can write data to a location like /tmp. Also, due to
		// the Docker bug below, it's not currently possible to map a device with
		// a privileged container, so we don't go privileged for block volumes.
		// https://github.com/moby/moby/issues/35991
		if privileged && test.Mode == v1.PersistentVolumeBlock {
			privileged = false
		}
		clientPod.Spec.Containers[0].SecurityContext = GenerateSecurityContext(privileged)

		if test.Mode == v1.PersistentVolumeBlock {
			clientPod.Spec.Containers[0].VolumeDevices = append(clientPod.Spec.Containers[0].VolumeDevices, v1.VolumeDevice{
				Name:       volumeName,
				DevicePath: fmt.Sprintf("/opt/%d", i),
			})
		} else {
			clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
				Name:      volumeName,
				MountPath: fmt.Sprintf("/opt/%d", i),
			})
		}
		clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
			Name:         volumeName,
			VolumeSource: test.Volume,
		})
	}
	podsNamespacer := client.CoreV1().Pods(config.Namespace)
	clientPod, err := podsNamespacer.Create(context.TODO(), clientPod, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
	err = e2epod.WaitForPodRunningInNamespace(client, clientPod)
	if err != nil {
		e2epod.WaitForPodToDisappear(client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
		return nil, err
	}
	return clientPod, nil
}

func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsType string, tests []Test) {
	ginkgo.By("Checking that text file contents are perfect.")
	for i, test := range tests {
		if test.Mode == v1.PersistentVolumeBlock {
			// Block: check content
			deviceName := fmt.Sprintf("/opt/%d", i)
			commands := generateReadBlockCmd(deviceName, len(test.ExpectedContent))
			_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
			framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)

			// Check that it's a real block device
			utils.CheckVolumeModeOfPath(f, pod, test.Mode, deviceName)
		} else {
			// Filesystem: check content
			fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
			commands := generateReadFileCmd(fileName)
			_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
			framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)

			// Check that a directory has been mounted
			dirName := filepath.Dir(fileName)
			utils.CheckVolumeModeOfPath(f, pod, test.Mode, dirName)

			if !framework.NodeOSDistroIs("windows") {
				// Filesystem: check fsgroup
				if fsGroup != nil {
					ginkgo.By("Checking fsGroup is correct.")
					_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
					framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
				}

				// Filesystem: check fsType
				if fsType != "" {
					ginkgo.By("Checking fsType is correct.")
					_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
					framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
				}
			}
		}
	}
}

// TestVolumeClient starts a client pod using a given VolumeSource (exported by startVolumeServer())
// and checks that the pod sees expected data, e.g. from the server pod.
// Multiple Tests can be specified to mount multiple volumes to a single
// pod.
func TestVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
	clientPod, err := runVolumeTesterPod(f.ClientSet, config, "client", false, fsGroup, tests)
	if err != nil {
		framework.Failf("Failed to create client pod: %v", err)
	}
	defer func() {
		e2epod.DeletePodOrFail(f.ClientSet, clientPod.Namespace, clientPod.Name)
		e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
	}()
	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, clientPod))
	testVolumeContent(f, clientPod, fsGroup, fsType, tests)
}

// InjectContent inserts index.html with the given content into the given volume. It does so by
// starting an auxiliary pod which writes the file there.
// The volume must be writable.
func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
	privileged := true
	if framework.NodeOSDistroIs("windows") {
		privileged = false
	}
	injectorPod, err := runVolumeTesterPod(f.ClientSet, config, "injector", privileged, fsGroup, tests)
	if err != nil {
		framework.Failf("Failed to create injector pod: %v", err)
		return
	}
	defer func() {
		e2epod.DeletePodOrFail(f.ClientSet, injectorPod.Namespace, injectorPod.Name)
		e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
	}()

	ginkgo.By("Writing text file contents in the container.")
	for i, test := range tests {
		commands := []string{"exec", injectorPod.Name, fmt.Sprintf("--namespace=%v", injectorPod.Namespace), "--"}
		if test.Mode == v1.PersistentVolumeBlock {
			// Block: write content
			deviceName := fmt.Sprintf("/opt/%d", i)
			commands = append(commands, generateWriteBlockCmd(test.ExpectedContent, deviceName)...)
		} else {
			// Filesystem: write content
			fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
			commands = append(commands, generateWriteFileCmd(test.ExpectedContent, fileName)...)
		}
		out, err := framework.RunKubectl(injectorPod.Namespace, commands...)
		framework.ExpectNoError(err, "failed: writing the contents: %s", out)
	}

	// Check that the data have really been written in this pod.
	// This tests non-persistent volume types.
	testVolumeContent(f, injectorPod, fsGroup, fsType, tests)
}

// GenerateScriptCmd generates the corresponding command lines to execute a command.
// Depending on whether the node OS is Windows or Linux, the command will use powershell or /bin/sh.
func GenerateScriptCmd(command string) []string {
	var commands []string
	if !framework.NodeOSDistroIs("windows") {
		commands = []string{"/bin/sh", "-c", command}
	} else {
		commands = []string{"powershell", "/c", command}
	}
	return commands
}

// generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd
func generateWriteCmd(content, path string) []string {
	var commands []string
	if !framework.NodeOSDistroIs("windows") {
		commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + path}
	} else {
		commands = []string{"powershell", "/c", "echo '" + content + "' > " + path}
	}
	return commands
}
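
// For example, on a Linux node generateWriteCmd("Hello from NFS!", "/opt/0/index.html")
// returns {"/bin/sh", "-c", "echo 'Hello from NFS!' > /opt/0/index.html"}, which
// InjectContent appends to a "kubectl exec" invocation.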

// generateReadBlockCmd generates the corresponding command lines to read from a block device with the given file path.
// Depending on whether the node OS is Windows or Linux, the command will use powershell or /bin/sh.
func generateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
	var commands []string
	if !framework.NodeOSDistroIs("windows") {
		commands = []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
	} else {
		// TODO: is there a way on windows to get the first X bytes from a device?
		commands = []string{"powershell", "/c", "type " + fullPath}
	}
	return commands
}

// generateWriteBlockCmd generates the corresponding command lines to write the given content to a block device.
// Depending on whether the node OS is Windows or Linux, the command will use powershell or /bin/sh.
func generateWriteBlockCmd(content, fullPath string) []string {
	return generateWriteCmd(content, fullPath)
}

// generateReadFileCmd generates the corresponding command lines to read from a file with the given file path.
// Depending on whether the node OS is Windows or Linux, the command will use powershell or /bin/sh.
func generateReadFileCmd(fullPath string) []string {
	var commands []string
	if !framework.NodeOSDistroIs("windows") {
		commands = []string{"cat", fullPath}
	} else {
		commands = []string{"powershell", "/c", "type " + fullPath}
	}
	return commands
}

// generateWriteFileCmd generates the corresponding command lines to write a file with the given content and file path.
// Depending on whether the node OS is Windows or Linux, the command will use powershell or /bin/sh.
func generateWriteFileCmd(content, fullPath string) []string {
	return generateWriteCmd(content, fullPath)
}

// GenerateSecurityContext generates the corresponding container security context with the given inputs.
// If the node OS is Windows, the inputs are currently ignored and nil is returned.
// TODO: Will modify it after windows has its own security context
func GenerateSecurityContext(privileged bool) *v1.SecurityContext {
	if framework.NodeOSDistroIs("windows") {
		return nil
	}
	return &v1.SecurityContext{
		Privileged: &privileged,
	}
}

// GeneratePodSecurityContext generates the corresponding pod security context with the given inputs.
// If the node OS is Windows, the inputs are currently ignored and nil is returned.
// TODO: Will modify it after windows has its own security context
func GeneratePodSecurityContext(fsGroup *int64, seLinuxOptions *v1.SELinuxOptions) *v1.PodSecurityContext {
	if framework.NodeOSDistroIs("windows") {
		return nil
	}
	return &v1.PodSecurityContext{
		SELinuxOptions: seLinuxOptions,
		FSGroup:        fsGroup,
	}
}

// GetTestImage returns the image name for the given input.
// If the node OS is Windows, we currently return the Agnhost image for Windows nodes
// due to https://github.com/kubernetes-sigs/windows-testing/pull/35.
func GetTestImage(image string) string {
	if framework.NodeOSDistroIs("windows") {
		return imageutils.GetE2EImage(imageutils.Agnhost)
	}
	return image
}