subpath.go

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites

import (
	"context"
	"fmt"
	"path/filepath"
	"regexp"
	"strings"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/framework/volume"
	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

var (
	volumePath      = "/test-volume"
	volumeName      = "test-volume"
	probeVolumePath = "/probe-volume"
	probeFilePath   = probeVolumePath + "/probe-file"
	fileName        = "test-file"
	retryDuration   = 20
	mountImage      = imageutils.GetE2EImage(imageutils.Mounttest)
)

type subPathTestSuite struct {
	tsInfo TestSuiteInfo
}

var _ TestSuite = &subPathTestSuite{}

// InitSubPathTestSuite returns subPathTestSuite that implements TestSuite interface
func InitSubPathTestSuite() TestSuite {
	return &subPathTestSuite{
		tsInfo: TestSuiteInfo{
			Name: "subPath",
			TestPatterns: []testpatterns.TestPattern{
				testpatterns.DefaultFsInlineVolume,
				testpatterns.DefaultFsPreprovisionedPV,
				testpatterns.DefaultFsDynamicPV,
				testpatterns.NtfsDynamicPV,
			},
			SupportedSizeRange: volume.SizeRange{
				Min: "1Mi",
			},
		},
	}
}

func (s *subPathTestSuite) GetTestSuiteInfo() TestSuiteInfo {
	return s.tsInfo
}

func (s *subPathTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
	skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap(
		testpatterns.PreprovisionedPV,
		testpatterns.InlineVolume))
}

func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
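	// local holds the per-test-case objects that init() creates and cleanup() tears down.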
	type local struct {
		config        *PerTestConfig
		driverCleanup func()

		hostExec          utils.HostExec
		resource          *VolumeResource
		roVolSource       *v1.VolumeSource
		pod               *v1.Pod
		formatPod         *v1.Pod
		subPathDir        string
		filePathInSubpath string
		filePathInVolume  string

		intreeOps   opCounts
		migratedOps opCounts
	}
	var l local

	// No preconditions to test. Normally they would be in a BeforeEach here.

	// This intentionally comes after checking the preconditions because it
	// registers its own BeforeEach which creates the namespace. Beware that it
	// also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewDefaultFramework("provisioning")
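
	// init prepares the driver, volume resource, and subpath test pod for one test case.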
	init := func() {
		l = local{}

		// Now do the more expensive test initialization.
		l.config, l.driverCleanup = driver.PrepareTest(f)
		l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName)
		testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
		l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
		l.hostExec = utils.NewHostExec(f)

		// Setup subPath test dependent resource
		volType := pattern.VolType
		switch volType {
		case testpatterns.InlineVolume:
			if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
				l.roVolSource = iDriver.GetVolumeSource(true, pattern.FsType, l.resource.Volume)
			}
		case testpatterns.PreprovisionedPV:
			l.roVolSource = &v1.VolumeSource{
				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
					ClaimName: l.resource.Pvc.Name,
					ReadOnly:  true,
				},
			}
		case testpatterns.DynamicPV:
			l.roVolSource = &v1.VolumeSource{
				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
					ClaimName: l.resource.Pvc.Name,
					ReadOnly:  true,
				},
			}
		default:
			framework.Failf("SubPath test doesn't support: %s", volType)
		}

		subPath := f.Namespace.Name
		l.pod = SubpathTestPod(f, subPath, string(volType), l.resource.VolSource, true)
		e2epod.SetNodeSelection(l.pod, l.config.ClientNodeSelection)

		l.formatPod = volumeFormatPod(f, l.resource.VolSource)
		e2epod.SetNodeSelection(l.formatPod, l.config.ClientNodeSelection)

		l.subPathDir = filepath.Join(volumePath, subPath)
		l.filePathInSubpath = filepath.Join(volumePath, fileName)
		l.filePathInVolume = filepath.Join(l.subPathDir, fileName)
	}
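
	// cleanup deletes the test pod and volume resource, runs the driver cleanup,
	// and validates the migration volume operation counts.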
	cleanup := func() {
		var errs []error
		if l.pod != nil {
			ginkgo.By("Deleting pod")
			err := e2epod.DeletePodWithWait(f.ClientSet, l.pod)
			errs = append(errs, err)
			l.pod = nil
		}

		if l.resource != nil {
			errs = append(errs, l.resource.CleanupResource())
			l.resource = nil
		}

		errs = append(errs, tryFunc(l.driverCleanup))
		l.driverCleanup = nil
		framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")

		if l.hostExec != nil {
			l.hostExec.Cleanup()
		}

		validateMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName, l.intreeOps, l.migratedOps)
	}

	driverName := driver.GetDriverInfo().Name

	ginkgo.It("should support non-existent path", func() {
		init()
		defer cleanup()

		// Write the file in the subPath from init container 1
		setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])

		// Read it from outside the subPath from container 1
		testReadFile(f, l.filePathInVolume, l.pod, 1)
	})

	ginkgo.It("should support existing directory", func() {
		init()
		defer cleanup()

		// Create the directory
		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))

		// Write the file in the subPath from init container 1
		setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])

		// Read it from outside the subPath from container 1
		testReadFile(f, l.filePathInVolume, l.pod, 1)
	})

	ginkgo.It("should support existing single file [LinuxOnly]", func() {
		init()
		defer cleanup()

		// Create the file in the init container
		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", l.subPathDir, l.filePathInVolume))

		// Read it from inside the subPath from container 0
		testReadFile(f, l.filePathInSubpath, l.pod, 0)
	})

	ginkgo.It("should support file as subpath [LinuxOnly]", func() {
		init()
		defer cleanup()

		// Create the file in the init container
		setInitCommand(l.pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, l.subPathDir))

		TestBasicSubpath(f, f.Namespace.Name, l.pod)
	})
  193. ginkgo.It("should fail if subpath directory is outside the volume [Slow]", func() {
  194. init()
  195. defer cleanup()
  196. // Create the subpath outside the volume
  197. var command string
  198. if framework.NodeOSDistroIs("windows") {
  199. command = fmt.Sprintf("New-Item -ItemType SymbolicLink -Path %s -value \\Windows", l.subPathDir)
  200. } else {
  201. command = fmt.Sprintf("ln -s /bin %s", l.subPathDir)
  202. }
  203. setInitCommand(l.pod, command)
  204. // Pod should fail
  205. testPodFailSubpath(f, l.pod, false)
  206. })
  207. ginkgo.It("should fail if subpath file is outside the volume [Slow][LinuxOnly]", func() {
  208. init()
  209. defer cleanup()
  210. // Create the subpath outside the volume
  211. setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/sh %s", l.subPathDir))
  212. // Pod should fail
  213. testPodFailSubpath(f, l.pod, false)
  214. })
  215. ginkgo.It("should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]", func() {
  216. init()
  217. defer cleanup()
  218. // Create the subpath outside the volume
  219. setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", l.subPathDir))
  220. // Pod should fail
  221. testPodFailSubpath(f, l.pod, false)
  222. })
  223. ginkgo.It("should fail if subpath with backstepping is outside the volume [Slow]", func() {
  224. init()
  225. defer cleanup()
  226. // Create the subpath outside the volume
  227. var command string
  228. if framework.NodeOSDistroIs("windows") {
  229. command = fmt.Sprintf("New-Item -ItemType SymbolicLink -Path %s -value ..\\", l.subPathDir)
  230. } else {
  231. command = fmt.Sprintf("ln -s ../ %s", l.subPathDir)
  232. }
  233. setInitCommand(l.pod, command)
  234. // Pod should fail
  235. testPodFailSubpath(f, l.pod, false)
  236. })
  237. ginkgo.It("should support creating multiple subpath from same volumes [Slow]", func() {
  238. init()
  239. defer cleanup()
  240. subpathDir1 := filepath.Join(volumePath, "subpath1")
  241. subpathDir2 := filepath.Join(volumePath, "subpath2")
  242. filepath1 := filepath.Join("/test-subpath1", fileName)
  243. filepath2 := filepath.Join("/test-subpath2", fileName)
  244. setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
  245. addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{
  246. Name: volumeName,
  247. MountPath: "/test-subpath1",
  248. SubPath: "subpath1",
  249. })
  250. addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{
  251. Name: volumeName,
  252. MountPath: "/test-subpath2",
  253. SubPath: "subpath2",
  254. })
  255. // Write the files from container 0 and instantly read them back
  256. addMultipleWrites(&l.pod.Spec.Containers[0], filepath1, filepath2)
  257. testMultipleReads(f, l.pod, 0, filepath1, filepath2)
  258. })
  259. ginkgo.It("should support restarting containers using directory as subpath [Slow]", func() {
  260. init()
  261. defer cleanup()
  262. // Create the directory
  263. var command string
  264. if framework.NodeOSDistroIs("windows") {
  265. command = fmt.Sprintf("mkdir -p %v; New-Item -itemtype File -path %v", l.subPathDir, probeFilePath)
  266. } else {
  267. command = fmt.Sprintf("mkdir -p %v; touch %v", l.subPathDir, probeFilePath)
  268. }
  269. setInitCommand(l.pod, command)
  270. testPodContainerRestart(f, l.pod)
  271. })
  272. ginkgo.It("should support restarting containers using file as subpath [Slow][LinuxOnly]", func() {
  273. init()
  274. defer cleanup()
  275. // Create the file
  276. setInitCommand(l.pod, fmt.Sprintf("touch %v; touch %v", l.subPathDir, probeFilePath))
  277. testPodContainerRestart(f, l.pod)
  278. })
  279. ginkgo.It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func() {
  280. init()
  281. defer cleanup()
  282. testSubpathReconstruction(f, l.hostExec, l.pod, false)
  283. })
  284. ginkgo.It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func() {
  285. init()
  286. defer cleanup()
  287. if strings.HasPrefix(driverName, "hostPath") {
  288. // TODO: This skip should be removed once #61446 is fixed
  289. e2eskipper.Skipf("Driver %s does not support reconstruction, skipping", driverName)
  290. }
  291. testSubpathReconstruction(f, l.hostExec, l.pod, true)
  292. })
  293. ginkgo.It("should support readOnly directory specified in the volumeMount", func() {
  294. init()
  295. defer cleanup()
  296. // Create the directory
  297. setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
  298. // Write the file in the volume from init container 2
  299. setWriteCommand(l.filePathInVolume, &l.pod.Spec.InitContainers[2])
  300. // Read it from inside the subPath from container 0
  301. l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
  302. testReadFile(f, l.filePathInSubpath, l.pod, 0)
  303. })
  304. ginkgo.It("should support readOnly file specified in the volumeMount [LinuxOnly]", func() {
  305. init()
  306. defer cleanup()
  307. // Create the file
  308. setInitCommand(l.pod, fmt.Sprintf("touch %s", l.subPathDir))
  309. // Write the file in the volume from init container 2
  310. setWriteCommand(l.subPathDir, &l.pod.Spec.InitContainers[2])
  311. // Read it from inside the subPath from container 0
  312. l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
  313. testReadFile(f, volumePath, l.pod, 0)
  314. })
  315. ginkgo.It("should support existing directories when readOnly specified in the volumeSource", func() {
  316. init()
  317. defer cleanup()
  318. if l.roVolSource == nil {
  319. e2eskipper.Skipf("Driver %s on volume type %s doesn't support readOnly source", driverName, pattern.VolType)
  320. }
  321. origpod := l.pod.DeepCopy()
  322. // Create the directory
  323. setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
  324. // Write the file in the subPath from init container 1
  325. setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
  326. // Read it from inside the subPath from container 0
  327. testReadFile(f, l.filePathInSubpath, l.pod, 0)
  328. // Reset the pod
  329. l.pod = origpod
  330. // Set volume source to read only
  331. l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
  332. // Read it from inside the subPath from container 0
  333. testReadFile(f, l.filePathInSubpath, l.pod, 0)
  334. })
  335. ginkgo.It("should verify container cannot write to subpath readonly volumes [Slow]", func() {
  336. init()
  337. defer cleanup()
  338. if l.roVolSource == nil {
  339. e2eskipper.Skipf("Driver %s on volume type %s doesn't support readOnly source", driverName, pattern.VolType)
  340. }
  341. // Format the volume while it's writable
  342. formatVolume(f, l.formatPod)
  343. // Set volume source to read only
  344. l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
  345. // Write the file in the volume from container 0
  346. setWriteCommand(l.subPathDir, &l.pod.Spec.Containers[0])
  347. // Pod should fail
  348. testPodFailSubpath(f, l.pod, true)
  349. })
  350. ginkgo.It("should be able to unmount after the subpath directory is deleted", func() {
  351. init()
  352. defer cleanup()
  353. // Change volume container to busybox so we can exec later
  354. l.pod.Spec.Containers[1].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
  355. l.pod.Spec.Containers[1].Command = volume.GenerateScriptCmd("sleep 100000")
  356. ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name))
  357. removeUnusedContainers(l.pod)
  358. pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), l.pod, metav1.CreateOptions{})
  359. framework.ExpectNoError(err, "while creating pod")
  360. defer func() {
  361. ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
  362. e2epod.DeletePodWithWait(f.ClientSet, pod)
  363. }()
  364. // Wait for pod to be running
  365. err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, l.pod)
  366. framework.ExpectNoError(err, "while waiting for pod to be running")
  367. // Exec into container that mounted the volume, delete subpath directory
  368. rmCmd := fmt.Sprintf("rm -r %s", l.subPathDir)
  369. _, err = podContainerExec(l.pod, 1, rmCmd)
  370. framework.ExpectNoError(err, "while removing subpath directory")
  371. // Delete pod (from defer) and wait for it to be successfully deleted
  372. })
  373. // TODO: add a test case for the same disk with two partitions
  374. }

// TestBasicSubpath runs basic subpath test
func TestBasicSubpath(f *framework.Framework, contents string, pod *v1.Pod) {
	TestBasicSubpathFile(f, contents, pod, volumePath)
}

// TestBasicSubpathFile runs basic subpath file test
func TestBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod, filepath string) {
	setReadCommand(filepath, &pod.Spec.Containers[0])

	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
	removeUnusedContainers(pod)
	f.TestContainerOutput("atomic-volume-subpath", pod, 0, []string{contents})

	ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
	err := e2epod.DeletePodWithWait(f.ClientSet, pod)
	framework.ExpectNoError(err, "while deleting pod")
}
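
// generateSuffixForPodName converts an arbitrary string into a suffix that is
// valid in a pod name.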
func generateSuffixForPodName(s string) string {
	// Pod name must:
	//   1. consist of lower case alphanumeric characters or '-',
	//   2. start and end with an alphanumeric character.
	// (e.g. 'my-name', or '123-abc', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?')
	// Therefore, the suffix is generated by the following steps:
	//   1. all characters other than [A-Za-z0-9] are replaced with "-",
	//   2. a lower case alphanumeric suffix is appended ('-[a-z0-9]{4}' is added),
	//   3. the entire string is converted to lower case.
	re := regexp.MustCompile("[^A-Za-z0-9]")
	return strings.ToLower(fmt.Sprintf("%s-%s", re.ReplaceAllString(s, "-"), rand.String(4)))
}

// SubpathTestPod returns a pod spec for subpath tests
func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *v1.VolumeSource, privilegedSecurityContext bool) *v1.Pod {
	var (
		suffix          = generateSuffixForPodName(volumeType)
		gracePeriod     = int64(1)
		probeVolumeName = "liveness-probe-volume"
		seLinuxOptions  = &v1.SELinuxOptions{Level: "s0:c0,c1"}
	)
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("pod-subpath-test-%s", suffix),
			Namespace: f.Namespace.Name,
		},
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{
				{
					Name:  fmt.Sprintf("init-volume-%s", suffix),
					Image: volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)),
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      volumeName,
							MountPath: volumePath,
						},
						{
							Name:      probeVolumeName,
							MountPath: probeVolumePath,
						},
					},
					SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext),
				},
				{
					Name:  fmt.Sprintf("test-init-subpath-%s", suffix),
					Image: mountImage,
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      volumeName,
							MountPath: volumePath,
							SubPath:   subpath,
						},
						{
							Name:      probeVolumeName,
							MountPath: probeVolumePath,
						},
					},
					SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext),
				},
				{
					Name:  fmt.Sprintf("test-init-volume-%s", suffix),
					Image: mountImage,
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      volumeName,
							MountPath: volumePath,
						},
						{
							Name:      probeVolumeName,
							MountPath: probeVolumePath,
						},
					},
					SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext),
				},
			},
			Containers: []v1.Container{
				{
					Name:  fmt.Sprintf("test-container-subpath-%s", suffix),
					Image: mountImage,
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      volumeName,
							MountPath: volumePath,
							SubPath:   subpath,
						},
						{
							Name:      probeVolumeName,
							MountPath: probeVolumePath,
						},
					},
					SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext),
				},
				{
					Name:  fmt.Sprintf("test-container-volume-%s", suffix),
					Image: mountImage,
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      volumeName,
							MountPath: volumePath,
						},
						{
							Name:      probeVolumeName,
							MountPath: probeVolumePath,
						},
					},
					SecurityContext: volume.GenerateSecurityContext(privilegedSecurityContext),
				},
			},
			RestartPolicy:                 v1.RestartPolicyNever,
			TerminationGracePeriodSeconds: &gracePeriod,
			Volumes: []v1.Volume{
				{
					Name:         volumeName,
					VolumeSource: *source,
				},
				{
					Name: probeVolumeName,
					VolumeSource: v1.VolumeSource{
						EmptyDir: &v1.EmptyDirVolumeSource{},
					},
				},
			},
			SecurityContext: volume.GeneratePodSecurityContext(nil, seLinuxOptions),
		},
	}
}

func containerIsUnused(container *v1.Container) bool {
	// mountImage with nil Args does nothing. Leave everything else
	return container.Image == mountImage && container.Args == nil
}

// removeUnusedContainers removes containers from a SubpathTestPod that aren't
// needed for a test. e.g. to test for subpath mount failure, only one
// container needs to run and get its status checked.
func removeUnusedContainers(pod *v1.Pod) {
	initContainers := []v1.Container{}
	containers := []v1.Container{}
	if pod.Spec.InitContainers[0].Command != nil {
		initContainers = append(initContainers, pod.Spec.InitContainers[0])
	}
	for _, ic := range pod.Spec.InitContainers[1:] {
		if !containerIsUnused(&ic) {
			initContainers = append(initContainers, ic)
		}
	}
	containers = append(containers, pod.Spec.Containers[0])
	if !containerIsUnused(&pod.Spec.Containers[1]) {
		containers = append(containers, pod.Spec.Containers[1])
	}
	pod.Spec.InitContainers = initContainers
	pod.Spec.Containers = containers
}

// volumeFormatPod returns a Pod that does nothing but will cause the plugin to format a filesystem
// on first use
func volumeFormatPod(f *framework.Framework, volumeSource *v1.VolumeSource) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("volume-prep-%s", f.Namespace.Name),
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    fmt.Sprintf("init-volume-%s", f.Namespace.Name),
					Image:   volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox)),
					Command: volume.GenerateScriptCmd("echo nothing"),
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      volumeName,
							MountPath: "/vol",
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name:         volumeName,
					VolumeSource: *volumeSource,
				},
			},
		},
	}
}
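
// setInitCommand makes the first init container run the given shell command.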
func setInitCommand(pod *v1.Pod, command string) {
	pod.Spec.InitContainers[0].Command = volume.GenerateScriptCmd(command)
}
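
// setWriteCommand sets the mount-test container args to create the given file
// (mode 0644) and report its file mode.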
func setWriteCommand(file string, container *v1.Container) {
	container.Args = []string{
		fmt.Sprintf("--new_file_0644=%v", file),
		fmt.Sprintf("--file_mode=%v", file),
	}
}
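
// addSubpathVolumeContainer appends the given volume mount to the container.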
func addSubpathVolumeContainer(container *v1.Container, volumeMount v1.VolumeMount) {
	existingMounts := container.VolumeMounts
	container.VolumeMounts = append(existingMounts, volumeMount)
}
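
// addMultipleWrites sets the mount-test container args to create two files.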
func addMultipleWrites(container *v1.Container, file1 string, file2 string) {
	container.Args = []string{
		fmt.Sprintf("--new_file_0644=%v", file1),
		fmt.Sprintf("--new_file_0666=%v", file2),
	}
}
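
// testMultipleReads creates the pod and verifies that the given container
// reports the content of both files written by addMultipleWrites.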
func testMultipleReads(f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) {
	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
	removeUnusedContainers(pod)
	f.TestContainerOutput("multi_subpath", pod, containerIndex, []string{
		"content of file \"" + file1 + "\": mount-tester new file",
		"content of file \"" + file2 + "\": mount-tester new file",
	})
}
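
// setReadCommand sets the mount-test container args to read the given file in
// a loop, retrying for retryDuration seconds.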
func setReadCommand(file string, container *v1.Container) {
	container.Args = []string{
		fmt.Sprintf("--file_content_in_loop=%v", file),
		fmt.Sprintf("--retry_time=%d", retryDuration),
	}
}
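
// testReadFile creates the pod, verifies that the given container reports the
// expected file content, then deletes the pod.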
func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerIndex int) {
	setReadCommand(file, &pod.Spec.Containers[containerIndex])

	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
	removeUnusedContainers(pod)
	f.TestContainerOutput("subpath", pod, containerIndex, []string{
		"content of file \"" + file + "\": mount-tester new file",
	})

	ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
	err := e2epod.DeletePodWithWait(f.ClientSet, pod)
	framework.ExpectNoError(err, "while deleting pod")
}
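
// testPodFailSubpath expects pod startup to fail with a subPath-related error.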
func testPodFailSubpath(f *framework.Framework, pod *v1.Pod, allowContainerTerminationError bool) {
	testPodFailSubpathError(f, pod, "subPath", allowContainerTerminationError)
}
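
// testPodFailSubpathError creates the pod and waits for a subpath error to
// appear in the container status.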
func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg string, allowContainerTerminationError bool) {
	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
	removeUnusedContainers(pod)
	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "while creating pod")
	defer func() {
		e2epod.DeletePodWithWait(f.ClientSet, pod)
	}()
	ginkgo.By("Checking for subpath error in container status")
	err = waitForPodSubpathError(f, pod, allowContainerTerminationError)
	framework.ExpectNoError(err, "while waiting for subpath failure")
}
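
// findSubpathContainerName returns the name of the first container that mounts
// a volume with a subPath, or "" if there is none.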
func findSubpathContainerName(pod *v1.Pod) string {
	for _, container := range pod.Spec.Containers {
		for _, mount := range container.VolumeMounts {
			if mount.SubPath != "" {
				return container.Name
			}
		}
	}
	return ""
}
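
// waitForPodSubpathError polls the pod until the subpath container reports a
// subPath-related CreateContainerConfigError; a non-zero termination is only
// accepted when allowContainerTerminationError is true.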
func waitForPodSubpathError(f *framework.Framework, pod *v1.Pod, allowContainerTerminationError bool) error {
	subpathContainerName := findSubpathContainerName(pod)
	if subpathContainerName == "" {
		return fmt.Errorf("failed to find container that uses subpath")
	}

	waitErr := wait.PollImmediate(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
		pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, status := range pod.Status.ContainerStatuses {
			// 0 is the container that uses subpath
			if status.Name == subpathContainerName {
				switch {
				case status.State.Terminated != nil:
					if status.State.Terminated.ExitCode != 0 && allowContainerTerminationError {
						return true, nil
					}
					return false, fmt.Errorf("subpath container unexpectedly terminated")
				case status.State.Waiting != nil:
					if status.State.Waiting.Reason == "CreateContainerConfigError" &&
						strings.Contains(status.State.Waiting.Message, "subPath") {
						return true, nil
					}
					return false, nil
				default:
					return false, nil
				}
			}
		}
		return false, nil
	})
	if waitErr != nil {
		return fmt.Errorf("error waiting for pod subpath error to occur: %v", waitErr)
	}
	return nil
}

// Tests that the existing subpath mount is detected when a container restarts
func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
	pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure

	pod.Spec.Containers[0].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
	pod.Spec.Containers[0].Command = volume.GenerateScriptCmd("sleep 100000")
	pod.Spec.Containers[1].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
	pod.Spec.Containers[1].Command = volume.GenerateScriptCmd("sleep 100000")

	// Add liveness probe to subpath container
	pod.Spec.Containers[0].LivenessProbe = &v1.Probe{
		Handler: v1.Handler{
			Exec: &v1.ExecAction{
				Command: []string{"cat", probeFilePath},
			},
		},
		InitialDelaySeconds: 1,
		FailureThreshold:    1,
		PeriodSeconds:       2,
	}

	// Start pod
	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
	removeUnusedContainers(pod)
	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "while creating pod")
	defer func() {
		e2epod.DeletePodWithWait(f.ClientSet, pod)
	}()
	err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
	framework.ExpectNoError(err, "while waiting for pod to be running")

	ginkgo.By("Failing liveness probe")
	out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", probeFilePath))
	framework.Logf("Pod exec output: %v", out)
	framework.ExpectNoError(err, "while failing liveness probe")

	// Check that container has restarted
	ginkgo.By("Waiting for container to restart")
	restarts := int32(0)
	err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) {
		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, status := range pod.Status.ContainerStatuses {
			if status.Name == pod.Spec.Containers[0].Name {
				framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
				restarts = status.RestartCount
				if restarts > 0 {
					framework.Logf("Container has restart count: %v", restarts)
					return true, nil
				}
			}
		}
		return false, nil
	})
	framework.ExpectNoError(err, "while waiting for container to restart")

	// Fix liveness probe
	ginkgo.By("Rewriting the file")
	var writeCmd string
	if framework.NodeOSDistroIs("windows") {
		writeCmd = fmt.Sprintf("echo test-after | Out-File -FilePath %v", probeFilePath)
	} else {
		writeCmd = fmt.Sprintf("echo test-after > %v", probeFilePath)
	}
	out, err = podContainerExec(pod, 1, writeCmd)
	framework.Logf("Pod exec output: %v", out)
	framework.ExpectNoError(err, "while rewriting the probe file")

	// Wait for container restarts to stabilize
	ginkgo.By("Waiting for container to stop restarting")
	stableCount := int(0)
	stableThreshold := int(time.Minute / framework.Poll)
	err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) {
		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, status := range pod.Status.ContainerStatuses {
			if status.Name == pod.Spec.Containers[0].Name {
				if status.RestartCount == restarts {
					stableCount++
					if stableCount > stableThreshold {
						framework.Logf("Container restart has stabilized")
						return true, nil
					}
				} else {
					restarts = status.RestartCount
					stableCount = 0
					framework.Logf("Container has restart count: %v", restarts)
				}
				break
			}
		}
		return false, nil
	})
	framework.ExpectNoError(err, "while waiting for container to stabilize")
}
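
// testSubpathReconstruction verifies that subpath and global mounts are cleaned
// up after the pod is deleted (gracefully or forcefully) while the kubelet is down.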
func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec, pod *v1.Pod, forceDelete bool) {
	// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()

	// Disruptive tests run serially, so we can cache all volume global mount
	// points and verify after the test that we do not leak any global mount point.
	nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
	framework.ExpectNoError(err, "while listing schedulable nodes")
	globalMountPointsByNode := make(map[string]sets.String, len(nodeList.Items))
	for _, node := range nodeList.Items {
		globalMountPointsByNode[node.Name] = utils.FindVolumeGlobalMountPoints(hostExec, &node)
	}

	// Change to busybox
	pod.Spec.Containers[0].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
	pod.Spec.Containers[0].Command = volume.GenerateScriptCmd("sleep 100000")
	pod.Spec.Containers[1].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
	pod.Spec.Containers[1].Command = volume.GenerateScriptCmd("sleep 100000")

	// If grace period is too short, then there is not enough time for the volume
	// manager to cleanup the volumes
	gracePeriod := int64(30)
	pod.Spec.TerminationGracePeriodSeconds = &gracePeriod

	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
	removeUnusedContainers(pod)
	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "while creating pod")

	err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
	framework.ExpectNoError(err, "while waiting for pod to be running")

	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "while getting pod")

	var podNode *v1.Node
	for i := range nodeList.Items {
		if nodeList.Items[i].Name == pod.Spec.NodeName {
			podNode = &nodeList.Items[i]
		}
	}
	framework.ExpectNotEqual(podNode, nil, "pod node should exist in schedulable nodes")

	utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)

	if podNode != nil {
		mountPoints := globalMountPointsByNode[podNode.Name]
		mountPointsAfter := utils.FindVolumeGlobalMountPoints(hostExec, podNode)
		s1 := mountPointsAfter.Difference(mountPoints)
		s2 := mountPoints.Difference(mountPointsAfter)
		gomega.Expect(s1).To(gomega.BeEmpty(), "global mount points leaked: %v", s1)
		gomega.Expect(s2).To(gomega.BeEmpty(), "global mount points not found: %v", s2)
	}
}
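
// formatVolume runs the given pod to completion so that the volume plugin
// formats the filesystem on first use, then deletes the pod.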
func formatVolume(f *framework.Framework, pod *v1.Pod) {
	ginkgo.By(fmt.Sprintf("Creating pod to format volume %s", pod.Name))
	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "while creating volume init pod")

	err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
	framework.ExpectNoError(err, "while waiting for volume init pod to succeed")

	err = e2epod.DeletePodWithWait(f.ClientSet, pod)
	framework.ExpectNoError(err, "while deleting volume init pod")
}
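
// podContainerExec runs the given command in the specified container of the
// pod via kubectl exec, using /bin/sh -c on Linux and powershell /c on Windows.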
func podContainerExec(pod *v1.Pod, containerIndex int, command string) (string, error) {
	var shell string
	var option string
	if framework.NodeOSDistroIs("windows") {
		shell = "powershell"
		option = "/c"
	} else {
		shell = "/bin/sh"
		option = "-c"
	}
	return framework.RunKubectl(pod.Namespace, "exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", shell, option, command)
}