  1. /*
  2. Copyright 2018 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package testsuites
  14. import (
  15. "context"
  16. "fmt"
  17. "path/filepath"
  18. "strings"
  19. "github.com/onsi/ginkgo"
  20. "github.com/onsi/gomega"
  21. v1 "k8s.io/api/core/v1"
  22. storagev1 "k8s.io/api/storage/v1"
  23. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  24. "k8s.io/apimachinery/pkg/fields"
  25. "k8s.io/apimachinery/pkg/util/errors"
  26. clientset "k8s.io/client-go/kubernetes"
  27. volevents "k8s.io/kubernetes/pkg/controller/volume/events"
  28. "k8s.io/kubernetes/pkg/kubelet/events"
  29. "k8s.io/kubernetes/test/e2e/framework"
  30. e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
  31. e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
  32. e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
  33. e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
  34. "k8s.io/kubernetes/test/e2e/framework/volume"
  35. "k8s.io/kubernetes/test/e2e/storage/testpatterns"
  36. "k8s.io/kubernetes/test/e2e/storage/utils"
  37. )
const (
	// noProvisioner marks StorageClasses whose PVs are created manually by
	// the test rather than dynamically provisioned.
	noProvisioner = "kubernetes.io/no-provisioner"
	// pvNamePrefix is the name prefix used for test PersistentVolumes.
	pvNamePrefix = "pv"
)

// volumeModeTestSuite implements the TestSuite interface for tests that
// exercise filesystem vs. block volumeMode behavior.
type volumeModeTestSuite struct {
	tsInfo TestSuiteInfo
}

// Compile-time assertion that volumeModeTestSuite satisfies TestSuite.
var _ TestSuite = &volumeModeTestSuite{}
  46. // InitVolumeModeTestSuite returns volumeModeTestSuite that implements TestSuite interface
  47. func InitVolumeModeTestSuite() TestSuite {
  48. return &volumeModeTestSuite{
  49. tsInfo: TestSuiteInfo{
  50. Name: "volumeMode",
  51. TestPatterns: []testpatterns.TestPattern{
  52. testpatterns.FsVolModePreprovisionedPV,
  53. testpatterns.FsVolModeDynamicPV,
  54. testpatterns.BlockVolModePreprovisionedPV,
  55. testpatterns.BlockVolModeDynamicPV,
  56. },
  57. SupportedSizeRange: volume.SizeRange{
  58. Min: "1Mi",
  59. },
  60. },
  61. }
  62. }
  63. func (t *volumeModeTestSuite) GetTestSuiteInfo() TestSuiteInfo {
  64. return t.tsInfo
  65. }
// SkipRedundantSuite allows a suite to skip driver/pattern combinations it
// considers redundant. The volume mode suite never skips, so this is
// intentionally a no-op.
func (t *volumeModeTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
}
// DefineTests registers the volume mode tests for one driver/pattern
// combination. It only registers ginkgo.It callbacks; the actual test work
// runs later, each It with its own fresh framework namespace.
func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
	// local bundles all per-test state so a single `l = local{}` in init()
	// resets everything between tests.
	type local struct {
		config        *PerTestConfig
		driverCleanup func()

		cs clientset.Interface
		ns *v1.Namespace

		// VolumeResource contains pv, pvc, sc, etc., owns cleaning that up
		VolumeResource

		// Volume operation metric counts captured at init time, compared
		// again during cleanup to validate in-tree -> CSI migration.
		intreeOps   opCounts
		migratedOps opCounts
	}
	var (
		dInfo = driver.GetDriverInfo()
		l     local
	)

	// No preconditions to test. Normally they would be in a BeforeEach here.

	// This intentionally comes after checking the preconditions because it
	// registers its own BeforeEach which creates the namespace. Beware that it
	// also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewDefaultFramework("volumemode")

	// init resets per-test state and performs the common (cheap) setup.
	init := func() {
		l = local{}
		l.ns = f.Namespace
		l.cs = f.ClientSet

		// Now do the more expensive test initialization.
		l.config, l.driverCleanup = driver.PrepareTest(f)
		l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName)
	}

	// manualInit initializes l.VolumeResource without creating the PV & PVC objects.
	manualInit := func() {
		init()

		fsType := pattern.FsType
		volBindMode := storagev1.VolumeBindingImmediate

		var (
			scName             string
			pvSource           *v1.PersistentVolumeSource
			volumeNodeAffinity *v1.VolumeNodeAffinity
		)

		l.VolumeResource = VolumeResource{
			Config:  l.config,
			Pattern: pattern,
		}

		// Create volume for pre-provisioned volume tests
		l.Volume = CreateVolume(driver, l.config, pattern.VolType)

		switch pattern.VolType {
		case testpatterns.PreprovisionedPV:
			// Per-mode SC name keeps block and filesystem variants apart
			// within the same namespace.
			if pattern.VolMode == v1.PersistentVolumeBlock {
				scName = fmt.Sprintf("%s-%s-sc-for-block", l.ns.Name, dInfo.Name)
			} else if pattern.VolMode == v1.PersistentVolumeFilesystem {
				scName = fmt.Sprintf("%s-%s-sc-for-file", l.ns.Name, dInfo.Name)
			}
			if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
				pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, l.Volume)
				if pvSource == nil {
					e2eskipper.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
				}

				// SC/PV/PVC objects are only built here; they are created on
				// the API server inside the individual It callbacks.
				storageClass, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, pattern.VolMode, *pvSource, volumeNodeAffinity)
				l.Sc = storageClass
				l.Pv = e2epv.MakePersistentVolume(pvConfig)
				l.Pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, l.ns.Name)
			}
		case testpatterns.DynamicPV:
			if dDriver, ok := driver.(DynamicPVTestDriver); ok {
				l.Sc = dDriver.GetDynamicProvisionStorageClass(l.config, fsType)
				if l.Sc == nil {
					e2eskipper.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
				}
				l.Sc.VolumeBindingMode = &volBindMode
				// Claim size must satisfy both the suite's and the driver's
				// supported ranges.
				testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
				driverVolumeSizeRange := dInfo.SupportedSizeRange
				claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
				framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)

				l.Pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
					ClaimSize:        claimSize,
					StorageClassName: &(l.Sc.Name),
					VolumeMode:       &pattern.VolMode,
				}, l.ns.Name)
			}
		default:
			framework.Failf("Volume mode test doesn't support: %s", pattern.VolType)
		}
	}

	// cleanup tears down the volume resource and driver, aggregating errors so
	// one failure does not mask the others, then validates migration metrics.
	cleanup := func() {
		var errs []error
		errs = append(errs, l.CleanupResource())
		errs = append(errs, tryFunc(l.driverCleanup))
		l.driverCleanup = nil
		framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
		validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps)
	}

	// We register different tests depending on the driver capabilities.
	isBlockSupported := dInfo.Capabilities[CapBlock]
	switch pattern.VolType {
	case testpatterns.PreprovisionedPV:
		// Negative test: a block-mode PV on a driver without block support
		// must leave the pod Pending (mount failure).
		if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
			ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func() {
				manualInit()
				defer cleanup()

				var err error

				ginkgo.By("Creating sc")
				l.Sc, err = l.cs.StorageV1().StorageClasses().Create(context.TODO(), l.Sc, metav1.CreateOptions{})
				framework.ExpectNoError(err, "Failed to create sc")

				ginkgo.By("Creating pv and pvc")
				l.Pv, err = l.cs.CoreV1().PersistentVolumes().Create(context.TODO(), l.Pv, metav1.CreateOptions{})
				framework.ExpectNoError(err, "Failed to create pv")

				// Prebind pv
				l.Pvc.Spec.VolumeName = l.Pv.Name
				l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), l.Pvc, metav1.CreateOptions{})
				framework.ExpectNoError(err, "Failed to create pvc")

				framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc")

				ginkgo.By("Creating pod")
				pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
				// Setting node
				e2epod.SetNodeSelection(pod, l.config.ClientNodeSelection)
				pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
				framework.ExpectNoError(err, "Failed to create pod")
				defer func() {
					framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod), "Failed to delete pod")
				}()

				eventSelector := fields.Set{
					"involvedObject.kind":      "Pod",
					"involvedObject.name":      pod.Name,
					"involvedObject.namespace": l.ns.Name,
					"reason":                   events.FailedMountVolume,
				}.AsSelector().String()
				msg := "Unable to attach or mount volumes"
				err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.PodStartTimeout)
				// Events are unreliable, don't depend on the event. It's used only to speed up the test.
				if err != nil {
					framework.Logf("Warning: did not get event about FailedMountVolume")
				}

				// Check the pod is still not running
				p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
				framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)")
				framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending")
			})
		}

	case testpatterns.DynamicPV:
		// Negative test: requesting a block-mode PVC from a provisioner that
		// cannot provision block volumes must leave the PVC Pending.
		if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
			ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow]", func() {
				manualInit()
				defer cleanup()

				var err error

				ginkgo.By("Creating sc")
				l.Sc, err = l.cs.StorageV1().StorageClasses().Create(context.TODO(), l.Sc, metav1.CreateOptions{})
				framework.ExpectNoError(err, "Failed to create sc")

				ginkgo.By("Creating pv and pvc")
				l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), l.Pvc, metav1.CreateOptions{})
				framework.ExpectNoError(err, "Failed to create pvc")

				eventSelector := fields.Set{
					"involvedObject.kind":      "PersistentVolumeClaim",
					"involvedObject.name":      l.Pvc.Name,
					"involvedObject.namespace": l.ns.Name,
					"reason":                   volevents.ProvisioningFailed,
				}.AsSelector().String()
				msg := "does not support block volume provisioning"
				err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.ClaimProvisionTimeout)
				// Events are unreliable, don't depend on the event. It's used only to speed up the test.
				if err != nil {
					// NOTE(review): "provisioing" is a typo in this log message
					// ("provisioning"); harmless, worth fixing in a code change.
					framework.Logf("Warning: did not get event about provisioing failed")
				}

				// Check the pvc is still pending
				pvc, err := l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Get(context.TODO(), l.Pvc.Name, metav1.GetOptions{})
				framework.ExpectNoError(err, "Failed to re-read the pvc after event (or timeout)")
				framework.ExpectEqual(pvc.Status.Phase, v1.ClaimPending, "PVC phase isn't pending")
			})
		}
	default:
		framework.Failf("Volume mode test doesn't support volType: %v", pattern.VolType)
	}

	// Negative test, registered for every pattern: a pod that uses a volume
	// with the opposite mode (volumeMounts<->volumeDevices swapped) must stay
	// Pending with a FailedMountVolume event.
	ginkgo.It("should fail to use a volume in a pod with mismatched mode [Slow]", func() {
		skipTestIfBlockNotSupported(driver)
		init()
		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
		l.VolumeResource = *CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
		defer cleanup()

		ginkgo.By("Creating pod")
		var err error
		pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
		// Change volumeMounts to volumeDevices and the other way around
		pod = swapVolumeMode(pod)

		// Run the pod
		pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err, "Failed to create pod")
		defer func() {
			framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod), "Failed to delete pod")
		}()

		ginkgo.By("Waiting for the pod to fail")
		// Wait for an event that the pod is invalid.
		eventSelector := fields.Set{
			"involvedObject.kind":      "Pod",
			"involvedObject.name":      pod.Name,
			"involvedObject.namespace": l.ns.Name,
			"reason":                   events.FailedMountVolume,
		}.AsSelector().String()

		var msg string
		if pattern.VolMode == v1.PersistentVolumeBlock {
			msg = "has volumeMode Block, but is specified in volumeMounts"
		} else {
			msg = "has volumeMode Filesystem, but is specified in volumeDevices"
		}
		err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.PodStartTimeout)
		// Events are unreliable, don't depend on them. They're used only to speed up the test.
		if err != nil {
			framework.Logf("Warning: did not get event about mismatched volume use")
		}

		// Check the pod is still not running
		p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)")
		framework.ExpectEqual(p.Status.Phase, v1.PodPending, "Pod phase isn't pending")
	})

	// Positive test, registered for every pattern: a pod that references a PVC
	// but mounts/maps nothing must not have the volume set up in its kubelet
	// pod directory.
	ginkgo.It("should not mount / map unused volumes in a pod", func() {
		if pattern.VolMode == v1.PersistentVolumeBlock {
			skipTestIfBlockNotSupported(driver)
		}
		init()
		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
		l.VolumeResource = *CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
		defer cleanup()

		ginkgo.By("Creating pod")
		var err error
		pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
		// Strip all mounts/devices so the volume is referenced but unused.
		for i := range pod.Spec.Containers {
			pod.Spec.Containers[i].VolumeDevices = nil
			pod.Spec.Containers[i].VolumeMounts = nil
		}

		// Run the pod
		pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		defer func() {
			framework.ExpectNoError(e2epod.DeletePodWithWait(l.cs, pod))
		}()

		err = e2epod.WaitForPodNameRunningInNamespace(l.cs, pod.Name, pod.Namespace)
		framework.ExpectNoError(err)

		// Reload the pod to get its node
		pod, err = l.cs.CoreV1().Pods(l.ns.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		framework.ExpectNotEqual(pod.Spec.NodeName, "", "pod should be scheduled to a node")
		node, err := l.cs.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Listing mounted volumes in the pod")
		hostExec := utils.NewHostExec(f)
		defer hostExec.Cleanup()
		volumePaths, devicePaths, err := listPodVolumePluginDirectory(hostExec, pod, node)
		framework.ExpectNoError(err)

		driverInfo := driver.GetDriverInfo()
		volumePlugin := driverInfo.InTreePluginName
		if len(volumePlugin) == 0 {
			// TODO: check if it's a CSI volume first
			volumePlugin = "kubernetes.io/csi"
		}
		ginkgo.By(fmt.Sprintf("Checking that volume plugin %s is not used in pod directory", volumePlugin))
		// kubelet encodes "/" in plugin names as "~" in directory names.
		safeVolumePlugin := strings.ReplaceAll(volumePlugin, "/", "~")
		for _, path := range volumePaths {
			gomega.Expect(path).NotTo(gomega.ContainSubstring(safeVolumePlugin), fmt.Sprintf("no %s volume should be mounted into pod directory", volumePlugin))
		}
		for _, path := range devicePaths {
			gomega.Expect(path).NotTo(gomega.ContainSubstring(safeVolumePlugin), fmt.Sprintf("no %s volume should be symlinked into pod directory", volumePlugin))
		}
	})
}
  330. func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
  331. volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource, volumeNodeAffinity *v1.VolumeNodeAffinity) (*storagev1.StorageClass,
  332. e2epv.PersistentVolumeConfig, e2epv.PersistentVolumeClaimConfig) {
  333. // StorageClass
  334. scConfig := &storagev1.StorageClass{
  335. ObjectMeta: metav1.ObjectMeta{
  336. Name: scName,
  337. },
  338. Provisioner: noProvisioner,
  339. VolumeBindingMode: &volBindMode,
  340. }
  341. // PV
  342. pvConfig := e2epv.PersistentVolumeConfig{
  343. PVSource: pvSource,
  344. NodeAffinity: volumeNodeAffinity,
  345. NamePrefix: pvNamePrefix,
  346. StorageClassName: scName,
  347. VolumeMode: &volMode,
  348. }
  349. // PVC
  350. pvcConfig := e2epv.PersistentVolumeClaimConfig{
  351. AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
  352. StorageClassName: &scName,
  353. VolumeMode: &volMode,
  354. }
  355. return scConfig, pvConfig, pvcConfig
  356. }
  357. // swapVolumeMode changes volumeMounts to volumeDevices and the other way around
  358. func swapVolumeMode(podTemplate *v1.Pod) *v1.Pod {
  359. pod := podTemplate.DeepCopy()
  360. for c := range pod.Spec.Containers {
  361. container := &pod.Spec.Containers[c]
  362. container.VolumeDevices = []v1.VolumeDevice{}
  363. container.VolumeMounts = []v1.VolumeMount{}
  364. // Change VolumeMounts to VolumeDevices
  365. for _, volumeMount := range podTemplate.Spec.Containers[c].VolumeMounts {
  366. container.VolumeDevices = append(container.VolumeDevices, v1.VolumeDevice{
  367. Name: volumeMount.Name,
  368. DevicePath: volumeMount.MountPath,
  369. })
  370. }
  371. // Change VolumeDevices to VolumeMounts
  372. for _, volumeDevice := range podTemplate.Spec.Containers[c].VolumeDevices {
  373. container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
  374. Name: volumeDevice.Name,
  375. MountPath: volumeDevice.DevicePath,
  376. })
  377. }
  378. }
  379. return pod
  380. }
  381. // listPodVolumePluginDirectory returns all volumes in /var/lib/kubelet/pods/<pod UID>/volumes/* and
  382. // /var/lib/kubelet/pods/<pod UID>/volumeDevices/*
  383. // Sample output:
  384. // /var/lib/kubelet/pods/a4717a30-000a-4081-a7a8-f51adf280036/volumes/kubernetes.io~secret/default-token-rphdt
  385. // /var/lib/kubelet/pods/4475b7a3-4a55-4716-9119-fd0053d9d4a6/volumeDevices/kubernetes.io~aws-ebs/pvc-5f9f80f5-c90b-4586-9966-83f91711e1c0
  386. func listPodVolumePluginDirectory(h utils.HostExec, pod *v1.Pod, node *v1.Node) (mounts []string, devices []string, err error) {
  387. mountPath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumes")
  388. devicePath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumeDevices")
  389. mounts, err = listPodDirectory(h, mountPath, node)
  390. if err != nil {
  391. return nil, nil, err
  392. }
  393. devices, err = listPodDirectory(h, devicePath, node)
  394. if err != nil {
  395. return nil, nil, err
  396. }
  397. return mounts, devices, nil
  398. }
  399. func listPodDirectory(h utils.HostExec, path string, node *v1.Node) ([]string, error) {
  400. // Return no error if the directory does not exist (e.g. there are no block volumes used)
  401. _, err := h.IssueCommandWithResult("test ! -d "+path, node)
  402. if err == nil {
  403. // The directory does not exist
  404. return nil, nil
  405. }
  406. // The directory either exists or a real error happened (e.g. "access denied").
  407. // Ignore the error, "find" will hit the error again and we report it there.
  408. // Inside /var/lib/kubelet/pods/<pod>/volumes, look for <volume_plugin>/<volume-name>, hence depth 2
  409. cmd := fmt.Sprintf("find %s -mindepth 2 -maxdepth 2", path)
  410. out, err := h.IssueCommandWithResult(cmd, node)
  411. if err != nil {
  412. return nil, fmt.Errorf("error checking directory %s on node %s: %s", path, node.Name, err)
  413. }
  414. return strings.Split(out, "\n"), nil
  415. }