vsphere_utils.go 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868
  1. /*
  2. Copyright 2017 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package vsphere
  14. import (
  15. "context"
  16. "fmt"
  17. "path/filepath"
  18. "regexp"
  19. "strings"
  20. "time"
  21. "github.com/onsi/ginkgo"
  22. "github.com/onsi/gomega"
  23. "github.com/vmware/govmomi/find"
  24. "github.com/vmware/govmomi/object"
  25. "github.com/vmware/govmomi/vim25/mo"
  26. vim25types "github.com/vmware/govmomi/vim25/types"
  27. "k8s.io/klog"
  28. v1 "k8s.io/api/core/v1"
  29. storage "k8s.io/api/storage/v1"
  30. "k8s.io/apimachinery/pkg/api/resource"
  31. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  32. "k8s.io/apimachinery/pkg/util/rand"
  33. "k8s.io/apimachinery/pkg/util/uuid"
  34. "k8s.io/apimachinery/pkg/util/wait"
  35. clientset "k8s.io/client-go/kubernetes"
  36. "k8s.io/kubernetes/pkg/volume/util"
  37. "k8s.io/kubernetes/test/e2e/framework"
  38. e2elog "k8s.io/kubernetes/test/e2e/framework/log"
  39. e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
  40. "k8s.io/kubernetes/test/e2e/storage/utils"
  41. imageutils "k8s.io/kubernetes/test/utils/image"
  42. )
const (
	// volumesPerNode is the number of volumes attached per node in scale tests.
	volumesPerNode = 55
	// Names of the storage classes created by these tests.
	storageclass1 = "sc-default"
	storageclass2 = "sc-vsan"
	storageclass3 = "sc-spbm"
	storageclass4 = "sc-user-specified-ds"
	// DummyDiskName is a deliberately non-existent VMDK used to probe a
	// datastore folder's canonical (UUID) path via the lookup error message.
	DummyDiskName = "kube-dummyDisk.vmdk"
	// ProviderPrefix is the scheme prefix of a vSphere node providerID.
	ProviderPrefix = "vsphere://"
)
// volumeState represents the state of a volume.
type volumeState int32

const (
	// volumeStateDetached means the volume is not attached to any node VM.
	volumeStateDetached volumeState = 1
	// volumeStateAttached means the volume is attached to a node VM.
	volumeStateAttached volumeState = 2
)
  58. // Wait until vsphere volumes are detached from the list of nodes or time out after 5 minutes
  59. func waitForVSphereDisksToDetach(nodeVolumes map[string][]string) error {
  60. var (
  61. err error
  62. disksAttached = true
  63. detachTimeout = 5 * time.Minute
  64. detachPollTime = 10 * time.Second
  65. )
  66. err = wait.Poll(detachPollTime, detachTimeout, func() (bool, error) {
  67. attachedResult, err := disksAreAttached(nodeVolumes)
  68. if err != nil {
  69. return false, err
  70. }
  71. for nodeName, nodeVolumes := range attachedResult {
  72. for volumePath, attached := range nodeVolumes {
  73. if attached {
  74. e2elog.Logf("Waiting for volumes %q to detach from %q.", volumePath, string(nodeName))
  75. return false, nil
  76. }
  77. }
  78. }
  79. disksAttached = false
  80. e2elog.Logf("Volume are successfully detached from all the nodes: %+v", nodeVolumes)
  81. return true, nil
  82. })
  83. if err != nil {
  84. return err
  85. }
  86. if disksAttached {
  87. return fmt.Errorf("Gave up waiting for volumes to detach after %v", detachTimeout)
  88. }
  89. return nil
  90. }
  91. // Wait until vsphere vmdk moves to expected state on the given node, or time out after 6 minutes
  92. func waitForVSphereDiskStatus(volumePath string, nodeName string, expectedState volumeState) error {
  93. var (
  94. err error
  95. diskAttached bool
  96. currentState volumeState
  97. timeout = 6 * time.Minute
  98. pollTime = 10 * time.Second
  99. )
  100. var attachedState = map[bool]volumeState{
  101. true: volumeStateAttached,
  102. false: volumeStateDetached,
  103. }
  104. var attachedStateMsg = map[volumeState]string{
  105. volumeStateAttached: "attached to",
  106. volumeStateDetached: "detached from",
  107. }
  108. err = wait.Poll(pollTime, timeout, func() (bool, error) {
  109. diskAttached, err = diskIsAttached(volumePath, nodeName)
  110. if err != nil {
  111. return true, err
  112. }
  113. currentState = attachedState[diskAttached]
  114. if currentState == expectedState {
  115. e2elog.Logf("Volume %q has successfully %s %q", volumePath, attachedStateMsg[currentState], nodeName)
  116. return true, nil
  117. }
  118. e2elog.Logf("Waiting for Volume %q to be %s %q.", volumePath, attachedStateMsg[expectedState], nodeName)
  119. return false, nil
  120. })
  121. if err != nil {
  122. return err
  123. }
  124. if currentState != expectedState {
  125. err = fmt.Errorf("Gave up waiting for Volume %q to be %s %q after %v", volumePath, attachedStateMsg[expectedState], nodeName, timeout)
  126. }
  127. return err
  128. }
// waitForVSphereDiskToAttach waits until the vmdk at volumePath is attached
// to the given node, or times out after 6 minutes.
func waitForVSphereDiskToAttach(volumePath string, nodeName string) error {
	return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateAttached)
}
// waitForVSphereDiskToDetach waits until the vmdk at volumePath is detached
// from the given node, or times out after 6 minutes.
func waitForVSphereDiskToDetach(volumePath string, nodeName string) error {
	return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateDetached)
}
  137. // function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels
  138. func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy, labels map[string]string) *v1.PersistentVolume {
  139. var (
  140. pvConfig framework.PersistentVolumeConfig
  141. pv *v1.PersistentVolume
  142. claimRef *v1.ObjectReference
  143. )
  144. pvConfig = framework.PersistentVolumeConfig{
  145. NamePrefix: "vspherepv-",
  146. PVSource: v1.PersistentVolumeSource{
  147. VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
  148. VolumePath: volumePath,
  149. FSType: "ext4",
  150. },
  151. },
  152. Prebind: nil,
  153. }
  154. pv = &v1.PersistentVolume{
  155. ObjectMeta: metav1.ObjectMeta{
  156. GenerateName: pvConfig.NamePrefix,
  157. Annotations: map[string]string{
  158. util.VolumeGidAnnotationKey: "777",
  159. },
  160. },
  161. Spec: v1.PersistentVolumeSpec{
  162. PersistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy,
  163. Capacity: v1.ResourceList{
  164. v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
  165. },
  166. PersistentVolumeSource: pvConfig.PVSource,
  167. AccessModes: []v1.PersistentVolumeAccessMode{
  168. v1.ReadWriteOnce,
  169. },
  170. ClaimRef: claimRef,
  171. },
  172. }
  173. if labels != nil {
  174. pv.Labels = labels
  175. }
  176. return pv
  177. }
  178. // function to get vsphere persistent volume spec with given selector labels.
  179. func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]string) *v1.PersistentVolumeClaim {
  180. var (
  181. pvc *v1.PersistentVolumeClaim
  182. )
  183. pvc = &v1.PersistentVolumeClaim{
  184. ObjectMeta: metav1.ObjectMeta{
  185. GenerateName: "pvc-",
  186. Namespace: namespace,
  187. },
  188. Spec: v1.PersistentVolumeClaimSpec{
  189. AccessModes: []v1.PersistentVolumeAccessMode{
  190. v1.ReadWriteOnce,
  191. },
  192. Resources: v1.ResourceRequirements{
  193. Requests: v1.ResourceList{
  194. v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
  195. },
  196. },
  197. },
  198. }
  199. if labels != nil {
  200. pvc.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels}
  201. }
  202. return pvc
  203. }
  204. // function to write content to the volume backed by given PVC
  205. func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
  206. utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
  207. e2elog.Logf("Done with writing content to volume")
  208. }
  209. // function to verify content is matching on the volume backed for given PVC
  210. func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
  211. utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
  212. e2elog.Logf("Successfully verified content of the volume")
  213. }
  214. func getVSphereStorageClassSpec(name string, scParameters map[string]string, zones []string) *storage.StorageClass {
  215. var sc *storage.StorageClass
  216. sc = &storage.StorageClass{
  217. TypeMeta: metav1.TypeMeta{
  218. Kind: "StorageClass",
  219. },
  220. ObjectMeta: metav1.ObjectMeta{
  221. Name: name,
  222. },
  223. Provisioner: "kubernetes.io/vsphere-volume",
  224. }
  225. if scParameters != nil {
  226. sc.Parameters = scParameters
  227. }
  228. if zones != nil {
  229. term := v1.TopologySelectorTerm{
  230. MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
  231. {
  232. Key: v1.LabelZoneFailureDomain,
  233. Values: zones,
  234. },
  235. },
  236. }
  237. sc.AllowedTopologies = append(sc.AllowedTopologies, term)
  238. }
  239. return sc
  240. }
  241. func getVSphereClaimSpecWithStorageClass(ns string, diskSize string, storageclass *storage.StorageClass) *v1.PersistentVolumeClaim {
  242. claim := &v1.PersistentVolumeClaim{
  243. ObjectMeta: metav1.ObjectMeta{
  244. GenerateName: "pvc-",
  245. Namespace: ns,
  246. },
  247. Spec: v1.PersistentVolumeClaimSpec{
  248. AccessModes: []v1.PersistentVolumeAccessMode{
  249. v1.ReadWriteOnce,
  250. },
  251. Resources: v1.ResourceRequirements{
  252. Requests: v1.ResourceList{
  253. v1.ResourceName(v1.ResourceStorage): resource.MustParse(diskSize),
  254. },
  255. },
  256. StorageClassName: &(storageclass.Name),
  257. },
  258. }
  259. return claim
  260. }
  261. // func to get pod spec with given volume claim, node selector labels and command
  262. func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]string, command string) *v1.Pod {
  263. pod := &v1.Pod{
  264. TypeMeta: metav1.TypeMeta{
  265. Kind: "Pod",
  266. APIVersion: "v1",
  267. },
  268. ObjectMeta: metav1.ObjectMeta{
  269. GenerateName: "pod-pvc-",
  270. },
  271. Spec: v1.PodSpec{
  272. Containers: []v1.Container{
  273. {
  274. Name: "volume-tester",
  275. Image: imageutils.GetE2EImage(imageutils.BusyBox),
  276. Command: []string{"/bin/sh"},
  277. Args: []string{"-c", command},
  278. VolumeMounts: []v1.VolumeMount{
  279. {
  280. Name: "my-volume",
  281. MountPath: "/mnt/test",
  282. },
  283. },
  284. },
  285. },
  286. RestartPolicy: v1.RestartPolicyNever,
  287. Volumes: []v1.Volume{
  288. {
  289. Name: "my-volume",
  290. VolumeSource: v1.VolumeSource{
  291. PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
  292. ClaimName: claimName,
  293. ReadOnly: false,
  294. },
  295. },
  296. },
  297. },
  298. },
  299. }
  300. if nodeSelectorKV != nil {
  301. pod.Spec.NodeSelector = nodeSelectorKV
  302. }
  303. return pod
  304. }
  305. // func to get pod spec with given volume paths, node selector lables and container commands
  306. func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[string]string, commands []string) *v1.Pod {
  307. var volumeMounts []v1.VolumeMount
  308. var volumes []v1.Volume
  309. for index, volumePath := range volumePaths {
  310. name := fmt.Sprintf("volume%v", index+1)
  311. volumeMounts = append(volumeMounts, v1.VolumeMount{Name: name, MountPath: "/mnt/" + name})
  312. vsphereVolume := new(v1.VsphereVirtualDiskVolumeSource)
  313. vsphereVolume.VolumePath = volumePath
  314. vsphereVolume.FSType = "ext4"
  315. volumes = append(volumes, v1.Volume{Name: name})
  316. volumes[index].VolumeSource.VsphereVolume = vsphereVolume
  317. }
  318. if commands == nil || len(commands) == 0 {
  319. commands = []string{
  320. "/bin/sh",
  321. "-c",
  322. "while true; do sleep 2; done",
  323. }
  324. }
  325. pod := &v1.Pod{
  326. TypeMeta: metav1.TypeMeta{
  327. Kind: "Pod",
  328. APIVersion: "v1",
  329. },
  330. ObjectMeta: metav1.ObjectMeta{
  331. GenerateName: "vsphere-e2e-",
  332. },
  333. Spec: v1.PodSpec{
  334. Containers: []v1.Container{
  335. {
  336. Name: "vsphere-e2e-container-" + string(uuid.NewUUID()),
  337. Image: imageutils.GetE2EImage(imageutils.BusyBox),
  338. Command: commands,
  339. VolumeMounts: volumeMounts,
  340. },
  341. },
  342. RestartPolicy: v1.RestartPolicyNever,
  343. Volumes: volumes,
  344. },
  345. }
  346. if keyValuelabel != nil {
  347. pod.Spec.NodeSelector = keyValuelabel
  348. }
  349. return pod
  350. }
  351. func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) {
  352. for _, filePath := range filePaths {
  353. _, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath)
  354. framework.ExpectNoError(err, fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName))
  355. }
  356. }
  357. func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) {
  358. for _, filePath := range filePaths {
  359. err := framework.CreateEmptyFileOnPod(namespace, podName, filePath)
  360. framework.ExpectNoError(err)
  361. }
  362. }
  363. // verify volumes are attached to the node and are accessible in pod
  364. func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume) {
  365. nodeName := pod.Spec.NodeName
  366. namespace := pod.Namespace
  367. for index, pv := range persistentvolumes {
  368. // Verify disks are attached to the node
  369. isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
  370. framework.ExpectNoError(err)
  371. gomega.Expect(isAttached).To(gomega.BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
  372. // Verify Volumes are accessible
  373. filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
  374. _, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
  375. framework.ExpectNoError(err)
  376. }
  377. }
  378. // verify volumes are created on one of the specified zones
  379. func verifyVolumeCreationOnRightZone(persistentvolumes []*v1.PersistentVolume, nodeName string, zones []string) {
  380. for _, pv := range persistentvolumes {
  381. volumePath := pv.Spec.VsphereVolume.VolumePath
  382. // Extract datastoreName from the volume path in the pv spec
  383. // For example : "vsanDatastore" is extracted from "[vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk"
  384. datastorePathObj, _ := getDatastorePathObjFromVMDiskPath(volumePath)
  385. datastoreName := datastorePathObj.Datastore
  386. nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
  387. ctx, cancel := context.WithCancel(context.Background())
  388. defer cancel()
  389. // Get the datastore object reference from the datastore name
  390. datastoreRef, err := nodeInfo.VSphere.GetDatastoreRefFromName(ctx, nodeInfo.DataCenterRef, datastoreName)
  391. if err != nil {
  392. framework.ExpectNoError(err)
  393. }
  394. // Find common datastores among the specified zones
  395. var datastoreCountMap = make(map[string]int)
  396. numZones := len(zones)
  397. var commonDatastores []string
  398. for _, zone := range zones {
  399. datastoreInZone := TestContext.NodeMapper.GetDatastoresInZone(nodeInfo.VSphere.Config.Hostname, zone)
  400. for _, datastore := range datastoreInZone {
  401. datastoreCountMap[datastore] = datastoreCountMap[datastore] + 1
  402. if datastoreCountMap[datastore] == numZones {
  403. commonDatastores = append(commonDatastores, datastore)
  404. }
  405. }
  406. }
  407. gomega.Expect(commonDatastores).To(gomega.ContainElement(datastoreRef.Value), "PV was created in an unsupported zone.")
  408. }
  409. }
  410. // Get vSphere Volume Path from PVC
  411. func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string {
  412. pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
  413. framework.ExpectNoError(err)
  414. pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
  415. framework.ExpectNoError(err)
  416. return pv.Spec.VsphereVolume.VolumePath
  417. }
  418. // Get canonical volume path for volume Path.
  419. // Example1: The canonical path for volume path - [vsanDatastore] kubevols/volume.vmdk will be [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk
  420. // Example2: The canonical path for volume path - [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk will be same as volume Path.
  421. func getCanonicalVolumePath(ctx context.Context, dc *object.Datacenter, volumePath string) (string, error) {
  422. var folderID string
  423. canonicalVolumePath := volumePath
  424. dsPathObj, err := getDatastorePathObjFromVMDiskPath(volumePath)
  425. if err != nil {
  426. return "", err
  427. }
  428. dsPath := strings.Split(strings.TrimSpace(dsPathObj.Path), "/")
  429. if len(dsPath) <= 1 {
  430. return canonicalVolumePath, nil
  431. }
  432. datastore := dsPathObj.Datastore
  433. dsFolder := dsPath[0]
  434. // Get the datastore folder ID if datastore or folder doesn't exist in datastoreFolderIDMap
  435. if !isValidUUID(dsFolder) {
  436. dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + DummyDiskName
  437. // Querying a non-existent dummy disk on the datastore folder.
  438. // It would fail and return an folder ID in the error message.
  439. _, err := getVirtualDiskPage83Data(ctx, dc, dummyDiskVolPath)
  440. if err != nil {
  441. re := regexp.MustCompile("File (.*?) was not found")
  442. match := re.FindStringSubmatch(err.Error())
  443. canonicalVolumePath = match[1]
  444. }
  445. }
  446. diskPath := getPathFromVMDiskPath(canonicalVolumePath)
  447. if diskPath == "" {
  448. return "", fmt.Errorf("Failed to parse canonicalVolumePath: %s in getcanonicalVolumePath method", canonicalVolumePath)
  449. }
  450. folderID = strings.Split(strings.TrimSpace(diskPath), "/")[0]
  451. canonicalVolumePath = strings.Replace(volumePath, dsFolder, folderID, 1)
  452. return canonicalVolumePath, nil
  453. }
  454. // getPathFromVMDiskPath retrieves the path from VM Disk Path.
  455. // Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the path is kubevols/volume.vmdk
  456. func getPathFromVMDiskPath(vmDiskPath string) string {
  457. datastorePathObj := new(object.DatastorePath)
  458. isSuccess := datastorePathObj.FromString(vmDiskPath)
  459. if !isSuccess {
  460. e2elog.Logf("Failed to parse vmDiskPath: %s", vmDiskPath)
  461. return ""
  462. }
  463. return datastorePathObj.Path
  464. }
  465. //getDatastorePathObjFromVMDiskPath gets the datastorePathObj from VM disk path.
  466. func getDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) {
  467. datastorePathObj := new(object.DatastorePath)
  468. isSuccess := datastorePathObj.FromString(vmDiskPath)
  469. if !isSuccess {
  470. e2elog.Logf("Failed to parse volPath: %s", vmDiskPath)
  471. return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath)
  472. }
  473. return datastorePathObj, nil
  474. }
  475. // getVirtualDiskPage83Data gets the virtual disk UUID by diskPath
  476. func getVirtualDiskPage83Data(ctx context.Context, dc *object.Datacenter, diskPath string) (string, error) {
  477. if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" {
  478. diskPath += ".vmdk"
  479. }
  480. vdm := object.NewVirtualDiskManager(dc.Client())
  481. // Returns uuid of vmdk virtual disk
  482. diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc)
  483. if err != nil {
  484. klog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err)
  485. return "", err
  486. }
  487. diskUUID = formatVirtualDiskUUID(diskUUID)
  488. return diskUUID, nil
  489. }
  490. // formatVirtualDiskUUID removes any spaces and hyphens in UUID
  491. // Example UUID input is 42375390-71f9-43a3-a770-56803bcd7baa and output after format is 4237539071f943a3a77056803bcd7baa
  492. func formatVirtualDiskUUID(uuid string) string {
  493. uuidwithNoSpace := strings.Replace(uuid, " ", "", -1)
  494. uuidWithNoHypens := strings.Replace(uuidwithNoSpace, "-", "", -1)
  495. return strings.ToLower(uuidWithNoHypens)
  496. }
  497. //isValidUUID checks if the string is a valid UUID.
  498. func isValidUUID(uuid string) bool {
  499. r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
  500. return r.MatchString(uuid)
  501. }
  502. // removeStorageClusterORFolderNameFromVDiskPath removes the cluster or folder path from the vDiskPath
  503. // for vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
  504. // for vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value remains same [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
  505. func removeStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string {
  506. datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1]
  507. if filepath.Base(datastore) != datastore {
  508. vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
  509. }
  510. return vDiskPath
  511. }
  512. // getVirtualDeviceByPath gets the virtual device by path
  513. func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) {
  514. vmDevices, err := vm.Device(ctx)
  515. if err != nil {
  516. e2elog.Logf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
  517. return nil, err
  518. }
  519. // filter vm devices to retrieve device for the given vmdk file identified by disk path
  520. for _, device := range vmDevices {
  521. if vmDevices.TypeName(device) == "VirtualDisk" {
  522. virtualDevice := device.GetVirtualDevice()
  523. if backing, ok := virtualDevice.Backing.(*vim25types.VirtualDiskFlatVer2BackingInfo); ok {
  524. if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
  525. e2elog.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
  526. return device, nil
  527. } else {
  528. e2elog.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
  529. }
  530. }
  531. }
  532. }
  533. return nil, nil
  534. }
  535. func matchVirtualDiskAndVolPath(diskPath, volPath string) bool {
  536. fileExt := ".vmdk"
  537. diskPath = strings.TrimSuffix(diskPath, fileExt)
  538. volPath = strings.TrimSuffix(volPath, fileExt)
  539. return diskPath == volPath
  540. }
  541. // convertVolPathsToDevicePaths removes cluster or folder path from volPaths and convert to canonicalPath
  542. func convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[string][]string) (map[string][]string, error) {
  543. vmVolumes := make(map[string][]string)
  544. for nodeName, volPaths := range nodeVolumes {
  545. nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
  546. datacenter := nodeInfo.VSphere.GetDatacenterFromObjectReference(ctx, nodeInfo.DataCenterRef)
  547. for i, volPath := range volPaths {
  548. deviceVolPath, err := convertVolPathToDevicePath(ctx, datacenter, volPath)
  549. if err != nil {
  550. e2elog.Logf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err)
  551. return nil, err
  552. }
  553. volPaths[i] = deviceVolPath
  554. }
  555. vmVolumes[nodeName] = volPaths
  556. }
  557. return vmVolumes, nil
  558. }
  559. // convertVolPathToDevicePath takes volPath and returns canonical volume path
  560. func convertVolPathToDevicePath(ctx context.Context, dc *object.Datacenter, volPath string) (string, error) {
  561. volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
  562. // Get the canonical volume path for volPath.
  563. canonicalVolumePath, err := getCanonicalVolumePath(ctx, dc, volPath)
  564. if err != nil {
  565. e2elog.Logf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
  566. return "", err
  567. }
  568. // Check if the volume path contains .vmdk extension. If not, add the extension and update the nodeVolumes Map
  569. if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" {
  570. canonicalVolumePath += ".vmdk"
  571. }
  572. return canonicalVolumePath, nil
  573. }
  574. // get .vmx file path for a virtual machine
  575. func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) {
  576. ctx, cancel := context.WithCancel(context.Background())
  577. defer cancel()
  578. var nodeVM mo.VirtualMachine
  579. err := vmObject.Properties(ctx, vmObject.Reference(), []string{"config.files"}, &nodeVM)
  580. framework.ExpectNoError(err)
  581. gomega.Expect(nodeVM.Config).NotTo(gomega.BeNil())
  582. vmxPath = nodeVM.Config.Files.VmPathName
  583. e2elog.Logf("vmx file path is %s", vmxPath)
  584. return vmxPath
  585. }
  586. // verify ready node count. Try upto 3 minutes. Return true if count is expected count
  587. func verifyReadyNodeCount(client clientset.Interface, expectedNodes int) bool {
  588. numNodes := 0
  589. for i := 0; i < 36; i++ {
  590. nodeList := framework.GetReadySchedulableNodesOrDie(client)
  591. gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
  592. numNodes = len(nodeList.Items)
  593. if numNodes == expectedNodes {
  594. break
  595. }
  596. time.Sleep(5 * time.Second)
  597. }
  598. return (numNodes == expectedNodes)
  599. }
  600. // poweroff nodeVM and confirm the poweroff state
  601. func poweroffNodeVM(nodeName string, vm *object.VirtualMachine) {
  602. ctx, cancel := context.WithCancel(context.Background())
  603. defer cancel()
  604. e2elog.Logf("Powering off node VM %s", nodeName)
  605. _, err := vm.PowerOff(ctx)
  606. framework.ExpectNoError(err)
  607. err = vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOff)
  608. framework.ExpectNoError(err, "Unable to power off the node")
  609. }
  610. // poweron nodeVM and confirm the poweron state
  611. func poweronNodeVM(nodeName string, vm *object.VirtualMachine) {
  612. ctx, cancel := context.WithCancel(context.Background())
  613. defer cancel()
  614. e2elog.Logf("Powering on node VM %s", nodeName)
  615. vm.PowerOn(ctx)
  616. err := vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOn)
  617. framework.ExpectNoError(err, "Unable to power on the node")
  618. }
  619. // unregister a nodeVM from VC
  620. func unregisterNodeVM(nodeName string, vm *object.VirtualMachine) {
  621. ctx, cancel := context.WithCancel(context.Background())
  622. defer cancel()
  623. poweroffNodeVM(nodeName, vm)
  624. e2elog.Logf("Unregistering node VM %s", nodeName)
  625. err := vm.Unregister(ctx)
  626. framework.ExpectNoError(err, "Unable to unregister the node")
  627. }
  628. // register a nodeVM into a VC
  629. func registerNodeVM(nodeName, workingDir, vmxFilePath string, rpool *object.ResourcePool, host *object.HostSystem) {
  630. ctx, cancel := context.WithCancel(context.Background())
  631. defer cancel()
  632. e2elog.Logf("Registering node VM %s with vmx file path %s", nodeName, vmxFilePath)
  633. nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
  634. finder := find.NewFinder(nodeInfo.VSphere.Client.Client, false)
  635. vmFolder, err := finder.FolderOrDefault(ctx, workingDir)
  636. framework.ExpectNoError(err)
  637. registerTask, err := vmFolder.RegisterVM(ctx, vmxFilePath, nodeName, false, rpool, host)
  638. framework.ExpectNoError(err)
  639. err = registerTask.Wait(ctx)
  640. framework.ExpectNoError(err)
  641. vmPath := filepath.Join(workingDir, nodeName)
  642. vm, err := finder.VirtualMachine(ctx, vmPath)
  643. framework.ExpectNoError(err)
  644. poweronNodeVM(nodeName, vm)
  645. }
  646. // disksAreAttached takes map of node and it's volumes and returns map of node, its volumes and attachment state
  647. func disksAreAttached(nodeVolumes map[string][]string) (nodeVolumesAttachMap map[string]map[string]bool, err error) {
  648. ctx, cancel := context.WithCancel(context.Background())
  649. defer cancel()
  650. disksAttached := make(map[string]map[string]bool)
  651. if len(nodeVolumes) == 0 {
  652. return disksAttached, nil
  653. }
  654. // Convert VolPaths into canonical form so that it can be compared with the VM device path.
  655. vmVolumes, err := convertVolPathsToDevicePaths(ctx, nodeVolumes)
  656. if err != nil {
  657. e2elog.Logf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
  658. return nil, err
  659. }
  660. for vm, volumes := range vmVolumes {
  661. volumeAttachedMap := make(map[string]bool)
  662. for _, volume := range volumes {
  663. attached, err := diskIsAttached(volume, vm)
  664. if err != nil {
  665. return nil, err
  666. }
  667. volumeAttachedMap[volume] = attached
  668. }
  669. nodeVolumesAttachMap[vm] = volumeAttachedMap
  670. }
  671. return disksAttached, nil
  672. }
  673. // diskIsAttached returns if disk is attached to the VM using controllers supported by the plugin.
  674. func diskIsAttached(volPath string, nodeName string) (bool, error) {
  675. // Create context
  676. ctx, cancel := context.WithCancel(context.Background())
  677. defer cancel()
  678. nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
  679. Connect(ctx, nodeInfo.VSphere)
  680. vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
  681. volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
  682. device, err := getVirtualDeviceByPath(ctx, vm, volPath)
  683. if err != nil {
  684. e2elog.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q",
  685. volPath,
  686. nodeName)
  687. return false, err
  688. }
  689. if device == nil {
  690. return false, nil
  691. }
  692. e2elog.Logf("diskIsAttached found the disk %q attached on node %q", volPath, nodeName)
  693. return true, nil
  694. }
  695. // getUUIDFromProviderID strips ProviderPrefix - "vsphere://" from the providerID
  696. // this gives the VM UUID which can be used to find Node VM from vCenter
  697. func getUUIDFromProviderID(providerID string) string {
  698. return strings.TrimPrefix(providerID, ProviderPrefix)
  699. }
// GetReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state
func GetReadySchedulableNodeInfos() []*NodeInfo {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
var nodesInfo []*NodeInfo
for _, node := range nodeList.Items {
// Nodes without a mapped NodeInfo (e.g. not yet registered with the
// NodeMapper) are silently skipped rather than reported as an error.
nodeInfo := TestContext.NodeMapper.GetNodeInfo(node.Name)
if nodeInfo != nil {
nodesInfo = append(nodesInfo, nodeInfo)
}
}
return nodesInfo
}
  713. // GetReadySchedulableRandomNodeInfo returns NodeInfo object for one of the Ready and Schedulable Node.
  714. // if multiple nodes are present with Ready and Scheduable state then one of the Node is selected randomly
  715. // and it's associated NodeInfo object is returned.
  716. func GetReadySchedulableRandomNodeInfo() *NodeInfo {
  717. nodesInfo := GetReadySchedulableNodeInfos()
  718. gomega.Expect(nodesInfo).NotTo(gomega.BeEmpty())
  719. return nodesInfo[rand.Int()%len(nodesInfo)]
  720. }
  721. // invokeVCenterServiceControl invokes the given command for the given service
  722. // via service-control on the given vCenter host over SSH.
  723. func invokeVCenterServiceControl(command, service, host string) error {
  724. sshCmd := fmt.Sprintf("service-control --%s %s", command, service)
  725. e2elog.Logf("Invoking command %v on vCenter host %v", sshCmd, host)
  726. result, err := e2essh.SSH(sshCmd, host, framework.TestContext.Provider)
  727. if err != nil || result.Code != 0 {
  728. e2essh.LogResult(result)
  729. return fmt.Errorf("couldn't execute command: %s on vCenter host: %v", sshCmd, err)
  730. }
  731. return nil
  732. }
  733. // expectVolumeToBeAttached checks if the given Volume is attached to the given
  734. // Node, else fails.
  735. func expectVolumeToBeAttached(nodeName, volumePath string) {
  736. isAttached, err := diskIsAttached(volumePath, nodeName)
  737. framework.ExpectNoError(err)
  738. gomega.Expect(isAttached).To(gomega.BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
  739. }
  740. // expectVolumesToBeAttached checks if the given Volumes are attached to the
  741. // corresponding set of Nodes, else fails.
  742. func expectVolumesToBeAttached(pods []*v1.Pod, volumePaths []string) {
  743. for i, pod := range pods {
  744. nodeName := pod.Spec.NodeName
  745. volumePath := volumePaths[i]
  746. ginkgo.By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName))
  747. expectVolumeToBeAttached(nodeName, volumePath)
  748. }
  749. }
  750. // expectFilesToBeAccessible checks if the given files are accessible on the
  751. // corresponding set of Nodes, else fails.
  752. func expectFilesToBeAccessible(namespace string, pods []*v1.Pod, filePaths []string) {
  753. for i, pod := range pods {
  754. podName := pod.Name
  755. filePath := filePaths[i]
  756. ginkgo.By(fmt.Sprintf("Verifying that file %v is accessible on pod %v", filePath, podName))
  757. verifyFilesExistOnVSphereVolume(namespace, podName, filePath)
  758. }
  759. }
  760. // writeContentToPodFile writes the given content to the specified file.
  761. func writeContentToPodFile(namespace, podName, filePath, content string) error {
  762. _, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName,
  763. "--", "/bin/sh", "-c", fmt.Sprintf("echo '%s' > %s", content, filePath))
  764. return err
  765. }
  766. // expectFileContentToMatch checks if a given file contains the specified
  767. // content, else fails.
  768. func expectFileContentToMatch(namespace, podName, filePath, content string) {
  769. _, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName,
  770. "--", "/bin/sh", "-c", fmt.Sprintf("grep '%s' %s", content, filePath))
  771. framework.ExpectNoError(err, fmt.Sprintf("failed to match content of file: %q on the pod: %q", filePath, podName))
  772. }
  773. // expectFileContentsToMatch checks if the given contents match the ones present
  774. // in corresponding files on respective Pods, else fails.
  775. func expectFileContentsToMatch(namespace string, pods []*v1.Pod, filePaths []string, contents []string) {
  776. for i, pod := range pods {
  777. podName := pod.Name
  778. filePath := filePaths[i]
  779. ginkgo.By(fmt.Sprintf("Matching file content for %v on pod %v", filePath, podName))
  780. expectFileContentToMatch(namespace, podName, filePath, contents[i])
  781. }
  782. }