// vsphere_utils.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"context"
	"fmt"
	"path/filepath"
	"regexp"
	"strings"
	"time"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	"github.com/vmware/govmomi/find"
	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/mo"
	vim25types "github.com/vmware/govmomi/vim25/types"
	"k8s.io/klog"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

const (
	volumesPerNode = 55
	storageclass1  = "sc-default"
	storageclass2  = "sc-vsan"
	storageclass3  = "sc-spbm"
	storageclass4  = "sc-user-specified-ds"
	dummyDiskName  = "kube-dummyDisk.vmdk"
	providerPrefix = "vsphere://"
)

// volumeState represents the state of a volume.
type volumeState int32

const (
	volumeStateDetached volumeState = 1
	volumeStateAttached volumeState = 2
)

// waitForVSphereDisksToDetach waits until the given vSphere volumes are detached
// from the listed nodes, or times out after 5 minutes.
func waitForVSphereDisksToDetach(nodeVolumes map[string][]string) error {
	var (
		detachTimeout  = 5 * time.Minute
		detachPollTime = 10 * time.Second
	)
	waitErr := wait.Poll(detachPollTime, detachTimeout, func() (bool, error) {
		attachedResult, err := disksAreAttached(nodeVolumes)
		if err != nil {
			return false, err
		}
		for nodeName, nodeVolumes := range attachedResult {
			for volumePath, attached := range nodeVolumes {
				if attached {
					framework.Logf("Volume %q is still attached to %q.", volumePath, nodeName)
					return false, nil
				}
			}
		}
		framework.Logf("Volumes are successfully detached from all the nodes: %+v", nodeVolumes)
		return true, nil
	})
	if waitErr != nil {
		if waitErr == wait.ErrWaitTimeout {
			return fmt.Errorf("volumes have not detached after %v: %v", detachTimeout, waitErr)
		}
		return fmt.Errorf("error waiting for volumes to detach: %v", waitErr)
	}
	return nil
}

// waitForVSphereDiskStatus waits until the vSphere vmdk reaches the expected state
// on the given node, or times out after 6 minutes.
func waitForVSphereDiskStatus(volumePath string, nodeName string, expectedState volumeState) error {
	var (
		currentState volumeState
		timeout      = 6 * time.Minute
		pollTime     = 10 * time.Second
	)
	var attachedState = map[bool]volumeState{
		true:  volumeStateAttached,
		false: volumeStateDetached,
	}
	var attachedStateMsg = map[volumeState]string{
		volumeStateAttached: "attached to",
		volumeStateDetached: "detached from",
	}
	waitErr := wait.Poll(pollTime, timeout, func() (bool, error) {
		diskAttached, err := diskIsAttached(volumePath, nodeName)
		if err != nil {
			return true, err
		}
		currentState = attachedState[diskAttached]
		if currentState == expectedState {
			framework.Logf("Volume %q has successfully %s %q", volumePath, attachedStateMsg[currentState], nodeName)
			return true, nil
		}
		framework.Logf("Waiting for Volume %q to be %s %q.", volumePath, attachedStateMsg[expectedState], nodeName)
		return false, nil
	})
	if waitErr != nil {
		if waitErr == wait.ErrWaitTimeout {
			return fmt.Errorf("volume %q is not %s %q after %v: %v", volumePath, attachedStateMsg[expectedState], nodeName, timeout, waitErr)
		}
		return fmt.Errorf("error waiting for volume %q to be %s %q: %v", volumePath, attachedStateMsg[expectedState], nodeName, waitErr)
	}
	return nil
}

// waitForVSphereDiskToAttach waits until the vSphere vmdk is attached to the given node, or times out after 6 minutes.
func waitForVSphereDiskToAttach(volumePath string, nodeName string) error {
	return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateAttached)
}

// waitForVSphereDiskToDetach waits until the vSphere vmdk is detached from the given node, or times out after 6 minutes.
func waitForVSphereDiskToDetach(volumePath string, nodeName string) error {
	return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateDetached)
}

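// Hypothetical usage sketch (not part of the upstream helpers): tests in this
// package typically pair the wait helpers above around pod lifecycle events,
// roughly like this, where volumePath and nodeName come from the calling test:
//
//	framework.ExpectNoError(waitForVSphereDiskToAttach(volumePath, nodeName))
//	// ... exercise the volume, then delete the pod that uses it ...
//	framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName))
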
// getVSpherePersistentVolumeSpec creates a vSphere persistent volume spec with the
// given VMDK volume path, reclaim policy and labels.
func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy, labels map[string]string) *v1.PersistentVolume {
	return e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
		NamePrefix: "vspherepv-",
		PVSource: v1.PersistentVolumeSource{
			VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
				VolumePath: volumePath,
				FSType:     "ext4",
			},
		},
		ReclaimPolicy: persistentVolumeReclaimPolicy,
		Capacity:      "2Gi",
		AccessModes: []v1.PersistentVolumeAccessMode{
			v1.ReadWriteOnce,
		},
		Labels: labels,
	})
}

// getVSpherePersistentVolumeClaimSpec creates a vSphere persistent volume claim spec
// with the given selector labels.
func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]string) *v1.PersistentVolumeClaim {
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    namespace,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
				},
			},
		},
	}
	if labels != nil {
		pvc.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels}
	}
	return pvc
}

// writeContentToVSpherePV writes the given content to the volume backed by the given PVC.
func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
	utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
	framework.Logf("Done with writing content to volume")
}

// verifyContentOfVSpherePV verifies that the volume backed by the given PVC contains the expected content.
func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
	utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
	framework.Logf("Successfully verified content of the volume")
}

// getVSphereStorageClassSpec returns a StorageClass spec for the vSphere provisioner
// with the given parameters, allowed zones and volume binding mode.
func getVSphereStorageClassSpec(name string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) *storagev1.StorageClass {
	sc := &storagev1.StorageClass{
		TypeMeta: metav1.TypeMeta{
			Kind: "StorageClass",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Provisioner: "kubernetes.io/vsphere-volume",
	}
	if scParameters != nil {
		sc.Parameters = scParameters
	}
	if zones != nil {
		term := v1.TopologySelectorTerm{
			MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
				{
					Key:    v1.LabelZoneFailureDomain,
					Values: zones,
				},
			},
		}
		sc.AllowedTopologies = append(sc.AllowedTopologies, term)
	}
	if volumeBindingMode != "" {
		mode := volumeBindingMode
		sc.VolumeBindingMode = &mode
	}
	return sc
}

// getVSphereClaimSpecWithStorageClass returns a PVC spec of the given size that uses the given StorageClass.
func getVSphereClaimSpecWithStorageClass(ns string, diskSize string, storageclass *storagev1.StorageClass) *v1.PersistentVolumeClaim {
	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    ns,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse(diskSize),
				},
			},
			StorageClassName: &(storageclass.Name),
		},
	}
	return claim
}

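// Hypothetical usage sketch (not part of the upstream file): dynamic-provisioning
// tests assemble the specs above roughly like this, assuming a clientset client
// and namespace ns from the calling test (the "diskformat" parameter is just an
// example vSphere StorageClass parameter):
//
//	sc, err := client.StorageV1().StorageClasses().Create(context.TODO(),
//		getVSphereStorageClassSpec("thin-sc", map[string]string{"diskformat": "thin"}, nil, ""), metav1.CreateOptions{})
//	framework.ExpectNoError(err)
//	pvc, err := client.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(),
//		getVSphereClaimSpecWithStorageClass(ns, "2Gi", sc), metav1.CreateOptions{})
//	framework.ExpectNoError(err)
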
// getVSpherePodSpecWithClaim returns a pod spec with the given volume claim,
// node selector labels and command.
func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]string, command string) *v1.Pod {
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pod-pvc-",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    "volume-tester",
					Image:   imageutils.GetE2EImage(imageutils.BusyBox),
					Command: []string{"/bin/sh"},
					Args:    []string{"-c", command},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "my-volume",
							MountPath: "/mnt/test",
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name: "my-volume",
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: claimName,
							ReadOnly:  false,
						},
					},
				},
			},
		},
	}
	if nodeSelectorKV != nil {
		pod.Spec.NodeSelector = nodeSelectorKV
	}
	return pod
}

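// Hypothetical usage sketch (not part of the upstream file): given a bound claim,
// a test can schedule a writer pod and later verify the written file with the
// helpers defined further below, e.g.
//
//	pod, err := client.CoreV1().Pods(ns).Create(context.TODO(),
//		getVSpherePodSpecWithClaim(pvc.Name, nil, "echo hello > /mnt/test/data && sleep 3600"), metav1.CreateOptions{})
//	framework.ExpectNoError(err)
//	// once the pod is running:
//	verifyFilesExistOnVSphereVolume(ns, pod.Name, "/mnt/test/data")
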
// getVSpherePodSpecWithVolumePaths returns a pod spec with the given volume paths,
// node selector labels and container commands.
func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[string]string, commands []string) *v1.Pod {
	var volumeMounts []v1.VolumeMount
	var volumes []v1.Volume
	for index, volumePath := range volumePaths {
		name := fmt.Sprintf("volume%v", index+1)
		volumeMounts = append(volumeMounts, v1.VolumeMount{Name: name, MountPath: "/mnt/" + name})
		vsphereVolume := &v1.VsphereVirtualDiskVolumeSource{
			VolumePath: volumePath,
			FSType:     "ext4",
		}
		volumes = append(volumes, v1.Volume{
			Name:         name,
			VolumeSource: v1.VolumeSource{VsphereVolume: vsphereVolume},
		})
	}
	if len(commands) == 0 {
		commands = []string{
			"/bin/sh",
			"-c",
			"while true; do sleep 2; done",
		}
	}
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "vsphere-e2e-",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:         "vsphere-e2e-container-" + string(uuid.NewUUID()),
					Image:        imageutils.GetE2EImage(imageutils.BusyBox),
					Command:      commands,
					VolumeMounts: volumeMounts,
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes:       volumes,
		},
	}
	if keyValuelabel != nil {
		pod.Spec.NodeSelector = keyValuelabel
	}
	return pod
}

// verifyFilesExistOnVSphereVolume verifies that the given files exist on the pod's volume.
func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) {
	for _, filePath := range filePaths {
		_, err := framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath)
		framework.ExpectNoError(err, fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName))
	}
}

// createEmptyFilesOnVSphereVolume creates the given empty files on the pod's volume.
func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) {
	for _, filePath := range filePaths {
		err := framework.CreateEmptyFileOnPod(namespace, podName, filePath)
		framework.ExpectNoError(err)
	}
}

// verifyVSphereVolumesAccessible verifies that the volumes are attached to the node
// and are accessible from the pod.
func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume) {
	nodeName := pod.Spec.NodeName
	namespace := pod.Namespace
	for index, pv := range persistentvolumes {
		// Verify disks are attached to the node.
		isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
		framework.ExpectNoError(err)
		framework.ExpectEqual(isAttached, true, fmt.Sprintf("disk %v is not attached to the node", pv.Spec.VsphereVolume.VolumePath))
		// Verify volumes are accessible by touching a file on each of them.
		filePath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
		_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filePath}, "", time.Minute)
		framework.ExpectNoError(err)
	}
}

// verifyVolumeCreationOnRightZone verifies that the volumes were created on a
// datastore that is common to all of the specified zones.
func verifyVolumeCreationOnRightZone(persistentvolumes []*v1.PersistentVolume, nodeName string, zones []string) {
	for _, pv := range persistentvolumes {
		volumePath := pv.Spec.VsphereVolume.VolumePath
		// Extract the datastore name from the volume path in the PV spec.
		// For example, "vsanDatastore" is extracted from "[vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk".
		datastorePathObj, _ := getDatastorePathObjFromVMDiskPath(volumePath)
		datastoreName := datastorePathObj.Datastore
		nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		// Get the datastore object reference from the datastore name.
		datastoreRef, err := nodeInfo.VSphere.GetDatastoreRefFromName(ctx, nodeInfo.DataCenterRef, datastoreName)
		framework.ExpectNoError(err)
		// Find the datastores common to all of the specified zones.
		datastoreCountMap := make(map[string]int)
		numZones := len(zones)
		var commonDatastores []string
		for _, zone := range zones {
			datastoreInZone := TestContext.NodeMapper.GetDatastoresInZone(nodeInfo.VSphere.Config.Hostname, zone)
			for _, datastore := range datastoreInZone {
				datastoreCountMap[datastore] = datastoreCountMap[datastore] + 1
				if datastoreCountMap[datastore] == numZones {
					commonDatastores = append(commonDatastores, datastore)
				}
			}
		}
		gomega.Expect(commonDatastores).To(gomega.ContainElement(datastoreRef.Value), "PV was created in an unsupported zone.")
	}
}

// getvSphereVolumePathFromClaim returns the vSphere volume path for the given PVC.
func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string {
	pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(), claimName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), pvclaim.Spec.VolumeName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return pv.Spec.VsphereVolume.VolumePath
}

// getCanonicalVolumePath returns the canonical path for the given volume path.
// Example 1: for the volume path "[vsanDatastore] kubevols/volume.vmdk", the canonical path
// is "[vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk".
// Example 2: for the volume path "[vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk",
// the canonical path is the same as the volume path.
func getCanonicalVolumePath(ctx context.Context, dc *object.Datacenter, volumePath string) (string, error) {
	var folderID string
	canonicalVolumePath := volumePath
	dsPathObj, err := getDatastorePathObjFromVMDiskPath(volumePath)
	if err != nil {
		return "", err
	}
	dsPath := strings.Split(strings.TrimSpace(dsPathObj.Path), "/")
	if len(dsPath) <= 1 {
		return canonicalVolumePath, nil
	}
	datastore := dsPathObj.Datastore
	dsFolder := dsPath[0]
	// Resolve the datastore folder ID if the folder name is not already a UUID.
	if !isValidUUID(dsFolder) {
		dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + dummyDiskName
		// Query a non-existent dummy disk in the datastore folder.
		// The query fails and returns a folder ID in the error message.
		_, err := getVirtualDiskPage83Data(ctx, dc, dummyDiskVolPath)
		if err != nil {
			re := regexp.MustCompile("File (.*?) was not found")
			match := re.FindStringSubmatch(err.Error())
			canonicalVolumePath = match[1]
		}
	}
	diskPath := getPathFromVMDiskPath(canonicalVolumePath)
	if diskPath == "" {
		return "", fmt.Errorf("failed to parse canonicalVolumePath: %s in getCanonicalVolumePath method", canonicalVolumePath)
	}
	folderID = strings.Split(strings.TrimSpace(diskPath), "/")[0]
	canonicalVolumePath = strings.Replace(volumePath, dsFolder, folderID, 1)
	return canonicalVolumePath, nil
}

// getPathFromVMDiskPath retrieves the path from a VM disk path.
// Example: for the vmDiskPath "[vsanDatastore] kubevols/volume.vmdk", the path is "kubevols/volume.vmdk".
func getPathFromVMDiskPath(vmDiskPath string) string {
	datastorePathObj := new(object.DatastorePath)
	isSuccess := datastorePathObj.FromString(vmDiskPath)
	if !isSuccess {
		framework.Logf("Failed to parse vmDiskPath: %s", vmDiskPath)
		return ""
	}
	return datastorePathObj.Path
}

// getDatastorePathObjFromVMDiskPath gets the datastorePathObj from a VM disk path.
func getDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) {
	datastorePathObj := new(object.DatastorePath)
	isSuccess := datastorePathObj.FromString(vmDiskPath)
	if !isSuccess {
		framework.Logf("Failed to parse volPath: %s", vmDiskPath)
		return nil, fmt.Errorf("failed to parse volPath: %s", vmDiskPath)
	}
	return datastorePathObj, nil
}

// getVirtualDiskPage83Data gets the virtual disk UUID for the given disk path.
func getVirtualDiskPage83Data(ctx context.Context, dc *object.Datacenter, diskPath string) (string, error) {
	if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" {
		diskPath += ".vmdk"
	}
	vdm := object.NewVirtualDiskManager(dc.Client())
	// Returns the UUID of the vmdk virtual disk.
	diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc)
	if err != nil {
		klog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err)
		return "", err
	}
	diskUUID = formatVirtualDiskUUID(diskUUID)
	return diskUUID, nil
}

// formatVirtualDiskUUID removes any spaces and hyphens from a UUID and lowercases it.
// Example: the input "42375390-71f9-43a3-a770-56803bcd7baa" becomes "4237539071f943a3a77056803bcd7baa".
func formatVirtualDiskUUID(uuid string) string {
	uuidWithNoSpace := strings.Replace(uuid, " ", "", -1)
	uuidWithNoHyphens := strings.Replace(uuidWithNoSpace, "-", "", -1)
	return strings.ToLower(uuidWithNoHyphens)
}

// isValidUUID checks whether the string is a valid UUID.
func isValidUUID(uuid string) bool {
	r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
	return r.MatchString(uuid)
}

// removeStorageClusterORFolderNameFromVDiskPath removes the datastore cluster or folder path from the vDiskPath.
// For the vDiskPath "[DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk", the return value is "[sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk".
// For the vDiskPath "[sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk", the return value is unchanged.
func removeStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string {
	datastore := regexp.MustCompile(`\[(.*?)\]`).FindStringSubmatch(vDiskPath)[1]
	if filepath.Base(datastore) != datastore {
		vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
	}
	return vDiskPath
}

// getVirtualDeviceByPath gets the virtual device for the given disk path.
func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) {
	vmDevices, err := vm.Device(ctx)
	if err != nil {
		framework.Logf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
		return nil, err
	}
	// Filter the VM devices to retrieve the device for the vmdk file identified by the disk path.
	for _, device := range vmDevices {
		if vmDevices.TypeName(device) == "VirtualDisk" {
			virtualDevice := device.GetVirtualDevice()
			if backing, ok := virtualDevice.Backing.(*vim25types.VirtualDiskFlatVer2BackingInfo); ok {
				if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
					framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
					return device, nil
				}
				framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
			}
		}
	}
	return nil, nil
}

// matchVirtualDiskAndVolPath compares a disk path and a volume path, ignoring the ".vmdk" extension.
func matchVirtualDiskAndVolPath(diskPath, volPath string) bool {
	fileExt := ".vmdk"
	diskPath = strings.TrimSuffix(diskPath, fileExt)
	volPath = strings.TrimSuffix(volPath, fileExt)
	return diskPath == volPath
}

// convertVolPathsToDevicePaths removes the cluster or folder path from the volPaths
// and converts them to canonical paths.
func convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[string][]string) (map[string][]string, error) {
	vmVolumes := make(map[string][]string)
	for nodeName, volPaths := range nodeVolumes {
		nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
		datacenter := nodeInfo.VSphere.GetDatacenterFromObjectReference(ctx, nodeInfo.DataCenterRef)
		for i, volPath := range volPaths {
			deviceVolPath, err := convertVolPathToDevicePath(ctx, datacenter, volPath)
			if err != nil {
				framework.Logf("Failed to convert vsphere volume path %s to device path. err: %+v", volPath, err)
				return nil, err
			}
			volPaths[i] = deviceVolPath
		}
		vmVolumes[nodeName] = volPaths
	}
	return vmVolumes, nil
}

// convertVolPathToDevicePath takes a volPath and returns its canonical volume path.
func convertVolPathToDevicePath(ctx context.Context, dc *object.Datacenter, volPath string) (string, error) {
	volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
	// Get the canonical volume path for volPath.
	canonicalVolumePath, err := getCanonicalVolumePath(ctx, dc, volPath)
	if err != nil {
		framework.Logf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
		return "", err
	}
	// Add the ".vmdk" extension if the canonical path does not already have it.
	if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" {
		canonicalVolumePath += ".vmdk"
	}
	return canonicalVolumePath, nil
}

// getVMXFilePath returns the .vmx file path for the given virtual machine.
func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var nodeVM mo.VirtualMachine
	err := vmObject.Properties(ctx, vmObject.Reference(), []string{"config.files"}, &nodeVM)
	framework.ExpectNoError(err)
	gomega.Expect(nodeVM.Config).NotTo(gomega.BeNil())
	vmxPath = nodeVM.Config.Files.VmPathName
	framework.Logf("vmx file path is %s", vmxPath)
	return vmxPath
}

// verifyReadyNodeCount verifies the number of ready nodes, retrying for up to 3 minutes.
// It returns true if the count matches the expected count.
func verifyReadyNodeCount(client clientset.Interface, expectedNodes int) bool {
	numNodes := 0
	for i := 0; i < 36; i++ {
		nodeList, err := e2enode.GetReadySchedulableNodes(client)
		framework.ExpectNoError(err)
		numNodes = len(nodeList.Items)
		if numNodes == expectedNodes {
			break
		}
		time.Sleep(5 * time.Second)
	}
	return numNodes == expectedNodes
}

// poweroffNodeVM powers off the node VM and confirms the powered-off state.
func poweroffNodeVM(nodeName string, vm *object.VirtualMachine) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	framework.Logf("Powering off node VM %s", nodeName)
	_, err := vm.PowerOff(ctx)
	framework.ExpectNoError(err)
	err = vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOff)
	framework.ExpectNoError(err, "Unable to power off the node")
}

// poweronNodeVM powers on the node VM and confirms the powered-on state.
func poweronNodeVM(nodeName string, vm *object.VirtualMachine) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	framework.Logf("Powering on node VM %s", nodeName)
	vm.PowerOn(ctx)
	err := vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOn)
	framework.ExpectNoError(err, "Unable to power on the node")
}

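// Hypothetical usage sketch (not part of the upstream file): disruptive tests
// combine the power helpers with the attach/detach waiters, for example
//
//	vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
//	poweroffNodeVM(nodeName, vm)
//	framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName))
//	poweronNodeVM(nodeName, vm)
//
// where nodeInfo, nodeName and volumePath come from the calling test.
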
// unregisterNodeVM powers off and unregisters a node VM from vCenter.
func unregisterNodeVM(nodeName string, vm *object.VirtualMachine) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	poweroffNodeVM(nodeName, vm)
	framework.Logf("Unregistering node VM %s", nodeName)
	err := vm.Unregister(ctx)
	framework.ExpectNoError(err, "Unable to unregister the node")
}

// registerNodeVM registers a node VM with vCenter and powers it on.
func registerNodeVM(nodeName, workingDir, vmxFilePath string, rpool *object.ResourcePool, host *object.HostSystem) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	framework.Logf("Registering node VM %s with vmx file path %s", nodeName, vmxFilePath)
	nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
	finder := find.NewFinder(nodeInfo.VSphere.Client.Client, false)
	vmFolder, err := finder.FolderOrDefault(ctx, workingDir)
	framework.ExpectNoError(err)
	registerTask, err := vmFolder.RegisterVM(ctx, vmxFilePath, nodeName, false, rpool, host)
	framework.ExpectNoError(err)
	err = registerTask.Wait(ctx)
	framework.ExpectNoError(err)
	vmPath := filepath.Join(workingDir, nodeName)
	vm, err := finder.VirtualMachine(ctx, vmPath)
	framework.ExpectNoError(err)
	poweronNodeVM(nodeName, vm)
}

// disksAreAttached takes a map of node names to volume paths and returns a map of
// node names to volume paths to their attachment state.
func disksAreAttached(nodeVolumes map[string][]string) (map[string]map[string]bool, error) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	disksAttached := make(map[string]map[string]bool)
	if len(nodeVolumes) == 0 {
		return disksAttached, nil
	}
	// Convert the volume paths into canonical form so they can be compared with the VM device paths.
	vmVolumes, err := convertVolPathsToDevicePaths(ctx, nodeVolumes)
	if err != nil {
		framework.Logf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
		return nil, err
	}
	for vm, volumes := range vmVolumes {
		volumeAttachedMap := make(map[string]bool)
		for _, volume := range volumes {
			attached, err := diskIsAttached(volume, vm)
			if err != nil {
				return nil, err
			}
			volumeAttachedMap[volume] = attached
		}
		disksAttached[vm] = volumeAttachedMap
	}
	return disksAttached, nil
}

// diskIsAttached reports whether the disk is attached to the VM using controllers supported by the plugin.
func diskIsAttached(volPath string, nodeName string) (bool, error) {
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
	Connect(ctx, nodeInfo.VSphere)
	vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
	volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
	device, err := getVirtualDeviceByPath(ctx, vm, volPath)
	if err != nil {
		framework.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q",
			volPath,
			nodeName)
		return false, err
	}
	if device == nil {
		return false, nil
	}
	framework.Logf("diskIsAttached found the disk %q attached on node %q", volPath, nodeName)
	return true, nil
}

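// Hypothetical usage sketch (not part of the upstream file): callers usually build
// a node-to-volumes map and hand it to the helpers above, e.g.
//
//	nodeVolumes := map[string][]string{nodeName: {volumePath}}
//	attached, err := disksAreAttached(nodeVolumes)
//	framework.ExpectNoError(err)
//	// ... later, after deleting the pods that use the volumes:
//	framework.ExpectNoError(waitForVSphereDisksToDetach(nodeVolumes))
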
// getUUIDFromProviderID strips the provider prefix "vsphere://" from the providerID.
// The result is the VM UUID, which can be used to find the node VM in vCenter.
func getUUIDFromProviderID(providerID string) string {
	return strings.TrimPrefix(providerID, providerPrefix)
}

// GetReadySchedulableNodeInfos returns NodeInfo objects for all nodes that are Ready and schedulable.
func GetReadySchedulableNodeInfos() []*NodeInfo {
	nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
	framework.ExpectNoError(err)
	var nodesInfo []*NodeInfo
	for _, node := range nodeList.Items {
		nodeInfo := TestContext.NodeMapper.GetNodeInfo(node.Name)
		if nodeInfo != nil {
			nodesInfo = append(nodesInfo, nodeInfo)
		}
	}
	return nodesInfo
}

// GetReadySchedulableRandomNodeInfo returns the NodeInfo object for one of the Ready and schedulable nodes.
// If multiple nodes are Ready and schedulable, one of them is selected at random
// and its associated NodeInfo object is returned.
func GetReadySchedulableRandomNodeInfo() *NodeInfo {
	nodesInfo := GetReadySchedulableNodeInfos()
	gomega.Expect(nodesInfo).NotTo(gomega.BeEmpty())
	return nodesInfo[rand.Int()%len(nodesInfo)]
}

// invokeVCenterServiceControl invokes the given command for the given service
// via service-control on the given vCenter host over SSH.
func invokeVCenterServiceControl(command, service, host string) error {
	sshCmd := fmt.Sprintf("service-control --%s %s", command, service)
	framework.Logf("Invoking command %v on vCenter host %v", sshCmd, host)
	result, err := e2essh.SSH(sshCmd, host, framework.TestContext.Provider)
	if err != nil || result.Code != 0 {
		e2essh.LogResult(result)
		return fmt.Errorf("couldn't execute command: %s on vCenter host: %v", sshCmd, err)
	}
	return nil
}

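// Hypothetical usage sketch (not part of the upstream file): service-disruption
// tests stop and later restart a vCenter service around their checks, e.g.
//
//	vcAddress := nodeInfo.VSphere.Config.Hostname + ":22" // assumes SSH on port 22
//	framework.ExpectNoError(invokeVCenterServiceControl("stop", "vpxd", vcAddress))
//	// ... verify behavior while the service is down ...
//	framework.ExpectNoError(invokeVCenterServiceControl("start", "vpxd", vcAddress))
//
// where nodeInfo comes from the calling test; "vpxd" is only an example service name.
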
// expectVolumeToBeAttached checks that the given volume is attached to the
// given node, and fails otherwise.
func expectVolumeToBeAttached(nodeName, volumePath string) {
	isAttached, err := diskIsAttached(volumePath, nodeName)
	framework.ExpectNoError(err)
	framework.ExpectEqual(isAttached, true, fmt.Sprintf("disk: %s is not attached to the node", volumePath))
}

// expectVolumesToBeAttached checks that the given volumes are attached to the
// corresponding set of nodes, and fails otherwise.
func expectVolumesToBeAttached(pods []*v1.Pod, volumePaths []string) {
	for i, pod := range pods {
		nodeName := pod.Spec.NodeName
		volumePath := volumePaths[i]
		ginkgo.By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName))
		expectVolumeToBeAttached(nodeName, volumePath)
	}
}

// expectFilesToBeAccessible checks that the given files are accessible on the
// corresponding set of pods, and fails otherwise.
func expectFilesToBeAccessible(namespace string, pods []*v1.Pod, filePaths []string) {
	for i, pod := range pods {
		podName := pod.Name
		filePath := filePaths[i]
		ginkgo.By(fmt.Sprintf("Verifying that file %v is accessible on pod %v", filePath, podName))
		verifyFilesExistOnVSphereVolume(namespace, podName, filePath)
	}
}

// writeContentToPodFile writes the given content to the specified file on the given pod.
func writeContentToPodFile(namespace, podName, filePath, content string) error {
	_, err := framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName,
		"--", "/bin/sh", "-c", fmt.Sprintf("echo '%s' > %s", content, filePath))
	return err
}

// expectFileContentToMatch checks that the given file contains the specified
// content, and fails otherwise.
func expectFileContentToMatch(namespace, podName, filePath, content string) {
	_, err := framework.RunKubectl(namespace, "exec", fmt.Sprintf("--namespace=%s", namespace), podName,
		"--", "/bin/sh", "-c", fmt.Sprintf("grep '%s' %s", content, filePath))
	framework.ExpectNoError(err, fmt.Sprintf("failed to match content of file: %q on the pod: %q", filePath, podName))
}

// expectFileContentsToMatch checks that the given contents match the ones present
// in the corresponding files on the respective pods, and fails otherwise.
func expectFileContentsToMatch(namespace string, pods []*v1.Pod, filePaths []string, contents []string) {
	for i, pod := range pods {
		podName := pod.Name
		filePath := filePaths[i]
		ginkgo.By(fmt.Sprintf("Matching file content for %v on pod %v", filePath, podName))
		expectFileContentToMatch(namespace, podName, filePath, contents[i])
	}
}