openstack_volumes.go 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738
  1. /*
  2. Copyright 2016 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package openstack
  14. import (
  15. "context"
  16. "errors"
  17. "fmt"
  18. "io/ioutil"
  19. "path"
  20. "path/filepath"
  21. "strings"
  22. "time"
  23. "k8s.io/api/core/v1"
  24. "k8s.io/apimachinery/pkg/api/resource"
  25. "k8s.io/apimachinery/pkg/types"
  26. cloudprovider "k8s.io/cloud-provider"
  27. cloudvolume "k8s.io/cloud-provider/volume"
  28. volerr "k8s.io/cloud-provider/volume/errors"
  29. volumehelpers "k8s.io/cloud-provider/volume/helpers"
  30. "github.com/gophercloud/gophercloud"
  31. volumeexpand "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions"
  32. volumes_v1 "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes"
  33. volumes_v2 "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
  34. volumes_v3 "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes"
  35. "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach"
  36. "github.com/prometheus/client_golang/prometheus"
  37. "k8s.io/klog"
  38. )
  39. type volumeService interface {
  40. createVolume(opts volumeCreateOpts) (string, string, error)
  41. getVolume(volumeID string) (Volume, error)
  42. deleteVolume(volumeName string) error
  43. expandVolume(volumeID string, newSize int) error
  44. }
// VolumesV1 is a Volumes implementation for cinder v1
type VolumesV1 struct {
	// blockstorage is the gophercloud client bound to the cinder v1 endpoint.
	blockstorage *gophercloud.ServiceClient
	// opts holds the block-storage configuration options.
	opts BlockStorageOpts
}
// VolumesV2 is a Volumes implementation for cinder v2
type VolumesV2 struct {
	// blockstorage is the gophercloud client bound to the cinder v2 endpoint.
	blockstorage *gophercloud.ServiceClient
	// opts holds the block-storage configuration options.
	opts BlockStorageOpts
}
// VolumesV3 is a Volumes implementation for cinder v3
type VolumesV3 struct {
	// blockstorage is the gophercloud client bound to the cinder v3 endpoint.
	blockstorage *gophercloud.ServiceClient
	// opts holds the block-storage configuration options.
	opts BlockStorageOpts
}
// Volume stores information about a single volume
type Volume struct {
	// AttachedServerID is the ID of the instance this volume is attached to; "" if not attached.
	AttachedServerID string
	// AttachedDevice is the device file path reported by cinder.
	// NOTE: cinder may report this path incorrectly — see issue #33128.
	AttachedDevice string
	// AvailabilityZone is which availability zone the volume is in.
	AvailabilityZone string
	// ID is the unique identifier for the volume.
	ID string
	// Name is the human-readable display name for the volume.
	Name string
	// Status is the current status of the volume (e.g. "available", "in-use").
	Status string
	// Size is the volume size in GB.
	Size int
}
// volumeCreateOpts is the version-independent set of parameters used to
// create a new cinder volume.
type volumeCreateOpts struct {
	// Size is the requested volume size in GB.
	Size int
	// Availability is the availability zone to create the volume in.
	Availability string
	// Name is the display name for the new volume.
	Name string
	// VolumeType selects the cinder volume type.
	VolumeType string
	// Metadata is attached to the volume as key/value pairs.
	Metadata map[string]string
}
// implements PVLabeler.
var _ cloudprovider.PVLabeler = (*OpenStack)(nil)

const (
	// Volume status strings as reported by cinder.
	volumeAvailableStatus = "available"
	volumeInUseStatus     = "in-use"
	volumeDeletedStatus   = "deleted"
	volumeErrorStatus     = "error"

	// On some environments, we need to query the metadata service in order
	// to locate disks. We'll use the Newton version, which includes device
	// metadata.
	newtonMetadataVersion = "2016-06-30"
)
  96. func (volumes *VolumesV1) createVolume(opts volumeCreateOpts) (string, string, error) {
  97. startTime := time.Now()
  98. createOpts := volumes_v1.CreateOpts{
  99. Name: opts.Name,
  100. Size: opts.Size,
  101. VolumeType: opts.VolumeType,
  102. AvailabilityZone: opts.Availability,
  103. Metadata: opts.Metadata,
  104. }
  105. vol, err := volumes_v1.Create(volumes.blockstorage, createOpts).Extract()
  106. timeTaken := time.Since(startTime).Seconds()
  107. recordOpenstackOperationMetric("create_v1_volume", timeTaken, err)
  108. if err != nil {
  109. return "", "", err
  110. }
  111. return vol.ID, vol.AvailabilityZone, nil
  112. }
  113. func (volumes *VolumesV2) createVolume(opts volumeCreateOpts) (string, string, error) {
  114. startTime := time.Now()
  115. createOpts := volumes_v2.CreateOpts{
  116. Name: opts.Name,
  117. Size: opts.Size,
  118. VolumeType: opts.VolumeType,
  119. AvailabilityZone: opts.Availability,
  120. Metadata: opts.Metadata,
  121. }
  122. vol, err := volumes_v2.Create(volumes.blockstorage, createOpts).Extract()
  123. timeTaken := time.Since(startTime).Seconds()
  124. recordOpenstackOperationMetric("create_v2_volume", timeTaken, err)
  125. if err != nil {
  126. return "", "", err
  127. }
  128. return vol.ID, vol.AvailabilityZone, nil
  129. }
  130. func (volumes *VolumesV3) createVolume(opts volumeCreateOpts) (string, string, error) {
  131. startTime := time.Now()
  132. createOpts := volumes_v3.CreateOpts{
  133. Name: opts.Name,
  134. Size: opts.Size,
  135. VolumeType: opts.VolumeType,
  136. AvailabilityZone: opts.Availability,
  137. Metadata: opts.Metadata,
  138. }
  139. vol, err := volumes_v3.Create(volumes.blockstorage, createOpts).Extract()
  140. timeTaken := time.Since(startTime).Seconds()
  141. recordOpenstackOperationMetric("create_v3_volume", timeTaken, err)
  142. if err != nil {
  143. return "", "", err
  144. }
  145. return vol.ID, vol.AvailabilityZone, nil
  146. }
  147. func (volumes *VolumesV1) getVolume(volumeID string) (Volume, error) {
  148. startTime := time.Now()
  149. volumeV1, err := volumes_v1.Get(volumes.blockstorage, volumeID).Extract()
  150. timeTaken := time.Since(startTime).Seconds()
  151. recordOpenstackOperationMetric("get_v1_volume", timeTaken, err)
  152. if err != nil {
  153. return Volume{}, fmt.Errorf("error occurred getting volume by ID: %s, err: %v", volumeID, err)
  154. }
  155. volume := Volume{
  156. AvailabilityZone: volumeV1.AvailabilityZone,
  157. ID: volumeV1.ID,
  158. Name: volumeV1.Name,
  159. Status: volumeV1.Status,
  160. Size: volumeV1.Size,
  161. }
  162. if len(volumeV1.Attachments) > 0 && volumeV1.Attachments[0]["server_id"] != nil {
  163. volume.AttachedServerID = volumeV1.Attachments[0]["server_id"].(string)
  164. volume.AttachedDevice = volumeV1.Attachments[0]["device"].(string)
  165. }
  166. return volume, nil
  167. }
  168. func (volumes *VolumesV2) getVolume(volumeID string) (Volume, error) {
  169. startTime := time.Now()
  170. volumeV2, err := volumes_v2.Get(volumes.blockstorage, volumeID).Extract()
  171. timeTaken := time.Since(startTime).Seconds()
  172. recordOpenstackOperationMetric("get_v2_volume", timeTaken, err)
  173. if err != nil {
  174. return Volume{}, fmt.Errorf("error occurred getting volume by ID: %s, err: %v", volumeID, err)
  175. }
  176. volume := Volume{
  177. AvailabilityZone: volumeV2.AvailabilityZone,
  178. ID: volumeV2.ID,
  179. Name: volumeV2.Name,
  180. Status: volumeV2.Status,
  181. Size: volumeV2.Size,
  182. }
  183. if len(volumeV2.Attachments) > 0 {
  184. volume.AttachedServerID = volumeV2.Attachments[0].ServerID
  185. volume.AttachedDevice = volumeV2.Attachments[0].Device
  186. }
  187. return volume, nil
  188. }
  189. func (volumes *VolumesV3) getVolume(volumeID string) (Volume, error) {
  190. startTime := time.Now()
  191. volumeV3, err := volumes_v3.Get(volumes.blockstorage, volumeID).Extract()
  192. timeTaken := time.Since(startTime).Seconds()
  193. recordOpenstackOperationMetric("get_v3_volume", timeTaken, err)
  194. if err != nil {
  195. return Volume{}, fmt.Errorf("error occurred getting volume by ID: %s, err: %v", volumeID, err)
  196. }
  197. volume := Volume{
  198. AvailabilityZone: volumeV3.AvailabilityZone,
  199. ID: volumeV3.ID,
  200. Name: volumeV3.Name,
  201. Status: volumeV3.Status,
  202. Size: volumeV3.Size,
  203. }
  204. if len(volumeV3.Attachments) > 0 {
  205. volume.AttachedServerID = volumeV3.Attachments[0].ServerID
  206. volume.AttachedDevice = volumeV3.Attachments[0].Device
  207. }
  208. return volume, nil
  209. }
  210. func (volumes *VolumesV1) deleteVolume(volumeID string) error {
  211. startTime := time.Now()
  212. err := volumes_v1.Delete(volumes.blockstorage, volumeID).ExtractErr()
  213. timeTaken := time.Since(startTime).Seconds()
  214. recordOpenstackOperationMetric("delete_v1_volume", timeTaken, err)
  215. return err
  216. }
  217. func (volumes *VolumesV2) deleteVolume(volumeID string) error {
  218. startTime := time.Now()
  219. err := volumes_v2.Delete(volumes.blockstorage, volumeID, nil).ExtractErr()
  220. timeTaken := time.Since(startTime).Seconds()
  221. recordOpenstackOperationMetric("delete_v2_volume", timeTaken, err)
  222. return err
  223. }
  224. func (volumes *VolumesV3) deleteVolume(volumeID string) error {
  225. startTime := time.Now()
  226. err := volumes_v3.Delete(volumes.blockstorage, volumeID, nil).ExtractErr()
  227. timeTaken := time.Since(startTime).Seconds()
  228. recordOpenstackOperationMetric("delete_v3_volume", timeTaken, err)
  229. return err
  230. }
  231. func (volumes *VolumesV1) expandVolume(volumeID string, newSize int) error {
  232. startTime := time.Now()
  233. createOpts := volumeexpand.ExtendSizeOpts{
  234. NewSize: newSize,
  235. }
  236. err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, createOpts).ExtractErr()
  237. timeTaken := time.Since(startTime).Seconds()
  238. recordOpenstackOperationMetric("expand_volume", timeTaken, err)
  239. return err
  240. }
  241. func (volumes *VolumesV2) expandVolume(volumeID string, newSize int) error {
  242. startTime := time.Now()
  243. createOpts := volumeexpand.ExtendSizeOpts{
  244. NewSize: newSize,
  245. }
  246. err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, createOpts).ExtractErr()
  247. timeTaken := time.Since(startTime).Seconds()
  248. recordOpenstackOperationMetric("expand_volume", timeTaken, err)
  249. return err
  250. }
  251. func (volumes *VolumesV3) expandVolume(volumeID string, newSize int) error {
  252. startTime := time.Now()
  253. createOpts := volumeexpand.ExtendSizeOpts{
  254. NewSize: newSize,
  255. }
  256. err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, createOpts).ExtractErr()
  257. timeTaken := time.Since(startTime).Seconds()
  258. recordOpenstackOperationMetric("expand_volume", timeTaken, err)
  259. return err
  260. }
  261. // OperationPending checks if there is an operation pending on a volume
  262. func (os *OpenStack) OperationPending(diskName string) (bool, string, error) {
  263. volume, err := os.getVolume(diskName)
  264. if err != nil {
  265. return false, "", err
  266. }
  267. volumeStatus := volume.Status
  268. if volumeStatus == volumeErrorStatus {
  269. err = fmt.Errorf("status of volume %s is %s", diskName, volumeStatus)
  270. return false, volumeStatus, err
  271. }
  272. if volumeStatus == volumeAvailableStatus || volumeStatus == volumeInUseStatus || volumeStatus == volumeDeletedStatus {
  273. return false, volume.Status, nil
  274. }
  275. return true, volumeStatus, nil
  276. }
// AttachDisk attaches given cinder volume to the compute running kubelet.
// It returns the volume ID on success. If the volume is already attached to a
// different instance, a DanglingError is returned carrying the node name and
// device path so the attach/detach controller can reconcile first.
func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) {
	volume, err := os.getVolume(volumeID)
	if err != nil {
		return "", err
	}

	cClient, err := os.NewComputeV2()
	if err != nil {
		return "", err
	}

	if volume.AttachedServerID != "" {
		// Already attached to this very instance — idempotent success.
		if instanceID == volume.AttachedServerID {
			klog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID)
			return volume.ID, nil
		}
		// Attached elsewhere: try to resolve the owning node's name. If that
		// lookup fails we can only return a plain error; otherwise we return
		// a typed dangling-volume error.
		nodeName, err := os.GetNodeNameByID(volume.AttachedServerID)
		attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerID)
		if err != nil {
			klog.Error(attachErr)
			return "", errors.New(attachErr)
		}
		// using volume.AttachedDevice may cause problems because cinder does not report device path correctly see issue #33128
		devicePath := volume.AttachedDevice
		danglingErr := volerr.NewDanglingError(attachErr, nodeName, devicePath)
		klog.V(2).Infof("Found dangling volume %s attached to node %s", volumeID, nodeName)
		return "", danglingErr
	}

	startTime := time.Now()
	// add read only flag here if possible spothanis
	_, err = volumeattach.Create(cClient, instanceID, &volumeattach.CreateOpts{
		VolumeID: volume.ID,
	}).Extract()
	timeTaken := time.Since(startTime).Seconds()
	recordOpenstackOperationMetric("attach_disk", timeTaken, err)
	if err != nil {
		return "", fmt.Errorf("failed to attach %s volume to %s compute: %v", volumeID, instanceID, err)
	}
	klog.V(2).Infof("Successfully attached %s volume to %s compute", volumeID, instanceID)
	return volume.ID, nil
}
  317. // DetachDisk detaches given cinder volume from the compute running kubelet
  318. func (os *OpenStack) DetachDisk(instanceID, volumeID string) error {
  319. volume, err := os.getVolume(volumeID)
  320. if err != nil {
  321. return err
  322. }
  323. if volume.Status == volumeAvailableStatus {
  324. // "available" is fine since that means the volume is detached from instance already.
  325. klog.V(2).Infof("volume: %s has been detached from compute: %s ", volume.ID, instanceID)
  326. return nil
  327. }
  328. if volume.Status != volumeInUseStatus {
  329. return fmt.Errorf("can not detach volume %s, its status is %s", volume.Name, volume.Status)
  330. }
  331. cClient, err := os.NewComputeV2()
  332. if err != nil {
  333. return err
  334. }
  335. if volume.AttachedServerID != instanceID {
  336. return fmt.Errorf("disk: %s has no attachments or is not attached to compute: %s", volume.Name, instanceID)
  337. }
  338. startTime := time.Now()
  339. // This is a blocking call and effects kubelet's performance directly.
  340. // We should consider kicking it out into a separate routine, if it is bad.
  341. err = volumeattach.Delete(cClient, instanceID, volume.ID).ExtractErr()
  342. timeTaken := time.Since(startTime).Seconds()
  343. recordOpenstackOperationMetric("detach_disk", timeTaken, err)
  344. if err != nil {
  345. return fmt.Errorf("failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err)
  346. }
  347. klog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID)
  348. return nil
  349. }
  350. // ExpandVolume expands the size of specific cinder volume (in GiB)
  351. func (os *OpenStack) ExpandVolume(volumeID string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
  352. volume, err := os.getVolume(volumeID)
  353. if err != nil {
  354. return oldSize, err
  355. }
  356. if volume.Status != volumeAvailableStatus {
  357. // cinder volume can not be expanded if its status is not available
  358. return oldSize, fmt.Errorf("volume status is not available")
  359. }
  360. // Cinder works with gigabytes, convert to GiB with rounding up
  361. volSizeGiB, err := volumehelpers.RoundUpToGiBInt(newSize)
  362. if err != nil {
  363. return oldSize, err
  364. }
  365. newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", volSizeGiB))
  366. // if volume size equals to or greater than the newSize, return nil
  367. if volume.Size >= volSizeGiB {
  368. return newSizeQuant, nil
  369. }
  370. volumes, err := os.volumeService("")
  371. if err != nil {
  372. return oldSize, err
  373. }
  374. err = volumes.expandVolume(volumeID, volSizeGiB)
  375. if err != nil {
  376. return oldSize, err
  377. }
  378. return newSizeQuant, nil
  379. }
  380. // getVolume retrieves Volume by its ID.
  381. func (os *OpenStack) getVolume(volumeID string) (Volume, error) {
  382. volumes, err := os.volumeService("")
  383. if err != nil {
  384. return Volume{}, fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err)
  385. }
  386. return volumes.getVolume(volumeID)
  387. }
  388. // CreateVolume creates a volume of given size (in GiB)
  389. func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, string, bool, error) {
  390. volumes, err := os.volumeService("")
  391. if err != nil {
  392. return "", "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err)
  393. }
  394. opts := volumeCreateOpts{
  395. Name: name,
  396. Size: size,
  397. VolumeType: vtype,
  398. Availability: availability,
  399. }
  400. if tags != nil {
  401. opts.Metadata = *tags
  402. }
  403. volumeID, volumeAZ, err := volumes.createVolume(opts)
  404. if err != nil {
  405. return "", "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("failed to create a %d GB volume: %v", size, err)
  406. }
  407. klog.Infof("Created volume %v in Availability Zone: %v Region: %v Ignore volume AZ: %v", volumeID, volumeAZ, os.region, os.bsOpts.IgnoreVolumeAZ)
  408. return volumeID, volumeAZ, os.region, os.bsOpts.IgnoreVolumeAZ, nil
  409. }
  410. // GetDevicePathBySerialID returns the path of an attached block storage volume, specified by its id.
  411. func (os *OpenStack) GetDevicePathBySerialID(volumeID string) string {
  412. // Build a list of candidate device paths.
  413. // Certain Nova drivers will set the disk serial ID, including the Cinder volume id.
  414. candidateDeviceNodes := []string{
  415. // KVM
  416. fmt.Sprintf("virtio-%s", volumeID[:20]),
  417. // KVM virtio-scsi
  418. fmt.Sprintf("scsi-0QEMU_QEMU_HARDDISK_%s", volumeID[:20]),
  419. // ESXi
  420. fmt.Sprintf("wwn-0x%s", strings.Replace(volumeID, "-", "", -1)),
  421. }
  422. files, _ := ioutil.ReadDir("/dev/disk/by-id/")
  423. for _, f := range files {
  424. for _, c := range candidateDeviceNodes {
  425. if c == f.Name() {
  426. klog.V(4).Infof("Found disk attached as %q; full devicepath: %s\n", f.Name(), path.Join("/dev/disk/by-id/", f.Name()))
  427. return path.Join("/dev/disk/by-id/", f.Name())
  428. }
  429. }
  430. }
  431. klog.V(4).Infof("Failed to find device for the volumeID: %q by serial ID", volumeID)
  432. return ""
  433. }
// getDevicePathFromInstanceMetadata looks up the local device path of the
// given volume via the Nova metadata service, returning "" on any failure.
func (os *OpenStack) getDevicePathFromInstanceMetadata(volumeID string) string {
	// Nova Hyper-V hosts cannot override disk SCSI IDs. In order to locate
	// volumes, we're querying the metadata service. Note that the Hyper-V
	// driver will include device metadata for untagged volumes as well.
	//
	// We're avoiding using cached metadata (or the configdrive),
	// relying on the metadata service.
	instanceMetadata, err := getMetadataFromMetadataService(
		newtonMetadataVersion)
	if err != nil {
		klog.V(4).Infof(
			"Could not retrieve instance metadata. Error: %v", err)
		return ""
	}

	// Find the disk whose serial matches the volume ID, then resolve it to a
	// /dev/disk/by-path entry via its reported bus and address.
	for _, device := range instanceMetadata.Devices {
		if device.Type == "disk" && device.Serial == volumeID {
			klog.V(4).Infof(
				"Found disk metadata for volumeID %q. Bus: %q, Address: %q",
				volumeID, device.Bus, device.Address)

			diskPattern := fmt.Sprintf(
				"/dev/disk/by-path/*-%s-%s",
				device.Bus, device.Address)
			diskPaths, err := filepath.Glob(diskPattern)
			if err != nil {
				klog.Errorf(
					"could not retrieve disk path for volumeID: %q. Error filepath.Glob(%q): %v",
					volumeID, diskPattern, err)
				return ""
			}

			// Exactly one match is expected; anything else is ambiguous and
			// treated as a failure.
			if len(diskPaths) == 1 {
				return diskPaths[0]
			}

			klog.Errorf(
				"expecting to find one disk path for volumeID %q, found %d: %v",
				volumeID, len(diskPaths), diskPaths)
			return ""
		}
	}

	klog.V(4).Infof(
		"Could not retrieve device metadata for volumeID: %q", volumeID)
	return ""
}
  476. // GetDevicePath returns the path of an attached block storage volume, specified by its id.
  477. func (os *OpenStack) GetDevicePath(volumeID string) string {
  478. devicePath := os.GetDevicePathBySerialID(volumeID)
  479. if devicePath == "" {
  480. devicePath = os.getDevicePathFromInstanceMetadata(volumeID)
  481. }
  482. if devicePath == "" {
  483. klog.Warningf("Failed to find device for the volumeID: %q", volumeID)
  484. }
  485. return devicePath
  486. }
  487. // DeleteVolume deletes a volume given volume name.
  488. func (os *OpenStack) DeleteVolume(volumeID string) error {
  489. used, err := os.diskIsUsed(volumeID)
  490. if err != nil {
  491. return err
  492. }
  493. if used {
  494. msg := fmt.Sprintf("Cannot delete the volume %q, it's still attached to a node", volumeID)
  495. return volerr.NewDeletedVolumeInUseError(msg)
  496. }
  497. volumes, err := os.volumeService("")
  498. if err != nil {
  499. return fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err)
  500. }
  501. err = volumes.deleteVolume(volumeID)
  502. return err
  503. }
  504. // GetAttachmentDiskPath gets device path of attached volume to the compute running kubelet, as known by cinder
  505. func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) {
  506. // See issue #33128 - Cinder does not always tell you the right device path, as such
  507. // we must only use this value as a last resort.
  508. volume, err := os.getVolume(volumeID)
  509. if err != nil {
  510. return "", err
  511. }
  512. if volume.Status != volumeInUseStatus {
  513. return "", fmt.Errorf("can not get device path of volume %s, its status is %s ", volume.Name, volume.Status)
  514. }
  515. if volume.AttachedServerID != "" {
  516. if instanceID == volume.AttachedServerID {
  517. // Attachment[0]["device"] points to the device path
  518. // see http://developer.openstack.org/api-ref-blockstorage-v1.html
  519. return volume.AttachedDevice, nil
  520. }
  521. return "", fmt.Errorf("disk %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.AttachedServerID)
  522. }
  523. return "", fmt.Errorf("volume %s has no ServerId", volumeID)
  524. }
  525. // DiskIsAttached queries if a volume is attached to a compute instance
  526. func (os *OpenStack) DiskIsAttached(instanceID, volumeID string) (bool, error) {
  527. if instanceID == "" {
  528. klog.Warningf("calling DiskIsAttached with empty instanceid: %s %s", instanceID, volumeID)
  529. }
  530. volume, err := os.getVolume(volumeID)
  531. if err != nil {
  532. return false, err
  533. }
  534. return instanceID == volume.AttachedServerID, nil
  535. }
// DiskIsAttachedByName queries if a volume is attached to a compute instance by name.
// It returns (attached, instanceID, err); a missing server is reported as
// detached rather than an error.
func (os *OpenStack) DiskIsAttachedByName(nodeName types.NodeName, volumeID string) (bool, string, error) {
	cClient, err := os.NewComputeV2()
	if err != nil {
		return false, "", err
	}
	srv, err := getServerByName(cClient, nodeName)
	if err != nil {
		if err == ErrNotFound {
			// instance not found anymore in cloudprovider, assume that cinder is detached
			return false, "", nil
		}
		return false, "", err
	}
	// Keep only the portion of the server ID after the last "/".
	// NOTE(review): for plain UUID IDs the prefix+strip is a no-op; presumably
	// this normalizes path-form IDs — confirm before simplifying.
	instanceID := "/" + srv.ID
	if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
		instanceID = instanceID[(ind + 1):]
	}
	attached, err := os.DiskIsAttached(instanceID, volumeID)
	return attached, instanceID, err
}
  557. // DisksAreAttached queries if a list of volumes are attached to a compute instance
  558. func (os *OpenStack) DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) {
  559. attached := make(map[string]bool)
  560. for _, volumeID := range volumeIDs {
  561. isAttached, err := os.DiskIsAttached(instanceID, volumeID)
  562. if err != nil && err != ErrNotFound {
  563. attached[volumeID] = true
  564. continue
  565. }
  566. attached[volumeID] = isAttached
  567. }
  568. return attached, nil
  569. }
// DisksAreAttachedByName queries if a list of volumes are attached to a compute instance by name.
// A server that no longer exists yields "detached" for every requested volume.
func (os *OpenStack) DisksAreAttachedByName(nodeName types.NodeName, volumeIDs []string) (map[string]bool, error) {
	attached := make(map[string]bool)
	cClient, err := os.NewComputeV2()
	if err != nil {
		return attached, err
	}
	srv, err := getServerByName(cClient, nodeName)
	if err != nil {
		if err == ErrNotFound {
			// instance not found anymore, mark all volumes as detached
			for _, volumeID := range volumeIDs {
				attached[volumeID] = false
			}
			return attached, nil
		}
		return attached, err
	}
	// Keep only the portion of the server ID after the last "/".
	// NOTE(review): for plain UUID IDs the prefix+strip is a no-op; presumably
	// this normalizes path-form IDs — confirm before simplifying.
	instanceID := "/" + srv.ID
	if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
		instanceID = instanceID[(ind + 1):]
	}
	return os.DisksAreAttached(instanceID, volumeIDs)
}
  594. // diskIsUsed returns true a disk is attached to any node.
  595. func (os *OpenStack) diskIsUsed(volumeID string) (bool, error) {
  596. volume, err := os.getVolume(volumeID)
  597. if err != nil {
  598. return false, err
  599. }
  600. return volume.AttachedServerID != "", nil
  601. }
// ShouldTrustDevicePath queries if we should trust the cinder provide deviceName, See issue #33128
func (os *OpenStack) ShouldTrustDevicePath() bool {
	// Value comes straight from the block-storage options (os.bsOpts).
	return os.bsOpts.TrustDevicePath
}
// NodeVolumeAttachLimit specifies number of cinder volumes that can be attached to this node.
func (os *OpenStack) NodeVolumeAttachLimit() int {
	// Value comes straight from the block-storage options (os.bsOpts).
	return os.bsOpts.NodeVolumeAttachLimit
}
  610. // GetLabelsForVolume implements PVLabeler.GetLabelsForVolume
  611. func (os *OpenStack) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
  612. // Ignore if not Cinder.
  613. if pv.Spec.Cinder == nil {
  614. return nil, nil
  615. }
  616. // Ignore any volumes that are being provisioned
  617. if pv.Spec.Cinder.VolumeID == cloudvolume.ProvisionedVolumeName {
  618. return nil, nil
  619. }
  620. // Get Volume
  621. volume, err := os.getVolume(pv.Spec.Cinder.VolumeID)
  622. if err != nil {
  623. return nil, err
  624. }
  625. // Construct Volume Labels
  626. labels := make(map[string]string)
  627. labels[v1.LabelZoneFailureDomain] = volume.AvailabilityZone
  628. labels[v1.LabelZoneRegion] = os.region
  629. klog.V(4).Infof("The Volume %s has labels %v", pv.Spec.Cinder.VolumeID, labels)
  630. return labels, nil
  631. }
  632. // recordOpenstackOperationMetric records openstack operation metrics
  633. func recordOpenstackOperationMetric(operation string, timeTaken float64, err error) {
  634. if err != nil {
  635. openstackAPIRequestErrors.With(prometheus.Labels{"request": operation}).Inc()
  636. } else {
  637. openstackOperationsLatency.With(prometheus.Labels{"request": operation}).Observe(timeTaken)
  638. }
  639. }