csi_test.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
    "context"
    "fmt"
    "reflect"
    "strings"
    "testing"

    v1 "k8s.io/api/core/v1"
    storagev1 "k8s.io/api/storage/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/rand"
    "k8s.io/apimachinery/pkg/util/sets"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    featuregatetesting "k8s.io/component-base/featuregate/testing"
    csitrans "k8s.io/csi-translation-lib"
    csilibplugins "k8s.io/csi-translation-lib/plugins"
    "k8s.io/kubernetes/pkg/features"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
    utilpointer "k8s.io/utils/pointer"
)

const (
    ebsCSIDriverName         = csilibplugins.AWSEBSDriverName
    gceCSIDriverName         = csilibplugins.GCEPDDriverName
    hostpathInTreePluginName = "kubernetes.io/hostpath"
)
// getVolumeLimitKey returns a ResourceName by filter type
func getVolumeLimitKey(filterType string) v1.ResourceName {
    switch filterType {
    case ebsVolumeFilterType:
        return v1.ResourceName(volumeutil.EBSVolumeLimitKey)
    case gcePDVolumeFilterType:
        return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
    case azureDiskVolumeFilterType:
        return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
    case cinderVolumeFilterType:
        return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
    default:
        return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
    }
}
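
// For example, getVolumeLimitKey(ebsVolumeFilterType) resolves to the
// in-tree EBS limit key ("attachable-volumes-aws-ebs"), while an arbitrary
// CSI driver name such as "ebs.csi.aws.com" falls through to the default
// case and yields a CSI attach-limit key of the form
// "attachable-volumes-csi-<driver-name>".
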
func TestCSILimits(t *testing.T) {
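    // Claim names below follow the "csi-<driver>-<index>" convention that
    // getFakeCSIPVCLister uses when fabricating PVCs; indices 4-6 ("csi-4"
    // and so on) map to the special unbound/missing-SC/missing-PV claims it
    // also creates.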
    runningPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-ebs.csi.aws.com-3",
                        },
                    },
                },
            },
        },
    }

    pendingVolumePod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-4",
                        },
                    },
                },
            },
        },
    }

    // Different pod than pendingVolumePod, but using the same unbound PVC.
    unboundPVCPod2 := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-4",
                        },
                    },
                },
            },
        },
    }

    missingPVPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-6",
                        },
                    },
                },
            },
        },
    }

    noSCPVCPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-5",
                        },
                    },
                },
            },
        },
    }

    gceTwoVolPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-pd.csi.storage.gke.io-1",
                        },
                    },
                },
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-pd.csi.storage.gke.io-2",
                        },
                    },
                },
            },
        },
    }

    // In-tree volumes.
    inTreeOneVolPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-kubernetes.io/aws-ebs-0",
                        },
                    },
                },
            },
        },
    }

    inTreeTwoVolPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-kubernetes.io/aws-ebs-1",
                        },
                    },
                },
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-kubernetes.io/aws-ebs-2",
                        },
                    },
                },
            },
        },
    }

    // Pods with matching CSI driver names.
    csiEBSOneVolPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-ebs.csi.aws.com-0",
                        },
                    },
                },
            },
        },
    }

    csiEBSTwoVolPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-ebs.csi.aws.com-1",
                        },
                    },
                },
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-ebs.csi.aws.com-2",
                        },
                    },
                },
            },
        },
    }

    inTreeNonMigratableOneVolPod := &v1.Pod{
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{
                {
                    VolumeSource: v1.VolumeSource{
                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                            ClaimName: "csi-kubernetes.io/hostpath-0",
                        },
                    },
                },
            },
        },
    }
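
    // Each case runs newPod through the CSILimits Filter plugin. limitSource
    // selects where the per-driver attach limit is configured; see
    // getNodeWithPodAndVolumeLimits for the recognized values.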
    tests := []struct {
        newPod           *v1.Pod
        existingPods     []*v1.Pod
        filterName       string
        maxVols          int
        driverNames      []string
        test             string
        migrationEnabled bool
        limitSource      string
        wantStatus       *framework.Status
    }{
        {
            newPod:       csiEBSOneVolPod,
            existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
            filterName:   "csi",
            maxVols:      4,
            driverNames:  []string{ebsCSIDriverName},
            test:         "fits when node volume limit >= new pod's CSI volume count",
            limitSource:  "node",
        },
        {
            newPod:       csiEBSOneVolPod,
            existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
            filterName:   "csi",
            maxVols:      2,
            driverNames:  []string{ebsCSIDriverName},
            test:         "doesn't fit when node volume limit <= pod's CSI volume count",
            limitSource:  "node",
            wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
        },
        {
            newPod:       csiEBSOneVolPod,
            existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
            filterName:   "csi",
            maxVols:      2,
            driverNames:  []string{ebsCSIDriverName},
            test:         "fits when driver does not support volume limits",
            limitSource:  "csinode-with-no-limit",
        },
        // Pending PVCs should be counted towards the limit.
        {
            newPod:       csiEBSOneVolPod,
            existingPods: []*v1.Pod{pendingVolumePod, csiEBSTwoVolPod},
            filterName:   "csi",
            maxVols:      2,
            driverNames:  []string{ebsCSIDriverName},
            test:         "counts pending PVCs towards the volume limit",
            limitSource:  "node",
            wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
        },
        // The same pending PVC used by two pods should be counted once.
        {
            newPod:       csiEBSOneVolPod,
            existingPods: []*v1.Pod{pendingVolumePod, unboundPVCPod2, csiEBSTwoVolPod},
            filterName:   "csi",
            maxVols:      4,
            driverNames:  []string{ebsCSIDriverName},
            test:         "counts a pending PVC shared by multiple pods only once",
            limitSource:  "node",
        },
        // should count PVCs with invalid PV name but valid SC
        {
            newPod:       csiEBSOneVolPod,
            existingPods: []*v1.Pod{missingPVPod, csiEBSTwoVolPod},
            filterName:   "csi",
            maxVols:      2,
            driverNames:  []string{ebsCSIDriverName},
            test:         "should count PVCs with invalid PV name but valid SC",
            limitSource:  "node",
            wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
        },
        // Don't count a volume whose PVC has no StorageClass (the driver
        // cannot be determined).
        {
            newPod:       csiEBSOneVolPod,
            existingPods: []*v1.Pod{runningPod, noSCPVCPod},
            filterName:   "csi",
            maxVols:      2,
            driverNames:  []string{ebsCSIDriverName},
            test:         "doesn't count PVCs with a missing StorageClass towards the volume limit",
            limitSource:  "node",
        },
        // Only volumes whose driver matches count against that driver's limit.
        {
            newPod:       csiEBSOneVolPod,
            existingPods: []*v1.Pod{gceTwoVolPod, csiEBSTwoVolPod},
            filterName:   "csi",
            maxVols:      2,
            driverNames:  []string{ebsCSIDriverName, gceCSIDriverName},
            test:         "counts PVCs of the same driver towards the volume limit",
            limitSource:  "node",
            wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
        },
        {
            newPod:       gceTwoVolPod,
            existingPods: []*v1.Pod{csiEBSTwoVolPod, runningPod},
            filterName:   "csi",
            maxVols:      2,
            driverNames:  []string{ebsCSIDriverName, gceCSIDriverName},
            test:         "doesn't count PVCs of a different driver towards the volume limit",
            limitSource:  "node",
        },
        // Tests for in-tree volume migration.
        {
            newPod:           inTreeOneVolPod,
            existingPods:     []*v1.Pod{inTreeTwoVolPod},
            filterName:       "csi",
            maxVols:          2,
            driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
            migrationEnabled: true,
            limitSource:      "csinode",
            test:             "should count in-tree volumes if migration is enabled",
            wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
        },
        {
            newPod:           pendingVolumePod,
            existingPods:     []*v1.Pod{inTreeTwoVolPod},
            filterName:       "csi",
            maxVols:          2,
            driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
            migrationEnabled: true,
            limitSource:      "csinode",
            test:             "should count unbound in-tree volumes if migration is enabled",
            wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
        },
        {
            newPod:           inTreeOneVolPod,
            existingPods:     []*v1.Pod{inTreeTwoVolPod},
            filterName:       "csi",
            maxVols:          2,
            driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
            migrationEnabled: false,
            limitSource:      "csinode",
            test:             "should not count in-tree volume if migration is disabled",
        },
        {
            newPod:           inTreeOneVolPod,
            existingPods:     []*v1.Pod{inTreeTwoVolPod},
            filterName:       "csi",
            maxVols:          2,
            driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
            migrationEnabled: true,
            limitSource:      "csinode-with-no-limit",
            test:             "should not limit pod if the volume's driver does not report limits",
        },
        {
            newPod:           inTreeOneVolPod,
            existingPods:     []*v1.Pod{inTreeTwoVolPod},
            filterName:       "csi",
            maxVols:          2,
            driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
            migrationEnabled: false,
            limitSource:      "csinode-with-no-limit",
            test:             "should not limit in-tree pod if migration is disabled",
        },
        {
            newPod:           inTreeNonMigratableOneVolPod,
            existingPods:     []*v1.Pod{csiEBSTwoVolPod},
            filterName:       "csi",
            maxVols:          2,
            driverNames:      []string{hostpathInTreePluginName, ebsCSIDriverName},
            migrationEnabled: true,
            limitSource:      "csinode",
            test:             "should not count non-migratable in-tree volumes",
        },
        // Mixed in-tree and CSI volumes.
        {
            newPod:           inTreeOneVolPod,
            existingPods:     []*v1.Pod{csiEBSTwoVolPod},
            filterName:       "csi",
            maxVols:          2,
            driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
            migrationEnabled: true,
            limitSource:      "csinode",
            test:             "should count in-tree and CSI volumes if migration is enabled (when scheduling in-tree volumes)",
            wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
        },
        {
            newPod:           csiEBSOneVolPod,
            existingPods:     []*v1.Pod{inTreeTwoVolPod},
            filterName:       "csi",
            maxVols:          2,
            driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
            migrationEnabled: true,
            limitSource:      "csinode",
            test:             "should count in-tree and CSI volumes if migration is enabled (when scheduling CSI volumes)",
            wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
        },
        {
            newPod:           csiEBSOneVolPod,
            existingPods:     []*v1.Pod{csiEBSTwoVolPod, inTreeTwoVolPod},
            filterName:       "csi",
            maxVols:          3,
            driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
            migrationEnabled: false,
            limitSource:      "csinode",
            test:             "should count only CSI volumes if migration is disabled (when scheduling CSI volumes)",
        },
        {
            newPod:           inTreeOneVolPod,
            existingPods:     []*v1.Pod{csiEBSTwoVolPod},
            filterName:       "csi",
            maxVols:          2,
            driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
            migrationEnabled: false,
            limitSource:      "csinode",
            test:             "should count only CSI volumes if migration is disabled (when scheduling in-tree volumes)",
        },
    }
    // Run the attachable-volume filter tests with the feature gates set and
    // limits present on the node or CSINode.
    for _, test := range tests {
        t.Run(test.test, func(t *testing.T) {
            node, csiNode := getNodeWithPodAndVolumeLimits(test.limitSource, test.existingPods, int64(test.maxVols), test.driverNames...)
            if test.migrationEnabled {
                defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigration, true)()
                defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationAWS, true)()
                enableMigrationOnNode(csiNode, csilibplugins.AWSEBSInTreePluginName)
            } else {
                defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigration, false)()
                defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationAWS, false)()
            }

            p := &CSILimits{
                csiNodeLister:        getFakeCSINodeLister(csiNode),
                pvLister:             getFakeCSIPVLister(test.filterName, test.driverNames...),
                pvcLister:            getFakeCSIPVCLister(test.filterName, "csi-sc", test.driverNames...),
                scLister:             getFakeCSIStorageClassLister("csi-sc", test.driverNames[0]),
                randomVolumeIDPrefix: rand.String(32),
                translator:           csitrans.New(),
            }
            gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
            if !reflect.DeepEqual(gotStatus, test.wantStatus) {
                t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
            }
        })
    }
}
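
// getFakeCSIPVLister fabricates four PVs per driver, named
// "<volumeName>-<driver>-<index>". PVs for the in-tree plugin names carry
// the corresponding in-tree volume source (EBS or HostPath) so the
// migration paths are exercised; all other drivers get a CSI source.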
func getFakeCSIPVLister(volumeName string, driverNames ...string) fakelisters.PersistentVolumeLister {
    pvLister := fakelisters.PersistentVolumeLister{}
    for _, driver := range driverNames {
        for j := 0; j < 4; j++ {
            volumeHandle := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
            pv := v1.PersistentVolume{
                ObjectMeta: metav1.ObjectMeta{Name: volumeHandle},
                Spec: v1.PersistentVolumeSpec{
                    PersistentVolumeSource: v1.PersistentVolumeSource{
                        CSI: &v1.CSIPersistentVolumeSource{
                            Driver:       driver,
                            VolumeHandle: volumeHandle,
                        },
                    },
                },
            }

            switch driver {
            case csilibplugins.AWSEBSInTreePluginName:
                pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
                    AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
                        VolumeID: volumeHandle,
                    },
                }
            case hostpathInTreePluginName:
                pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
                    HostPath: &v1.HostPathVolumeSource{
                        Path: "/tmp",
                    },
                }
            default:
                pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
                    CSI: &v1.CSIPersistentVolumeSource{
                        Driver:       driver,
                        VolumeHandle: volumeHandle,
                    },
                }
            }
            pvLister = append(pvLister, pv)
        }
    }
    return pvLister
}
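
// getFakeCSIPVCLister fabricates four bound PVCs per driver, named
// "<volumeName>-<driver>-<index>", plus three special claims:
// "<volumeName>-4" (unbound, StorageClass set), "<volumeName>-5" (neither
// PV nor StorageClass), and "<volumeName>-6" (StorageClass set, PV missing).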
func getFakeCSIPVCLister(volumeName, scName string, driverNames ...string) fakelisters.PersistentVolumeClaimLister {
    pvcLister := fakelisters.PersistentVolumeClaimLister{}
    for _, driver := range driverNames {
        for j := 0; j < 4; j++ {
            v := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
            pvc := v1.PersistentVolumeClaim{
                ObjectMeta: metav1.ObjectMeta{Name: v},
                Spec:       v1.PersistentVolumeClaimSpec{VolumeName: v},
            }
            pvcLister = append(pvcLister, pvc)
        }
    }

    // An unbound PVC with a StorageClass (pending provisioning).
    pvcLister = append(pvcLister, v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-4"},
        Spec:       v1.PersistentVolumeClaimSpec{StorageClassName: &scName},
    })
    // A PVC with neither a bound PV nor a StorageClass.
    pvcLister = append(pvcLister, v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-5"},
        Spec:       v1.PersistentVolumeClaimSpec{},
    })
    // A PVC with a missing PV but an available StorageClass.
    pvcLister = append(pvcLister, v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-6"},
        Spec:       v1.PersistentVolumeClaimSpec{StorageClassName: &scName, VolumeName: "missing-in-action"},
    })
    return pvcLister
}
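
// enableMigrationOnNode marks pluginName as migrated on the given CSINode
// by recording it under the v1.MigratedPluginsAnnotationKey annotation.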
func enableMigrationOnNode(csiNode *storagev1.CSINode, pluginName string) {
    nodeInfoAnnotations := csiNode.GetAnnotations()
    if nodeInfoAnnotations == nil {
        nodeInfoAnnotations = map[string]string{}
    }
    newAnnotationSet := sets.NewString()
    newAnnotationSet.Insert(pluginName)
    nas := strings.Join(newAnnotationSet.List(), ",")
    nodeInfoAnnotations[v1.MigratedPluginsAnnotationKey] = nas
    csiNode.Annotations = nodeInfoAnnotations
}
func getFakeCSIStorageClassLister(scName, provisionerName string) fakelisters.StorageClassLister {
    return fakelisters.StorageClassLister{
        {
            ObjectMeta:  metav1.ObjectMeta{Name: scName},
            Provisioner: provisionerName,
        },
    }
}

func getFakeCSINodeLister(csiNode *storagev1.CSINode) fakelisters.CSINodeLister {
    if csiNode != nil {
        return fakelisters.CSINodeLister(*csiNode)
    }
    return fakelisters.CSINodeLister{}
}
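
// getNodeWithPodAndVolumeLimits builds a NodeInfo holding pods and, depending
// on limitSource, attach limits: "node" sets them in node.Status.Allocatable,
// "csinode" on the CSINode's drivers, "both" in both places,
// "csinode-with-no-limit" registers drivers without limits, and
// "no-csi-driver" returns a CSINode with no drivers at all.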
func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulernodeinfo.NodeInfo, *storagev1.CSINode) {
    nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
    node := &v1.Node{
        ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
        Status: v1.NodeStatus{
            Allocatable: v1.ResourceList{},
        },
    }
    var csiNode *storagev1.CSINode

    addLimitToNode := func() {
        for _, driver := range driverNames {
            node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
        }
    }

    initCSINode := func() {
        csiNode = &storagev1.CSINode{
            ObjectMeta: metav1.ObjectMeta{Name: "csi-node-for-max-pd-test-1"},
            Spec: storagev1.CSINodeSpec{
                Drivers: []storagev1.CSINodeDriver{},
            },
        }
    }

    addDriversCSINode := func(addLimits bool) {
        initCSINode()
        for _, driver := range driverNames {
            driver := storagev1.CSINodeDriver{
                Name:   driver,
                NodeID: "node-for-max-pd-test-1",
            }
            if addLimits {
                driver.Allocatable = &storagev1.VolumeNodeResources{
                    Count: utilpointer.Int32Ptr(int32(limit)),
                }
            }
            csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, driver)
        }
    }

    switch limitSource {
    case "node":
        addLimitToNode()
    case "csinode":
        addDriversCSINode(true)
    case "both":
        addLimitToNode()
        addDriversCSINode(true)
    case "csinode-with-no-limit":
        addDriversCSINode(false)
    case "no-csi-driver":
        initCSINode()
    default:
        // Do nothing.
    }

    nodeInfo.SetNode(node)
    return nodeInfo, csiNode
}