volume_restrictions_test.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package volumerestrictions

import (
	"context"
	"reflect"
	"testing"

	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

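// TestGCEDiskConflicts exercises the VolumeRestrictions Filter with GCEPersistentDisk
// volumes: a pod that references the same PDName as a pod already on the node is
// rejected with ErrReasonDiskConflict, while an empty pod or a pod using a different
// PDName is allowed. Note that only wantStatus is compared against the Filter result;
// the isOk field records the expected outcome but is not asserted.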
func TestGCEDiskConflicts(t *testing.T) {
	volState := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "foo",
					},
				},
			},
		},
	}
	volState2 := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "bar",
					},
				},
			},
		},
	}
	errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
	tests := []struct {
		pod        *v1.Pod
		nodeInfo   *schedulernodeinfo.NodeInfo
		isOk       bool
		name       string
		wantStatus *framework.Status
	}{
		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
		{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
		{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			p, _ := New(nil, nil)
			gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.pod, test.nodeInfo)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}
		})
	}
}

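// TestAWSDiskConflicts covers the same scenarios for AWSElasticBlockStore volumes,
// keyed on VolumeID: reusing "foo" on a node that already mounts it is unschedulable,
// while "bar" does not conflict.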
func TestAWSDiskConflicts(t *testing.T) {
	volState := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
						VolumeID: "foo",
					},
				},
			},
		},
	}
	volState2 := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
						VolumeID: "bar",
					},
				},
			},
		},
	}
	errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
	tests := []struct {
		pod        *v1.Pod
		nodeInfo   *schedulernodeinfo.NodeInfo
		isOk       bool
		name       string
		wantStatus *framework.Status
	}{
		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
		{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
		{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			p, _ := New(nil, nil)
			gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.pod, test.nodeInfo)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}
		})
	}
}

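// TestRBDDiskConflicts covers RBD volumes: a pod whose RBD source exactly matches one
// already on the node (same CephMonitors, RBDPool, and RBDImage) is rejected, while a
// pod that reaches the same pool/image through different monitors is treated as a
// different volume and allowed.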
func TestRBDDiskConflicts(t *testing.T) {
	volState := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					RBD: &v1.RBDVolumeSource{
						CephMonitors: []string{"a", "b"},
						RBDPool:      "foo",
						RBDImage:     "bar",
						FSType:       "ext4",
					},
				},
			},
		},
	}
	volState2 := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					RBD: &v1.RBDVolumeSource{
						CephMonitors: []string{"c", "d"},
						RBDPool:      "foo",
						RBDImage:     "bar",
						FSType:       "ext4",
					},
				},
			},
		},
	}
	errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
	tests := []struct {
		pod        *v1.Pod
		nodeInfo   *schedulernodeinfo.NodeInfo
		isOk       bool
		name       string
		wantStatus *framework.Status
	}{
		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
		{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
		{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			p, _ := New(nil, nil)
			gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.pod, test.nodeInfo)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}
		})
	}
}

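// TestISCSIDiskConflicts covers ISCSI volumes: a pod with an identical
// TargetPortal/IQN/Lun is a conflict, while a pod targeting a different IQN on the
// same portal is allowed.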
func TestISCSIDiskConflicts(t *testing.T) {
	volState := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					ISCSI: &v1.ISCSIVolumeSource{
						TargetPortal: "127.0.0.1:3260",
						IQN:          "iqn.2016-12.server:storage.target01",
						FSType:       "ext4",
						Lun:          0,
					},
				},
			},
		},
	}
	volState2 := v1.PodSpec{
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					ISCSI: &v1.ISCSIVolumeSource{
						TargetPortal: "127.0.0.1:3260",
						IQN:          "iqn.2017-12.server:storage.target01",
						FSType:       "ext4",
						Lun:          0,
					},
				},
			},
		},
	}
	errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
	tests := []struct {
		pod        *v1.Pod
		nodeInfo   *schedulernodeinfo.NodeInfo
		isOk       bool
		name       string
		wantStatus *framework.Status
	}{
		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
		{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
		{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
		{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			p, _ := New(nil, nil)
			gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.pod, test.nodeInfo)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}
		})
	}
}