scheduling_queue_test.go

  1. /*
  2. Copyright 2017 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package queue
  14. import (
  15. "fmt"
  16. "reflect"
  17. "strings"
  18. "sync"
  19. "testing"
  20. "time"
  21. v1 "k8s.io/api/core/v1"
  22. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  23. "k8s.io/apimachinery/pkg/types"
  24. "k8s.io/apimachinery/pkg/util/clock"
  25. "k8s.io/client-go/informers"
  26. "k8s.io/client-go/kubernetes/fake"
  27. "k8s.io/component-base/metrics/testutil"
  28. podutil "k8s.io/kubernetes/pkg/api/v1/pod"
  29. "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
  30. schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
  31. frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
  32. framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
  33. "k8s.io/kubernetes/pkg/scheduler/internal/cache"
  34. "k8s.io/kubernetes/pkg/scheduler/metrics"
  35. "k8s.io/kubernetes/pkg/scheduler/util"
  36. )
  37. const queueMetricMetadata = `
  38. # HELP scheduler_queue_incoming_pods_total [ALPHA] Number of pods added to scheduling queues by event and queue type.
  39. # TYPE scheduler_queue_incoming_pods_total counter
  40. `
  41. var lowPriority, midPriority, highPriority = int32(0), int32(100), int32(1000)
  42. var mediumPriority = (lowPriority + highPriority) / 2
  43. var highPriorityPod, highPriNominatedPod, medPriorityPod, unschedulablePod = v1.Pod{
  44. ObjectMeta: metav1.ObjectMeta{
  45. Name: "hpp",
  46. Namespace: "ns1",
  47. UID: "hppns1",
  48. },
  49. Spec: v1.PodSpec{
  50. Priority: &highPriority,
  51. },
  52. },
  53. v1.Pod{
  54. ObjectMeta: metav1.ObjectMeta{
  55. Name: "hpp",
  56. Namespace: "ns1",
  57. UID: "hppns1",
  58. },
  59. Spec: v1.PodSpec{
  60. Priority: &highPriority,
  61. },
  62. Status: v1.PodStatus{
  63. NominatedNodeName: "node1",
  64. },
  65. },
  66. v1.Pod{
  67. ObjectMeta: metav1.ObjectMeta{
  68. Name: "mpp",
  69. Namespace: "ns2",
  70. UID: "mppns2",
  71. Annotations: map[string]string{
  72. "annot2": "val2",
  73. },
  74. },
  75. Spec: v1.PodSpec{
  76. Priority: &mediumPriority,
  77. },
  78. Status: v1.PodStatus{
  79. NominatedNodeName: "node1",
  80. },
  81. },
  82. v1.Pod{
  83. ObjectMeta: metav1.ObjectMeta{
  84. Name: "up",
  85. Namespace: "ns1",
  86. UID: "upns1",
  87. Annotations: map[string]string{
  88. "annot2": "val2",
  89. },
  90. },
  91. Spec: v1.PodSpec{
  92. Priority: &lowPriority,
  93. },
  94. Status: v1.PodStatus{
  95. Conditions: []v1.PodCondition{
  96. {
  97. Type: v1.PodScheduled,
  98. Status: v1.ConditionFalse,
  99. Reason: v1.PodReasonUnschedulable,
  100. },
  101. },
  102. NominatedNodeName: "node1",
  103. },
  104. }
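// getUnschedulablePod returns the pod stored in the queue's unschedulableQ for the given pod, or nil if it is not there.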
  105. func getUnschedulablePod(p *PriorityQueue, pod *v1.Pod) *v1.Pod {
  106. p.lock.Lock()
  107. defer p.lock.Unlock()
  108. pInfo := p.unschedulableQ.get(pod)
  109. if pInfo != nil {
  110. return pInfo.Pod
  111. }
  112. return nil
  113. }
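// TestPriorityQueue_Add verifies that added pods are tracked in nominatedPods and popped in priority order.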
  114. func TestPriorityQueue_Add(t *testing.T) {
  115. q := createAndRunPriorityQueue(newDefaultFramework())
  116. if err := q.Add(&medPriorityPod); err != nil {
  117. t.Errorf("add failed: %v", err)
  118. }
  119. if err := q.Add(&unschedulablePod); err != nil {
  120. t.Errorf("add failed: %v", err)
  121. }
  122. if err := q.Add(&highPriorityPod); err != nil {
  123. t.Errorf("add failed: %v", err)
  124. }
  125. expectedNominatedPods := &nominatedPodMap{
  126. nominatedPodToNode: map[types.UID]string{
  127. medPriorityPod.UID: "node1",
  128. unschedulablePod.UID: "node1",
  129. },
  130. nominatedPods: map[string][]*v1.Pod{
  131. "node1": {&medPriorityPod, &unschedulablePod},
  132. },
  133. }
  134. if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
  135. t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
  136. }
  137. if p, err := q.Pop(); err != nil || p.Pod != &highPriorityPod {
  138. t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Pod.Name)
  139. }
  140. if p, err := q.Pop(); err != nil || p.Pod != &medPriorityPod {
  141. t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Pod.Name)
  142. }
  143. if p, err := q.Pop(); err != nil || p.Pod != &unschedulablePod {
  144. t.Errorf("Expected: %v after Pop, but got: %v", unschedulablePod.Name, p.Pod.Name)
  145. }
  146. if len(q.nominatedPods.nominatedPods["node1"]) != 2 {
  147. t.Errorf("Expected medPriorityPod and unschedulablePod to be still present in nomindatePods: %v", q.nominatedPods.nominatedPods["node1"])
  148. }
  149. }
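// newDefaultFramework builds a framework from the default algorithm provider's plugins, backed by a fake clientset; it panics on error since it is only used in tests.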
  150. func newDefaultFramework() framework.Framework {
  151. plugins := algorithmprovider.NewRegistry()[schedulerapi.SchedulerDefaultProviderName]
  152. fakeClient := fake.NewSimpleClientset()
  153. fwk, err := framework.NewFramework(
  154. frameworkplugins.NewInTreeRegistry(),
  155. plugins,
  156. nil,
  157. framework.WithClientSet(fakeClient),
  158. framework.WithInformerFactory(informers.NewSharedInformerFactory(fakeClient, 0)),
  159. framework.WithSnapshotSharedLister(cache.NewEmptySnapshot()),
  160. )
  161. if err != nil {
  162. panic(err)
  163. }
  164. return fwk
  165. }
  166. func TestPriorityQueue_AddWithReversePriorityLessFunc(t *testing.T) {
  167. q := createAndRunPriorityQueue(newDefaultFramework())
  168. if err := q.Add(&medPriorityPod); err != nil {
  169. t.Errorf("add failed: %v", err)
  170. }
  171. if err := q.Add(&highPriorityPod); err != nil {
  172. t.Errorf("add failed: %v", err)
  173. }
  174. if p, err := q.Pop(); err != nil || p.Pod != &highPriorityPod {
  175. t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Pod.Name)
  176. }
  177. if p, err := q.Pop(); err != nil || p.Pod != &medPriorityPod {
  178. t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Pod.Name)
  179. }
  180. }
  181. func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) {
  182. q := createAndRunPriorityQueue(newDefaultFramework())
  183. q.Add(&highPriNominatedPod)
  184. q.AddUnschedulableIfNotPresent(newPodInfoNoTimestamp(&highPriNominatedPod), q.SchedulingCycle()) // Must not add anything.
  185. q.AddUnschedulableIfNotPresent(newPodInfoNoTimestamp(&unschedulablePod), q.SchedulingCycle())
  186. expectedNominatedPods := &nominatedPodMap{
  187. nominatedPodToNode: map[types.UID]string{
  188. unschedulablePod.UID: "node1",
  189. highPriNominatedPod.UID: "node1",
  190. },
  191. nominatedPods: map[string][]*v1.Pod{
  192. "node1": {&highPriNominatedPod, &unschedulablePod},
  193. },
  194. }
  195. if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
  196. t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
  197. }
  198. if p, err := q.Pop(); err != nil || p.Pod != &highPriNominatedPod {
  199. t.Errorf("Expected: %v after Pop, but got: %v", highPriNominatedPod.Name, p.Pod.Name)
  200. }
  201. if len(q.nominatedPods.nominatedPods) != 1 {
  202. t.Errorf("Expected nomindatePods to have one element: %v", q.nominatedPods)
  203. }
  204. if getUnschedulablePod(q, &unschedulablePod) != &unschedulablePod {
  205. t.Errorf("Pod %v was not found in the unschedulableQ.", unschedulablePod.Name)
  206. }
  207. }
208. // TestPriorityQueue_AddUnschedulableIfNotPresent_Backoff tests the scenario where
209. // AddUnschedulableIfNotPresent is called asynchronously.
210. // Pods in and before the current scheduling cycle are put back into the active/backoff
211. // queue if we were trying to schedule them when the move request was received.
  212. func TestPriorityQueue_AddUnschedulableIfNotPresent_Backoff(t *testing.T) {
  213. q := createAndRunPriorityQueue(newDefaultFramework(), WithClock(clock.NewFakeClock(time.Now())))
  214. totalNum := 10
  215. expectedPods := make([]v1.Pod, 0, totalNum)
  216. for i := 0; i < totalNum; i++ {
  217. priority := int32(i)
  218. p := v1.Pod{
  219. ObjectMeta: metav1.ObjectMeta{
  220. Name: fmt.Sprintf("pod%d", i),
  221. Namespace: fmt.Sprintf("ns%d", i),
  222. UID: types.UID(fmt.Sprintf("upns%d", i)),
  223. },
  224. Spec: v1.PodSpec{
  225. Priority: &priority,
  226. },
  227. }
  228. expectedPods = append(expectedPods, p)
229. // The priority value is used to order the pods in the PriorityQueue.
  230. q.Add(&p)
  231. }
  232. // Pop all pods except for the first one
  233. for i := totalNum - 1; i > 0; i-- {
  234. p, _ := q.Pop()
  235. if !reflect.DeepEqual(&expectedPods[i], p.Pod) {
  236. t.Errorf("Unexpected pod. Expected: %v, got: %v", &expectedPods[i], p)
  237. }
  238. }
  239. // move all pods to active queue when we were trying to schedule them
  240. q.MoveAllToActiveOrBackoffQueue("test")
  241. oldCycle := q.SchedulingCycle()
  242. firstPod, _ := q.Pop()
  243. if !reflect.DeepEqual(&expectedPods[0], firstPod.Pod) {
  244. t.Errorf("Unexpected pod. Expected: %v, got: %v", &expectedPods[0], firstPod)
  245. }
  246. // mark pods[1] ~ pods[totalNum-1] as unschedulable and add them back
  247. for i := 1; i < totalNum; i++ {
  248. unschedulablePod := expectedPods[i].DeepCopy()
  249. unschedulablePod.Status = v1.PodStatus{
  250. Conditions: []v1.PodCondition{
  251. {
  252. Type: v1.PodScheduled,
  253. Status: v1.ConditionFalse,
  254. Reason: v1.PodReasonUnschedulable,
  255. },
  256. },
  257. }
  258. if err := q.AddUnschedulableIfNotPresent(newPodInfoNoTimestamp(unschedulablePod), oldCycle); err != nil {
  259. t.Errorf("Failed to call AddUnschedulableIfNotPresent(%v): %v", unschedulablePod.Name, err)
  260. }
  261. }
  262. q.lock.RLock()
  263. // Since there was a move request at the same cycle as "oldCycle", these pods
  264. // should be in the backoff queue.
  265. for i := 1; i < totalNum; i++ {
  266. if _, exists, _ := q.podBackoffQ.Get(newPodInfoNoTimestamp(&expectedPods[i])); !exists {
  267. t.Errorf("Expected %v to be added to podBackoffQ.", expectedPods[i].Name)
  268. }
  269. }
  270. q.lock.RUnlock()
  271. }
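// TestPriorityQueue_Pop verifies that Pop blocks until a pod is added and that the popped pod stays in nominatedPods.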
  272. func TestPriorityQueue_Pop(t *testing.T) {
  273. q := createAndRunPriorityQueue(newDefaultFramework())
  274. wg := sync.WaitGroup{}
  275. wg.Add(1)
  276. go func() {
  277. defer wg.Done()
  278. if p, err := q.Pop(); err != nil || p.Pod != &medPriorityPod {
  279. t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Pod.Name)
  280. }
  281. if len(q.nominatedPods.nominatedPods["node1"]) != 1 {
  282. t.Errorf("Expected medPriorityPod to be present in nomindatePods: %v", q.nominatedPods.nominatedPods["node1"])
  283. }
  284. }()
  285. q.Add(&medPriorityPod)
  286. wg.Wait()
  287. }
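// TestPriorityQueue_Update verifies that Update adds unknown pods to activeQ, keeps nominatedPods in sync, and moves updated unschedulable pods back to activeQ.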
  288. func TestPriorityQueue_Update(t *testing.T) {
  289. q := createAndRunPriorityQueue(newDefaultFramework())
  290. q.Update(nil, &highPriorityPod)
  291. q.lock.RLock()
  292. if _, exists, _ := q.activeQ.Get(newPodInfoNoTimestamp(&highPriorityPod)); !exists {
  293. t.Errorf("Expected %v to be added to activeQ.", highPriorityPod.Name)
  294. }
  295. q.lock.RUnlock()
  296. if len(q.nominatedPods.nominatedPods) != 0 {
  297. t.Errorf("Expected nomindatePods to be empty: %v", q.nominatedPods)
  298. }
  299. // Update highPriorityPod and add a nominatedNodeName to it.
  300. q.Update(&highPriorityPod, &highPriNominatedPod)
  301. q.lock.RLock()
  302. if q.activeQ.Len() != 1 {
  303. t.Error("Expected only one item in activeQ.")
  304. }
  305. q.lock.RUnlock()
  306. if len(q.nominatedPods.nominatedPods) != 1 {
  307. t.Errorf("Expected one item in nomindatePods map: %v", q.nominatedPods)
  308. }
309. // Updating an unschedulable pod which is not in either of the two queues should
310. // add the pod to activeQ.
  311. q.Update(&unschedulablePod, &unschedulablePod)
  312. q.lock.RLock()
  313. if _, exists, _ := q.activeQ.Get(newPodInfoNoTimestamp(&unschedulablePod)); !exists {
  314. t.Errorf("Expected %v to be added to activeQ.", unschedulablePod.Name)
  315. }
  316. q.lock.RUnlock()
  317. // Updating a pod that is already in activeQ, should not change it.
  318. q.Update(&unschedulablePod, &unschedulablePod)
  319. if len(q.unschedulableQ.podInfoMap) != 0 {
  320. t.Error("Expected unschedulableQ to be empty.")
  321. }
  322. q.lock.RLock()
  323. if _, exists, _ := q.activeQ.Get(newPodInfoNoTimestamp(&unschedulablePod)); !exists {
  324. t.Errorf("Expected: %v to be added to activeQ.", unschedulablePod.Name)
  325. }
  326. q.lock.RUnlock()
  327. if p, err := q.Pop(); err != nil || p.Pod != &highPriNominatedPod {
  328. t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Pod.Name)
  329. }
  330. // Updating a pod that is in unschedulableQ in a way that it may
  331. // become schedulable should add the pod to the activeQ.
  332. q.AddUnschedulableIfNotPresent(q.newPodInfo(&medPriorityPod), q.SchedulingCycle())
  333. if len(q.unschedulableQ.podInfoMap) != 1 {
  334. t.Error("Expected unschedulableQ to be 1.")
  335. }
  336. updatedPod := medPriorityPod.DeepCopy()
  337. updatedPod.ClusterName = "test"
  338. q.Update(&medPriorityPod, updatedPod)
  339. if p, err := q.Pop(); err != nil || p.Pod != updatedPod {
  340. t.Errorf("Expected: %v after Pop, but got: %v", updatedPod.Name, p.Pod.Name)
  341. }
  342. }
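// TestPriorityQueue_Delete verifies that Delete removes a pod from activeQ and from nominatedPods.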
  343. func TestPriorityQueue_Delete(t *testing.T) {
  344. q := createAndRunPriorityQueue(newDefaultFramework())
  345. q.Update(&highPriorityPod, &highPriNominatedPod)
  346. q.Add(&unschedulablePod)
  347. if err := q.Delete(&highPriNominatedPod); err != nil {
  348. t.Errorf("delete failed: %v", err)
  349. }
  350. q.lock.RLock()
  351. if _, exists, _ := q.activeQ.Get(newPodInfoNoTimestamp(&unschedulablePod)); !exists {
  352. t.Errorf("Expected %v to be in activeQ.", unschedulablePod.Name)
  353. }
  354. if _, exists, _ := q.activeQ.Get(newPodInfoNoTimestamp(&highPriNominatedPod)); exists {
  355. t.Errorf("Didn't expect %v to be in activeQ.", highPriorityPod.Name)
  356. }
  357. q.lock.RUnlock()
  358. if len(q.nominatedPods.nominatedPods) != 1 {
  359. t.Errorf("Expected nomindatePods to have only 'unschedulablePod': %v", q.nominatedPods.nominatedPods)
  360. }
  361. if err := q.Delete(&unschedulablePod); err != nil {
  362. t.Errorf("delete failed: %v", err)
  363. }
  364. if len(q.nominatedPods.nominatedPods) != 0 {
  365. t.Errorf("Expected nomindatePods to be empty: %v", q.nominatedPods)
  366. }
  367. }
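// TestPriorityQueue_MoveAllToActiveOrBackoffQueue verifies that unschedulable pods are moved to activeQ or podBackoffQ depending on their backoff state.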
  368. func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) {
  369. q := createAndRunPriorityQueue(newDefaultFramework())
  370. q.Add(&medPriorityPod)
  371. q.AddUnschedulableIfNotPresent(q.newPodInfo(&unschedulablePod), q.SchedulingCycle())
  372. q.AddUnschedulableIfNotPresent(q.newPodInfo(&highPriorityPod), q.SchedulingCycle())
  373. q.MoveAllToActiveOrBackoffQueue("test")
  374. q.lock.RLock()
  375. defer q.lock.RUnlock()
  376. if q.activeQ.Len() != 1 {
  377. t.Error("Expected 1 item to be in activeQ")
  378. }
  379. if q.podBackoffQ.Len() != 2 {
  380. t.Error("Expected 2 items to be in podBackoffQ")
  381. }
  382. }
  383. // TestPriorityQueue_AssignedPodAdded tests AssignedPodAdded. It checks that
  384. // when a pod with pod affinity is in unschedulableQ and another pod with a
  385. // matching label is added, the unschedulable pod is moved to activeQ.
  386. func TestPriorityQueue_AssignedPodAdded(t *testing.T) {
  387. affinityPod := unschedulablePod.DeepCopy()
  388. affinityPod.Name = "afp"
  389. affinityPod.Spec = v1.PodSpec{
  390. Affinity: &v1.Affinity{
  391. PodAffinity: &v1.PodAffinity{
  392. RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
  393. {
  394. LabelSelector: &metav1.LabelSelector{
  395. MatchExpressions: []metav1.LabelSelectorRequirement{
  396. {
  397. Key: "service",
  398. Operator: metav1.LabelSelectorOpIn,
  399. Values: []string{"securityscan", "value2"},
  400. },
  401. },
  402. },
  403. TopologyKey: "region",
  404. },
  405. },
  406. },
  407. },
  408. Priority: &mediumPriority,
  409. }
  410. labelPod := v1.Pod{
  411. ObjectMeta: metav1.ObjectMeta{
  412. Name: "lbp",
  413. Namespace: affinityPod.Namespace,
  414. Labels: map[string]string{"service": "securityscan"},
  415. },
  416. Spec: v1.PodSpec{NodeName: "machine1"},
  417. }
  418. c := clock.NewFakeClock(time.Now())
  419. q := createAndRunPriorityQueue(newDefaultFramework(), WithClock(c))
  420. q.Add(&medPriorityPod)
  421. // Add a couple of pods to the unschedulableQ.
  422. q.AddUnschedulableIfNotPresent(q.newPodInfo(&unschedulablePod), q.SchedulingCycle())
  423. q.AddUnschedulableIfNotPresent(q.newPodInfo(affinityPod), q.SchedulingCycle())
  424. // Move clock to make the unschedulable pods complete backoff.
  425. c.Step(DefaultPodInitialBackoffDuration + time.Second)
  426. // Simulate addition of an assigned pod. The pod has matching labels for
  427. // affinityPod. So, affinityPod should go to activeQ.
  428. q.AssignedPodAdded(&labelPod)
  429. if getUnschedulablePod(q, affinityPod) != nil {
  430. t.Error("affinityPod is still in the unschedulableQ.")
  431. }
  432. q.lock.RLock()
  433. if _, exists, _ := q.activeQ.Get(newPodInfoNoTimestamp(affinityPod)); !exists {
  434. t.Error("affinityPod is not moved to activeQ.")
  435. }
  436. q.lock.RUnlock()
  437. // Check that the other pod is still in the unschedulableQ.
  438. if getUnschedulablePod(q, &unschedulablePod) == nil {
  439. t.Error("unschedulablePod is not in the unschedulableQ.")
  440. }
  441. }
  442. func TestPriorityQueue_NominatedPodsForNode(t *testing.T) {
  443. q := createAndRunPriorityQueue(newDefaultFramework())
  444. q.Add(&medPriorityPod)
  445. q.Add(&unschedulablePod)
  446. q.Add(&highPriorityPod)
  447. if p, err := q.Pop(); err != nil || p.Pod != &highPriorityPod {
  448. t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Pod.Name)
  449. }
  450. expectedList := []*v1.Pod{&medPriorityPod, &unschedulablePod}
  451. if !reflect.DeepEqual(expectedList, q.NominatedPodsForNode("node1")) {
  452. t.Error("Unexpected list of nominated Pods for node.")
  453. }
  454. if q.NominatedPodsForNode("node2") != nil {
  455. t.Error("Expected list of nominated Pods for node2 to be empty.")
  456. }
  457. }
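// TestPriorityQueue_PendingPods verifies that PendingPods returns the pods from all internal queues.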
  458. func TestPriorityQueue_PendingPods(t *testing.T) {
  459. makeSet := func(pods []*v1.Pod) map[*v1.Pod]struct{} {
  460. pendingSet := map[*v1.Pod]struct{}{}
  461. for _, p := range pods {
  462. pendingSet[p] = struct{}{}
  463. }
  464. return pendingSet
  465. }
  466. q := createAndRunPriorityQueue(newDefaultFramework())
  467. q.Add(&medPriorityPod)
  468. q.AddUnschedulableIfNotPresent(q.newPodInfo(&unschedulablePod), q.SchedulingCycle())
  469. q.AddUnschedulableIfNotPresent(q.newPodInfo(&highPriorityPod), q.SchedulingCycle())
  470. expectedSet := makeSet([]*v1.Pod{&medPriorityPod, &unschedulablePod, &highPriorityPod})
  471. if !reflect.DeepEqual(expectedSet, makeSet(q.PendingPods())) {
  472. t.Error("Unexpected list of pending Pods.")
  473. }
  474. // Move all to active queue. We should still see the same set of pods.
  475. q.MoveAllToActiveOrBackoffQueue("test")
  476. if !reflect.DeepEqual(expectedSet, makeSet(q.PendingPods())) {
  477. t.Error("Unexpected list of pending Pods...")
  478. }
  479. }
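// TestPriorityQueue_UpdateNominatedPodForNode verifies that nominatedPods is kept up to date as nominations are added, updated, and deleted.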
  480. func TestPriorityQueue_UpdateNominatedPodForNode(t *testing.T) {
  481. q := createAndRunPriorityQueue(newDefaultFramework())
  482. if err := q.Add(&medPriorityPod); err != nil {
  483. t.Errorf("add failed: %v", err)
  484. }
485. // Update the nominated node of unschedulablePod to a node other than the one specified in its pod object.
  486. q.UpdateNominatedPodForNode(&unschedulablePod, "node5")
487. // Update the nominated node name of a pod to a node that is not specified in the pod object.
  488. q.UpdateNominatedPodForNode(&highPriorityPod, "node2")
  489. expectedNominatedPods := &nominatedPodMap{
  490. nominatedPodToNode: map[types.UID]string{
  491. medPriorityPod.UID: "node1",
  492. highPriorityPod.UID: "node2",
  493. unschedulablePod.UID: "node5",
  494. },
  495. nominatedPods: map[string][]*v1.Pod{
  496. "node1": {&medPriorityPod},
  497. "node2": {&highPriorityPod},
  498. "node5": {&unschedulablePod},
  499. },
  500. }
  501. if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
  502. t.Errorf("Unexpected nominated map after adding pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
  503. }
  504. if p, err := q.Pop(); err != nil || p.Pod != &medPriorityPod {
  505. t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Pod.Name)
  506. }
  507. // List of nominated pods shouldn't change after popping them from the queue.
  508. if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
  509. t.Errorf("Unexpected nominated map after popping pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
  510. }
  511. // Update one of the nominated pods that doesn't have nominatedNodeName in the
  512. // pod object. It should be updated correctly.
  513. q.UpdateNominatedPodForNode(&highPriorityPod, "node4")
  514. expectedNominatedPods = &nominatedPodMap{
  515. nominatedPodToNode: map[types.UID]string{
  516. medPriorityPod.UID: "node1",
  517. highPriorityPod.UID: "node4",
  518. unschedulablePod.UID: "node5",
  519. },
  520. nominatedPods: map[string][]*v1.Pod{
  521. "node1": {&medPriorityPod},
  522. "node4": {&highPriorityPod},
  523. "node5": {&unschedulablePod},
  524. },
  525. }
  526. if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
  527. t.Errorf("Unexpected nominated map after updating pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
  528. }
  529. // Delete a nominated pod that doesn't have nominatedNodeName in the pod
  530. // object. It should be deleted.
  531. q.DeleteNominatedPodIfExists(&highPriorityPod)
  532. expectedNominatedPods = &nominatedPodMap{
  533. nominatedPodToNode: map[types.UID]string{
  534. medPriorityPod.UID: "node1",
  535. unschedulablePod.UID: "node5",
  536. },
  537. nominatedPods: map[string][]*v1.Pod{
  538. "node1": {&medPriorityPod},
  539. "node5": {&unschedulablePod},
  540. },
  541. }
  542. if !reflect.DeepEqual(q.nominatedPods, expectedNominatedPods) {
  543. t.Errorf("Unexpected nominated map after deleting pods. Expected: %v, got: %v", expectedNominatedPods, q.nominatedPods)
  544. }
  545. }
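// TestPriorityQueue_NewWithOptions verifies that the backoff duration options are applied to the queue.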
  546. func TestPriorityQueue_NewWithOptions(t *testing.T) {
  547. q := createAndRunPriorityQueue(
  548. newDefaultFramework(),
  549. WithPodInitialBackoffDuration(2*time.Second),
  550. WithPodMaxBackoffDuration(20*time.Second),
  551. )
  552. if q.podInitialBackoffDuration != 2*time.Second {
  553. t.Errorf("Unexpected pod backoff initial duration. Expected: %v, got: %v", 2*time.Second, q.podInitialBackoffDuration)
  554. }
  555. if q.podMaxBackoffDuration != 20*time.Second {
  556. t.Errorf("Unexpected pod backoff max duration. Expected: %v, got: %v", 2*time.Second, q.podMaxBackoffDuration)
  557. }
  558. }
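// TestUnschedulablePodsMap verifies addOrUpdate, delete, and clear on the unschedulable pods map.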
  559. func TestUnschedulablePodsMap(t *testing.T) {
  560. var pods = []*v1.Pod{
  561. {
  562. ObjectMeta: metav1.ObjectMeta{
  563. Name: "p0",
  564. Namespace: "ns1",
  565. Annotations: map[string]string{
  566. "annot1": "val1",
  567. },
  568. },
  569. Status: v1.PodStatus{
  570. NominatedNodeName: "node1",
  571. },
  572. },
  573. {
  574. ObjectMeta: metav1.ObjectMeta{
  575. Name: "p1",
  576. Namespace: "ns1",
  577. Annotations: map[string]string{
  578. "annot": "val",
  579. },
  580. },
  581. },
  582. {
  583. ObjectMeta: metav1.ObjectMeta{
  584. Name: "p2",
  585. Namespace: "ns2",
  586. Annotations: map[string]string{
  587. "annot2": "val2", "annot3": "val3",
  588. },
  589. },
  590. Status: v1.PodStatus{
  591. NominatedNodeName: "node3",
  592. },
  593. },
  594. {
  595. ObjectMeta: metav1.ObjectMeta{
  596. Name: "p3",
  597. Namespace: "ns4",
  598. },
  599. Status: v1.PodStatus{
  600. NominatedNodeName: "node1",
  601. },
  602. },
  603. }
  604. var updatedPods = make([]*v1.Pod, len(pods))
  605. updatedPods[0] = pods[0].DeepCopy()
  606. updatedPods[1] = pods[1].DeepCopy()
  607. updatedPods[3] = pods[3].DeepCopy()
  608. tests := []struct {
  609. name string
  610. podsToAdd []*v1.Pod
  611. expectedMapAfterAdd map[string]*framework.PodInfo
  612. podsToUpdate []*v1.Pod
  613. expectedMapAfterUpdate map[string]*framework.PodInfo
  614. podsToDelete []*v1.Pod
  615. expectedMapAfterDelete map[string]*framework.PodInfo
  616. }{
  617. {
  618. name: "create, update, delete subset of pods",
  619. podsToAdd: []*v1.Pod{pods[0], pods[1], pods[2], pods[3]},
  620. expectedMapAfterAdd: map[string]*framework.PodInfo{
  621. util.GetPodFullName(pods[0]): {Pod: pods[0]},
  622. util.GetPodFullName(pods[1]): {Pod: pods[1]},
  623. util.GetPodFullName(pods[2]): {Pod: pods[2]},
  624. util.GetPodFullName(pods[3]): {Pod: pods[3]},
  625. },
  626. podsToUpdate: []*v1.Pod{updatedPods[0]},
  627. expectedMapAfterUpdate: map[string]*framework.PodInfo{
  628. util.GetPodFullName(pods[0]): {Pod: updatedPods[0]},
  629. util.GetPodFullName(pods[1]): {Pod: pods[1]},
  630. util.GetPodFullName(pods[2]): {Pod: pods[2]},
  631. util.GetPodFullName(pods[3]): {Pod: pods[3]},
  632. },
  633. podsToDelete: []*v1.Pod{pods[0], pods[1]},
  634. expectedMapAfterDelete: map[string]*framework.PodInfo{
  635. util.GetPodFullName(pods[2]): {Pod: pods[2]},
  636. util.GetPodFullName(pods[3]): {Pod: pods[3]},
  637. },
  638. },
  639. {
  640. name: "create, update, delete all",
  641. podsToAdd: []*v1.Pod{pods[0], pods[3]},
  642. expectedMapAfterAdd: map[string]*framework.PodInfo{
  643. util.GetPodFullName(pods[0]): {Pod: pods[0]},
  644. util.GetPodFullName(pods[3]): {Pod: pods[3]},
  645. },
  646. podsToUpdate: []*v1.Pod{updatedPods[3]},
  647. expectedMapAfterUpdate: map[string]*framework.PodInfo{
  648. util.GetPodFullName(pods[0]): {Pod: pods[0]},
  649. util.GetPodFullName(pods[3]): {Pod: updatedPods[3]},
  650. },
  651. podsToDelete: []*v1.Pod{pods[0], pods[3]},
  652. expectedMapAfterDelete: map[string]*framework.PodInfo{},
  653. },
  654. {
  655. name: "delete non-existing and existing pods",
  656. podsToAdd: []*v1.Pod{pods[1], pods[2]},
  657. expectedMapAfterAdd: map[string]*framework.PodInfo{
  658. util.GetPodFullName(pods[1]): {Pod: pods[1]},
  659. util.GetPodFullName(pods[2]): {Pod: pods[2]},
  660. },
  661. podsToUpdate: []*v1.Pod{updatedPods[1]},
  662. expectedMapAfterUpdate: map[string]*framework.PodInfo{
  663. util.GetPodFullName(pods[1]): {Pod: updatedPods[1]},
  664. util.GetPodFullName(pods[2]): {Pod: pods[2]},
  665. },
  666. podsToDelete: []*v1.Pod{pods[2], pods[3]},
  667. expectedMapAfterDelete: map[string]*framework.PodInfo{
  668. util.GetPodFullName(pods[1]): {Pod: updatedPods[1]},
  669. },
  670. },
  671. }
  672. for _, test := range tests {
  673. t.Run(test.name, func(t *testing.T) {
  674. upm := newUnschedulablePodsMap(nil)
  675. for _, p := range test.podsToAdd {
  676. upm.addOrUpdate(newPodInfoNoTimestamp(p))
  677. }
  678. if !reflect.DeepEqual(upm.podInfoMap, test.expectedMapAfterAdd) {
  679. t.Errorf("Unexpected map after adding pods. Expected: %v, got: %v",
  680. test.expectedMapAfterAdd, upm.podInfoMap)
  681. }
  682. if len(test.podsToUpdate) > 0 {
  683. for _, p := range test.podsToUpdate {
  684. upm.addOrUpdate(newPodInfoNoTimestamp(p))
  685. }
  686. if !reflect.DeepEqual(upm.podInfoMap, test.expectedMapAfterUpdate) {
  687. t.Errorf("Unexpected map after updating pods. Expected: %v, got: %v",
  688. test.expectedMapAfterUpdate, upm.podInfoMap)
  689. }
  690. }
  691. for _, p := range test.podsToDelete {
  692. upm.delete(p)
  693. }
  694. if !reflect.DeepEqual(upm.podInfoMap, test.expectedMapAfterDelete) {
  695. t.Errorf("Unexpected map after deleting pods. Expected: %v, got: %v",
  696. test.expectedMapAfterDelete, upm.podInfoMap)
  697. }
  698. upm.clear()
  699. if len(upm.podInfoMap) != 0 {
  700. t.Errorf("Expected the map to be empty, but has %v elements.", len(upm.podInfoMap))
  701. }
  702. })
  703. }
  704. }
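// TestSchedulingQueue_Close verifies that a blocked Pop returns the queueClosed error once the queue is closed.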
  705. func TestSchedulingQueue_Close(t *testing.T) {
  706. tests := []struct {
  707. name string
  708. q SchedulingQueue
  709. expectedErr error
  710. }{
  711. {
  712. name: "PriorityQueue close",
  713. q: createAndRunPriorityQueue(newDefaultFramework()),
  714. expectedErr: fmt.Errorf(queueClosed),
  715. },
  716. }
  717. for _, test := range tests {
  718. t.Run(test.name, func(t *testing.T) {
  719. wg := sync.WaitGroup{}
  720. wg.Add(1)
  721. go func() {
  722. defer wg.Done()
  723. pod, err := test.q.Pop()
  724. if err.Error() != test.expectedErr.Error() {
  725. t.Errorf("Expected err %q from Pop() if queue is closed, but got %q", test.expectedErr.Error(), err.Error())
  726. }
  727. if pod != nil {
  728. t.Errorf("Expected pod nil from Pop() if queue is closed, but got: %v", pod)
  729. }
  730. }()
  731. test.q.Close()
  732. wg.Wait()
  733. })
  734. }
  735. }
736. // TestRecentlyTriedPodsGoBack tests that pods which were recently tried and found
737. // unschedulable go behind other pods with the same priority. This behavior
738. // ensures that an unschedulable pod does not block the head of the queue when there
739. // are frequent events that move pods to the active queue.
  740. func TestRecentlyTriedPodsGoBack(t *testing.T) {
  741. q := createAndRunPriorityQueue(newDefaultFramework())
  742. // Add a few pods to priority queue.
  743. for i := 0; i < 5; i++ {
  744. p := v1.Pod{
  745. ObjectMeta: metav1.ObjectMeta{
  746. Name: fmt.Sprintf("test-pod-%v", i),
  747. Namespace: "ns1",
  748. UID: types.UID(fmt.Sprintf("tp00%v", i)),
  749. },
  750. Spec: v1.PodSpec{
  751. Priority: &highPriority,
  752. },
  753. Status: v1.PodStatus{
  754. NominatedNodeName: "node1",
  755. },
  756. }
  757. q.Add(&p)
  758. }
  759. // Simulate a pod being popped by the scheduler, determined unschedulable, and
  760. // then moved back to the active queue.
  761. p1, err := q.Pop()
  762. if err != nil {
  763. t.Errorf("Error while popping the head of the queue: %v", err)
  764. }
  765. // Update pod condition to unschedulable.
  766. podutil.UpdatePodCondition(&p1.Pod.Status, &v1.PodCondition{
  767. Type: v1.PodScheduled,
  768. Status: v1.ConditionFalse,
  769. Reason: v1.PodReasonUnschedulable,
  770. Message: "fake scheduling failure",
  771. LastProbeTime: metav1.Now(),
  772. })
  773. // Put in the unschedulable queue.
  774. q.AddUnschedulableIfNotPresent(p1, q.SchedulingCycle())
  775. // Move all unschedulable pods to the active queue.
  776. q.MoveAllToActiveOrBackoffQueue("test")
  777. // Simulation is over. Now let's pop all pods. The pod popped first should be
  778. // the last one we pop here.
  779. for i := 0; i < 5; i++ {
  780. p, err := q.Pop()
  781. if err != nil {
  782. t.Errorf("Error while popping pods from the queue: %v", err)
  783. }
  784. if (i == 4) != (p1 == p) {
  785. t.Errorf("A pod tried before is not the last pod popped: i: %v, pod name: %v", i, p.Pod.Name)
  786. }
  787. }
  788. }
  789. // TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod tests
  790. // that a pod determined as unschedulable multiple times doesn't block any newer pod.
791. // This behavior ensures that an unschedulable pod does not block the head of the queue when there
  792. // are frequent events that move pods to the active queue.
  793. func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) {
  794. c := clock.NewFakeClock(time.Now())
  795. q := createAndRunPriorityQueue(newDefaultFramework(), WithClock(c))
  796. // Add an unschedulable pod to a priority queue.
797. // This simulates a situation where the pod was already tried for scheduling
798. // and was determined to be unschedulable.
  799. unschedulablePod := v1.Pod{
  800. ObjectMeta: metav1.ObjectMeta{
  801. Name: "test-pod-unscheduled",
  802. Namespace: "ns1",
  803. UID: "tp001",
  804. },
  805. Spec: v1.PodSpec{
  806. Priority: &highPriority,
  807. },
  808. Status: v1.PodStatus{
  809. NominatedNodeName: "node1",
  810. },
  811. }
  812. // Update pod condition to unschedulable.
  813. podutil.UpdatePodCondition(&unschedulablePod.Status, &v1.PodCondition{
  814. Type: v1.PodScheduled,
  815. Status: v1.ConditionFalse,
  816. Reason: v1.PodReasonUnschedulable,
  817. Message: "fake scheduling failure",
  818. })
  819. // Put in the unschedulable queue
  820. q.AddUnschedulableIfNotPresent(newPodInfoNoTimestamp(&unschedulablePod), q.SchedulingCycle())
  821. // Move clock to make the unschedulable pods complete backoff.
  822. c.Step(DefaultPodInitialBackoffDuration + time.Second)
  823. // Move all unschedulable pods to the active queue.
  824. q.MoveAllToActiveOrBackoffQueue("test")
825. // Simulate a pod being popped by the scheduler.
826. // At this time, the unschedulable pod should be popped.
  827. p1, err := q.Pop()
  828. if err != nil {
  829. t.Errorf("Error while popping the head of the queue: %v", err)
  830. }
  831. if p1.Pod != &unschedulablePod {
  832. t.Errorf("Expected that test-pod-unscheduled was popped, got %v", p1.Pod.Name)
  833. }
834. // Assume a newer pod was added just after the unschedulable pod
835. // was popped and before it was pushed back to the queue.
  836. newerPod := v1.Pod{
  837. ObjectMeta: metav1.ObjectMeta{
  838. Name: "test-newer-pod",
  839. Namespace: "ns1",
  840. UID: "tp002",
  841. CreationTimestamp: metav1.Now(),
  842. },
  843. Spec: v1.PodSpec{
  844. Priority: &highPriority,
  845. },
  846. Status: v1.PodStatus{
  847. NominatedNodeName: "node1",
  848. },
  849. }
  850. q.Add(&newerPod)
  851. // And then unschedulablePod was determined as unschedulable AGAIN.
  852. podutil.UpdatePodCondition(&unschedulablePod.Status, &v1.PodCondition{
  853. Type: v1.PodScheduled,
  854. Status: v1.ConditionFalse,
  855. Reason: v1.PodReasonUnschedulable,
  856. Message: "fake scheduling failure",
  857. })
  858. // And then, put unschedulable pod to the unschedulable queue
  859. q.AddUnschedulableIfNotPresent(newPodInfoNoTimestamp(&unschedulablePod), q.SchedulingCycle())
  860. // Move clock to make the unschedulable pods complete backoff.
  861. c.Step(DefaultPodInitialBackoffDuration + time.Second)
  862. // Move all unschedulable pods to the active queue.
  863. q.MoveAllToActiveOrBackoffQueue("test")
864. // At this time, newerPod should be popped first,
865. // because it now carries the oldest timestamp in the queue.
  866. p2, err2 := q.Pop()
  867. if err2 != nil {
  868. t.Errorf("Error while popping the head of the queue: %v", err2)
  869. }
  870. if p2.Pod != &newerPod {
  871. t.Errorf("Expected that test-newer-pod was popped, got %v", p2.Pod.Name)
  872. }
  873. }
  874. // TestHighPriorityBackoff tests that a high priority pod does not block
  875. // other pods if it is unschedulable
  876. func TestHighPriorityBackoff(t *testing.T) {
  877. q := createAndRunPriorityQueue(newDefaultFramework())
  878. midPod := v1.Pod{
  879. ObjectMeta: metav1.ObjectMeta{
  880. Name: "test-midpod",
  881. Namespace: "ns1",
  882. UID: types.UID("tp-mid"),
  883. },
  884. Spec: v1.PodSpec{
  885. Priority: &midPriority,
  886. },
  887. Status: v1.PodStatus{
  888. NominatedNodeName: "node1",
  889. },
  890. }
  891. highPod := v1.Pod{
  892. ObjectMeta: metav1.ObjectMeta{
  893. Name: "test-highpod",
  894. Namespace: "ns1",
  895. UID: types.UID("tp-high"),
  896. },
  897. Spec: v1.PodSpec{
  898. Priority: &highPriority,
  899. },
  900. Status: v1.PodStatus{
  901. NominatedNodeName: "node1",
  902. },
  903. }
  904. q.Add(&midPod)
  905. q.Add(&highPod)
  906. // Simulate a pod being popped by the scheduler, determined unschedulable, and
  907. // then moved back to the active queue.
  908. p, err := q.Pop()
  909. if err != nil {
  910. t.Errorf("Error while popping the head of the queue: %v", err)
  911. }
  912. if p.Pod != &highPod {
  913. t.Errorf("Expected to get high priority pod, got: %v", p)
  914. }
  915. // Update pod condition to unschedulable.
  916. podutil.UpdatePodCondition(&p.Pod.Status, &v1.PodCondition{
  917. Type: v1.PodScheduled,
  918. Status: v1.ConditionFalse,
  919. Reason: v1.PodReasonUnschedulable,
  920. Message: "fake scheduling failure",
  921. })
  922. // Put in the unschedulable queue.
  923. q.AddUnschedulableIfNotPresent(p, q.SchedulingCycle())
  924. // Move all unschedulable pods to the active queue.
  925. q.MoveAllToActiveOrBackoffQueue("test")
  926. p, err = q.Pop()
  927. if err != nil {
  928. t.Errorf("Error while popping the head of the queue: %v", err)
  929. }
  930. if p.Pod != &midPod {
  931. t.Errorf("Expected to get mid priority pod, got: %v", p)
  932. }
  933. }
934. // TestHighPriorityFlushUnschedulableQLeftover tests that pods in unschedulableQ
935. // are moved to activeQ after staying there for longer than unschedulableQTimeInterval.
  936. func TestHighPriorityFlushUnschedulableQLeftover(t *testing.T) {
  937. c := clock.NewFakeClock(time.Now())
  938. q := createAndRunPriorityQueue(newDefaultFramework(), WithClock(c))
  939. midPod := v1.Pod{
  940. ObjectMeta: metav1.ObjectMeta{
  941. Name: "test-midpod",
  942. Namespace: "ns1",
  943. UID: types.UID("tp-mid"),
  944. },
  945. Spec: v1.PodSpec{
  946. Priority: &midPriority,
  947. },
  948. Status: v1.PodStatus{
  949. NominatedNodeName: "node1",
  950. },
  951. }
  952. highPod := v1.Pod{
  953. ObjectMeta: metav1.ObjectMeta{
  954. Name: "test-highpod",
  955. Namespace: "ns1",
  956. UID: types.UID("tp-high"),
  957. },
  958. Spec: v1.PodSpec{
  959. Priority: &highPriority,
  960. },
  961. Status: v1.PodStatus{
  962. NominatedNodeName: "node1",
  963. },
  964. }
965. // Update highPod's condition to unschedulable.
  966. podutil.UpdatePodCondition(&highPod.Status, &v1.PodCondition{
  967. Type: v1.PodScheduled,
  968. Status: v1.ConditionFalse,
  969. Reason: v1.PodReasonUnschedulable,
  970. Message: "fake scheduling failure",
  971. })
972. // Update midPod's condition to unschedulable.
  973. podutil.UpdatePodCondition(&midPod.Status, &v1.PodCondition{
  974. Type: v1.PodScheduled,
  975. Status: v1.ConditionFalse,
  976. Reason: v1.PodReasonUnschedulable,
  977. Message: "fake scheduling failure",
  978. })
  979. q.AddUnschedulableIfNotPresent(q.newPodInfo(&highPod), q.SchedulingCycle())
  980. q.AddUnschedulableIfNotPresent(q.newPodInfo(&midPod), q.SchedulingCycle())
  981. c.Step(unschedulableQTimeInterval + time.Second)
  982. if p, err := q.Pop(); err != nil || p.Pod != &highPod {
  983. t.Errorf("Expected: %v after Pop, but got: %v", highPriorityPod.Name, p.Pod.Name)
  984. }
  985. if p, err := q.Pop(); err != nil || p.Pod != &midPod {
  986. t.Errorf("Expected: %v after Pop, but got: %v", medPriorityPod.Name, p.Pod.Name)
  987. }
  988. }
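// operation mutates the queue in a single test step; the helpers below are composed by the table-driven tests that follow.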
  989. type operation func(queue *PriorityQueue, pInfo *framework.PodInfo)
  990. var (
  991. add = func(queue *PriorityQueue, pInfo *framework.PodInfo) {
  992. queue.Add(pInfo.Pod)
  993. }
  994. addUnschedulablePodBackToUnschedulableQ = func(queue *PriorityQueue, pInfo *framework.PodInfo) {
  995. queue.AddUnschedulableIfNotPresent(pInfo, 0)
  996. }
  997. addUnschedulablePodBackToBackoffQ = func(queue *PriorityQueue, pInfo *framework.PodInfo) {
  998. queue.AddUnschedulableIfNotPresent(pInfo, -1)
  999. }
  1000. addPodActiveQ = func(queue *PriorityQueue, pInfo *framework.PodInfo) {
  1001. queue.lock.Lock()
  1002. queue.activeQ.Add(pInfo)
  1003. queue.lock.Unlock()
  1004. }
  1005. updatePodActiveQ = func(queue *PriorityQueue, pInfo *framework.PodInfo) {
  1006. queue.lock.Lock()
  1007. queue.activeQ.Update(pInfo)
  1008. queue.lock.Unlock()
  1009. }
  1010. addPodUnschedulableQ = func(queue *PriorityQueue, pInfo *framework.PodInfo) {
  1011. queue.lock.Lock()
  1012. // Update pod condition to unschedulable.
  1013. podutil.UpdatePodCondition(&pInfo.Pod.Status, &v1.PodCondition{
  1014. Type: v1.PodScheduled,
  1015. Status: v1.ConditionFalse,
  1016. Reason: v1.PodReasonUnschedulable,
  1017. Message: "fake scheduling failure",
  1018. })
  1019. queue.unschedulableQ.addOrUpdate(pInfo)
  1020. queue.lock.Unlock()
  1021. }
  1022. addPodBackoffQ = func(queue *PriorityQueue, pInfo *framework.PodInfo) {
  1023. queue.lock.Lock()
  1024. queue.podBackoffQ.Add(pInfo)
  1025. queue.lock.Unlock()
  1026. }
  1027. moveAllToActiveOrBackoffQ = func(queue *PriorityQueue, _ *framework.PodInfo) {
  1028. queue.MoveAllToActiveOrBackoffQueue("test")
  1029. }
  1030. flushBackoffQ = func(queue *PriorityQueue, _ *framework.PodInfo) {
  1031. queue.clock.(*clock.FakeClock).Step(2 * time.Second)
  1032. queue.flushBackoffQCompleted()
  1033. }
  1034. moveClockForward = func(queue *PriorityQueue, _ *framework.PodInfo) {
  1035. queue.clock.(*clock.FakeClock).Step(2 * time.Second)
  1036. }
  1037. )
1038. // TestPodTimestamp verifies that pods in activeQ are ordered by their PodInfo timestamp after various queue operations.
  1039. func TestPodTimestamp(t *testing.T) {
  1040. pod1 := &v1.Pod{
  1041. ObjectMeta: metav1.ObjectMeta{
  1042. Name: "test-pod-1",
  1043. Namespace: "ns1",
  1044. UID: types.UID("tp-1"),
  1045. },
  1046. Status: v1.PodStatus{
  1047. NominatedNodeName: "node1",
  1048. },
  1049. }
  1050. pod2 := &v1.Pod{
  1051. ObjectMeta: metav1.ObjectMeta{
  1052. Name: "test-pod-2",
  1053. Namespace: "ns2",
  1054. UID: types.UID("tp-2"),
  1055. },
  1056. Status: v1.PodStatus{
  1057. NominatedNodeName: "node2",
  1058. },
  1059. }
  1060. var timestamp = time.Now()
  1061. pInfo1 := &framework.PodInfo{
  1062. Pod: pod1,
  1063. Timestamp: timestamp,
  1064. }
  1065. pInfo2 := &framework.PodInfo{
  1066. Pod: pod2,
  1067. Timestamp: timestamp.Add(time.Second),
  1068. }
  1069. tests := []struct {
  1070. name string
  1071. operations []operation
  1072. operands []*framework.PodInfo
  1073. expected []*framework.PodInfo
  1074. }{
  1075. {
  1076. name: "add two pod to activeQ and sort them by the timestamp",
  1077. operations: []operation{
  1078. addPodActiveQ,
  1079. addPodActiveQ,
  1080. },
  1081. operands: []*framework.PodInfo{pInfo2, pInfo1},
  1082. expected: []*framework.PodInfo{pInfo1, pInfo2},
  1083. },
  1084. {
  1085. name: "update two pod to activeQ and sort them by the timestamp",
  1086. operations: []operation{
  1087. updatePodActiveQ,
  1088. updatePodActiveQ,
  1089. },
  1090. operands: []*framework.PodInfo{pInfo2, pInfo1},
  1091. expected: []*framework.PodInfo{pInfo1, pInfo2},
  1092. },
  1093. {
  1094. name: "add two pod to unschedulableQ then move them to activeQ and sort them by the timestamp",
  1095. operations: []operation{
  1096. addPodUnschedulableQ,
  1097. addPodUnschedulableQ,
  1098. moveClockForward,
  1099. moveAllToActiveOrBackoffQ,
  1100. },
  1101. operands: []*framework.PodInfo{pInfo2, pInfo1, nil, nil},
  1102. expected: []*framework.PodInfo{pInfo1, pInfo2},
  1103. },
  1104. {
  1105. name: "add one pod to BackoffQ and move it to activeQ",
  1106. operations: []operation{
  1107. addPodActiveQ,
  1108. addPodBackoffQ,
  1109. flushBackoffQ,
  1110. moveAllToActiveOrBackoffQ,
  1111. },
  1112. operands: []*framework.PodInfo{pInfo2, pInfo1, nil, nil},
  1113. expected: []*framework.PodInfo{pInfo1, pInfo2},
  1114. },
  1115. }
  1116. for _, test := range tests {
  1117. t.Run(test.name, func(t *testing.T) {
  1118. queue := createAndRunPriorityQueue(newDefaultFramework(), WithClock(clock.NewFakeClock(timestamp)))
  1119. var podInfoList []*framework.PodInfo
  1120. for i, op := range test.operations {
  1121. op(queue, test.operands[i])
  1122. }
  1123. queue.lock.Lock()
  1124. for i := 0; i < len(test.expected); i++ {
  1125. if pInfo, err := queue.activeQ.Pop(); err != nil {
  1126. t.Errorf("Error while popping the head of the queue: %v", err)
  1127. } else {
  1128. podInfoList = append(podInfoList, pInfo.(*framework.PodInfo))
  1129. }
  1130. }
  1131. queue.lock.Unlock()
  1132. if !reflect.DeepEqual(test.expected, podInfoList) {
  1133. t.Errorf("Unexpected PodInfo list. Expected: %v, got: %v",
  1134. test.expected, podInfoList)
  1135. }
  1136. })
  1137. }
  1138. }
1139. // TestPendingPodsMetric tests Prometheus metrics related to pending pods
  1140. func TestPendingPodsMetric(t *testing.T) {
  1141. timestamp := time.Now()
  1142. metrics.Register()
  1143. total := 50
  1144. pInfos := makePodInfos(total, timestamp)
  1145. totalWithDelay := 20
  1146. pInfosWithDelay := makePodInfos(totalWithDelay, timestamp.Add(2*time.Second))
  1147. tests := []struct {
  1148. name string
  1149. operations []operation
  1150. operands [][]*framework.PodInfo
  1151. metricsName string
  1152. wants string
  1153. }{
  1154. {
  1155. name: "add pods to activeQ and unschedulableQ",
  1156. operations: []operation{
  1157. addPodActiveQ,
  1158. addPodUnschedulableQ,
  1159. },
  1160. operands: [][]*framework.PodInfo{
  1161. pInfos[:30],
  1162. pInfos[30:],
  1163. },
  1164. metricsName: "scheduler_pending_pods",
  1165. wants: `
  1166. # HELP scheduler_pending_pods [ALPHA] Number of pending pods, by the queue type. 'active' means number of pods in activeQ; 'backoff' means number of pods in backoffQ; 'unschedulable' means number of pods in unschedulableQ.
  1167. # TYPE scheduler_pending_pods gauge
  1168. scheduler_pending_pods{queue="active"} 30
  1169. scheduler_pending_pods{queue="backoff"} 0
  1170. scheduler_pending_pods{queue="unschedulable"} 20
  1171. `,
  1172. },
  1173. {
  1174. name: "add pods to all kinds of queues",
  1175. operations: []operation{
  1176. addPodActiveQ,
  1177. addPodBackoffQ,
  1178. addPodUnschedulableQ,
  1179. },
  1180. operands: [][]*framework.PodInfo{
  1181. pInfos[:15],
  1182. pInfos[15:40],
  1183. pInfos[40:],
  1184. },
  1185. metricsName: "scheduler_pending_pods",
  1186. wants: `
  1187. # HELP scheduler_pending_pods [ALPHA] Number of pending pods, by the queue type. 'active' means number of pods in activeQ; 'backoff' means number of pods in backoffQ; 'unschedulable' means number of pods in unschedulableQ.
  1188. # TYPE scheduler_pending_pods gauge
  1189. scheduler_pending_pods{queue="active"} 15
  1190. scheduler_pending_pods{queue="backoff"} 25
  1191. scheduler_pending_pods{queue="unschedulable"} 10
  1192. `,
  1193. },
  1194. {
  1195. name: "add pods to unschedulableQ and then move all to activeQ",
  1196. operations: []operation{
  1197. addPodUnschedulableQ,
  1198. moveClockForward,
  1199. moveAllToActiveOrBackoffQ,
  1200. },
  1201. operands: [][]*framework.PodInfo{
  1202. pInfos[:total],
  1203. {nil},
  1204. {nil},
  1205. },
  1206. metricsName: "scheduler_pending_pods",
  1207. wants: `
  1208. # HELP scheduler_pending_pods [ALPHA] Number of pending pods, by the queue type. 'active' means number of pods in activeQ; 'backoff' means number of pods in backoffQ; 'unschedulable' means number of pods in unschedulableQ.
  1209. # TYPE scheduler_pending_pods gauge
  1210. scheduler_pending_pods{queue="active"} 50
  1211. scheduler_pending_pods{queue="backoff"} 0
  1212. scheduler_pending_pods{queue="unschedulable"} 0
  1213. `,
  1214. },
  1215. {
  1216. name: "make some pods subject to backoff, add pods to unschedulableQ, and then move all to activeQ",
  1217. operations: []operation{
  1218. addPodUnschedulableQ,
  1219. moveClockForward,
  1220. addPodUnschedulableQ,
  1221. moveAllToActiveOrBackoffQ,
  1222. },
  1223. operands: [][]*framework.PodInfo{
  1224. pInfos[20:total],
  1225. {nil},
  1226. pInfosWithDelay[:20],
  1227. {nil},
  1228. },
  1229. metricsName: "scheduler_pending_pods",
  1230. wants: `
  1231. # HELP scheduler_pending_pods [ALPHA] Number of pending pods, by the queue type. 'active' means number of pods in activeQ; 'backoff' means number of pods in backoffQ; 'unschedulable' means number of pods in unschedulableQ.
  1232. # TYPE scheduler_pending_pods gauge
  1233. scheduler_pending_pods{queue="active"} 30
  1234. scheduler_pending_pods{queue="backoff"} 20
  1235. scheduler_pending_pods{queue="unschedulable"} 0
  1236. `,
  1237. },
  1238. {
  1239. name: "make some pods subject to backoff, add pods to unschedulableQ/activeQ, move all to activeQ, and finally flush backoffQ",
  1240. operations: []operation{
  1241. addPodUnschedulableQ,
  1242. addPodActiveQ,
  1243. moveAllToActiveOrBackoffQ,
  1244. flushBackoffQ,
  1245. },
  1246. operands: [][]*framework.PodInfo{
  1247. pInfos[:40],
  1248. pInfos[40:],
  1249. {nil},
  1250. {nil},
  1251. },
  1252. metricsName: "scheduler_pending_pods",
  1253. wants: `
  1254. # HELP scheduler_pending_pods [ALPHA] Number of pending pods, by the queue type. 'active' means number of pods in activeQ; 'backoff' means number of pods in backoffQ; 'unschedulable' means number of pods in unschedulableQ.
  1255. # TYPE scheduler_pending_pods gauge
  1256. scheduler_pending_pods{queue="active"} 50
  1257. scheduler_pending_pods{queue="backoff"} 0
  1258. scheduler_pending_pods{queue="unschedulable"} 0
  1259. `,
  1260. },
  1261. }
  1262. resetMetrics := func() {
  1263. metrics.ActivePods().Set(0)
  1264. metrics.BackoffPods().Set(0)
  1265. metrics.UnschedulablePods().Set(0)
  1266. }
  1267. for _, test := range tests {
  1268. t.Run(test.name, func(t *testing.T) {
  1269. resetMetrics()
  1270. queue := createAndRunPriorityQueue(newDefaultFramework(), WithClock(clock.NewFakeClock(timestamp)))
  1271. for i, op := range test.operations {
  1272. for _, pInfo := range test.operands[i] {
  1273. op(queue, pInfo)
  1274. }
  1275. }
  1276. if err := testutil.GatherAndCompare(metrics.GetGather(), strings.NewReader(test.wants), test.metricsName); err != nil {
  1277. t.Fatal(err)
  1278. }
  1279. })
  1280. }
  1281. }
1282. // TestPerPodSchedulingMetrics makes sure the pod's scheduling attempt count is updated correctly while
1283. // initialAttemptTimestamp stays the same during multiple add/pop operations.
  1284. func TestPerPodSchedulingMetrics(t *testing.T) {
  1285. pod := &v1.Pod{
  1286. ObjectMeta: metav1.ObjectMeta{
  1287. Name: "test-pod",
  1288. Namespace: "test-ns",
  1289. UID: types.UID("test-uid"),
  1290. },
  1291. }
  1292. timestamp := time.Now()
  1293. // Case 1: A pod is created and scheduled after 1 attempt. The queue operations are
  1294. // Add -> Pop.
  1295. c := clock.NewFakeClock(timestamp)
  1296. queue := createAndRunPriorityQueue(newDefaultFramework(), WithClock(c))
  1297. queue.Add(pod)
  1298. pInfo, err := queue.Pop()
  1299. if err != nil {
  1300. t.Fatalf("Failed to pop a pod %v", err)
  1301. }
  1302. checkPerPodSchedulingMetrics("Attempt once", t, pInfo, 1, timestamp)
  1303. // Case 2: A pod is created and scheduled after 2 attempts. The queue operations are
  1304. // Add -> Pop -> AddUnschedulableIfNotPresent -> flushUnschedulableQLeftover -> Pop.
  1305. c = clock.NewFakeClock(timestamp)
  1306. queue = createAndRunPriorityQueue(newDefaultFramework(), WithClock(c))
  1307. queue.Add(pod)
  1308. pInfo, err = queue.Pop()
  1309. if err != nil {
  1310. t.Fatalf("Failed to pop a pod %v", err)
  1311. }
  1312. queue.AddUnschedulableIfNotPresent(pInfo, 1)
  1313. // Override clock to exceed the unschedulableQTimeInterval so that unschedulable pods
  1314. // will be moved to activeQ
  1315. c.SetTime(timestamp.Add(unschedulableQTimeInterval + 1))
  1316. queue.flushUnschedulableQLeftover()
  1317. pInfo, err = queue.Pop()
  1318. if err != nil {
  1319. t.Fatalf("Failed to pop a pod %v", err)
  1320. }
  1321. checkPerPodSchedulingMetrics("Attempt twice", t, pInfo, 2, timestamp)
  1322. // Case 3: Similar to case 2, but before the second pop, call update, the queue operations are
  1323. // Add -> Pop -> AddUnschedulableIfNotPresent -> flushUnschedulableQLeftover -> Update -> Pop.
  1324. c = clock.NewFakeClock(timestamp)
  1325. queue = createAndRunPriorityQueue(newDefaultFramework(), WithClock(c))
  1326. queue.Add(pod)
  1327. pInfo, err = queue.Pop()
  1328. if err != nil {
  1329. t.Fatalf("Failed to pop a pod %v", err)
  1330. }
  1331. queue.AddUnschedulableIfNotPresent(pInfo, 1)
  1332. // Override clock to exceed the unschedulableQTimeInterval so that unschedulable pods
  1333. // will be moved to activeQ
  1334. c.SetTime(timestamp.Add(unschedulableQTimeInterval + 1))
  1335. queue.flushUnschedulableQLeftover()
  1336. newPod := pod.DeepCopy()
  1337. newPod.Generation = 1
  1338. queue.Update(pod, newPod)
  1339. pInfo, err = queue.Pop()
  1340. if err != nil {
  1341. t.Fatalf("Failed to pop a pod %v", err)
  1342. }
  1343. checkPerPodSchedulingMetrics("Attempt twice with update", t, pInfo, 2, timestamp)
  1344. }
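// TestIncomingPodsMetrics verifies that the scheduler_queue_incoming_pods_total counter is
// incremented with the expected event and queue labels as pods are added to, and moved
// between, the active, backoff, and unschedulable queues.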
func TestIncomingPodsMetrics(t *testing.T) {
	timestamp := time.Now()
	metrics.Register()
	var pInfos = make([]*framework.PodInfo, 0, 3)
	for i := 1; i <= 3; i++ {
		p := &framework.PodInfo{
			Pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("test-pod-%d", i),
					Namespace: fmt.Sprintf("ns%d", i),
					UID:       types.UID(fmt.Sprintf("tp-%d", i)),
				},
			},
			Timestamp: timestamp,
		}
		pInfos = append(pInfos, p)
	}
	tests := []struct {
		name       string
		operations []operation
		want       string
	}{
		{
			name: "add pods to activeQ",
			operations: []operation{
				add,
			},
			want: `
scheduler_queue_incoming_pods_total{event="PodAdd",queue="active"} 3
`,
		},
		{
			name: "add pods to unschedulableQ",
			operations: []operation{
				addUnschedulablePodBackToUnschedulableQ,
			},
			want: `
scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="unschedulable"} 3
`,
		},
		{
			name: "add pods to unschedulableQ and then move all to backoffQ",
			operations: []operation{
				addUnschedulablePodBackToUnschedulableQ,
				moveAllToActiveOrBackoffQ,
			},
			want: ` scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="unschedulable"} 3
scheduler_queue_incoming_pods_total{event="test",queue="backoff"} 3
`,
		},
		{
			name: "add pods to unschedulableQ and then move all to activeQ",
			operations: []operation{
				addUnschedulablePodBackToUnschedulableQ,
				moveClockForward,
				moveAllToActiveOrBackoffQ,
			},
			want: ` scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="unschedulable"} 3
scheduler_queue_incoming_pods_total{event="test",queue="active"} 3
`,
		},
		{
			name: "make some pods subject to backoff and add them to backoffQ, then flush backoffQ",
			operations: []operation{
				addUnschedulablePodBackToBackoffQ,
				moveClockForward,
				flushBackoffQ,
			},
			want: ` scheduler_queue_incoming_pods_total{event="BackoffComplete",queue="active"} 3
scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="backoff"} 3
`,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			metrics.SchedulerQueueIncomingPods.Reset()
			queue := NewPriorityQueue(newDefaultFramework(), WithClock(clock.NewFakeClock(timestamp)))
			// Closing the queue before calling Run makes the background flush goroutines
			// exit immediately, so only the explicit operations below mutate the queue.
			queue.Close()
			queue.Run()
			for _, op := range test.operations {
				for _, pInfo := range pInfos {
					op(queue, pInfo)
				}
			}
			metricName := metrics.SchedulerSubsystem + "_" + metrics.SchedulerQueueIncomingPods.Name
			if err := testutil.CollectAndCompare(metrics.SchedulerQueueIncomingPods, strings.NewReader(queueMetricMetadata+test.want), metricName); err != nil {
				t.Errorf("unexpected collecting result:\n%s", err)
			}
		})
	}
}
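// checkPerPodSchedulingMetrics verifies that the popped pod records the expected number of
// scheduling attempts and the expected initial attempt timestamp.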
func checkPerPodSchedulingMetrics(name string, t *testing.T, pInfo *framework.PodInfo, wantAttempts int, wantInitialAttemptTs time.Time) {
	if pInfo.Attempts != wantAttempts {
		t.Errorf("[%s] Pod schedule attempt unexpected, got %v, want %v", name, pInfo.Attempts, wantAttempts)
	}
	if pInfo.InitialAttemptTimestamp != wantInitialAttemptTs {
		t.Errorf("[%s] Pod initial schedule attempt timestamp unexpected, got %v, want %v", name, pInfo.InitialAttemptTimestamp, wantInitialAttemptTs)
	}
}
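// createAndRunPriorityQueue builds a PriorityQueue with the given framework and options and
// starts its background goroutines.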
func createAndRunPriorityQueue(fwk framework.Framework, opts ...Option) *PriorityQueue {
	q := NewPriorityQueue(fwk, opts...)
	q.Run()
	return q
}
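// TestBackOffFlow verifies that a pod's backoff duration doubles after each failed scheduling
// attempt, starting at 1s and capping at 10s, and that the pod leaves the backoff queue only
// once its backoff period has fully elapsed.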
func TestBackOffFlow(t *testing.T) {
	cl := clock.NewFakeClock(time.Now())
	q := NewPriorityQueue(newDefaultFramework(), WithClock(cl))
	steps := []struct {
		wantBackoff time.Duration
	}{
		{wantBackoff: time.Second},
		{wantBackoff: 2 * time.Second},
		{wantBackoff: 4 * time.Second},
		{wantBackoff: 8 * time.Second},
		{wantBackoff: 10 * time.Second},
		{wantBackoff: 10 * time.Second},
		{wantBackoff: 10 * time.Second},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-pod",
			Namespace: "test-ns",
			UID:       "test-uid",
		},
	}
	podID := nsNameForPod(pod)
	if err := q.Add(pod); err != nil {
		t.Fatal(err)
	}

	for i, step := range steps {
		t.Run(fmt.Sprintf("step %d", i), func(t *testing.T) {
			timestamp := cl.Now()
			// Simulate schedule attempt.
			podInfo, err := q.Pop()
			if err != nil {
				t.Fatal(err)
			}
			if podInfo.Attempts != i+1 {
				t.Errorf("got attempts %d, want %d", podInfo.Attempts, i+1)
			}
			if err := q.AddUnschedulableIfNotPresent(podInfo, int64(i)); err != nil {
				t.Fatal(err)
			}

			// An event happens.
			q.MoveAllToActiveOrBackoffQueue("deleted pod")

			q.lock.RLock()
			if _, ok, _ := q.podBackoffQ.Get(podInfo); !ok {
				t.Errorf("pod %v is not in the backoff queue", podID)
			}
			q.lock.RUnlock()

			// Check backoff duration.
			deadline := q.getBackoffTime(podInfo)
			backoff := deadline.Sub(timestamp)
			if backoff != step.wantBackoff {
				t.Errorf("got backoff %s, want %s", backoff, step.wantBackoff)
			}

			// Simulate routine that continuously flushes the backoff queue.
			cl.Step(time.Millisecond)
			q.flushBackoffQCompleted()
			// Still in backoff queue after an early flush.
			q.lock.RLock()
			if _, ok, _ := q.podBackoffQ.Get(podInfo); !ok {
				t.Errorf("pod %v is not in the backoff queue", podID)
			}
			q.lock.RUnlock()

			// Moved out of the backoff queue after timeout.
			cl.Step(backoff)
			q.flushBackoffQCompleted()
			q.lock.RLock()
			if _, ok, _ := q.podBackoffQ.Get(podInfo); ok {
				t.Errorf("pod %v is still in the backoff queue", podID)
			}
			q.lock.RUnlock()
		})
	}
}
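// makePodInfos returns num PodInfos with unique names, namespaces, and UIDs, all stamped with
// the given timestamp.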
func makePodInfos(num int, timestamp time.Time) []*framework.PodInfo {
	var pInfos = make([]*framework.PodInfo, 0, num)
	for i := 1; i <= num; i++ {
		p := &framework.PodInfo{
			Pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("test-pod-%d", i),
					Namespace: fmt.Sprintf("ns%d", i),
					UID:       types.UID(fmt.Sprintf("tp-%d", i)),
				},
			},
			Timestamp: timestamp,
		}
		pInfos = append(pInfos, p)
	}
	return pInfos
}