/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
  13. package apps
  14. import (
  15. "context"
  16. "fmt"
  17. "time"
  18. batchv1 "k8s.io/api/batch/v1"
  19. v1 "k8s.io/api/core/v1"
  20. apierrors "k8s.io/apimachinery/pkg/api/errors"
  21. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  22. "k8s.io/apimachinery/pkg/util/wait"
  23. clientset "k8s.io/client-go/kubernetes"
  24. batchinternal "k8s.io/kubernetes/pkg/apis/batch"
  25. "k8s.io/kubernetes/test/e2e/framework"
  26. e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
  27. e2enode "k8s.io/kubernetes/test/e2e/framework/node"
  28. e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
  29. "github.com/onsi/ginkgo"
  30. "github.com/onsi/gomega"
  31. )
  32. var _ = SIGDescribe("Job", func() {
  33. f := framework.NewDefaultFramework("job")
  34. parallelism := int32(2)
  35. completions := int32(4)
  36. backoffLimit := int32(6) // default value
  37. // Simplest case: N pods succeed
  38. ginkgo.It("should run a job to completion when tasks succeed", func() {
  39. ginkgo.By("Creating a job")
  40. job := e2ejob.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
  41. job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
  42. framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
  43. ginkgo.By("Ensuring job reaches completions")
  44. err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
  45. framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
  46. ginkgo.By("Ensuring pods for job exist")
  47. pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
  48. framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name)
  49. successes := int32(0)
  50. for _, pod := range pods.Items {
  51. if pod.Status.Phase == v1.PodSucceeded {
  52. successes++
  53. }
  54. }
  55. framework.ExpectEqual(successes, completions, "epected %d successful job pods, but got %d", completions, successes)
  56. })
  57. /*
  58. Testcase: Ensure that the pods associated with the job are removed once the job is deleted
  59. Description: Create a job and ensure the associated pod count is equal to paralellism count. Delete the
  60. job and ensure if the pods associated with the job have been removed
  61. */
  62. ginkgo.It("should remove pods when job is deleted", func() {
  63. ginkgo.By("Creating a job")
  64. job := e2ejob.NewTestJob("notTerminate", "all-pods-removed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
  65. job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
  66. framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
  67. ginkgo.By("Ensure pods equal to paralellism count is attached to the job")
  68. err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
  69. framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)
  70. ginkgo.By("Delete the job")
  71. err = framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name)
  72. framework.ExpectNoError(err, "failed to delete the job in namespace: %s", f.Namespace.Name)
  73. ginkgo.By("Ensure the pods associated with the job are also deleted")
  74. err = e2ejob.WaitForAllJobPodsGone(f.ClientSet, f.Namespace.Name, job.Name)
  75. framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
  76. })
  77. /*
  78. Release : v1.16
  79. Testname: Jobs, completion after task failure
  80. Description: Explicitly cause the tasks to fail once initially. After restarting, the Job MUST
  81. execute to completion.
  82. */
  83. framework.ConformanceIt("should run a job to completion when tasks sometimes fail and are locally restarted", func() {
  84. ginkgo.By("Creating a job")
  85. // One failure, then a success, local restarts.
  86. // We can't use the random failure approach, because kubelet will
  87. // throttle frequently failing containers in a given pod, ramping
  88. // up to 5 minutes between restarts, making test timeout due to
  89. // successive failures too likely with a reasonable test timeout.
  90. job := e2ejob.NewTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions, nil, backoffLimit)
  91. job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
  92. framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
  93. ginkgo.By("Ensuring job reaches completions")
  94. err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
  95. framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
  96. })
  97. // Pods sometimes fail, but eventually succeed, after pod restarts
  98. ginkgo.It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() {
  99. // One failure, then a success, no local restarts.
  100. // We can't use the random failure approach, because JobController
  101. // will throttle frequently failing Pods of a given Job, ramping
  102. // up to 6 minutes between restarts, making test timeout due to
  103. // successive failures.
  104. // Instead, we force the Job's Pods to be scheduled to a single Node
  105. // and use a hostPath volume to persist data across new Pods.
  106. ginkgo.By("Looking for a node to schedule job pod")
  107. node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
  108. framework.ExpectNoError(err)
  109. ginkgo.By("Creating a job")
  110. job := e2ejob.NewTestJobOnNode("failOnce", "fail-once-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name)
  111. job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
  112. framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
  113. ginkgo.By("Ensuring job reaches completions")
  114. err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
  115. framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
  116. })
  117. ginkgo.It("should fail when exceeds active deadline", func() {
  118. ginkgo.By("Creating a job")
  119. var activeDeadlineSeconds int64 = 1
  120. job := e2ejob.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit)
  121. job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
  122. framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
  123. ginkgo.By("Ensuring job past active deadline")
  124. err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+10)*time.Second, "DeadlineExceeded")
  125. framework.ExpectNoError(err, "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name)
  126. })
  127. /*
  128. Release : v1.15
  129. Testname: Jobs, active pods, graceful termination
  130. Description: Create a job. Ensure the active pods reflect paralellism in the namespace and delete the job. Job MUST be deleted successfully.
  131. */
  132. framework.ConformanceIt("should delete a job", func() {
  133. ginkgo.By("Creating a job")
  134. job := e2ejob.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
  135. job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
  136. framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
  137. ginkgo.By("Ensuring active pods == parallelism")
  138. err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
  139. framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)
  140. ginkgo.By("delete a job")
  141. framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))
  142. ginkgo.By("Ensuring job was deleted")
  143. _, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
  144. framework.ExpectError(err, "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name)
  145. framework.ExpectEqual(apierrors.IsNotFound(err), true)
  146. })
  147. /*
  148. Release : v1.16
  149. Testname: Jobs, orphan pods, re-adoption
  150. Description: Create a parallel job. The number of Pods MUST equal the level of parallelism.
  151. Orphan a Pod by modifying its owner reference. The Job MUST re-adopt the orphan pod.
  152. Modify the labels of one of the Job's Pods. The Job MUST release the Pod.
  153. */
  154. framework.ConformanceIt("should adopt matching orphans and release non-matching pods", func() {
  155. ginkgo.By("Creating a job")
  156. job := e2ejob.NewTestJob("notTerminate", "adopt-release", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
  157. // Replace job with the one returned from Create() so it has the UID.
  158. // Save Kind since it won't be populated in the returned job.
  159. kind := job.Kind
  160. job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
  161. framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
  162. job.Kind = kind
  163. ginkgo.By("Ensuring active pods == parallelism")
  164. err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
  165. framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)
  166. ginkgo.By("Orphaning one of the Job's Pods")
  167. pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
  168. framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
  169. gomega.Expect(pods.Items).To(gomega.HaveLen(int(parallelism)))
  170. pod := pods.Items[0]
  171. f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
  172. pod.OwnerReferences = nil
  173. })
  174. ginkgo.By("Checking that the Job readopts the Pod")
  175. gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", e2ejob.JobTimeout,
  176. func(pod *v1.Pod) (bool, error) {
  177. controllerRef := metav1.GetControllerOf(pod)
  178. if controllerRef == nil {
  179. return false, nil
  180. }
  181. if controllerRef.Kind != job.Kind || controllerRef.Name != job.Name || controllerRef.UID != job.UID {
  182. return false, fmt.Errorf("pod has wrong controllerRef: got %v, want %v", controllerRef, job)
  183. }
  184. return true, nil
  185. },
  186. )).To(gomega.Succeed(), "wait for pod %q to be readopted", pod.Name)
  187. ginkgo.By("Removing the labels from the Job's Pod")
  188. f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
  189. pod.Labels = nil
  190. })
  191. ginkgo.By("Checking that the Job releases the Pod")
  192. gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", e2ejob.JobTimeout,
  193. func(pod *v1.Pod) (bool, error) {
  194. controllerRef := metav1.GetControllerOf(pod)
  195. if controllerRef != nil {
  196. return false, nil
  197. }
  198. return true, nil
  199. },
  200. )).To(gomega.Succeed(), "wait for pod %q to be released", pod.Name)
  201. })
  202. ginkgo.It("should fail to exceed backoffLimit", func() {
  203. ginkgo.By("Creating a job")
  204. backoff := 1
  205. job := e2ejob.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff))
  206. job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
  207. framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
  208. ginkgo.By("Ensuring job exceed backofflimit")
  209. err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, e2ejob.JobTimeout, "BackoffLimitExceeded")
  210. framework.ExpectNoError(err, "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name)
  211. ginkgo.By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1))
  212. pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
  213. framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
  214. // gomega.Expect(pods.Items).To(gomega.HaveLen(backoff + 1))
  215. // due to NumRequeus not being stable enough, especially with failed status
  216. // updates we need to allow more than backoff+1
  217. // TODO revert this back to above when https://github.com/kubernetes/kubernetes/issues/64787 gets fixed
  218. if len(pods.Items) < backoff+1 {
  219. framework.Failf("Not enough pod created expected at least %d, got %#v", backoff+1, pods.Items)
  220. }
  221. for _, pod := range pods.Items {
  222. framework.ExpectEqual(pod.Status.Phase, v1.PodFailed)
  223. }
  224. })
  225. })
  226. // waitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
  227. func waitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
  228. return wait.Poll(framework.Poll, timeout, func() (bool, error) {
  229. curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{})
  230. if err != nil {
  231. return false, err
  232. }
  233. for _, c := range curr.Status.Conditions {
  234. if c.Type == batchv1.JobFailed && c.Status == v1.ConditionTrue {
  235. if reason == "" || reason == c.Reason {
  236. return true, nil
  237. }
  238. }
  239. }
  240. return false, nil
  241. })
  242. }