device_plugin_test.go 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370
  1. /*
  2. Copyright 2017 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package e2enode
  14. import (
  15. "context"
  16. "path/filepath"
  17. "time"
  18. appsv1 "k8s.io/api/apps/v1"
  19. v1 "k8s.io/api/core/v1"
  20. "k8s.io/apimachinery/pkg/runtime"
  21. "k8s.io/apimachinery/pkg/runtime/serializer"
  22. "k8s.io/kubernetes/test/e2e/framework/testfiles"
  23. "regexp"
  24. "k8s.io/apimachinery/pkg/api/resource"
  25. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  26. "k8s.io/apimachinery/pkg/util/uuid"
  27. "k8s.io/kubernetes/pkg/features"
  28. kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
  29. kubeletpodresourcesv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
  30. "k8s.io/kubernetes/test/e2e/framework"
  31. e2enode "k8s.io/kubernetes/test/e2e/framework/node"
  32. e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
  33. "github.com/onsi/ginkgo"
  34. "github.com/onsi/gomega"
  35. )
  36. const (
  37. // sampleResourceName is the name of the example resource which is used in the e2e test
  38. sampleResourceName = "example.com/resource"
  39. // sampleDevicePluginDSYAML is the path of the daemonset template of the sample device plugin. // TODO: Parametrize it by making it a feature in TestFramework.
  40. sampleDevicePluginDSYAML = "test/e2e/testing-manifests/sample-device-plugin.yaml"
  41. // sampleDevicePluginName is the name of the device plugin pod
  42. sampleDevicePluginName = "sample-device-plugin"
  43. // fake resource name
  44. resourceName = "example.com/resource"
  45. envVarNamePluginSockDir = "PLUGIN_SOCK_DIR"
  46. )
  47. var (
  48. appsScheme = runtime.NewScheme()
  49. appsCodecs = serializer.NewCodecFactory(appsScheme)
  50. )
// Serial because the test restarts Kubelet
var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePluginProbe][NodeFeature:DevicePluginProbe][Serial]", func() {
	f := framework.NewDefaultFramework("device-plugin-errors")
	// Exercise the device-plugin lifecycle against the kubelet's plugin
	// registration directory (the Plugins Watcher socket path).
	testDevicePlugin(f, "/var/lib/kubelet/plugins_registry")
})
  56. // numberOfSampleResources returns the number of resources advertised by a node.
  57. func numberOfSampleResources(node *v1.Node) int64 {
  58. val, ok := node.Status.Capacity[sampleResourceName]
  59. if !ok {
  60. return 0
  61. }
  62. return val.Value()
  63. }
  64. // getSampleDevicePluginPod returns the Device Plugin pod for sample resources in e2e tests.
  65. func getSampleDevicePluginPod() *v1.Pod {
  66. ds := readDaemonSetV1OrDie(testfiles.ReadOrDie(sampleDevicePluginDSYAML))
  67. p := &v1.Pod{
  68. ObjectMeta: metav1.ObjectMeta{
  69. Name: sampleDevicePluginName,
  70. Namespace: metav1.NamespaceSystem,
  71. },
  72. Spec: ds.Spec.Template.Spec,
  73. }
  74. return p
  75. }
  76. // readDaemonSetV1OrDie reads daemonset object from bytes. Panics on error.
  77. func readDaemonSetV1OrDie(objBytes []byte) *appsv1.DaemonSet {
  78. appsv1.AddToScheme(appsScheme)
  79. requiredObj, err := runtime.Decode(appsCodecs.UniversalDecoder(appsv1.SchemeGroupVersion), objBytes)
  80. if err != nil {
  81. panic(err)
  82. }
  83. return requiredObj.(*appsv1.DaemonSet)
  84. }
// testDevicePlugin registers the sample device plugin with the kubelet via
// pluginSockDir and exercises the full device-plugin lifecycle: device
// advertisement, allocation to a pod, persistence of the assignment across
// container restarts and a kubelet restart, plugin re-registration, and
// plugin removal.
func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
	// Normalize and ensure a trailing slash; the sample plugin appends its
	// socket name to this value.
	pluginSockDir = filepath.Join(pluginSockDir) + "/"
	ginkgo.Context("DevicePlugin", func() {
		ginkgo.By("Enabling support for Kubelet Plugins Watcher")
		tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
			if initialConfig.FeatureGates == nil {
				initialConfig.FeatureGates = map[string]bool{}
			}
			// Enable the pod-resources kubelet API, queried below via getNodeDevices().
			initialConfig.FeatureGates[string(features.KubeletPodResources)] = true
		})
		ginkgo.It("Verifies the Kubelet device plugin functionality.", func() {
			ginkgo.By("Wait for node is ready to start with")
			e2enode.WaitForNodeToBeReady(f.ClientSet, framework.TestContext.NodeName, 5*time.Minute)
			dp := getSampleDevicePluginPod()
			// Point the plugin at the socket directory under test.
			for i := range dp.Spec.Containers[0].Env {
				if dp.Spec.Containers[0].Env[i].Name == envVarNamePluginSockDir {
					dp.Spec.Containers[0].Env[i].Value = pluginSockDir
				}
			}
			framework.Logf("env %v", dp.Spec.Containers[0].Env)
			// Pin the plugin pod to the node under test.
			dp.Spec.NodeName = framework.TestContext.NodeName
			ginkgo.By("Create sample device plugin pod")
			devicePluginPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp, metav1.CreateOptions{})
			framework.ExpectNoError(err)
			ginkgo.By("Waiting for devices to become available on the local node")
			gomega.Eventually(func() bool {
				return numberOfSampleResources(getLocalNode(f)) > 0
			}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
			framework.Logf("Successfully created device plugin pod")
			ginkgo.By("Waiting for the resource exported by the sample device plugin to become available on the local node")
			// TODO(vikasc): Instead of hard-coding number of devices, provide number of devices in the sample-device-plugin using configmap
			// and then use the same here
			devsLen := int64(2)
			gomega.Eventually(func() bool {
				node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
				framework.ExpectNoError(err)
				return numberOfDevicesCapacity(node, resourceName) == devsLen &&
					numberOfDevicesAllocatable(node, resourceName) == devsLen
			}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
			ginkgo.By("Creating one pod on node with at least one fake-device")
			// The pod echoes the names of the stub devices it was assigned;
			// parseLog extracts the device ID from that output below.
			podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs"
			pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
			deviceIDRE := "stub devices: (Dev-[0-9]+)"
			devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
			gomega.Expect(devID1).To(gomega.Not(gomega.Equal("")))
			// Cross-check the assignment through the kubelet pod-resources API.
			podResources, err := getNodeDevices()
			var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
			framework.Logf("pod resources %v", podResources)
			framework.ExpectNoError(err)
			// NOTE(review): 2 == test pod + device plugin pod, presumably —
			// confirm against the pod-resources API semantics.
			framework.ExpectEqual(len(podResources.PodResources), 2)
			for _, res := range podResources.GetPodResources() {
				if res.Name == pod1.Name {
					resourcesForOurPod = res
				}
			}
			framework.Logf("resourcesForOurPod %v", resourcesForOurPod)
			gomega.Expect(resourcesForOurPod).NotTo(gomega.BeNil())
			framework.ExpectEqual(resourcesForOurPod.Name, pod1.Name)
			framework.ExpectEqual(resourcesForOurPod.Namespace, pod1.Namespace)
			framework.ExpectEqual(len(resourcesForOurPod.Containers), 1)
			framework.ExpectEqual(resourcesForOurPod.Containers[0].Name, pod1.Spec.Containers[0].Name)
			framework.ExpectEqual(len(resourcesForOurPod.Containers[0].Devices), 1)
			framework.ExpectEqual(resourcesForOurPod.Containers[0].Devices[0].ResourceName, resourceName)
			framework.ExpectEqual(len(resourcesForOurPod.Containers[0].Devices[0].DeviceIds), 1)
			pod1, err = f.PodClient().Get(context.TODO(), pod1.Name, metav1.GetOptions{})
			framework.ExpectNoError(err)
			ensurePodContainerRestart(f, pod1.Name, pod1.Name)
			ginkgo.By("Confirming that device assignment persists even after container restart")
			devIDAfterRestart := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
			framework.ExpectEqual(devIDAfterRestart, devID1)
			restartTime := time.Now()
			ginkgo.By("Restarting Kubelet")
			restartKubelet()
			// We need to wait for node to be ready before re-registering stub device plugin.
			// Otherwise, Kubelet DeviceManager may remove the re-registered sockets after it starts.
			ginkgo.By("Wait for node is ready")
			gomega.Eventually(func() bool {
				node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
				framework.ExpectNoError(err)
				for _, cond := range node.Status.Conditions {
					// A heartbeat after restartTime proves this is the restarted
					// kubelet reporting ready, not a stale pre-restart condition.
					if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(restartTime) {
						return true
					}
				}
				return false
			}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
			ginkgo.By("Re-Register resources and deleting the pods and waiting for container removal")
			getOptions := metav1.GetOptions{}
			gp := int64(0)
			deleteOptions := metav1.DeleteOptions{
				GracePeriodSeconds: &gp,
			}
			err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions)
			framework.ExpectNoError(err)
			waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
			_, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Get(context.TODO(), dp.Name, getOptions)
			framework.Logf("Trying to get dp pod after deletion. err must be non-nil. err: %v", err)
			framework.ExpectError(err)
			// Re-create the plugin pod so it re-registers with the restarted kubelet.
			devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp, metav1.CreateOptions{})
			framework.ExpectNoError(err)
			ensurePodContainerRestart(f, pod1.Name, pod1.Name)
			ginkgo.By("Confirming that after a kubelet restart, fake-device assignement is kept")
			devIDRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
			framework.ExpectEqual(devIDRestart1, devID1)
			ginkgo.By("Waiting for resource to become available on the local node after re-registration")
			gomega.Eventually(func() bool {
				node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
				framework.ExpectNoError(err)
				return numberOfDevicesCapacity(node, resourceName) == devsLen &&
					numberOfDevicesAllocatable(node, resourceName) == devsLen
			}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
			ginkgo.By("Creating another pod")
			pod2 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
			ginkgo.By("Checking that pod got a different fake device")
			devID2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
			gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))
			ginkgo.By("By deleting the pods and waiting for container removal")
			err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions)
			framework.ExpectNoError(err)
			waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
			ginkgo.By("Waiting for stub device plugin to become unhealthy on the local node")
			// With the plugin gone, allocatable should drop to zero while
			// capacity still reports the (now unhealthy) devices.
			gomega.Eventually(func() int64 {
				node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
				framework.ExpectNoError(err)
				return numberOfDevicesAllocatable(node, resourceName)
			}, 30*time.Second, framework.Poll).Should(gomega.Equal(int64(0)))
			ginkgo.By("Checking that scheduled pods can continue to run even after we delete device plugin.")
			ensurePodContainerRestart(f, pod1.Name, pod1.Name)
			devIDRestart1 = parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
			framework.ExpectEqual(devIDRestart1, devID1)
			ensurePodContainerRestart(f, pod2.Name, pod2.Name)
			devIDRestart2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
			framework.ExpectEqual(devIDRestart2, devID2)
			ginkgo.By("Re-register resources")
			devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), dp, metav1.CreateOptions{})
			framework.ExpectNoError(err)
			ginkgo.By("Waiting for the resource exported by the stub device plugin to become healthy on the local node")
			gomega.Eventually(func() int64 {
				node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
				framework.ExpectNoError(err)
				return numberOfDevicesAllocatable(node, resourceName)
			}, 30*time.Second, framework.Poll).Should(gomega.Equal(devsLen))
			ginkgo.By("by deleting the pods and waiting for container removal")
			err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), dp.Name, &deleteOptions)
			framework.ExpectNoError(err)
			waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
			ginkgo.By("Waiting for stub device plugin to become unavailable on the local node")
			gomega.Eventually(func() bool {
				node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
				framework.ExpectNoError(err)
				return numberOfDevicesCapacity(node, resourceName) <= 0
			}, 10*time.Minute, framework.Poll).Should(gomega.BeTrue())
			// Cleanup
			f.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
			f.PodClient().DeleteSync(pod2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
		})
	})
}
  243. // makeBusyboxPod returns a simple Pod spec with a busybox container
  244. // that requests resourceName and runs the specified command.
  245. func makeBusyboxPod(resourceName, cmd string) *v1.Pod {
  246. podName := "device-plugin-test-" + string(uuid.NewUUID())
  247. rl := v1.ResourceList{v1.ResourceName(resourceName): *resource.NewQuantity(1, resource.DecimalSI)}
  248. return &v1.Pod{
  249. ObjectMeta: metav1.ObjectMeta{Name: podName},
  250. Spec: v1.PodSpec{
  251. RestartPolicy: v1.RestartPolicyAlways,
  252. Containers: []v1.Container{{
  253. Image: busyboxImage,
  254. Name: podName,
  255. // Runs the specified command in the test pod.
  256. Command: []string{"sh", "-c", cmd},
  257. Resources: v1.ResourceRequirements{
  258. Limits: rl,
  259. Requests: rl,
  260. },
  261. }},
  262. },
  263. }
  264. }
  265. // ensurePodContainerRestart confirms that pod container has restarted at least once
  266. func ensurePodContainerRestart(f *framework.Framework, podName string, contName string) {
  267. var initialCount int32
  268. var currentCount int32
  269. p, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
  270. if err != nil || len(p.Status.ContainerStatuses) < 1 {
  271. framework.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err)
  272. }
  273. initialCount = p.Status.ContainerStatuses[0].RestartCount
  274. gomega.Eventually(func() bool {
  275. p, err = f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
  276. if err != nil || len(p.Status.ContainerStatuses) < 1 {
  277. return false
  278. }
  279. currentCount = p.Status.ContainerStatuses[0].RestartCount
  280. framework.Logf("initial %v, current %v", initialCount, currentCount)
  281. return currentCount > initialCount
  282. }, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
  283. }
  284. // parseLog returns the matching string for the specified regular expression parsed from the container logs.
  285. func parseLog(f *framework.Framework, podName string, contName string, re string) string {
  286. logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName)
  287. if err != nil {
  288. framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
  289. }
  290. framework.Logf("got pod logs: %v", logs)
  291. regex := regexp.MustCompile(re)
  292. matches := regex.FindStringSubmatch(logs)
  293. if len(matches) < 2 {
  294. return ""
  295. }
  296. return matches[1]
  297. }
  298. // numberOfDevicesCapacity returns the number of devices of resourceName advertised by a node capacity
  299. func numberOfDevicesCapacity(node *v1.Node, resourceName string) int64 {
  300. val, ok := node.Status.Capacity[v1.ResourceName(resourceName)]
  301. if !ok {
  302. return 0
  303. }
  304. return val.Value()
  305. }
  306. // numberOfDevicesAllocatable returns the number of devices of resourceName advertised by a node allocatable
  307. func numberOfDevicesAllocatable(node *v1.Node, resourceName string) int64 {
  308. val, ok := node.Status.Allocatable[v1.ResourceName(resourceName)]
  309. if !ok {
  310. return 0
  311. }
  312. return val.Value()
  313. }