volume_manager_test.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package volumemanager

import (
	"os"
	"reflect"
	"strconv"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/record"
	utiltesting "k8s.io/client-go/util/testing"
	"k8s.io/kubernetes/pkg/kubelet/config"
	"k8s.io/kubernetes/pkg/kubelet/configmap"
	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
	"k8s.io/kubernetes/pkg/kubelet/secret"
	"k8s.io/kubernetes/pkg/kubelet/status"
	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
	"k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/types"
)

const (
	testHostname = "test-hostname"
)
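
// TestGetMountedVolumesForPodAndGetVolumesInUse verifies that, once
// WaitForAttachAndMount succeeds, the PV bound to the pod's claim is reported
// as mounted for the pod and as in use on the node.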
func TestGetMountedVolumesForPodAndGetVolumesInUse(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	cpm := podtest.NewMockCheckpointManager()
	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager(), cpm)

	node, pod, pv, claim := createObjects()
	kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)

	manager := newTestVolumeManager(tmpDir, podManager, kubeClient)

	stopCh := runVolumeManager(manager)
	defer close(stopCh)

	podManager.SetPods([]*v1.Pod{pod})

	// Fake node status update
	go simulateVolumeInUseUpdate(
		v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
		stopCh,
		manager)

	err = manager.WaitForAttachAndMount(pod)
	if err != nil {
		t.Errorf("Expected success: %v", err)
	}

	expectedMounted := pod.Spec.Volumes[0].Name
	actualMounted := manager.GetMountedVolumesForPod(types.UniquePodName(pod.ObjectMeta.UID))
	if _, ok := actualMounted[expectedMounted]; !ok || (len(actualMounted) != 1) {
		t.Errorf("Expected %v to be mounted to pod but got %v", expectedMounted, actualMounted)
	}

	expectedInUse := []v1.UniqueVolumeName{v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name)}
	actualInUse := manager.GetVolumesInUse()
	if !reflect.DeepEqual(expectedInUse, actualInUse) {
		t.Errorf("Expected %v to be in use but got %v", expectedInUse, actualInUse)
	}
}
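
// TestInitialPendingVolumesForPodAndGetVolumesInUse verifies that
// WaitForAttachAndMount still succeeds when the claim starts out Pending and
// only becomes Bound while the volume manager is already waiting for it.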
func TestInitialPendingVolumesForPodAndGetVolumesInUse(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	cpm := podtest.NewMockCheckpointManager()
	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager(), cpm)

	node, pod, pv, claim := createObjects()
	claim.Status = v1.PersistentVolumeClaimStatus{
		Phase: v1.ClaimPending,
	}

	kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)

	manager := newTestVolumeManager(tmpDir, podManager, kubeClient)

	stopCh := runVolumeManager(manager)
	defer close(stopCh)

	podManager.SetPods([]*v1.Pod{pod})

	// Fake node status update
	go simulateVolumeInUseUpdate(
		v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
		stopCh,
		manager)

	// delayed claim binding
	go delayClaimBecomesBound(kubeClient, claim.GetNamespace(), claim.ObjectMeta.Name)

	err = manager.WaitForAttachAndMount(pod)
	if err != nil {
		t.Errorf("Expected success: %v", err)
	}
}
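
// TestGetExtraSupplementalGroupsForPod verifies that a GID taken from the
// PV's VolumeGidAnnotationKey annotation is reported as an extra supplemental
// group, unless the annotation is empty, unparseable, or duplicates a GID
// already present in the pod's SecurityContext.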
func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	cpm := podtest.NewMockCheckpointManager()
	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager(), cpm)

	node, pod, _, claim := createObjects()

	existingGid := pod.Spec.SecurityContext.SupplementalGroups[0]

	cases := []struct {
		gidAnnotation string
		expected      []int64
	}{
		{
			gidAnnotation: "777",
			expected:      []int64{777},
		},
		{
			gidAnnotation: strconv.FormatInt(int64(existingGid), 10),
			expected:      []int64{},
		},
		{
			gidAnnotation: "a",
			expected:      []int64{},
		},
		{
			gidAnnotation: "",
			expected:      []int64{},
		},
	}

	for _, tc := range cases {
		fs := v1.PersistentVolumeFilesystem
		pv := &v1.PersistentVolume{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pvA",
				Annotations: map[string]string{
					util.VolumeGidAnnotationKey: tc.gidAnnotation,
				},
			},
			Spec: v1.PersistentVolumeSpec{
				PersistentVolumeSource: v1.PersistentVolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
				ClaimRef: &v1.ObjectReference{
					Name: claim.ObjectMeta.Name,
				},
				VolumeMode: &fs,
			},
		}
		kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)

		manager := newTestVolumeManager(tmpDir, podManager, kubeClient)

		stopCh := runVolumeManager(manager)
		defer close(stopCh)

		podManager.SetPods([]*v1.Pod{pod})

		// Fake node status update
		go simulateVolumeInUseUpdate(
			v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
			stopCh,
			manager)

		err = manager.WaitForAttachAndMount(pod)
		if err != nil {
			t.Errorf("Expected success: %v", err)
			continue
		}

		actual := manager.GetExtraSupplementalGroupsForPod(pod)
		if !reflect.DeepEqual(tc.expected, actual) {
			t.Errorf("Expected supplemental groups %v, got %v", tc.expected, actual)
		}
	}
}
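
// newTestVolumeManager builds a VolumeManager wired up with a fake volume
// plugin, fake container runtime, fake mounter, and fake event recorder so
// tests never touch real devices or mounts.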
func newTestVolumeManager(tmpDir string, podManager kubepod.Manager, kubeClient clientset.Interface) VolumeManager {
	plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
	fakeRecorder := &record.FakeRecorder{}
	plugMgr := &volume.VolumePluginMgr{}
	// TODO (#51147) inject mock prober
	plugMgr.InitPlugins([]volume.VolumePlugin{plug}, nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, kubeClient, nil))
	statusManager := status.NewManager(kubeClient, podManager, &statustest.FakePodDeletionSafetyProvider{})

	vm := NewVolumeManager(
		true,
		testHostname,
		podManager,
		statusManager,
		kubeClient,
		plugMgr,
		&containertest.FakeRuntime{},
		&mount.FakeMounter{},
		"",
		fakeRecorder,
		false, /* experimentalCheckNodeCapabilitiesBeforeMount */
		false /* keepTerminatedPodVolumes */)

	return vm
}

// createObjects returns objects for making a fake clientset. The pv is
// already attached to the node and bound to the claim used by the pod.
func createObjects() (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testHostname},
		Status: v1.NodeStatus{
			VolumesAttached: []v1.AttachedVolume{
				{
					Name:       "fake/fake-device",
					DevicePath: "fake/path",
				},
			}},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "abc",
			Namespace: "nsA",
			UID:       "1234",
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: "vol1",
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "claimA",
						},
					},
				},
			},
			SecurityContext: &v1.PodSecurityContext{
				SupplementalGroups: []int64{555},
			},
		},
	}
	fs := v1.PersistentVolumeFilesystem
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
					PDName: "fake-device",
				},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
			VolumeMode: &fs,
		},
	}
	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}
	return node, pod, pv, claim
}
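
// simulateVolumeInUseUpdate stands in for the kubelet's node status updater:
// every 100ms it marks volumeName as reported in use, until stopCh is closed.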
func simulateVolumeInUseUpdate(volumeName v1.UniqueVolumeName, stopCh <-chan struct{}, volumeManager VolumeManager) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			volumeManager.MarkVolumesAsReportedInUse(
				[]v1.UniqueVolumeName{volumeName})
		case <-stopCh:
			return
		}
	}
}
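
// delayClaimBecomesBound flips the named claim's phase to Bound after a short
// delay, simulating a PVC that binds while a pod is already waiting on it.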
func delayClaimBecomesBound(
	kubeClient clientset.Interface,
	namespace, claimName string,
) {
	time.Sleep(500 * time.Millisecond)
	volumeClaim, _ :=
		kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
	volumeClaim.Status = v1.PersistentVolumeClaimStatus{
		Phase: v1.ClaimBound,
	}
	kubeClient.CoreV1().PersistentVolumeClaims(namespace).Update(volumeClaim)
}
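
// runVolumeManager starts the volume manager in a goroutine with an
// always-ready sources indicator and returns the channel that stops it.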
func runVolumeManager(manager VolumeManager) chan struct{} {
	stopCh := make(chan struct{})
	sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true })
	go manager.Run(sourcesReady, stopCh)
	return stopCh
}