empty_dir_wrapper.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage

import (
	"fmt"
	"strconv"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)
const (
	// These numbers are obtained empirically.
	// If you make them too low, you'll get flaky tests instead of
	// failing ones if the race bug reappears.
	// If you make the volume counts or pod counts too high, the tests
	// may fail because mounting configmap/git_repo volumes is not very
	// fast and the tests may time out waiting for pods to become Running.
	// And, of course, the higher the numbers, the slower the tests.
	wrappedVolumeRaceConfigMapVolumeCount    = 50
	wrappedVolumeRaceConfigMapPodCount       = 5
	wrappedVolumeRaceConfigMapIterationCount = 3
	wrappedVolumeRaceGitRepoVolumeCount      = 50
	wrappedVolumeRaceGitRepoPodCount         = 5
	wrappedVolumeRaceGitRepoIterationCount   = 3
	wrappedVolumeRaceRCNamePrefix            = "wrapped-volume-race-"
)
var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
	f := framework.NewDefaultFramework("emptydir-wrapper")

	/*
		Release : v1.13
		Testname: EmptyDir Wrapper Volume, Secret and ConfigMap volumes, no conflict
		Description: A Secret volume and a ConfigMap volume are created with data. The Pod MUST be able to start with the Secret and ConfigMap volumes mounted into the container.
	*/
	framework.ConformanceIt("should not conflict", func() {
		name := "emptydir-wrapper-test-" + string(uuid.NewUUID())
		volumeName := "secret-volume"
		volumeMountPath := "/etc/secret-volume"

		secret := &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      name,
			},
			Data: map[string][]byte{
				"data-1": []byte("value-1\n"),
			},
		}

		var err error
		if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
			framework.Failf("unable to create test secret %s: %v", secret.Name, err)
		}

		configMapVolumeName := "configmap-volume"
		configMapVolumeMountPath := "/etc/configmap-volume"

		configMap := &v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      name,
			},
			BinaryData: map[string][]byte{
				"data-1": []byte("value-1\n"),
			},
		}

		if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
			framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
		}

		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod-secrets-" + string(uuid.NewUUID()),
			},
			Spec: v1.PodSpec{
				Volumes: []v1.Volume{
					{
						Name: volumeName,
						VolumeSource: v1.VolumeSource{
							Secret: &v1.SecretVolumeSource{
								SecretName: name,
							},
						},
					},
					{
						Name: configMapVolumeName,
						VolumeSource: v1.VolumeSource{
							ConfigMap: &v1.ConfigMapVolumeSource{
								LocalObjectReference: v1.LocalObjectReference{
									Name: name,
								},
							},
						},
					},
				},
				Containers: []v1.Container{
					{
						Name:  "secret-test",
						Image: imageutils.GetE2EImage(imageutils.TestWebserver),
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      volumeName,
								MountPath: volumeMountPath,
								ReadOnly:  true,
							},
							{
								Name:      configMapVolumeName,
								MountPath: configMapVolumeMountPath,
							},
						},
					},
				},
			},
		}
		pod = f.PodClient().CreateSync(pod)

		defer func() {
			ginkgo.By("Cleaning up the secret")
			if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil {
				framework.Failf("unable to delete secret %v: %v", secret.Name, err)
			}
			ginkgo.By("Cleaning up the configmap")
			if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil); err != nil {
				framework.Failf("unable to delete configmap %v: %v", configMap.Name, err)
			}
			ginkgo.By("Cleaning up the pod")
			if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil {
				framework.Failf("unable to delete pod %v: %v", pod.Name, err)
			}
		}()
	})
	// The following two tests check for the problem fixed in #29641.
	// In order to reproduce it you need to revert the fix, e.g. via
	// git revert -n df1e925143daf34199b55ffb91d0598244888cce
	// or
	// curl -sL https://github.com/kubernetes/kubernetes/pull/29641.patch | patch -p1 -R
	//
	// After that these tests will fail because some of the pods
	// they create never enter Running state.
	//
	// They need to be [Serial] and [Slow] because they try to induce
	// the race by creating pods with many volumes and container volume mounts,
	// which takes considerable time and may interfere with other tests.
	//
	// Probably should also try making tests for secrets and downwardapi,
	// but these cases are harder because tmpfs-based emptyDir
	// appears to be less prone to the race problem.
	/*
		Release : v1.13
		Testname: EmptyDir Wrapper Volume, ConfigMap volumes, no race
		Description: Create 50 ConfigMap volumes and 5 replicas of a pod with these ConfigMap volumes mounted. The pods MUST NOT fail waiting for volumes.
	*/
	framework.ConformanceIt("should not cause race condition when used for configmaps [Serial]", func() {
		configMapNames := createConfigmapsForRace(f)
		defer deleteConfigMaps(f, configMapNames)
		volumes, volumeMounts := makeConfigMapVolumes(configMapNames)
		for i := 0; i < wrappedVolumeRaceConfigMapIterationCount; i++ {
			testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceConfigMapPodCount)
		}
	})
	// Slow by design [~150 seconds].
	// This test uses the deprecated GitRepo VolumeSource, so it MUST NOT be promoted to Conformance.
	// To provision a container with a git repo without GitRepo, mount an EmptyDir into an
	// InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's
	// container (an illustrative sketch of that approach follows this suite).
	// The same approach could also be tested with secret and downwardAPI VolumeSources,
	// but those are less prone to the race problem.
	ginkgo.It("should not cause race condition when used for git_repo [Serial] [Slow]", func() {
		gitURL, gitRepo, cleanup := createGitServer(f)
		defer cleanup()
		volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo)
		for i := 0; i < wrappedVolumeRaceGitRepoIterationCount; i++ {
			testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceGitRepoPodCount)
		}
	})
})
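
// makeGitCloneExamplePod is an illustrative sketch, not part of the test suite, of the
// recommended replacement for the deprecated GitRepo volume mentioned above: an emptyDir
// that an init container populates with "git clone" and that the main container then mounts.
// The pod name, the "alpine/git" init image, the volume name, and the mount path are
// assumptions made for this example only.
func makeGitCloneExamplePod(gitURL, gitRepo string) *v1.Pod {
	// Example names/paths; nothing in the tests above depends on them.
	const (
		repoVolumeName = "git-repo" // emptyDir shared by the init and main containers
		repoMountPath  = "/repo"    // where both containers see the cloned repository
	)
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "git-clone-example-" + string(uuid.NewUUID()),
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					// The emptyDir stands in for the deprecated GitRepo volume source.
					Name:         repoVolumeName,
					VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
				},
			},
			InitContainers: []v1.Container{
				{
					// The init container clones the repo into the shared emptyDir.
					// Any image that ships a git binary works; alpine/git is an assumption.
					Name:    "git-clone",
					Image:   "alpine/git",
					Command: []string{"git", "clone", gitURL, repoMountPath + "/" + gitRepo},
					VolumeMounts: []v1.VolumeMount{
						{Name: repoVolumeName, MountPath: repoMountPath},
					},
				},
			},
			Containers: []v1.Container{
				{
					// The main container sees the cloned repo read-only under repoMountPath.
					Name:    "app",
					Image:   imageutils.GetE2EImage(imageutils.BusyBox),
					Command: []string{"sleep", "10000"},
					VolumeMounts: []v1.VolumeMount{
						{Name: repoVolumeName, MountPath: repoMountPath, ReadOnly: true},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
}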
func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cleanup func()) {
	var err error
	gitServerPodName := "git-server-" + string(uuid.NewUUID())
	containerPort := 8000

	labels := map[string]string{"name": gitServerPodName}

	gitServerPod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   gitServerPodName,
			Labels: labels,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "git-repo",
					Image:           imageutils.GetE2EImage(imageutils.Fakegitserver),
					ImagePullPolicy: "IfNotPresent",
					Ports: []v1.ContainerPort{
						{ContainerPort: int32(containerPort)},
					},
				},
			},
		},
	}
	f.PodClient().CreateSync(gitServerPod)

	// Portal IP and port
	httpPort := 2345

	gitServerSvc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "git-server-svc",
		},
		Spec: v1.ServiceSpec{
			Selector: labels,
			Ports: []v1.ServicePort{
				{
					Name:       "http-portal",
					Port:       int32(httpPort),
					TargetPort: intstr.FromInt(containerPort),
				},
			},
		},
	}

	if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(gitServerSvc); err != nil {
		framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err)
	}

	return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
		ginkgo.By("Cleaning up the git server pod")
		if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
			framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
		}
		ginkgo.By("Cleaning up the git server svc")
		if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil {
			framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
		}
	}
}
func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
	for i := 0; i < wrappedVolumeRaceGitRepoVolumeCount; i++ {
		volumeName := fmt.Sprintf("racey-git-repo-%d", i)
		volumes = append(volumes, v1.Volume{
			Name: volumeName,
			VolumeSource: v1.VolumeSource{
				GitRepo: &v1.GitRepoVolumeSource{
					Repository: gitURL,
					Directory:  gitRepo,
				},
			},
		})
		volumeMounts = append(volumeMounts, v1.VolumeMount{
			Name:      volumeName,
			MountPath: fmt.Sprintf("/etc/git-volume-%d", i),
		})
	}
	return
}
func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
	ginkgo.By(fmt.Sprintf("Creating %d configmaps", wrappedVolumeRaceConfigMapVolumeCount))
	for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ {
		configMapName := fmt.Sprintf("racey-configmap-%d", i)
		configMapNames = append(configMapNames, configMapName)
		configMap := &v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      configMapName,
			},
			Data: map[string]string{
				"data-1": "value-1",
			},
		}
		_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
		framework.ExpectNoError(err)
	}
	return
}
func deleteConfigMaps(f *framework.Framework, configMapNames []string) {
	ginkgo.By("Cleaning up the configMaps")
	for _, configMapName := range configMapNames {
		err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMapName, nil)
		framework.ExpectNoError(err, "unable to delete configMap %v", configMapName)
	}
}
func makeConfigMapVolumes(configMapNames []string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
	for i, configMapName := range configMapNames {
		volumeName := fmt.Sprintf("racey-configmap-%d", i)
		volumes = append(volumes, v1.Volume{
			Name: volumeName,
			VolumeSource: v1.VolumeSource{
				ConfigMap: &v1.ConfigMapVolumeSource{
					LocalObjectReference: v1.LocalObjectReference{
						Name: configMapName,
					},
					Items: []v1.KeyToPath{
						{
							Key:  "data-1",
							Path: "data-1",
						},
					},
				},
			},
		})
		volumeMounts = append(volumeMounts, v1.VolumeMount{
			Name:      volumeName,
			MountPath: fmt.Sprintf("/etc/config-%d", i),
		})
	}
	return
}
func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volumeMounts []v1.VolumeMount, podCount int32) {
	const nodeHostnameLabelKey = "kubernetes.io/hostname"

	rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
	nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
	gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
	targetNode := nodeList.Items[0]

	ginkgo.By("Creating RC which spawns configmap-volume pods")
	affinity := &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{
					{
						MatchExpressions: []v1.NodeSelectorRequirement{
							{
								Key:      nodeHostnameLabelKey,
								Operator: v1.NodeSelectorOpIn,
								Values:   []string{targetNode.Labels[nodeHostnameLabelKey]},
							},
						},
					},
				},
			},
		},
	}

	rc := &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{
			Name: rcName,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &podCount,
			Selector: map[string]string{
				"name": rcName,
			},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"name": rcName},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:    "test-container",
							Image:   imageutils.GetE2EImage(imageutils.BusyBox),
							Command: []string{"sleep", "10000"},
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceCPU: resource.MustParse("10m"),
								},
							},
							VolumeMounts: volumeMounts,
						},
					},
					Affinity:  affinity,
					DNSPolicy: v1.DNSDefault,
					Volumes:   volumes,
				},
			},
		},
	}
	_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rc)
	framework.ExpectNoError(err, "error creating replication controller")

	defer func() {
		err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
		framework.ExpectNoError(err)
	}()
	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)
	framework.ExpectNoError(err, "error waiting for %d pods to be created", podCount)

	ginkgo.By("Ensuring each pod is running")

	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp != nil {
			continue
		}
		err = f.WaitForPodRunning(pod.Name)
		framework.ExpectNoError(err, "Failed waiting for pod %s to enter running state", pod.Name)
	}
}