/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"fmt"
	"strconv"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

const (
	// These numbers are obtained empirically.
	// If you make them too low, you'll get flaky
	// tests instead of failing ones if the race bug reappears.
	// If you make volume counts or pod counts too high,
	// the tests may fail because mounting configmap/git_repo
	// volumes is not very fast and the tests may time out
	// waiting for pods to become Running.
	// And of course, the higher the numbers, the slower the tests.
	wrappedVolumeRaceConfigMapVolumeCount    = 50
	wrappedVolumeRaceConfigMapPodCount       = 5
	wrappedVolumeRaceConfigMapIterationCount = 3
	wrappedVolumeRaceGitRepoVolumeCount      = 50
	wrappedVolumeRaceGitRepoPodCount         = 5
	wrappedVolumeRaceGitRepoIterationCount   = 3

	wrappedVolumeRaceRCNamePrefix = "wrapped-volume-race-"
)

var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
	f := framework.NewDefaultFramework("emptydir-wrapper")

	/*
		Release : v1.13
		Testname: EmptyDir Wrapper Volume, Secret and ConfigMap volumes, no conflict
		Description: Secret volume and ConfigMap volume are created with data. Pod MUST be able to start with Secret and ConfigMap volumes mounted into the container.
	*/
	framework.ConformanceIt("should not conflict", func() {
		name := "emptydir-wrapper-test-" + string(uuid.NewUUID())
		volumeName := "secret-volume"
		volumeMountPath := "/etc/secret-volume"

		secret := &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      name,
			},
			Data: map[string][]byte{
				"data-1": []byte("value-1\n"),
			},
		}

		var err error
		if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
			framework.Failf("unable to create test secret %s: %v", secret.Name, err)
		}

		configMapVolumeName := "configmap-volume"
		configMapVolumeMountPath := "/etc/configmap-volume"

		configMap := &v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      name,
			},
			BinaryData: map[string][]byte{
				"data-1": []byte("value-1\n"),
			},
		}

		if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
			framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
		}

		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod-secrets-" + string(uuid.NewUUID()),
			},
			Spec: v1.PodSpec{
				Volumes: []v1.Volume{
					{
						Name: volumeName,
						VolumeSource: v1.VolumeSource{
							Secret: &v1.SecretVolumeSource{
								SecretName: name,
							},
						},
					},
					{
						Name: configMapVolumeName,
						VolumeSource: v1.VolumeSource{
							ConfigMap: &v1.ConfigMapVolumeSource{
								LocalObjectReference: v1.LocalObjectReference{
									Name: name,
								},
							},
						},
					},
				},
				Containers: []v1.Container{
					{
						Name:  "secret-test",
						Image: imageutils.GetE2EImage(imageutils.TestWebserver),
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      volumeName,
								MountPath: volumeMountPath,
								ReadOnly:  true,
							},
							{
								Name:      configMapVolumeName,
								MountPath: configMapVolumeMountPath,
							},
						},
					},
				},
			},
		}
		pod = f.PodClient().CreateSync(pod)

		defer func() {
			ginkgo.By("Cleaning up the secret")
			if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil {
				framework.Failf("unable to delete secret %v: %v", secret.Name, err)
			}
			ginkgo.By("Cleaning up the configmap")
			if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil); err != nil {
				framework.Failf("unable to delete configmap %v: %v", configMap.Name, err)
			}
			ginkgo.By("Cleaning up the pod")
			if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil {
				framework.Failf("unable to delete pod %v: %v", pod.Name, err)
			}
		}()
	})

	// The following two tests check for the problem fixed in #29641.
	// In order to reproduce it you need to revert the fix, e.g. via
	// git revert -n df1e925143daf34199b55ffb91d0598244888cce
	// or
	// curl -sL https://github.com/kubernetes/kubernetes/pull/29641.patch | patch -p1 -R
	//
	// After that these tests will fail because some of the pods
	// they create never enter the Running state.
	//
	// They need to be [Serial] and [Slow] because they try to induce
	// the race by creating pods with many volumes and container volume mounts,
	// which takes considerable time and may interfere with other tests.
	//
	// We should probably also write tests for secrets and downwardapi,
	// but those cases are harder because tmpfs-based emptyDir
	// appears to be less prone to the race problem.

	/*
		Release : v1.13
		Testname: EmptyDir Wrapper Volume, ConfigMap volumes, no race
		Description: Create 50 ConfigMap volumes and 5 replicas of a pod with these ConfigMap volumes mounted. Pod MUST NOT fail waiting for Volumes.
	*/
	framework.ConformanceIt("should not cause race condition when used for configmaps [Serial]", func() {
		configMapNames := createConfigmapsForRace(f)
		defer deleteConfigMaps(f, configMapNames)
		volumes, volumeMounts := makeConfigMapVolumes(configMapNames)
		for i := 0; i < wrappedVolumeRaceConfigMapIterationCount; i++ {
			testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceConfigMapPodCount)
		}
	})

	// Slow by design [~150 Seconds].
	// This test uses the deprecated GitRepo VolumeSource, so it MUST NOT be promoted to Conformance.
	// To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones
	// the repo using git, then mount the EmptyDir into the Pod's container.
	// The same approach could also be exercised with secrets and downwardapi VolumeSources,
	// but those are less prone to the race problem.
	ginkgo.It("should not cause race condition when used for git_repo [Serial] [Slow]", func() {
		gitURL, gitRepo, cleanup := createGitServer(f)
		defer cleanup()
		volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo)
		for i := 0; i < wrappedVolumeRaceGitRepoIterationCount; i++ {
			testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceGitRepoPodCount)
		}
	})
})
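
// createGitServer starts a fake git server pod and a Service in front of it,
// and returns the URL of the test repository, the repository directory name,
// and a cleanup function that deletes both the pod and the Service.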
func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cleanup func()) {
	var err error
	gitServerPodName := "git-server-" + string(uuid.NewUUID())
	containerPort := 8000

	labels := map[string]string{"name": gitServerPodName}

	gitServerPod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   gitServerPodName,
			Labels: labels,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "git-repo",
					Image:           imageutils.GetE2EImage(imageutils.Fakegitserver),
					ImagePullPolicy: "IfNotPresent",
					Ports: []v1.ContainerPort{
						{ContainerPort: int32(containerPort)},
					},
				},
			},
		},
	}
	f.PodClient().CreateSync(gitServerPod)

	// Portal IP and port
	httpPort := 2345

	gitServerSvc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "git-server-svc",
		},
		Spec: v1.ServiceSpec{
			Selector: labels,
			Ports: []v1.ServicePort{
				{
					Name:       "http-portal",
					Port:       int32(httpPort),
					TargetPort: intstr.FromInt(containerPort),
				},
			},
		},
	}

	if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(gitServerSvc); err != nil {
		framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err)
	}

	return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
		ginkgo.By("Cleaning up the git server pod")
		if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
			framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
		}
		ginkgo.By("Cleaning up the git server svc")
		if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil {
			framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
		}
	}
}
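
// makeGitRepoVolumes returns wrappedVolumeRaceGitRepoVolumeCount gitRepo volumes
// that all clone the same repository, together with a matching volume mount for each.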
func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
	for i := 0; i < wrappedVolumeRaceGitRepoVolumeCount; i++ {
		volumeName := fmt.Sprintf("racey-git-repo-%d", i)
		volumes = append(volumes, v1.Volume{
			Name: volumeName,
			VolumeSource: v1.VolumeSource{
				GitRepo: &v1.GitRepoVolumeSource{
					Repository: gitURL,
					Directory:  gitRepo,
				},
			},
		})
		volumeMounts = append(volumeMounts, v1.VolumeMount{
			Name:      volumeName,
			MountPath: fmt.Sprintf("/etc/git-volume-%d", i),
		})
	}
	return
}
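
// createConfigmapsForRace creates wrappedVolumeRaceConfigMapVolumeCount configmaps
// in the test namespace and returns their names.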
func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
	ginkgo.By(fmt.Sprintf("Creating %d configmaps", wrappedVolumeRaceConfigMapVolumeCount))
	for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ {
		configMapName := fmt.Sprintf("racey-configmap-%d", i)
		configMapNames = append(configMapNames, configMapName)
		configMap := &v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      configMapName,
			},
			Data: map[string]string{
				"data-1": "value-1",
			},
		}
		_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
		framework.ExpectNoError(err)
	}
	return
}
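
// deleteConfigMaps deletes the configmaps with the given names from the test namespace.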
func deleteConfigMaps(f *framework.Framework, configMapNames []string) {
	ginkgo.By("Cleaning up the configMaps")
	for _, configMapName := range configMapNames {
		err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMapName, nil)
		framework.ExpectNoError(err, "unable to delete configMap %v", configMapName)
	}
}
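
// makeConfigMapVolumes returns a configMap volume and a matching volume mount
// for each of the given configmap names.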
func makeConfigMapVolumes(configMapNames []string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
	for i, configMapName := range configMapNames {
		volumeName := fmt.Sprintf("racey-configmap-%d", i)
		volumes = append(volumes, v1.Volume{
			Name: volumeName,
			VolumeSource: v1.VolumeSource{
				ConfigMap: &v1.ConfigMapVolumeSource{
					LocalObjectReference: v1.LocalObjectReference{
						Name: configMapName,
					},
					Items: []v1.KeyToPath{
						{
							Key:  "data-1",
							Path: "data-1",
						},
					},
				},
			},
		})
		volumeMounts = append(volumeMounts, v1.VolumeMount{
			Name:      volumeName,
			MountPath: fmt.Sprintf("/etc/config-%d", i),
		})
	}
	return
}
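
// testNoWrappedVolumeRace creates a ReplicationController whose pods mount all of
// the given volumes on a single node and verifies that every pod reaches the
// Running state; if the wrapped-volume race reappears, some pods never start and
// the test times out.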
func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volumeMounts []v1.VolumeMount, podCount int32) {
	const nodeHostnameLabelKey = "kubernetes.io/hostname"
	rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
	nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
	gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
	targetNode := nodeList.Items[0]
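
	// Pin all pods of the RC created below to a single node so that one kubelet
	// sets up many wrapped volumes concurrently, the scenario in which the race
	// fixed in #29641 used to show up.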
	ginkgo.By("Creating RC which spawns configmap-volume pods")
	affinity := &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{
					{
						MatchExpressions: []v1.NodeSelectorRequirement{
							{
								Key:      nodeHostnameLabelKey,
								Operator: v1.NodeSelectorOpIn,
								Values:   []string{targetNode.Labels[nodeHostnameLabelKey]},
							},
						},
					},
				},
			},
		},
	}

	rc := &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{
			Name: rcName,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &podCount,
			Selector: map[string]string{
				"name": rcName,
			},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"name": rcName},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:    "test-container",
							Image:   imageutils.GetE2EImage(imageutils.BusyBox),
							Command: []string{"sleep", "10000"},
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceCPU: resource.MustParse("10m"),
								},
							},
							VolumeMounts: volumeMounts,
						},
					},
					Affinity:  affinity,
					DNSPolicy: v1.DNSDefault,
					Volumes:   volumes,
				},
			},
		},
	}
	_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rc)
	framework.ExpectNoError(err, "error creating replication controller")

	defer func() {
		err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
		framework.ExpectNoError(err)
	}()

	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)
	framework.ExpectNoError(err, "error creating pods")

	ginkgo.By("Ensuring each pod is running")

	// Wait for the pods to enter the Running state. The loop waits on each pod in
	// turn, so a pod that never starts running causes this test to time out.
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp != nil {
			continue
		}
		err = f.WaitForPodRunning(pod.Name)
		framework.ExpectNoError(err, "Failed waiting for pod %s to enter running state", pod.Name)
	}
}