update.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package daemon

import (
	"bytes"
	"fmt"
	"reflect"
	"sort"

	"k8s.io/klog"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/json"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/daemon/util"
	labelsutil "k8s.io/kubernetes/pkg/util/labels"
)

// rollingUpdate deletes old daemon set pods, making sure that no more than
// ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable pods are unavailable.
func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
	if err != nil {
		return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
	}

	_, oldPods := dsc.getAllDaemonSetPods(ds, nodeToDaemonPods, hash)
	maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, nodeList, nodeToDaemonPods)
	if err != nil {
		return fmt.Errorf("couldn't get unavailable numbers: %v", err)
	}
	oldAvailablePods, oldUnavailablePods := util.SplitByAvailablePods(ds.Spec.MinReadySeconds, oldPods)

	// Among oldPods, delete all pods that are not available; their nodes are
	// already counted as unavailable, so this doesn't consume the budget.
	var oldPodsToDelete []string
	klog.V(4).Infof("Marking all unavailable old pods for deletion")
	for _, pod := range oldUnavailablePods {
		// Skip terminating pods; we won't delete them again.
		if pod.DeletionTimestamp != nil {
			continue
		}
		klog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
		oldPodsToDelete = append(oldPodsToDelete, pod.Name)
	}

	// Delete available old pods until maxUnavailable is reached.
	klog.V(4).Infof("Marking old pods for deletion")
	for _, pod := range oldAvailablePods {
		if numUnavailable >= maxUnavailable {
			klog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable)
			break
		}
		klog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
		oldPodsToDelete = append(oldPodsToDelete, pod.Name)
		numUnavailable++
	}
	return dsc.syncNodes(ds, oldPodsToDelete, []string{}, hash)
}

// constructHistory finds all histories controlled by the given DaemonSet, and
// updates the current history's revision number, or creates the current
// history if needed. It also deduplicates current histories and adds the
// missing unique label to existing histories.
func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) {
	var histories []*apps.ControllerRevision
	var currentHistories []*apps.ControllerRevision
	histories, err = dsc.controlledHistories(ds)
	if err != nil {
		return nil, nil, err
	}
	for _, history := range histories {
		// Add the unique label if it's not already added to the history.
		// We use the history name instead of computing a hash, so that we
		// don't need to worry about hash collisions.
		if _, ok := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; !ok {
			toUpdate := history.DeepCopy()
			toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
			history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate)
			if err != nil {
				return nil, nil, err
			}
		}
		// Compare histories with ds to separate current and old histories.
		found := false
		found, err = Match(ds, history)
		if err != nil {
			return nil, nil, err
		}
		if found {
			currentHistories = append(currentHistories, history)
		} else {
			old = append(old, history)
		}
	}

	// The current history's revision number is one greater than the largest old revision.
	currRevision := maxRevision(old) + 1
	switch len(currentHistories) {
	case 0:
		// Create a new history if the current one isn't found.
		cur, err = dsc.snapshot(ds, currRevision)
		if err != nil {
			return nil, nil, err
		}
	default:
		cur, err = dsc.dedupCurHistories(ds, currentHistories)
		if err != nil {
			return nil, nil, err
		}
		// Update the revision number if necessary.
		if cur.Revision < currRevision {
			toUpdate := cur.DeepCopy()
			toUpdate.Revision = currRevision
			_, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate)
			if err != nil {
				return nil, nil, err
			}
		}
	}
	return cur, old, err
}

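// cleanupHistory deletes old ControllerRevisions in excess of
// ds.Spec.RevisionHistoryLimit, from lowest to highest revision, skipping any
// revision that still has live pods labeled with its hash.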
func (dsc *DaemonSetsController) cleanupHistory(ds *apps.DaemonSet, old []*apps.ControllerRevision) error {
	nodesToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
	if err != nil {
		return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
	}

	toKeep := int(*ds.Spec.RevisionHistoryLimit)
	toKill := len(old) - toKeep
	if toKill <= 0 {
		return nil
	}

	// Find all hashes of live pods
	liveHashes := make(map[string]bool)
	for _, pods := range nodesToDaemonPods {
		for _, pod := range pods {
			if hash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]; len(hash) > 0 {
				liveHashes[hash] = true
			}
		}
	}

	// Find all live histories with the above hashes
	liveHistory := make(map[string]bool)
	for _, history := range old {
		if hash := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; liveHashes[hash] {
			liveHistory[history.Name] = true
		}
	}

	// Clean up old histories from lowest to highest revision (from oldest to newest)
	sort.Sort(historiesByRevision(old))
	for _, history := range old {
		if toKill <= 0 {
			break
		}
		if liveHistory[history.Name] {
			continue
		}
		// Clean up
		err := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(history.Name, nil)
		if err != nil {
			return err
		}
		toKill--
	}
	return nil
}

// maxRevision returns the max revision number of the given list of histories
func maxRevision(histories []*apps.ControllerRevision) int64 {
	max := int64(0)
	for _, history := range histories {
		if history.Revision > max {
			max = history.Revision
		}
	}
	return max
}

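// dedupCurHistories deduplicates the current ControllerRevisions: it keeps
// the one with the highest revision number, relabels the DaemonSet's pods to
// point at it, and deletes the rest.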
func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) {
	if len(curHistories) == 1 {
		return curHistories[0], nil
	}
	var maxRevision int64
	var keepCur *apps.ControllerRevision
	for _, cur := range curHistories {
		if cur.Revision >= maxRevision {
			keepCur = cur
			maxRevision = cur.Revision
		}
	}
	// Clean up duplicates and relabel pods
	for _, cur := range curHistories {
		if cur.Name == keepCur.Name {
			continue
		}
		// Relabel pods before dedup
		pods, err := dsc.getDaemonPods(ds)
		if err != nil {
			return nil, err
		}
		for _, pod := range pods {
			if pod.Labels[apps.DefaultDaemonSetUniqueLabelKey] != keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey] {
				toUpdate := pod.DeepCopy()
				if toUpdate.Labels == nil {
					toUpdate.Labels = make(map[string]string)
				}
				toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
				_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(toUpdate)
				if err != nil {
					return nil, err
				}
			}
		}
		// Remove duplicates
		err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(cur.Name, nil)
		if err != nil {
			return nil, err
		}
	}
	return keepCur, nil
}

// controlledHistories returns all ControllerRevisions controlled by the given DaemonSet.
// This also reconciles ControllerRef by adopting/orphaning.
// Note that returned histories are pointers to objects in the cache.
// If you want to modify one, you need to deep-copy it first.
func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*apps.ControllerRevision, error) {
	selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
	if err != nil {
		return nil, err
	}

	// List all histories to include those that don't match the selector anymore
	// but have a ControllerRef pointing to the controller.
	histories, err := dsc.historyLister.List(labels.Everything())
	if err != nil {
		return nil, err
	}

	// If any adoptions are attempted, we should first recheck for deletion with
	// an uncached quorum read sometime after listing Pods (see #42639).
	canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
		fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		if fresh.UID != ds.UID {
			return nil, fmt.Errorf("original DaemonSet %v/%v is gone: got uid %v, wanted %v", ds.Namespace, ds.Name, fresh.UID, ds.UID)
		}
		return fresh, nil
	})
	// Use ControllerRefManager to adopt/orphan as needed.
	cm := controller.NewControllerRevisionControllerRefManager(dsc.crControl, ds, selector, controllerKind, canAdoptFunc)
	return cm.ClaimControllerRevisions(histories)
}

// Match checks whether the given DaemonSet's template matches the template
// stored in the given history.
func Match(ds *apps.DaemonSet, history *apps.ControllerRevision) (bool, error) {
	patch, err := getPatch(ds)
	if err != nil {
		return false, err
	}
	return bytes.Equal(patch, history.Data.Raw), nil
}

// getPatch returns a strategic merge patch that can be applied to restore a
// DaemonSet to a previous version. If the returned error is nil the patch is
// valid. The current state that we save is just the PodSpecTemplate. We can
// modify this later to encompass more state (or less) and remain compatible
// with previously recorded patches.
func getPatch(ds *apps.DaemonSet) ([]byte, error) {
	dsBytes, err := json.Marshal(ds)
	if err != nil {
		return nil, err
	}
	var raw map[string]interface{}
	err = json.Unmarshal(dsBytes, &raw)
	if err != nil {
		return nil, err
	}
	objCopy := make(map[string]interface{})
	specCopy := make(map[string]interface{})

	// Create a patch of the DaemonSet that replaces spec.template
	spec := raw["spec"].(map[string]interface{})
	template := spec["template"].(map[string]interface{})
	specCopy["template"] = template
	template["$patch"] = "replace"
	objCopy["spec"] = specCopy
	patch, err := json.Marshal(objCopy)
	return patch, err
}

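// snapshot records the DaemonSet's current pod template as a new
// ControllerRevision at the given revision number. On a name collision with
// an existing revision that doesn't match, it bumps the DaemonSet's
// collisionCount and returns an error so the sync is retried with a new hash.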
func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*apps.ControllerRevision, error) {
	patch, err := getPatch(ds)
	if err != nil {
		return nil, err
	}
	hash := controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount)
	name := ds.Name + "-" + hash
	history := &apps.ControllerRevision{
		ObjectMeta: metav1.ObjectMeta{
			Name:            name,
			Namespace:       ds.Namespace,
			Labels:          labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, apps.DefaultDaemonSetUniqueLabelKey, hash),
			Annotations:     ds.Annotations,
			OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ds, controllerKind)},
		},
		Data:     runtime.RawExtension{Raw: patch},
		Revision: revision,
	}

	history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(history)
	if outerErr := err; errors.IsAlreadyExists(outerErr) {
		// TODO: Is it okay to get from historyLister?
		existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
		if getErr != nil {
			return nil, getErr
		}
		// Check if we already created it
		done, matchErr := Match(ds, existedHistory)
		if matchErr != nil {
			return nil, matchErr
		}
		if done {
			return existedHistory, nil
		}

		// Handle name collisions between different histories.
		// Get the latest DaemonSet from the API server to make sure collision count is only increased when necessary.
		currDS, getErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
		if getErr != nil {
			return nil, getErr
		}
		// If the collision count used to compute hash was in fact stale, there's no need to bump collision count; retry again
		if !reflect.DeepEqual(currDS.Status.CollisionCount, ds.Status.CollisionCount) {
			return nil, fmt.Errorf("found a stale collision count (%d, expected %d) of DaemonSet %q while processing; will retry until it is updated", ds.Status.CollisionCount, currDS.Status.CollisionCount, ds.Name)
		}
		if currDS.Status.CollisionCount == nil {
			currDS.Status.CollisionCount = new(int32)
		}
		*currDS.Status.CollisionCount++
		_, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(currDS)
		if updateErr != nil {
			return nil, updateErr
		}
		klog.V(2).Infof("Found a hash collision for DaemonSet %q - bumping collisionCount to %d to resolve it", ds.Name, *currDS.Status.CollisionCount)
		return nil, outerErr
	}
	return history, err
}

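// getAllDaemonSetPods partitions the DaemonSet's pods into those that match
// the current template (by hash, or by template generation for legacy pods)
// and those that don't, returning (newPods, oldPods).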
func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod, hash string) ([]*v1.Pod, []*v1.Pod) {
	var newPods []*v1.Pod
	var oldPods []*v1.Pod
	for _, pods := range nodeToDaemonPods {
		for _, pod := range pods {
			// If the returned error is not nil we have a parse error.
			// The controller handles this via the hash.
			generation, err := util.GetTemplateGeneration(ds)
			if err != nil {
				generation = nil
			}
			if util.IsPodUpdated(pod, hash, generation) {
				newPods = append(newPods, pod)
			} else {
				oldPods = append(oldPods, pod)
			}
		}
	}
	return newPods, oldPods
}

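// getUnavailableNumbers returns the allowed maximum number of unavailable
// pods (resolving MaxUnavailable, which may be an absolute number or a
// percentage of the desired scheduled count) and the current number of
// unavailable pods, counting only nodes that should run a daemon pod.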
func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeList []*v1.Node, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
	klog.V(4).Infof("Getting unavailable numbers")
	var numUnavailable, desiredNumberScheduled int
	for i := range nodeList {
		node := nodeList[i]
		wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
		if err != nil {
			return -1, -1, err
		}
		if !wantToRun {
			continue
		}
		desiredNumberScheduled++
		daemonPods, exists := nodeToDaemonPods[node.Name]
		if !exists {
			numUnavailable++
			continue
		}
		available := false
		for _, pod := range daemonPods {
			// For the purposes of update, we ensure that the pod is both available and not terminating.
			if podutil.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) && pod.DeletionTimestamp == nil {
				available = true
				break
			}
		}
		if !available {
			numUnavailable++
		}
	}
	maxUnavailable, err := intstrutil.GetValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, desiredNumberScheduled, true)
	if err != nil {
		return -1, -1, fmt.Errorf("invalid value for MaxUnavailable: %v", err)
	}
	klog.V(4).Infof("DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable)
	return maxUnavailable, numUnavailable, nil
}

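// historiesByRevision implements sort.Interface to order ControllerRevisions
// by ascending revision number.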
type historiesByRevision []*apps.ControllerRevision

func (h historiesByRevision) Len() int      { return len(h) }
func (h historiesByRevision) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h historiesByRevision) Less(i, j int) bool {
	return h[i].Revision < h[j].Revision
}