cloud_cidr_allocator.go

// +build !providerless

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ipam

import (
	"fmt"
	"math/rand"
	"net"
	"sync"
	"time"

	"k8s.io/klog"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	informers "k8s.io/client-go/informers/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	cloudprovider "k8s.io/cloud-provider"
	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
	utilnode "k8s.io/kubernetes/pkg/util/node"
	utiltaints "k8s.io/kubernetes/pkg/util/taints"
	"k8s.io/legacy-cloud-providers/gce"
)

// nodeProcessingInfo tracks per-node state (such as the retry count) for a
// node that is currently being processed.
type nodeProcessingInfo struct {
	retries int
}

// cloudCIDRAllocator allocates node CIDRs according to IP address aliases
// assigned by the cloud provider. In this case, the allocation and
// deallocation is delegated to the external provider, and the controller
// merely takes the assignment and updates the node spec.
type cloudCIDRAllocator struct {
	client clientset.Interface
	cloud  *gce.Cloud

	// nodeLister is able to list/get nodes and is populated by the shared informer passed to
	// NewCloudCIDRAllocator.
	nodeLister corelisters.NodeLister
	// nodesSynced returns true if the node shared informer has been synced at least once.
	nodesSynced cache.InformerSynced

	// Channel that is used to pass updating Nodes to the background.
	// This increases the throughput of CIDR assignment by parallelization
	// and not blocking on long operations (which shouldn't be done from
	// event handlers anyway).
	nodeUpdateChannel chan string
	recorder          record.EventRecorder

	// Keep a set of nodes that are currently being processed to avoid races in CIDR allocation.
	lock              sync.Mutex
	nodesInProcessing map[string]*nodeProcessingInfo
}

var _ CIDRAllocator = (*cloudCIDRAllocator)(nil)

// NewCloudCIDRAllocator creates a new cloud CIDR allocator.
func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) {
	if client == nil {
		klog.Fatalf("kubeClient is nil when starting NodeController")
	}

	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
	eventBroadcaster.StartLogging(klog.Infof)
	klog.V(0).Infof("Sending events to api server.")
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})

	gceCloud, ok := cloud.(*gce.Cloud)
	if !ok {
		err := fmt.Errorf("cloudCIDRAllocator does not support %v provider", cloud.ProviderName())
		return nil, err
	}

	ca := &cloudCIDRAllocator{
		client:            client,
		cloud:             gceCloud,
		nodeLister:        nodeInformer.Lister(),
		nodesSynced:       nodeInformer.Informer().HasSynced,
		nodeUpdateChannel: make(chan string, cidrUpdateQueueSize),
		recorder:          recorder,
		nodesInProcessing: map[string]*nodeProcessingInfo{},
	}

	nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: nodeutil.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR),
		UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
			if newNode.Spec.PodCIDR == "" {
				return ca.AllocateOrOccupyCIDR(newNode)
			}
			// Even when a PodCIDR is already assigned, the node still needs
			// to be processed if its NetworkUnavailable condition has not
			// been set to False, or if the network-unavailable taint is
			// still present, so that the condition can be updated.
			networkUnavailableTaint := &v1.Taint{Key: v1.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
			_, cond := nodeutil.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
			if cond == nil || cond.Status != v1.ConditionFalse || utiltaints.TaintExists(newNode.Spec.Taints, networkUnavailableTaint) {
				return ca.AllocateOrOccupyCIDR(newNode)
			}
			return nil
		}),
		DeleteFunc: nodeutil.CreateDeleteNodeHandler(ca.ReleaseCIDR),
	})

	klog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
	return ca, nil
}

func (ca *cloudCIDRAllocator) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	klog.Infof("Starting cloud CIDR allocator")
	defer klog.Infof("Shutting down cloud CIDR allocator")

	if !cache.WaitForNamedCacheSync("cidrallocator", stopCh, ca.nodesSynced) {
		return
	}
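
	// Start a fixed pool of workers that drain nodeUpdateChannel in the
	// background; cidrUpdateWorkers bounds the concurrency of CIDR updates.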
	for i := 0; i < cidrUpdateWorkers; i++ {
		go ca.worker(stopCh)
	}

	<-stopCh
}
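
// worker pulls node names off nodeUpdateChannel and attempts the CIDR update
// for each, until the stop channel is closed.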
func (ca *cloudCIDRAllocator) worker(stopChan <-chan struct{}) {
	for {
		select {
		case workItem, ok := <-ca.nodeUpdateChannel:
			if !ok {
				klog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
				return
			}
			if err := ca.updateCIDRAllocation(workItem); err == nil {
				klog.V(3).Infof("Updated CIDR for %q", workItem)
			} else {
				klog.Errorf("Error updating CIDR for %q: %v", workItem, err)
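				// Note that the node is intentionally left in
				// nodesInProcessing on the retry path below, so that
				// retryParams can keep counting attempts for it; it is only
				// removed once the update succeeds or retries are exhausted.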
				if canRetry, timeout := ca.retryParams(workItem); canRetry {
					klog.V(2).Infof("Retrying update for %q after %v", workItem, timeout)
					time.AfterFunc(timeout, func() {
						// Requeue the failed node for update again.
						ca.nodeUpdateChannel <- workItem
					})
					continue
				}
				klog.Errorf("Exceeded retry count for %q, dropping from queue", workItem)
			}
			ca.removeNodeFromProcessing(workItem)
		case <-stopChan:
			return
		}
	}
}
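
// insertNodeToProcessing marks nodeName as being processed; it returns false
// if the node is already in flight.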
func (ca *cloudCIDRAllocator) insertNodeToProcessing(nodeName string) bool {
	ca.lock.Lock()
	defer ca.lock.Unlock()
	if _, found := ca.nodesInProcessing[nodeName]; found {
		return false
	}
	ca.nodesInProcessing[nodeName] = &nodeProcessingInfo{}
	return true
}
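
// retryParams reports whether the update for nodeName may be retried and, if
// so, how long to wait before requeueing it.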
func (ca *cloudCIDRAllocator) retryParams(nodeName string) (bool, time.Duration) {
	ca.lock.Lock()
	defer ca.lock.Unlock()

	entry, ok := ca.nodesInProcessing[nodeName]
	if !ok {
		klog.Errorf("Cannot get retryParams for %q as entry does not exist", nodeName)
		return false, 0
	}

	count := entry.retries + 1
	if count > updateMaxRetries {
		return false, 0
	}
	ca.nodesInProcessing[nodeName].retries = count

	return true, nodeUpdateRetryTimeout(count)
}

func nodeUpdateRetryTimeout(count int) time.Duration {
	timeout := updateRetryTimeout
	for i := 0; i < count && timeout < maxUpdateRetryTimeout; i++ {
		timeout *= 2
	}

	if timeout > maxUpdateRetryTimeout {
		timeout = maxUpdateRetryTimeout
	}
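
	// Apply jitter: the result is uniformly distributed in
	// [timeout/2, 3*timeout/2), so retries for many failing nodes are less
	// likely to hit the API server in lockstep.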
	return time.Duration(timeout.Nanoseconds()/2 + rand.Int63n(timeout.Nanoseconds()))
}

func (ca *cloudCIDRAllocator) removeNodeFromProcessing(nodeName string) {
	ca.lock.Lock()
	defer ca.lock.Unlock()
	delete(ca.nodesInProcessing, nodeName)
}

// WARNING: If you're adding any return calls or defer any more work from this
// function you have to make sure to update nodesInProcessing properly with the
// disposition of the node when the work is done.
func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
	if node == nil {
		return nil
	}
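	// insertNodeToProcessing doubles as a de-duplication guard: if the node is
	// already in the map, a worker (or a pending retry) is handling it and
	// this event can be dropped.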
	if !ca.insertNodeToProcessing(node.Name) {
		klog.V(2).Infof("Node %v is already in the process of CIDR assignment.", node.Name)
		return nil
	}

	klog.V(4).Infof("Putting node %s into the work queue", node.Name)
	ca.nodeUpdateChannel <- node.Name
	return nil
}

// updateCIDRAllocation assigns CIDR to Node and sends an update to the API server.
func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
	node, err := ca.nodeLister.Get(nodeName)
	if err != nil {
		if errors.IsNotFound(err) {
			return nil // node no longer available, skip processing
		}
		klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err)
		return err
	}

	cidrs, err := ca.cloud.AliasRanges(types.NodeName(nodeName))
	if err != nil {
		nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
		return fmt.Errorf("failed to allocate cidr: %v", err)
	}
	if len(cidrs) == 0 {
		nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
		return fmt.Errorf("failed to allocate cidr: Node %v has no CIDRs", node.Name)
	}
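	// Only the first alias range reported by the cloud is used as the node's
	// PodCIDR; ParseCIDR also normalizes the string to its network base address.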
	_, cidr, err := net.ParseCIDR(cidrs[0])
	if err != nil {
		return fmt.Errorf("failed to parse string '%s' as a CIDR: %v", cidrs[0], err)
	}
	podCIDR := cidr.String()

	if node.Spec.PodCIDR == podCIDR {
		klog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
		// We don't return here, so that the NetworkUnavailable condition can
		// still be set below.
	} else {
		if node.Spec.PodCIDR != "" {
			klog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", node.Name, node.Spec.PodCIDR, podCIDR)
			// We fall through and set the CIDR despite this error. This
			// implements the same logic as implemented in the
			// rangeAllocator.
			//
			// See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248
		}
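		// Retry the patch a few times to ride out transient API server
		// errors; err still holds the last failure and is checked below.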
		for i := 0; i < cidrUpdateRetries; i++ {
			if err = utilnode.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil {
				klog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
				break
			}
		}
	}
	if err != nil {
		nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
		klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
		return err
	}
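	// With alias IP ranges, pod traffic is routed by the cloud network itself,
	// so there is no route for the route controller to create here; the
	// NetworkUnavailable condition (with its legacy "RouteCreated" reason) is
	// simply cleared to mark node networking as ready.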
	err = utilnode.SetNodeCondition(ca.client, types.NodeName(node.Name), v1.NodeCondition{
		Type:               v1.NodeNetworkUnavailable,
		Status:             v1.ConditionFalse,
		Reason:             "RouteCreated",
		Message:            "NodeController create implicit route",
		LastTransitionTime: metav1.Now(),
	})
	if err != nil {
		klog.Errorf("Error setting route status for node %v: %v", node.Name, err)
	}
	return err
}

func (ca *cloudCIDRAllocator) ReleaseCIDR(node *v1.Node) error {
	klog.V(2).Infof("Node %v PodCIDR (%v) will be released by external cloud provider (not managed by controller)",
		node.Name, node.Spec.PodCIDR)
	return nil
}