/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ipam

import (
	"fmt"
	"net"
	"sync"

	"k8s.io/klog"

	"k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	informers "k8s.io/client-go/informers/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
	utilnode "k8s.io/kubernetes/pkg/util/node"
)

type rangeAllocator struct {
	client      clientset.Interface
	cidrs       *cidrset.CidrSet
	clusterCIDR *net.IPNet
	maxCIDRs    int
	// nodeLister is able to list/get nodes and is populated by the shared informer passed to
	// NewCIDRRangeAllocator.
	nodeLister corelisters.NodeLister
	// nodesSynced returns true if the node shared informer has been synced at least once.
	nodesSynced cache.InformerSynced
	// Channel that is used to pass Nodes with newly assigned CIDRs to the background worker.
	// This increases the throughput of CIDR assignment by not blocking on long operations.
	nodeCIDRUpdateChannel chan nodeAndCIDR
	recorder              record.EventRecorder
	// Keep a set of nodes that are currently being processed to avoid races in CIDR allocation.
	lock              sync.Mutex
	nodesInProcessing sets.String
}

// NewCIDRRangeAllocator returns a CIDRAllocator that allocates CIDRs for nodes.
// Caller must ensure subNetMaskSize is not less than the cluster CIDR mask size.
// Caller must always pass in a list of existing nodes so the new allocator
// can initialize its CIDR map. NodeList is only nil in testing.
func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
	if client == nil {
		klog.Fatalf("kubeClient is nil when starting NodeController")
	}

	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
	eventBroadcaster.StartLogging(klog.Infof)
	klog.V(0).Infof("Sending events to api server.")
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})

	set, err := cidrset.NewCIDRSet(clusterCIDR, subNetMaskSize)
	if err != nil {
		return nil, err
	}
	ra := &rangeAllocator{
		client:                client,
		cidrs:                 set,
		clusterCIDR:           clusterCIDR,
		nodeLister:            nodeInformer.Lister(),
		nodesSynced:           nodeInformer.Informer().HasSynced,
		nodeCIDRUpdateChannel: make(chan nodeAndCIDR, cidrUpdateQueueSize),
		recorder:              recorder,
		nodesInProcessing:     sets.NewString(),
	}

	if serviceCIDR != nil {
		ra.filterOutServiceRange(serviceCIDR)
	} else {
		klog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.")
	}

	if nodeList != nil {
		for _, node := range nodeList.Items {
			if node.Spec.PodCIDR == "" {
				klog.Infof("Node %v has no CIDR, ignoring", node.Name)
				continue
			} else {
				klog.Infof("Node %v has CIDR %s, occupying it in CIDR map",
					node.Name, node.Spec.PodCIDR)
			}
			if err := ra.occupyCIDR(&node); err != nil {
				// This will happen if:
				// 1. We find garbage in the podCIDR field. Retrying is useless.
				// 2. CIDR out of range: this means a node CIDR has changed.
				// This error will keep crashing controller-manager.
				return nil, err
			}
		}
	}

	nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: nodeutil.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR),
		UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
			// If the PodCIDR is not empty we either:
			// - already processed a Node that already had a CIDR after NC restarted
			//   (cidr is marked as used),
			// - already processed a Node successfully and allocated a CIDR for it
			//   (cidr is marked as used),
			// - already processed a Node but saw a "timeout" response and the
			//   request eventually got through; in this case we haven't released
			//   the allocated CIDR (cidr is still marked as used).
			// There's a possible error here:
			// - NC sees a new Node and assigns CIDR X to it,
			// - the Update Node call fails with a timeout,
			// - the Node is updated by some other component, NC sees the update
			//   and assigns CIDR Y to the Node,
			// - both CIDR X and CIDR Y are marked as used in the local cache,
			//   even though the Node sees only CIDR Y.
			// The problem here is that in the in-memory cache we see CIDR X as
			// marked, which prevents it from being assigned to any new node. The
			// cluster state is correct.
			// A restart of NC fixes the issue.
			if newNode.Spec.PodCIDR == "" {
				return ra.AllocateOrOccupyCIDR(newNode)
			}
			return nil
		}),
		DeleteFunc: nodeutil.CreateDeleteNodeHandler(ra.ReleaseCIDR),
	})

	return ra, nil
}
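
// Run starts the allocator: it waits for the node informer cache to sync and
// then launches the background workers that push CIDR assignments to the API
// server. It blocks until stopCh is closed.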
func (r *rangeAllocator) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	klog.Infof("Starting range CIDR allocator")
	defer klog.Infof("Shutting down range CIDR allocator")

	if !controller.WaitForCacheSync("cidrallocator", stopCh, r.nodesSynced) {
		return
	}

	for i := 0; i < cidrUpdateWorkers; i++ {
		go r.worker(stopCh)
	}

	<-stopCh
}
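
// worker processes items from nodeCIDRUpdateChannel until the channel is
// closed or stopChan is signalled, requeueing any node whose update fails.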
func (r *rangeAllocator) worker(stopChan <-chan struct{}) {
	for {
		select {
		case workItem, ok := <-r.nodeCIDRUpdateChannel:
			if !ok {
				klog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
				return
			}
			if err := r.updateCIDRAllocation(workItem); err != nil {
				// Requeue the failed node for update again.
				r.nodeCIDRUpdateChannel <- workItem
			}
		case <-stopChan:
			return
		}
	}
}
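
// insertNodeToProcessing marks nodeName as currently being processed. It
// returns false if the node is already being processed by another worker.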
func (r *rangeAllocator) insertNodeToProcessing(nodeName string) bool {
	r.lock.Lock()
	defer r.lock.Unlock()
	if r.nodesInProcessing.Has(nodeName) {
		return false
	}
	r.nodesInProcessing.Insert(nodeName)
	return true
}
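
// removeNodeFromProcessing marks nodeName as no longer being processed.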
func (r *rangeAllocator) removeNodeFromProcessing(nodeName string) {
	r.lock.Lock()
	defer r.lock.Unlock()
	r.nodesInProcessing.Delete(nodeName)
}
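
// occupyCIDR marks the CIDR already assigned to the node as used in the local
// CIDR set, so it cannot be handed out to another node.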
func (r *rangeAllocator) occupyCIDR(node *v1.Node) error {
	defer r.removeNodeFromProcessing(node.Name)
	if node.Spec.PodCIDR == "" {
		return nil
	}
	_, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
	if err != nil {
		return fmt.Errorf("failed to parse node %s, CIDR %s", node.Name, node.Spec.PodCIDR)
	}
	if err := r.cidrs.Occupy(podCIDR); err != nil {
		return fmt.Errorf("failed to mark cidr as occupied: %v", err)
	}
	return nil
}
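
// AllocateOrOccupyCIDR allocates a new CIDR for the given node if it has none,
// queueing the API update for a background worker, or marks the node's
// existing CIDR as used.
//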
// WARNING: If you're adding any return calls or deferring any more work from
// this function, you have to make sure to update nodesInProcessing properly
// with the disposition of the node when the work is done.
func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
	if node == nil {
		return nil
	}
	if !r.insertNodeToProcessing(node.Name) {
		klog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
		return nil
	}
	if node.Spec.PodCIDR != "" {
		return r.occupyCIDR(node)
	}
	podCIDR, err := r.cidrs.AllocateNext()
	if err != nil {
		r.removeNodeFromProcessing(node.Name)
		nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
		return fmt.Errorf("failed to allocate cidr: %v", err)
	}

	klog.V(4).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR)
	r.nodeCIDRUpdateChannel <- nodeAndCIDR{
		nodeName: node.Name,
		cidr:     podCIDR,
	}
	return nil
}
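
// ReleaseCIDR releases the CIDR of the removed node back into the CIDR set.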
func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
	if node == nil || node.Spec.PodCIDR == "" {
		return nil
	}
	_, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
	if err != nil {
		return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", node.Spec.PodCIDR, node.Name, err)
	}

	klog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR)
	if err = r.cidrs.Release(podCIDR); err != nil {
		return fmt.Errorf("Error when releasing CIDR %v: %v", node.Spec.PodCIDR, err)
	}
	return err
}

// filterOutServiceRange marks all CIDRs with subNetMaskSize that belong to
// serviceCIDR as used, so that they won't be assignable.
func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
	// Checks if service CIDR has a nonempty intersection with cluster
	// CIDR. It is the case if either clusterCIDR contains serviceCIDR with
	// clusterCIDR's Mask applied (this means that clusterCIDR contains
	// serviceCIDR) or vice versa (which means that serviceCIDR contains
	// clusterCIDR).
	if !r.clusterCIDR.Contains(serviceCIDR.IP.Mask(r.clusterCIDR.Mask)) && !serviceCIDR.Contains(r.clusterCIDR.IP.Mask(serviceCIDR.Mask)) {
		return
	}

	if err := r.cidrs.Occupy(serviceCIDR); err != nil {
		klog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err)
	}
}

// updateCIDRAllocation assigns CIDR to Node and sends an update to the API server.
func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
	var err error
	var node *v1.Node
	defer r.removeNodeFromProcessing(data.nodeName)

	podCIDR := data.cidr.String()

	node, err = r.nodeLister.Get(data.nodeName)
	if err != nil {
		klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", data.nodeName, err)
		return err
	}

	if node.Spec.PodCIDR == podCIDR {
		klog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
		return nil
	}
	if node.Spec.PodCIDR != "" {
		klog.Errorf("Node %v already has a CIDR allocated %v. Releasing the new one %v.", node.Name, node.Spec.PodCIDR, podCIDR)
		if err := r.cidrs.Release(data.cidr); err != nil {
			klog.Errorf("Error when releasing CIDR %v", podCIDR)
		}
		return nil
	}

	// If we reached here, it means that the node has no CIDR currently assigned. So we set it.
	for i := 0; i < cidrUpdateRetries; i++ {
		if err = utilnode.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil {
			klog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
			return nil
		}
	}
	klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
	nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
	// We accept the fact that we may leak CIDRs here. This is safer than
	// releasing them in the case when we don't know whether the request went
	// through. A NodeController restart will return all falsely allocated
	// CIDRs to the pool.
	if !apierrors.IsServerTimeout(err) {
		klog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", node.Name, err)
		if releaseErr := r.cidrs.Release(data.cidr); releaseErr != nil {
			klog.Errorf("Error releasing allocated CIDR for node %v: %v", node.Name, releaseErr)
		}
	}
	return err
}