startup.sh

#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Script that creates a Kubemark cluster for IBM Cloud.

KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"
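
# Note: KUBE_ROOT, KUBECONFIG, MASTER_IP and the *_BASE64 certificate variables
# used in this script are not defined here; they are expected to be provided by
# the calling environment.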

# Generate secret and configMap for the hollow-node pods to work, prepare
# manifests of the hollow-node and heapster replication controllers from
# templates, and finally create these resources through kubectl.
function create-kube-hollow-node-resources {
  # Create kubeconfig for Kubelet.
  KUBELET_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: kubelet
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    certificate-authority-data: "${CA_CERT_BASE64}"
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: kubelet
  name: kubemark-context
current-context: kubemark-context
EOF
)"

  # Create kubeconfig for Kubeproxy.
  KUBEPROXY_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: kube-proxy
  name: kubemark-context
current-context: kubemark-context
EOF
)"

  # Create kubeconfig for Heapster.
  HEAPSTER_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: heapster
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: heapster
  name: kubemark-context
current-context: kubemark-context
EOF
)"

  # Create kubeconfig for Cluster Autoscaler.
  CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: cluster-autoscaler
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: cluster-autoscaler
  name: kubemark-context
current-context: kubemark-context
EOF
)"

  # Create kubeconfig for NodeProblemDetector.
  NPD_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: node-problem-detector
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: node-problem-detector
  name: kubemark-context
current-context: kubemark-context
EOF
)"

  # Create kubeconfig for Kube DNS.
  KUBE_DNS_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-dns
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: kube-dns
  name: kubemark-context
current-context: kubemark-context
EOF
)"

  # Create kubemark namespace.
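  # If a previous run left a kubemark namespace behind, delete it and wait for
  # the deletion to finish before recreating it.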
  spawn-config
  if kubectl get ns | grep -Fq "kubemark"; then
    kubectl delete ns kubemark
    while kubectl get ns | grep -Fq "kubemark"
    do
      sleep 10
    done
  fi
  "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/kubemark-ns.json"

  # Create configmap for configuring the hollow kubelet, proxy and npd.
  "${KUBECTL}" create configmap "node-configmap" --namespace="kubemark" \
    --from-literal=content.type="${TEST_CLUSTER_API_CONTENT_TYPE}" \
    --from-file=kernel.monitor="${RESOURCE_DIRECTORY}/kernel-monitor.json"

  # Create secret for passing kubeconfigs to kubelet, kubeproxy and npd.
  "${KUBECTL}" create secret generic "kubeconfig" --type=Opaque --namespace="kubemark" \
    --from-literal=kubelet.kubeconfig="${KUBELET_KUBECONFIG_CONTENTS}" \
    --from-literal=kubeproxy.kubeconfig="${KUBEPROXY_KUBECONFIG_CONTENTS}" \
    --from-literal=heapster.kubeconfig="${HEAPSTER_KUBECONFIG_CONTENTS}" \
    --from-literal=cluster_autoscaler.kubeconfig="${CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS}" \
    --from-literal=npd.kubeconfig="${NPD_KUBECONFIG_CONTENTS}" \
    --from-literal=dns.kubeconfig="${KUBE_DNS_KUBECONFIG_CONTENTS}"

  # Create addon pods.
  # Heapster.
  mkdir -p "${RESOURCE_DIRECTORY}/addons"
  sed "s/{{MASTER_IP}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
  metrics_mem_per_node=4
  metrics_mem=$((200 + metrics_mem_per_node*NUM_NODES))
  sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
  metrics_cpu_per_node_numerator=${NUM_NODES}
  metrics_cpu_per_node_denominator=2
  metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denominator))
  sed -i'' -e "s/{{METRICS_CPU}}/${metrics_cpu}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
  eventer_mem_per_node=500
  eventer_mem=$((200 * 1024 + eventer_mem_per_node*NUM_NODES))
  sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
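
  # The heapster metrics/eventer requests computed above scale linearly with
  # NUM_NODES, so the addon keeps up as the number of hollow nodes grows.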

  # Cluster Autoscaler.
  if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
    echo "Setting up Cluster Autoscaler"
    KUBEMARK_AUTOSCALER_MIG_NAME="${KUBEMARK_AUTOSCALER_MIG_NAME:-${NODE_INSTANCE_PREFIX}-group}"
    KUBEMARK_AUTOSCALER_MIN_NODES="${KUBEMARK_AUTOSCALER_MIN_NODES:-0}"
    KUBEMARK_AUTOSCALER_MAX_NODES="${KUBEMARK_AUTOSCALER_MAX_NODES:-${DESIRED_NODES}}"
    NUM_NODES=${KUBEMARK_AUTOSCALER_MAX_NODES}
    echo "Setting maximum cluster size to ${NUM_NODES}."
    KUBEMARK_MIG_CONFIG="autoscaling.k8s.io/nodegroup: ${KUBEMARK_AUTOSCALER_MIG_NAME}"
    sed "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/cluster-autoscaler_template.json" > "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
    sed -i'' -e "s/{{kubemark_autoscaler_mig_name}}/${KUBEMARK_AUTOSCALER_MIG_NAME}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
    sed -i'' -e "s/{{kubemark_autoscaler_min_nodes}}/${KUBEMARK_AUTOSCALER_MIN_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
    sed -i'' -e "s/{{kubemark_autoscaler_max_nodes}}/${KUBEMARK_AUTOSCALER_MAX_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
  fi

  # Kube DNS.
  if [[ "${ENABLE_KUBEMARK_KUBE_DNS:-}" == "true" ]]; then
    echo "Setting up kube-dns"
    sed "s/{{dns_domain}}/${KUBE_DNS_DOMAIN}/g" "${RESOURCE_DIRECTORY}/kube_dns_template.yaml" > "${RESOURCE_DIRECTORY}/addons/kube_dns.yaml"
  fi

  "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/addons" --namespace="kubemark"
  set-registry-secrets

  # Create the replication controller for hollow-nodes.
  # NUM_REPLICAS can be overridden when running the Cluster Autoscaler.
  NUM_REPLICAS=${NUM_REPLICAS:-${KUBEMARK_NUM_NODES}}
  sed "s/{{numreplicas}}/${NUM_REPLICAS}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.yaml" > "${RESOURCE_DIRECTORY}/hollow-node.yaml"
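
  # Size the hollow kube-proxy requests to the cluster scale: bump the CPU
  # request above 1000 nodes and grow the memory request with NUM_NODES.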
  proxy_cpu=20
  if [ "${NUM_NODES}" -gt 1000 ]; then
    proxy_cpu=50
  fi
  proxy_mem_per_node=50
  proxy_mem=$((100 * 1024 + proxy_mem_per_node*NUM_NODES))
  sed -i'' -e "s/{{HOLLOW_PROXY_CPU}}/${proxy_cpu}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{HOLLOW_PROXY_MEM}}/${proxy_mem}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
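  # ' is used as the sed delimiter in the registry and MIG-config substitutions
  # below because those values may themselves contain '/'.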
  sed -i'' -e "s'{{kubemark_image_registry}}'${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{kubemark_image_tag}}/${KUBEMARK_IMAGE_TAG}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{hollow_kubelet_params}}/${HOLLOW_KUBELET_TEST_ARGS:-}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{hollow_proxy_params}}/${HOLLOW_PROXY_TEST_ARGS:-}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s'{{kubemark_mig_config}}'${KUBEMARK_MIG_CONFIG:-}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" --namespace="kubemark"

  echo "Created secrets, configMaps, replication-controllers required for hollow-nodes."
}

# Wait until all hollow-nodes are running or there is a timeout.
function wait-for-hollow-nodes-to-run-or-timeout {
  echo -n "Waiting for all hollow-nodes to become Running"
  start=$(date +%s)
  # IKS uses a real cluster as the hollow master, so the real worker nodes have
  # to be excluded when counting hollow nodes.
  nodes=$("${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node 2> /dev/null | grep hollow-node) || true
  ready=$(($(echo "${nodes}" | grep -vc "NotReady") - 1))
  until [[ "${ready}" -ge "${NUM_REPLICAS}" ]]; do
    echo -n "."
    sleep 1
    now=$(date +%s)
    # Fail it if it already took more than 30 minutes.
    if [ $((now - start)) -gt 1800 ]; then
      echo ""
      # shellcheck disable=SC2154 # Color defined in sourced script
      echo -e "${color_red} Timeout waiting for all hollow-nodes to become Running. ${color_norm}"
      # Try listing nodes again - if it fails, the API server is not responding.
      if "${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node &> /dev/null; then
        echo "Found only ${ready} ready hollow-nodes while waiting for ${NUM_NODES}."
      else
        echo "Got error while trying to list hollow-nodes. The API server is probably down."
      fi
      spawn-config
      pods=$("${KUBECTL}" get pods -l name=hollow-node --namespace=kubemark) || true
      running=$(($(echo "${pods}" | grep -c "Running")))
      echo "${running} hollow-nodes are reported as 'Running'"
      not_running=$(($(echo "${pods}" | grep -vc "Running") - 1))
      echo "${not_running} hollow-nodes are reported as NOT 'Running'"
      echo "${pods}" | grep -v "Running"
      exit 1
    fi
    nodes=$("${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node 2> /dev/null | grep hollow-node) || true
    ready=$(($(echo "${nodes}" | grep -vc "NotReady") - 1))
  done
  # shellcheck disable=SC2154 # Color defined in sourced script
  echo -e "${color_green} Done!${color_norm}"
}

############################### Main Function ########################################
# In order for the cluster autoscaler to function, the template file must be changed so
# that the ":443" is removed, because the port is already included in MASTER_IP.

# Create clusters and populate with hollow nodes.
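# The helper functions used by this script that are not defined here
# (complete-login, build-kubemark-image, choose-clusters, generate-values,
# set-hollow-master, master-config, spawn-config, set-registry-secrets,
# clean-repo) are expected to be provided by the sourced provider-specific
# scripts.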
complete-login
build-kubemark-image
choose-clusters
generate-values
set-hollow-master
echo "Creating kube hollow node resources"
create-kube-hollow-node-resources
master-config
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_blue}EXECUTION COMPLETE${color_norm}"

# Check status of Kubemark.
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_yellow}CHECKING STATUS${color_norm}"
wait-for-hollow-nodes-to-run-or-timeout

# Celebrate.
echo ""
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_blue}SUCCESS${color_norm}"
clean-repo
exit 0