local-up-cluster.sh 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088
  1. #!/usr/bin/env bash
  2. # Copyright 2014 The Kubernetes Authors.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
# This script builds and runs a local kubernetes cluster. You may need to run
# this as root to allow kubelet to open docker's socket, and to write the test
# CA in /var/run/kubernetes.
# Usage: `hack/local-up-cluster.sh`.
DOCKER_OPTS=${DOCKER_OPTS:-""}
# NOTE(review): bash cannot export array variables to child processes; DOCKER
# is only usable as an array within this shell — confirm no subprocess relies on it.
export DOCKER=(docker "${DOCKER_OPTS[@]}")
DOCKER_ROOT=${DOCKER_ROOT:-""}
ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-""}
DENY_SECURITY_CONTEXT_ADMISSION=${DENY_SECURITY_CONTEXT_ADMISSION:-""}
PSP_ADMISSION=${PSP_ADMISSION:-""}
NODE_ADMISSION=${NODE_ADMISSION:-""}
RUNTIME_CONFIG=${RUNTIME_CONFIG:-""}
KUBELET_AUTHORIZATION_WEBHOOK=${KUBELET_AUTHORIZATION_WEBHOOK:-""}
KUBELET_AUTHENTICATION_WEBHOOK=${KUBELET_AUTHENTICATION_WEBHOOK:-""}
POD_MANIFEST_PATH=${POD_MANIFEST_PATH:-"/var/run/kubernetes/static-pods"}
KUBELET_FLAGS=${KUBELET_FLAGS:-""}
KUBELET_IMAGE=${KUBELET_IMAGE:-""}
# many dev environments run with swap on, so we don't fail in this env
FAIL_SWAP_ON=${FAIL_SWAP_ON:-"false"}
# Name of the network plugin, eg: "kubenet"
NET_PLUGIN=${NET_PLUGIN:-""}
# Place the config files and binaries required by NET_PLUGIN in these directory,
# eg: "/etc/cni/net.d" for config files, and "/opt/cni/bin" for binaries.
CNI_CONF_DIR=${CNI_CONF_DIR:-""}
CNI_BIN_DIR=${CNI_BIN_DIR:-""}
CLUSTER_CIDR=${CLUSTER_CIDR:-10.1.0.0/16}
SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/24}
FIRST_SERVICE_CLUSTER_IP=${FIRST_SERVICE_CLUSTER_IP:-10.0.0.1}
# if enabled, must set CGROUP_ROOT
CGROUPS_PER_QOS=${CGROUPS_PER_QOS:-true}
# name of the cgroup driver, i.e. cgroupfs or systemd
CGROUP_DRIVER=${CGROUP_DRIVER:-""}
# if cgroups per qos is enabled, optionally change cgroup root
CGROUP_ROOT=${CGROUP_ROOT:-""}
# owner of client certs, default to current user if not specified
USER=${USER:-$(whoami)}
# enables testing eviction scenarios locally.
EVICTION_HARD=${EVICTION_HARD:-"memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%"}
EVICTION_SOFT=${EVICTION_SOFT:-""}
EVICTION_PRESSURE_TRANSITION_PERIOD=${EVICTION_PRESSURE_TRANSITION_PERIOD:-"1m"}
# This script uses docker0 (or whatever container bridge docker is currently using)
# and we don't know the IP of the DNS pod to pass in as --cluster-dns.
# To set this up by hand, set this flag and change DNS_SERVER_IP.
# Note also that you need API_HOST (defined below) for correct DNS.
KUBE_PROXY_MODE=${KUBE_PROXY_MODE:-""}
ENABLE_CLUSTER_DNS=${KUBE_ENABLE_CLUSTER_DNS:-true}
ENABLE_NODELOCAL_DNS=${KUBE_ENABLE_NODELOCAL_DNS:-false}
DNS_SERVER_IP=${KUBE_DNS_SERVER_IP:-10.0.0.10}
LOCAL_DNS_IP=${KUBE_LOCAL_DNS_IP:-169.254.20.10}
DNS_MEMORY_LIMIT=${KUBE_DNS_MEMORY_LIMIT:-170Mi}
DNS_DOMAIN=${KUBE_DNS_NAME:-"cluster.local"}
KUBECTL=${KUBECTL:-"${KUBE_ROOT}/cluster/kubectl.sh"}
WAIT_FOR_URL_API_SERVER=${WAIT_FOR_URL_API_SERVER:-60}
MAX_TIME_FOR_URL_API_SERVER=${MAX_TIME_FOR_URL_API_SERVER:-1}
ENABLE_DAEMON=${ENABLE_DAEMON:-false}
HOSTNAME_OVERRIDE=${HOSTNAME_OVERRIDE:-"127.0.0.1"}
EXTERNAL_CLOUD_PROVIDER=${EXTERNAL_CLOUD_PROVIDER:-false}
EXTERNAL_CLOUD_PROVIDER_BINARY=${EXTERNAL_CLOUD_PROVIDER_BINARY:-""}
EXTERNAL_CLOUD_VOLUME_PLUGIN=${EXTERNAL_CLOUD_VOLUME_PLUGIN:-""}
CLOUD_PROVIDER=${CLOUD_PROVIDER:-""}
CLOUD_CONFIG=${CLOUD_CONFIG:-""}
KUBELET_PROVIDER_ID=${KUBELET_PROVIDER_ID:-"$(hostname)"}
FEATURE_GATES=${FEATURE_GATES:-"AllAlpha=false"}
STORAGE_BACKEND=${STORAGE_BACKEND:-"etcd3"}
STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-"application/vnd.kubernetes.protobuf"}
# preserve etcd data. you also need to set ETCD_DIR.
PRESERVE_ETCD="${PRESERVE_ETCD:-false}"
# enable kubernetes dashboard
ENABLE_CLUSTER_DASHBOARD=${KUBE_ENABLE_CLUSTER_DASHBOARD:-false}
# RBAC Mode options
AUTHORIZATION_MODE=${AUTHORIZATION_MODE:-"Node,RBAC"}
KUBECONFIG_TOKEN=${KUBECONFIG_TOKEN:-""}
AUTH_ARGS=${AUTH_ARGS:-""}
# WebHook Authentication and Authorization
AUTHORIZATION_WEBHOOK_CONFIG_FILE=${AUTHORIZATION_WEBHOOK_CONFIG_FILE:-""}
AUTHENTICATION_WEBHOOK_CONFIG_FILE=${AUTHENTICATION_WEBHOOK_CONFIG_FILE:-""}
# Install a default storage class (enabled by default)
DEFAULT_STORAGE_CLASS=${KUBE_DEFAULT_STORAGE_CLASS:-true}
# Do not run the mutation detector by default on a local cluster.
# It is intended for a specific type of testing and inherently leaks memory.
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-false}"
export KUBE_CACHE_MUTATION_DETECTOR
# panic the server on watch decode errors since they are considered coder mistakes
KUBE_PANIC_WATCH_DECODE_ERROR="${KUBE_PANIC_WATCH_DECODE_ERROR:-true}"
export KUBE_PANIC_WATCH_DECODE_ERROR
# Default list of admission Controllers to invoke prior to persisting objects in cluster
# The order defined here does not matter.
ENABLE_ADMISSION_PLUGINS=${ENABLE_ADMISSION_PLUGINS:-"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,Priority,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"}
DISABLE_ADMISSION_PLUGINS=${DISABLE_ADMISSION_PLUGINS:-""}
ADMISSION_CONTROL_CONFIG_FILE=${ADMISSION_CONTROL_CONFIG_FILE:-""}
# START_MODE can be 'all', 'kubeletonly', 'nokubelet', or 'nokubeproxy'
START_MODE=${START_MODE:-"all"}
# A list of controllers to enable
KUBE_CONTROLLERS="${KUBE_CONTROLLERS:-"*"}"
# Audit policy
AUDIT_POLICY_FILE=${AUDIT_POLICY_FILE:-""}
  112. # sanity check for OpenStack provider
  113. if [ "${CLOUD_PROVIDER}" == "openstack" ]; then
  114. if [ "${CLOUD_CONFIG}" == "" ]; then
  115. echo "Missing CLOUD_CONFIG env for OpenStack provider!"
  116. exit 1
  117. fi
  118. if [ ! -f "${CLOUD_CONFIG}" ]; then
  119. echo "Cloud config ${CLOUD_CONFIG} doesn't exist"
  120. exit 1
  121. fi
  122. fi
  123. if [ "$(id -u)" != "0" ]; then
  124. echo "WARNING : This script MAY be run as root for docker socket / iptables functionality; if failures occur, retry as root." 2>&1
  125. fi
  126. # Stop right away if the build fails
  127. set -e
  128. source "${KUBE_ROOT}/hack/lib/init.sh"
  129. kube::util::ensure-gnu-sed
  130. function usage {
  131. echo "This script starts a local kube cluster. "
  132. echo "Example 0: hack/local-up-cluster.sh -h (this 'help' usage description)"
  133. echo "Example 1: hack/local-up-cluster.sh -o _output/dockerized/bin/linux/amd64/ (run from docker output)"
  134. echo "Example 2: hack/local-up-cluster.sh -O (auto-guess the bin path for your platform)"
  135. echo "Example 3: hack/local-up-cluster.sh (build a local copy of the source)"
  136. }
  137. # This function guesses where the existing cached binary build is for the `-O`
  138. # flag
  139. function guess_built_binary_path {
  140. local apiserver_path
  141. apiserver_path=$(kube::util::find-binary "kube-apiserver")
  142. if [[ -z "${apiserver_path}" ]]; then
  143. return
  144. fi
  145. echo -n "$(dirname "${apiserver_path}")"
  146. }
  147. ### Allow user to supply the source directory.
  148. GO_OUT=${GO_OUT:-}
  149. while getopts "ho:O" OPTION
  150. do
  151. case ${OPTION} in
  152. o)
  153. echo "skipping build"
  154. GO_OUT="${OPTARG}"
  155. echo "using source ${GO_OUT}"
  156. ;;
  157. O)
  158. GO_OUT=$(guess_built_binary_path)
  159. if [ "${GO_OUT}" == "" ]; then
  160. echo "Could not guess the correct output directory to use."
  161. exit 1
  162. fi
  163. ;;
  164. h)
  165. usage
  166. exit
  167. ;;
  168. ?)
  169. usage
  170. exit
  171. ;;
  172. esac
  173. done
  174. if [ "x${GO_OUT}" == "x" ]; then
  175. make -C "${KUBE_ROOT}" WHAT="cmd/kubectl cmd/kube-apiserver cmd/kube-controller-manager cmd/cloud-controller-manager cmd/kubelet cmd/kube-proxy cmd/kube-scheduler"
  176. else
  177. echo "skipped the build."
  178. fi
  179. # Shut down anyway if there's an error.
  180. set +e
  181. API_PORT=${API_PORT:-8080}
  182. API_SECURE_PORT=${API_SECURE_PORT:-6443}
  183. # WARNING: For DNS to work on most setups you should export API_HOST as the docker0 ip address,
  184. API_HOST=${API_HOST:-localhost}
  185. API_HOST_IP=${API_HOST_IP:-"127.0.0.1"}
  186. ADVERTISE_ADDRESS=${ADVERTISE_ADDRESS:-""}
  187. NODE_PORT_RANGE=${NODE_PORT_RANGE:-""}
  188. API_BIND_ADDR=${API_BIND_ADDR:-"0.0.0.0"}
  189. EXTERNAL_HOSTNAME=${EXTERNAL_HOSTNAME:-localhost}
  190. KUBELET_HOST=${KUBELET_HOST:-"127.0.0.1"}
  191. # By default only allow CORS for requests on localhost
  192. API_CORS_ALLOWED_ORIGINS=${API_CORS_ALLOWED_ORIGINS:-/127.0.0.1(:[0-9]+)?$,/localhost(:[0-9]+)?$}
  193. KUBELET_PORT=${KUBELET_PORT:-10250}
  194. LOG_LEVEL=${LOG_LEVEL:-3}
  195. # Use to increase verbosity on particular files, e.g. LOG_SPEC=token_controller*=5,other_controller*=4
  196. LOG_SPEC=${LOG_SPEC:-""}
  197. LOG_DIR=${LOG_DIR:-"/tmp"}
  198. CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"}
  199. CONTAINER_RUNTIME_ENDPOINT=${CONTAINER_RUNTIME_ENDPOINT:-""}
  200. RUNTIME_REQUEST_TIMEOUT=${RUNTIME_REQUEST_TIMEOUT:-"2m"}
  201. IMAGE_SERVICE_ENDPOINT=${IMAGE_SERVICE_ENDPOINT:-""}
  202. CHAOS_CHANCE=${CHAOS_CHANCE:-0.0}
  203. CPU_CFS_QUOTA=${CPU_CFS_QUOTA:-true}
  204. ENABLE_HOSTPATH_PROVISIONER=${ENABLE_HOSTPATH_PROVISIONER:-"false"}
  205. CLAIM_BINDER_SYNC_PERIOD=${CLAIM_BINDER_SYNC_PERIOD:-"15s"} # current k8s default
  206. ENABLE_CONTROLLER_ATTACH_DETACH=${ENABLE_CONTROLLER_ATTACH_DETACH:-"true"} # current default
  207. # This is the default dir and filename where the apiserver will generate a self-signed cert
  208. # which should be able to be used as the CA to verify itself
  209. CERT_DIR=${CERT_DIR:-"/var/run/kubernetes"}
  210. ROOT_CA_FILE=${CERT_DIR}/server-ca.crt
  211. ROOT_CA_KEY=${CERT_DIR}/server-ca.key
  212. CLUSTER_SIGNING_CERT_FILE=${CLUSTER_SIGNING_CERT_FILE:-"${ROOT_CA_FILE}"}
  213. CLUSTER_SIGNING_KEY_FILE=${CLUSTER_SIGNING_KEY_FILE:-"${ROOT_CA_KEY}"}
  214. # Reuse certs will skip generate new ca/cert files under CERT_DIR
  215. # it's useful with PRESERVE_ETCD=true because new ca will make existed service account secrets invalided
  216. REUSE_CERTS=${REUSE_CERTS:-false}
  217. # name of the cgroup driver, i.e. cgroupfs or systemd
  218. if [[ ${CONTAINER_RUNTIME} == "docker" ]]; then
  219. # default cgroup driver to match what is reported by docker to simplify local development
  220. if [[ -z ${CGROUP_DRIVER} ]]; then
  221. # match driver with docker runtime reported value (they must match)
  222. CGROUP_DRIVER=$(docker info | grep "Cgroup Driver:" | sed -e 's/^[[:space:]]*//'|cut -f3- -d' ')
  223. echo "Kubelet cgroup driver defaulted to use: ${CGROUP_DRIVER}"
  224. fi
  225. if [[ -f /var/log/docker.log && ! -f "${LOG_DIR}/docker.log" ]]; then
  226. ln -s /var/log/docker.log "${LOG_DIR}/docker.log"
  227. fi
  228. fi
  229. # Ensure CERT_DIR is created for auto-generated crt/key and kubeconfig
  230. mkdir -p "${CERT_DIR}" &>/dev/null || sudo mkdir -p "${CERT_DIR}"
  231. CONTROLPLANE_SUDO=$(test -w "${CERT_DIR}" || echo "sudo -E")
  232. function test_apiserver_off {
  233. # For the common local scenario, fail fast if server is already running.
  234. # this can happen if you run local-up-cluster.sh twice and kill etcd in between.
  235. if [[ "${API_PORT}" -gt "0" ]]; then
  236. if ! curl --silent -g "${API_HOST}:${API_PORT}" ; then
  237. echo "API SERVER insecure port is free, proceeding..."
  238. else
  239. echo "ERROR starting API SERVER, exiting. Some process on ${API_HOST} is serving already on ${API_PORT}"
  240. exit 1
  241. fi
  242. fi
  243. if ! curl --silent -k -g "${API_HOST}:${API_SECURE_PORT}" ; then
  244. echo "API SERVER secure port is free, proceeding..."
  245. else
  246. echo "ERROR starting API SERVER, exiting. Some process on ${API_HOST} is serving already on ${API_SECURE_PORT}"
  247. exit 1
  248. fi
  249. }
  250. function detect_binary {
  251. # Detect the OS name/arch so that we can find our binary
  252. case "$(uname -s)" in
  253. Darwin)
  254. host_os=darwin
  255. ;;
  256. Linux)
  257. host_os=linux
  258. ;;
  259. *)
  260. echo "Unsupported host OS. Must be Linux or Mac OS X." >&2
  261. exit 1
  262. ;;
  263. esac
  264. case "$(uname -m)" in
  265. x86_64*)
  266. host_arch=amd64
  267. ;;
  268. i?86_64*)
  269. host_arch=amd64
  270. ;;
  271. amd64*)
  272. host_arch=amd64
  273. ;;
  274. aarch64*)
  275. host_arch=arm64
  276. ;;
  277. arm64*)
  278. host_arch=arm64
  279. ;;
  280. arm*)
  281. host_arch=arm
  282. ;;
  283. i?86*)
  284. host_arch=x86
  285. ;;
  286. s390x*)
  287. host_arch=s390x
  288. ;;
  289. ppc64le*)
  290. host_arch=ppc64le
  291. ;;
  292. *)
  293. echo "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le." >&2
  294. exit 1
  295. ;;
  296. esac
  297. GO_OUT="${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}"
  298. }
# Tear down everything this script started: kill the process trees of the
# apiserver, controller-manager, cloud-controller-manager, kubelet, kube-proxy
# and scheduler (each *_PID plus its children via pgrep -P), stop etcd, and —
# unless PRESERVE_ETCD=true — wipe the etcd data directory. Always exits 0.
# NOTE(review): presumably installed as a signal/EXIT trap later in the
# script — confirm against the full file.
cleanup()
{
  echo "Cleaning up..."
  # delete running images
  # if [[ "${ENABLE_CLUSTER_DNS}" == true ]]; then
  # Still need to figure why this commands throw an error: Error from server: client: etcd cluster is unavailable or misconfigured
  # ${KUBECTL} --namespace=kube-system delete service kube-dns
  # And this one hang forever:
  # ${KUBECTL} --namespace=kube-system delete rc kube-dns-v10
  # fi

  # Check if the API server is still running
  # (collect the direct children first, then the process itself)
  [[ -n "${APISERVER_PID-}" ]] && kube::util::read-array APISERVER_PIDS < <(pgrep -P "${APISERVER_PID}" ; ps -o pid= -p "${APISERVER_PID}")
  [[ -n "${APISERVER_PIDS-}" ]] && sudo kill "${APISERVER_PIDS[@]}" 2>/dev/null

  # Check if the controller-manager is still running
  [[ -n "${CTLRMGR_PID-}" ]] && kube::util::read-array CTLRMGR_PIDS < <(pgrep -P "${CTLRMGR_PID}" ; ps -o pid= -p "${CTLRMGR_PID}")
  [[ -n "${CTLRMGR_PIDS-}" ]] && sudo kill "${CTLRMGR_PIDS[@]}" 2>/dev/null

  # Check if the cloud-controller-manager is still running
  [[ -n "${CLOUD_CTLRMGR_PID-}" ]] && kube::util::read-array CLOUD_CTLRMGR_PIDS < <(pgrep -P "${CLOUD_CTLRMGR_PID}" ; ps -o pid= -p "${CLOUD_CTLRMGR_PID}")
  [[ -n "${CLOUD_CTLRMGR_PIDS-}" ]] && sudo kill "${CLOUD_CTLRMGR_PIDS[@]}" 2>/dev/null

  # Check if the kubelet is still running
  [[ -n "${KUBELET_PID-}" ]] && kube::util::read-array KUBELET_PIDS < <(pgrep -P "${KUBELET_PID}" ; ps -o pid= -p "${KUBELET_PID}")
  [[ -n "${KUBELET_PIDS-}" ]] && sudo kill "${KUBELET_PIDS[@]}" 2>/dev/null

  # Check if the proxy is still running
  [[ -n "${PROXY_PID-}" ]] && kube::util::read-array PROXY_PIDS < <(pgrep -P "${PROXY_PID}" ; ps -o pid= -p "${PROXY_PID}")
  [[ -n "${PROXY_PIDS-}" ]] && sudo kill "${PROXY_PIDS[@]}" 2>/dev/null

  # Check if the scheduler is still running
  [[ -n "${SCHEDULER_PID-}" ]] && kube::util::read-array SCHEDULER_PIDS < <(pgrep -P "${SCHEDULER_PID}" ; ps -o pid= -p "${SCHEDULER_PID}")
  [[ -n "${SCHEDULER_PIDS-}" ]] && sudo kill "${SCHEDULER_PIDS[@]}" 2>/dev/null

  # Check if the etcd is still running
  [[ -n "${ETCD_PID-}" ]] && kube::etcd::stop
  if [[ "${PRESERVE_ETCD}" == "false" ]]; then
    [[ -n "${ETCD_DIR-}" ]] && kube::etcd::clean_etcd_dir
  fi

  exit 0
}
  334. # Check if all processes are still running. Prints a warning once each time
  335. # a process dies unexpectedly.
  336. function healthcheck {
  337. if [[ -n "${APISERVER_PID-}" ]] && ! sudo kill -0 "${APISERVER_PID}" 2>/dev/null; then
  338. warning_log "API server terminated unexpectedly, see ${APISERVER_LOG}"
  339. APISERVER_PID=
  340. fi
  341. if [[ -n "${CTLRMGR_PID-}" ]] && ! sudo kill -0 "${CTLRMGR_PID}" 2>/dev/null; then
  342. warning_log "kube-controller-manager terminated unexpectedly, see ${CTLRMGR_LOG}"
  343. CTLRMGR_PID=
  344. fi
  345. if [[ -n "${KUBELET_PID-}" ]] && ! sudo kill -0 "${KUBELET_PID}" 2>/dev/null; then
  346. warning_log "kubelet terminated unexpectedly, see ${KUBELET_LOG}"
  347. KUBELET_PID=
  348. fi
  349. if [[ -n "${PROXY_PID-}" ]] && ! sudo kill -0 "${PROXY_PID}" 2>/dev/null; then
  350. warning_log "kube-proxy terminated unexpectedly, see ${PROXY_LOG}"
  351. PROXY_PID=
  352. fi
  353. if [[ -n "${SCHEDULER_PID-}" ]] && ! sudo kill -0 "${SCHEDULER_PID}" 2>/dev/null; then
  354. warning_log "scheduler terminated unexpectedly, see ${SCHEDULER_LOG}"
  355. SCHEDULER_PID=
  356. fi
  357. if [[ -n "${ETCD_PID-}" ]] && ! sudo kill -0 "${ETCD_PID}" 2>/dev/null; then
  358. warning_log "etcd terminated unexpectedly"
  359. ETCD_PID=
  360. fi
  361. }
  362. function print_color {
  363. message=$1
  364. prefix=${2:+$2: } # add colon only if defined
  365. color=${3:-1} # default is red
  366. echo -n "$(tput bold)$(tput setaf "${color}")"
  367. echo "${prefix}${message}"
  368. echo -n "$(tput sgr0)"
  369. }
  370. function warning_log {
  371. print_color "$1" "W$(date "+%m%d %H:%M:%S")]" 1
  372. }
  373. function start_etcd {
  374. echo "Starting etcd"
  375. export ETCD_LOGFILE=${LOG_DIR}/etcd.log
  376. kube::etcd::start
  377. }
  378. function set_service_accounts {
  379. SERVICE_ACCOUNT_LOOKUP=${SERVICE_ACCOUNT_LOOKUP:-true}
  380. SERVICE_ACCOUNT_KEY=${SERVICE_ACCOUNT_KEY:-/tmp/kube-serviceaccount.key}
  381. # Generate ServiceAccount key if needed
  382. if [[ ! -f "${SERVICE_ACCOUNT_KEY}" ]]; then
  383. mkdir -p "$(dirname "${SERVICE_ACCOUNT_KEY}")"
  384. openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null
  385. fi
  386. }
# Create the cluster CA hierarchy and the certs/keys/kubeconfigs derived from
# it under CERT_DIR:
#   - server-ca / client-ca / request-header-ca signers (a single shared CA
#     when ENABLE_SINGLE_CA_SIGNER=true),
#   - serving certs for kube-apiserver and kube-aggregator,
#   - client certs for controller, scheduler, admin, kube-apiserver,
#     auth-proxy and kube-aggregator.
# All heavy lifting is delegated to kube::util helpers from hack/lib.
function generate_certs {
  # Create CA signers
  if [[ "${ENABLE_SINGLE_CA_SIGNER:-}" = true ]]; then
    kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"client auth","server auth"'
    # Reuse the server CA as the client CA so one signer covers both usages.
    sudo cp "${CERT_DIR}/server-ca.key" "${CERT_DIR}/client-ca.key"
    sudo cp "${CERT_DIR}/server-ca.crt" "${CERT_DIR}/client-ca.crt"
    sudo cp "${CERT_DIR}/server-ca-config.json" "${CERT_DIR}/client-ca-config.json"
  else
    kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"server auth"'
    kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" client '"client auth"'
  fi

  # Create auth proxy client ca
  kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header '"client auth"'

  # serving cert for kube-apiserver
  kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-apiserver kubernetes.default kubernetes.default.svc "localhost" "${API_HOST_IP}" "${API_HOST}" "${FIRST_SERVICE_CLUSTER_IP}"

  # Create client certs signed with client-ca, given id, given CN and a number of groups
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' controller system:kube-controller-manager
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' scheduler system:kube-scheduler
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' admin system:admin system:masters
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-apiserver kube-apiserver

  # Create matching certificates for kube-aggregator
  kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-aggregator api.kube-public.svc "localhost" "${API_HOST_IP}"
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header-ca auth-proxy system:auth-proxy

  # TODO remove masters and add rolebinding
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-aggregator system:kube-aggregator system:masters
  kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-aggregator
}
  414. function generate_kubeproxy_certs {
  415. kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-proxy system:kube-proxy system:nodes
  416. kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-proxy
  417. }
  418. function generate_kubelet_certs {
  419. kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kubelet "system:node:${HOSTNAME_OVERRIDE}" system:nodes
  420. kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kubelet
  421. }
# Launch kube-apiserver in the background and wait for it to serve /healthz.
# Steps: assemble optional flags from the env knobs, generate certs (unless
# REUSE_CERTS=true), write a default audit policy when none is provided,
# start the binary (logging to APISERVER_LOG), then create kubeconfigs for
# admin/controller/scheduler and grant the apiserver kubelet-API access.
# Globals written: ENABLE_ADMISSION_PLUGINS, AUDIT_POLICY_FILE, APISERVER_LOG,
# APISERVER_PID, AUTH_ARGS (when previously empty).
function start_apiserver {
  security_admission=""
  if [[ -n "${DENY_SECURITY_CONTEXT_ADMISSION}" ]]; then
    security_admission=",SecurityContextDeny"
  fi
  if [[ -n "${PSP_ADMISSION}" ]]; then
    security_admission=",PodSecurityPolicy"
  fi
  if [[ -n "${NODE_ADMISSION}" ]]; then
    security_admission=",NodeRestriction"
  fi
  # Append security_admission plugin
  ENABLE_ADMISSION_PLUGINS="${ENABLE_ADMISSION_PLUGINS}${security_admission}"

  authorizer_arg=""
  if [[ -n "${AUTHORIZATION_MODE}" ]]; then
    authorizer_arg="--authorization-mode=${AUTHORIZATION_MODE}"
  fi
  priv_arg=""
  if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
    priv_arg="--allow-privileged=${ALLOW_PRIVILEGED}"
  fi
  runtime_config=""
  if [[ -n "${RUNTIME_CONFIG}" ]]; then
    runtime_config="--runtime-config=${RUNTIME_CONFIG}"
  fi

  # Let the API server pick a default address when API_HOST_IP
  # is set to 127.0.0.1
  advertise_address=""
  if [[ "${API_HOST_IP}" != "127.0.0.1" ]]; then
    advertise_address="--advertise-address=${API_HOST_IP}"
  fi
  if [[ "${ADVERTISE_ADDRESS}" != "" ]] ; then
    advertise_address="--advertise-address=${ADVERTISE_ADDRESS}"
  fi
  node_port_range=""
  if [[ "${NODE_PORT_RANGE}" != "" ]] ; then
    node_port_range="--service-node-port-range=${NODE_PORT_RANGE}"
  fi

  if [[ "${REUSE_CERTS}" != true ]]; then
    # Create Certs
    generate_certs
  fi

  cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}"
  if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
    cloud_config_arg="--cloud-provider=external"
  fi

  # Default audit policy: log every request at Metadata level.
  if [[ -z "${AUDIT_POLICY_FILE}" ]]; then
    cat <<EOF > /tmp/kube-audit-policy-file
# Log all requests at the Metadata level.
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Metadata
EOF
    AUDIT_POLICY_FILE="/tmp/kube-audit-policy-file"
  fi

  APISERVER_LOG=${LOG_DIR}/kube-apiserver.log
  # NOTE(review): "${priv_arg}", "${advertise_address}" and
  # "${node_port_range}" expand to an empty-string argument when the
  # corresponding knob is unset — confirm kube-apiserver tolerates empty
  # positional arguments.
  # shellcheck disable=SC2086
  ${CONTROLPLANE_SUDO} "${GO_OUT}/kube-apiserver" "${authorizer_arg}" "${priv_arg}" ${runtime_config} \
    ${cloud_config_arg} \
    "${advertise_address}" \
    "${node_port_range}" \
    --v="${LOG_LEVEL}" \
    --vmodule="${LOG_SPEC}" \
    --audit-policy-file="${AUDIT_POLICY_FILE}" \
    --audit-log-path="${LOG_DIR}/kube-apiserver-audit.log" \
    --authorization-webhook-config-file="${AUTHORIZATION_WEBHOOK_CONFIG_FILE}" \
    --authentication-token-webhook-config-file="${AUTHENTICATION_WEBHOOK_CONFIG_FILE}" \
    --cert-dir="${CERT_DIR}" \
    --client-ca-file="${CERT_DIR}/client-ca.crt" \
    --kubelet-client-certificate="${CERT_DIR}/client-kube-apiserver.crt" \
    --kubelet-client-key="${CERT_DIR}/client-kube-apiserver.key" \
    --service-account-key-file="${SERVICE_ACCOUNT_KEY}" \
    --service-account-lookup="${SERVICE_ACCOUNT_LOOKUP}" \
    --service-account-issuer="https://kubernetes.default.svc" \
    --service-account-signing-key-file="${SERVICE_ACCOUNT_KEY}" \
    --enable-admission-plugins="${ENABLE_ADMISSION_PLUGINS}" \
    --disable-admission-plugins="${DISABLE_ADMISSION_PLUGINS}" \
    --admission-control-config-file="${ADMISSION_CONTROL_CONFIG_FILE}" \
    --bind-address="${API_BIND_ADDR}" \
    --secure-port="${API_SECURE_PORT}" \
    --tls-cert-file="${CERT_DIR}/serving-kube-apiserver.crt" \
    --tls-private-key-file="${CERT_DIR}/serving-kube-apiserver.key" \
    --insecure-bind-address="${API_HOST_IP}" \
    --insecure-port="${API_PORT}" \
    --storage-backend="${STORAGE_BACKEND}" \
    --storage-media-type="${STORAGE_MEDIA_TYPE}" \
    --etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
    --service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}" \
    --feature-gates="${FEATURE_GATES}" \
    --external-hostname="${EXTERNAL_HOSTNAME}" \
    --requestheader-username-headers=X-Remote-User \
    --requestheader-group-headers=X-Remote-Group \
    --requestheader-extra-headers-prefix=X-Remote-Extra- \
    --requestheader-client-ca-file="${CERT_DIR}/request-header-ca.crt" \
    --requestheader-allowed-names=system:auth-proxy \
    --proxy-client-cert-file="${CERT_DIR}/client-auth-proxy.crt" \
    --proxy-client-key-file="${CERT_DIR}/client-auth-proxy.key" \
    --cors-allowed-origins="${API_CORS_ALLOWED_ORIGINS}" >"${APISERVER_LOG}" 2>&1 &
  APISERVER_PID=$!

  # Wait for kube-apiserver to come up before launching the rest of the components.
  echo "Waiting for apiserver to come up"
  kube::util::wait_for_url "https://${API_HOST_IP}:${API_SECURE_PORT}/healthz" "apiserver: " 1 "${WAIT_FOR_URL_API_SERVER}" "${MAX_TIME_FOR_URL_API_SERVER}" \
    || { echo "check apiserver logs: ${APISERVER_LOG}" ; exit 1 ; }

  # Create kubeconfigs for all components, using client certs
  kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" admin
  ${CONTROLPLANE_SUDO} chown "${USER}" "${CERT_DIR}/client-admin.key" # make readable for kubectl
  kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" controller
  kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" scheduler

  if [[ -z "${AUTH_ARGS}" ]]; then
    AUTH_ARGS="--client-key=${CERT_DIR}/client-admin.key --client-certificate=${CERT_DIR}/client-admin.crt"
  fi

  # Grant apiserver permission to speak to the kubelet
  ${KUBECTL} --kubeconfig "${CERT_DIR}/admin.kubeconfig" create clusterrolebinding kube-apiserver-kubelet-admin --clusterrole=system:kubelet-api-admin --user=kube-apiserver

  # Publish a kubeconfig pointing at the aggregated API server endpoint.
  ${CONTROLPLANE_SUDO} cp "${CERT_DIR}/admin.kubeconfig" "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
  ${CONTROLPLANE_SUDO} chown "$(whoami)" "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
  ${KUBECTL} config set-cluster local-up-cluster --kubeconfig="${CERT_DIR}/admin-kube-aggregator.kubeconfig" --server="https://${API_HOST_IP}:31090"
  echo "use 'kubectl --kubeconfig=${CERT_DIR}/admin-kube-aggregator.kubeconfig' to use the aggregated API server"
}
# Launch kube-controller-manager in the background, logging to CTLRMGR_LOG.
# Enables node-CIDR allocation for the kubenet plugin and switches to the
# external cloud provider flags when EXTERNAL_CLOUD_PROVIDER=true.
# Globals written: CTLRMGR_LOG, CTLRMGR_PID.
function start_controller_manager {
  node_cidr_args=()
  if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
    node_cidr_args=("--allocate-node-cidrs=true" "--cluster-cidr=${CLUSTER_CIDR}")
  fi

  cloud_config_arg=("--cloud-provider=${CLOUD_PROVIDER}" "--cloud-config=${CLOUD_CONFIG}")
  if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
    cloud_config_arg=("--cloud-provider=external")
    cloud_config_arg+=("--external-cloud-volume-plugin=${EXTERNAL_CLOUD_VOLUME_PLUGIN}")
    cloud_config_arg+=("--cloud-config=${CLOUD_CONFIG}")
  fi

  CTLRMGR_LOG=${LOG_DIR}/kube-controller-manager.log
  # The ${arr[@]+"${arr[@]}"} form expands to nothing (not an empty word)
  # when the array is empty, and is safe under `set -u`.
  ${CONTROLPLANE_SUDO} "${GO_OUT}/kube-controller-manager" \
    --v="${LOG_LEVEL}" \
    --vmodule="${LOG_SPEC}" \
    --service-account-private-key-file="${SERVICE_ACCOUNT_KEY}" \
    --root-ca-file="${ROOT_CA_FILE}" \
    --cluster-signing-cert-file="${CLUSTER_SIGNING_CERT_FILE}" \
    --cluster-signing-key-file="${CLUSTER_SIGNING_KEY_FILE}" \
    --enable-hostpath-provisioner="${ENABLE_HOSTPATH_PROVISIONER}" \
    ${node_cidr_args[@]+"${node_cidr_args[@]}"} \
    --pvclaimbinder-sync-period="${CLAIM_BINDER_SYNC_PERIOD}" \
    --feature-gates="${FEATURE_GATES}" \
    "${cloud_config_arg[@]}" \
    --kubeconfig "${CERT_DIR}"/controller.kubeconfig \
    --use-service-account-credentials \
    --controllers="${KUBE_CONTROLLERS}" \
    --leader-elect=false \
    --cert-dir="${CERT_DIR}" \
    --master="https://${API_HOST}:${API_SECURE_PORT}" >"${CTLRMGR_LOG}" 2>&1 &
  CTLRMGR_PID=$!
}
  573. function start_cloud_controller_manager {
  574. if [ -z "${CLOUD_CONFIG}" ]; then
  575. echo "CLOUD_CONFIG cannot be empty!"
  576. exit 1
  577. fi
  578. if [ ! -f "${CLOUD_CONFIG}" ]; then
  579. echo "Cloud config ${CLOUD_CONFIG} doesn't exist"
  580. exit 1
  581. fi
  582. node_cidr_args=()
  583. if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
  584. node_cidr_args=("--allocate-node-cidrs=true" "--cluster-cidr=${CLUSTER_CIDR}")
  585. fi
  586. CLOUD_CTLRMGR_LOG=${LOG_DIR}/cloud-controller-manager.log
  587. ${CONTROLPLANE_SUDO} "${EXTERNAL_CLOUD_PROVIDER_BINARY:-"${GO_OUT}/cloud-controller-manager"}" \
  588. --v="${LOG_LEVEL}" \
  589. --vmodule="${LOG_SPEC}" \
  590. "${node_cidr_args[@]:-}" \
  591. --feature-gates="${FEATURE_GATES}" \
  592. --cloud-provider="${CLOUD_PROVIDER}" \
  593. --cloud-config="${CLOUD_CONFIG}" \
  594. --kubeconfig "${CERT_DIR}"/controller.kubeconfig \
  595. --use-service-account-credentials \
  596. --leader-elect=false \
  597. --master="https://${API_HOST}:${API_SECURE_PORT}" >"${CLOUD_CTLRMGR_LOG}" 2>&1 &
  598. export CLOUD_CTLRMGR_PID=$!
  599. }
  600. function wait_node_ready(){
  601. # check the nodes information after kubelet daemon start
  602. local nodes_stats="${KUBECTL} --kubeconfig '${CERT_DIR}/admin.kubeconfig' get nodes"
  603. local node_name=$HOSTNAME_OVERRIDE
  604. local system_node_wait_time=30
  605. local interval_time=2
  606. kube::util::wait_for_success "$system_node_wait_time" "$interval_time" "$nodes_stats | grep $node_name"
  607. if [ $? == "1" ]; then
  608. echo "time out on waiting $node_name info"
  609. exit 1
  610. fi
  611. }
  612. function start_kubelet {
  613. KUBELET_LOG=${LOG_DIR}/kubelet.log
  614. mkdir -p "${POD_MANIFEST_PATH}" &>/dev/null || sudo mkdir -p "${POD_MANIFEST_PATH}"
  615. cloud_config_arg=("--cloud-provider=${CLOUD_PROVIDER}" "--cloud-config=${CLOUD_CONFIG}")
  616. if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
  617. cloud_config_arg=("--cloud-provider=external")
  618. if [[ "${CLOUD_PROVIDER:-}" == "aws" ]]; then
  619. cloud_config_arg+=("--provider-id=$(curl http://169.254.169.254/latest/meta-data/instance-id)")
  620. else
  621. cloud_config_arg+=("--provider-id=${KUBELET_PROVIDER_ID}")
  622. fi
  623. fi
  624. mkdir -p "/var/lib/kubelet" &>/dev/null || sudo mkdir -p "/var/lib/kubelet"
  625. # Enable dns
  626. if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
  627. if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
  628. dns_args=("--cluster-dns=${LOCAL_DNS_IP}" "--cluster-domain=${DNS_DOMAIN}")
  629. else
  630. dns_args=("--cluster-dns=${DNS_SERVER_IP}" "--cluster-domain=${DNS_DOMAIN}")
  631. fi
  632. else
  633. # To start a private DNS server set ENABLE_CLUSTER_DNS and
  634. # DNS_SERVER_IP/DOMAIN. This will at least provide a working
  635. # DNS server for real world hostnames.
  636. dns_args=("--cluster-dns=8.8.8.8")
  637. fi
  638. net_plugin_args=()
  639. if [[ -n "${NET_PLUGIN}" ]]; then
  640. net_plugin_args=("--network-plugin=${NET_PLUGIN}")
  641. fi
  642. auth_args=()
  643. if [[ "${KUBELET_AUTHORIZATION_WEBHOOK:-}" != "false" ]]; then
  644. auth_args+=("--authorization-mode=Webhook")
  645. fi
  646. if [[ "${KUBELET_AUTHENTICATION_WEBHOOK:-}" != "false" ]]; then
  647. auth_args+=("--authentication-token-webhook")
  648. fi
  649. if [[ -n "${CLIENT_CA_FILE:-}" ]]; then
  650. auth_args+=("--client-ca-file=${CLIENT_CA_FILE}")
  651. else
  652. auth_args+=("--client-ca-file=${CERT_DIR}/client-ca.crt")
  653. fi
  654. cni_conf_dir_args=()
  655. if [[ -n "${CNI_CONF_DIR}" ]]; then
  656. cni_conf_dir_args=("--cni-conf-dir=${CNI_CONF_DIR}")
  657. fi
  658. cni_bin_dir_args=()
  659. if [[ -n "${CNI_BIN_DIR}" ]]; then
  660. cni_bin_dir_args=("--cni-bin-dir=${CNI_BIN_DIR}")
  661. fi
  662. container_runtime_endpoint_args=()
  663. if [[ -n "${CONTAINER_RUNTIME_ENDPOINT}" ]]; then
  664. container_runtime_endpoint_args=("--container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}")
  665. fi
  666. image_service_endpoint_args=()
  667. if [[ -n "${IMAGE_SERVICE_ENDPOINT}" ]]; then
  668. image_service_endpoint_args=("--image-service-endpoint=${IMAGE_SERVICE_ENDPOINT}")
  669. fi
  670. # shellcheck disable=SC2206
  671. all_kubelet_flags=(
  672. "--v=${LOG_LEVEL}"
  673. "--vmodule=${LOG_SPEC}"
  674. "--chaos-chance=${CHAOS_CHANCE}"
  675. "--container-runtime=${CONTAINER_RUNTIME}"
  676. "--hostname-override=${HOSTNAME_OVERRIDE}"
  677. "${cloud_config_arg[@]}"
  678. "--address=${KUBELET_HOST}"
  679. --kubeconfig "${CERT_DIR}"/kubelet.kubeconfig
  680. "--feature-gates=${FEATURE_GATES}"
  681. "--cpu-cfs-quota=${CPU_CFS_QUOTA}"
  682. "--enable-controller-attach-detach=${ENABLE_CONTROLLER_ATTACH_DETACH}"
  683. "--cgroups-per-qos=${CGROUPS_PER_QOS}"
  684. "--cgroup-driver=${CGROUP_DRIVER}"
  685. "--cgroup-root=${CGROUP_ROOT}"
  686. "--eviction-hard=${EVICTION_HARD}"
  687. "--eviction-soft=${EVICTION_SOFT}"
  688. "--eviction-pressure-transition-period=${EVICTION_PRESSURE_TRANSITION_PERIOD}"
  689. "--pod-manifest-path=${POD_MANIFEST_PATH}"
  690. "--fail-swap-on=${FAIL_SWAP_ON}"
  691. ${auth_args[@]+"${auth_args[@]}"}
  692. ${dns_args[@]+"${dns_args[@]}"}
  693. ${cni_conf_dir_args[@]+"${cni_conf_dir_args[@]}"}
  694. ${cni_bin_dir_args[@]+"${cni_bin_dir_args[@]}"}
  695. ${net_plugin_args[@]+"${net_plugin_args[@]}"}
  696. ${container_runtime_endpoint_args[@]+"${container_runtime_endpoint_args[@]}"}
  697. ${image_service_endpoint_args[@]+"${image_service_endpoint_args[@]}"}
  698. "--runtime-request-timeout=${RUNTIME_REQUEST_TIMEOUT}"
  699. "--port=${KUBELET_PORT}"
  700. ${KUBELET_FLAGS}
  701. )
  702. # warn if users are running with swap allowed
  703. if [ "${FAIL_SWAP_ON}" == "false" ]; then
  704. echo "WARNING : The kubelet is configured to not fail even if swap is enabled; production deployments should disable swap."
  705. fi
  706. if [[ "${REUSE_CERTS}" != true ]]; then
  707. generate_kubelet_certs
  708. fi
  709. # shellcheck disable=SC2024
  710. sudo -E "${GO_OUT}/kubelet" "${all_kubelet_flags[@]}" >"${KUBELET_LOG}" 2>&1 &
  711. KUBELET_PID=$!
  712. # Quick check that kubelet is running.
  713. if [ -n "${KUBELET_PID}" ] && ps -p ${KUBELET_PID} > /dev/null; then
  714. echo "kubelet ( ${KUBELET_PID} ) is running."
  715. else
  716. cat "${KUBELET_LOG}" ; exit 1
  717. fi
  718. }
  719. function start_kubeproxy {
  720. PROXY_LOG=${LOG_DIR}/kube-proxy.log
  721. # wait for kubelet collect node information
  722. echo "wait kubelet ready"
  723. wait_node_ready
  724. cat <<EOF > /tmp/kube-proxy.yaml
  725. apiVersion: kubeproxy.config.k8s.io/v1alpha1
  726. kind: KubeProxyConfiguration
  727. clientConnection:
  728. kubeconfig: ${CERT_DIR}/kube-proxy.kubeconfig
  729. hostnameOverride: ${HOSTNAME_OVERRIDE}
  730. mode: ${KUBE_PROXY_MODE}
  731. EOF
  732. if [[ -n ${FEATURE_GATES} ]]; then
  733. echo "featureGates:"
  734. # Convert from foo=true,bar=false to
  735. # foo: true
  736. # bar: false
  737. for gate in $(echo "${FEATURE_GATES}" | tr ',' ' '); do
  738. echo "${gate}" | ${SED} -e 's/\(.*\)=\(.*\)/ \1: \2/'
  739. done
  740. fi >>/tmp/kube-proxy.yaml
  741. if [[ "${REUSE_CERTS}" != true ]]; then
  742. generate_kubeproxy_certs
  743. fi
  744. # shellcheck disable=SC2024
  745. sudo "${GO_OUT}/kube-proxy" \
  746. --v="${LOG_LEVEL}" \
  747. --config=/tmp/kube-proxy.yaml \
  748. --master="https://${API_HOST}:${API_SECURE_PORT}" >"${PROXY_LOG}" 2>&1 &
  749. PROXY_PID=$!
  750. }
  751. function start_kubescheduler {
  752. SCHEDULER_LOG=${LOG_DIR}/kube-scheduler.log
  753. ${CONTROLPLANE_SUDO} "${GO_OUT}/kube-scheduler" \
  754. --v="${LOG_LEVEL}" \
  755. --leader-elect=false \
  756. --kubeconfig "${CERT_DIR}"/scheduler.kubeconfig \
  757. --feature-gates="${FEATURE_GATES}" \
  758. --master="https://${API_HOST}:${API_SECURE_PORT}" >"${SCHEDULER_LOG}" 2>&1 &
  759. SCHEDULER_PID=$!
  760. }
  761. function start_kubedns {
  762. if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
  763. cp "${KUBE_ROOT}/cluster/addons/dns/kube-dns/kube-dns.yaml.in" kube-dns.yaml
  764. ${SED} -i -e "s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" kube-dns.yaml
  765. ${SED} -i -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" kube-dns.yaml
  766. ${SED} -i -e "s/{{ pillar\['dns_memory_limit'\] }}/${DNS_MEMORY_LIMIT}/g" kube-dns.yaml
  767. # TODO update to dns role once we have one.
  768. # use kubectl to create kubedns addon
  769. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kube-dns.yaml
  770. echo "Kube-dns addon successfully deployed."
  771. rm kube-dns.yaml
  772. fi
  773. }
  774. function start_nodelocaldns {
  775. cp "${KUBE_ROOT}/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml" nodelocaldns.yaml
  776. ${SED} -i -e "s/__PILLAR__DNS__DOMAIN__/${DNS_DOMAIN}/g" nodelocaldns.yaml
  777. ${SED} -i -e "s/__PILLAR__DNS__SERVER__/${DNS_SERVER_IP}/g" nodelocaldns.yaml
  778. ${SED} -i -e "s/__PILLAR__LOCAL__DNS__/${LOCAL_DNS_IP}/g" nodelocaldns.yaml
  779. # use kubectl to create nodelocaldns addon
  780. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f nodelocaldns.yaml
  781. echo "NodeLocalDNS addon successfully deployed."
  782. rm nodelocaldns.yaml
  783. }
  784. function start_kubedashboard {
  785. if [[ "${ENABLE_CLUSTER_DASHBOARD}" = true ]]; then
  786. echo "Creating kubernetes-dashboard"
  787. # use kubectl to create the dashboard
  788. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-secret.yaml"
  789. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-configmap.yaml"
  790. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-rbac.yaml"
  791. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-deployment.yaml"
  792. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml"
  793. echo "kubernetes-dashboard deployment and service successfully deployed."
  794. fi
  795. }
  796. function create_psp_policy {
  797. echo "Create podsecuritypolicy policies for RBAC."
  798. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${KUBE_ROOT}/examples/podsecuritypolicy/rbac/policies.yaml"
  799. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${KUBE_ROOT}/examples/podsecuritypolicy/rbac/roles.yaml"
  800. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${KUBE_ROOT}/examples/podsecuritypolicy/rbac/bindings.yaml"
  801. }
  802. function create_storage_class {
  803. if [ -z "${CLOUD_PROVIDER}" ]; then
  804. CLASS_FILE=${KUBE_ROOT}/cluster/addons/storage-class/local/default.yaml
  805. else
  806. CLASS_FILE=${KUBE_ROOT}/cluster/addons/storage-class/${CLOUD_PROVIDER}/default.yaml
  807. fi
  808. if [ -e "${CLASS_FILE}" ]; then
  809. echo "Create default storage class for ${CLOUD_PROVIDER}"
  810. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${CLASS_FILE}"
  811. else
  812. echo "No storage class available for ${CLOUD_PROVIDER}."
  813. fi
  814. }
function print_success {
  # Print a post-start summary: where each started component's log lives and
  # how to point kubectl at the cluster. Output varies with START_MODE
  # (all / nokubelet / kubeletonly) and ENABLE_DAEMON.
  if [[ "${START_MODE}" != "kubeletonly" ]]; then
    if [[ "${ENABLE_DAEMON}" = false ]]; then
      echo "Local Kubernetes cluster is running. Press Ctrl-C to shut it down."
    else
      echo "Local Kubernetes cluster is running."
    fi
    # Components that were not started leave their *_LOG variable unset, so
    # the :- defaults print empty lines for them.
    cat <<EOF
Logs:
${APISERVER_LOG:-}
${CTLRMGR_LOG:-}
${CLOUD_CTLRMGR_LOG:-}
${PROXY_LOG:-}
${SCHEDULER_LOG:-}
EOF
  fi

  if [[ "${START_MODE}" == "all" ]]; then
    echo " ${KUBELET_LOG}"
  elif [[ "${START_MODE}" == "nokubelet" ]]; then
    echo
    echo "No kubelet was started because you set START_MODE=nokubelet"
    echo "Run this script again with START_MODE=kubeletonly to run a kubelet"
  fi

  if [[ "${START_MODE}" != "kubeletonly" ]]; then
    echo
    if [[ "${ENABLE_DAEMON}" = false ]]; then
      echo "To start using your cluster, you can open up another terminal/tab and run:"
    else
      echo "To start using your cluster, run:"
    fi
    cat <<EOF
export KUBECONFIG=${CERT_DIR}/admin.kubeconfig
cluster/kubectl.sh
Alternatively, you can write to the default kubeconfig:
export KUBERNETES_PROVIDER=local
cluster/kubectl.sh config set-cluster local --server=https://${API_HOST}:${API_SECURE_PORT} --certificate-authority=${ROOT_CA_FILE}
cluster/kubectl.sh config set-credentials myself ${AUTH_ARGS}
cluster/kubectl.sh config set-context local --cluster=local --user=myself
cluster/kubectl.sh config use-context local
cluster/kubectl.sh
EOF
  else
    # kubeletonly mode: only the kubelet log is relevant.
    cat <<EOF
The kubelet was started.
Logs:
${KUBELET_LOG}
EOF
  fi
}
# ---------------------------------------------------------------------------
# Main flow. Statement order matters here: environment prep, then validation,
# then trap registration, then component startup in dependency order.
# ---------------------------------------------------------------------------

# If we are running in the CI, we need a few more things before we can start
if [[ "${KUBETEST_IN_DOCKER:-}" == "true" ]]; then
  echo "Preparing to test ..."
  "${KUBE_ROOT}"/hack/install-etcd.sh
  export PATH="${KUBE_ROOT}/third_party/etcd:${PATH}"
  KUBE_FASTBUILD=true make ginkgo cross
  apt-get update && apt-get install -y sudo
  apt-get remove -y systemd
  # configure shared mounts to prevent failure in DIND scenarios
  mount --make-rshared /
  # kubekins has a special directory for docker root
  DOCKER_ROOT="/docker-graph"
fi

# validate that etcd is: not running, in path, and has minimum required version.
if [[ "${START_MODE}" != "kubeletonly" ]]; then
  kube::etcd::validate
fi

if [ "${CONTAINER_RUNTIME}" == "docker" ] && ! kube::util::ensure_docker_daemon_connectivity; then
  exit 1
fi

# Make sure no apiserver is already bound to the port before we start one.
if [[ "${START_MODE}" != "kubeletonly" ]]; then
  test_apiserver_off
fi

kube::util::test_openssl_installed
kube::util::ensure-cfssl

### IF the user didn't supply an output/ for the build... Then we detect.
if [ "${GO_OUT}" == "" ]; then
  detect_binary
fi

echo "Detected host and ready to start services. Doing some housekeeping first..."
echo "Using GO_OUT ${GO_OUT}"
export KUBELET_CIDFILE=/tmp/kubelet.cid

# In foreground (non-daemon) mode, tear everything down when the script exits.
if [[ "${ENABLE_DAEMON}" = false ]]; then
  trap cleanup EXIT
fi

echo "Starting services now!"
# Control plane first: etcd -> apiserver -> controllers -> scheduler -> addons.
if [[ "${START_MODE}" != "kubeletonly" ]]; then
  start_etcd
  set_service_accounts
  start_apiserver
  start_controller_manager
  if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
    start_cloud_controller_manager
  fi
  start_kubescheduler
  start_kubedns
  if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
    start_nodelocaldns
  fi
  start_kubedashboard
fi

if [[ "${START_MODE}" != "nokubelet" ]]; then
  ## TODO remove this check if/when kubelet is supported on darwin
  # Detect the OS name/arch and display appropriate error.
  case "$(uname -s)" in
    Darwin)
      print_color "kubelet is not currently supported in darwin, kubelet aborted."
      KUBELET_LOG=""
      ;;
    Linux)
      start_kubelet
      ;;
    *)
      print_color "Unsupported host OS. Must be Linux or Mac OS X, kubelet aborted."
      ;;
  esac
fi

# kube-proxy needs a registered node, so it starts after the kubelet.
if [[ "${START_MODE}" != "kubeletonly" ]]; then
  if [[ "${START_MODE}" != "nokubeproxy" ]]; then
    start_kubeproxy
  fi
fi

if [[ -n "${PSP_ADMISSION}" && "${AUTHORIZATION_MODE}" = *RBAC* ]]; then
  create_psp_policy
fi

if [[ "${DEFAULT_STORAGE_CLASS}" = "true" ]]; then
  create_storage_class
fi

print_success

# Foreground mode: stay alive, polling component health until Ctrl-C
# (the EXIT trap registered above then runs cleanup).
if [[ "${ENABLE_DAEMON}" = false ]]; then
  while true; do sleep 1; healthcheck; done
fi

if [[ "${KUBETEST_IN_DOCKER:-}" == "true" ]]; then
  cluster/kubectl.sh config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt
  cluster/kubectl.sh config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt
  cluster/kubectl.sh config set-context local --cluster=local --user=myself
  cluster/kubectl.sh config use-context local
fi