local-up-cluster.sh 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Root of the kubernetes tree, relative to this script's location.
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..

# This command builds and runs a local kubernetes cluster.
# You may need to run this as root to allow kubelet to open docker's socket,
# and to write the test CA in /var/run/kubernetes.

# Every setting below is overridable from the environment; the value on the
# right of :- is the default used when the variable is unset or empty.
DOCKER_OPTS=${DOCKER_OPTS:-""}
# DOCKER is an array: the docker binary plus any user-supplied options.
export DOCKER=(docker "${DOCKER_OPTS[@]}")
DOCKER_ROOT=${DOCKER_ROOT:-""}
ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-""}
DENY_SECURITY_CONTEXT_ADMISSION=${DENY_SECURITY_CONTEXT_ADMISSION:-""}
PSP_ADMISSION=${PSP_ADMISSION:-""}
NODE_ADMISSION=${NODE_ADMISSION:-""}
RUNTIME_CONFIG=${RUNTIME_CONFIG:-""}
KUBELET_AUTHORIZATION_WEBHOOK=${KUBELET_AUTHORIZATION_WEBHOOK:-""}
KUBELET_AUTHENTICATION_WEBHOOK=${KUBELET_AUTHENTICATION_WEBHOOK:-""}
POD_MANIFEST_PATH=${POD_MANIFEST_PATH:-"/var/run/kubernetes/static-pods"}
KUBELET_FLAGS=${KUBELET_FLAGS:-""}
KUBELET_IMAGE=${KUBELET_IMAGE:-""}
# many dev environments run with swap on, so we don't fail in this env
FAIL_SWAP_ON=${FAIL_SWAP_ON:-"false"}
# Name of the network plugin, eg: "kubenet"
NET_PLUGIN=${NET_PLUGIN:-""}
# Place the config files and binaries required by NET_PLUGIN in these directory,
# eg: "/etc/cni/net.d" for config files, and "/opt/cni/bin" for binaries.
CNI_CONF_DIR=${CNI_CONF_DIR:-""}
CNI_BIN_DIR=${CNI_BIN_DIR:-""}
CLUSTER_CIDR=${CLUSTER_CIDR:-10.1.0.0/16}
SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/24}
FIRST_SERVICE_CLUSTER_IP=${FIRST_SERVICE_CLUSTER_IP:-10.0.0.1}
# if enabled, must set CGROUP_ROOT
CGROUPS_PER_QOS=${CGROUPS_PER_QOS:-true}
# name of the cgroup driver, i.e. cgroupfs or systemd
CGROUP_DRIVER=${CGROUP_DRIVER:-""}
# if cgroups per qos is enabled, optionally change cgroup root
CGROUP_ROOT=${CGROUP_ROOT:-""}
# owner of client certs, default to current user if not specified
USER=${USER:-$(whoami)}

# enables testing eviction scenarios locally.
EVICTION_HARD=${EVICTION_HARD:-"memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%"}
EVICTION_SOFT=${EVICTION_SOFT:-""}
EVICTION_PRESSURE_TRANSITION_PERIOD=${EVICTION_PRESSURE_TRANSITION_PERIOD:-"1m"}

# This script uses docker0 (or whatever container bridge docker is currently using)
# and we don't know the IP of the DNS pod to pass in as --cluster-dns.
# To set this up by hand, set this flag and change DNS_SERVER_IP.
# Note also that you need API_HOST (defined above) for correct DNS.
KUBE_PROXY_MODE=${KUBE_PROXY_MODE:-""}
ENABLE_CLUSTER_DNS=${KUBE_ENABLE_CLUSTER_DNS:-true}
ENABLE_NODELOCAL_DNS=${KUBE_ENABLE_NODELOCAL_DNS:-false}
DNS_SERVER_IP=${KUBE_DNS_SERVER_IP:-10.0.0.10}
LOCAL_DNS_IP=${KUBE_LOCAL_DNS_IP:-169.254.20.10}
DNS_MEMORY_LIMIT=${KUBE_DNS_MEMORY_LIMIT:-170Mi}
DNS_DOMAIN=${KUBE_DNS_NAME:-"cluster.local"}
KUBECTL=${KUBECTL:-"${KUBE_ROOT}/cluster/kubectl.sh"}
# How long (tries / per-try timeout) to wait for the apiserver /healthz URL.
WAIT_FOR_URL_API_SERVER=${WAIT_FOR_URL_API_SERVER:-60}
MAX_TIME_FOR_URL_API_SERVER=${MAX_TIME_FOR_URL_API_SERVER:-1}
ENABLE_DAEMON=${ENABLE_DAEMON:-false}
HOSTNAME_OVERRIDE=${HOSTNAME_OVERRIDE:-"127.0.0.1"}
EXTERNAL_CLOUD_PROVIDER=${EXTERNAL_CLOUD_PROVIDER:-false}
EXTERNAL_CLOUD_PROVIDER_BINARY=${EXTERNAL_CLOUD_PROVIDER_BINARY:-""}
EXTERNAL_CLOUD_VOLUME_PLUGIN=${EXTERNAL_CLOUD_VOLUME_PLUGIN:-""}
CLOUD_PROVIDER=${CLOUD_PROVIDER:-""}
CLOUD_CONFIG=${CLOUD_CONFIG:-""}
KUBELET_PROVIDER_ID=${KUBELET_PROVIDER_ID:-"$(hostname)"}
FEATURE_GATES=${FEATURE_GATES:-"AllAlpha=false"}
STORAGE_BACKEND=${STORAGE_BACKEND:-"etcd3"}
STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-"application/vnd.kubernetes.protobuf"}
# preserve etcd data. you also need to set ETCD_DIR.
PRESERVE_ETCD="${PRESERVE_ETCD:-false}"
# enable kubernetes dashboard
ENABLE_CLUSTER_DASHBOARD=${KUBE_ENABLE_CLUSTER_DASHBOARD:-false}
# RBAC Mode options
AUTHORIZATION_MODE=${AUTHORIZATION_MODE:-"Node,RBAC"}
KUBECONFIG_TOKEN=${KUBECONFIG_TOKEN:-""}
AUTH_ARGS=${AUTH_ARGS:-""}
# WebHook Authentication and Authorization
AUTHORIZATION_WEBHOOK_CONFIG_FILE=${AUTHORIZATION_WEBHOOK_CONFIG_FILE:-""}
AUTHENTICATION_WEBHOOK_CONFIG_FILE=${AUTHENTICATION_WEBHOOK_CONFIG_FILE:-""}
# Install a default storage class (enabled by default)
DEFAULT_STORAGE_CLASS=${KUBE_DEFAULT_STORAGE_CLASS:-true}
# Do not run the mutation detector by default on a local cluster.
# It is intended for a specific type of testing and inherently leaks memory.
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-false}"
export KUBE_CACHE_MUTATION_DETECTOR
# panic the server on watch decode errors since they are considered coder mistakes
KUBE_PANIC_WATCH_DECODE_ERROR="${KUBE_PANIC_WATCH_DECODE_ERROR:-true}"
export KUBE_PANIC_WATCH_DECODE_ERROR
# Default list of admission Controllers to invoke prior to persisting objects in cluster
# The order defined here does not matter.
ENABLE_ADMISSION_PLUGINS=${ENABLE_ADMISSION_PLUGINS:-"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,Priority,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"}
DISABLE_ADMISSION_PLUGINS=${DISABLE_ADMISSION_PLUGINS:-""}
ADMISSION_CONTROL_CONFIG_FILE=${ADMISSION_CONTROL_CONFIG_FILE:-""}
# START_MODE can be 'all', 'kubeletonly', 'nokubelet', or 'nokubeproxy'
START_MODE=${START_MODE:-"all"}
# A list of controllers to enable
KUBE_CONTROLLERS="${KUBE_CONTROLLERS:-"*"}"
# Audit policy
AUDIT_POLICY_FILE=${AUDIT_POLICY_FILE:-""}

# sanity check for OpenStack provider: it cannot run without a cloud config file.
if [ "${CLOUD_PROVIDER}" == "openstack" ]; then
  if [ "${CLOUD_CONFIG}" == "" ]; then
    echo "Missing CLOUD_CONFIG env for OpenStack provider!"
    exit 1
  fi
  if [ ! -f "${CLOUD_CONFIG}" ]; then
    echo "Cloud config ${CLOUD_CONFIG} doesn't exist"
    exit 1
  fi
fi
  122. if [ "$(id -u)" != "0" ]; then
  123. echo "WARNING : This script MAY be run as root for docker socket / iptables functionality; if failures occur, retry as root." 2>&1
  124. fi
  125. # Stop right away if the build fails
  126. set -e
  127. source "${KUBE_ROOT}/hack/lib/init.sh"
  128. kube::util::ensure-gnu-sed
  129. function usage {
  130. echo "This script starts a local kube cluster. "
  131. echo "Example 0: hack/local-up-cluster.sh -h (this 'help' usage description)"
  132. echo "Example 1: hack/local-up-cluster.sh -o _output/dockerized/bin/linux/amd64/ (run from docker output)"
  133. echo "Example 2: hack/local-up-cluster.sh -O (auto-guess the bin path for your platform)"
  134. echo "Example 3: hack/local-up-cluster.sh (build a local copy of the source)"
  135. }
  136. # This function guesses where the existing cached binary build is for the `-O`
  137. # flag
  138. function guess_built_binary_path {
  139. local apiserver_path
  140. apiserver_path=$(kube::util::find-binary "kube-apiserver")
  141. if [[ -z "${apiserver_path}" ]]; then
  142. return
  143. fi
  144. echo -n "$(dirname "${apiserver_path}")"
  145. }
  146. ### Allow user to supply the source directory.
  147. GO_OUT=${GO_OUT:-}
  148. while getopts "ho:O" OPTION
  149. do
  150. case ${OPTION} in
  151. o)
  152. echo "skipping build"
  153. GO_OUT="${OPTARG}"
  154. echo "using source ${GO_OUT}"
  155. ;;
  156. O)
  157. GO_OUT=$(guess_built_binary_path)
  158. if [ "${GO_OUT}" == "" ]; then
  159. echo "Could not guess the correct output directory to use."
  160. exit 1
  161. fi
  162. ;;
  163. h)
  164. usage
  165. exit
  166. ;;
  167. ?)
  168. usage
  169. exit
  170. ;;
  171. esac
  172. done
  173. if [ "x${GO_OUT}" == "x" ]; then
  174. make -C "${KUBE_ROOT}" WHAT="cmd/kubectl cmd/kube-apiserver cmd/kube-controller-manager cmd/cloud-controller-manager cmd/kubelet cmd/kube-proxy cmd/kube-scheduler"
  175. else
  176. echo "skipped the build."
  177. fi
# Shut down anyway if there's an error.
set +e

# API server endpoints and addresses.
API_PORT=${API_PORT:-8080}
API_SECURE_PORT=${API_SECURE_PORT:-6443}
# WARNING: For DNS to work on most setups you should export API_HOST as the docker0 ip address,
API_HOST=${API_HOST:-localhost}
API_HOST_IP=${API_HOST_IP:-"127.0.0.1"}
ADVERTISE_ADDRESS=${ADVERTISE_ADDRESS:-""}
NODE_PORT_RANGE=${NODE_PORT_RANGE:-""}
API_BIND_ADDR=${API_BIND_ADDR:-"0.0.0.0"}
EXTERNAL_HOSTNAME=${EXTERNAL_HOSTNAME:-localhost}
KUBELET_HOST=${KUBELET_HOST:-"127.0.0.1"}
# By default only allow CORS for requests on localhost
API_CORS_ALLOWED_ORIGINS=${API_CORS_ALLOWED_ORIGINS:-/127.0.0.1(:[0-9]+)?$,/localhost(:[0-9]+)?$}
KUBELET_PORT=${KUBELET_PORT:-10250}
LOG_LEVEL=${LOG_LEVEL:-3}
# Use to increase verbosity on particular files, e.g. LOG_SPEC=token_controller*=5,other_controller*=4
LOG_SPEC=${LOG_SPEC:-""}
LOG_DIR=${LOG_DIR:-"/tmp"}
# Container runtime selection and CRI endpoints.
CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"}
CONTAINER_RUNTIME_ENDPOINT=${CONTAINER_RUNTIME_ENDPOINT:-""}
RUNTIME_REQUEST_TIMEOUT=${RUNTIME_REQUEST_TIMEOUT:-"2m"}
IMAGE_SERVICE_ENDPOINT=${IMAGE_SERVICE_ENDPOINT:-""}
CHAOS_CHANCE=${CHAOS_CHANCE:-0.0}
CPU_CFS_QUOTA=${CPU_CFS_QUOTA:-true}
ENABLE_HOSTPATH_PROVISIONER=${ENABLE_HOSTPATH_PROVISIONER:-"false"}
CLAIM_BINDER_SYNC_PERIOD=${CLAIM_BINDER_SYNC_PERIOD:-"15s"} # current k8s default
ENABLE_CONTROLLER_ATTACH_DETACH=${ENABLE_CONTROLLER_ATTACH_DETACH:-"true"} # current default

# This is the default dir and filename where the apiserver will generate a self-signed cert
# which should be able to be used as the CA to verify itself
CERT_DIR=${CERT_DIR:-"/var/run/kubernetes"}
ROOT_CA_FILE=${CERT_DIR}/server-ca.crt
ROOT_CA_KEY=${CERT_DIR}/server-ca.key
CLUSTER_SIGNING_CERT_FILE=${CLUSTER_SIGNING_CERT_FILE:-"${ROOT_CA_FILE}"}
CLUSTER_SIGNING_KEY_FILE=${CLUSTER_SIGNING_KEY_FILE:-"${ROOT_CA_KEY}"}
# Reuse certs will skip generate new ca/cert files under CERT_DIR
# it's useful with PRESERVE_ETCD=true because new ca will make existed service account secrets invalided
REUSE_CERTS=${REUSE_CERTS:-false}

# name of the cgroup driver, i.e. cgroupfs or systemd
if [[ ${CONTAINER_RUNTIME} == "docker" ]]; then
  # default cgroup driver to match what is reported by docker to simplify local development
  if [[ -z ${CGROUP_DRIVER} ]]; then
    # match driver with docker runtime reported value (they must match)
    CGROUP_DRIVER=$(docker info | grep "Cgroup Driver:" | sed -e 's/^[[:space:]]*//'|cut -f3- -d' ')
    echo "Kubelet cgroup driver defaulted to use: ${CGROUP_DRIVER}"
  fi
  # Expose the docker daemon log next to the other component logs.
  if [[ -f /var/log/docker.log && ! -f "${LOG_DIR}/docker.log" ]]; then
    ln -s /var/log/docker.log "${LOG_DIR}/docker.log"
  fi
fi

# Ensure CERT_DIR is created for auto-generated crt/key and kubeconfig
mkdir -p "${CERT_DIR}" &>/dev/null || sudo mkdir -p "${CERT_DIR}"
# Empty when CERT_DIR is writable by the current user; "sudo -E" otherwise, so
# control-plane components can still write their certs/kubeconfigs.
CONTROLPLANE_SUDO=$(test -w "${CERT_DIR}" || echo "sudo -E")
  231. function test_apiserver_off {
  232. # For the common local scenario, fail fast if server is already running.
  233. # this can happen if you run local-up-cluster.sh twice and kill etcd in between.
  234. if [[ "${API_PORT}" -gt "0" ]]; then
  235. if ! curl --silent -g "${API_HOST}:${API_PORT}" ; then
  236. echo "API SERVER insecure port is free, proceeding..."
  237. else
  238. echo "ERROR starting API SERVER, exiting. Some process on ${API_HOST} is serving already on ${API_PORT}"
  239. exit 1
  240. fi
  241. fi
  242. if ! curl --silent -k -g "${API_HOST}:${API_SECURE_PORT}" ; then
  243. echo "API SERVER secure port is free, proceeding..."
  244. else
  245. echo "ERROR starting API SERVER, exiting. Some process on ${API_HOST} is serving already on ${API_SECURE_PORT}"
  246. exit 1
  247. fi
  248. }
  249. function detect_binary {
  250. # Detect the OS name/arch so that we can find our binary
  251. case "$(uname -s)" in
  252. Darwin)
  253. host_os=darwin
  254. ;;
  255. Linux)
  256. host_os=linux
  257. ;;
  258. *)
  259. echo "Unsupported host OS. Must be Linux or Mac OS X." >&2
  260. exit 1
  261. ;;
  262. esac
  263. case "$(uname -m)" in
  264. x86_64*)
  265. host_arch=amd64
  266. ;;
  267. i?86_64*)
  268. host_arch=amd64
  269. ;;
  270. amd64*)
  271. host_arch=amd64
  272. ;;
  273. aarch64*)
  274. host_arch=arm64
  275. ;;
  276. arm64*)
  277. host_arch=arm64
  278. ;;
  279. arm*)
  280. host_arch=arm
  281. ;;
  282. i?86*)
  283. host_arch=x86
  284. ;;
  285. s390x*)
  286. host_arch=s390x
  287. ;;
  288. ppc64le*)
  289. host_arch=ppc64le
  290. ;;
  291. *)
  292. echo "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le." >&2
  293. exit 1
  294. ;;
  295. esac
  296. GO_OUT="${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}"
  297. }
# Tear down everything this script started: each component process (and its
# children), then etcd. Intended as the trap handler on exit; always exits 0.
cleanup()
{
  echo "Cleaning up..."
  # delete running images
  # if [[ "${ENABLE_CLUSTER_DNS}" == true ]]; then
  # Still need to figure why this commands throw an error: Error from server: client: etcd cluster is unavailable or misconfigured
  # ${KUBECTL} --namespace=kube-system delete service kube-dns
  # And this one hang forever:
  # ${KUBECTL} --namespace=kube-system delete rc kube-dns-v10
  # fi

  # For each component: collect its child PIDs plus the PID itself, then kill
  # the whole set (sudo, since some components were started via sudo).

  # Check if the API server is still running
  [[ -n "${APISERVER_PID-}" ]] && kube::util::read-array APISERVER_PIDS < <(pgrep -P "${APISERVER_PID}" ; ps -o pid= -p "${APISERVER_PID}")
  [[ -n "${APISERVER_PIDS-}" ]] && sudo kill "${APISERVER_PIDS[@]}" 2>/dev/null

  # Check if the controller-manager is still running
  [[ -n "${CTLRMGR_PID-}" ]] && kube::util::read-array CTLRMGR_PIDS < <(pgrep -P "${CTLRMGR_PID}" ; ps -o pid= -p "${CTLRMGR_PID}")
  [[ -n "${CTLRMGR_PIDS-}" ]] && sudo kill "${CTLRMGR_PIDS[@]}" 2>/dev/null

  # Check if the cloud-controller-manager is still running
  [[ -n "${CLOUD_CTLRMGR_PID-}" ]] && kube::util::read-array CLOUD_CTLRMGR_PIDS < <(pgrep -P "${CLOUD_CTLRMGR_PID}" ; ps -o pid= -p "${CLOUD_CTLRMGR_PID}")
  [[ -n "${CLOUD_CTLRMGR_PIDS-}" ]] && sudo kill "${CLOUD_CTLRMGR_PIDS[@]}" 2>/dev/null

  # Check if the kubelet is still running
  [[ -n "${KUBELET_PID-}" ]] && kube::util::read-array KUBELET_PIDS < <(pgrep -P "${KUBELET_PID}" ; ps -o pid= -p "${KUBELET_PID}")
  [[ -n "${KUBELET_PIDS-}" ]] && sudo kill "${KUBELET_PIDS[@]}" 2>/dev/null

  # Check if the proxy is still running
  [[ -n "${PROXY_PID-}" ]] && kube::util::read-array PROXY_PIDS < <(pgrep -P "${PROXY_PID}" ; ps -o pid= -p "${PROXY_PID}")
  [[ -n "${PROXY_PIDS-}" ]] && sudo kill "${PROXY_PIDS[@]}" 2>/dev/null

  # Check if the scheduler is still running
  [[ -n "${SCHEDULER_PID-}" ]] && kube::util::read-array SCHEDULER_PIDS < <(pgrep -P "${SCHEDULER_PID}" ; ps -o pid= -p "${SCHEDULER_PID}")
  [[ -n "${SCHEDULER_PIDS-}" ]] && sudo kill "${SCHEDULER_PIDS[@]}" 2>/dev/null

  # Check if the etcd is still running
  [[ -n "${ETCD_PID-}" ]] && kube::etcd::stop
  # Only wipe the etcd data directory when the user did not ask to keep it.
  if [[ "${PRESERVE_ETCD}" == "false" ]]; then
    [[ -n "${ETCD_DIR-}" ]] && kube::etcd::clean_etcd_dir
  fi
  exit 0
}
  333. # Check if all processes are still running. Prints a warning once each time
  334. # a process dies unexpectedly.
  335. function healthcheck {
  336. if [[ -n "${APISERVER_PID-}" ]] && ! sudo kill -0 "${APISERVER_PID}" 2>/dev/null; then
  337. warning_log "API server terminated unexpectedly, see ${APISERVER_LOG}"
  338. APISERVER_PID=
  339. fi
  340. if [[ -n "${CTLRMGR_PID-}" ]] && ! sudo kill -0 "${CTLRMGR_PID}" 2>/dev/null; then
  341. warning_log "kube-controller-manager terminated unexpectedly, see ${CTLRMGR_LOG}"
  342. CTLRMGR_PID=
  343. fi
  344. if [[ -n "${KUBELET_PID-}" ]] && ! sudo kill -0 "${KUBELET_PID}" 2>/dev/null; then
  345. warning_log "kubelet terminated unexpectedly, see ${KUBELET_LOG}"
  346. KUBELET_PID=
  347. fi
  348. if [[ -n "${PROXY_PID-}" ]] && ! sudo kill -0 "${PROXY_PID}" 2>/dev/null; then
  349. warning_log "kube-proxy terminated unexpectedly, see ${PROXY_LOG}"
  350. PROXY_PID=
  351. fi
  352. if [[ -n "${SCHEDULER_PID-}" ]] && ! sudo kill -0 "${SCHEDULER_PID}" 2>/dev/null; then
  353. warning_log "scheduler terminated unexpectedly, see ${SCHEDULER_LOG}"
  354. SCHEDULER_PID=
  355. fi
  356. if [[ -n "${ETCD_PID-}" ]] && ! sudo kill -0 "${ETCD_PID}" 2>/dev/null; then
  357. warning_log "etcd terminated unexpectedly"
  358. ETCD_PID=
  359. fi
  360. }
  361. function print_color {
  362. message=$1
  363. prefix=${2:+$2: } # add colon only if defined
  364. color=${3:-1} # default is red
  365. echo -n "$(tput bold)$(tput setaf "${color}")"
  366. echo "${prefix}${message}"
  367. echo -n "$(tput sgr0)"
  368. }
  369. function warning_log {
  370. print_color "$1" "W$(date "+%m%d %H:%M:%S")]" 1
  371. }
  372. function start_etcd {
  373. echo "Starting etcd"
  374. export ETCD_LOGFILE=${LOG_DIR}/etcd.log
  375. kube::etcd::start
  376. }
  377. function set_service_accounts {
  378. SERVICE_ACCOUNT_LOOKUP=${SERVICE_ACCOUNT_LOOKUP:-true}
  379. SERVICE_ACCOUNT_KEY=${SERVICE_ACCOUNT_KEY:-/tmp/kube-serviceaccount.key}
  380. # Generate ServiceAccount key if needed
  381. if [[ ! -f "${SERVICE_ACCOUNT_KEY}" ]]; then
  382. mkdir -p "$(dirname "${SERVICE_ACCOUNT_KEY}")"
  383. openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null
  384. fi
  385. }
# Create the CA signers plus every serving/client certificate the control
# plane needs, and the kube-aggregator kubeconfig, all under CERT_DIR.
function generate_certs {
  # Create CA signers
  if [[ "${ENABLE_SINGLE_CA_SIGNER:-}" = true ]]; then
    kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"client auth","server auth"'
    # Single-signer mode: the client CA is a copy of the server CA.
    sudo cp "${CERT_DIR}/server-ca.key" "${CERT_DIR}/client-ca.key"
    sudo cp "${CERT_DIR}/server-ca.crt" "${CERT_DIR}/client-ca.crt"
    sudo cp "${CERT_DIR}/server-ca-config.json" "${CERT_DIR}/client-ca-config.json"
  else
    kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"server auth"'
    kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" client '"client auth"'
  fi

  # Create auth proxy client ca
  kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header '"client auth"'

  # serving cert for kube-apiserver
  kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-apiserver kubernetes.default kubernetes.default.svc "localhost" "${API_HOST_IP}" "${API_HOST}" "${FIRST_SERVICE_CLUSTER_IP}"

  # Create client certs signed with client-ca, given id, given CN and a number of groups
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' controller system:kube-controller-manager
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' scheduler system:kube-scheduler
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' admin system:admin system:masters
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-apiserver kube-apiserver

  # Create matching certificates for kube-aggregator
  kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-aggregator api.kube-public.svc "localhost" "${API_HOST_IP}"
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header-ca auth-proxy system:auth-proxy

  # TODO remove masters and add rolebinding
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-aggregator system:kube-aggregator system:masters
  kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-aggregator
}
  413. function generate_kubeproxy_certs {
  414. kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-proxy system:kube-proxy system:nodes
  415. kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-proxy
  416. }
  417. function generate_kubelet_certs {
  418. kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kubelet "system:node:${HOSTNAME_OVERRIDE}" system:nodes
  419. kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kubelet
  420. }
# Generate certificates/kubeconfigs (unless REUSE_CERTS), start kube-apiserver
# in the background (PID in APISERVER_PID, log in APISERVER_LOG), wait for
# /healthz, then create the per-component kubeconfigs and the RBAC binding
# that lets the apiserver talk to the kubelet.
function start_apiserver {
  # Translate the individual *_ADMISSION env toggles into admission plugin names.
  security_admission=""
  if [[ -n "${DENY_SECURITY_CONTEXT_ADMISSION}" ]]; then
    security_admission=",SecurityContextDeny"
  fi
  if [[ -n "${PSP_ADMISSION}" ]]; then
    security_admission=",PodSecurityPolicy"
  fi
  if [[ -n "${NODE_ADMISSION}" ]]; then
    security_admission=",NodeRestriction"
  fi
  # Append security_admission plugin
  ENABLE_ADMISSION_PLUGINS="${ENABLE_ADMISSION_PLUGINS}${security_admission}"

  authorizer_arg=""
  if [[ -n "${AUTHORIZATION_MODE}" ]]; then
    authorizer_arg="--authorization-mode=${AUTHORIZATION_MODE}"
  fi
  priv_arg=""
  if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
    priv_arg="--allow-privileged=${ALLOW_PRIVILEGED}"
  fi
  runtime_config=""
  if [[ -n "${RUNTIME_CONFIG}" ]]; then
    runtime_config="--runtime-config=${RUNTIME_CONFIG}"
  fi

  # Let the API server pick a default address when API_HOST_IP
  # is set to 127.0.0.1
  advertise_address=""
  if [[ "${API_HOST_IP}" != "127.0.0.1" ]]; then
    advertise_address="--advertise-address=${API_HOST_IP}"
  fi
  # An explicit ADVERTISE_ADDRESS overrides the API_HOST_IP-derived value.
  if [[ "${ADVERTISE_ADDRESS}" != "" ]] ; then
    advertise_address="--advertise-address=${ADVERTISE_ADDRESS}"
  fi
  node_port_range=""
  if [[ "${NODE_PORT_RANGE}" != "" ]] ; then
    node_port_range="--service-node-port-range=${NODE_PORT_RANGE}"
  fi

  if [[ "${REUSE_CERTS}" != true ]]; then
    # Create Certs
    generate_certs
  fi

  cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}"
  if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
    cloud_config_arg="--cloud-provider=external"
  fi

  # Without an explicit audit policy, fall back to logging everything at the
  # Metadata level via a generated policy file in /tmp.
  if [[ -z "${AUDIT_POLICY_FILE}" ]]; then
    cat <<EOF > /tmp/kube-audit-policy-file
# Log all requests at the Metadata level.
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Metadata
EOF
    AUDIT_POLICY_FILE="/tmp/kube-audit-policy-file"
  fi

  APISERVER_LOG=${LOG_DIR}/kube-apiserver.log
  # ${runtime_config} and ${cloud_config_arg} are intentionally unquoted so
  # that empty values disappear instead of becoming empty argv entries.
  # shellcheck disable=SC2086
  ${CONTROLPLANE_SUDO} "${GO_OUT}/kube-apiserver" "${authorizer_arg}" "${priv_arg}" ${runtime_config} \
    ${cloud_config_arg} \
    "${advertise_address}" \
    "${node_port_range}" \
    --v="${LOG_LEVEL}" \
    --vmodule="${LOG_SPEC}" \
    --audit-policy-file="${AUDIT_POLICY_FILE}" \
    --audit-log-path="${LOG_DIR}/kube-apiserver-audit.log" \
    --authorization-webhook-config-file="${AUTHORIZATION_WEBHOOK_CONFIG_FILE}" \
    --authentication-token-webhook-config-file="${AUTHENTICATION_WEBHOOK_CONFIG_FILE}" \
    --cert-dir="${CERT_DIR}" \
    --client-ca-file="${CERT_DIR}/client-ca.crt" \
    --kubelet-client-certificate="${CERT_DIR}/client-kube-apiserver.crt" \
    --kubelet-client-key="${CERT_DIR}/client-kube-apiserver.key" \
    --service-account-key-file="${SERVICE_ACCOUNT_KEY}" \
    --service-account-lookup="${SERVICE_ACCOUNT_LOOKUP}" \
    --service-account-issuer="https://kubernetes.default.svc" \
    --service-account-signing-key-file="${SERVICE_ACCOUNT_KEY}" \
    --enable-admission-plugins="${ENABLE_ADMISSION_PLUGINS}" \
    --disable-admission-plugins="${DISABLE_ADMISSION_PLUGINS}" \
    --admission-control-config-file="${ADMISSION_CONTROL_CONFIG_FILE}" \
    --bind-address="${API_BIND_ADDR}" \
    --secure-port="${API_SECURE_PORT}" \
    --tls-cert-file="${CERT_DIR}/serving-kube-apiserver.crt" \
    --tls-private-key-file="${CERT_DIR}/serving-kube-apiserver.key" \
    --insecure-bind-address="${API_HOST_IP}" \
    --insecure-port="${API_PORT}" \
    --storage-backend="${STORAGE_BACKEND}" \
    --storage-media-type="${STORAGE_MEDIA_TYPE}" \
    --etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
    --service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}" \
    --feature-gates="${FEATURE_GATES}" \
    --external-hostname="${EXTERNAL_HOSTNAME}" \
    --requestheader-username-headers=X-Remote-User \
    --requestheader-group-headers=X-Remote-Group \
    --requestheader-extra-headers-prefix=X-Remote-Extra- \
    --requestheader-client-ca-file="${CERT_DIR}/request-header-ca.crt" \
    --requestheader-allowed-names=system:auth-proxy \
    --proxy-client-cert-file="${CERT_DIR}/client-auth-proxy.crt" \
    --proxy-client-key-file="${CERT_DIR}/client-auth-proxy.key" \
    --cors-allowed-origins="${API_CORS_ALLOWED_ORIGINS}" >"${APISERVER_LOG}" 2>&1 &
  APISERVER_PID=$!

  # Wait for kube-apiserver to come up before launching the rest of the components.
  echo "Waiting for apiserver to come up"
  kube::util::wait_for_url "https://${API_HOST_IP}:${API_SECURE_PORT}/healthz" "apiserver: " 1 "${WAIT_FOR_URL_API_SERVER}" "${MAX_TIME_FOR_URL_API_SERVER}" \
    || { echo "check apiserver logs: ${APISERVER_LOG}" ; exit 1 ; }

  # Create kubeconfigs for all components, using client certs
  kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" admin
  ${CONTROLPLANE_SUDO} chown "${USER}" "${CERT_DIR}/client-admin.key" # make readable for kubectl
  kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" controller
  kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" scheduler

  if [[ -z "${AUTH_ARGS}" ]]; then
    AUTH_ARGS="--client-key=${CERT_DIR}/client-admin.key --client-certificate=${CERT_DIR}/client-admin.crt"
  fi

  # Grant apiserver permission to speak to the kubelet
  ${KUBECTL} --kubeconfig "${CERT_DIR}/admin.kubeconfig" create clusterrolebinding kube-apiserver-kubelet-admin --clusterrole=system:kubelet-api-admin --user=kube-apiserver

  # Copy the admin kubeconfig for use against the aggregated API server
  # (port 31090), owned by the invoking user.
  ${CONTROLPLANE_SUDO} cp "${CERT_DIR}/admin.kubeconfig" "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
  ${CONTROLPLANE_SUDO} chown "$(whoami)" "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
  ${KUBECTL} config set-cluster local-up-cluster --kubeconfig="${CERT_DIR}/admin-kube-aggregator.kubeconfig" --server="https://${API_HOST_IP}:31090"
  echo "use 'kubectl --kubeconfig=${CERT_DIR}/admin-kube-aggregator.kubeconfig' to use the aggregated API server"
}
# Start kube-controller-manager in the background; PID in CTLRMGR_PID, log in
# CTLRMGR_LOG.
function start_controller_manager {
  node_cidr_args=()
  if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
    # kubenet requires the controller-manager to allocate per-node pod CIDRs.
    node_cidr_args=("--allocate-node-cidrs=true" "--cluster-cidr=${CLUSTER_CIDR}")
  fi

  cloud_config_arg=("--cloud-provider=${CLOUD_PROVIDER}" "--cloud-config=${CLOUD_CONFIG}")
  if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
    # External provider disables the in-tree one; an external volume plugin
    # may still be configured alongside it.
    cloud_config_arg=("--cloud-provider=external")
    cloud_config_arg+=("--external-cloud-volume-plugin=${EXTERNAL_CLOUD_VOLUME_PLUGIN}")
    cloud_config_arg+=("--cloud-config=${CLOUD_CONFIG}")
  fi

  CTLRMGR_LOG=${LOG_DIR}/kube-controller-manager.log
  # ${arr[@]+"${arr[@]}"} expands to nothing (no argv entry) when the array is
  # empty, and to the properly quoted elements otherwise.
  ${CONTROLPLANE_SUDO} "${GO_OUT}/kube-controller-manager" \
    --v="${LOG_LEVEL}" \
    --vmodule="${LOG_SPEC}" \
    --service-account-private-key-file="${SERVICE_ACCOUNT_KEY}" \
    --root-ca-file="${ROOT_CA_FILE}" \
    --cluster-signing-cert-file="${CLUSTER_SIGNING_CERT_FILE}" \
    --cluster-signing-key-file="${CLUSTER_SIGNING_KEY_FILE}" \
    --enable-hostpath-provisioner="${ENABLE_HOSTPATH_PROVISIONER}" \
    ${node_cidr_args[@]+"${node_cidr_args[@]}"} \
    --pvclaimbinder-sync-period="${CLAIM_BINDER_SYNC_PERIOD}" \
    --feature-gates="${FEATURE_GATES}" \
    "${cloud_config_arg[@]}" \
    --kubeconfig "${CERT_DIR}"/controller.kubeconfig \
    --use-service-account-credentials \
    --controllers="${KUBE_CONTROLLERS}" \
    --leader-elect=false \
    --cert-dir="${CERT_DIR}" \
    --master="https://${API_HOST}:${API_SECURE_PORT}" >"${CTLRMGR_LOG}" 2>&1 &
  CTLRMGR_PID=$!
}
  572. function start_cloud_controller_manager {
  573. if [ -z "${CLOUD_CONFIG}" ]; then
  574. echo "CLOUD_CONFIG cannot be empty!"
  575. exit 1
  576. fi
  577. if [ ! -f "${CLOUD_CONFIG}" ]; then
  578. echo "Cloud config ${CLOUD_CONFIG} doesn't exist"
  579. exit 1
  580. fi
  581. node_cidr_args=()
  582. if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
  583. node_cidr_args=("--allocate-node-cidrs=true" "--cluster-cidr=${CLUSTER_CIDR}")
  584. fi
  585. CLOUD_CTLRMGR_LOG=${LOG_DIR}/cloud-controller-manager.log
  586. ${CONTROLPLANE_SUDO} "${EXTERNAL_CLOUD_PROVIDER_BINARY:-"${GO_OUT}/cloud-controller-manager"}" \
  587. --v="${LOG_LEVEL}" \
  588. --vmodule="${LOG_SPEC}" \
  589. "${node_cidr_args[@]:-}" \
  590. --feature-gates="${FEATURE_GATES}" \
  591. --cloud-provider="${CLOUD_PROVIDER}" \
  592. --cloud-config="${CLOUD_CONFIG}" \
  593. --kubeconfig "${CERT_DIR}"/controller.kubeconfig \
  594. --use-service-account-credentials \
  595. --leader-elect=false \
  596. --master="https://${API_HOST}:${API_SECURE_PORT}" >"${CLOUD_CTLRMGR_LOG}" 2>&1 &
  597. export CLOUD_CTLRMGR_PID=$!
  598. }
  599. function wait_node_ready(){
  600. # check the nodes information after kubelet daemon start
  601. local nodes_stats="${KUBECTL} --kubeconfig '${CERT_DIR}/admin.kubeconfig' get nodes"
  602. local node_name=$HOSTNAME_OVERRIDE
  603. local system_node_wait_time=30
  604. local interval_time=2
  605. kube::util::wait_for_success "$system_node_wait_time" "$interval_time" "$nodes_stats | grep $node_name"
  606. if [ $? == "1" ]; then
  607. echo "time out on waiting $node_name info"
  608. exit 1
  609. fi
  610. }
  611. function start_kubelet {
  612. KUBELET_LOG=${LOG_DIR}/kubelet.log
  613. mkdir -p "${POD_MANIFEST_PATH}" &>/dev/null || sudo mkdir -p "${POD_MANIFEST_PATH}"
  614. cloud_config_arg=("--cloud-provider=${CLOUD_PROVIDER}" "--cloud-config=${CLOUD_CONFIG}")
  615. if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
  616. cloud_config_arg=("--cloud-provider=external")
  617. if [[ "${CLOUD_PROVIDER:-}" == "aws" ]]; then
  618. cloud_config_arg+=("--provider-id=$(curl http://169.254.169.254/latest/meta-data/instance-id)")
  619. else
  620. cloud_config_arg+=("--provider-id=${KUBELET_PROVIDER_ID}")
  621. fi
  622. fi
  623. mkdir -p "/var/lib/kubelet" &>/dev/null || sudo mkdir -p "/var/lib/kubelet"
  624. # Enable dns
  625. if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
  626. if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
  627. dns_args=("--cluster-dns=${LOCAL_DNS_IP}" "--cluster-domain=${DNS_DOMAIN}")
  628. else
  629. dns_args=("--cluster-dns=${DNS_SERVER_IP}" "--cluster-domain=${DNS_DOMAIN}")
  630. fi
  631. else
  632. # To start a private DNS server set ENABLE_CLUSTER_DNS and
  633. # DNS_SERVER_IP/DOMAIN. This will at least provide a working
  634. # DNS server for real world hostnames.
  635. dns_args=("--cluster-dns=8.8.8.8")
  636. fi
  637. net_plugin_args=()
  638. if [[ -n "${NET_PLUGIN}" ]]; then
  639. net_plugin_args=("--network-plugin=${NET_PLUGIN}")
  640. fi
  641. auth_args=()
  642. if [[ "${KUBELET_AUTHORIZATION_WEBHOOK:-}" != "false" ]]; then
  643. auth_args+=("--authorization-mode=Webhook")
  644. fi
  645. if [[ "${KUBELET_AUTHENTICATION_WEBHOOK:-}" != "false" ]]; then
  646. auth_args+=("--authentication-token-webhook")
  647. fi
  648. if [[ -n "${CLIENT_CA_FILE:-}" ]]; then
  649. auth_args+=("--client-ca-file=${CLIENT_CA_FILE}")
  650. else
  651. auth_args+=("--client-ca-file=${CERT_DIR}/client-ca.crt")
  652. fi
  653. cni_conf_dir_args=()
  654. if [[ -n "${CNI_CONF_DIR}" ]]; then
  655. cni_conf_dir_args=("--cni-conf-dir=${CNI_CONF_DIR}")
  656. fi
  657. cni_bin_dir_args=()
  658. if [[ -n "${CNI_BIN_DIR}" ]]; then
  659. cni_bin_dir_args=("--cni-bin-dir=${CNI_BIN_DIR}")
  660. fi
  661. container_runtime_endpoint_args=()
  662. if [[ -n "${CONTAINER_RUNTIME_ENDPOINT}" ]]; then
  663. container_runtime_endpoint_args=("--container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}")
  664. fi
  665. image_service_endpoint_args=()
  666. if [[ -n "${IMAGE_SERVICE_ENDPOINT}" ]]; then
  667. image_service_endpoint_args=("--image-service-endpoint=${IMAGE_SERVICE_ENDPOINT}")
  668. fi
  669. # shellcheck disable=SC2206
  670. all_kubelet_flags=(
  671. "--v=${LOG_LEVEL}"
  672. "--vmodule=${LOG_SPEC}"
  673. "--chaos-chance=${CHAOS_CHANCE}"
  674. "--container-runtime=${CONTAINER_RUNTIME}"
  675. "--hostname-override=${HOSTNAME_OVERRIDE}"
  676. "${cloud_config_arg[@]}"
  677. "--address=${KUBELET_HOST}"
  678. --kubeconfig "${CERT_DIR}"/kubelet.kubeconfig
  679. "--feature-gates=${FEATURE_GATES}"
  680. "--cpu-cfs-quota=${CPU_CFS_QUOTA}"
  681. "--enable-controller-attach-detach=${ENABLE_CONTROLLER_ATTACH_DETACH}"
  682. "--cgroups-per-qos=${CGROUPS_PER_QOS}"
  683. "--cgroup-driver=${CGROUP_DRIVER}"
  684. "--cgroup-root=${CGROUP_ROOT}"
  685. "--eviction-hard=${EVICTION_HARD}"
  686. "--eviction-soft=${EVICTION_SOFT}"
  687. "--eviction-pressure-transition-period=${EVICTION_PRESSURE_TRANSITION_PERIOD}"
  688. "--pod-manifest-path=${POD_MANIFEST_PATH}"
  689. "--fail-swap-on=${FAIL_SWAP_ON}"
  690. ${auth_args[@]+"${auth_args[@]}"}
  691. ${dns_args[@]+"${dns_args[@]}"}
  692. ${cni_conf_dir_args[@]+"${cni_conf_dir_args[@]}"}
  693. ${cni_bin_dir_args[@]+"${cni_bin_dir_args[@]}"}
  694. ${net_plugin_args[@]+"${net_plugin_args[@]}"}
  695. ${container_runtime_endpoint_args[@]+"${container_runtime_endpoint_args[@]}"}
  696. ${image_service_endpoint_args[@]+"${image_service_endpoint_args[@]}"}
  697. "--runtime-request-timeout=${RUNTIME_REQUEST_TIMEOUT}"
  698. "--port=${KUBELET_PORT}"
  699. ${KUBELET_FLAGS}
  700. )
  701. # warn if users are running with swap allowed
  702. if [ "${FAIL_SWAP_ON}" == "false" ]; then
  703. echo "WARNING : The kubelet is configured to not fail even if swap is enabled; production deployments should disable swap."
  704. fi
  705. if [[ "${REUSE_CERTS}" != true ]]; then
  706. generate_kubelet_certs
  707. fi
  708. # shellcheck disable=SC2024
  709. sudo -E "${GO_OUT}/kubelet" "${all_kubelet_flags[@]}" >"${KUBELET_LOG}" 2>&1 &
  710. KUBELET_PID=$!
  711. # Quick check that kubelet is running.
  712. if [ -n "${KUBELET_PID}" ] && ps -p ${KUBELET_PID} > /dev/null; then
  713. echo "kubelet ( ${KUBELET_PID} ) is running."
  714. else
  715. cat "${KUBELET_LOG}" ; exit 1
  716. fi
  717. }
  718. function start_kubeproxy {
  719. PROXY_LOG=${LOG_DIR}/kube-proxy.log
  720. # wait for kubelet collect node information
  721. echo "wait kubelet ready"
  722. wait_node_ready
  723. cat <<EOF > /tmp/kube-proxy.yaml
  724. apiVersion: kubeproxy.config.k8s.io/v1alpha1
  725. kind: KubeProxyConfiguration
  726. clientConnection:
  727. kubeconfig: ${CERT_DIR}/kube-proxy.kubeconfig
  728. hostnameOverride: ${HOSTNAME_OVERRIDE}
  729. mode: ${KUBE_PROXY_MODE}
  730. EOF
  731. if [[ -n ${FEATURE_GATES} ]]; then
  732. echo "featureGates:"
  733. # Convert from foo=true,bar=false to
  734. # foo: true
  735. # bar: false
  736. for gate in $(echo "${FEATURE_GATES}" | tr ',' ' '); do
  737. echo "${gate}" | ${SED} -e 's/\(.*\)=\(.*\)/ \1: \2/'
  738. done
  739. fi >>/tmp/kube-proxy.yaml
  740. if [[ "${REUSE_CERTS}" != true ]]; then
  741. generate_kubeproxy_certs
  742. fi
  743. # shellcheck disable=SC2024
  744. sudo "${GO_OUT}/kube-proxy" \
  745. --v="${LOG_LEVEL}" \
  746. --config=/tmp/kube-proxy.yaml \
  747. --master="https://${API_HOST}:${API_SECURE_PORT}" >"${PROXY_LOG}" 2>&1 &
  748. PROXY_PID=$!
  749. }
  750. function start_kubescheduler {
  751. SCHEDULER_LOG=${LOG_DIR}/kube-scheduler.log
  752. ${CONTROLPLANE_SUDO} "${GO_OUT}/kube-scheduler" \
  753. --v="${LOG_LEVEL}" \
  754. --leader-elect=false \
  755. --kubeconfig "${CERT_DIR}"/scheduler.kubeconfig \
  756. --feature-gates="${FEATURE_GATES}" \
  757. --master="https://${API_HOST}:${API_SECURE_PORT}" >"${SCHEDULER_LOG}" 2>&1 &
  758. SCHEDULER_PID=$!
  759. }
  760. function start_kubedns {
  761. if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
  762. cp "${KUBE_ROOT}/cluster/addons/dns/kube-dns/kube-dns.yaml.in" kube-dns.yaml
  763. ${SED} -i -e "s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" kube-dns.yaml
  764. ${SED} -i -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" kube-dns.yaml
  765. ${SED} -i -e "s/{{ pillar\['dns_memory_limit'\] }}/${DNS_MEMORY_LIMIT}/g" kube-dns.yaml
  766. # TODO update to dns role once we have one.
  767. # use kubectl to create kubedns addon
  768. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kube-dns.yaml
  769. echo "Kube-dns addon successfully deployed."
  770. rm kube-dns.yaml
  771. fi
  772. }
  773. function start_nodelocaldns {
  774. cp "${KUBE_ROOT}/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml" nodelocaldns.yaml
  775. ${SED} -i -e "s/__PILLAR__DNS__DOMAIN__/${DNS_DOMAIN}/g" nodelocaldns.yaml
  776. ${SED} -i -e "s/__PILLAR__DNS__SERVER__/${DNS_SERVER_IP}/g" nodelocaldns.yaml
  777. ${SED} -i -e "s/__PILLAR__LOCAL__DNS__/${LOCAL_DNS_IP}/g" nodelocaldns.yaml
  778. # use kubectl to create nodelocaldns addon
  779. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f nodelocaldns.yaml
  780. echo "NodeLocalDNS addon successfully deployed."
  781. rm nodelocaldns.yaml
  782. }
  783. function start_kubedashboard {
  784. if [[ "${ENABLE_CLUSTER_DASHBOARD}" = true ]]; then
  785. echo "Creating kubernetes-dashboard"
  786. # use kubectl to create the dashboard
  787. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-secret.yaml"
  788. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-configmap.yaml"
  789. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-rbac.yaml"
  790. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-deployment.yaml"
  791. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml"
  792. echo "kubernetes-dashboard deployment and service successfully deployed."
  793. fi
  794. }
  795. function create_psp_policy {
  796. echo "Create podsecuritypolicy policies for RBAC."
  797. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${KUBE_ROOT}/examples/podsecuritypolicy/rbac/policies.yaml"
  798. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${KUBE_ROOT}/examples/podsecuritypolicy/rbac/roles.yaml"
  799. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${KUBE_ROOT}/examples/podsecuritypolicy/rbac/bindings.yaml"
  800. }
  801. function create_storage_class {
  802. if [ -z "${CLOUD_PROVIDER}" ]; then
  803. CLASS_FILE=${KUBE_ROOT}/cluster/addons/storage-class/local/default.yaml
  804. else
  805. CLASS_FILE=${KUBE_ROOT}/cluster/addons/storage-class/${CLOUD_PROVIDER}/default.yaml
  806. fi
  807. if [ -e "${CLASS_FILE}" ]; then
  808. echo "Create default storage class for ${CLOUD_PROVIDER}"
  809. ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${CLASS_FILE}"
  810. else
  811. echo "No storage class available for ${CLOUD_PROVIDER}."
  812. fi
  813. }
# Print a post-start summary: which daemons are running, where their logs
# live, and how to point kubectl at the cluster. Output depends on START_MODE
# (all / nokubelet / kubeletonly) and on ENABLE_DAEMON (foreground vs daemon).
function print_success {
  if [[ "${START_MODE}" != "kubeletonly" ]]; then
    if [[ "${ENABLE_DAEMON}" = false ]]; then
      echo "Local Kubernetes cluster is running. Press Ctrl-C to shut it down."
    else
      echo "Local Kubernetes cluster is running."
    fi
    # Per-component log paths; components that were not started expand empty.
    cat <<EOF
Logs:
${APISERVER_LOG:-}
${CTLRMGR_LOG:-}
${CLOUD_CTLRMGR_LOG:-}
${PROXY_LOG:-}
${SCHEDULER_LOG:-}
EOF
  fi

  if [[ "${START_MODE}" == "all" ]]; then
    echo " ${KUBELET_LOG}"
  elif [[ "${START_MODE}" == "nokubelet" ]]; then
    echo
    echo "No kubelet was started because you set START_MODE=nokubelet"
    echo "Run this script again with START_MODE=kubeletonly to run a kubelet"
  fi

  if [[ "${START_MODE}" != "kubeletonly" ]]; then
    echo
    if [[ "${ENABLE_DAEMON}" = false ]]; then
      echo "To start using your cluster, you can open up another terminal/tab and run:"
    else
      echo "To start using your cluster, run:"
    fi
    # kubectl usage instructions: either export the generated admin
    # kubeconfig, or register the cluster/credentials in the default config.
    cat <<EOF
export KUBECONFIG=${CERT_DIR}/admin.kubeconfig
cluster/kubectl.sh
Alternatively, you can write to the default kubeconfig:
export KUBERNETES_PROVIDER=local
cluster/kubectl.sh config set-cluster local --server=https://${API_HOST}:${API_SECURE_PORT} --certificate-authority=${ROOT_CA_FILE}
cluster/kubectl.sh config set-credentials myself ${AUTH_ARGS}
cluster/kubectl.sh config set-context local --cluster=local --user=myself
cluster/kubectl.sh config use-context local
cluster/kubectl.sh
EOF
  else
    # kubeletonly mode: only the kubelet log is relevant.
    cat <<EOF
The kubelet was started.
Logs:
${KUBELET_LOG}
EOF
  fi
}
# ---------------------------------------------------------------------------
# Top-level driver: validate the environment, then start the requested
# components in dependency order (etcd -> apiserver -> controllers ->
# scheduler -> addons -> kubelet -> kube-proxy).
# ---------------------------------------------------------------------------

# If we are running in the CI, we need a few more things before we can start
if [[ "${KUBETEST_IN_DOCKER:-}" == "true" ]]; then
  echo "Preparing to test ..."
  "${KUBE_ROOT}"/hack/install-etcd.sh
  export PATH="${KUBE_ROOT}/third_party/etcd:${PATH}"
  KUBE_FASTBUILD=true make ginkgo cross
  apt-get update && apt-get install -y sudo
  apt-get remove -y systemd
  # configure shared mounts to prevent failure in DIND scenarios
  mount --make-rshared /
  # kubekins has a special directory for docker root
  DOCKER_ROOT="/docker-graph"
fi

# validate that etcd is: not running, in path, and has minimum required version.
if [[ "${START_MODE}" != "kubeletonly" ]]; then
  kube::etcd::validate
fi

# Bail out early if docker is the runtime but the daemon is unreachable.
if [ "${CONTAINER_RUNTIME}" == "docker" ] && ! kube::util::ensure_docker_daemon_connectivity; then
  exit 1
fi

# Make sure no apiserver is already bound to our ports.
if [[ "${START_MODE}" != "kubeletonly" ]]; then
  test_apiserver_off
fi

kube::util::test_openssl_installed
kube::util::ensure-cfssl

### IF the user didn't supply an output/ for the build... Then we detect.
if [ "${GO_OUT}" == "" ]; then
  detect_binary
fi
echo "Detected host and ready to start services.  Doing some housekeeping first..."
echo "Using GO_OUT ${GO_OUT}"
export KUBELET_CIDFILE=/tmp/kubelet.cid
# In foreground mode, tear everything down when the script exits.
if [[ "${ENABLE_DAEMON}" = false ]]; then
  trap cleanup EXIT
fi

echo "Starting services now!"
if [[ "${START_MODE}" != "kubeletonly" ]]; then
  start_etcd
  set_service_accounts
  start_apiserver
  start_controller_manager
  if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
    start_cloud_controller_manager
  fi
  start_kubescheduler
  start_kubedns
  if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
    start_nodelocaldns
  fi
  start_kubedashboard
fi

if [[ "${START_MODE}" != "nokubelet" ]]; then
  ## TODO remove this check if/when kubelet is supported on darwin
  # Detect the OS name/arch and display appropriate error.
  case "$(uname -s)" in
    Darwin)
      print_color "kubelet is not currently supported in darwin, kubelet aborted."
      KUBELET_LOG=""
      ;;
    Linux)
      start_kubelet
      ;;
    *)
      print_color "Unsupported host OS.  Must be Linux or Mac OS X, kubelet aborted."
      ;;
  esac
fi

# kube-proxy requires a registered node, so it only runs when a kubelet may
# have been started (and proxying was not explicitly disabled).
if [[ "${START_MODE}" != "kubeletonly" ]]; then
  if [[ "${START_MODE}" != "nokubeproxy" ]]; then
    start_kubeproxy
  fi
fi

if [[ -n "${PSP_ADMISSION}" && "${AUTHORIZATION_MODE}" = *RBAC* ]]; then
  create_psp_policy
fi

if [[ "${DEFAULT_STORAGE_CLASS}" = "true" ]]; then
  create_storage_class
fi

print_success

# Foreground mode: stay alive, re-running the healthcheck once a second until
# the user interrupts (the EXIT trap then performs cleanup).
if [[ "${ENABLE_DAEMON}" = false ]]; then
  while true; do sleep 1; healthcheck; done
fi

# CI runs: write the cluster credentials into the default kubeconfig so the
# test harness can use plain kubectl.
if [[ "${KUBETEST_IN_DOCKER:-}" == "true" ]]; then
  cluster/kubectl.sh config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt
  cluster/kubectl.sh config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt
  cluster/kubectl.sh config set-context local --cluster=local --user=myself
  cluster/kubectl.sh config use-context local
fi