log-dump.sh 24 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583
  1. #!/usr/bin/env bash
  2. # Copyright 2017 The Kubernetes Authors.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. # Call this to dump all master and node logs into the folder specified in $1
  16. # (defaults to _artifacts). Only works if the provider supports SSH.
  17. # TODO(shyamjvs): This script should be moved to test/e2e which is where it ideally belongs.
# Abort on command failure, unset variables, and failures inside pipelines.
set -o errexit
set -o nounset
set -o pipefail

# $1: local directory that receives the dumped logs.
# $2: optional GCS directory; when non-empty, node logs are exported there directly.
# $3: namespace in which the logexporter daemonset is created.
readonly report_dir="${1:-_artifacts}"
readonly gcs_artifacts_dir="${2:-}"
readonly logexporter_namespace="${3:-logexporter}"

# In order to more trivially extend log-dump for custom deployments,
# check for a function named log_dump_custom_get_instances. If it's
# defined, we assume the function can be called with one argument, the
# role, which is either "master" or "node".
echo "Checking for custom logdump instances, if any"
if [[ $(type -t log_dump_custom_get_instances) == "function" ]]; then
  readonly use_custom_instance_list=yes
else
  readonly use_custom_instance_list=
fi

# Providers for which SSH access to masters/nodes (and gcloud tooling) is known to work.
readonly master_ssh_supported_providers="gce aws kubernetes-anywhere"
readonly node_ssh_supported_providers="gce gke aws kubernetes-anywhere"
readonly gcloud_supported_providers="gce gke kubernetes-anywhere"

# Log files harvested from /var/log on masters and nodes respectively.
readonly master_logfiles="kube-apiserver.log kube-apiserver-audit.log kube-scheduler.log kube-controller-manager.log etcd.log etcd-events.log glbc.log cluster-autoscaler.log kube-addon-manager.log fluentd.log kubelet.cov"
readonly node_logfiles="kube-proxy.log fluentd.log node-problem-detector.log kubelet.cov"
readonly node_systemd_services="node-problem-detector"
# Extra per-hollow-node logs collected when ENABLE_HOLLOW_NODE_LOGS=true (kubemark runs).
readonly hollow_node_logfiles="kubelet-hollow-node-*.log kubeproxy-hollow-node-*.log npd-hollow-node-*.log"
# Provider-specific boot/startup logs.
readonly aws_logfiles="cloud-init-output.log"
readonly gce_logfiles="startupscript.log"
readonly kern_logfile="kern.log"
# Fallback log locations used on hosts without systemd/journalctl.
readonly initd_logfiles="docker/log"
readonly supervisord_logfiles="kubelet.log supervisor/supervisord.log supervisor/kubelet-stdout.log supervisor/kubelet-stderr.log supervisor/docker-stdout.log supervisor/docker-stderr.log"
# Systemd units whose journals are captured; extendable via LOG_DUMP_SYSTEMD_SERVICES.
readonly systemd_services="kubelet kubelet-monitor kube-container-runtime-monitor ${LOG_DUMP_SYSTEMD_SERVICES:-docker}"
# When "true", also dump the full systemd journal of every host.
readonly dump_systemd_journal="${LOG_DUMP_SYSTEMD_JOURNAL:-false}"
# Log files found in WINDOWS_LOGS_DIR on Windows nodes:
readonly windows_node_logfiles="kubelet.log kube-proxy.log docker.log"
# Log files found in other directories on Windows nodes:
readonly windows_node_otherfiles="C:\\Windows\\MEMORY.dmp"
# Limit the number of concurrent node connections so that we don't run out of
# file descriptors for large clusters.
readonly max_dump_processes=25
# TODO: Get rid of all the sourcing of bash dependencies eventually.
# Prepares provider utilities and project settings:
#  - default flow: sources cluster/kube-util.sh and runs detect-project;
#  - custom instance list on gke: sources cluster/gce/util.sh only for ssh-to-node;
#  - custom instance list otherwise: requires LOG_DUMP_SSH_KEY and LOG_DUMP_SSH_USER.
# Sets the global KUBE_ROOT used throughout the script.
function setup() {
  KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
  if [[ -z "${use_custom_instance_list}" ]]; then
    # Default to the e2e test configuration when the caller didn't pick one.
    : ${KUBE_CONFIG_FILE:="config-test.sh"}
    echo "Sourcing kube-util.sh"
    source "${KUBE_ROOT}/cluster/kube-util.sh"
    echo "Detecting project"
    detect-project 2>&1
  elif [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
    echo "Using 'use_custom_instance_list' with gke, skipping check for LOG_DUMP_SSH_KEY and LOG_DUMP_SSH_USER"
    # Source the below script for the ssh-to-node utility function.
    # Hack to save and restore the value of the ZONE env as the script overwrites it.
    local gke_zone="${ZONE:-}"
    source "${KUBE_ROOT}/cluster/gce/util.sh"
    ZONE="${gke_zone}"
  elif [[ -z "${LOG_DUMP_SSH_KEY:-}" ]]; then
    echo "LOG_DUMP_SSH_KEY not set, but required when using log_dump_custom_get_instances"
    exit 1
  elif [[ -z "${LOG_DUMP_SSH_USER:-}" ]]; then
    echo "LOG_DUMP_SSH_USER not set, but required when using log_dump_custom_get_instances"
    exit 1
  fi
}
  79. function log-dump-ssh() {
  80. if [[ "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
  81. ssh-to-node "$@"
  82. return
  83. fi
  84. local host="$1"
  85. local cmd="$2"
  86. ssh -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${host}" "${cmd}"
  87. }
# Copy all files /var/log/{$3}.log on node $1 into local dir $2.
# $3 should be a space-separated string of files.
# This function shouldn't ever trigger errexit, but doesn't block stderr.
function copy-logs-from-node() {
  local -r node="${1}"
  local -r dir="${2}"
  # Intentionally unquoted: word-split the space-separated file list into an array.
  local files=( ${3} )

  # Append "*"
  # The * at the end is needed to also copy rotated logs (which happens
  # in large clusters and long runs).
  files=( "${files[@]/%/*}" )
  # Prepend "/var/log/"
  files=( "${files[@]/#/\/var\/log\/}" )

  # Comma delimit (even the singleton, or scp does the wrong thing), surround by braces.
  local -r scp_files="{$(printf "%s," "${files[@]}")}"

  if [[ "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
    # get-serial-port-output lets you ask for ports 1-4, but currently (11/21/2016) only port 1 contains useful information
    gcloud compute instances get-serial-port-output --project "${PROJECT}" --zone "${ZONE}" --port 1 "${node}" > "${dir}/serial-1.log" || true
    gcloud compute scp --recurse --project "${PROJECT}" --zone "${ZONE}" "${node}:${scp_files}" "${dir}" > /dev/null || true
  elif [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
    local ip=$(get_ssh_hostname "${node}")
    scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${ip}:${scp_files}" "${dir}" > /dev/null || true
  elif [[ -n "${use_custom_instance_list}" ]]; then
    scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${node}:${scp_files}" "${dir}" > /dev/null || true
  else
    echo "Unknown cloud-provider '${KUBERNETES_PROVIDER}' and use_custom_instance_list is unset too - skipping logdump for '${node}'"
  fi
}
# Save logs for node $1 into directory $2. Pass in any non-common files in $3.
# Pass in any non-common systemd services in $4.
# $3 and $4 should be a space-separated list of files.
# Set $5 to true to indicate it is on master. Default to false.
# This function shouldn't ever trigger errexit
function save-logs() {
  local -r node_name="${1}"
  local -r dir="${2}"
  local files="${3}"
  local opt_systemd_services="${4:-""}"
  local on_master="${5:-"false"}"

  # Extend the file list with provider/deployment specific logs.
  if [[ -n "${use_custom_instance_list}" ]]; then
    if [[ -n "${LOG_DUMP_SAVE_LOGS:-}" ]]; then
      files="${files} ${LOG_DUMP_SAVE_LOGS:-}"
    fi
  else
    case "${KUBERNETES_PROVIDER}" in
      gce|gke|kubernetes-anywhere)
        files="${files} ${gce_logfiles}"
        ;;
      aws)
        files="${files} ${aws_logfiles}"
        ;;
    esac
  fi

  # Intentionally unquoted: word-split the space-separated service lists.
  local -r services=( ${systemd_services} ${opt_systemd_services} ${LOG_DUMP_SAVE_SERVICES:-} )

  if log-dump-ssh "${node_name}" "command -v journalctl" &> /dev/null; then
    # systemd host: pull installation/configuration, kernel and per-unit journals.
    if [[ "${on_master}" == "true" ]]; then
      log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-master-installation.service" > "${dir}/kube-master-installation.log" || true
      log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-master-configuration.service" > "${dir}/kube-master-configuration.log" || true
    else
      log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-node-installation.service" > "${dir}/kube-node-installation.log" || true
      log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-node-configuration.service" > "${dir}/kube-node-configuration.log" || true
    fi
    log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -k" > "${dir}/kern.log" || true

    for svc in "${services[@]}"; do
      log-dump-ssh "${node_name}" "sudo journalctl --output=cat -u ${svc}.service" > "${dir}/${svc}.log" || true
    done

    if [[ "$dump_systemd_journal" == "true" ]]; then
      log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise" > "${dir}/systemd.log" || true
    fi
  else
    # Non-systemd host: fall back to flat files under /var/log.
    files="${kern_logfile} ${files} ${initd_logfiles} ${supervisord_logfiles}"
  fi

  # Try dumping coverage profiles, if it looks like coverage is enabled in the first place.
  if log-dump-ssh "${node_name}" "stat /var/log/kubelet.cov" &> /dev/null; then
    if log-dump-ssh "${node_name}" "command -v docker" &> /dev/null; then
      if [[ "${on_master}" == "true" ]]; then
        run-in-docker-container "${node_name}" "kube-apiserver" "cat /tmp/k8s-kube-apiserver.cov" > "${dir}/kube-apiserver.cov" || true
        run-in-docker-container "${node_name}" "kube-scheduler" "cat /tmp/k8s-kube-scheduler.cov" > "${dir}/kube-scheduler.cov" || true
        run-in-docker-container "${node_name}" "kube-controller-manager" "cat /tmp/k8s-kube-controller-manager.cov" > "${dir}/kube-controller-manager.cov" || true
      else
        run-in-docker-container "${node_name}" "kube-proxy" "cat /tmp/k8s-kube-proxy.cov" > "${dir}/kube-proxy.cov" || true
      fi
    else
      echo "Coverage profiles seem to exist, but cannot be retrieved from inside containers."
    fi
  fi

  echo "Changing logfiles to be world-readable for download"
  log-dump-ssh "${node_name}" "sudo chmod -R a+r /var/log" || true

  echo "Copying '${files}' from ${node_name}"
  copy-logs-from-node "${node_name}" "${dir}" "${files}"
}
  179. # Saves a copy of the Windows Docker event log to ${WINDOWS_LOGS_DIR}\docker.log
  180. # on node $1.
  181. function export-windows-docker-event-log() {
  182. local -r node="${1}"
  183. local -r powershell_cmd="powershell.exe -Command \$log=\$(Get-EventLog -LogName Application -Source Docker); Set-Content '${WINDOWS_LOGS_DIR}\\docker.log' \$log.Message"
  184. # Retry up to 3 times to allow ssh keys to be properly propagated and
  185. # stored.
  186. for retry in {1..3}; do
  187. if gcloud compute ssh --project "${PROJECT}" --zone "${ZONE}" "${node}" \
  188. --command "$powershell_cmd"; then
  189. break
  190. else
  191. sleep 10
  192. fi
  193. done
  194. }
# Save log files and serial console output from Windows node $1 into local
# directory $2.
# This function shouldn't ever trigger errexit.
function save-logs-windows() {
  local -r node="${1}"
  local -r dest_dir="${2}"

  if [[ ! "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
    echo "Not saving logs for ${node}, Windows log dumping requires gcloud support"
    return
  fi

  # Materialize the Docker event log as a file so it can be copied below.
  export-windows-docker-event-log "${node}"

  local remote_files=()
  for file in ${windows_node_logfiles[@]}; do
    remote_files+=( "${WINDOWS_LOGS_DIR}\\${file}" )
  done
  remote_files+=( "${windows_node_otherfiles[@]}" )

  # TODO(pjh, yujuhong): handle rotated logs and copying multiple files at the
  # same time.
  for remote_file in ${remote_files[@]}; do
    # Retry up to 3 times to allow ssh keys to be properly propagated and
    # stored.
    for retry in {1..3}; do
      if gcloud compute scp --recurse --project "${PROJECT}" \
        --zone "${ZONE}" "${node}:${remote_file}" "${dest_dir}" \
        > /dev/null; then
        break
      else
        sleep 10
      fi
    done
  done

  # Serial port 1 contains the Windows console output.
  gcloud compute instances get-serial-port-output --project "${PROJECT}" \
    --zone "${ZONE}" --port 1 "${node}" > "${dest_dir}/serial-1.log" || true
}
  230. # Execute a command in container $2 on node $1.
  231. # Uses docker because the container may not ordinarily permit direct execution.
  232. function run-in-docker-container() {
  233. local node_name="$1"
  234. local container="$2"
  235. shift 2
  236. log-dump-ssh "${node_name}" "docker exec \"\$(docker ps -f label=io.kubernetes.container.name=${container} --format \"{{.ID}}\")\" $@"
  237. }
# Dump logs from every master into ${report_dir}/<master-name>, fanning
# save-logs calls out in the background, at most ${max_dump_processes} at once.
function dump_masters() {
  local master_names
  # Resolve the master list: custom hook, kubemark master, or detect-master.
  if [[ -n "${use_custom_instance_list}" ]]; then
    # Intentionally unquoted: word-split the hook's whitespace-separated output.
    master_names=( $(log_dump_custom_get_instances master) )
  elif [[ ! "${master_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
    echo "Master SSH not supported for ${KUBERNETES_PROVIDER}"
    return
  elif [[ -n "${KUBEMARK_MASTER_NAME:-}" ]]; then
    master_names=( "${KUBEMARK_MASTER_NAME}" )
  else
    if ! (detect-master); then
      echo "Master not detected. Is the cluster up?"
      return
    fi
    master_names=( "${MASTER_NAME}" )
  fi

  if [[ "${#master_names[@]}" == 0 ]]; then
    echo "No masters found?"
    return
  fi

  proc=${max_dump_processes}
  for master_name in "${master_names[@]}"; do
    master_dir="${report_dir}/${master_name}"
    mkdir -p "${master_dir}"
    save-logs "${master_name}" "${master_dir}" "${master_logfiles}" "" "true" &

    # We don't want to run more than ${max_dump_processes} at a time, so
    # wait once we hit that many nodes. This isn't ideal, since one might
    # take much longer than the others, but it should help.
    proc=$((proc - 1))
    # Bare "proc" is evaluated arithmetically by [[ -eq ]].
    if [[ proc -eq 0 ]]; then
      proc=${max_dump_processes}
      wait
    fi
  done
  # Wait for any remaining processes.
  if [[ proc -gt 0 && proc -lt ${max_dump_processes} ]]; then
    wait
  fi
}
# Dumps logs from nodes in the cluster. Linux nodes to dump logs from can be
# specified via $1 or $use_custom_instance_list. If not specified then the nodes
# to dump logs for will be detected using detect-node-names(); if Windows nodes
# are present then they will be detected and their logs will be dumped too.
function dump_nodes() {
  local node_names=()
  local windows_node_names=()
  if [[ -n "${1:-}" ]]; then
    echo "Dumping logs for nodes provided as args to dump_nodes() function"
    node_names=( "$@" )
  elif [[ -n "${use_custom_instance_list}" ]]; then
    echo "Dumping logs for nodes provided by log_dump_custom_get_instances() function"
    # Intentionally unquoted: word-split the hook's whitespace-separated output.
    node_names=( $(log_dump_custom_get_instances node) )
  elif [[ ! "${node_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
    echo "Node SSH not supported for ${KUBERNETES_PROVIDER}"
    return
  else
    echo "Detecting nodes in the cluster"
    detect-node-names &> /dev/null
    if [[ -n "${NODE_NAMES:-}" ]]; then
      node_names=( "${NODE_NAMES[@]}" )
    fi
    if [[ -n "${WINDOWS_NODE_NAMES:-}" ]]; then
      windows_node_names=( "${WINDOWS_NODE_NAMES[@]}" )
    fi
  fi

  if [[ "${#node_names[@]}" == 0 && "${#windows_node_names[@]}" == 0 ]]; then
    echo "No nodes found!"
    return
  fi

  node_logfiles_all="${node_logfiles}"
  if [[ "${ENABLE_HOLLOW_NODE_LOGS:-}" == "true" ]]; then
    node_logfiles_all="${node_logfiles_all} ${hollow_node_logfiles}"
  fi

  linux_nodes_selected_for_logs=()
  if [[ -n "${LOGDUMP_ONLY_N_RANDOM_NODES:-}" ]]; then
    # We randomly choose 'LOGDUMP_ONLY_N_RANDOM_NODES' many nodes for fetching logs.
    for index in `shuf -i 0-$(( ${#node_names[*]} - 1 )) -n ${LOGDUMP_ONLY_N_RANDOM_NODES}`
    do
      linux_nodes_selected_for_logs+=("${node_names[$index]}")
    done
  else
    linux_nodes_selected_for_logs=( "${node_names[@]}" )
  fi
  # Linux nodes first, then Windows nodes; the index comparison in the loop
  # below relies on this ordering to pick the right dump helper.
  # NOTE(review): expanding an empty windows_node_names with "${arr[@]}" trips
  # "set -o nounset" on bash < 4.4 — confirm the minimum supported bash version.
  all_selected_nodes=( "${linux_nodes_selected_for_logs[@]}" )
  all_selected_nodes+=( "${windows_node_names[@]}" )

  proc=${max_dump_processes}
  for i in "${!all_selected_nodes[@]}"; do
    node_name="${all_selected_nodes[$i]}"
    node_dir="${report_dir}/${node_name}"
    mkdir -p "${node_dir}"
    if [[ "${i}" -lt "${#linux_nodes_selected_for_logs[@]}" ]]; then
      # Save logs in the background. This speeds up things when there are
      # many nodes.
      save-logs "${node_name}" "${node_dir}" "${node_logfiles_all}" "${node_systemd_services}" &
    else
      save-logs-windows "${node_name}" "${node_dir}" &
    fi
    # We don't want to run more than ${max_dump_processes} at a time, so
    # wait once we hit that many nodes. This isn't ideal, since one might
    # take much longer than the others, but it should help.
    proc=$((proc - 1))
    if [[ proc -eq 0 ]]; then
      proc=${max_dump_processes}
      wait
    fi
  done
  # Wait for any remaining processes.
  if [[ proc -gt 0 && proc -lt ${max_dump_processes} ]]; then
    wait
  fi
}
  349. # Collect names of nodes which didn't run logexporter successfully.
  350. # This function examines NODE_NAMES but not WINDOWS_NODE_NAMES since logexporter
  351. # does not run on Windows nodes.
  352. #
  353. # Note: This step is O(#nodes^2) as we check if each node is present in the list of succeeded nodes.
  354. # Making it linear would add code complexity without much benefit (as it just takes ~1s for 5k nodes).
  355. # Assumes:
  356. # NODE_NAMES
  357. # Sets:
  358. # NON_LOGEXPORTED_NODES
  359. function find_non_logexported_nodes() {
  360. succeeded_nodes=$(gsutil ls ${gcs_artifacts_dir}/logexported-nodes-registry) || return 1
  361. echo "Successfully listed marker files for successful nodes"
  362. NON_LOGEXPORTED_NODES=()
  363. for node in "${NODE_NAMES[@]}"; do
  364. if [[ ! "${succeeded_nodes}" =~ "${node}" ]]; then
  365. NON_LOGEXPORTED_NODES+=("${node}")
  366. fi
  367. done
  368. }
  369. # This function examines NODE_NAMES but not WINDOWS_NODE_NAMES since logexporter
  370. # does not run on Windows nodes.
  371. function dump_nodes_with_logexporter() {
  372. if [[ -n "${use_custom_instance_list}" ]]; then
  373. echo "Dumping logs for nodes provided by log_dump_custom_get_instances() function"
  374. NODE_NAMES=( $(log_dump_custom_get_instances node) )
  375. else
  376. echo "Detecting nodes in the cluster"
  377. detect-node-names &> /dev/null
  378. fi
  379. if [[ -z "${NODE_NAMES:-}" ]]; then
  380. echo "No nodes found!"
  381. return
  382. fi
  383. # Obtain parameters required by logexporter.
  384. local -r service_account_credentials="$(cat ${GOOGLE_APPLICATION_CREDENTIALS} | base64 | tr -d '\n')"
  385. local -r cloud_provider="${KUBERNETES_PROVIDER}"
  386. local -r enable_hollow_node_logs="${ENABLE_HOLLOW_NODE_LOGS:-false}"
  387. local -r logexport_sleep_seconds="$(( 90 + NUM_NODES / 3 ))"
  388. # Fill in the parameters in the logexporter daemonset template.
  389. sed -i'' -e "s@{{.LogexporterNamespace}}@${logexporter_namespace}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
  390. sed -i'' -e "s@{{.ServiceAccountCredentials}}@${service_account_credentials}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
  391. sed -i'' -e "s@{{.CloudProvider}}@${cloud_provider}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
  392. sed -i'' -e "s@{{.GCSPath}}@${gcs_artifacts_dir}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
  393. sed -i'' -e "s@{{.EnableHollowNodeLogs}}@${enable_hollow_node_logs}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
  394. sed -i'' -e "s@{{.DumpSystemdJournal}}@${dump_systemd_journal}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
  395. # Create the logexporter namespace, service-account secret and the logexporter daemonset within that namespace.
  396. KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
  397. if ! "${KUBECTL}" create -f "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"; then
  398. echo "Failed to create logexporter daemonset.. falling back to logdump through SSH"
  399. "${KUBECTL}" delete namespace "${logexporter_namespace}" || true
  400. dump_nodes "${NODE_NAMES[@]}"
  401. return
  402. fi
  403. # Periodically fetch list of already logexported nodes to verify
  404. # if we aren't already done.
  405. start="$(date +%s)"
  406. while true; do
  407. now="$(date +%s)"
  408. if [[ $((now - start)) -gt ${logexport_sleep_seconds} ]]; then
  409. echo "Waiting for all nodes to be logexported timed out."
  410. break
  411. fi
  412. if find_non_logexported_nodes; then
  413. if [[ -z "${NON_LOGEXPORTED_NODES:-}" ]]; then
  414. break
  415. fi
  416. fi
  417. sleep 15
  418. done
  419. # Store logs from logexporter pods to allow debugging log exporting process
  420. # itself.
  421. proc=${max_dump_processes}
  422. "${KUBECTL}" get pods -n "${logexporter_namespace}" -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}' | while read pod node; do
  423. echo "Fetching logs from ${pod} running on ${node}"
  424. mkdir -p ${report_dir}/${node}
  425. "${KUBECTL}" logs -n "${logexporter_namespace}" ${pod} > ${report_dir}/${node}/${pod}.log &
  426. # We don't want to run more than ${max_dump_processes} at a time, so
  427. # wait once we hit that many nodes. This isn't ideal, since one might
  428. # take much longer than the others, but it should help.
  429. proc=$((proc - 1))
  430. if [[ proc -eq 0 ]]; then
  431. proc=${max_dump_processes}
  432. wait
  433. fi
  434. done
  435. # Wait for any remaining processes.
  436. if [[ proc -gt 0 && proc -lt ${max_dump_processes} ]]; then
  437. wait
  438. fi
  439. # List registry of marker files (of nodes whose logexporter succeeded) from GCS.
  440. local nodes_succeeded
  441. for retry in {1..10}; do
  442. if find_non_logexported_nodes; then
  443. break
  444. else
  445. echo "Attempt ${retry} failed to list marker files for succeessful nodes"
  446. if [[ "${retry}" == 10 ]]; then
  447. echo "Final attempt to list marker files failed.. falling back to logdump through SSH"
  448. "${KUBECTL}" delete namespace "${logexporter_namespace}" || true
  449. dump_nodes "${NODE_NAMES[@]}"
  450. return
  451. fi
  452. sleep 2
  453. fi
  454. done
  455. failed_nodes=()
  456. # The following if is needed, because defaulting for empty arrays
  457. # seems to treat them as non-empty with single empty string.
  458. if [[ -n "${NON_LOGEXPORTED_NODES:-}" ]]; then
  459. for node in "${NON_LOGEXPORTED_NODES[@]:-}"; do
  460. echo "Logexporter didn't succeed on node ${node}. Queuing it for logdump through SSH."
  461. failed_nodes+=("${node}")
  462. done
  463. fi
  464. # Delete the logexporter resources and dump logs for the failed nodes (if any) through SSH.
  465. "${KUBECTL}" get pods --namespace "${logexporter_namespace}" || true
  466. "${KUBECTL}" delete namespace "${logexporter_namespace}" || true
  467. if [[ "${#failed_nodes[@]}" != 0 ]]; then
  468. echo -e "Dumping logs through SSH for the following nodes:\n${failed_nodes[@]}"
  469. dump_nodes "${failed_nodes[@]}"
  470. fi
  471. }
  472. function detect_node_failures() {
  473. if ! [[ "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
  474. return
  475. fi
  476. detect-node-names
  477. if [[ "${KUBERNETES_PROVIDER}" == "gce" ]]; then
  478. local all_instance_groups=(${INSTANCE_GROUPS[@]} ${WINDOWS_INSTANCE_GROUPS[@]})
  479. else
  480. local all_instance_groups=(${INSTANCE_GROUPS[@]})
  481. fi
  482. if [ -z "${all_instance_groups:-}" ]; then
  483. return
  484. fi
  485. for group in "${all_instance_groups[@]}"; do
  486. local creation_timestamp=$(gcloud compute instance-groups managed describe \
  487. "${group}" \
  488. --project "${PROJECT}" \
  489. --zone "${ZONE}" \
  490. --format='value(creationTimestamp)')
  491. echo "Failures for ${group}"
  492. gcloud logging read --order=asc \
  493. --format='table(timestamp,jsonPayload.resource.name,jsonPayload.event_subtype)' \
  494. --project "${PROJECT}" \
  495. "resource.type=\"gce_instance\"
  496. logName=\"projects/${PROJECT}/logs/compute.googleapis.com%2Factivity_log\"
  497. (jsonPayload.event_subtype=\"compute.instances.hostError\" OR jsonPayload.event_subtype=\"compute.instances.automaticRestart\")
  498. jsonPayload.resource.name:\"${group}\"
  499. timestamp >= \"${creation_timestamp}\""
  500. done
  501. }
  502. function main() {
  503. setup
  504. # Copy master logs to artifacts dir locally (through SSH).
  505. echo "Dumping logs from master locally to '${report_dir}'"
  506. dump_masters
  507. if [[ "${DUMP_ONLY_MASTER_LOGS:-}" == "true" ]]; then
  508. echo "Skipping dumping of node logs"
  509. return
  510. fi
  511. # Copy logs from nodes to GCS directly or to artifacts dir locally (through SSH).
  512. if [[ -n "${gcs_artifacts_dir}" ]]; then
  513. echo "Dumping logs from nodes to GCS directly at '${gcs_artifacts_dir}' using logexporter"
  514. dump_nodes_with_logexporter
  515. else
  516. echo "Dumping logs from nodes locally to '${report_dir}'"
  517. dump_nodes
  518. fi
  519. detect_node_failures
  520. }
  521. main