log-dump.sh 25 KB

  1. #!/usr/bin/env bash
  2. # Copyright 2017 The Kubernetes Authors.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. # Call this to dump all master and node logs into the folder specified in $1
  16. # (defaults to _artifacts). Only works if the provider supports SSH.
  17. # TODO(shyamjvs): This script should be moved to test/e2e which is where it ideally belongs.
set -o errexit
set -o nounset
set -o pipefail
# $1: local directory to place dumped logs in.
readonly report_dir="${1:-_artifacts}"
# $2: optional GCS path; when non-empty, node logs are exported there via logexporter.
readonly gcs_artifacts_dir="${2:-}"
# $3: namespace in which to run the logexporter daemonset.
readonly logexporter_namespace="${3:-logexporter}"
# In order to more trivially extend log-dump for custom deployments,
# check for a function named log_dump_custom_get_instances. If it's
# defined, we assume the function can be called with one argument, the
# role, which is either "master" or "node".
echo "Checking for custom logdump instances, if any"
if [[ $(type -t log_dump_custom_get_instances) == "function" ]]; then
readonly use_custom_instance_list=yes
else
readonly use_custom_instance_list=
fi
# Space-separated provider lists; membership is checked with a substring match.
readonly master_ssh_supported_providers="gce aws kubernetes-anywhere"
readonly node_ssh_supported_providers="gce gke aws kubernetes-anywhere"
readonly gcloud_supported_providers="gce gke kubernetes-anywhere"
# Log files (under /var/log) collected from masters and nodes respectively.
readonly master_logfiles="kube-apiserver.log kube-apiserver-audit.log kube-scheduler.log kube-controller-manager.log etcd.log etcd-events.log glbc.log cluster-autoscaler.log kube-addon-manager.log konnectivity-server.log fluentd.log kubelet.cov"
readonly node_logfiles="kube-proxy.log fluentd.log node-problem-detector.log kubelet.cov"
readonly node_systemd_services="node-problem-detector"
# Hollow-node logs are only collected when ENABLE_HOLLOW_NODE_LOGS=true (kubemark).
readonly hollow_node_logfiles="kubelet-hollow-node-*.log kubeproxy-hollow-node-*.log npd-hollow-node-*.log"
readonly aws_logfiles="cloud-init-output.log"
readonly gce_logfiles="startupscript.log"
readonly kern_logfile="kern.log"
readonly initd_logfiles="docker/log"
readonly supervisord_logfiles="kubelet.log supervisor/supervisord.log supervisor/kubelet-stdout.log supervisor/kubelet-stderr.log supervisor/docker-stdout.log supervisor/docker-stderr.log"
# Extra services may be appended via LOG_DUMP_SYSTEMD_SERVICES (default: docker).
readonly systemd_services="kubelet kubelet-monitor kube-container-runtime-monitor ${LOG_DUMP_SYSTEMD_SERVICES:-docker}"
# When true, dump the entire systemd journal per node (large!).
readonly dump_systemd_journal="${LOG_DUMP_SYSTEMD_JOURNAL:-false}"
# Log files found in WINDOWS_LOGS_DIR on Windows nodes:
readonly windows_node_logfiles="kubelet.log kube-proxy.log docker.log"
# Log files found in other directories on Windows nodes:
readonly windows_node_otherfiles="C:\\Windows\\MEMORY.dmp"
# Limit the number of concurrent node connections so that we don't run out of
# file descriptors for large clusters.
readonly max_dump_processes=25
  55. # TODO: Get rid of all the sourcing of bash dependencies eventually.
  56. function setup() {
  57. KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
  58. if [[ -z "${use_custom_instance_list}" ]]; then
  59. : ${KUBE_CONFIG_FILE:="config-test.sh"}
  60. echo "Sourcing kube-util.sh"
  61. source "${KUBE_ROOT}/cluster/kube-util.sh"
  62. echo "Detecting project"
  63. detect-project 2>&1
  64. elif [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
  65. echo "Using 'use_custom_instance_list' with gke, skipping check for LOG_DUMP_SSH_KEY and LOG_DUMP_SSH_USER"
  66. # Source the below script for the ssh-to-node utility function.
  67. # Hack to save and restore the value of the ZONE env as the script overwrites it.
  68. local gke_zone="${ZONE:-}"
  69. source "${KUBE_ROOT}/cluster/gce/util.sh"
  70. ZONE="${gke_zone}"
  71. elif [[ -z "${LOG_DUMP_SSH_KEY:-}" ]]; then
  72. echo "LOG_DUMP_SSH_KEY not set, but required when using log_dump_custom_get_instances"
  73. exit 1
  74. elif [[ -z "${LOG_DUMP_SSH_USER:-}" ]]; then
  75. echo "LOG_DUMP_SSH_USER not set, but required when using log_dump_custom_get_instances"
  76. exit 1
  77. fi
  78. }
  79. function log-dump-ssh() {
  80. if [[ "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
  81. ssh-to-node "$@"
  82. return
  83. fi
  84. local host="$1"
  85. local cmd="$2"
  86. ssh -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${host}" "${cmd}"
  87. }
  88. # Copy all files /var/log/{$3}.log on node $1 into local dir $2.
  89. # $3 should be a space-separated string of files.
  90. # This function shouldn't ever trigger errexit, but doesn't block stderr.
  91. function copy-logs-from-node() {
  92. local -r node="${1}"
  93. local -r dir="${2}"
  94. local files=( ${3} )
  95. # Append "*"
  96. # The * at the end is needed to also copy rotated logs (which happens
  97. # in large clusters and long runs).
  98. files=( "${files[@]/%/*}" )
  99. # Prepend "/var/log/"
  100. files=( "${files[@]/#/\/var\/log\/}" )
  101. # Comma delimit (even the singleton, or scp does the wrong thing), surround by braces.
  102. local -r scp_files="{$(printf "%s," "${files[@]}")}"
  103. if [[ "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
  104. # get-serial-port-output lets you ask for ports 1-4, but currently (11/21/2016) only port 1 contains useful information
  105. gcloud compute instances get-serial-port-output --project "${PROJECT}" --zone "${ZONE}" --port 1 "${node}" > "${dir}/serial-1.log" || true
  106. gcloud compute scp --recurse --project "${PROJECT}" --zone "${ZONE}" "${node}:${scp_files}" "${dir}" > /dev/null || true
  107. elif [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
  108. local ip=$(get_ssh_hostname "${node}")
  109. scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${ip}:${scp_files}" "${dir}" > /dev/null || true
  110. elif [[ -n "${use_custom_instance_list}" ]]; then
  111. scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${node}:${scp_files}" "${dir}" > /dev/null || true
  112. else
  113. echo "Unknown cloud-provider '${KUBERNETES_PROVIDER}' and use_custom_instance_list is unset too - skipping logdump for '${node}'"
  114. fi
  115. }
  116. # Save logs for node $1 into directory $2. Pass in any non-common files in $3.
  117. # Pass in any non-common systemd services in $4.
  118. # $3 and $4 should be a space-separated list of files.
  119. # Set $5 to true to indicate it is on master. Default to false.
  120. # This function shouldn't ever trigger errexit
function save-logs() {
local -r node_name="${1}"
local -r dir="${2}"
local files="${3}"
# $4: optional extra systemd services to capture; $5: "true" when on master.
local opt_systemd_services="${4:-""}"
local on_master="${5:-"false"}"
# Augment the file list: user-supplied extras for custom deployments,
# otherwise provider-specific log files.
if [[ -n "${use_custom_instance_list}" ]]; then
if [[ -n "${LOG_DUMP_SAVE_LOGS:-}" ]]; then
files="${files} ${LOG_DUMP_SAVE_LOGS:-}"
fi
else
case "${KUBERNETES_PROVIDER}" in
gce|gke|kubernetes-anywhere)
files="${files} ${gce_logfiles}"
;;
aws)
files="${files} ${aws_logfiles}"
;;
esac
fi
# Intentional word-splitting: these are space-separated service name lists.
local -r services=( ${systemd_services} ${opt_systemd_services} ${LOG_DUMP_SAVE_SERVICES:-} )
# On systemd hosts pull logs from the journal; otherwise fall back to the
# classic initd/supervisord log files on disk (appended to ${files} below).
if log-dump-ssh "${node_name}" "command -v journalctl" &> /dev/null; then
if [[ "${on_master}" == "true" ]]; then
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-master-installation.service" > "${dir}/kube-master-installation.log" || true
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-master-configuration.service" > "${dir}/kube-master-configuration.log" || true
else
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-node-installation.service" > "${dir}/kube-node-installation.log" || true
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-node-configuration.service" > "${dir}/kube-node-configuration.log" || true
fi
# Kernel ring buffer (-k), then one log file per requested service.
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -k" > "${dir}/kern.log" || true
for svc in "${services[@]}"; do
log-dump-ssh "${node_name}" "sudo journalctl --output=cat -u ${svc}.service" > "${dir}/${svc}.log" || true
done
if [[ "$dump_systemd_journal" == "true" ]]; then
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise" > "${dir}/systemd.log" || true
fi
else
files="${kern_logfile} ${files} ${initd_logfiles} ${supervisord_logfiles}"
fi
# Try dumping coverage profiles, if it looks like coverage is enabled in the first place.
if log-dump-ssh "${node_name}" "stat /var/log/kubelet.cov" &> /dev/null; then
if log-dump-ssh "${node_name}" "command -v docker" &> /dev/null; then
if [[ "${on_master}" == "true" ]]; then
run-in-docker-container "${node_name}" "kube-apiserver" "cat /tmp/k8s-kube-apiserver.cov" > "${dir}/kube-apiserver.cov" || true
run-in-docker-container "${node_name}" "kube-scheduler" "cat /tmp/k8s-kube-scheduler.cov" > "${dir}/kube-scheduler.cov" || true
run-in-docker-container "${node_name}" "kube-controller-manager" "cat /tmp/k8s-kube-controller-manager.cov" > "${dir}/kube-controller-manager.cov" || true
else
run-in-docker-container "${node_name}" "kube-proxy" "cat /tmp/k8s-kube-proxy.cov" > "${dir}/kube-proxy.cov" || true
fi
else
echo "Coverage profiles seem to exist, but cannot be retrieved from inside containers."
fi
fi
echo "Changing logfiles to be world-readable for download"
# Logs may be root-owned; relax permissions so copying as the ssh user works.
log-dump-ssh "${node_name}" "sudo chmod -R a+r /var/log" || true
echo "Copying '${files}' from ${node_name}"
copy-logs-from-node "${node_name}" "${dir}" "${files}"
}
  179. # Saves a copy of the Windows Docker event log to ${WINDOWS_LOGS_DIR}\docker.log
  180. # on node $1.
  181. function export-windows-docker-event-log() {
  182. local -r node="${1}"
  183. local -r powershell_cmd="powershell.exe -Command \"\$logs=\$(Get-EventLog -LogName Application -Source Docker | Format-Table -Property TimeGenerated, EntryType, Message -Wrap); \$logs | Out-File -FilePath '${WINDOWS_LOGS_DIR}\\docker.log'\""
  184. # Retry up to 3 times to allow ssh keys to be properly propagated and
  185. # stored.
  186. for retry in {1..3}; do
  187. if gcloud compute ssh --project "${PROJECT}" --zone "${ZONE}" "${node}" \
  188. --command "$powershell_cmd"; then
  189. break
  190. else
  191. sleep 10
  192. fi
  193. done
  194. }
  195. # Saves log files from diagnostics tool.(https://github.com/GoogleCloudPlatform/compute-image-tools/tree/master/cli_tools/diagnostics)
  196. function save-windows-logs-via-diagnostics-tool() {
  197. local node="${1}"
  198. local dest_dir="${2}"
  199. gcloud compute instances add-metadata ${node} --metadata enable-diagnostics=true --project=${PROJECT} --zone=${ZONE}
  200. local logs_archive_in_gcs=$(gcloud alpha compute diagnose export-logs ${node} --zone=${ZONE} --project=${PROJECT} | tail -n 1)
  201. local temp_local_path="${node}.zip"
  202. for retry in {1..20}; do
  203. if gsutil mv "${logs_archive_in_gcs}" "${temp_local_path}" > /dev/null 2>&1; then
  204. echo "Downloaded diagnostics log from ${logs_archive_in_gcs}"
  205. break
  206. else
  207. sleep 10
  208. fi
  209. done
  210. if [[ -f "${temp_local_path}" ]]; then
  211. unzip ${temp_local_path} -d "${dest_dir}" > /dev/null
  212. rm -f ${temp_local_path}
  213. fi
  214. }
  215. # Saves log files from SSH
  216. function save-windows-logs-via-ssh() {
  217. local node="${1}"
  218. local dest_dir="${2}"
  219. export-windows-docker-event-log "${node}"
  220. local remote_files=()
  221. for file in ${windows_node_logfiles[@]}; do
  222. remote_files+=( "${WINDOWS_LOGS_DIR}\\${file}" )
  223. done
  224. remote_files+=( "${windows_node_otherfiles[@]}" )
  225. # TODO(pjh, yujuhong): handle rotated logs and copying multiple files at the
  226. # same time.
  227. for remote_file in ${remote_files[@]}; do
  228. # Retry up to 3 times to allow ssh keys to be properly propagated and
  229. # stored.
  230. for retry in {1..3}; do
  231. if gcloud compute scp --recurse --project "${PROJECT}" \
  232. --zone "${ZONE}" "${node}:${remote_file}" "${dest_dir}" \
  233. > /dev/null; then
  234. break
  235. else
  236. sleep 10
  237. fi
  238. done
  239. done
  240. }
  241. # Save log files and serial console output from Windows node $1 into local
  242. # directory $2.
  243. # This function shouldn't ever trigger errexit.
  244. function save-logs-windows() {
  245. local -r node="${1}"
  246. local -r dest_dir="${2}"
  247. if [[ ! "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
  248. echo "Not saving logs for ${node}, Windows log dumping requires gcloud support"
  249. return
  250. fi
  251. if [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
  252. save-windows-logs-via-diagnostics-tool "${node}" "${dest_dir}"
  253. else
  254. save-windows-logs-via-ssh "${node}" "${dest_dir}"
  255. fi
  256. # Serial port 1 contains the Windows console output.
  257. gcloud compute instances get-serial-port-output --project "${PROJECT}" \
  258. --zone "${ZONE}" --port 1 "${node}" > "${dest_dir}/serial-1.log" || true
  259. }
  260. # Execute a command in container $2 on node $1.
  261. # Uses docker because the container may not ordinarily permit direct execution.
  262. function run-in-docker-container() {
  263. local node_name="$1"
  264. local container="$2"
  265. shift 2
  266. log-dump-ssh "${node_name}" "docker exec \"\$(docker ps -f label=io.kubernetes.container.name=${container} --format \"{{.ID}}\")\" $@"
  267. }
# Dump logs from every master into ${report_dir}/<master-name>, fanning out
# up to max_dump_processes background save-logs jobs at a time.
function dump_masters() {
local master_names
# Master selection precedence: custom list > kubemark master > detect-master.
if [[ -n "${use_custom_instance_list}" ]]; then
master_names=( $(log_dump_custom_get_instances master) )
elif [[ ! "${master_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
echo "Master SSH not supported for ${KUBERNETES_PROVIDER}"
return
elif [[ -n "${KUBEMARK_MASTER_NAME:-}" ]]; then
master_names=( "${KUBEMARK_MASTER_NAME}" )
else
# detect-master runs in a subshell so its failure doesn't trip errexit.
if ! (detect-master); then
echo "Master not detected. Is the cluster up?"
return
fi
master_names=( "${MASTER_NAME}" )
fi
if [[ "${#master_names[@]}" == 0 ]]; then
echo "No masters found?"
return
fi
proc=${max_dump_processes}
for master_name in "${master_names[@]}"; do
master_dir="${report_dir}/${master_name}"
mkdir -p "${master_dir}"
# "true" marks this as a master so save-logs picks the master log set.
save-logs "${master_name}" "${master_dir}" "${master_logfiles}" "" "true" &
# We don't want to run more than ${max_dump_processes} at a time, so
# wait once we hit that many nodes. This isn't ideal, since one might
# take much longer than the others, but it should help.
proc=$((proc - 1))
if [[ proc -eq 0 ]]; then
proc=${max_dump_processes}
wait
fi
done
# Wait for any remaining processes.
if [[ proc -gt 0 && proc -lt ${max_dump_processes} ]]; then
wait
fi
}
  307. # Dumps logs from nodes in the cluster. Linux nodes to dump logs from can be
  308. # specified via $1 or $use_custom_instance_list. If not specified then the nodes
  309. # to dump logs for will be detected using detect-node-names(); if Windows nodes
  310. # are present then they will be detected and their logs will be dumped too.
function dump_nodes() {
local node_names=()
local windows_node_names=()
# Node selection precedence: explicit args > custom list > provider detection.
if [[ -n "${1:-}" ]]; then
echo "Dumping logs for nodes provided as args to dump_nodes() function"
node_names=( "$@" )
elif [[ -n "${use_custom_instance_list}" ]]; then
echo "Dumping logs for nodes provided by log_dump_custom_get_instances() function"
node_names=( $(log_dump_custom_get_instances node) )
elif [[ ! "${node_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
echo "Node SSH not supported for ${KUBERNETES_PROVIDER}"
return
else
echo "Detecting nodes in the cluster"
detect-node-names &> /dev/null
if [[ -n "${NODE_NAMES:-}" ]]; then
node_names=( "${NODE_NAMES[@]}" )
fi
if [[ -n "${WINDOWS_NODE_NAMES:-}" ]]; then
windows_node_names=( "${WINDOWS_NODE_NAMES[@]}" )
fi
fi
if [[ "${#node_names[@]}" == 0 && "${#windows_node_names[@]}" == 0 ]]; then
echo "No nodes found!"
return
fi
node_logfiles_all="${node_logfiles}"
if [[ "${ENABLE_HOLLOW_NODE_LOGS:-}" == "true" ]]; then
node_logfiles_all="${node_logfiles_all} ${hollow_node_logfiles}"
fi
linux_nodes_selected_for_logs=()
if [[ -n "${LOGDUMP_ONLY_N_RANDOM_NODES:-}" ]]; then
# We randomly choose 'LOGDUMP_ONLY_N_RANDOM_NODES' many nodes for fetching logs.
for index in `shuf -i 0-$(( ${#node_names[*]} - 1 )) -n ${LOGDUMP_ONLY_N_RANDOM_NODES}`
do
linux_nodes_selected_for_logs+=("${node_names[$index]}")
done
else
linux_nodes_selected_for_logs=( "${node_names[@]}" )
fi
# Linux nodes come first; indices >= #linux nodes are Windows nodes.
all_selected_nodes=( "${linux_nodes_selected_for_logs[@]}" )
all_selected_nodes+=( "${windows_node_names[@]}" )
proc=${max_dump_processes}
for i in "${!all_selected_nodes[@]}"; do
node_name="${all_selected_nodes[$i]}"
node_dir="${report_dir}/${node_name}"
mkdir -p "${node_dir}"
if [[ "${i}" -lt "${#linux_nodes_selected_for_logs[@]}" ]]; then
# Save logs in the background. This speeds up things when there are
# many nodes.
save-logs "${node_name}" "${node_dir}" "${node_logfiles_all}" "${node_systemd_services}" &
else
save-logs-windows "${node_name}" "${node_dir}" &
fi
# We don't want to run more than ${max_dump_processes} at a time, so
# wait once we hit that many nodes. This isn't ideal, since one might
# take much longer than the others, but it should help.
proc=$((proc - 1))
if [[ proc -eq 0 ]]; then
proc=${max_dump_processes}
wait
fi
done
# Wait for any remaining processes.
if [[ proc -gt 0 && proc -lt ${max_dump_processes} ]]; then
wait
fi
}
  379. # Collect names of nodes which didn't run logexporter successfully.
  380. # This function examines NODE_NAMES but not WINDOWS_NODE_NAMES since logexporter
  381. # does not run on Windows nodes.
  382. #
  383. # Note: This step is O(#nodes^2) as we check if each node is present in the list of succeeded nodes.
  384. # Making it linear would add code complexity without much benefit (as it just takes ~1s for 5k nodes).
  385. # Assumes:
  386. # NODE_NAMES
  387. # Sets:
  388. # NON_LOGEXPORTED_NODES
  389. function find_non_logexported_nodes() {
  390. succeeded_nodes=$(gsutil ls ${gcs_artifacts_dir}/logexported-nodes-registry) || return 1
  391. echo "Successfully listed marker files for successful nodes"
  392. NON_LOGEXPORTED_NODES=()
  393. for node in "${NODE_NAMES[@]}"; do
  394. if [[ ! "${succeeded_nodes}" =~ "${node}" ]]; then
  395. NON_LOGEXPORTED_NODES+=("${node}")
  396. fi
  397. done
  398. }
  399. # This function examines NODE_NAMES but not WINDOWS_NODE_NAMES since logexporter
  400. # does not run on Windows nodes.
function dump_nodes_with_logexporter() {
# Determine NODE_NAMES: custom instance function or provider detection.
if [[ -n "${use_custom_instance_list}" ]]; then
echo "Dumping logs for nodes provided by log_dump_custom_get_instances() function"
NODE_NAMES=( $(log_dump_custom_get_instances node) )
else
echo "Detecting nodes in the cluster"
detect-node-names &> /dev/null
fi
if [[ -z "${NODE_NAMES:-}" ]]; then
echo "No nodes found!"
return
fi
# Obtain parameters required by logexporter.
local -r service_account_credentials="$(cat ${GOOGLE_APPLICATION_CREDENTIALS} | base64 | tr -d '\n')"
local -r cloud_provider="${KUBERNETES_PROVIDER}"
local -r enable_hollow_node_logs="${ENABLE_HOLLOW_NODE_LOGS:-false}"
# Timeout grows with cluster size: 90s base plus 1s per 3 nodes.
local -r logexport_sleep_seconds="$(( 90 + NUM_NODES / 3 ))"
# Fill in the parameters in the logexporter daemonset template.
sed -i'' -e "s@{{.LogexporterNamespace}}@${logexporter_namespace}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
sed -i'' -e "s@{{.ServiceAccountCredentials}}@${service_account_credentials}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
sed -i'' -e "s@{{.CloudProvider}}@${cloud_provider}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
sed -i'' -e "s@{{.GCSPath}}@${gcs_artifacts_dir}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
sed -i'' -e "s@{{.EnableHollowNodeLogs}}@${enable_hollow_node_logs}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
sed -i'' -e "s@{{.DumpSystemdJournal}}@${dump_systemd_journal}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
# Create the logexporter namespace, service-account secret and the logexporter daemonset within that namespace.
KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
# Fall back to SSH-based dumping if the daemonset cannot be created.
if ! "${KUBECTL}" create -f "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"; then
echo "Failed to create logexporter daemonset.. falling back to logdump through SSH"
"${KUBECTL}" delete namespace "${logexporter_namespace}" || true
dump_nodes "${NODE_NAMES[@]}"
return
fi
# Periodically fetch list of already logexported nodes to verify
# if we aren't already done.
start="$(date +%s)"
while true; do
now="$(date +%s)"
if [[ $((now - start)) -gt ${logexport_sleep_seconds} ]]; then
echo "Waiting for all nodes to be logexported timed out."
break
fi
if find_non_logexported_nodes; then
if [[ -z "${NON_LOGEXPORTED_NODES:-}" ]]; then
break
fi
fi
sleep 15
done
# Store logs from logexporter pods to allow debugging log exporting process
# itself.
proc=${max_dump_processes}
# NOTE: the while loop runs in a subshell (right side of the pipe), so the
# 'proc' throttling counter changes inside it do not leak back out.
"${KUBECTL}" get pods -n "${logexporter_namespace}" -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}' | (while read -r pod node; do
echo "Fetching logs from ${pod} running on ${node}"
mkdir -p "${report_dir}/${node}"
"${KUBECTL}" logs -n "${logexporter_namespace}" "${pod}" > "${report_dir}/${node}/${pod}.log" &
# We don't want to run more than ${max_dump_processes} at a time, so
# wait once we hit that many nodes. This isn't ideal, since one might
# take much longer than the others, but it should help.
proc=$((proc - 1))
if [[ proc -eq 0 ]]; then
proc=${max_dump_processes}
wait
fi
# Wait for any remaining processes.
done; wait)
# List registry of marker files (of nodes whose logexporter succeeded) from GCS.
local nodes_succeeded
for retry in {1..10}; do
if find_non_logexported_nodes; then
break
else
echo "Attempt ${retry} failed to list marker files for successful nodes"
if [[ "${retry}" == 10 ]]; then
echo "Final attempt to list marker files failed.. falling back to logdump through SSH"
"${KUBECTL}" delete namespace "${logexporter_namespace}" || true
dump_nodes "${NODE_NAMES[@]}"
return
fi
sleep 2
fi
done
failed_nodes=()
# The following if is needed, because defaulting for empty arrays
# seems to treat them as non-empty with single empty string.
if [[ -n "${NON_LOGEXPORTED_NODES:-}" ]]; then
for node in "${NON_LOGEXPORTED_NODES[@]:-}"; do
echo "Logexporter didn't succeed on node ${node}. Queuing it for logdump through SSH."
failed_nodes+=("${node}")
done
fi
# Delete the logexporter resources and dump logs for the failed nodes (if any) through SSH.
"${KUBECTL}" get pods --namespace "${logexporter_namespace}" || true
"${KUBECTL}" delete namespace "${logexporter_namespace}" || true
if [[ "${#failed_nodes[@]}" != 0 ]]; then
echo -e "Dumping logs through SSH for the following nodes:\n${failed_nodes[@]}"
dump_nodes "${failed_nodes[@]}"
fi
}
  499. function detect_node_failures() {
  500. if ! [[ "${gcloud_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
  501. return
  502. fi
  503. detect-node-names
  504. if [[ "${KUBERNETES_PROVIDER}" == "gce" ]]; then
  505. local all_instance_groups=(${INSTANCE_GROUPS[@]} ${WINDOWS_INSTANCE_GROUPS[@]})
  506. else
  507. local all_instance_groups=(${INSTANCE_GROUPS[@]})
  508. fi
  509. if [ -z "${all_instance_groups:-}" ]; then
  510. return
  511. fi
  512. for group in "${all_instance_groups[@]}"; do
  513. local creation_timestamp=$(gcloud compute instance-groups managed describe \
  514. "${group}" \
  515. --project "${PROJECT}" \
  516. --zone "${ZONE}" \
  517. --format='value(creationTimestamp)')
  518. echo "Failures for ${group} (if any):"
  519. gcloud logging read --order=asc \
  520. --format='table(timestamp,jsonPayload.resource.name,jsonPayload.event_subtype)' \
  521. --project "${PROJECT}" \
  522. "resource.type=\"gce_instance\"
  523. logName=\"projects/${PROJECT}/logs/compute.googleapis.com%2Factivity_log\"
  524. (jsonPayload.event_subtype=\"compute.instances.hostError\" OR jsonPayload.event_subtype=\"compute.instances.automaticRestart\")
  525. jsonPayload.resource.name:\"${group}\"
  526. timestamp >= \"${creation_timestamp}\""
  527. done
  528. }
# Entry point: dump master logs, then node logs (via logexporter when a GCS
# destination was supplied, otherwise over SSH), then report node failures.
function main() {
setup
# Copy master logs to artifacts dir locally (through SSH).
echo "Dumping logs from master locally to '${report_dir}'"
dump_masters
if [[ "${DUMP_ONLY_MASTER_LOGS:-}" == "true" ]]; then
echo "Skipping dumping of node logs"
return
fi
# Copy logs from nodes to GCS directly or to artifacts dir locally (through SSH).
if [[ -n "${gcs_artifacts_dir}" ]]; then
echo "Dumping logs from nodes to GCS directly at '${gcs_artifacts_dir}' using logexporter"
dump_nodes_with_logexporter
else
echo "Dumping logs from nodes locally to '${report_dir}'"
dump_nodes
fi
# Surface GCE host errors / automatic restarts that may explain failures.
detect_node_failures
}
main