# fluentd-gcp-configmap-old.yaml
---
  1. # This ConfigMap is used to ingest logs against old resources like
  2. # "gke_container" and "gce_instance" when $LOGGING_STACKDRIVER_RESOURCE_TYPES is
  3. # set to "old".
  4. # When $LOGGING_STACKDRIVER_RESOURCE_TYPES is set to "new", the ConfigMap in
  5. # fluentd-gcp-configmap.yaml will be used for ingesting logs against new
  6. # resources like "k8s_container" and "k8s_node".
  7. kind: ConfigMap
  8. apiVersion: v1
  9. data:
  containers.input.conf: |-
    # This configuration file for Fluentd is used
    # to watch changes to Docker log files that live in the
    # directory /var/lib/docker/containers/ and are symbolically
    # linked to from the /var/log/containers directory using names that capture the
    # pod name and container name. These logs are then submitted to
    # Google Cloud Logging which assumes the installation of the cloud-logging plug-in.
    #
    # Example
    # =======
    # A line in the Docker log file might look like this JSON:
    #
    # {"log":"2014/09/25 21:15:03 Got request with path wombat\\n",
    #  "stream":"stderr",
    #  "time":"2014-09-25T21:15:03.499185026Z"}
    #
    # The original tag is derived from the log file's location.
    # For example a Docker container's logs might be in the directory:
    #   /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
    # and in the file:
    #   997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
    # where 997599971ee6... is the Docker ID of the running container.
    # The Kubernetes kubelet makes a symbolic link to this file on the host
    # machine in the /var/log/containers directory which includes the pod name,
    # the namespace name and the Kubernetes container name:
    #   synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    #   ->
    #   /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
    # The /var/log directory on the host is mapped to the /var/log directory in the container
    # running this instance of Fluentd and we end up collecting the file:
    #   /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    # This results in the tag:
    #   var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
    # where 'synthetic-logger-0.25lps-pod' is the pod name, 'default' is the
    # namespace name, 'synth-lgr' is the container name and '997599971ee6..' is
    # the container ID.
    # The record reformer is used is discard the var.log.containers prefix and
    # the Docker container ID suffix and "kubernetes." is pre-pended giving the tag:
    #   kubernetes.synthetic-logger-0.25lps-pod_default_synth-lgr
    # Tag is then parsed by google_cloud plugin and translated to the metadata,
    # visible in the log viewer
    #
    # Json Log Example:
    # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
    # CRI Log Example:
    # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
    <source>
      @type tail
      path /var/log/containers/*.log
      pos_file /var/log/gcp-containers.log.pos
      # Tags at this point are in the format of:
      # reform.var.log.containers.<POD_NAME>_<NAMESPACE_NAME>_<CONTAINER_NAME>-<CONTAINER_ID>.log
      tag reform.*
      read_from_head true
      <parse>
        @type multi_format
        <pattern>
          format json
          time_key time
          time_format %Y-%m-%dT%H:%M:%S.%NZ
        </pattern>
        <pattern>
          format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
          time_format %Y-%m-%dT%H:%M:%S.%N%:z
        </pattern>
      </parse>
    </source>
    <filter reform.**>
      @type parser
      format /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
      reserve_data true
      suppress_parse_error_log true
      emit_invalid_record_to_error false
      key_name log
    </filter>
    <match reform.**>
      @type record_reformer
      enable_ruby true
      # Tags at this point are in the format of:
      # 'raw.kubernetes.<POD_NAME>_<NAMESPACE_NAME>_<CONTAINER_NAME>'.
      tag raw.kubernetes.${tag_suffix[4].split('-')[0..-2].join('-')}
    </match>
    # Detect exceptions in the log output and forward them as one log entry.
    <match raw.kubernetes.**>
      @type detect_exceptions
      remove_tag_prefix raw
      message log
      stream stream
      multiline_flush_interval 5
      max_bytes 500000
      max_lines 1000
    </match>
  system.input.conf: |-
    # Example:
    # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
    <source>
      @type tail
      format syslog
      path /var/log/startupscript.log
      pos_file /var/log/gcp-startupscript.log.pos
      tag startupscript
    </source>
    # Examples:
    # time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
    # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
    # TODO(random-liu): Remove this after cri container runtime rolls out.
    <source>
      @type tail
      # NOTE(review): the statusCode group originally read "($<status_code>\d+)".
      # "$<name>" is Ruby's post-match reference syntax, not a capture; the
      # named-capture form "(?<status_code>\d+)" is required for the field to
      # ever be extracted.
      format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=(?<status_code>\d+))?/
      path /var/log/docker.log
      pos_file /var/log/gcp-docker.log.pos
      tag docker
    </source>
    # Example:
    # 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
    <source>
      @type tail
      # Not parsing this, because it doesn't have anything particularly useful to
      # parse out of it (like severities).
      format none
      path /var/log/etcd.log
      pos_file /var/log/gcp-etcd.log.pos
      tag etcd
    </source>
    # Multi-line parsing is required for all the kube logs because very large log
    # statements, such as those that include entire object bodies, get split into
    # multiple lines by glog.
    # Example:
    # I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kubelet.log
      pos_file /var/log/gcp-kubelet.log.pos
      tag kubelet
    </source>
    # Example:
    # I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-proxy.log
      pos_file /var/log/gcp-kube-proxy.log.pos
      tag kube-proxy
    </source>
    # Example:
    # I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-apiserver.log
      pos_file /var/log/gcp-kube-apiserver.log.pos
      tag kube-apiserver
    </source>
    # Example:
    # I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-controller-manager.log
      pos_file /var/log/gcp-kube-controller-manager.log.pos
      tag kube-controller-manager
    </source>
    # Example:
    # W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/kube-scheduler.log
      pos_file /var/log/gcp-kube-scheduler.log.pos
      tag kube-scheduler
    </source>
    # Example:
    # I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/glbc.log
      pos_file /var/log/gcp-glbc.log.pos
      tag glbc
    </source>
    # Example:
    # I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
    <source>
      @type tail
      format multiline
      multiline_flush_interval 5s
      format_firstline /^\w\d{4}/
      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
      time_format %m%d %H:%M:%S.%N
      path /var/log/cluster-autoscaler.log
      pos_file /var/log/gcp-cluster-autoscaler.log.pos
      tag cluster-autoscaler
    </source>
    # Logs from systemd-journal for interesting services.
    # TODO(random-liu): Keep this for compatibility, remove this after
    # cri container runtime rolls out.
    <source>
      @type systemd
      filters [{ "_SYSTEMD_UNIT": "docker.service" }]
      pos_file /var/log/gcp-journald-docker.pos
      read_from_head true
      tag docker
    </source>
    <source>
      @type systemd
      filters [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }]
      pos_file /var/log/gcp-journald-container-runtime.pos
      read_from_head true
      tag container-runtime
    </source>
    <source>
      @type systemd
      filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
      pos_file /var/log/gcp-journald-kubelet.pos
      read_from_head true
      tag kubelet
    </source>
    <source>
      @type systemd
      filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
      pos_file /var/log/gcp-journald-node-problem-detector.pos
      read_from_head true
      tag node-problem-detector
    </source>
    # BEGIN_NODE_JOURNAL
    # Whether to include node-journal or not is determined when starting the
    # cluster. It is not changed when the cluster is already running.
    <source>
      @type systemd
      pos_file /var/log/gcp-journald.pos
      read_from_head true
      tag node-journal
    </source>
    <filter node-journal>
      @type grep
      <exclude>
        key _SYSTEMD_UNIT
        pattern ^(docker|{{ fluentd_container_runtime_service }}|kubelet|node-problem-detector)\.service$
      </exclude>
    </filter>
    # END_NODE_JOURNAL
  monitoring.conf: |-
    # This source is used to acquire approximate process start timestamp,
    # which purpose is explained before the corresponding output plugin.
    <source>
      @type exec
      command /bin/sh -c 'date +%s'
      tag process_start
      time_format %Y-%m-%d %H:%M:%S
      keys process_start_timestamp
    </source>
    # This filter is used to convert process start timestamp to integer
    # value for correct ingestion in the prometheus output plugin.
    <filter process_start>
      @type record_transformer
      enable_ruby true
      auto_typecast true
      <record>
        process_start_timestamp ${record["process_start_timestamp"].to_i}
      </record>
    </filter>
  output.conf: |-
    # This match is placed before the all-matching output to provide metric
    # exporter with a process start timestamp for correct exporting of
    # cumulative metrics to Stackdriver.
    <match process_start>
      @type prometheus
      <metric>
        type gauge
        name process_start_time_seconds
        desc Timestamp of the process start in seconds
        key process_start_timestamp
      </metric>
    </match>
    # This filter allows to count the number of log entries read by fluentd
    # before they are processed by the output plugin. This in turn allows to
    # monitor the number of log entries that were read but never sent, e.g.
    # because of liveness probe removing buffer.
    <filter **>
      @type prometheus
      <metric>
        type counter
        name logging_entry_count
        desc Total number of log entries generated by either application containers or system components
      </metric>
    </filter>
    # TODO(instrumentation): Reconsider this workaround later.
    # Trim the entries which exceed slightly less than 100KB, to avoid
    # dropping them. It is a necessity, because Stackdriver only supports
    # entries that are up to 100KB in size.
    <filter kubernetes.**>
      @type record_transformer
      enable_ruby true
      <record>
        log ${record['log'].length > 100000 ? "[Trimmed]#{record['log'][0..100000]}..." : record['log']}
      </record>
    </filter>
    # Do not collect fluentd's own logs to avoid infinite loops.
    <match fluent.**>
      @type null
    </match>
    # We use 2 output stanzas - one to handle the container logs and one to handle
    # the node daemon logs, the latter of which explicitly sends its logs to the
    # compute.googleapis.com service rather than container.googleapis.com to keep
    # them separate since most users don't care about the node logs.
    <match kubernetes.**>
      @type google_cloud
      # Try to detect JSON formatted log entries.
      detect_json true
      # Collect metrics in Prometheus registry about plugin activity.
      enable_monitoring true
      monitoring_type prometheus
      # Allow log entries from multiple containers to be sent in the same request.
      split_logs_by_tag false
      # Set the buffer type to file to improve the reliability and reduce the memory consumption
      buffer_type file
      buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer
      # Set queue_full action to block because we want to pause gracefully
      # in case of the off-the-limits load instead of throwing an exception
      buffer_queue_full_action block
      # Set the chunk limit conservatively to avoid exceeding the recommended
      # chunk size of 5MB per write request.
      buffer_chunk_limit 1M
      # Cap the combined memory usage of this buffer and the one below to
      # 1MiB/chunk * (6 + 2) chunks = 8 MiB
      buffer_queue_limit 6
      # Never wait more than 5 seconds before flushing logs in the non-error case.
      flush_interval 5s
      # Never wait longer than 30 seconds between retries.
      max_retry_wait 30
      # Disable the limit on the number of retries (retry forever).
      disable_retry_limit
      # Use multiple threads for processing.
      num_threads 2
      use_grpc true
    </match>
    # Keep a smaller buffer here since these logs are less important than the user's
    # container logs.
    <match **>
      @type google_cloud
      detect_json true
      enable_monitoring true
      monitoring_type prometheus
      # Allow entries from multiple system logs to be sent in the same request.
      split_logs_by_tag false
      detect_subservice false
      buffer_type file
      buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
      buffer_queue_full_action block
      buffer_chunk_limit 1M
      buffer_queue_limit 2
      flush_interval 5s
      max_retry_wait 30
      disable_retry_limit
      num_threads 2
      use_grpc true
    </match>
  391. metadata:
  392. name: fluentd-gcp-config-old-v1.2.5
  393. namespace: kube-system
  394. labels:
  395. addonmanager.kubernetes.io/mode: Reconcile