// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package libcontainer

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/google/cadvisor/container"
	info "github.com/google/cadvisor/info/v1"
	"github.com/opencontainers/runc/libcontainer"
	"github.com/opencontainers/runc/libcontainer/cgroups"
	"k8s.io/klog"
)

/*
#include <unistd.h>
*/
import "C"
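
// Handler collects stats for a single container. It combines cgroup
// accounting from the container's cgroup manager with per-process data read
// from /proc under rootFs. pidMetricsCache keeps the last schedstat values
// seen for each PID, so the aggregated scheduler stats also cover processes
// that have since exited.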
type Handler struct {
	cgroupManager   cgroups.Manager
	rootFs          string
	pid             int
	includedMetrics container.MetricSet
	pidMetricsCache map[int]*info.CpuSchedstat
}
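
// NewHandler returns a Handler for a container, given its cgroup manager,
// the root filesystem path under which /proc is read, a PID inside the
// container (used for the /proc/<pid>/net stats), and the set of metrics to
// collect.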
func NewHandler(cgroupManager cgroups.Manager, rootFs string, pid int, includedMetrics container.MetricSet) *Handler {
	return &Handler{
		cgroupManager:   cgroupManager,
		rootFs:          rootFs,
		pid:             pid,
		includedMetrics: includedMetrics,
		pidMetricsCache: make(map[int]*info.CpuSchedstat),
	}
}

// GetStats returns cgroup and networking stats of the specified container.
func (h *Handler) GetStats() (*info.ContainerStats, error) {
	cgroupStats, err := h.cgroupManager.GetStats()
	if err != nil {
		return nil, err
	}
	libcontainerStats := &libcontainer.Stats{
		CgroupStats: cgroupStats,
	}
	stats := newContainerStats(libcontainerStats, h.includedMetrics)

	if h.includedMetrics.Has(container.ProcessSchedulerMetrics) {
		pids, err := h.cgroupManager.GetAllPids()
		if err != nil {
			klog.V(4).Infof("Could not get PIDs for container %d: %v", h.pid, err)
		} else {
			stats.Cpu.Schedstat, err = schedulerStatsFromProcs(h.rootFs, pids, h.pidMetricsCache)
			if err != nil {
				klog.V(4).Infof("Unable to get Process Scheduler Stats: %v", err)
			}
		}
	}

	// If we know the pid, then get network stats from /proc/<pid>/net/dev.
	if h.pid == 0 {
		return stats, nil
	}
	if h.includedMetrics.Has(container.NetworkUsageMetrics) {
		netStats, err := networkStatsFromProc(h.rootFs, h.pid)
		if err != nil {
			klog.V(4).Infof("Unable to get network stats from pid %d: %v", h.pid, err)
		} else {
			stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
		}
	}
	if h.includedMetrics.Has(container.NetworkTcpUsageMetrics) {
		t, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp")
		if err != nil {
			klog.V(4).Infof("Unable to get tcp stats from pid %d: %v", h.pid, err)
		} else {
			stats.Network.Tcp = t
		}

		t6, err := tcpStatsFromProc(h.rootFs, h.pid, "net/tcp6")
		if err != nil {
			klog.V(4).Infof("Unable to get tcp6 stats from pid %d: %v", h.pid, err)
		} else {
			stats.Network.Tcp6 = t6
		}
	}
	if h.includedMetrics.Has(container.NetworkUdpUsageMetrics) {
		u, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp")
		if err != nil {
			klog.V(4).Infof("Unable to get udp stats from pid %d: %v", h.pid, err)
		} else {
			stats.Network.Udp = u
		}

		u6, err := udpStatsFromProc(h.rootFs, h.pid, "net/udp6")
		if err != nil {
			klog.V(4).Infof("Unable to get udp6 stats from pid %d: %v", h.pid, err)
		} else {
			stats.Network.Udp6 = u6
		}
	}
	if h.includedMetrics.Has(container.ProcessMetrics) {
		paths := h.cgroupManager.GetPaths()
		cpuPath, ok := paths["cpu"]
		if !ok {
			klog.V(4).Infof("Could not find cgroups CPU for container %d", h.pid)
		} else {
			stats.Processes, err = processStatsFromProcs(h.rootFs, cpuPath)
			if err != nil {
				klog.V(4).Infof("Unable to get Process Stats: %v", err)
			}
		}
	}

	// For backwards compatibility.
	if len(stats.Network.Interfaces) > 0 {
		stats.Network.InterfaceStats = stats.Network.Interfaces[0]
	}

	return stats, nil
}
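
// processStatsFromProcs counts the processes listed in the cgroup's
// cgroup.procs file and sums their open file descriptors by counting the
// entries of each /proc/<pid>/fd directory.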
func processStatsFromProcs(rootFs string, cgroupPath string) (info.ProcessStats, error) {
	var fdCount uint64
	filePath := path.Join(cgroupPath, "cgroup.procs")
	out, err := ioutil.ReadFile(filePath)
	if err != nil {
		return info.ProcessStats{}, fmt.Errorf("couldn't open cpu cgroup procs file %v: %v", filePath, err)
	}

	pids := strings.Split(string(out), "\n")
	// ioutil.ReadFile keeps the trailing newline of "cgroup.procs", so the last
	// element of the split is an empty string, e.g. pids = ["22", "1223", ""].
	// Trim that last value.
	if len(pids) != 0 && pids[len(pids)-1] == "" {
		pids = pids[:len(pids)-1]
	}

	for _, pid := range pids {
		dirPath := path.Join(rootFs, "/proc", pid, "fd")
		fds, err := ioutil.ReadDir(dirPath)
		if err != nil {
			klog.V(4).Infof("error while listing directory %q to measure fd count: %v", dirPath, err)
			continue
		}
		fdCount += uint64(len(fds))
	}

	processStats := info.ProcessStats{
		ProcessCount: uint64(len(pids)),
		FdCount:      fdCount,
	}

	return processStats, nil
}
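
// schedulerStatsFromProcs aggregates /proc/<pid>/schedstat over all PIDs in
// the container. Each schedstat file holds three space-separated counters,
// e.g. "2389881 3213215 42": time spent on the CPU (ns), time spent waiting
// on a runqueue (ns), and the number of timeslices run.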
func schedulerStatsFromProcs(rootFs string, pids []int, pidMetricsCache map[int]*info.CpuSchedstat) (info.CpuSchedstat, error) {
	for _, pid := range pids {
		contents, err := ioutil.ReadFile(path.Join(rootFs, "proc", strconv.Itoa(pid), "schedstat"))
		if err != nil {
			return info.CpuSchedstat{}, fmt.Errorf("couldn't read scheduler statistics for process %d: %v", pid, err)
		}
		rawMetrics := bytes.Split(bytes.TrimRight(contents, "\n"), []byte(" "))
		if len(rawMetrics) != 3 {
			return info.CpuSchedstat{}, fmt.Errorf("unexpected number of metrics in schedstat file for process %d", pid)
		}
		cacheEntry, ok := pidMetricsCache[pid]
		if !ok {
			cacheEntry = &info.CpuSchedstat{}
			pidMetricsCache[pid] = cacheEntry
		}
		for i, rawMetric := range rawMetrics {
			metric, err := strconv.ParseUint(string(rawMetric), 10, 64)
			if err != nil {
				return info.CpuSchedstat{}, fmt.Errorf("parsing error while reading scheduler statistics for process %d: %v", pid, err)
			}
			switch i {
			case 0:
				cacheEntry.RunTime = metric
			case 1:
				cacheEntry.RunqueueTime = metric
			case 2:
				cacheEntry.RunPeriods = metric
			}
		}
	}

	schedstats := info.CpuSchedstat{}
	for _, v := range pidMetricsCache {
		schedstats.RunPeriods += v.RunPeriods
		schedstats.RunqueueTime += v.RunqueueTime
		schedstats.RunTime += v.RunTime
	}
	return schedstats, nil
}
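
// networkStatsFromProc reads per-interface counters from /proc/<pid>/net/dev,
// which reflects the network namespace of the given PID.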
func networkStatsFromProc(rootFs string, pid int) ([]info.InterfaceStats, error) {
	netStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), "/net/dev")
	ifaceStats, err := scanInterfaceStats(netStatsFile)
	if err != nil {
		return []info.InterfaceStats{}, fmt.Errorf("couldn't read network stats: %v", err)
	}
	return ifaceStats, nil
}
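
// Interfaces whose names start with these prefixes (loopback, veth pairs,
// docker bridges) are skipped when reporting network stats.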
var (
	ignoredDevicePrefixes = []string{"lo", "veth", "docker"}
)

func isIgnoredDevice(ifName string) bool {
	for _, prefix := range ignoredDevicePrefixes {
		if strings.HasPrefix(strings.ToLower(ifName), prefix) {
			return true
		}
	}
	return false
}
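
// scanInterfaceStats parses a /proc/net/dev style file: two header lines
// followed by one line per interface, consisting of the device name plus 16
// counters (colons are stripped before splitting). After splitting,
// fields[1:5] are the receive bytes/packets/errs/drop counters and
// fields[9:13] are the transmit equivalents.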
func scanInterfaceStats(netStatsFile string) ([]info.InterfaceStats, error) {
	file, err := os.Open(netStatsFile)
	if err != nil {
		return nil, fmt.Errorf("failure opening %s: %v", netStatsFile, err)
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)

	// Discard header lines
	for i := 0; i < 2; i++ {
		if b := scanner.Scan(); !b {
			return nil, scanner.Err()
		}
	}

	stats := []info.InterfaceStats{}
	for scanner.Scan() {
		line := scanner.Text()
		line = strings.Replace(line, ":", "", -1)

		fields := strings.Fields(line)
		// If the format of the line is invalid then don't trust any of the stats
		// in this file.
		if len(fields) != 17 {
			return nil, fmt.Errorf("invalid interface stats line: %v", line)
		}

		devName := fields[0]
		if isIgnoredDevice(devName) {
			continue
		}

		i := info.InterfaceStats{
			Name: devName,
		}

		statFields := append(fields[1:5], fields[9:13]...)
		statPointers := []*uint64{
			&i.RxBytes, &i.RxPackets, &i.RxErrors, &i.RxDropped,
			&i.TxBytes, &i.TxPackets, &i.TxErrors, &i.TxDropped,
		}

		err := setInterfaceStatValues(statFields, statPointers)
		if err != nil {
			return nil, fmt.Errorf("cannot parse interface stats (%v): %v", err, line)
		}

		stats = append(stats, i)
	}

	return stats, nil
}

func setInterfaceStatValues(fields []string, pointers []*uint64) error {
	for i, v := range fields {
		val, err := strconv.ParseUint(v, 10, 64)
		if err != nil {
			return err
		}
		*pointers[i] = val
	}
	return nil
}

func tcpStatsFromProc(rootFs string, pid int, file string) (info.TcpStat, error) {
	tcpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)

	tcpStats, err := scanTcpStats(tcpStatsFile)
	if err != nil {
		return tcpStats, fmt.Errorf("couldn't read tcp stats: %v", err)
	}

	return tcpStats, nil
}
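
// scanTcpStats counts sockets by TCP state from a /proc/net/tcp style file.
// The state is the fourth column, a hexadecimal code (e.g. "0A" for LISTEN).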
func scanTcpStats(tcpStatsFile string) (info.TcpStat, error) {
	var stats info.TcpStat

	data, err := ioutil.ReadFile(tcpStatsFile)
	if err != nil {
		return stats, fmt.Errorf("failure opening %s: %v", tcpStatsFile, err)
	}

	tcpStateMap := map[string]uint64{
		"01": 0, // ESTABLISHED
		"02": 0, // SYN_SENT
		"03": 0, // SYN_RECV
		"04": 0, // FIN_WAIT1
		"05": 0, // FIN_WAIT2
		"06": 0, // TIME_WAIT
		"07": 0, // CLOSE
		"08": 0, // CLOSE_WAIT
		"09": 0, // LAST_ACK
		"0A": 0, // LISTEN
		"0B": 0, // CLOSING
	}

	reader := strings.NewReader(string(data))
	scanner := bufio.NewScanner(reader)
	scanner.Split(bufio.ScanLines)

	// Discard header line
	if b := scanner.Scan(); !b {
		return stats, scanner.Err()
	}

	for scanner.Scan() {
		line := scanner.Text()

		fields := strings.Fields(line)
		// TCP state is the 4th field.
		// Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
		tcpState := fields[3]
		_, ok := tcpStateMap[tcpState]
		if !ok {
			return stats, fmt.Errorf("invalid TCP stats line: %v", line)
		}
		tcpStateMap[tcpState]++
	}

	stats = info.TcpStat{
		Established: tcpStateMap["01"],
		SynSent:     tcpStateMap["02"],
		SynRecv:     tcpStateMap["03"],
		FinWait1:    tcpStateMap["04"],
		FinWait2:    tcpStateMap["05"],
		TimeWait:    tcpStateMap["06"],
		Close:       tcpStateMap["07"],
		CloseWait:   tcpStateMap["08"],
		LastAck:     tcpStateMap["09"],
		Listen:      tcpStateMap["0A"],
		Closing:     tcpStateMap["0B"],
	}

	return stats, nil
}

func udpStatsFromProc(rootFs string, pid int, file string) (info.UdpStat, error) {
	var err error
	var udpStats info.UdpStat

	udpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)

	r, err := os.Open(udpStatsFile)
	if err != nil {
		return udpStats, fmt.Errorf("failure opening %s: %v", udpStatsFile, err)
	}
	defer r.Close()

	udpStats, err = scanUdpStats(r)
	if err != nil {
		return udpStats, fmt.Errorf("couldn't read udp stats: %v", err)
	}

	return udpStats, nil
}
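
// scanUdpStats summarizes a /proc/net/udp style file. Listen is incremented
// once per socket entry, Dropped sums the "drops" column, and RxQueued /
// TxQueued sum the queue sizes encoded in the "tx_queue:rx_queue" column.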
func scanUdpStats(r io.Reader) (info.UdpStat, error) {
	var stats info.UdpStat

	scanner := bufio.NewScanner(r)
	scanner.Split(bufio.ScanLines)

	// Discard header line
	if b := scanner.Scan(); !b {
		return stats, scanner.Err()
	}

	listening := uint64(0)
	dropped := uint64(0)
	rxQueued := uint64(0)
	txQueued := uint64(0)

	for scanner.Scan() {
		line := scanner.Text()
		// Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops

		listening++

		fs := strings.Fields(line)
		if len(fs) != 13 {
			continue
		}

		// fs[4] holds the queue sizes as "tx_queue:rx_queue" in hexadecimal.
		rx, tx := uint64(0), uint64(0)
		fmt.Sscanf(fs[4], "%X:%X", &tx, &rx)
		rxQueued += rx
		txQueued += tx

		d, err := strconv.Atoi(fs[12])
		if err != nil {
			continue
		}
		dropped += uint64(d)
	}

	stats = info.UdpStat{
		Listen:   listening,
		Dropped:  dropped,
		RxQueued: rxQueued,
		TxQueued: txQueued,
	}

	return stats, nil
}

func (h *Handler) GetProcesses() ([]int, error) {
	pids, err := h.cgroupManager.GetPids()
	if err != nil {
		return nil, err
	}
	return pids, nil
}

func minUint32(x, y uint32) uint32 {
	if x < y {
		return x
	}
	return y
}

// var to allow unit tests to stub it out
var numCpusFunc = getNumberOnlineCPUs

// setCpuStats copies CPU usage and throttling data from libcontainer cgroup
// stats into info.ContainerStats, optionally including per-CPU usage.
func setCpuStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
	ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
	ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode
	ret.Cpu.Usage.Total = s.CpuStats.CpuUsage.TotalUsage
	ret.Cpu.CFS.Periods = s.CpuStats.ThrottlingData.Periods
	ret.Cpu.CFS.ThrottledPeriods = s.CpuStats.ThrottlingData.ThrottledPeriods
	ret.Cpu.CFS.ThrottledTime = s.CpuStats.ThrottlingData.ThrottledTime

	if !withPerCPU {
		return
	}
	if len(s.CpuStats.CpuUsage.PercpuUsage) == 0 {
		// libcontainer's 'GetStats' can leave 'PercpuUsage' nil if it skipped the
		// cpuacct subsystem.
		return
	}

	numPossible := uint32(len(s.CpuStats.CpuUsage.PercpuUsage))
	// Note that as of https://patchwork.kernel.org/patch/8607101/ (kernel v4.7),
	// the percpu usage information includes extra zero values for all additional
	// possible CPUs. This is to allow statistic collection after CPU-hotplug.
	// We intentionally ignore these extra zeroes.
	numActual, err := numCpusFunc()
	if err != nil {
		klog.Errorf("unable to determine number of actual cpus; defaulting to maximum possible number: errno %v", err)
		numActual = numPossible
	}
	if numActual > numPossible {
		// The real number of cores should never be greater than the number of
		// datapoints reported in cpu usage.
		klog.Errorf("PercpuUsage had %v cpus, but the actual number is %v; ignoring extra CPUs", numPossible, numActual)
	}
	numActual = minUint32(numPossible, numActual)
	ret.Cpu.Usage.PerCpu = make([]uint64, numActual)

	for i := uint32(0); i < numActual; i++ {
		ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]
	}
}

// Copied from
// https://github.com/moby/moby/blob/8b1adf55c2af329a4334f21d9444d6a169000c81/daemon/stats/collector_unix.go#L73
// Apache 2.0, Copyright Docker, Inc.
func getNumberOnlineCPUs() (uint32, error) {
	i, err := C.sysconf(C._SC_NPROCESSORS_ONLN)
	// According to POSIX - errno is undefined after successful
	// sysconf, and can be non-zero in several cases, so look for
	// error in returned value not in errno.
	// (https://sourceware.org/bugzilla/show_bug.cgi?id=21536)
	if i == -1 {
		return 0, err
	}
	return uint32(i), nil
}

func setDiskIoStats(s *cgroups.Stats, ret *info.ContainerStats) {
	ret.DiskIo.IoServiceBytes = DiskStatsCopy(s.BlkioStats.IoServiceBytesRecursive)
	ret.DiskIo.IoServiced = DiskStatsCopy(s.BlkioStats.IoServicedRecursive)
	ret.DiskIo.IoQueued = DiskStatsCopy(s.BlkioStats.IoQueuedRecursive)
	ret.DiskIo.Sectors = DiskStatsCopy(s.BlkioStats.SectorsRecursive)
	ret.DiskIo.IoServiceTime = DiskStatsCopy(s.BlkioStats.IoServiceTimeRecursive)
	ret.DiskIo.IoWaitTime = DiskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
	ret.DiskIo.IoMerged = DiskStatsCopy(s.BlkioStats.IoMergedRecursive)
	ret.DiskIo.IoTime = DiskStatsCopy(s.BlkioStats.IoTimeRecursive)
}
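
// setMemoryStats copies memory usage from cgroup stats. When the memory
// cgroup has use_hierarchy enabled, the "total_" prefixed counters (which
// include descendant cgroups) are used. The working set is usage minus
// total_inactive_file, floored at zero.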
func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
	ret.Memory.Usage = s.MemoryStats.Usage.Usage
	ret.Memory.MaxUsage = s.MemoryStats.Usage.MaxUsage
	ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt

	if s.MemoryStats.UseHierarchy {
		ret.Memory.Cache = s.MemoryStats.Stats["total_cache"]
		ret.Memory.RSS = s.MemoryStats.Stats["total_rss"]
		ret.Memory.Swap = s.MemoryStats.Stats["total_swap"]
		ret.Memory.MappedFile = s.MemoryStats.Stats["total_mapped_file"]
	} else {
		ret.Memory.Cache = s.MemoryStats.Stats["cache"]
		ret.Memory.RSS = s.MemoryStats.Stats["rss"]
		ret.Memory.Swap = s.MemoryStats.Stats["swap"]
		ret.Memory.MappedFile = s.MemoryStats.Stats["mapped_file"]
	}

	if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
		ret.Memory.ContainerData.Pgfault = v
		ret.Memory.HierarchicalData.Pgfault = v
	}
	if v, ok := s.MemoryStats.Stats["pgmajfault"]; ok {
		ret.Memory.ContainerData.Pgmajfault = v
		ret.Memory.HierarchicalData.Pgmajfault = v
	}

	workingSet := ret.Memory.Usage
	if v, ok := s.MemoryStats.Stats["total_inactive_file"]; ok {
		if workingSet < v {
			workingSet = 0
		} else {
			workingSet -= v
		}
	}
	ret.Memory.WorkingSet = workingSet
}

func setNetworkStats(libcontainerStats *libcontainer.Stats, ret *info.ContainerStats) {
	ret.Network.Interfaces = make([]info.InterfaceStats, len(libcontainerStats.Interfaces))
	for i := range libcontainerStats.Interfaces {
		ret.Network.Interfaces[i] = info.InterfaceStats{
			Name:      libcontainerStats.Interfaces[i].Name,
			RxBytes:   libcontainerStats.Interfaces[i].RxBytes,
			RxPackets: libcontainerStats.Interfaces[i].RxPackets,
			RxErrors:  libcontainerStats.Interfaces[i].RxErrors,
			RxDropped: libcontainerStats.Interfaces[i].RxDropped,
			TxBytes:   libcontainerStats.Interfaces[i].TxBytes,
			TxPackets: libcontainerStats.Interfaces[i].TxPackets,
			TxErrors:  libcontainerStats.Interfaces[i].TxErrors,
			TxDropped: libcontainerStats.Interfaces[i].TxDropped,
		}
	}

	// Add to base struct for backwards compatibility.
	if len(ret.Network.Interfaces) > 0 {
		ret.Network.InterfaceStats = ret.Network.Interfaces[0]
	}
}
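
// newContainerStats builds an info.ContainerStats snapshot (timestamped now)
// from libcontainer stats, gating per-CPU and disk I/O details on the
// enabled metric set.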
func newContainerStats(libcontainerStats *libcontainer.Stats, includedMetrics container.MetricSet) *info.ContainerStats {
	ret := &info.ContainerStats{
		Timestamp: time.Now(),
	}

	if s := libcontainerStats.CgroupStats; s != nil {
		setCpuStats(s, ret, includedMetrics.Has(container.PerCpuUsageMetrics))
		if includedMetrics.Has(container.DiskIOMetrics) {
			setDiskIoStats(s, ret)
		}
		setMemoryStats(s, ret)
	}
	if len(libcontainerStats.Interfaces) > 0 {
		setNetworkStats(libcontainerStats, ret)
	}
	return ret
}