util.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package benchmark

import (
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	"math"
	"path"
	"sort"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	coreinformers "k8s.io/client-go/informers/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/component-base/metrics/legacyregistry"
	"k8s.io/component-base/metrics/testutil"
	"k8s.io/klog"
	"k8s.io/kubernetes/test/integration/util"
)

const (
	dateFormat                = "2006-01-02T15:04:05Z"
	throughputSampleFrequency = time.Second
)

var dataItemsDir = flag.String("data-items-dir", "", "destination directory for storing generated data items for perf dashboard")

// mustSetupScheduler starts the following components:
// - k8s api server (a.k.a. master)
// - scheduler
// It returns the shutdown function (which must be called to tear these down
// once the test is finished), the scheduler's pod informer, and the clientset.
// Notes on rate limiter:
// - client rate limit is set to 5000.
func mustSetupScheduler() (util.ShutdownFunc, coreinformers.PodInformer, clientset.Interface) {
	apiURL, apiShutdown := util.StartApiserver()
	clientSet := clientset.NewForConfigOrDie(&restclient.Config{
		Host:          apiURL,
		ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
		QPS:           5000.0,
		Burst:         5000,
	})
	_, podInformer, schedulerShutdown := util.StartScheduler(clientSet)
	fakePVControllerShutdown := util.StartFakePVController(clientSet)

	shutdownFunc := func() {
		fakePVControllerShutdown()
		schedulerShutdown()
		apiShutdown()
	}
	return shutdownFunc, podInformer, clientSet
}
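
// A minimal usage sketch (the surrounding benchmark body is hypothetical, not
// part of this file):
//
//	shutdownFunc, podInformer, clientSet := mustSetupScheduler()
//	defer shutdownFunc()
//	// create nodes and pods via clientSet; observe scheduling via podInformer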

// getScheduledPods lists all pods known to the informer and returns those that
// have already been assigned to a node.
func getScheduledPods(podInformer coreinformers.PodInformer) ([]*v1.Pod, error) {
	pods, err := podInformer.Lister().List(labels.Everything())
	if err != nil {
		return nil, err
	}

	scheduled := make([]*v1.Pod, 0, len(pods))
	for i := range pods {
		pod := pods[i]
		if len(pod.Spec.NodeName) > 0 {
			scheduled = append(scheduled, pod)
		}
	}
	return scheduled, nil
}

// DataItem is the data point.
type DataItem struct {
	// Data is a map from bucket to real data point (e.g. "Perc90" -> 23.5). Notice
	// that all data items with the same label combination should have the same buckets.
	Data map[string]float64 `json:"data"`
	// Unit is the data unit. Notice that all data items with the same label combination
	// should have the same unit.
	Unit string `json:"unit"`
	// Labels are the labels of the data item.
	Labels map[string]string `json:"labels,omitempty"`
}

// DataItems is the data point set. It is the struct that the perf dashboard expects.
type DataItems struct {
	Version   string     `json:"version"`
	DataItems []DataItem `json:"dataItems"`
}
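
// For illustration, a DataItems value marshals to JSON shaped like the
// following (the numbers and label values are made up):
//
//	{
//	  "version": "v1",
//	  "dataItems": [
//	    {
//	      "data": {"Average": 15.0, "Perc50": 12.1, "Perc90": 23.5, "Perc99": 40.2},
//	      "unit": "ms",
//	      "labels": {"Metric": "scheduling_latency", "Name": "SchedulingBasic"}
//	    }
//	  ]
//	}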

// dataItems2JSONFile writes the data items to a timestamped JSON file, placed
// in *dataItemsDir when the flag is set and in the working directory otherwise.
func dataItems2JSONFile(dataItems DataItems, namePrefix string) error {
	b, err := json.Marshal(dataItems)
	if err != nil {
		return err
	}

	destFile := fmt.Sprintf("%v_%v.json", namePrefix, time.Now().Format(dateFormat))
	if *dataItemsDir != "" {
		destFile = path.Join(*dataItemsDir, destFile)
	}
	return ioutil.WriteFile(destFile, b, 0644)
}

// metricsCollectorConfig is the config to be marshalled to YAML config file.
type metricsCollectorConfig struct {
	Metrics []string
}
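
// For illustration only, a config selecting two histogram metrics by name could
// look roughly like the YAML below; the metric names are placeholders and the
// exact key casing depends on the YAML library used by the caller:
//
//	metrics:
//	  - scheduler_scheduling_algorithm_duration_seconds
//	  - scheduler_e2e_scheduling_duration_seconds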

// metricsCollector collects metrics from legacyregistry.DefaultGatherer.Gather().
// Currently only Histogram metrics are supported.
type metricsCollector struct {
	metricsCollectorConfig
	labels map[string]string
}

func newMetricsCollector(config metricsCollectorConfig, labels map[string]string) *metricsCollector {
	return &metricsCollector{
		metricsCollectorConfig: config,
		labels:                 labels,
	}
}

func (*metricsCollector) run(stopCh chan struct{}) {
	// metricsCollector doesn't need to start before the tests, so nothing to do here.
}

func (pc *metricsCollector) collect() []DataItem {
	var dataItems []DataItem
	for _, metric := range pc.Metrics {
		dataItem := collectHistogram(metric, pc.labels)
		if dataItem != nil {
			dataItems = append(dataItems, *dataItem)
		}
	}
	return dataItems
}
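
// Sketch of how a metricsCollector might be used (the metric name below is a
// placeholder; any histogram registered in the legacy registry would work):
//
//	mc := newMetricsCollector(
//		metricsCollectorConfig{Metrics: []string{"scheduler_e2e_scheduling_duration_seconds"}},
//		map[string]string{"Name": "example"},
//	)
//	dataItems := mc.collect() // one DataItem per metric that could be gathered and validated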

// collectHistogram gathers the named histogram from the legacy registry and
// converts its quantiles and average from seconds to milliseconds (the gathered
// histograms are assumed to be recorded in seconds).
func collectHistogram(metric string, labels map[string]string) *DataItem {
	hist, err := testutil.GetHistogramFromGatherer(legacyregistry.DefaultGatherer, metric)
	if err != nil {
		klog.Error(err)
		return nil
	}
	if err := hist.Validate(); err != nil {
		klog.Error(err)
		return nil
	}

	q50 := hist.Quantile(0.50)
	q90 := hist.Quantile(0.90)
	q99 := hist.Quantile(0.99)
	avg := hist.Average()

	// clear the metrics so that the next test always starts with empty prometheus
	// metrics (since the metrics are shared among all tests run inside the same binary)
	hist.Clear()

	msFactor := float64(time.Second) / float64(time.Millisecond)

	// Copy labels and add the "Metric" label for this metric.
	labelMap := map[string]string{"Metric": metric}
	for k, v := range labels {
		labelMap[k] = v
	}
	return &DataItem{
		Labels: labelMap,
		Data: map[string]float64{
			"Perc50":  q50 * msFactor,
			"Perc90":  q90 * msFactor,
			"Perc99":  q99 * msFactor,
			"Average": avg * msFactor,
		},
		Unit: "ms",
	}
}

// throughputCollector samples the number of scheduled pods once per
// throughputSampleFrequency and records the scheduling throughput (pods/s)
// observed in each interval.
type throughputCollector struct {
	podInformer           coreinformers.PodInformer
	schedulingThroughputs []float64
	labels                map[string]string
}

func newThroughputCollector(podInformer coreinformers.PodInformer, labels map[string]string) *throughputCollector {
	return &throughputCollector{
		podInformer: podInformer,
		labels:      labels,
	}
}

func (tc *throughputCollector) run(stopCh chan struct{}) {
	podsScheduled, err := getScheduledPods(tc.podInformer)
	if err != nil {
		klog.Fatalf("%v", err)
	}
	lastScheduledCount := len(podsScheduled)
	for {
		select {
		case <-stopCh:
			return
		case <-time.After(throughputSampleFrequency):
			podsScheduled, err := getScheduledPods(tc.podInformer)
			if err != nil {
				klog.Fatalf("%v", err)
			}
			scheduled := len(podsScheduled)
			// Throughput is the number of pods newly scheduled in this sampling
			// interval, normalized to pods per second.
			samplingRatioSeconds := float64(throughputSampleFrequency) / float64(time.Second)
			throughput := float64(scheduled-lastScheduledCount) / samplingRatioSeconds
			tc.schedulingThroughputs = append(tc.schedulingThroughputs, throughput)
			lastScheduledCount = scheduled
			klog.Infof("%d pods scheduled", lastScheduledCount)
		}
	}
}

func (tc *throughputCollector) collect() []DataItem {
	throughputSummary := DataItem{Labels: tc.labels}
	if length := len(tc.schedulingThroughputs); length > 0 {
		sort.Float64s(tc.schedulingThroughputs)
		sum := 0.0
		for i := range tc.schedulingThroughputs {
			sum += tc.schedulingThroughputs[i]
		}

		// Copy the collector's labels and add the "Metric" label, mirroring
		// collectHistogram, so that the caller-supplied labels are not dropped.
		labelMap := map[string]string{"Metric": "SchedulingThroughput"}
		for k, v := range tc.labels {
			labelMap[k] = v
		}
		throughputSummary.Labels = labelMap
		// ceil(length*p/100)-1 is the nearest-rank index into the sorted samples,
		// e.g. with 20 samples Perc90 is the 18th smallest value.
		throughputSummary.Data = map[string]float64{
			"Average": sum / float64(length),
			"Perc50":  tc.schedulingThroughputs[int(math.Ceil(float64(length*50)/100))-1],
			"Perc90":  tc.schedulingThroughputs[int(math.Ceil(float64(length*90)/100))-1],
			"Perc99":  tc.schedulingThroughputs[int(math.Ceil(float64(length*99)/100))-1],
		}
		throughputSummary.Unit = "pods/s"
	}

	return []DataItem{throughputSummary}
}
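
// exampleThroughputCollection is an illustrative sketch, not used by the
// benchmarks: it drives a throughputCollector for a fixed window using the pod
// informer returned by mustSetupScheduler and writes the summary to a
// perf-dashboard JSON file. The window length and the file prefix are arbitrary.
func exampleThroughputCollection(podInformer coreinformers.PodInformer) {
	tc := newThroughputCollector(podInformer, map[string]string{"Name": "example"})
	stopCh := make(chan struct{})
	go tc.run(stopCh)

	// In a real benchmark the window ends once all test pods are scheduled; a
	// fixed number of sampling intervals stands in for that here.
	time.Sleep(10 * throughputSampleFrequency)
	close(stopCh)

	items := DataItems{Version: "v1", DataItems: tc.collect()}
	if err := dataItems2JSONFile(items, "example_throughput"); err != nil {
		klog.Errorf("writing data items: %v", err)
	}
}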