service_latency.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package network

import (
	"fmt"
	"sort"
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/watch"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/flowcontrol"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"

	"github.com/onsi/ginkgo"
)
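
// durations implements sort.Interface so the measured latencies can be
// sorted before computing percentiles.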
type durations []time.Duration

func (d durations) Len() int           { return len(d) }
func (d durations) Less(i, j int) bool { return d[i] < d[j] }
func (d durations) Swap(i, j int)      { d[i], d[j] = d[j], d[i] }

var _ = SIGDescribe("Service endpoints latency", func() {
	f := framework.NewDefaultFramework("svc-latency")

	/*
		Release : v1.9
		Testname: Service endpoint latency, thresholds
		Description: Run 100 iterations of creating a service with a pod running the pause image, measuring the time it takes from creating each service until an endpoint with the service's name becomes available. These durations are captured over the 100 iterations and then sorted to compute the 50th, 90th and 99th percentiles. The single server latency MUST not exceed the liberally set thresholds of 20s for the 50th percentile and 50s for the 99th percentile.
	*/
	framework.ConformanceIt("should not be very high ", func() {
		const (
			// These are very generous criteria. Ideally we will
			// get this much lower in the future. See issue
			// #10436.
			limitMedian = time.Second * 20
			limitTail   = time.Second * 50

			// Numbers chosen to make the test complete in a short amount
			// of time. This sample size is not actually large enough to
			// reliably measure tails (it may give false positives, but not
			// false negatives), but it should catch low hanging fruit.
			//
			// Note that these are fixed and do not depend on the
			// size of the cluster. Setting parallelTrials larger
			// distorts the measurements. Perhaps this wouldn't be
			// true on HA clusters.
			totalTrials    = 200
			parallelTrials = 15
			minSampleSize  = 100

			// Acceptable failure ratio for getting service latencies.
			acceptableFailureRatio = .05
		)

		// Turn off rate limiting--it interferes with our measurements.
		oldThrottle := f.ClientSet.CoreV1().RESTClient().GetRateLimiter()
		f.ClientSet.CoreV1().RESTClient().(*restclient.RESTClient).Throttle = flowcontrol.NewFakeAlwaysRateLimiter()
		defer func() { f.ClientSet.CoreV1().RESTClient().(*restclient.RESTClient).Throttle = oldThrottle }()

		failing := sets.NewString()
		d, err := runServiceLatencies(f, parallelTrials, totalTrials, acceptableFailureRatio)
		if err != nil {
			failing.Insert(fmt.Sprintf("Not all RC/pod/service trials succeeded: %v", err))
		}
		dSorted := durations(d)
		sort.Sort(dSorted)
		n := len(dSorted)
		if n < minSampleSize {
			failing.Insert(fmt.Sprintf("Did not get a good sample size: %v", dSorted))
		}
		if n < 2 {
			failing.Insert("Less than two runs succeeded; aborting.")
			framework.Failf(strings.Join(failing.List(), "\n"))
		}
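		// percentile picks the element at index n*p/100 of the sorted
		// sample (nearest-rank), clamping to the last element.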
		percentile := func(p int) time.Duration {
			est := n * p / 100
			if est >= n {
				return dSorted[n-1]
			}
			return dSorted[est]
		}
		e2elog.Logf("Latencies: %v", dSorted)
		p50 := percentile(50)
		p90 := percentile(90)
		p99 := percentile(99)
		e2elog.Logf("50 %%ile: %v", p50)
		e2elog.Logf("90 %%ile: %v", p90)
		e2elog.Logf("99 %%ile: %v", p99)
		e2elog.Logf("Total sample count: %v", len(dSorted))

		if p50 > limitMedian {
			failing.Insert("Median latency should be less than " + limitMedian.String())
		}
		if p99 > limitTail {
			failing.Insert("Tail (99 percentile) latency should be less than " + limitTail.String())
		}
		if failing.Len() > 0 {
			errList := strings.Join(failing.List(), "\n")
			helpfulInfo := fmt.Sprintf("\n50, 90, 99 percentiles: %v %v %v", p50, p90, p99)
			framework.Failf(errList + helpfulInfo)
		}
	})
})

func runServiceLatencies(f *framework.Framework, inParallel, total int, acceptableFailureRatio float32) (output []time.Duration, err error) {
	cfg := testutils.RCConfig{
		Client:       f.ClientSet,
		Image:        imageutils.GetPauseImageName(),
		Name:         "svc-latency-rc",
		Namespace:    f.Namespace.Name,
		Replicas:     1,
		PollInterval: time.Second,
	}
	if err := framework.RunRC(cfg); err != nil {
		return nil, err
	}

	// Run a single watcher, to reduce the number of API calls we have to
	// make; this is to minimize the timing error. It's how kube-proxy
	// consumes the endpoints data, so it seems like the right thing to
	// test.
	endpointQueries := newQuerier()
	startEndpointWatcher(f, endpointQueries)
	defer close(endpointQueries.stop)

	// Run one test and throw it away-- this is to make sure that the pod's
	// ready status has propagated.
	singleServiceLatency(f, cfg.Name, endpointQueries)

	// These channels are never closed, and each attempt sends on exactly
	// one of these channels, so the sum of the things sent over them will
	// be exactly total.
	errs := make(chan error, total)
	durations := make(chan time.Duration, total)
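
	// blocker acts as a counting semaphore so that at most inParallel
	// trials are in flight at any one time.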
	blocker := make(chan struct{}, inParallel)
	for i := 0; i < total; i++ {
		go func() {
			defer ginkgo.GinkgoRecover()
			blocker <- struct{}{}
			defer func() { <-blocker }()
			if d, err := singleServiceLatency(f, cfg.Name, endpointQueries); err != nil {
				errs <- err
			} else {
				durations <- d
			}
		}()
	}

	errCount := 0
	for i := 0; i < total; i++ {
		select {
		case e := <-errs:
			e2elog.Logf("Got error: %v", e)
			errCount++
		case d := <-durations:
			output = append(output, d)
		}
	}
	if errCount != 0 {
		e2elog.Logf("Got %d errors out of %d tries", errCount, total)
		errRatio := float32(errCount) / float32(total)
		if errRatio > acceptableFailureRatio {
			return output, fmt.Errorf("error ratio %g is higher than the acceptable ratio %g", errRatio, acceptableFailureRatio)
		}
	}
	return output, nil
}
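
// endpointQuery is a single outstanding wait for the named Endpoints object;
// result is closed once endpoints has been filled in.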
type endpointQuery struct {
	endpointsName string
	endpoints     *v1.Endpoints
	result        chan<- struct{}
}
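
// endpointQueries matches incoming Endpoints events against outstanding
// requests, regardless of which arrives first.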
type endpointQueries struct {
	requests map[string]*endpointQuery

	stop        chan struct{}
	requestChan chan *endpointQuery
	seenChan    chan *v1.Endpoints
}
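
// newQuerier constructs an endpointQueries and starts its join loop in the
// background.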
func newQuerier() *endpointQueries {
	eq := &endpointQueries{
		requests: map[string]*endpointQuery{},

		stop:        make(chan struct{}, 100),
		requestChan: make(chan *endpointQuery),
		seenChan:    make(chan *v1.Endpoints, 100),
	}
	go eq.join()
	return eq
}

// join merges the incoming streams of requests and added endpoints. It has
// nice properties like:
//  * remembering an endpoint if it happens to arrive before it is requested.
//  * closing all outstanding requests (returning nil) if it is stopped.
func (eq *endpointQueries) join() {
	defer func() {
		// Terminate all pending requests, so that no goroutine will
		// block indefinitely.
		for _, req := range eq.requests {
			if req.result != nil {
				close(req.result)
			}
		}
	}()

	for {
		select {
		case <-eq.stop:
			return
		case req := <-eq.requestChan:
			if cur, ok := eq.requests[req.endpointsName]; ok && cur.endpoints != nil {
				// We've already gotten the result, so we can
				// immediately satisfy this request.
				delete(eq.requests, req.endpointsName)
				req.endpoints = cur.endpoints
				close(req.result)
			} else {
				// Save this request.
				eq.requests[req.endpointsName] = req
			}
		case got := <-eq.seenChan:
			if req, ok := eq.requests[got.Name]; ok {
				if req.result != nil {
					// Satisfy a request.
					delete(eq.requests, got.Name)
					req.endpoints = got
					close(req.result)
				} else {
					// We've already recorded a result, but
					// haven't gotten the request yet. Only
					// keep the first result.
				}
			} else {
				// We haven't gotten the corresponding request
				// yet, save this result.
				eq.requests[got.Name] = &endpointQuery{
					endpoints: got,
				}
			}
		}
	}
}

// request blocks until the requested endpoint is seen.
func (eq *endpointQueries) request(endpointsName string) *v1.Endpoints {
	result := make(chan struct{})
	req := &endpointQuery{
		endpointsName: endpointsName,
		result:        result,
	}
	eq.requestChan <- req
	<-result
	return req.endpoints
}

// marks e as added; does not block.
func (eq *endpointQueries) added(e *v1.Endpoints) {
	eq.seenChan <- e
}

// blocks until it has finished syncing.
func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
	_, controller := cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				obj, err := f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).List(options)
				return runtime.Object(obj), err
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				return f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).Watch(options)
			},
		},
		&v1.Endpoints{},
		0,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if e, ok := obj.(*v1.Endpoints); ok {
					if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
						q.added(e)
					}
				}
			},
			UpdateFunc: func(old, cur interface{}) {
				if e, ok := cur.(*v1.Endpoints); ok {
					if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
						q.added(e)
					}
				}
			},
		},
	)

	go controller.Run(q.stop)

	// Wait for the controller to sync, so that we don't count any warm-up time.
	for !controller.HasSynced() {
		time.Sleep(100 * time.Millisecond)
	}
}
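
// singleServiceLatency creates one ClusterIP Service selecting the named pods
// and returns how long it took for a ready endpoint to be observed by the
// shared watcher.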
func singleServiceLatency(f *framework.Framework, name string, q *endpointQueries) (time.Duration, error) {
	// Make a service that points to that pod.
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "latency-svc-",
		},
		Spec: v1.ServiceSpec{
			Ports:           []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: 80}},
			Selector:        map[string]string{"name": name},
			Type:            v1.ServiceTypeClusterIP,
			SessionAffinity: v1.ServiceAffinityNone,
		},
	}
	startTime := time.Now()
	gotSvc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc)
	if err != nil {
		return 0, err
	}
	e2elog.Logf("Created: %v", gotSvc.Name)
	if e := q.request(gotSvc.Name); e == nil {
		return 0, fmt.Errorf("never got a result for endpoint %v", gotSvc.Name)
	}
	stopTime := time.Now()
	d := stopTime.Sub(startTime)
	e2elog.Logf("Got endpoints: %v [%v]", gotSvc.Name, d)
	return d, nil
}