Browse Source

added v1.15.4

iwita 5 years ago
parent
commit
21196b4a6d
100 changed files with 139450 additions and 0 deletions
  1. 1 0
      kubernetes-v1.15.4/.bazelrc
  2. 27 0
      kubernetes-v1.15.4/.generated_files
  3. 10 0
      kubernetes-v1.15.4/.gitattributes
  4. 29 0
      kubernetes-v1.15.4/.github/ISSUE_TEMPLATE/bug-report.md
  5. 11 0
      kubernetes-v1.15.4/.github/ISSUE_TEMPLATE/enhancement.md
  6. 20 0
      kubernetes-v1.15.4/.github/ISSUE_TEMPLATE/failing-test.md
  7. 18 0
      kubernetes-v1.15.4/.github/ISSUE_TEMPLATE/support.md
  8. 16 0
      kubernetes-v1.15.4/.github/OWNERS
  9. 44 0
      kubernetes-v1.15.4/.github/PULL_REQUEST_TEMPLATE.md
  10. 134 0
      kubernetes-v1.15.4/.gitignore
  11. 1 0
      kubernetes-v1.15.4/.kazelcfg.json
  12. 1 0
      kubernetes-v1.15.4/BUILD.bazel
  13. 1613 0
      kubernetes-v1.15.4/CHANGELOG-1.15.md
  14. 23 0
      kubernetes-v1.15.4/CHANGELOG.md
  15. 7 0
      kubernetes-v1.15.4/CONTRIBUTING.md
  16. 22140 0
      kubernetes-v1.15.4/Godeps/LICENSES
  17. 4 0
      kubernetes-v1.15.4/Godeps/OWNERS
  18. 202 0
      kubernetes-v1.15.4/LICENSE
  19. 1 0
      kubernetes-v1.15.4/Makefile
  20. 1 0
      kubernetes-v1.15.4/Makefile.generated_files
  21. 41 0
      kubernetes-v1.15.4/OWNERS
  22. 450 0
      kubernetes-v1.15.4/OWNERS_ALIASES
  23. 84 0
      kubernetes-v1.15.4/README.md
  24. 17 0
      kubernetes-v1.15.4/SECURITY_CONTACTS
  25. 29 0
      kubernetes-v1.15.4/SUPPORT.md
  26. 1 0
      kubernetes-v1.15.4/WORKSPACE
  27. 9 0
      kubernetes-v1.15.4/api/OWNERS
  28. 42 0
      kubernetes-v1.15.4/api/api-rules/README.md
  29. 176 0
      kubernetes-v1.15.4/api/api-rules/violation_exceptions.list
  30. 21 0
      kubernetes-v1.15.4/api/openapi-spec/BUILD
  31. 60 0
      kubernetes-v1.15.4/api/openapi-spec/README.md
  32. 108048 0
      kubernetes-v1.15.4/api/openapi-spec/swagger.json
  33. 191 0
      kubernetes-v1.15.4/build/BUILD
  34. 17 0
      kubernetes-v1.15.4/build/OWNERS
  35. 130 0
      kubernetes-v1.15.4/build/README.md
  36. 45 0
      kubernetes-v1.15.4/build/bindata.bzl
  37. 54 0
      kubernetes-v1.15.4/build/build-image/Dockerfile
  38. 1 0
      kubernetes-v1.15.4/build/build-image/VERSION
  39. 79 0
      kubernetes-v1.15.4/build/build-image/cross/Dockerfile
  40. 27 0
      kubernetes-v1.15.4/build/build-image/cross/Makefile
  41. 1 0
      kubernetes-v1.15.4/build/build-image/cross/VERSION
  42. 83 0
      kubernetes-v1.15.4/build/build-image/rsyncd.sh
  43. 81 0
      kubernetes-v1.15.4/build/code_generation.bzl
  44. 49 0
      kubernetes-v1.15.4/build/code_generation_test.bzl
  45. 740 0
      kubernetes-v1.15.4/build/common.sh
  46. 132 0
      kubernetes-v1.15.4/build/container.bzl
  47. 26 0
      kubernetes-v1.15.4/build/copy-output.sh
  48. 19 0
      kubernetes-v1.15.4/build/debian-base/Dockerfile
  49. 102 0
      kubernetes-v1.15.4/build/debian-base/Dockerfile.build
  50. 105 0
      kubernetes-v1.15.4/build/debian-base/Makefile
  51. 10 0
      kubernetes-v1.15.4/build/debian-base/OWNERS
  52. 12 0
      kubernetes-v1.15.4/build/debian-base/README.md
  53. 36 0
      kubernetes-v1.15.4/build/debian-base/clean-install
  54. 10 0
      kubernetes-v1.15.4/build/debian-base/excludes
  55. 1 0
      kubernetes-v1.15.4/build/debian-hyperkube-base/.gitignore
  56. 63 0
      kubernetes-v1.15.4/build/debian-hyperkube-base/Dockerfile
  57. 85 0
      kubernetes-v1.15.4/build/debian-hyperkube-base/Makefile
  58. 12 0
      kubernetes-v1.15.4/build/debian-hyperkube-base/OWNERS
  59. 25 0
      kubernetes-v1.15.4/build/debian-hyperkube-base/README.md
  60. 23 0
      kubernetes-v1.15.4/build/debian-iptables/Dockerfile
  61. 62 0
      kubernetes-v1.15.4/build/debian-iptables/Makefile
  62. 18 0
      kubernetes-v1.15.4/build/debian-iptables/OWNERS
  63. 24 0
      kubernetes-v1.15.4/build/debian-iptables/README.md
  64. 11 0
      kubernetes-v1.15.4/build/debs/10-kubeadm.conf
  65. 2 0
      kubernetes-v1.15.4/build/debs/50-kubeadm.conf
  66. 276 0
      kubernetes-v1.15.4/build/debs/BUILD
  67. 15 0
      kubernetes-v1.15.4/build/debs/OWNERS
  68. 2 0
      kubernetes-v1.15.4/build/debs/kubeadm.conf
  69. 12 0
      kubernetes-v1.15.4/build/debs/kubelet.service
  70. 30 0
      kubernetes-v1.15.4/build/debs/postinst
  71. 103 0
      kubernetes-v1.15.4/build/go.bzl
  72. 183 0
      kubernetes-v1.15.4/build/kazel_generated.bzl
  73. 648 0
      kubernetes-v1.15.4/build/lib/release.sh
  74. 31 0
      kubernetes-v1.15.4/build/make-build-image.sh
  75. 26 0
      kubernetes-v1.15.4/build/make-clean.sh
  76. 2 0
      kubernetes-v1.15.4/build/nsswitch.conf
  77. 21 0
      kubernetes-v1.15.4/build/openapi.bzl
  78. 27 0
      kubernetes-v1.15.4/build/package-tarballs.sh
  79. 3 0
      kubernetes-v1.15.4/build/pause/.gitignore
  80. 8 0
      kubernetes-v1.15.4/build/pause/CHANGELOG.md
  81. 18 0
      kubernetes-v1.15.4/build/pause/Dockerfile
  82. 109 0
      kubernetes-v1.15.4/build/pause/Makefile
  83. 36 0
      kubernetes-v1.15.4/build/pause/orphan.c
  84. 68 0
      kubernetes-v1.15.4/build/pause/pause.c
  85. 189 0
      kubernetes-v1.15.4/build/platforms.bzl
  86. 42 0
      kubernetes-v1.15.4/build/release-images.sh
  87. 49 0
      kubernetes-v1.15.4/build/release-in-a-container.sh
  88. 321 0
      kubernetes-v1.15.4/build/release-tars/BUILD
  89. 45 0
      kubernetes-v1.15.4/build/release.sh
  90. 79 0
      kubernetes-v1.15.4/build/root/.bazelrc
  91. 14 0
      kubernetes-v1.15.4/build/root/.kazelcfg.json
  92. 125 0
      kubernetes-v1.15.4/build/root/BUILD.root
  93. 639 0
      kubernetes-v1.15.4/build/root/Makefile
  94. 527 0
      kubernetes-v1.15.4/build/root/Makefile.generated_files
  95. 89 0
      kubernetes-v1.15.4/build/root/WORKSPACE
  96. 11 0
      kubernetes-v1.15.4/build/rpms/10-kubeadm.conf
  97. 2 0
      kubernetes-v1.15.4/build/rpms/50-kubeadm.conf
  98. 128 0
      kubernetes-v1.15.4/build/rpms/BUILD
  99. 15 0
      kubernetes-v1.15.4/build/rpms/OWNERS
  100. 0 0
      kubernetes-v1.15.4/build/rpms/cri-tools.spec

+ 1 - 0
kubernetes-v1.15.4/.bazelrc

@@ -0,0 +1 @@
+build/root/.bazelrc

+ 27 - 0
kubernetes-v1.15.4/.generated_files

@@ -0,0 +1,27 @@
+# Files that should be ignored by tools which do not want to consider generated
+# code.
+#
+# https://github.com/kubernetes/contrib/blob/master/mungegithub/mungers/size.go
+#
+# This file is a series of lines, each of the form:
+#     <type> <name>
+#
+# Type can be:
+#    path - an exact path to a single file
+#    file-name - an exact leaf filename, regardless of path
+#    path-prefix - a prefix match on the file path
+#    file-prefix - a prefix match of the leaf filename (no path)
+#    paths-from-repo - read a file from the repo and load file paths
+#
+
+file-prefix	zz_generated.
+
+file-name	BUILD
+file-name	types.generated.go
+file-name	generated.pb.go
+file-name	generated.proto
+file-name	types_swagger_doc_generated.go
+
+path-prefix	Godeps/
+path-prefix	vendor/
+path-prefix	pkg/generated/

+ 10 - 0
kubernetes-v1.15.4/.gitattributes

@@ -0,0 +1,10 @@
+hack/verify-flags/known-flags.txt merge=union
+test/test_owners.csv merge=union
+
+**/zz_generated.*.go linguist-generated=true
+**/types.generated.go linguist-generated=true
+**/generated.pb.go linguist-generated=true
+**/generated.proto
+**/types_swagger_doc_generated.go linguist-generated=true
+api/openapi-spec/*.json linguist-generated=true
+staging/**/go.sum linguist-generated=true

+ 29 - 0
kubernetes-v1.15.4/.github/ISSUE_TEMPLATE/bug-report.md

@@ -0,0 +1,29 @@
+---
+name: Bug Report
+about: Report a bug encountered while operating Kubernetes
+labels: kind/bug
+
+---
+
+<!-- Please use this template while reporting a bug and provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner. Thanks!
+
+If the matter is security related, please disclose it privately via https://kubernetes.io/security/
+-->
+
+
+**What happened**:
+
+**What you expected to happen**:
+
+**How to reproduce it (as minimally and precisely as possible)**:
+
+**Anything else we need to know?**:
+
+**Environment**:
+- Kubernetes version (use `kubectl version`):
+- Cloud provider or hardware configuration:
+- OS (e.g: `cat /etc/os-release`):
+- Kernel (e.g. `uname -a`):
+- Install tools:
+- Network plugin and version (if this is a network-related bug):
+- Others:

+ 11 - 0
kubernetes-v1.15.4/.github/ISSUE_TEMPLATE/enhancement.md

@@ -0,0 +1,11 @@
+---
+name: Enhancement Request
+about: Suggest an enhancement to the Kubernetes project
+labels: kind/feature
+
+---
+<!-- Please only use this template for submitting enhancement requests -->
+
+**What would you like to be added**:
+
+**Why is this needed**:

+ 20 - 0
kubernetes-v1.15.4/.github/ISSUE_TEMPLATE/failing-test.md

@@ -0,0 +1,20 @@
+---
+name: Failing Test
+about: Report test failures in Kubernetes CI jobs
+labels: kind/failing-test
+
+---
+
+<!-- Please only use this template for submitting reports about failing tests in Kubernetes CI jobs -->
+
+**Which jobs are failing**:
+
+**Which test(s) are failing**:
+
+**Since when has it been failing**:
+
+**Testgrid link**:
+
+**Reason for failure**:
+
+**Anything else we need to know**:

+ 18 - 0
kubernetes-v1.15.4/.github/ISSUE_TEMPLATE/support.md

@@ -0,0 +1,18 @@
+---
+name: Support Request
+about: Support request or question relating to Kubernetes
+labels: triage/support
+
+---
+
+<!--
+STOP -- PLEASE READ!
+
+GitHub is not the right place for support requests.
+
+If you're looking for help, check [Stack Overflow](https://stackoverflow.com/questions/tagged/kubernetes) and the [troubleshooting guide](https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/).
+
+You can also post your question on the [Kubernetes Slack](http://slack.k8s.io/) or the [Discuss Kubernetes](https://discuss.kubernetes.io/) forum.
+
+If the matter is security related, please disclose it privately via https://kubernetes.io/security/.
+-->

+ 16 - 0
kubernetes-v1.15.4/.github/OWNERS

@@ -0,0 +1,16 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+  - castrojo
+  - cblecker
+  - nikhita
+  - parispittman
+  - Phillels
+approvers:
+  - castrojo
+  - cblecker
+  - nikhita
+  - parispittman
+  - Phillels
+labels:
+  - sig/contributor-experience

+ 44 - 0
kubernetes-v1.15.4/.github/PULL_REQUEST_TEMPLATE.md

@@ -0,0 +1,44 @@
+<!--  Thanks for sending a pull request!  Here are some tips for you:
+
+1. If this is your first time, please read our contributor guidelines: https://git.k8s.io/community/contributors/guide#your-first-contribution and developer guide https://git.k8s.io/community/contributors/devel/development.md#development-guide
+2. Please label this pull request according to what type of issue you are addressing, especially if this is a release targeted pull request. For reference on required PR/issue labels, read here:
+https://git.k8s.io/community/contributors/devel/sig-release/release.md#issuepr-kind-label
+3. Ensure you have added or ran the appropriate tests for your PR: https://git.k8s.io/community/contributors/devel/testing.md
+4. If you want *faster* PR reviews, read how: https://git.k8s.io/community/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
+5. Follow the instructions for writing a release note: https://git.k8s.io/community/contributors/guide/release-notes.md
+6. If the PR is unfinished, see how to mark it: https://git.k8s.io/community/contributors/guide/pull-requests.md#marking-unfinished-pull-requests
+-->
+
+**What type of PR is this?**
+> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line:
+>
+> /kind api-change
+> /kind bug
+> /kind cleanup
+> /kind design
+> /kind documentation
+> /kind failing-test
+> /kind feature
+> /kind flake
+
+**What this PR does / why we need it**:
+
+**Which issue(s) this PR fixes**:
+<!--
+*Automatically closes linked issue when PR is merged.
+Usage: `Fixes #<issue number>`, or `Fixes (paste link of issue)`.
+_If PR is about `failing-tests or flakes`, please post the related issues/tests in a comment and do not use `Fixes`_*
+-->
+Fixes #
+
+**Special notes for your reviewer**:
+
+**Does this PR introduce a user-facing change?**:
+<!--
+If no, just write "NONE" in the release-note block below.
+If yes, a release note is required:
+Enter your extended release note in the block below. If the PR requires additional action from users switching to the new release, include the string "action required".
+-->
+```release-note
+
+```

+ 134 - 0
kubernetes-v1.15.4/.gitignore

@@ -0,0 +1,134 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# OSX trash
+.DS_Store
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA
+.idea/
+*.iml
+
+# Vscode files
+.vscode
+
+# This is where the result of the go build goes
+/output*/
+/_output*/
+/_output
+
+# Emacs save files
+*~
+\#*\#
+.\#*
+
+# Vim-related files
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+
+# cscope-related files
+cscope.*
+
+# Go test binaries
+*.test
+/hack/.test-cmd-auth
+
+# JUnit test output from ginkgo e2e tests
+/junit*.xml
+
+# Mercurial files
+**/.hg
+**/.hg*
+
+# Vagrant
+.vagrant
+network_closure.sh
+
+# Local cluster env variables
+/cluster/env.sh
+
+# Compiled binaries in third_party
+/third_party/pkg
+
+# Also ignore etcd installed by hack/install-etcd.sh
+/third_party/etcd*
+/default.etcd
+
+# User cluster configs
+.kubeconfig
+
+.tags*
+
+# Version file for dockerized build
+.dockerized-kube-version-defs
+
+# Web UI
+/www/master/node_modules/
+/www/master/npm-debug.log
+/www/master/shared/config/development.json
+
+# Karma output
+/www/test_out
+
+# precommit temporary directories created by ./hack/verify-generated-docs.sh and ./hack/lib/util.sh
+/_tmp/
+/doc_tmp/
+
+# Test artifacts produced by Jenkins jobs
+/_artifacts/
+
+# Go dependencies installed on Jenkins
+/_gopath/
+
+# Config directories created by gcloud and gsutil on Jenkins
+/.config/gcloud*/
+/.gsutil/
+
+# CoreOS stuff
+/cluster/libvirt-coreos/coreos_*.img
+
+# Juju Stuff
+/cluster/juju/charms/*
+/cluster/juju/bundles/local.yaml
+
+# Downloaded Kubernetes binary release
+/kubernetes/
+
+# direnv .envrc files
+.envrc
+
+# Downloaded kubernetes binary release tar ball
+kubernetes.tar.gz
+
+# generated files in any directory
+# TODO(thockin): uncomment this when we stop committing the generated files.
+#zz_generated.*
+zz_generated.openapi.go
+# TODO(roycaihw): remove this when we stop committing the generated definition
+!staging/src/k8s.io/apiextensions-apiserver/pkg/generated/openapi/zz_generated.openapi.go
+zz_generated_*_test.go
+
+# make-related metadata
+/.make/
+
+# Just in time generated data in the source, should never be committed
+/test/e2e/generated/bindata.go
+
+# This file used by some vendor repos (e.g. github.com/go-openapi/...) to store secret variables and should not be ignored
+!\.drone\.sec
+
+# Godeps workspace
+/Godeps/_workspace
+
+/bazel-*
+*.pyc
+
+# generated by verify-vendor.sh
+vendordiff.patch

+ 1 - 0
kubernetes-v1.15.4/.kazelcfg.json

@@ -0,0 +1 @@
+build/root/.kazelcfg.json

+ 1 - 0
kubernetes-v1.15.4/BUILD.bazel

@@ -0,0 +1 @@
+build/root/BUILD.root

File diff suppressed because it is too large
+ 1613 - 0
kubernetes-v1.15.4/CHANGELOG-1.15.md


+ 23 - 0
kubernetes-v1.15.4/CHANGELOG.md

@@ -0,0 +1,23 @@
+## Development release:
+
+
+## Current release:
+
+- [CHANGELOG-1.14.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.14.md)
+
+## Older releases:
+
+- [CHANGELOG-1.13.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md)
+- [CHANGELOG-1.12.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.12.md)
+- [CHANGELOG-1.11.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md)
+- [CHANGELOG-1.10.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md)
+- [CHANGELOG-1.9.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.9.md)
+- [CHANGELOG-1.8.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.8.md)
+- [CHANGELOG-1.7.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.7.md)
+- [CHANGELOG-1.6.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.6.md)
+- [CHANGELOG-1.5.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.5.md)
+- [CHANGELOG-1.4.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.4.md)
+- [CHANGELOG-1.3.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.3.md)
+- [CHANGELOG-1.2.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.2.md)
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/CHANGELOG.md?pixel)]()

+ 7 - 0
kubernetes-v1.15.4/CONTRIBUTING.md

@@ -0,0 +1,7 @@
+# Contributing
+
+Welcome to Kubernetes! If you are interested in contributing to the [Kubernetes code repo](README.md) then checkout the [Contributor's Guide](https://git.k8s.io/community/contributors/guide/)
+
+The [Kubernetes community repo](https://github.com/kubernetes/community) contains information on how the community is organized and other information that is pertinent to contributing.
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/CONTRIBUTING.md?pixel)]()

File diff suppressed because it is too large
+ 22140 - 0
kubernetes-v1.15.4/Godeps/LICENSES


+ 4 - 0
kubernetes-v1.15.4/Godeps/OWNERS

@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+- dep-approvers

+ 202 - 0
kubernetes-v1.15.4/LICENSE

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 1 - 0
kubernetes-v1.15.4/Makefile

@@ -0,0 +1 @@
+build/root/Makefile

+ 1 - 0
kubernetes-v1.15.4/Makefile.generated_files

@@ -0,0 +1 @@
+build/root/Makefile.generated_files

+ 41 - 0
kubernetes-v1.15.4/OWNERS

@@ -0,0 +1,41 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+filters:
+  ".*":
+    reviewers:
+      - brendandburns
+      - dchen1107
+      - jbeda
+      - lavalamp
+      - smarterclayton
+      - thockin
+      - liggitt
+    approvers:
+      - bgrant0607
+      - brendandburns
+      - dchen1107
+      - jbeda
+      - monopole # To move code per kubernetes/community#598
+      - lavalamp
+      - smarterclayton
+      - thockin
+      - wojtek-t
+      - liggitt
+
+  # Bazel build infrastructure changes often touch files throughout the tree
+  "\\.bzl$":
+    reviewers:
+      - ixdy
+    approvers:
+      - ixdy
+  "BUILD(\\.bazel)?$":
+    approvers:
+      - ixdy
+
+  # go.{mod,sum} files relate to go dependencies, and should be reviewed by the
+  # dep-approvers
+  "go\\.(mod|sum)$":
+    required_reviewers:
+      - kubernetes/dep-approvers
+    labels:
+      - area/dependency

+ 450 - 0
kubernetes-v1.15.4/OWNERS_ALIASES

@@ -0,0 +1,450 @@
+aliases:
+  # sig-auth subproject aliases
+  sig-auth-audit-approvers:
+    - sttts
+    - tallclair
+  sig-auth-audit-reviewers:
+    - CaoShuFeng
+    - hzxuzhonghu
+    - lavalamp
+    - sttts
+    - tallclair
+
+  sig-auth-authenticators-approvers:
+    - deads2k
+    - liggitt
+    - mikedanese
+  sig-auth-authenticators-reviewers:
+    - deads2k
+    - enj
+    - jianhuiz
+    - lavalamp
+    - liggitt
+    - mbohlool
+    - mikedanese
+    - sttts
+    - wojtek-t
+
+  sig-auth-authorizers-approvers:
+    - deads2k
+    - liggitt
+    - mikedanese
+  sig-auth-authorizers-reviewers:
+    - david-mcmahon
+    - deads2k
+    - dims
+    - enj
+    - erictune
+    - jianhuiz
+    - krousey
+    - lavalamp
+    - liggitt
+    - mbohlool
+    - mikedanese
+    - mml
+    - ncdc
+    - nikhiljindal
+    - smarterclayton
+    - sttts
+    - thockin
+    - wojtek-t
+
+  sig-auth-certificates-approvers:
+    - liggitt
+    - mikedanese
+    - smarterclayton
+  sig-auth-certificates-reviewers:
+    - awly
+    - caesarxuchao
+    - david-mcmahon
+    - deads2k
+    - dims
+    - enj
+    - errordeveloper
+    - hongchaodeng
+    - jianhuiz
+    - lavalamp
+    - liggitt
+    - mbohlool
+    - mikedanese
+    - smarterclayton
+    - sttts
+    - thockin
+    - timothysc
+    - wojtek-t
+
+  sig-auth-encryption-at-rest-approvers:
+    - immutableT
+    - smarterclayton
+  sig-auth-encryption-at-rest-reviewers:
+    - enj
+    - immutableT
+    - lavalamp
+    - liggitt
+    - sakshamsharma
+    - smarterclayton
+    - wojtek-t
+
+  sig-auth-node-isolation-approvers:
+    - deads2k
+    - liggitt
+    - mikedanese
+    - tallclair
+  sig-auth-node-isolation-reviewers:
+    - deads2k
+    - liggitt
+    - mikedanese
+    - tallclair
+
+  sig-auth-policy-approvers:
+    - deads2k
+    - liggitt
+    - tallclair
+  sig-auth-policy-reviewers:
+    - deads2k
+    - hongchaodeng
+    - jianhuiz
+    - liggitt
+    - mbohlool
+    - pweil-
+    - tallclair
+    - krmayankk
+
+  sig-auth-serviceaccounts-approvers:
+    - deads2k
+    - liggitt
+    - mikedanese
+  sig-auth-serviceaccounts-reviewers:
+    - awly
+    - deads2k
+    - enj
+    - liggitt
+    - mikedanese
+    - WanLinghao
+
+  sig-storage-reviewers:
+    - saad-ali
+    - childsb
+
+  sig-scheduling-maintainers:
+    - bsalamat
+    - k82cn
+    - wojtek-t
+    - ravisantoshgudimetla
+    - Huang-Wei
+  sig-scheduling:
+    - bsalamat
+    - k82cn
+    - resouer
+    - ravisantoshgudimetla
+    - misterikkit
+    - Huang-Wei
+    - wgliang
+
+  sig-cli-maintainers:
+    - adohe
+    - brendandburns
+    - deads2k
+    - janetkuo
+    - liggitt
+    - seans3
+    - monopole
+    - droot
+    - apelisse
+    - mengqiy
+    - smarterclayton
+    - soltysh
+  sig-cli:
+    - adohe
+    - deads2k
+    - derekwaynecarr
+    - dixudx
+    - dims
+    - dshulyak
+    - eparis
+    - ghodss
+    - juanvallejo
+    - mengqiy
+    - rootfs
+    - seans3
+    - shiywang
+    - smarterclayton
+    - soltysh
+  sig-testing-reviewers:
+    - bentheelder
+    - cblecker
+    - fejta
+    - krzyzacy
+    - ixdy
+    - spiffxp
+  sig-testing-approvers:
+    - bentheelder
+    - cblecker
+    - fejta
+    - krzyzacy
+    - ixdy
+    - spiffxp
+  sig-node-reviewers:
+    - Random-Liu
+    - dashpole
+    - dchen1107
+    - derekwaynecarr
+    - dims
+    - feiskyer
+    - mtaufen
+    - pmorie
+    - resouer
+    - sjenning
+    - sjpotter
+    - tallclair
+    - tmrts
+    - vishh
+    - yifan-gu
+    - yujuhong
+    - krmayankk
+  sig-network-approvers:
+    - bowei
+    - caseydavenport
+    - danwinship
+    - dcbw
+    - dnardo
+    - freehan
+    - johnbelamaric
+    - mrhohn
+    - nicksardo
+    - thockin
+  sig-network-reviewers:
+    - bowei
+    - caseydavenport
+    - danwinship
+    - dcbw
+    - dnardo
+    - freehan
+    - johnbelamaric
+    - mrhohn
+    - nicksardo
+    - thockin
+    - rramkumar1
+    - cmluciano
+    - m1093782566
+  sig-apps-approvers:
+    - kow3ns
+    - janetkuo
+    - soltysh
+    - tnozicka
+    - erictune
+    - smarterclayton
+  sig-apps-reviewers:
+    - enisoc
+    - erictune
+    - foxish
+    - janetkuo
+    - kow3ns
+    - mortent
+    - smarterclayton
+    - soltysh
+    - tnozicka
+    - krmayankk
+  sig-autoscaling-maintainers:
+    - aleksandra-malinowska
+    - bskiba
+    - MaciekPytel
+    - mwielgus
+
+  api-approvers:
+    - erictune
+    - lavalamp
+    - smarterclayton
+    - thockin
+    - liggitt
+    # - bgrant0607 # manual escalations only
+
+  # subsets of api-approvers by sig area to help focus approval requests to those with domain knowledge
+  sig-api-machinery-api-approvers:
+    - lavalamp
+    - liggitt
+    - smarterclayton
+
+  sig-apps-api-approvers:
+    - erictune
+    - lavalamp
+    - liggitt
+    - smarterclayton
+
+  sig-auth-api-approvers:
+    - liggitt
+    - smarterclayton
+
+  sig-cli-api-approvers:
+    - liggitt
+    - smarterclayton
+
+  sig-cloud-provider-api-approvers:
+    - liggitt
+    - thockin
+
+  sig-cluster-lifecycle-api-approvers:
+    - liggitt
+    - smarterclayton
+
+  sig-network-api-approvers:
+    - smarterclayton
+    - thockin
+
+  sig-network-api-reviewers:
+    - caseydavenport
+    - cmluciano
+    - danwinship
+    - thockin
+
+  sig-node-api-approvers:
+    - smarterclayton
+    - thockin
+
+  sig-scheduling-api-approvers:
+    - lavalamp
+    - smarterclayton
+    - thockin
+
+  sig-storage-api-approvers:
+    - liggitt
+    - thockin
+
+  sig-windows-api-approvers:
+    - smarterclayton
+    - thockin
+    - liggitt
+  
+  api-reviewers:
+    - erictune
+    - lavalamp
+    - smarterclayton
+    - thockin
+    - liggitt
+    - wojtek-t
+    - deads2k
+    - yujuhong
+    - brendandburns
+    - derekwaynecarr
+    - caesarxuchao
+    - vishh
+    - mikedanese
+    - nikhiljindal
+    - gmarek
+    - davidopp
+    - pmorie
+    - sttts
+    - dchen1107
+    - saad-ali
+    - zmerlynn
+    - luxas
+    - janetkuo
+    - justinsb
+    - pwittrock
+    - roberthbailey
+    - ncdc
+    - tallclair
+    - yifan-gu
+    - eparis
+    - mwielgus
+    - timothysc
+    - soltysh
+    - piosz
+    - jsafrane
+    - jbeda
+
+  # api-reviewers targeted by sig area
+  # see https://git.k8s.io/community/sig-architecture/api-review-process.md#training-reviews
+  
+  sig-api-machinery-api-reviewers:
+    - caesarxuchao
+    - deads2k
+    - jpbetz
+    - sttts
+  
+  # sig-apps-api-reviewers:
+  #   - 
+  #   - 
+
+  sig-auth-api-reviewers:
+    - enj
+    - mikedanese
+
+  sig-cli-api-reviewers:
+    - pwittrock
+    - soltysh
+
+  sig-cloud-provider-api-reviewers:
+    - andrewsykim
+    - cheftako
+  
+  # sig-cluster-lifecycle-api-reviewers:
+  #   - 
+  #   -
+  
+  sig-node-api-reviewers:
+    - dchen1107
+    - derekwaynecarr
+    - tallclair
+    - yujuhong
+  
+  sig-scheduling-api-reviewers:
+      - bsalamat
+      - k82cn
+  
+  sig-storage-api-reviewers:
+    - saad-ali
+    - msau42
+    - jsafrane
+
+  
+  sig-windows-api-reviewers:
+    - patricklang
+    - ddebroy
+    - benmoss
+
+  dep-approvers:
+    - apelisse
+    - BenTheElder
+    - cblecker
+    - dims
+    - thockin
+    - sttts
+    - soltysh
+  feature-approvers:
+    - bgrant0607      # Architecture
+    - brancz          # Instrumentation
+    - bsalamat        # Scheduling
+    - calebamiles     # Release
+    - caseydavenport  # Network
+    - childsb         # Storage
+    - countspongebob  # Scalability
+    - csbell          # Multicluster
+    - dcbw            # Network
+    - dchen1107       # Node
+    - deads2k         # API Machinery
+    - derekwaynecarr  # Node
+    - dghubble        # On Premise
+    - jdumars         # Architecture, Cluster Ops, Release
+    - kow3ns          # Apps
+    - lavalamp        # API Machinery
+    - liggitt         # Auth
+    - lukemarsden     # Cluster Lifecycle
+    - luxas           # Cluster Lifecycle
+    - marcoceppi      # On Premise
+    - mattfarina      # Apps
+    - michmike        # Windows
+    - mwielgus        # Autoscaling
+    - piosz           # Instrumentation
+    - prydonius       # Apps
+    - pwittrock       # CLI
+    - quinton-hoole   # Multicluster
+    - roberthbailey   # Cluster Lifecycle
+    - saad-ali        # Storage
+    - seans3          # CLI
+    - soltysh         # CLI
+    - tallclair       # Auth
+    - thockin         # Network
+    - timothysc       # Cluster Lifecycle, Scheduling
+    - wojtek-t        # Scalability
+    - zehicle         # Cluster Ops

+ 84 - 0
kubernetes-v1.15.4/README.md

@@ -0,0 +1,84 @@
+# Kubernetes
+
+[![GoDoc Widget]][GoDoc] [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/569/badge)](https://bestpractices.coreinfrastructure.org/projects/569)
+
+<img src="https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png" width="100">
+
+----
+
+Kubernetes is an open source system for managing [containerized applications]
+across multiple hosts, providing basic mechanisms for deployment, maintenance,
+and scaling of applications.
+
+Kubernetes builds upon a decade and a half of experience at Google running
+production workloads at scale using a system called [Borg],
+combined with best-of-breed ideas and practices from the community.
+
+Kubernetes is hosted by the Cloud Native Computing Foundation ([CNCF]).
+If you are a company that wants to help shape the evolution of
+technologies that are container-packaged, dynamically-scheduled
+and microservices-oriented, consider joining the CNCF.
+For details about who's involved and how Kubernetes plays a role,
+read the CNCF [announcement].
+
+----
+
+## To start using Kubernetes
+
+See our documentation on [kubernetes.io].
+
+Try our [interactive tutorial].
+
+Take a free course on [Scalable Microservices with Kubernetes].
+
+## To start developing Kubernetes
+
+The [community repository] hosts all information about
+building Kubernetes from source, how to contribute code
+and documentation, who to contact about what, etc.
+
+If you want to build Kubernetes right away there are two options:
+
+##### You have a working [Go environment].
+
+```
+go get -d k8s.io/kubernetes
+cd $GOPATH/src/k8s.io/kubernetes
+make
+```
+
+##### You have a working [Docker environment].
+
+```
+git clone https://github.com/kubernetes/kubernetes
+cd kubernetes
+make quick-release
+```
+
+For the full story, head over to the [developer's documentation].
+
+## Support
+
+If you need support, start with the [troubleshooting guide],
+and work your way through the process that we've outlined.
+
+That said, if you have questions, reach out to us
+[one way or another][communication].
+
+[announcement]: https://cncf.io/news/announcement/2015/07/new-cloud-native-computing-foundation-drive-alignment-among-container
+[Borg]: https://research.google.com/pubs/pub43438.html
+[CNCF]: https://www.cncf.io/about
+[communication]: https://git.k8s.io/community/communication
+[community repository]: https://git.k8s.io/community
+[containerized applications]: https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/
+[developer's documentation]: https://git.k8s.io/community/contributors/devel#readme
+[Docker environment]: https://docs.docker.com/engine
+[Go environment]: https://golang.org/doc/install
+[GoDoc]: https://godoc.org/k8s.io/kubernetes
+[GoDoc Widget]: https://godoc.org/k8s.io/kubernetes?status.svg
+[interactive tutorial]: https://kubernetes.io/docs/tutorials/kubernetes-basics
+[kubernetes.io]: https://kubernetes.io
+[Scalable Microservices with Kubernetes]: https://www.udacity.com/course/scalable-microservices-with-kubernetes--ud615
+[troubleshooting guide]: https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/README.md?pixel)]()

+ 17 - 0
kubernetes-v1.15.4/SECURITY_CONTACTS

@@ -0,0 +1,17 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Committee to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+cjcullen
+jessfraz
+liggitt
+philips
+tallclair

+ 29 - 0
kubernetes-v1.15.4/SUPPORT.md

@@ -0,0 +1,29 @@
+## Support for deploying and using Kubernetes
+
+Welcome to Kubernetes! We use GitHub for tracking bugs and feature requests.
+This isn't the right place to get support for using Kubernetes, but the following
+resources are available below — thanks for understanding.
+
+### Stack Overflow
+
+The Kubernetes Community is active on Stack Overflow, you can post your questions there:
+
+* [Kubernetes on Stack Overflow](https://stackoverflow.com/questions/tagged/kubernetes)
+
+  * Here are some tips [about how to ask good questions](https://stackoverflow.com/help/how-to-ask).
+  * Don't forget to check to see [what's on topic](http://stackoverflow.com/help/on-topic).
+
+### Documentation
+
+* [User Documentation](https://kubernetes.io/docs/)
+* [Troubleshooting Guide](https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/)
+
+### Real-time Chat
+
+* [Slack](https://kubernetes.slack.com) ([registration](http://slack.k8s.io)):
+The `#kubernetes-users` and `#kubernetes-novice` channels are the usual places where
+people offer support.
+
+### Forum
+
+* [Kubernetes Official Forum](https://discuss.kubernetes.io)

+ 1 - 0
kubernetes-v1.15.4/WORKSPACE

@@ -0,0 +1 @@
+build/root/WORKSPACE

+ 9 - 0
kubernetes-v1.15.4/api/OWNERS

@@ -0,0 +1,9 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+# Disable inheritance as this is an api owners file
+options:
+  no_parent_owners: true
+approvers:
+- api-approvers
+reviewers:
+- api-reviewers

+ 42 - 0
kubernetes-v1.15.4/api/api-rules/README.md

@@ -0,0 +1,42 @@
+# Existing API Rule Violations
+
+This folder contains the checked-in report file of known API rule violations.
+The file violation\_exceptions.list is used by the Make rule during OpenAPI spec generation to make
+sure that no new API rule violations are introduced into our code base.
+
+## API Rule Violation Format
+
+The report file [violation\_exceptions.list](./violation_exceptions.list) is in format of:
+
+ * ***API rule violation: \<RULE\>,\<PACKAGE\>,\<TYPE\>,\<FIELD\>***
+
+e.g.
+
+ * ***API rule violation: names_match,k8s.io/api/core/v1,Event,ReportingController***
+
+And the violation list is sorted alphabetically in each of the \<RULE\>, \<PACKAGE\>, \<TYPE\>, \<FIELD\> levels.
+
+## How to resolve API Rule Check Failure
+
+The Make rule returns an error when the newly generated violation report differs from this
+checked-in violation report.
+
+Our goal is that exceptions should never be added to this list, only fixed and removed.
+For new APIs, this is a hard requirement. For APIs that are e.g. being moved between
+versions or groups without other changes, it is OK for your API reviewer to make an
+exception.
+
+If you're removing violations from the exception list, or if you have good
+reasons to add new violations to this list, please update the file using:
+
+ - `make generated_files UPDATE_API_KNOWN_VIOLATIONS=true`
+
+It is up to API reviewers to review the list and make sure new APIs follow our API conventions.
+
+**NOTE**: please don't hide changes to this file in a "generated changes" commit, treat it as
+source code instead.
+
+## API Rules Being Enforced
+
+For more information about the API rules being checked, please refer to
+https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators/rules

+ 176 - 0
kubernetes-v1.15.4/api/api-rules/violation_exceptions.list

@@ -0,0 +1,176 @@
+API rule violation: names_match,k8s.io/api/authorization/v1beta1,SubjectAccessReviewSpec,Groups
+API rule violation: names_match,k8s.io/api/core/v1,AzureDiskVolumeSource,DataDiskURI
+API rule violation: names_match,k8s.io/api/core/v1,ContainerStatus,LastTerminationState
+API rule violation: names_match,k8s.io/api/core/v1,DaemonEndpoint,Port
+API rule violation: names_match,k8s.io/api/core/v1,Event,ReportingController
+API rule violation: names_match,k8s.io/api/core/v1,FCVolumeSource,WWIDs
+API rule violation: names_match,k8s.io/api/core/v1,GlusterfsPersistentVolumeSource,EndpointsName
+API rule violation: names_match,k8s.io/api/core/v1,GlusterfsVolumeSource,EndpointsName
+API rule violation: names_match,k8s.io/api/core/v1,ISCSIPersistentVolumeSource,DiscoveryCHAPAuth
+API rule violation: names_match,k8s.io/api/core/v1,ISCSIPersistentVolumeSource,SessionCHAPAuth
+API rule violation: names_match,k8s.io/api/core/v1,ISCSIVolumeSource,DiscoveryCHAPAuth
+API rule violation: names_match,k8s.io/api/core/v1,ISCSIVolumeSource,SessionCHAPAuth
+API rule violation: names_match,k8s.io/api/core/v1,NodeResources,Capacity
+API rule violation: names_match,k8s.io/api/core/v1,NodeSpec,DoNotUse_ExternalID
+API rule violation: names_match,k8s.io/api/core/v1,PersistentVolumeSource,CephFS
+API rule violation: names_match,k8s.io/api/core/v1,PersistentVolumeSource,StorageOS
+API rule violation: names_match,k8s.io/api/core/v1,PodSpec,DeprecatedServiceAccount
+API rule violation: names_match,k8s.io/api/core/v1,RBDPersistentVolumeSource,CephMonitors
+API rule violation: names_match,k8s.io/api/core/v1,RBDPersistentVolumeSource,RBDImage
+API rule violation: names_match,k8s.io/api/core/v1,RBDPersistentVolumeSource,RBDPool
+API rule violation: names_match,k8s.io/api/core/v1,RBDPersistentVolumeSource,RadosUser
+API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,CephMonitors
+API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RBDImage
+API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RBDPool
+API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RadosUser
+API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,CephFS
+API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,StorageOS
+API rule violation: names_match,k8s.io/api/policy/v1beta1,PodDisruptionBudgetStatus,PodDisruptionsAllowed
+API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,CustomResourceColumnDefinition,JSONPath
+API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSON,Raw
+API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,Ref
+API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,Schema
+API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,XEmbeddedResource
+API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,XIntOrString
+API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,XPreserveUnknownFields
+API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaPropsOrArray,JSONSchemas
+API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaPropsOrArray,Schema
+API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaPropsOrBool,Allows
+API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaPropsOrBool,Schema
+API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaPropsOrStringArray,Property
+API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaPropsOrStringArray,Schema
+API rule violation: names_match,k8s.io/apimachinery/pkg/api/resource,Quantity,Format
+API rule violation: names_match,k8s.io/apimachinery/pkg/api/resource,Quantity,d
+API rule violation: names_match,k8s.io/apimachinery/pkg/api/resource,Quantity,i
+API rule violation: names_match,k8s.io/apimachinery/pkg/api/resource,Quantity,s
+API rule violation: names_match,k8s.io/apimachinery/pkg/api/resource,int64Amount,scale
+API rule violation: names_match,k8s.io/apimachinery/pkg/api/resource,int64Amount,value
+API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,APIResourceList,APIResources
+API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,Duration,Duration
+API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,InternalEvent,Object
+API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,InternalEvent,Type
+API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,MicroTime,Time
+API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,StatusCause,Type
+API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,Time,Time
+API rule violation: names_match,k8s.io/apimachinery/pkg/runtime,RawExtension,Raw
+API rule violation: names_match,k8s.io/apimachinery/pkg/runtime,Unknown,ContentEncoding
+API rule violation: names_match,k8s.io/apimachinery/pkg/runtime,Unknown,ContentType
+API rule violation: names_match,k8s.io/apimachinery/pkg/runtime,Unknown,Raw
+API rule violation: names_match,k8s.io/apimachinery/pkg/util/intstr,IntOrString,IntVal
+API rule violation: names_match,k8s.io/apimachinery/pkg/util/intstr,IntOrString,StrVal
+API rule violation: names_match,k8s.io/apimachinery/pkg/util/intstr,IntOrString,Type
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,AttachDetachControllerConfiguration,DisableAttachDetachReconcilerSync
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,AttachDetachControllerConfiguration,ReconcilerSyncLoopPeriod
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,CSRSigningControllerConfiguration,ClusterSigningCertFile
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,CSRSigningControllerConfiguration,ClusterSigningDuration
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,CSRSigningControllerConfiguration,ClusterSigningKeyFile
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,CloudProviderConfiguration,CloudConfigFile
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,CloudProviderConfiguration,Name
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,DaemonSetControllerConfiguration,ConcurrentDaemonSetSyncs
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,DeploymentControllerConfiguration,ConcurrentDeploymentSyncs
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,DeploymentControllerConfiguration,DeploymentControllerSyncPeriod
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,DeprecatedControllerConfiguration,DeletingPodsBurst
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,DeprecatedControllerConfiguration,DeletingPodsQPS
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,DeprecatedControllerConfiguration,RegisterRetryCount
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,EndpointControllerConfiguration,ConcurrentEndpointSyncs
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GarbageCollectorControllerConfiguration,ConcurrentGCSyncs
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GarbageCollectorControllerConfiguration,EnableGarbageCollector
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GarbageCollectorControllerConfiguration,GCIgnoredResources
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GenericControllerManagerConfiguration,Address
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GenericControllerManagerConfiguration,ClientConnection
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GenericControllerManagerConfiguration,ControllerStartInterval
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GenericControllerManagerConfiguration,Controllers
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GenericControllerManagerConfiguration,Debugging
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GenericControllerManagerConfiguration,LeaderElection
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GenericControllerManagerConfiguration,MinResyncPeriod
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GenericControllerManagerConfiguration,Port
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GroupResource,Group
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,GroupResource,Resource
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerCPUInitializationPeriod
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerDownscaleForbiddenWindow
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerDownscaleStabilizationWindow
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerInitialReadinessDelay
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerSyncPeriod
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerTolerance
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerUpscaleForbiddenWindow
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,HPAControllerConfiguration,HorizontalPodAutoscalerUseRESTClients
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,JobControllerConfiguration,ConcurrentJobSyncs
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeCloudSharedConfiguration,AllocateNodeCIDRs
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeCloudSharedConfiguration,AllowUntaggedCloud
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeCloudSharedConfiguration,CIDRAllocatorType
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeCloudSharedConfiguration,CloudProvider
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeCloudSharedConfiguration,ClusterCIDR
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeCloudSharedConfiguration,ClusterName
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeCloudSharedConfiguration,ConfigureCloudRoutes
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeCloudSharedConfiguration,ExternalCloudVolumePlugin
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeCloudSharedConfiguration,NodeMonitorPeriod
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeCloudSharedConfiguration,NodeSyncPeriod
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeCloudSharedConfiguration,RouteReconciliationPeriod
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeCloudSharedConfiguration,UseServiceAccountCredentials
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,AttachDetachController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,CSRSigningController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,DaemonSetController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,DeploymentController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,DeprecatedController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,EndpointController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,GarbageCollectorController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,Generic
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,HPAController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,JobController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,KubeCloudShared
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,NamespaceController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,NodeIPAMController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,NodeLifecycleController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,PersistentVolumeBinderController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,PodGCController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,ReplicaSetController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,ReplicationController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,ResourceQuotaController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,SAController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,ServiceController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,KubeControllerManagerConfiguration,TTLAfterFinishedController
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NamespaceControllerConfiguration,ConcurrentNamespaceSyncs
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NamespaceControllerConfiguration,NamespaceSyncPeriod
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeIPAMControllerConfiguration,NodeCIDRMaskSize
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeIPAMControllerConfiguration,ServiceCIDR
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,EnableTaintManager
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,LargeClusterSizeThreshold
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,NodeEvictionRate
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,NodeMonitorGracePeriod
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,NodeStartupGracePeriod
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,PodEvictionTimeout
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,SecondaryNodeEvictionRate
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,UnhealthyZoneThreshold
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,PersistentVolumeBinderControllerConfiguration,PVClaimBinderSyncPeriod
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,PersistentVolumeBinderControllerConfiguration,VolumeConfiguration
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,PersistentVolumeRecyclerConfiguration,IncrementTimeoutHostPath
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,PersistentVolumeRecyclerConfiguration,IncrementTimeoutNFS
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,PersistentVolumeRecyclerConfiguration,MaximumRetry
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,PersistentVolumeRecyclerConfiguration,MinimumTimeoutHostPath
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,PersistentVolumeRecyclerConfiguration,MinimumTimeoutNFS
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,PersistentVolumeRecyclerConfiguration,PodTemplateFilePathHostPath
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,PersistentVolumeRecyclerConfiguration,PodTemplateFilePathNFS
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,PodGCControllerConfiguration,TerminatedPodGCThreshold
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,ReplicaSetControllerConfiguration,ConcurrentRSSyncs
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,ReplicationControllerConfiguration,ConcurrentRCSyncs
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,ResourceQuotaControllerConfiguration,ConcurrentResourceQuotaSyncs
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,ResourceQuotaControllerConfiguration,ResourceQuotaSyncPeriod
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,SAControllerConfiguration,ConcurrentSATokenSyncs
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,SAControllerConfiguration,RootCAFile
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,SAControllerConfiguration,ServiceAccountKeyFile
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,ServiceControllerConfiguration,ConcurrentServiceSyncs
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,TTLAfterFinishedControllerConfiguration,ConcurrentTTLSyncs
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,VolumeConfiguration,EnableDynamicProvisioning
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,VolumeConfiguration,EnableHostPathProvisioning
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,VolumeConfiguration,FlexVolumePluginDir
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,VolumeConfiguration,PersistentVolumeRecyclerConfiguration
+API rule violation: names_match,k8s.io/kube-proxy/config/v1alpha1,KubeProxyConfiguration,IPTables
+API rule violation: names_match,k8s.io/kubelet/config/v1beta1,KubeletConfiguration,IPTablesDropBit
+API rule violation: names_match,k8s.io/kubelet/config/v1beta1,KubeletConfiguration,IPTablesMasqueradeBit
+API rule violation: names_match,k8s.io/kubelet/config/v1beta1,KubeletConfiguration,ResolverConfig
+API rule violation: names_match,k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/v1alpha1,CloudControllerManagerConfiguration,Generic
+API rule violation: names_match,k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/v1alpha1,CloudControllerManagerConfiguration,KubeCloudShared
+API rule violation: names_match,k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/v1alpha1,CloudControllerManagerConfiguration,NodeStatusUpdateFrequency
+API rule violation: names_match,k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/v1alpha1,CloudControllerManagerConfiguration,ServiceController
+API rule violation: names_match,k8s.io/metrics/pkg/apis/custom_metrics/v1beta1,MetricValue,WindowSeconds
+API rule violation: names_match,k8s.io/metrics/pkg/apis/external_metrics/v1beta1,ExternalMetricValue,WindowSeconds

+ 21 - 0
kubernetes-v1.15.4/api/openapi-spec/BUILD

@@ -0,0 +1,21 @@
+package(default_visibility = ["//visibility:public"])
+
+filegroup(
+    name = "openapi-spec",
+    srcs = glob([
+        "**/*.json",
+    ]),
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+)

+ 60 - 0
kubernetes-v1.15.4/api/openapi-spec/README.md

@@ -0,0 +1,60 @@
+# Kubernetes's OpenAPI Specification
+
+This folder contains an [OpenAPI specification](https://github.com/OAI/OpenAPI-Specification) for Kubernetes API.
+
+## Vendor Extensions
+
+Kubernetes extends OpenAPI using these extensions. Note the version at which
+each extension was added.
+
+### `x-kubernetes-group-version-kind`
+
+Operations and Definitions may have `x-kubernetes-group-version-kind` if they
+are associated with a [kubernetes resource](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources).
+
+
+For example:
+
+``` json
+"paths": {
+    ...
+    "/api/v1/namespaces/{namespace}/pods/{name}": {
+        ...
+        "get": {
+        ...
+            "x-kubernetes-group-version-kind": {
+            "group": "",
+            "version": "v1",
+            "kind": "Pod"
+            }
+        }
+    }
+}
+```
+
+### `x-kubernetes-action`
+
+Operations and Definitions may have `x-kubernetes-action` if they
+are associated with a [kubernetes resource](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources).
+Action can be one of `get`, `list`, `put`, `patch`, `post`, `delete`, `deletecollection`, `watch`, `watchlist`, `proxy`, or `connect`.
+
+
+For example:
+
+``` json
+"paths": {
+    ...
+    "/api/v1/namespaces/{namespace}/pods/{name}": {
+        ...
+        "get": {
+        ...
+            "x-kubernetes-action": "list"
+        }
+    }
+}
+```
+
+### `x-kubernetes-patch-strategy` and `x-kubernetes-patch-merge-key`
+
+Some of the definitions may have these extensions. For more information about PatchStrategy and PatchMergeKey see
+[strategic-merge-patch](https://git.k8s.io/community/contributors/devel/sig-api-machinery/strategic-merge-patch.md).

File diff suppressed because it is too large
+ 108048 - 0
kubernetes-v1.15.4/api/openapi-spec/swagger.json


+ 191 - 0
kubernetes-v1.15.4/build/BUILD

@@ -0,0 +1,191 @@
+package(default_visibility = ["//visibility:public"])
+
+load("@io_k8s_repo_infra//defs:build.bzl", "release_filegroup")
+load(":code_generation_test.bzl", "code_generation_test_suite")
+load(":container.bzl", "multi_arch_container", "multi_arch_container_push")
+load(":platforms.bzl", "SERVER_PLATFORMS", "for_platforms")
+
+code_generation_test_suite(
+    name = "code_generation_tests",
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [
+        ":package-srcs",
+        "//build/debs:all-srcs",
+        "//build/release-tars:all-srcs",
+        "//build/rpms:all-srcs",
+        "//build/visible_to:all-srcs",
+    ],
+    tags = ["automanaged"],
+)
+
+# This list should roughly match kube::build::get_docker_wrapped_binaries()
+# in build/common.sh.
+DOCKERIZED_BINARIES = {
+    "cloud-controller-manager": {
+        "base": "@debian-base-{ARCH}//image",
+        "target": "//cmd/cloud-controller-manager:cloud-controller-manager",
+    },
+    "kube-apiserver": {
+        "base": "@debian-base-{ARCH}//image",
+        "target": "//cmd/kube-apiserver:kube-apiserver",
+    },
+    "kube-controller-manager": {
+        "base": "@debian-base-{ARCH}//image",
+        "target": "//cmd/kube-controller-manager:kube-controller-manager",
+    },
+    "kube-scheduler": {
+        "base": "@debian-base-{ARCH}//image",
+        "target": "//cmd/kube-scheduler:kube-scheduler",
+    },
+    "kube-proxy": {
+        "base": "@debian-iptables-{ARCH}//image",
+        "target": "//cmd/kube-proxy:kube-proxy",
+    },
+}
+
+# In the bash-based build (build/lib/release.sh), the images built for amd64 do not use
+# an arch in their name (but other arches do), and the GCE cluster scripts
+# (which sideload the images via tarfiles) expect there not to be an arch.
+# When pushing to gcr.io, we want to use an arch, since the archless name is now used for a
+# manifest list. Bazel doesn't support manifest lists (yet), so we can't do that either.
+# For now, we use the archless name for the image tars saved in the server tarball,
+# to satisfy GCE and other similar providers. (If one were to pull the images via the manifest
+# list, the arch wouldn't appear in the name either.)
+[multi_arch_container(
+    name = binary,
+    architectures = SERVER_PLATFORMS["linux"],
+    base = meta["base"],
+    cmd = ["/usr/bin/" + binary],
+    debs = select(for_platforms(
+        for_server = ["//build/debs:%s-{ARCH}.deb" % binary],
+        only_os = "linux",
+    )),
+    # Since the multi_arch_container macro replaces the {ARCH} format string,
+    # we need to escape the stamping vars.
+    # Also see comment above about why the push tags use ARCH while the
+    # non-push tags do not.
+    docker_push_tags = ["{{STABLE_DOCKER_PUSH_REGISTRY}}/%s-{ARCH}:{{STABLE_DOCKER_TAG}}" % binary],
+    docker_tags = ["{{STABLE_DOCKER_REGISTRY}}/%s:{{STABLE_DOCKER_TAG}}" % binary],
+    stamp = True,
+    symlinks = {
+        # Some cluster startup scripts expect to find the binaries in /usr/local/bin,
+        # but the debs install the binaries into /usr/bin.
+        "/usr/local/bin/" + binary: "/usr/bin/" + binary,
+    },
+    tags = ["manual"],
+    visibility = ["//visibility:private"],
+) for binary, meta in DOCKERIZED_BINARIES.items()]
+
+# Also roll up all images into a single bundle to push with one target.
+multi_arch_container_push(
+    name = "server-images",
+    architectures = SERVER_PLATFORMS["linux"],
+    docker_tags_images = {
+        "{{STABLE_DOCKER_PUSH_REGISTRY}}/%s-{ARCH}:{{STABLE_DOCKER_TAG}}" % binary: "%s-internal" % binary
+        for binary in DOCKERIZED_BINARIES.keys()
+    },
+    tags = ["manual"],
+)
+
+[genrule(
+    name = binary + "_docker_tag",
+    srcs = [meta["target"]],
+    outs = [binary + ".docker_tag"],
+    cmd = "grep ^STABLE_DOCKER_TAG bazel-out/stable-status.txt | awk '{print $$2}' >$@",
+    stamp = 1,
+) for binary, meta in DOCKERIZED_BINARIES.items()]
+
+genrule(
+    name = "os_package_version",
+    outs = ["version"],
+    cmd = """
+grep ^STABLE_BUILD_SCM_REVISION bazel-out/stable-status.txt \
+    | awk '{print $$2}' \
+    | sed -e 's/^v//' -Ee 's/-([a-z]+)/~\\1/' -e 's/-/+/g' \
+    >$@
+""",
+    stamp = 1,
+)
+
+release_filegroup(
+    name = "docker-artifacts",
+    srcs = [":%s.tar" % binary for binary in DOCKERIZED_BINARIES.keys()] +
+           [":%s.docker_tag" % binary for binary in DOCKERIZED_BINARIES.keys()],
+)
+
+# KUBE_CLIENT_TARGETS
+release_filegroup(
+    name = "client-targets",
+    conditioned_srcs = for_platforms(for_client = [
+        "//cmd/kubectl",
+    ]),
+)
+
+# KUBE_NODE_TARGETS
+release_filegroup(
+    name = "node-targets",
+    conditioned_srcs = for_platforms(for_node = [
+        "//cmd/kube-proxy",
+        "//cmd/kubeadm",
+        "//cmd/kubelet",
+    ]),
+)
+
+# KUBE_SERVER_TARGETS
+# No need to duplicate CLIENT_TARGETS or NODE_TARGETS here,
+# since we include them in the actual build rule.
+release_filegroup(
+    name = "server-targets",
+    conditioned_srcs = for_platforms(for_server = [
+        "//cluster/gce/gci/mounter",
+        "//cmd/cloud-controller-manager",
+        "//cmd/hyperkube",
+        "//cmd/kube-apiserver",
+        "//cmd/kube-controller-manager",
+        "//cmd/kube-scheduler",
+    ]),
+)
+
+# kube::golang::test_targets
+filegroup(
+    name = "test-targets",
+    srcs = select(for_platforms(
+        for_server = [
+            "//cmd/kubemark",
+            "//test/e2e_node:e2e_node.test_binary",
+        ],
+        for_test = [
+            "//cmd/gendocs",
+            "//cmd/genkubedocs",
+            "//cmd/genman",
+            "//cmd/genswaggertypedocs",
+            "//cmd/genyaml",
+            "//cmd/linkcheck",
+            "//test/e2e:e2e.test_binary",
+            "//vendor/github.com/onsi/ginkgo/ginkgo",
+        ],
+    )),
+)
+
+# KUBE_TEST_PORTABLE
+filegroup(
+    name = "test-portable-targets",
+    srcs = [
+        "//hack:e2e.go",
+        "//hack:get-build.sh",
+        "//hack:ginkgo-e2e.sh",
+        "//hack/e2e-internal:all-srcs",
+        "//hack/lib:all-srcs",
+        "//test/e2e/testing-manifests:all-srcs",
+        "//test/kubemark:all-srcs",
+    ],
+)

+ 17 - 0
kubernetes-v1.15.4/build/OWNERS

@@ -0,0 +1,17 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+  - bentheelder
+  - cblecker
+  - fejta
+  - jbeda
+  - lavalamp
+  - spiffxp
+  - zmerlynn
+approvers:
+  - cblecker
+  - fejta
+  - jbeda
+  - lavalamp
+  - mikedanese
+  - zmerlynn

+ 130 - 0
kubernetes-v1.15.4/build/README.md

@@ -0,0 +1,130 @@
+# Building Kubernetes
+
+Building Kubernetes is easy if you take advantage of the containerized build environment. This document will help guide you through understanding this build process.
+
+## Requirements
+
+1. Docker, using one of the following configurations:
+  * **macOS** You can either use Docker for Mac or docker-machine. See installation instructions [here](https://docs.docker.com/docker-for-mac/).
+     **Note**: You will want to set the Docker VM to have at least 4.5GB of initial memory or building will likely fail. (See: [#11852]( http://issue.k8s.io/11852)).
+  * **Linux with local Docker**  Install Docker according to the [instructions](https://docs.docker.com/installation/#installation) for your OS.
+  * **Remote Docker engine** Use a big machine in the cloud to build faster. This is a little trickier so look at the section later on.
+2. **Optional** [Google Cloud SDK](https://developers.google.com/cloud/sdk/)
+
+You must install and configure Google Cloud SDK if you want to upload your release to Google Cloud Storage and may safely omit this otherwise.
+
+## Overview
+
+While it is possible to build Kubernetes using a local golang installation, we have a build process that runs in a Docker container.  This simplifies initial set up and provides for a very consistent build and test environment.
+
+## Key scripts
+
+The following scripts are found in the `build/` directory. Note that all scripts must be run from the Kubernetes root directory.
+
+* `build/run.sh`: Run a command in a build docker container.  Common invocations:
+  *  `build/run.sh make`: Build just linux binaries in the container.  Pass options and packages as necessary.
+  *  `build/run.sh make cross`: Build all binaries for all platforms
+  *  `build/run.sh make kubectl KUBE_BUILD_PLATFORMS=darwin/amd64`: Build the specific binary for the specific platform (`kubectl` and `darwin/amd64` respectively in this example)
+  *  `build/run.sh make test`: Run all unit tests
+  *  `build/run.sh make test-integration`: Run integration test
+  *  `build/run.sh make test-cmd`: Run CLI tests
+* `build/copy-output.sh`: This will copy the contents of `_output/dockerized/bin` from the Docker container to the local `_output/dockerized/bin`. It will also copy out specific file patterns that are generated as part of the build process. This is run automatically as part of `build/run.sh`.
+* `build/make-clean.sh`: Clean out the contents of `_output`, remove any locally built container images and remove the data container.
+* `build/shell.sh`: Drop into a `bash` shell in a build container with a snapshot of the current repo code.
+
+## Basic Flow
+
+The scripts directly under `build/` are used to build and test.  They will ensure that the `kube-build` Docker image is built (based on `build/build-image/Dockerfile`) and then execute the appropriate command in that container.  These scripts will both ensure that the right data is cached from run to run for incremental builds and will copy the results back out of the container.
+
+The `kube-build` container image is built by first creating a "context" directory in `_output/images/build-image`.  It is done there instead of at the root of the Kubernetes repo to minimize the amount of data we need to package up when building the image.
+
+There are 3 different container instances that are run from this image.  The first is a "data" container to store all data that needs to persist across runs to support incremental builds. Next there is an "rsync" container that is used to transfer data in and out of the data container.  Lastly there is a "build" container that is used for actually doing build actions.  The data container persists across runs while the rsync and build containers are deleted after each use.
+
+`rsync` is used transparently behind the scenes to efficiently move data in and out of the container.  This will use an ephemeral port picked by Docker.  You can modify this by setting the `KUBE_RSYNC_PORT` env variable.
+
+All Docker names are suffixed with a hash derived from the file path (to allow concurrent usage on things like CI machines) and a version number.  When the version number changes all state is cleared and a clean build is started.  This allows the build infrastructure to be changed and signals to CI systems that old artifacts need to be deleted.
+
+## Proxy Settings
+
+If you are behind a proxy and you are letting these scripts use `docker-machine` to set up your local VM for you on macOS, you need to export proxy settings for the Kubernetes build; the following environment variables should be defined.
+
+```
+export KUBERNETES_HTTP_PROXY=http://username:password@proxyaddr:proxyport
+export KUBERNETES_HTTPS_PROXY=https://username:password@proxyaddr:proxyport
+```
+
+Optionally, you can specify addresses of no proxy for Kubernetes build, for example
+
+```
+export KUBERNETES_NO_PROXY=127.0.0.1
+```
+
+If you are using `sudo` to build Kubernetes (for example, `make quick-release`), you need to run `sudo -E make quick-release` so that the environment variables are passed through.
+
+## Really Remote Docker Engine
+
+It is possible to use a Docker Engine that is running remotely (under your desk or in the cloud).  Docker must be configured to connect to that machine and the local rsync port must be forwarded (via SSH or nc) from localhost to the remote machine.
+
+To do this easily with GCE and `docker-machine`, do something like this:
+```
+# Create the remote docker machine on GCE.  This is a pretty beefy machine with SSD disk.
+KUBE_BUILD_VM=k8s-build
+KUBE_BUILD_GCE_PROJECT=<project>
+docker-machine create \
+  --driver=google \
+  --google-project=${KUBE_BUILD_GCE_PROJECT} \
+  --google-zone=us-west1-a \
+  --google-machine-type=n1-standard-8 \
+  --google-disk-size=50 \
+  --google-disk-type=pd-ssd \
+  ${KUBE_BUILD_VM}
+
+# Set up local docker to talk to that machine
+eval $(docker-machine env ${KUBE_BUILD_VM})
+
+# Pin down the port that rsync will be exposed on the remote machine
+export KUBE_RSYNC_PORT=8730
+
+# forward local 8730 to that machine so that rsync works
+docker-machine ssh ${KUBE_BUILD_VM} -L ${KUBE_RSYNC_PORT}:localhost:${KUBE_RSYNC_PORT} -N &
+```
+
+Look at `docker-machine stop`, `docker-machine start` and `docker-machine rm` to manage this VM.
+
+## Releasing
+
+The `build/release.sh` script will build a release.  It will build binaries, run tests, and (optionally) build runtime Docker images.
+
+The main output is a tar file: `kubernetes.tar.gz`.  This includes:
+* Cross compiled client utilities.
+* Script (`kubectl`) for picking and running the right client binary based on platform.
+* Examples
+* Cluster deployment scripts for various clouds
+* Tar file containing all server binaries
+
+In addition, there are some other tar files that are created:
+* `kubernetes-client-*.tar.gz` Client binaries for a specific platform.
+* `kubernetes-server-*.tar.gz` Server binaries for a specific platform.
+
+When building final release tars, they are first staged into `_output/release-stage` before being tar'd up and put into `_output/release-tars`.
+
+## Reproducibility
+`make release`, its variant `make quick-release`, and Bazel all provide a
+hermetic build environment which should provide some level of reproducibility
+for builds. `make` itself is **not** hermetic.
+
+The Kubernetes build environment supports the [`SOURCE_DATE_EPOCH` environment
+variable](https://reproducible-builds.org/specs/source-date-epoch/) specified by
+the Reproducible Builds project, which can be set to a UNIX epoch timestamp.
+This will be used for the build timestamps embedded in compiled Go binaries,
+and maybe someday also Docker images.
+
+One reasonable setting for this variable is to use the commit timestamp from the
+tip of the tree being built; this is what the Kubernetes CI system uses. For
+example, you could use the following one-liner:
+
+```bash
+SOURCE_DATE_EPOCH=$(git show -s --format=format:%ct HEAD)
+```
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/build/README.md?pixel)]()

+ 45 - 0
kubernetes-v1.15.4/build/bindata.bzl

@@ -0,0 +1,45 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Genrule wrapper around the go-bindata utility.
+# IMPORTANT: Any changes to this rule may also require changes to hack/generate-bindata.sh.
+def go_bindata(
+    name, srcs, outs,
+    compress=True,
+    include_metadata=True,
+    pkg="generated",
+    ignores=["\.jpg", "\.png", "\.md", "BUILD(\.bazel)?"],
+    **kw):
+
+  args = []
+  for ignore in ignores:
+    args.extend(["-ignore", "'%s'" % ignore])
+  if not include_metadata:
+    args.append("-nometadata")
+  if not compress:
+    args.append("-nocompress")
+
+  native.genrule(
+    name = name,
+    srcs = srcs,
+    outs = outs,
+    cmd = """
+    $(location //vendor/github.com/jteeuwen/go-bindata/go-bindata:go-bindata) \
+      -o "$@" -pkg %s -prefix $$(pwd) %s $(SRCS)
+    """ % (pkg, " ".join(args)),
+    tools = [
+      "//vendor/github.com/jteeuwen/go-bindata/go-bindata",
+    ],
+    **kw
+  )

+ 54 - 0
kubernetes-v1.15.4/build/build-image/Dockerfile

@@ -0,0 +1,54 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file creates a standard build environment for building Kubernetes
+FROM k8s.gcr.io/kube-cross:KUBE_BUILD_IMAGE_CROSS_TAG
+
+# Mark this as a kube-build container
+RUN touch /kube-build-image
+
+# To run as non-root we sometimes need to rebuild go stdlib packages.
+RUN chmod -R a+rwx /usr/local/go/pkg
+
+# For running integration tests /var/run/kubernetes is required
+# and should be writable by user
+RUN mkdir /var/run/kubernetes && chmod a+rwx /var/run/kubernetes
+
+# The kubernetes source is expected to be mounted here.  This will be the base
+# of operations.
+ENV HOME /go/src/k8s.io/kubernetes
+WORKDIR ${HOME}
+
+# Make output from the dockerized build go someplace else
+ENV KUBE_OUTPUT_SUBPATH _output/dockerized
+
+# Pick up version stuff here as we don't copy our .git over.
+ENV KUBE_GIT_VERSION_FILE ${HOME}/.dockerized-kube-version-defs
+
+# Add system-wide git user information
+RUN git config --system user.email "nobody@k8s.io" \
+  && git config --system user.name "kube-build-image"
+
+# Fix permissions on gopath
+RUN chmod -R a+rwx $GOPATH
+
+# Make log messages use the right timezone
+ADD localtime /etc/localtime
+RUN chmod a+r /etc/localtime
+
+# Set up rsyncd
+ADD rsyncd.password /
+RUN chmod a+r /rsyncd.password
+ADD rsyncd.sh /
+RUN chmod a+rx /rsyncd.sh

+ 1 - 0
kubernetes-v1.15.4/build/build-image/VERSION

@@ -0,0 +1 @@
+5

+ 79 - 0
kubernetes-v1.15.4/build/build-image/cross/Dockerfile

@@ -0,0 +1,79 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file creates a standard build environment for building cross
+# platform go binary for the architecture kubernetes cares about.
+
+FROM golang:1.12.9
+
+ENV GOARM 7
+ENV KUBE_DYNAMIC_CROSSPLATFORMS \
+  armhf \
+  arm64 \
+  s390x \
+  ppc64el
+
+ENV KUBE_CROSSPLATFORMS \
+  linux/386 \
+  linux/arm linux/arm64 \
+  linux/ppc64le \
+  linux/s390x \
+  darwin/amd64 darwin/386 \
+  windows/amd64 windows/386
+
+# Pre-compile the standard go library when cross-compiling. This is much easier now that we have go1.5+
+RUN for platform in ${KUBE_CROSSPLATFORMS}; do GOOS=${platform%/*} GOARCH=${platform##*/} go install std; done \
+    && go clean -cache
+
+# Install rsync, jq and other build utilities; protoc for generating protobuf output is installed further below
+RUN apt-get update \
+  && apt-get install -y rsync jq apt-utils file patch unzip \
+  && apt-get clean && rm -rf /var/lib/apt/lists/*
+
+# Use dynamic cgo linking for architectures other than amd64 for the server platforms
+# To install crossbuild essential for other architectures add the following repository.
+RUN echo "deb http://archive.ubuntu.com/ubuntu xenial main universe" > /etc/apt/sources.list.d/cgocrosscompiling.list \
+  && apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 40976EAF437D05B5 3B4FE6ACC0B21F32 \
+  && apt-get update \
+  && apt-get install -y build-essential \
+  && for platform in ${KUBE_DYNAMIC_CROSSPLATFORMS}; do apt-get install -y crossbuild-essential-${platform}; done \
+  && apt-get clean && rm -rf /var/lib/apt/lists/*
+
+RUN PROTOBUF_VERSION=3.0.2; ZIPNAME="protoc-${PROTOBUF_VERSION}-linux-x86_64.zip"; \
+  mkdir /tmp/protoc && cd /tmp/protoc \
+  && wget "https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/${ZIPNAME}" \
+  && unzip "${ZIPNAME}" \
+  && chmod -R +rX /tmp/protoc \
+  && cp -pr bin /usr/local \
+  && cp -pr include /usr/local \
+  && rm -rf /tmp/protoc \
+  && protoc --version
+
+# work around 64MB tmpfs size in Docker 1.6
+ENV TMPDIR /tmp.k8s
+RUN mkdir $TMPDIR \
+  && chmod a+rwx $TMPDIR \
+  && chmod o+t $TMPDIR
+
+# Get the code coverage tool and goimports
+RUN go get golang.org/x/tools/cmd/cover \
+           golang.org/x/tools/cmd/goimports \
+    && go clean -cache
+
+# Download and symlink etcd. We need this for our integration tests.
+RUN export ETCD_VERSION=v3.2.24; \
+  mkdir -p /usr/local/src/etcd \
+  && cd /usr/local/src/etcd \
+  && curl -fsSL https://github.com/coreos/etcd/releases/download/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz | tar -xz \
+  && ln -s ../src/etcd/etcd-${ETCD_VERSION}-linux-amd64/etcd /usr/local/bin/

+ 27 - 0
kubernetes-v1.15.4/build/build-image/cross/Makefile

@@ -0,0 +1,27 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY:	build push
+
+IMAGE=kube-cross
+TAG=$(shell cat VERSION)
+
+
+all: push
+
+build:
+	docker build --pull -t staging-k8s.gcr.io/$(IMAGE):$(TAG) .
+
+push: build
+	docker push staging-k8s.gcr.io/$(IMAGE):$(TAG)

+ 1 - 0
kubernetes-v1.15.4/build/build-image/cross/VERSION

@@ -0,0 +1 @@
+v1.12.9-1

+ 83 - 0
kubernetes-v1.15.4/build/build-image/rsyncd.sh

@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script will set up and run rsyncd to allow data to move into and out of
+# our dockerized build system.  This is used for syncing sources and source
+# changes into the docker build container.  It is also used to transfer built
+# binaries and generated files back out.
+#
+# When run as root (rare) it'll preserve the file ids as sent from the client.
+# Usually it'll be run as non-dockerized UID/GID and end up translating all file
+# ownership to that.
+
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# The directory that gets sync'd
+VOLUME=${HOME}
+
+# Assume that this is running in Docker on a bridge.  Allow connections from
+# anything on the local subnet.
+ALLOW=$(ip route | awk  '/^default via/ { reg = "^[0-9./]+ dev "$5 } ; $0 ~ reg { print $1 }')
+
+CONFDIR="/tmp/rsync.k8s"
+PIDFILE="${CONFDIR}/rsyncd.pid"
+CONFFILE="${CONFDIR}/rsyncd.conf"
+SECRETS="${CONFDIR}/rsyncd.secrets"
+
+mkdir -p "${CONFDIR}"
+
+if [[ -f "${PIDFILE}" ]]; then
+  PID=$(cat "${PIDFILE}")
+  echo "Cleaning up old PID file: ${PIDFILE}"
+  kill "${PID}" &> /dev/null || true
+  rm "${PIDFILE}"
+fi
+
+PASSWORD=$(</rsyncd.password)
+
+cat <<EOF >"${SECRETS}"
+k8s:${PASSWORD}
+EOF
+chmod go= "${SECRETS}"
+
+USER_CONFIG=
+if [[ "$(id -u)" == "0" ]]; then
+  USER_CONFIG="  uid = 0"$'\n'"  gid = 0"
+fi
+
+cat <<EOF >"${CONFFILE}"
+pid file = ${PIDFILE}
+use chroot = no
+log file = /dev/stdout
+reverse lookup = no
+munge symlinks = no
+port = 8730
+[k8s]
+  numeric ids = true
+  $USER_CONFIG
+  hosts deny = *
+  hosts allow = ${ALLOW} ${ALLOW_HOST-}
+  auth users = k8s
+  secrets file = ${SECRETS}
+  read only = false
+  path = ${VOLUME}
+  filter = - /.make/ - /_tmp/
+EOF
+
+exec /usr/bin/rsync --no-detach --daemon --config="${CONFFILE}" "$@"

+ 81 - 0
kubernetes-v1.15.4/build/code_generation.bzl

@@ -0,0 +1,81 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("//build:kazel_generated.bzl", "go_prefix", "tags_values_pkgs")
+load("//build:openapi.bzl", "openapi_vendor_prefix")
+load("@io_k8s_repo_infra//defs:go.bzl", "go_genrule")
+
+def bazel_go_library(pkg):
+    """Returns the Bazel label for the Go library for the provided package.
+    This is intended to be used with the //build:kazel_generated.bzl tag dictionaries; for example:
+    load("//build:kazel_generated.bzl", "tags_values_pkgs")
+    some_rule(
+        ...
+        deps = [bazel_go_library(pkg) for pkg in tags_values_pkgs["openapi-gen"]["true"]],
+        ...
+    )
+    """
+    return "//%s:go_default_library" % pkg
+
+def go_pkg(pkg):
+    """Returns the full Go package name for the provided workspace-relative package.
+    This is suitable to pass to tools depending on the Go build library.
+    If any packages are in staging/src, they are remapped to their intended path in vendor/.
+    This is intended to be used with the //build:kazel_generated.bzl tag dictionaries.
+    For example:
+    load("//build:kazel_generated.bzl", "tags_values_pkgs")
+    genrule(
+        ...
+        cmd = "do something --pkgs=%s" % ",".join([go_pkg(pkg) for pkg in tags_values_pkgs["openapi-gen"]["true"]]),
+        ...
+    )
+    """
+    return go_prefix + "/" + pkg.replace("staging/src/", "vendor/", maxsplit = 1)
+
+def openapi_deps():
+    deps = [
+        "//vendor/github.com/go-openapi/spec:go_default_library",
+        "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library",
+    ]
+    deps.extend([bazel_go_library(pkg) for pkg in tags_values_pkgs["openapi-gen"]["true"]])
+    return deps
+
+def gen_openapi(outs, output_pkg):
+    """Calls openapi-gen to produce the zz_generated.openapi.go file,
+    which should be provided in outs.
+    output_pkg should be set to the full go package name for this generated file.
+    """
+    go_genrule(
+        name = "zz_generated.openapi",
+        srcs = ["//" + openapi_vendor_prefix + "hack/boilerplate:boilerplate.generatego.txt"],
+        outs = outs,
+        # In order for vendored dependencies to be imported correctly,
+        # the generator must run from the repo root inside the generated GOPATH.
+        # All of bazel's $(location)s are relative to the original working directory, however.
+        cmd = " ".join([
+            "cd $$GOPATH/src/" + go_prefix + ";",
+            "$$GO_GENRULE_EXECROOT/$(location //vendor/k8s.io/kube-openapi/cmd/openapi-gen)",
+            "--v 1",
+            "--logtostderr",
+            "--go-header-file $$GO_GENRULE_EXECROOT/$(location //" + openapi_vendor_prefix + "hack/boilerplate:boilerplate.generatego.txt)",
+            "--output-file-base zz_generated.openapi",
+            "--output-package " + output_pkg,
+            "--report-filename tmp_api_violations.report",
+            "--input-dirs " + ",".join([go_pkg(pkg) for pkg in tags_values_pkgs["openapi-gen"]["true"]]),
+            "&& cp $$GOPATH/src/" + output_pkg + "/zz_generated.openapi.go $$GO_GENRULE_EXECROOT/$(location :zz_generated.openapi.go)",
+            "&& rm tmp_api_violations.report",
+        ]),
+        go_deps = openapi_deps(),
+        tools = ["//vendor/k8s.io/kube-openapi/cmd/openapi-gen"],
+    )

+ 49 - 0
kubernetes-v1.15.4/build/code_generation_test.bzl

@@ -0,0 +1,49 @@
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load(":code_generation.bzl", "bazel_go_library", "go_pkg")
+load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest")
+
+def _bazel_go_library_test_impl(ctx):
+    env = unittest.begin(ctx)
+    test_cases = [
+        ("pkg/kubectl/util", "//pkg/kubectl/util:go_default_library"),
+        ("vendor/some/third/party", "//vendor/some/third/party:go_default_library"),
+        ("staging/src/k8s.io/apimachinery/api", "//staging/src/k8s.io/apimachinery/api:go_default_library"),
+    ]
+    for input, expected in test_cases:
+        asserts.equals(env, expected, bazel_go_library(input))
+    unittest.end(env)
+
+bazel_go_library_test = unittest.make(_bazel_go_library_test_impl)
+
+def _go_pkg_test_impl(ctx):
+    env = unittest.begin(ctx)
+    test_cases = [
+        ("pkg/kubectl/util", "k8s.io/kubernetes/pkg/kubectl/util"),
+        ("vendor/some/third/party", "k8s.io/kubernetes/vendor/some/third/party"),
+        ("staging/src/k8s.io/apimachinery/api", "k8s.io/kubernetes/vendor/k8s.io/apimachinery/api"),
+    ]
+    for input, expected in test_cases:
+        asserts.equals(env, expected, go_pkg(input))
+    unittest.end(env)
+
+go_pkg_test = unittest.make(_go_pkg_test_impl)
+
+def code_generation_test_suite(name):
+    unittest.suite(
+        name,
+        bazel_go_library_test,
+        go_pkg_test,
+    )

+ 740 - 0
kubernetes-v1.15.4/build/common.sh

@@ -0,0 +1,740 @@
+#!/usr/bin/env bash
+
+# Copyright 2014 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Common utilities, variables and checks for all build scripts.
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# Unset CDPATH, having it set messes up with script import paths
+unset CDPATH
+
+USER_ID=$(id -u)
+GROUP_ID=$(id -g)
+
+DOCKER_OPTS=${DOCKER_OPTS:-""}
+IFS=" " read -r -a DOCKER <<< "docker ${DOCKER_OPTS}"
+DOCKER_HOST=${DOCKER_HOST:-""}
+DOCKER_MACHINE_NAME=${DOCKER_MACHINE_NAME:-"kube-dev"}
+readonly DOCKER_MACHINE_DRIVER=${DOCKER_MACHINE_DRIVER:-"virtualbox --virtualbox-cpu-count -1"}
+
+# This will canonicalize the path
+KUBE_ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd -P)
+
+source "${KUBE_ROOT}/hack/lib/init.sh"
+
+# Constants
+readonly KUBE_BUILD_IMAGE_REPO=kube-build
+readonly KUBE_BUILD_IMAGE_CROSS_TAG="$(cat "${KUBE_ROOT}/build/build-image/cross/VERSION")"
+
+# This version number is used to cause everyone to rebuild their data containers
+# and build image.  This is especially useful for automated build systems like
+# Jenkins.
+#
+# Increment/change this number if you change the build image (anything under
+# build/build-image) or change the set of volumes in the data container.
+readonly KUBE_BUILD_IMAGE_VERSION_BASE="$(cat "${KUBE_ROOT}/build/build-image/VERSION")"
+readonly KUBE_BUILD_IMAGE_VERSION="${KUBE_BUILD_IMAGE_VERSION_BASE}-${KUBE_BUILD_IMAGE_CROSS_TAG}"
+
+# Here we map the output directories across both the local and remote _output
+# directories:
+#
+# *_OUTPUT_ROOT    - the base of all output in that environment.
+# *_OUTPUT_SUBPATH - location where golang stuff is built/cached.  Also
+#                    persisted across docker runs with a volume mount.
+# *_OUTPUT_BINPATH - location where final binaries are placed.  If the remote
+#                    is really remote, this is the stuff that has to be copied
+#                    back.
+# OUT_DIR can come in from the Makefile, so honor it.
+readonly LOCAL_OUTPUT_ROOT="${KUBE_ROOT}/${OUT_DIR:-_output}"
+readonly LOCAL_OUTPUT_SUBPATH="${LOCAL_OUTPUT_ROOT}/dockerized"
+readonly LOCAL_OUTPUT_BINPATH="${LOCAL_OUTPUT_SUBPATH}/bin"
+readonly LOCAL_OUTPUT_GOPATH="${LOCAL_OUTPUT_SUBPATH}/go"
+readonly LOCAL_OUTPUT_IMAGE_STAGING="${LOCAL_OUTPUT_ROOT}/images"
+
+# This is a symlink to binaries for "this platform" (e.g. build tools).
+readonly THIS_PLATFORM_BIN="${LOCAL_OUTPUT_ROOT}/bin"
+
+readonly REMOTE_ROOT="/go/src/${KUBE_GO_PACKAGE}"
+readonly REMOTE_OUTPUT_ROOT="${REMOTE_ROOT}/_output"
+readonly REMOTE_OUTPUT_SUBPATH="${REMOTE_OUTPUT_ROOT}/dockerized"
+readonly REMOTE_OUTPUT_BINPATH="${REMOTE_OUTPUT_SUBPATH}/bin"
+readonly REMOTE_OUTPUT_GOPATH="${REMOTE_OUTPUT_SUBPATH}/go"
+
+# This is the port on the workstation host to expose RSYNC on.  Set this if you
+# are doing something fancy with ssh tunneling.
+readonly KUBE_RSYNC_PORT="${KUBE_RSYNC_PORT:-}"
+
+# This is the port that rsync is running on *inside* the container. This may be
+# mapped to KUBE_RSYNC_PORT via docker networking.
+readonly KUBE_CONTAINER_RSYNC_PORT=8730
+
+# Get the set of master binaries that run in Docker (on Linux)
+# Entry format is "<name-of-binary>,<base-image>".
+# Binaries are placed in /usr/local/bin inside the image.
+#
+# $1 - server architecture
+kube::build::get_docker_wrapped_binaries() {
+  local arch=$1
+  local debian_base_version=v1.0.0
+  local debian_iptables_version=v11.0.2
+  ### If you change any of these lists, please also update DOCKERIZED_BINARIES
+  ### in build/BUILD. And kube::golang::server_image_targets
+  local targets=(
+    cloud-controller-manager,"k8s.gcr.io/debian-base-${arch}:${debian_base_version}"
+    kube-apiserver,"k8s.gcr.io/debian-base-${arch}:${debian_base_version}"
+    kube-controller-manager,"k8s.gcr.io/debian-base-${arch}:${debian_base_version}"
+    kube-scheduler,"k8s.gcr.io/debian-base-${arch}:${debian_base_version}"
+    kube-proxy,"k8s.gcr.io/debian-iptables-${arch}:${debian_iptables_version}"
+  )
+
+  echo "${targets[@]}"
+}
+
+# ---------------------------------------------------------------------------
+# Basic setup functions
+
+# Verify that the right utilities and such are installed for building Kube. Set
+# up some dynamic constants.
+# Args:
+#   $1 - boolean of whether to require functioning docker (default true)
+#
+# Vars set:
+#   KUBE_ROOT_HASH
+#   KUBE_BUILD_IMAGE_TAG_BASE
+#   KUBE_BUILD_IMAGE_TAG
+#   KUBE_BUILD_IMAGE
+#   KUBE_BUILD_CONTAINER_NAME_BASE
+#   KUBE_BUILD_CONTAINER_NAME
+#   KUBE_DATA_CONTAINER_NAME_BASE
+#   KUBE_DATA_CONTAINER_NAME
+#   KUBE_RSYNC_CONTAINER_NAME_BASE
+#   KUBE_RSYNC_CONTAINER_NAME
+#   DOCKER_MOUNT_ARGS
+#   LOCAL_OUTPUT_BUILD_CONTEXT
+function kube::build::verify_prereqs() {
+  local -r require_docker=${1:-true}
+  kube::log::status "Verifying Prerequisites...."
+  kube::build::ensure_tar || return 1
+  kube::build::ensure_rsync || return 1
+  if ${require_docker}; then
+    kube::build::ensure_docker_in_path || return 1
+    if kube::build::is_osx; then
+        kube::build::docker_available_on_osx || return 1
+    fi
+    kube::util::ensure_docker_daemon_connectivity || return 1
+
+    if (( KUBE_VERBOSE > 6 )); then
+      kube::log::status "Docker Version:"
+      "${DOCKER[@]}" version | kube::log::info_from_stdin
+    fi
+  fi
+
+  KUBE_GIT_BRANCH=$(git symbolic-ref --short -q HEAD 2>/dev/null || true)
+  KUBE_ROOT_HASH=$(kube::build::short_hash "${HOSTNAME:-}:${KUBE_ROOT}:${KUBE_GIT_BRANCH}")
+  KUBE_BUILD_IMAGE_TAG_BASE="build-${KUBE_ROOT_HASH}"
+  KUBE_BUILD_IMAGE_TAG="${KUBE_BUILD_IMAGE_TAG_BASE}-${KUBE_BUILD_IMAGE_VERSION}"
+  KUBE_BUILD_IMAGE="${KUBE_BUILD_IMAGE_REPO}:${KUBE_BUILD_IMAGE_TAG}"
+  KUBE_BUILD_CONTAINER_NAME_BASE="kube-build-${KUBE_ROOT_HASH}"
+  KUBE_BUILD_CONTAINER_NAME="${KUBE_BUILD_CONTAINER_NAME_BASE}-${KUBE_BUILD_IMAGE_VERSION}"
+  KUBE_RSYNC_CONTAINER_NAME_BASE="kube-rsync-${KUBE_ROOT_HASH}"
+  KUBE_RSYNC_CONTAINER_NAME="${KUBE_RSYNC_CONTAINER_NAME_BASE}-${KUBE_BUILD_IMAGE_VERSION}"
+  KUBE_DATA_CONTAINER_NAME_BASE="kube-build-data-${KUBE_ROOT_HASH}"
+  KUBE_DATA_CONTAINER_NAME="${KUBE_DATA_CONTAINER_NAME_BASE}-${KUBE_BUILD_IMAGE_VERSION}"
+  DOCKER_MOUNT_ARGS=(--volumes-from "${KUBE_DATA_CONTAINER_NAME}")
+  LOCAL_OUTPUT_BUILD_CONTEXT="${LOCAL_OUTPUT_IMAGE_STAGING}/${KUBE_BUILD_IMAGE}"
+
+  kube::version::get_version_vars
+  kube::version::save_version_vars "${KUBE_ROOT}/.dockerized-kube-version-defs"
+}
+
+# ---------------------------------------------------------------------------
+# Utility functions
+
+function kube::build::docker_available_on_osx() {
+  if [[ -z "${DOCKER_HOST}" ]]; then
+    if [[ -S "/var/run/docker.sock" ]]; then
+      kube::log::status "Using Docker for MacOS"
+      return 0
+    fi
+
+    kube::log::status "No docker host is set. Checking options for setting one..."
+    if [[ -z "$(which docker-machine)" ]]; then
+      kube::log::status "It looks like you're running Mac OS X, yet neither Docker for Mac nor docker-machine can be found."
+      kube::log::status "See: https://docs.docker.com/engine/installation/mac/ for installation instructions."
+      return 1
+    elif [[ -n "$(which docker-machine)" ]]; then
+      kube::build::prepare_docker_machine
+    fi
+  fi
+}
+
+function kube::build::prepare_docker_machine() {
+  kube::log::status "docker-machine was found."
+
+  local available_memory_bytes
+  available_memory_bytes=$(sysctl -n hw.memsize 2>/dev/null)
+
+  local bytes_in_mb=1048576
+
+  # Give virtualbox 1/2 the system memory. It's necessary to divide by 2 instead
+  # of multiplying by .5, because bash can only multiply by ints.
+  local memory_divisor=2
+
+  local virtualbox_memory_mb=$(( available_memory_bytes / (bytes_in_mb * memory_divisor) ))
+
+  docker-machine inspect "${DOCKER_MACHINE_NAME}" &> /dev/null || {
+    kube::log::status "Creating a machine to build Kubernetes"
+    docker-machine create --driver "${DOCKER_MACHINE_DRIVER}" \
+      --virtualbox-memory "${virtualbox_memory_mb}" \
+      --engine-env HTTP_PROXY="${KUBERNETES_HTTP_PROXY:-}" \
+      --engine-env HTTPS_PROXY="${KUBERNETES_HTTPS_PROXY:-}" \
+      --engine-env NO_PROXY="${KUBERNETES_NO_PROXY:-127.0.0.1}" \
+      "${DOCKER_MACHINE_NAME}" > /dev/null || {
+      kube::log::error "Something went wrong creating a machine."
+      kube::log::error "Try the following: "
+      kube::log::error "docker-machine create -d ${DOCKER_MACHINE_DRIVER} --virtualbox-memory ${virtualbox_memory_mb} ${DOCKER_MACHINE_NAME}"
+      return 1
+    }
+  }
+  docker-machine start "${DOCKER_MACHINE_NAME}" &> /dev/null
+  # it takes `docker-machine env` a few seconds to work if the machine was just started
+  local docker_machine_out
+  while ! docker_machine_out=$(docker-machine env "${DOCKER_MACHINE_NAME}" 2>&1); do
+    if [[ ${docker_machine_out} =~ "Error checking TLS connection" ]]; then
+      echo "${docker_machine_out}"
+      docker-machine regenerate-certs "${DOCKER_MACHINE_NAME}"
+    else
+      sleep 1
+    fi
+  done
+  eval "$(docker-machine env "${DOCKER_MACHINE_NAME}")"
+  kube::log::status "A Docker host using docker-machine named '${DOCKER_MACHINE_NAME}' is ready to go!"
+  return 0
+}
+
+function kube::build::is_osx() {
+  [[ "$(uname)" == "Darwin" ]]
+}
+
+function kube::build::is_gnu_sed() {
+  [[ $(sed --version 2>&1) == *GNU* ]]
+}
+
+function kube::build::ensure_rsync() {
+  if [[ -z "$(which rsync)" ]]; then
+    kube::log::error "Can't find 'rsync' in PATH, please fix and retry."
+    return 1
+  fi
+}
+
+function kube::build::update_dockerfile() {
+  if kube::build::is_gnu_sed; then
+    sed_opts=(-i)
+  else
+    sed_opts=(-i '')
+  fi
+  sed "${sed_opts[@]}" "s/KUBE_BUILD_IMAGE_CROSS_TAG/${KUBE_BUILD_IMAGE_CROSS_TAG}/" "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile"
+}
+
+function  kube::build::set_proxy() {
+  if [[ -n "${KUBERNETES_HTTPS_PROXY:-}" ]]; then
+    echo "ENV https_proxy $KUBERNETES_HTTPS_PROXY" >> "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile"
+  fi
+  if [[ -n "${KUBERNETES_HTTP_PROXY:-}" ]]; then
+    echo "ENV http_proxy $KUBERNETES_HTTP_PROXY" >> "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile"
+  fi
+  if [[ -n "${KUBERNETES_NO_PROXY:-}" ]]; then
+    echo "ENV no_proxy $KUBERNETES_NO_PROXY" >> "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile"
+  fi
+}
+
+function kube::build::ensure_docker_in_path() {
+  if [[ -z "$(which docker)" ]]; then
+    kube::log::error "Can't find 'docker' in PATH, please fix and retry."
+    kube::log::error "See https://docs.docker.com/installation/#installation for installation instructions."
+    return 1
+  fi
+}
+
+function kube::build::ensure_tar() {
+  if [[ -n "${TAR:-}" ]]; then
+    return
+  fi
+
+  # Find gnu tar if it is available, bomb out if not.
+  TAR=tar
+  if which gtar &>/dev/null; then
+      TAR=gtar
+  else
+      if which gnutar &>/dev/null; then
+	  TAR=gnutar
+      fi
+  fi
+  if ! "${TAR}" --version | grep -q GNU; then
+    echo "  !!! Cannot find GNU tar. Build on Linux or install GNU tar"
+    echo "      on Mac OS X (brew install gnu-tar)."
+    return 1
+  fi
+}
+
+function kube::build::has_docker() {
+  which docker &> /dev/null
+}
+
+function kube::build::has_ip() {
+  which ip &> /dev/null && ip -Version | grep 'iproute2' &> /dev/null
+}
+
+# Detect if a specific image exists
+#
+# $1 - image repo name
+# $2 - image tag
+function kube::build::docker_image_exists() {
+  [[ -n $1 && -n $2 ]] || {
+    kube::log::error "Internal error. Image not specified in docker_image_exists."
+    exit 2
+  }
+
+  [[ $("${DOCKER[@]}" images -q "${1}:${2}") ]]
+}
+
+# Delete all images that match a tag prefix except for the "current" version
+#
+# $1: The image repo/name
+# $2: The tag base. We consider any image that matches $2*
+# $3: The current image not to delete if provided
+function kube::build::docker_delete_old_images() {
+  # In Docker 1.12, we can replace this with
+  #    docker images "$1" --format "{{.Tag}}"
+  for tag in $("${DOCKER[@]}" images "${1}" | tail -n +2 | awk '{print $2}') ; do
+    if [[ "${tag}" != "${2}"* ]] ; then
+      V=3 kube::log::status "Keeping image ${1}:${tag}"
+      continue
+    fi
+
+    if [[ -z "${3:-}" || "${tag}" != "${3}" ]] ; then
+      V=2 kube::log::status "Deleting image ${1}:${tag}"
+      "${DOCKER[@]}" rmi "${1}:${tag}" >/dev/null
+    else
+      V=3 kube::log::status "Keeping image ${1}:${tag}"
+    fi
+  done
+}
+
+# Stop and delete all containers that match a pattern
+#
+# $1: The base container prefix
+# $2: The current container to keep, if provided
+function kube::build::docker_delete_old_containers() {
+  # In Docker 1.12 we can replace this line with
+  #   docker ps -a --format="{{.Names}}"
+  for container in $("${DOCKER[@]}" ps -a | tail -n +2 | awk '{print $NF}') ; do
+    if [[ "${container}" != "${1}"* ]] ; then
+      V=3 kube::log::status "Keeping container ${container}"
+      continue
+    fi
+    if [[ -z "${2:-}" || "${container}" != "${2}" ]] ; then
+      V=2 kube::log::status "Deleting container ${container}"
+      kube::build::destroy_container "${container}"
+    else
+      V=3 kube::log::status "Keeping container ${container}"
+    fi
+  done
+}
+
+# Takes $1 and computes a short hash for it. Useful for unique tag generation
+function kube::build::short_hash() {
+  [[ $# -eq 1 ]] || {
+    kube::log::error "Internal error.  No data based to short_hash."
+    exit 2
+  }
+
+  local short_hash
+  if which md5 >/dev/null 2>&1; then
+    short_hash=$(md5 -q -s "$1")
+  else
+    short_hash=$(echo -n "$1" | md5sum)
+  fi
+  echo "${short_hash:0:10}"
+}
+
+# Pedantically kill, wait-on and remove a container. The -f -v options
+# to rm don't actually seem to get the job done, so force kill the
+# container, wait to ensure it's stopped, then try the remove. This is
+# a workaround for bug https://github.com/docker/docker/issues/3968.
+function kube::build::destroy_container() {
+  "${DOCKER[@]}" kill "$1" >/dev/null 2>&1 || true
+  if [[ $("${DOCKER[@]}" version --format '{{.Server.Version}}') = 17.06.0* ]]; then
+    # Workaround https://github.com/moby/moby/issues/33948.
+    # TODO: remove when 17.06.0 is not relevant anymore
+    DOCKER_API_VERSION=v1.29 "${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true
+  else
+    "${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true
+  fi
+  "${DOCKER[@]}" rm -f -v "$1" >/dev/null 2>&1 || true
+}
+
+# ---------------------------------------------------------------------------
+# Building
+
+
+function kube::build::clean() {
+  if kube::build::has_docker ; then
+    kube::build::docker_delete_old_containers "${KUBE_BUILD_CONTAINER_NAME_BASE}"
+    kube::build::docker_delete_old_containers "${KUBE_RSYNC_CONTAINER_NAME_BASE}"
+    kube::build::docker_delete_old_containers "${KUBE_DATA_CONTAINER_NAME_BASE}"
+    kube::build::docker_delete_old_images "${KUBE_BUILD_IMAGE_REPO}" "${KUBE_BUILD_IMAGE_TAG_BASE}"
+
+    V=2 kube::log::status "Cleaning all untagged docker images"
+    "${DOCKER[@]}" rmi "$("${DOCKER[@]}" images -q --filter 'dangling=true')" 2> /dev/null || true
+  fi
+
+  if [[ -d "${LOCAL_OUTPUT_ROOT}" ]]; then
+    kube::log::status "Removing _output directory"
+    rm -rf "${LOCAL_OUTPUT_ROOT}"
+  fi
+}
+
+# Set up the context directory for the kube-build image and build it.
+function kube::build::build_image() {
+  mkdir -p "${LOCAL_OUTPUT_BUILD_CONTEXT}"
+  # Make sure the context directory is owned by the right user for syncing sources to the container.
+  chown -R "${USER_ID}":"${GROUP_ID}" "${LOCAL_OUTPUT_BUILD_CONTEXT}"
+
+  cp /etc/localtime "${LOCAL_OUTPUT_BUILD_CONTEXT}/"
+
+  cp "${KUBE_ROOT}/build/build-image/Dockerfile" "${LOCAL_OUTPUT_BUILD_CONTEXT}/Dockerfile"
+  cp "${KUBE_ROOT}/build/build-image/rsyncd.sh" "${LOCAL_OUTPUT_BUILD_CONTEXT}/"
+  dd if=/dev/urandom bs=512 count=1 2>/dev/null | LC_ALL=C tr -dc 'A-Za-z0-9' | dd bs=32 count=1 2>/dev/null > "${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password"
+  chmod go= "${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password"
+
+  kube::build::update_dockerfile
+  kube::build::set_proxy
+  kube::build::docker_build "${KUBE_BUILD_IMAGE}" "${LOCAL_OUTPUT_BUILD_CONTEXT}" 'false'
+
+  # Clean up old versions of everything
+  kube::build::docker_delete_old_containers "${KUBE_BUILD_CONTAINER_NAME_BASE}" "${KUBE_BUILD_CONTAINER_NAME}"
+  kube::build::docker_delete_old_containers "${KUBE_RSYNC_CONTAINER_NAME_BASE}" "${KUBE_RSYNC_CONTAINER_NAME}"
+  kube::build::docker_delete_old_containers "${KUBE_DATA_CONTAINER_NAME_BASE}" "${KUBE_DATA_CONTAINER_NAME}"
+  kube::build::docker_delete_old_images "${KUBE_BUILD_IMAGE_REPO}" "${KUBE_BUILD_IMAGE_TAG_BASE}" "${KUBE_BUILD_IMAGE_TAG}"
+
+  kube::build::ensure_data_container
+  kube::build::sync_to_container
+}
+
+# Build a docker image from a Dockerfile.
+# $1 is the name of the image to build
+# $2 is the location of the "context" directory, with the Dockerfile at the root.
+# $3 is the value to set the --pull flag for docker build; true by default
+function kube::build::docker_build() {
+  local -r image=$1
+  local -r context_dir=$2
+  local -r pull="${3:-true}"
+  local -ra build_cmd=("${DOCKER[@]}" build -t "${image}" "--pull=${pull}" "${context_dir}")
+
+  kube::log::status "Building Docker image ${image}"
+  local docker_output
+  docker_output=$("${build_cmd[@]}" 2>&1) || {
+    cat <<EOF >&2
++++ Docker build command failed for ${image}
+
+${docker_output}
+
+To retry manually, run:
+
+${build_cmd[*]}
+
+EOF
+    return 1
+  }
+}
+
+function kube::build::ensure_data_container() {
+  # If the data container exists AND exited successfully, we can use it.
+  # Otherwise nuke it and start over.
+  local ret=0
+  local code=0
+
+  code=$(docker inspect \
+      -f '{{.State.ExitCode}}' \
+      "${KUBE_DATA_CONTAINER_NAME}" 2>/dev/null) || ret=$?
+  if [[ "${ret}" == 0 && "${code}" != 0 ]]; then
+    kube::build::destroy_container "${KUBE_DATA_CONTAINER_NAME}"
+    ret=1
+  fi
+  if [[ "${ret}" != 0 ]]; then
+    kube::log::status "Creating data container ${KUBE_DATA_CONTAINER_NAME}"
+    # We have to ensure the directory exists, or else the docker run will
+    # create it as root.
+    mkdir -p "${LOCAL_OUTPUT_GOPATH}"
+    # We want this to run as root to be able to chown, so non-root users can
+    # later use the result as a data container.  This run both creates the data
+    # container and chowns the GOPATH.
+    #
+    # The data container creates volumes for all of the directories that store
+    # intermediates for the Go build. This enables incremental builds across
+    # Docker sessions. The *_cgo paths are re-compiled versions of the go std
+    # libraries for true static building.
+    local -ra docker_cmd=(
+      "${DOCKER[@]}" run
+      --volume "${REMOTE_ROOT}"   # white-out the whole output dir
+      --volume /usr/local/go/pkg/linux_386_cgo
+      --volume /usr/local/go/pkg/linux_amd64_cgo
+      --volume /usr/local/go/pkg/linux_arm_cgo
+      --volume /usr/local/go/pkg/linux_arm64_cgo
+      --volume /usr/local/go/pkg/linux_ppc64le_cgo
+      --volume /usr/local/go/pkg/darwin_amd64_cgo
+      --volume /usr/local/go/pkg/darwin_386_cgo
+      --volume /usr/local/go/pkg/windows_amd64_cgo
+      --volume /usr/local/go/pkg/windows_386_cgo
+      --name "${KUBE_DATA_CONTAINER_NAME}"
+      --hostname "${HOSTNAME}"
+      "${KUBE_BUILD_IMAGE}"
+      chown -R "${USER_ID}":"${GROUP_ID}"
+        "${REMOTE_ROOT}"
+        /usr/local/go/pkg/
+    )
+    "${docker_cmd[@]}"
+  fi
+}
+
+# Run a command in the kube-build image.  This assumes that the image has
+# already been built.
+function kube::build::run_build_command() {
+  kube::log::status "Running build command..."
+  kube::build::run_build_command_ex "${KUBE_BUILD_CONTAINER_NAME}" -- "$@"
+}
+
+# Run a command in the kube-build image.  This assumes that the image has
+# already been built.
+#
+# Arguments are in the form of
+#  <container name> <extra docker args> -- <command>
+function kube::build::run_build_command_ex() {
+  [[ $# != 0 ]] || { echo "Invalid input - please specify a container name." >&2; return 4; }
+  local container_name="${1}"
+  shift
+
+  local -a docker_run_opts=(
+    "--name=${container_name}"
+    "--user=$(id -u):$(id -g)"
+    "--hostname=${HOSTNAME}"
+    "${DOCKER_MOUNT_ARGS[@]}"
+  )
+
+  local detach=false
+
+  [[ $# != 0 ]] || { echo "Invalid input - please specify docker arguments followed by --." >&2; return 4; }
+  # Everything before "--" is an arg to docker
+  until [ -z "${1-}" ] ; do
+    if [[ "$1" == "--" ]]; then
+      shift
+      break
+    fi
+    docker_run_opts+=("$1")
+    if [[ "$1" == "-d" || "$1" == "--detach" ]] ; then
+      detach=true
+    fi
+    shift
+  done
+
+  # Everything after "--" is the command to run
+  [[ $# != 0 ]] || { echo "Invalid input - please specify a command to run." >&2; return 4; }
+  local -a cmd=()
+  until [ -z "${1-}" ] ; do
+    cmd+=("$1")
+    shift
+  done
+
+  docker_run_opts+=(
+    --env "KUBE_FASTBUILD=${KUBE_FASTBUILD:-false}"
+    --env "KUBE_BUILDER_OS=${OSTYPE:-notdetected}"
+    --env "KUBE_VERBOSE=${KUBE_VERBOSE}"
+    --env "KUBE_BUILD_WITH_COVERAGE=${KUBE_BUILD_WITH_COVERAGE:-}"
+    --env "GOFLAGS=${GOFLAGS:-}"
+    --env "GOLDFLAGS=${GOLDFLAGS:-}"
+    --env "GOGCFLAGS=${GOGCFLAGS:-}"
+    --env "SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH:-}"
+  )
+
+  if [[ -n "${DOCKER_CGROUP_PARENT:-}" ]]; then
+    kube::log::status "Using ${DOCKER_CGROUP_PARENT} as container cgroup parent"
+    docker_run_opts+=(--cgroup-parent "${DOCKER_CGROUP_PARENT}")
+  fi
+
+  # If we have stdin we can run interactive.  This allows things like 'shell.sh'
+  # to work.  However, if we run this way and don't have stdin, then it ends up
+  # running in a daemon-ish mode.  So if we don't have a stdin, we explicitly
+  # attach stderr/stdout but don't bother asking for a tty.
+  if [[ -t 0 ]]; then
+    docker_run_opts+=(--interactive --tty)
+  elif [[ "${detach}" == false ]]; then
+    docker_run_opts+=("--attach=stdout" "--attach=stderr")
+  fi
+
+  local -ra docker_cmd=(
+    "${DOCKER[@]}" run "${docker_run_opts[@]}" "${KUBE_BUILD_IMAGE}")
+
+  # Clean up container from any previous run
+  kube::build::destroy_container "${container_name}"
+  "${docker_cmd[@]}" "${cmd[@]}"
+  if [[ "${detach}" == false ]]; then
+    kube::build::destroy_container "${container_name}"
+  fi
+}
+
+function kube::build::rsync_probe {
+  # Wait until rsync is up and running.
+  local tries=20
+  while (( tries > 0 )) ; do
+    if rsync "rsync://k8s@${1}:${2}/" \
+         --password-file="${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password" \
+         &> /dev/null ; then
+      return 0
+    fi
+    tries=$(( tries - 1))
+    sleep 0.1
+  done
+
+  return 1
+}
+
+# Start up the rsync container in the background. This should be explicitly
+# stopped with kube::build::stop_rsyncd_container.
+#
+# This will set the global var KUBE_RSYNC_ADDR to the effective address at
+# which the rsync daemon can be reached.
+function kube::build::start_rsyncd_container() {
+  IPTOOL=ifconfig
+  if kube::build::has_ip ; then
+    IPTOOL="ip address"
+  fi
+  kube::build::stop_rsyncd_container
+  V=3 kube::log::status "Starting rsyncd container"
+  kube::build::run_build_command_ex \
+    "${KUBE_RSYNC_CONTAINER_NAME}" -p 127.0.0.1:"${KUBE_RSYNC_PORT}":"${KUBE_CONTAINER_RSYNC_PORT}" -d \
+    -e ALLOW_HOST="$(${IPTOOL} | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1')" \
+    -- /rsyncd.sh >/dev/null
+
+  local mapped_port
+  if ! mapped_port=$("${DOCKER[@]}" port "${KUBE_RSYNC_CONTAINER_NAME}" ${KUBE_CONTAINER_RSYNC_PORT} 2> /dev/null | cut -d: -f 2) ; then
+    kube::log::error "Could not get effective rsync port"
+    return 1
+  fi
+
+  local container_ip
+  container_ip=$("${DOCKER[@]}" inspect --format '{{ .NetworkSettings.IPAddress }}' "${KUBE_RSYNC_CONTAINER_NAME}")
+
+  # Sometimes we can reach rsync through localhost and a NAT'd port.  Other
+  # times (when we are running in another docker container on the Jenkins
+  # machines) we have to talk directly to the container IP.  There is no one
+  # strategy that works in all cases so we test to figure out which situation we
+  # are in.
+  if kube::build::rsync_probe 127.0.0.1 "${mapped_port}"; then
+    KUBE_RSYNC_ADDR="127.0.0.1:${mapped_port}"
+    return 0
+  elif kube::build::rsync_probe "${container_ip}" ${KUBE_CONTAINER_RSYNC_PORT}; then
+    KUBE_RSYNC_ADDR="${container_ip}:${KUBE_CONTAINER_RSYNC_PORT}"
+    return 0
+  fi
+
+  kube::log::error "Could not connect to rsync container. See build/README.md for setting up remote Docker engine."
+  return 1
+}
+
+function kube::build::stop_rsyncd_container() {
+  V=3 kube::log::status "Stopping any currently running rsyncd container"
+  unset KUBE_RSYNC_ADDR
+  kube::build::destroy_container "${KUBE_RSYNC_CONTAINER_NAME}"
+}
+
+function kube::build::rsync {
+  local -a rsync_opts=(
+    --archive
+    "--password-file=${LOCAL_OUTPUT_BUILD_CONTEXT}/rsyncd.password"
+  )
+  if (( KUBE_VERBOSE >= 6 )); then
+    rsync_opts+=("-iv")
+  fi
+  if (( KUBE_RSYNC_COMPRESS > 0 )); then
+     rsync_opts+=("--compress-level=${KUBE_RSYNC_COMPRESS}")
+  fi
+  V=3 kube::log::status "Running rsync"
+  rsync "${rsync_opts[@]}" "$@"
+}
+
+# This will launch rsyncd in a container and then sync the source tree to the
+# container over the local network.
+function kube::build::sync_to_container() {
+  kube::log::status "Syncing sources to container"
+
+  kube::build::start_rsyncd_container
+
+  # rsync filters are a bit confusing.  Here we are syncing everything except
+  # output only directories and things that are not necessary like the git
+  # directory and generated files. The '- /' filter prevents rsync
+  # from trying to set the uid/gid/perms on the root of the sync tree.
+  # As an exception, we need to sync generated files in staging/, because
+  # they will not be re-generated by 'make'. Note that the 'H' filtered files
+  # are hidden from rsync so they will be deleted in the target container if
+  # they exist. This will allow them to be re-created in the container if
+  # necessary.
+  kube::build::rsync \
+    --delete \
+    --filter='H /.git' \
+    --filter='- /.make/' \
+    --filter='- /_tmp/' \
+    --filter='- /_output/' \
+    --filter='- /' \
+    --filter='H zz_generated.*' \
+    --filter='H generated.proto' \
+    "${KUBE_ROOT}/" "rsync://k8s@${KUBE_RSYNC_ADDR}/k8s/"
+
+  kube::build::stop_rsyncd_container
+}
+
+# Copy all build results back out.
+function kube::build::copy_output() {
+  kube::log::status "Syncing out of container"
+
+  kube::build::start_rsyncd_container
+
+  # The filter syntax for rsync is a little obscure. It filters on files and
+  # directories.  If you don't go in to a directory you won't find any files
+  # there.  Rules are evaluated in order.  The last two rules are a little
+  # magic. '+ */' says to go in to every directory and '- /**' says to ignore
+  # any file or directory that isn't already specifically allowed.
+  #
+  # We are looking to copy out all of the built binaries along with various
+  # generated files.
+  kube::build::rsync \
+    --prune-empty-dirs \
+    --filter='- /_temp/' \
+    --filter='+ /vendor/' \
+    --filter='+ /Godeps/' \
+    --filter='+ /staging/***/Godeps/**' \
+    --filter='+ /_output/dockerized/bin/**' \
+    --filter='+ zz_generated.*' \
+    --filter='+ generated.proto' \
+    --filter='+ *.pb.go' \
+    --filter='+ types.go' \
+    --filter='+ */' \
+    --filter='- /**' \
+    "rsync://k8s@${KUBE_RSYNC_ADDR}/k8s/" "${KUBE_ROOT}"
+
+  kube::build::stop_rsyncd_container
+}

+ 132 - 0
kubernetes-v1.15.4/build/container.bzl

@@ -0,0 +1,132 @@
+# Copyright 2019 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_docker//container:container.bzl", "container_bundle", "container_image")
+load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
+load("//build:platforms.bzl", "go_platform_constraint")
+
+# multi_arch_container produces a private internal container_image, multiple
+# arch-specific tagged container_bundles (named NAME-ARCH), an alias
+# from NAME to the appropriately NAME-ARCH container_bundle target, and a
+# genrule for NAME.tar copying the appropriate NAME-ARCH container bundle
+# tarball output for the currently-configured architecture.
+# Additionally, if docker_push_tags is provided, uses multi_arch_container_push
+# to create container_bundles named push-NAME-ARCH with the provided push tags,
+# along with a push-NAME docker_push target.
+# Args:
+#   name: name used for the alias; the internal container_image and
+#     container_bundles are based on this name
+#   architectures: list of architectures (in GOARCH naming parlance) to
+#     configure
+#   base: base image to use for the containers. The format string {ARCH} will
+#     be replaced with the configured GOARCH.
+#   docker_tags: list of docker tags to apply to the image. The format string
+#     {ARCH} will be replaced with the configured GOARCH; any stamping variables
+#     should be escaped, e.g. {{STABLE_MY_VAR}}.
+#   docker_push_tags: list of docker tags to apply to the image for pushing.
+#     The format string {ARCH} will be replaced with the configured GOARCH;
+#     any stamping variables should be escaped, e.g. {{STABLE_MY_VAR}}.
+#   tags: will be applied to all targets
+#   visibility: will be applied only to the container_bundles; the internal
+#     container_image is private
+#   All other args will be applied to the internal container_image.
+def multi_arch_container(
+        name,
+        architectures,
+        base,
+        docker_tags,
+        docker_push_tags = None,
+        tags = None,
+        visibility = None,
+        **kwargs):
+    # One private container_image shared by every architecture; the base
+    # image is chosen per configured platform via select(), with {ARCH}
+    # substituted into the base format string.
+    container_image(
+        name = "%s-internal" % name,
+        base = select({
+            go_platform_constraint(os = "linux", arch = arch): base.format(ARCH = arch)
+            for arch in architectures
+        }),
+        tags = tags,
+        visibility = ["//visibility:private"],
+        **kwargs
+    )
+
+    # One tagged bundle per architecture, named NAME-ARCH, each pointing at
+    # the shared internal image with {ARCH}-substituted docker tags.
+    for arch in architectures:
+        container_bundle(
+            name = "%s-%s" % (name, arch),
+            images = {
+                docker_tag.format(ARCH = arch): ":%s-internal" % name
+                for docker_tag in docker_tags
+            },
+            tags = tags,
+            visibility = visibility,
+        )
+    # NAME resolves to the NAME-ARCH bundle matching the configured platform.
+    native.alias(
+        name = name,
+        actual = select({
+            go_platform_constraint(os = "linux", arch = arch): "%s-%s" % (name, arch)
+            for arch in architectures
+        }),
+    )
+    # NAME.tar copies the tarball output of the currently-configured
+    # architecture's bundle into bindir.
+    native.genrule(
+        name = "gen_%s.tar" % name,
+        outs = ["%s.tar" % name],
+        srcs = select({
+            go_platform_constraint(os = "linux", arch = arch): ["%s-%s.tar" % (name, arch)]
+            for arch in architectures
+        }),
+        cmd = "cp $< $@",
+        output_to_bindir = True,
+    )
+
+    # Optionally wire up push-NAME targets for the same internal image.
+    if docker_push_tags:
+        multi_arch_container_push(
+            name = name,
+            architectures = architectures,
+            docker_tags_images = {docker_push_tag: ":%s-internal" % name for docker_push_tag in docker_push_tags},
+            tags = tags,
+        )
+
+# multi_arch_container_push creates container_bundles named push-NAME-ARCH for
+# the provided architectures, populating them with the images directory.
+# It additionally creates a push-NAME docker_push rule which can be run to
+# push the images to a Docker repository.
+# Args:
+#   name: name used for targets created by this macro; the internal
+#     container_bundles are based on this name
+#   architectures: list of architectures (in GOARCH naming parlance) to
+#     configure
+#   docker_tags_images: dictionary mapping docker tag to the corresponding
+#     container_image target. The format string {ARCH} will be replaced
+#     in tags with the configured GOARCH; any stamping variables should be
+#     escaped, e.g. {{STABLE_MY_VAR}}.
+#   tags: applied to container_bundle targets
+def multi_arch_container_push(
+        name,
+        architectures,
+        docker_tags_images,
+        tags = None):
+    # One private bundle per architecture, named push-NAME-ARCH, with {ARCH}
+    # substituted into each docker tag.
+    for arch in architectures:
+        container_bundle(
+            name = "push-%s-%s" % (name, arch),
+            images = {tag.format(ARCH = arch): image for tag, image in docker_tags_images.items()},
+            tags = tags,
+            visibility = ["//visibility:private"],
+        )
+    # push-NAME pushes the bundle matching the configured platform.
+    docker_push(
+        name = "push-%s" % name,
+        bundle = select({
+            go_platform_constraint(os = "linux", arch = arch): "push-%s-%s" % (name, arch)
+            for arch in architectures
+        }),
+    )

+ 26 - 0
kubernetes-v1.15.4/build/copy-output.sh

@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+# Copyright 2014 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Copies any built binaries (and other generated files) out of the Docker build container.
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# Repo root is one directory above this script.
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+source "${KUBE_ROOT}/build/common.sh"
+
+# Both functions are defined in build/common.sh.
+kube::build::verify_prereqs
+kube::build::copy_output

+ 19 - 0
kubernetes-v1.15.4/build/debian-base/Dockerfile

@@ -0,0 +1,19 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Start from an empty image: the entire filesystem comes from rootfs.tar,
+# which the accompanying Makefile produces by 'docker export'-ing the
+# intermediate image built from Dockerfile.build.
+FROM scratch
+
+ADD rootfs.tar /
+
+# NOTE(review): bash is purged during the rootfs build (see Dockerfile.build),
+# so /bin/sh here is presumably dash — confirm.
+CMD ["/bin/sh"]

+ 102 - 0
kubernetes-v1.15.4/build/debian-base/Dockerfile.build

@@ -0,0 +1,102 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# BASEIMAGE is substituted with the per-arch Debian base by the Makefile
+# (sed "s|BASEIMAGE|...|g") before this file is built.
+FROM BASEIMAGE
+
+# If we're building for another architecture than amd64, the CROSS_BUILD_ placeholder is removed so
+# e.g. CROSS_BUILD_COPY turns into COPY
+# If we're building normally, for amd64, CROSS_BUILD lines are removed
+CROSS_BUILD_COPY qemu-ARCH-static /usr/bin/
+
+# Never prompt during package operations.
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Smaller package install size.
+COPY excludes /etc/dpkg/dpkg.cfg.d/excludes
+
+# Convenience script for building on this base image.
+COPY clean-install /usr/local/bin/clean-install
+
+# Update system packages.
+RUN apt-get update \
+    && apt-get dist-upgrade -y
+
+# Hold required packages to avoid breaking the installation of packages
+RUN apt-mark hold apt gnupg adduser passwd libsemanage1 libcap2
+
+# Remove unnecessary packages.
+# This list was generated manually by listing the installed packages (`apt list --installed`),
+# then running `apt-cache rdepends --installed --no-recommends` to find the "root" packages.
+# The root packages were evaluated based on whether they were needed in the container image.
+# Several utilities (e.g. ping) were kept for usefulness, but may be removed in later versions.
+# The "Yes, do as I say!" phrase answers apt's confirmation prompt for
+# removing essential packages.
+RUN echo "Yes, do as I say!" | apt-get purge \
+    bash \
+    debconf-i18n \
+    e2fslibs \
+    e2fsprogs \
+    init \
+    initscripts \
+    libcap2-bin \
+    libkmod2 \
+    libmount1 \
+    libsmartcols1 \
+    libudev1 \
+    libblkid1 \
+    libncursesw5 \
+    libprocps6 \
+    libslang2 \
+    libss2 \
+    libsystemd0 \
+    libtext-charwidth-perl libtext-iconv-perl libtext-wrapi18n-perl \
+    ncurses-base \
+    ncurses-bin \
+    systemd \
+    systemd-sysv \
+    sysv-rc \
+    tzdata
+
+# No-op stubs replace some unnecessary binaries that may be depended on in the install process (in
+# particular we don't run an init process).
+WORKDIR /usr/local/bin
+RUN touch noop && \
+    chmod 555 noop && \
+    ln -s noop runlevel && \
+    ln -s noop invoke-rc.d && \
+    ln -s noop update-rc.d
+WORKDIR /
+
+# Cleanup cached and unnecessary files. License/copyright files are archived
+# into copyrights.tar.gz before the doc trees that contain them are deleted.
+# NOTE(review): the gconv paths are x86_64-specific; on other architectures
+# rm -rf simply finds nothing there. The trailing mkdir recreates empty man
+# directories — presumably so later package installs that expect them do not
+# fail; confirm.
+RUN apt-get autoremove -y && \
+    apt-get clean -y && \
+    tar -czf /usr/share/copyrights.tar.gz /usr/share/common-licenses /usr/share/doc/*/copyright && \
+    rm -rf \
+        /usr/share/doc \
+        /usr/share/man \
+        /usr/share/info \
+        /usr/share/locale \
+        /var/lib/apt/lists/* \
+        /var/log/* \
+        /var/cache/debconf/* \
+        /usr/share/common-licenses* \
+        /usr/share/bash-completion \
+        ~/.bashrc \
+        ~/.profile \
+        /etc/systemd \
+        /lib/lsb \
+        /lib/udev \
+        /usr/lib/x86_64-linux-gnu/gconv/IBM* \
+        /usr/lib/x86_64-linux-gnu/gconv/EBC* && \
+    mkdir -p /usr/share/man/man1 /usr/share/man/man2 \
+        /usr/share/man/man3 /usr/share/man/man4 \
+        /usr/share/man/man5 /usr/share/man/man6 \
+        /usr/share/man/man7 /usr/share/man/man8

+ 105 - 0
kubernetes-v1.15.4/build/debian-base/Makefile

@@ -0,0 +1,105 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# None of these targets produce a file named after the target; declare them
+# phony so a stray file or directory with the same name (e.g. ./build) can
+# never mask them. This matches the sibling debian-iptables and
+# debian-hyperkube-base Makefiles. (Pattern rules like sub-build-% cannot
+# be declared phony and are intentionally omitted.)
+.PHONY: all all-build all-push all-push-images build clean push push-manifest
+
+all: all-build
+
+REGISTRY ?= staging-k8s.gcr.io
+IMAGE ?= $(REGISTRY)/debian-base
+BUILD_IMAGE ?= debian-build
+
+TAG ?= v1.0.0
+
+TAR_FILE ?= rootfs.tar
+ARCH?=amd64
+ALL_ARCH = amd64 arm arm64 ppc64le s390x
+
+# := expands the shell call once at parse time, so every target in one make
+# invocation shares the same temp dir.
+TEMP_DIR:=$(shell mktemp -d)
+QEMUVERSION=v2.9.1
+
+SUDO=$(if $(filter 0,$(shell id -u)),,sudo)
+
+# This option is for running docker manifest command
+export DOCKER_CLI_EXPERIMENTAL := enabled
+
+ifeq ($(ARCH),amd64)
+	BASEIMAGE?=debian:stretch
+endif
+ifeq ($(ARCH),arm)
+	BASEIMAGE?=arm32v7/debian:stretch
+	QEMUARCH=arm
+endif
+ifeq ($(ARCH),arm64)
+	BASEIMAGE?=arm64v8/debian:stretch
+	QEMUARCH=aarch64
+endif
+ifeq ($(ARCH),ppc64le)
+	BASEIMAGE?=ppc64le/debian:stretch
+	QEMUARCH=ppc64le
+endif
+ifeq ($(ARCH),s390x)
+	BASEIMAGE?=s390x/debian:stretch
+	QEMUARCH=s390x
+endif
+
+sub-build-%:
+	$(MAKE) ARCH=$* build
+
+all-build: $(addprefix sub-build-,$(ALL_ARCH))
+
+sub-push-image-%:
+	$(MAKE) ARCH=$* push
+
+all-push-images: $(addprefix sub-push-image-,$(ALL_ARCH))
+
+all-push: all-push-images push-manifest
+
+push-manifest:
+	docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g")
+	@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done
+	docker manifest push --purge ${IMAGE}:${TAG}
+
+build: clean
+	cp ./* $(TEMP_DIR)
+	cat Dockerfile.build \
+		| sed "s|BASEIMAGE|$(BASEIMAGE)|g" \
+		| sed "s|ARCH|$(QEMUARCH)|g" \
+		> $(TEMP_DIR)/Dockerfile.build
+
+ifeq ($(ARCH),amd64)
+	# When building "normally" for amd64, remove the whole line, it has no part in the amd64 image
+	sed "/CROSS_BUILD_/d" $(TEMP_DIR)/Dockerfile.build > $(TEMP_DIR)/Dockerfile.build.tmp
+else
+	# When cross-building, only the placeholder "CROSS_BUILD_" should be removed
+	# Register /usr/bin/qemu-ARCH-static as the handler for non-x86 binaries in the kernel
+	$(SUDO) ../../third_party/multiarch/qemu-user-static/register/register.sh --reset
+	curl -sSL https://github.com/multiarch/qemu-user-static/releases/download/$(QEMUVERSION)/x86_64_qemu-$(QEMUARCH)-static.tar.gz | tar -xz -C $(TEMP_DIR)
+	# Ensure we don't get surprised by umask settings
+	chmod 0755 $(TEMP_DIR)/qemu-$(QEMUARCH)-static
+	sed "s/CROSS_BUILD_//g" $(TEMP_DIR)/Dockerfile.build > $(TEMP_DIR)/Dockerfile.build.tmp
+endif
+	mv $(TEMP_DIR)/Dockerfile.build.tmp $(TEMP_DIR)/Dockerfile.build
+
+	# Build the intermediate image, export its filesystem as rootfs.tar, then
+	# build the final flattened image (Dockerfile: FROM scratch + ADD rootfs.tar).
+	docker build --pull -t $(BUILD_IMAGE) -f $(TEMP_DIR)/Dockerfile.build $(TEMP_DIR)
+	docker create --name $(BUILD_IMAGE) $(BUILD_IMAGE)
+	docker export $(BUILD_IMAGE) > $(TEMP_DIR)/$(TAR_FILE)
+	docker build -t $(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR)
+	rm -rf $(TEMP_DIR)
+
+push: build
+	docker push $(IMAGE)-$(ARCH):$(TAG)
+
+clean:
+	docker rmi -f $(IMAGE)-$(ARCH):$(TAG) || true
+	docker rmi -f $(BUILD_IMAGE)   || true
+	docker rm  -f $(BUILD_IMAGE)   || true

+ 10 - 0
kubernetes-v1.15.4/build/debian-base/OWNERS

@@ -0,0 +1,10 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+  - BenTheElder
+  - mkumatag
+  - tallclair
+approvers:
+  - BenTheElder
+  - mkumatag
+  - tallclair

+ 12 - 0
kubernetes-v1.15.4/build/debian-base/README.md

@@ -0,0 +1,12 @@
+# Kubernetes Debian Base
+
+The Kubernetes debian-base image provides a common base for Kubernetes system images that require
+external dependencies (such as `iptables`, `sh`, or anything that is more than a static go-binary).
+
+This image differs from the standard debian image by removing a lot of packages and files that are
+generally not necessary in containers. The end result is an image that is just over 40 MB, down from
+123 MB.
+
+The image also provides a convenience script `/usr/local/bin/clean-install` that encapsulates the
+process of updating apt repositories, installing the packages, and then cleaning up unnecessary
+caches & logs.

+ 36 - 0
kubernetes-v1.15.4/build/debian-base/clean-install

@@ -0,0 +1,36 @@
+#!/bin/sh
+
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A script encapsulating a common Docker image pattern for installing packages
+# and then cleaning up the unnecessary install artifacts.
+# e.g. clean-install iptables ebtables conntrack
+#
+# Usage: clean-install PACKAGE...
+# Exits non-zero if no packages are given or any apt-get step fails.
+
+set -o errexit
+
+if [ $# = 0 ]; then
+  echo >&2 "No packages specified"
+  exit 1
+fi
+
+apt-get update
+# "$@" (quoted) passes each package name through verbatim; an unquoted $@
+# would be re-split on whitespace and glob-expanded (ShellCheck SC2068).
+apt-get install -y --no-install-recommends "$@"
+apt-get clean -y
+rm -rf \
+   /var/cache/debconf/* \
+   /var/lib/apt/lists/* \
+   /var/log/* \
+   /tmp/* \
+   /var/tmp/*

+ 10 - 0
kubernetes-v1.15.4/build/debian-base/excludes

@@ -0,0 +1,10 @@
+# dpkg path filters (installed as /etc/dpkg/dpkg.cfg.d/excludes): skip docs,
+# man/info pages, groff data and non-English locales at package install time
+# to shrink the image, while keeping copyright files and en_US locale data.
+path-exclude /usr/share/doc/*
+path-include /usr/share/doc/*/copyright
+path-exclude /usr/share/groff/*
+path-exclude /usr/share/i18n/locales/*
+path-include /usr/share/i18n/locales/en_US*
+path-exclude /usr/share/info/*
+path-exclude /usr/share/locale/*
+path-include /usr/share/locale/en_US*
+path-include /usr/share/locale/locale.alias
+path-exclude /usr/share/man/*

+ 1 - 0
kubernetes-v1.15.4/build/debian-hyperkube-base/.gitignore

@@ -0,0 +1 @@
+/cni-tars

+ 63 - 0
kubernetes-v1.15.4/build/debian-hyperkube-base/Dockerfile

@@ -0,0 +1,63 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# BASEIMAGE is substituted with the per-arch debian-base image by the Makefile.
+FROM BASEIMAGE
+
+# TODO(#69896): deprecate the shortened aliases in /
+RUN ln -s /hyperkube /apiserver \
+ && ln -s /hyperkube /cloud-controller-manager \
+ && ln -s /hyperkube /controller-manager \
+ && ln -s /hyperkube /kubectl \
+ && ln -s /hyperkube /kubelet \
+ && ln -s /hyperkube /proxy \
+ && ln -s /hyperkube /scheduler \
+ && ln -s /hyperkube /usr/local/bin/cloud-controller-manager \
+ && ln -s /hyperkube /usr/local/bin/kube-apiserver \
+ && ln -s /hyperkube /usr/local/bin/kube-controller-manager \
+ && ln -s /hyperkube /usr/local/bin/kube-proxy \
+ && ln -s /hyperkube /usr/local/bin/kube-scheduler \
+ && ln -s /hyperkube /usr/local/bin/kubectl \
+ && ln -s /hyperkube /usr/local/bin/kubelet
+
+# CACHEBUST is replaced with a fresh UUID by the Makefile (when CACHEBUST=1),
+# so this layer and everything after it are rebuilt instead of being served
+# from the Docker layer cache.
+RUN echo CACHEBUST>/dev/null && clean-install \
+    bash
+
+# The samba-common, cifs-utils, and nfs-common packages depend on
+# ucf, which itself depends on /bin/bash.
+RUN echo "dash dash/sh boolean false" | debconf-set-selections
+RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash
+
+# Runtime packages baked into the hyperkube base image.
+RUN echo CACHEBUST>/dev/null && clean-install \
+    ca-certificates \
+    ceph-common \
+    cifs-utils \
+    conntrack \
+    e2fsprogs \
+    xfsprogs \
+    ebtables \
+    ethtool \
+    git \
+    glusterfs-client \
+    iptables \
+    ipset \
+    jq \
+    kmod \
+    openssh-client \
+    netbase \
+    nfs-common \
+    socat \
+    udev \
+    util-linux
+
+# CNI plugin binaries; cni-bin/bin is unpacked from the CNI tarball by the Makefile.
+COPY cni-bin/bin /opt/cni/bin

+ 85 - 0
kubernetes-v1.15.4/build/debian-hyperkube-base/Makefile

@@ -0,0 +1,85 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build the hyperkube base image. This image is used to build the hyperkube image.
+#
+# Usage:
+#   [ARCH=amd64] [REGISTRY="staging-k8s.gcr.io"] make (build|push)
+
+REGISTRY?=staging-k8s.gcr.io
+IMAGE?=$(REGISTRY)/debian-hyperkube-base
+# NOTE(review): TAG uses '=' rather than '?=', so an environment TAG is
+# ignored (only 'make TAG=...' overrides it). Presumably intentional pinning
+# of the release tag — confirm.
+TAG=0.12.1
+ARCH?=amd64
+ALL_ARCH = amd64 arm arm64 ppc64le s390x
+CACHEBUST?=1
+
+BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.4.1
+CNI_VERSION=v0.7.5
+
+# := expands once at parse time; all targets share the same temp dir.
+TEMP_DIR:=$(shell mktemp -d)
+CNI_TARBALL=cni-plugins-$(ARCH)-$(CNI_VERSION).tgz
+
+# This option is for running docker manifest command
+export DOCKER_CLI_EXPERIMENTAL := enabled
+
+SUDO=$(if $(filter 0,$(shell id -u)),,sudo)
+
+.PHONY: all build push clean all-build all-push-images all-push push-manifest
+
+all: all-push
+
+sub-build-%:
+	$(MAKE) ARCH=$* build
+
+all-build: $(addprefix sub-build-,$(ALL_ARCH))
+
+sub-push-image-%:
+	$(MAKE) ARCH=$* push
+
+all-push-images: $(addprefix sub-push-image-,$(ALL_ARCH))
+
+all-push: all-push-images push-manifest
+
+push-manifest:
+	docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g")
+	@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done
+	docker manifest push --purge ${IMAGE}:${TAG}
+
+# Download (once per version/arch) the CNI plugin tarball into cni-tars/.
+cni-tars/$(CNI_TARBALL):
+	mkdir -p cni-tars/
+	cd cni-tars/ && curl -sSLO --retry 5 https://storage.googleapis.com/kubernetes-release/network-plugins/${CNI_TARBALL}
+
+clean:
+	rm -rf cni-tars/
+
+build: cni-tars/$(CNI_TARBALL)
+	cp Dockerfile $(TEMP_DIR)
+	cd $(TEMP_DIR) && sed -i "s|BASEIMAGE|$(BASEIMAGE)|g" Dockerfile
+
+# Substituting a fresh UUID for CACHEBUST forces the corresponding Docker
+# layers (and everything after them) to rebuild instead of using the cache.
+ifeq ($(CACHEBUST),1)
+	cd ${TEMP_DIR} && sed -i.back "s|CACHEBUST|$(shell uuidgen)|g" Dockerfile
+endif
+
+	mkdir -p ${TEMP_DIR}/cni-bin/bin
+	tar -xz -C ${TEMP_DIR}/cni-bin/bin -f "cni-tars/${CNI_TARBALL}"
+
+ifneq ($(ARCH),amd64)
+	# Register /usr/bin/qemu-ARCH-static as the handler for non-x86 binaries in the kernel
+	$(SUDO) ../../third_party/multiarch/qemu-user-static/register/register.sh --reset
+endif
+	docker build --pull -t $(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR)
+	rm -rf $(TEMP_DIR)
+
+push: build
+	docker push $(IMAGE)-$(ARCH):$(TAG)

+ 12 - 0
kubernetes-v1.15.4/build/debian-hyperkube-base/OWNERS

@@ -0,0 +1,12 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+  - BenTheElder
+  - mkumatag
+  - tallclair
+approvers:
+  - BenTheElder
+  - mkumatag
+  - tallclair
+labels:
+- sig/release

+ 25 - 0
kubernetes-v1.15.4/build/debian-hyperkube-base/README.md

@@ -0,0 +1,25 @@
+### debian-hyperkube-base
+
+Serves as the base image for `k8s.gcr.io/hyperkube-${ARCH}`
+images.
+
+This image is compiled for multiple architectures.
+
+#### How to release
+
+If you're editing the Dockerfile or some other thing, please bump the `TAG` in the Makefile.
+
+```console
+# Build and push images for all the architectures
+$ make all-push
+# ---> staging-k8s.gcr.io/debian-hyperkube-base-amd64:TAG
+# ---> staging-k8s.gcr.io/debian-hyperkube-base-arm:TAG
+# ---> staging-k8s.gcr.io/debian-hyperkube-base-arm64:TAG
+# ---> staging-k8s.gcr.io/debian-hyperkube-base-ppc64le:TAG
+# ---> staging-k8s.gcr.io/debian-hyperkube-base-s390x:TAG
+```
+
+If you don't want to push the images, run `make all-build` instead
+
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/build/debian-hyperkube-base/README.md?pixel)]()

+ 23 - 0
kubernetes-v1.15.4/build/debian-iptables/Dockerfile

@@ -0,0 +1,23 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# BASEIMAGE is substituted with the per-arch debian-base image by the Makefile.
+FROM BASEIMAGE
+
+# Userspace networking tools layered on debian-base; clean-install is the
+# install-and-cleanup helper provided by the base image.
+RUN clean-install \
+    conntrack \
+    ebtables \
+    ipset \
+    iptables \
+    kmod \
+    netbase

+ 62 - 0
kubernetes-v1.15.4/build/debian-iptables/Makefile

@@ -0,0 +1,62 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY:	build push all all-build all-push-images all-push push-manifest
+
+# GNU make treats quote characters as literal data, so a quoted default
+# ('REGISTRY?="staging-k8s.gcr.io"') would leak the '"' characters into
+# $(IMAGE) and produce an invalid docker tag. Keep the default unquoted,
+# matching the sibling debian-base and debian-hyperkube-base Makefiles.
+REGISTRY?=staging-k8s.gcr.io
+IMAGE?=$(REGISTRY)/debian-iptables
+TAG?=v11.0.2
+ARCH?=amd64
+ALL_ARCH = amd64 arm arm64 ppc64le s390x
+# := expands once at parse time; all targets share the same temp dir.
+TEMP_DIR:=$(shell mktemp -d)
+
+BASEIMAGE?=k8s.gcr.io/debian-base-$(ARCH):v1.0.0
+
+# This option is for running docker manifest command
+export DOCKER_CLI_EXPERIMENTAL := enabled
+
+SUDO=$(if $(filter 0,$(shell id -u)),,sudo)
+
+build:
+	cp ./* $(TEMP_DIR)
+	cd $(TEMP_DIR) && sed -i "s|BASEIMAGE|$(BASEIMAGE)|g" Dockerfile
+
+ifneq ($(ARCH),amd64)
+	# Register /usr/bin/qemu-ARCH-static as the handler for non-x86 binaries in the kernel
+	$(SUDO) ../../third_party/multiarch/qemu-user-static/register/register.sh --reset
+endif
+
+	docker build --pull -t $(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR)
+
+push: build
+	docker push $(IMAGE)-$(ARCH):$(TAG)
+
+sub-build-%:
+	$(MAKE) ARCH=$* build
+
+all-build: $(addprefix sub-build-,$(ALL_ARCH))
+
+sub-push-image-%:
+	$(MAKE) ARCH=$* push
+
+all-push-images: $(addprefix sub-push-image-,$(ALL_ARCH))
+
+all-push: all-push-images push-manifest
+
+push-manifest:
+	docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g")
+	@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done
+	docker manifest push --purge ${IMAGE}:${TAG}
+
+all: all-push

+ 18 - 0
kubernetes-v1.15.4/build/debian-iptables/OWNERS

@@ -0,0 +1,18 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+  - BenTheElder
+  - bowei
+  - freehan
+  - jingax10
+  - mkumatag
+  - mrhohn
+  - tallclair
+approvers:
+  - BenTheElder
+  - bowei
+  - freehan
+  - jingax10
+  - mkumatag
+  - mrhohn
+  - tallclair

+ 24 - 0
kubernetes-v1.15.4/build/debian-iptables/README.md

@@ -0,0 +1,24 @@
+### debian-iptables
+
+Serves as the base image for `k8s.gcr.io/kube-proxy-${ARCH}` and multiarch (not `amd64`) `k8s.gcr.io/flannel-${ARCH}` images.
+
+This image is compiled for multiple architectures.
+
+#### How to release
+
+If you're editing the Dockerfile or some other thing, please bump the `TAG` in the Makefile.
+
+```console
+# Build and push images for all the architectures
+$ make all-push
+# ---> staging-k8s.gcr.io/debian-iptables-amd64:TAG
+# ---> staging-k8s.gcr.io/debian-iptables-arm:TAG
+# ---> staging-k8s.gcr.io/debian-iptables-arm64:TAG
+# ---> staging-k8s.gcr.io/debian-iptables-ppc64le:TAG
+# ---> staging-k8s.gcr.io/debian-iptables-s390x:TAG
+```
+
+If you don't want to push the images, run `make build ARCH={target_arch}` or `make all-build` instead
+
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/build/debian-iptables/README.md?pixel)]()

+ 11 - 0
kubernetes-v1.15.4/build/debs/10-kubeadm.conf

@@ -0,0 +1,11 @@
+# Note: This dropin only works with kubeadm and kubelet v1.11+
+[Service]
+Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
+Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
+# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
+EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
+# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
+# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
+EnvironmentFile=-/etc/default/kubelet
+ExecStart=
+ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS

+ 2 - 0
kubernetes-v1.15.4/build/debs/50-kubeadm.conf

@@ -0,0 +1,2 @@
+# The file is provided as part of the kubeadm package
+net.ipv4.ip_forward = 1

+ 276 - 0
kubernetes-v1.15.4/build/debs/BUILD

@@ -0,0 +1,276 @@
+package(default_visibility = ["//visibility:public"])
+
+load("@io_k8s_repo_infra//defs:deb.bzl", "deb_data", "k8s_deb")
+load("@io_k8s_repo_infra//defs:build.bzl", "release_filegroup")
+load("@io_k8s_repo_infra//defs:pkg.bzl", "pkg_tar")
+load(
+    "//build:platforms.bzl",
+    "CLIENT_PLATFORMS",
+    "NODE_PLATFORMS",
+    "SERVER_PLATFORMS",
+    "for_platforms",
+)
+load("//build:workspace.bzl", "CNI_VERSION", "CRI_TOOLS_VERSION")
+
+# We do not include kube-scheduler, kube-controller-manager,
+# kube-apiserver, and kube-proxy in this list even though we
+# produce debs for them. We recommend that they be run in docker
+# images. We use the debs that we produce here to build those
+# images.
+release_filegroup(
+    name = "debs",
+    conditioned_srcs = for_platforms(
+        default = [],
+        for_client = [":kubectl.deb"],
+        for_node = [
+            ":cri-tools.deb",
+            ":kubeadm.deb",
+            ":kubelet.deb",
+            ":kubernetes-cni.deb",
+        ],
+        only_os = "linux",
+    ),
+)
+
+# Create genrules to copy the arch-specific debs to debs without the arch in their filename.
+genrule(
+    name = "kubectl",
+    srcs = select(for_platforms(
+        for_client = [":kubectl-{ARCH}.deb"],
+        only_os = "linux",
+    )),
+    outs = ["kubectl.deb"],
+    cmd = "cp $< $@",
+    output_to_bindir = True,
+)
+
+[genrule(
+    name = pkg,
+    srcs = select(for_platforms(
+        for_node = [":%s-{ARCH}.deb" % pkg],
+        only_os = "linux",
+    )),
+    outs = ["%s.deb" % pkg],
+    cmd = "cp $< $@",
+    output_to_bindir = True,
+) for pkg in [
+    "cri-tools",
+    "kubeadm",
+    "kubelet",
+    "kubernetes-cni",
+]]
+
+[deb_data(
+    name = "kubectl",
+    data = [
+        {
+            "files": ["//cmd/kubectl"],
+            "mode": "0755",
+            "dir": "/usr/bin",
+        },
+    ],
+    goarch = arch,
+    tags = ["manual"],
+) for arch in CLIENT_PLATFORMS["linux"]]
+
+[[deb_data(
+    name = binary,
+    data = [
+        {
+            "files": ["//cmd/" + binary],
+            "mode": "0755",
+            "dir": "/usr/bin",
+        },
+    ],
+    goarch = arch,
+    tags = ["manual"],
+) for binary in [
+    "cloud-controller-manager",
+    "kube-apiserver",
+    "kube-controller-manager",
+    "kube-proxy",
+    "kube-scheduler",
+]] for arch in SERVER_PLATFORMS["linux"]]
+
+[deb_data(
+    name = "kubelet",
+    data = [
+        {
+            "files": ["//cmd/kubelet"],
+            "mode": "0755",
+            "dir": "/usr/bin",
+        },
+        {
+            "files": ["kubelet.service"],
+            "mode": "644",
+            "dir": "/lib/systemd/system",
+        },
+    ],
+    goarch = arch,
+    tags = ["manual"],
+) for arch in NODE_PLATFORMS["linux"]]
+
+[deb_data(
+    name = "kubeadm",
+    data = [
+        {
+            "files": ["//cmd/kubeadm"],
+            "mode": "0755",
+            "dir": "/usr/bin",
+        },
+        {
+            "files": ["10-kubeadm.conf"],
+            "mode": "644",
+            "dir": "/etc/systemd/system/kubelet.service.d",
+        },
+        {
+            "files": ["kubeadm.conf"],
+            "mode": "644",
+            "dir": "/usr/lib/modules-load.d",
+        },
+        {
+            "files": ["50-kubeadm.conf"],
+            "mode": "644",
+            "dir": "/etc/sysctl.d",
+        },
+    ],
+    goarch = arch,
+    tags = ["manual"],
+) for arch in NODE_PLATFORMS["linux"]]
+
+[pkg_tar(
+    name = "kubernetes-cni-data-%s" % goarch,
+    package_dir = "/opt/cni/bin",
+    tags = ["manual"],
+    deps = ["@kubernetes_cni_%s//file" % goarch],
+) for goarch in NODE_PLATFORMS["linux"]]
+
+[pkg_tar(
+    name = "cri-tools-data-%s" % goarch,
+    package_dir = "/usr/bin",
+    tags = ["manual"],
+    deps = ["@cri_tools_%s//file" % goarch],
+) for goarch in NODE_PLATFORMS["linux"]]
+
+[k8s_deb(
+    name = "cloud-controller-manager",
+    description = "Kubernetes Cloud Controller Manager",
+    goarch = arch,
+    tags = ["manual"],
+    version_file = "//build:os_package_version",
+) for arch in SERVER_PLATFORMS["linux"]]
+
+[k8s_deb(
+    name = "kubectl",
+    description = """Kubernetes Command Line Tool
+The Kubernetes command line tool for interacting with the Kubernetes API.
+""",
+    goarch = arch,
+    tags = ["manual"],
+    version_file = "//build:os_package_version",
+) for arch in CLIENT_PLATFORMS["linux"]]
+
+[k8s_deb(
+    name = "kube-apiserver",
+    description = "Kubernetes API Server",
+    goarch = arch,
+    tags = ["manual"],
+    version_file = "//build:os_package_version",
+) for arch in SERVER_PLATFORMS["linux"]]
+
+[k8s_deb(
+    name = "kube-controller-manager",
+    description = "Kubernetes Controller Manager",
+    goarch = arch,
+    tags = ["manual"],
+    version_file = "//build:os_package_version",
+) for arch in SERVER_PLATFORMS["linux"]]
+
+[k8s_deb(
+    name = "kube-scheduler",
+    description = "Kubernetes Scheduler",
+    goarch = arch,
+    tags = ["manual"],
+    version_file = "//build:os_package_version",
+) for arch in SERVER_PLATFORMS["linux"]]
+
+[k8s_deb(
+    name = "kube-proxy",
+    depends = [
+        "iptables (>= 1.4.21)",
+        "iproute2",
+    ],
+    description = "Kubernetes Service Proxy",
+    goarch = arch,
+    tags = ["manual"],
+    version_file = "//build:os_package_version",
+) for arch in NODE_PLATFORMS["linux"]]
+
+[k8s_deb(
+    name = "kubelet",
+    depends = [
+        "conntrack",
+        "ebtables",
+        "ethtool",
+        "iproute2",
+        "iptables (>= 1.4.21)",
+        "kubernetes-cni (>= %s)" % CNI_VERSION,
+        "mount",
+        "socat",
+        "util-linux",
+    ],
+    description = """Kubernetes Node Agent
+The node agent of Kubernetes, the container cluster manager
+""",
+    goarch = arch,
+    tags = ["manual"],
+    version_file = "//build:os_package_version",
+) for arch in NODE_PLATFORMS["linux"]]
+
+[k8s_deb(
+    name = "kubeadm",
+    depends = [
+        "kubelet (>= 1.8.0)",
+        "kubectl (>= 1.8.0)",
+        "kubernetes-cni (>= %s)" % CNI_VERSION,
+        "cri-tools (>= 1.11.0)",
+    ],
+    description = """Kubernetes Cluster Bootstrapping Tool
+The Kubernetes command line tool for bootstrapping a Kubernetes cluster.
+""",
+    goarch = arch,
+    postinst = "postinst",
+    tags = ["manual"],
+    version_file = "//build:os_package_version",
+) for arch in NODE_PLATFORMS["linux"]]
+
+[k8s_deb(
+    name = "kubernetes-cni",
+    description = """Kubernetes Packaging of CNI
+The Container Networking Interface tools for provisioning container networks.
+""",
+    goarch = arch,
+    tags = ["manual"],
+    version = CNI_VERSION,
+) for arch in NODE_PLATFORMS["linux"]]
+
+[k8s_deb(
+    name = "cri-tools",
+    description = """Container Runtime Interface tools (crictl)""",
+    goarch = arch,
+    tags = ["manual"],
+    version = CRI_TOOLS_VERSION,
+) for arch in NODE_PLATFORMS["linux"]]
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+)

+ 15 - 0
kubernetes-v1.15.4/build/debs/OWNERS

@@ -0,0 +1,15 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+  - luxas
+  - jbeda
+  - mikedanese
+  - pipejakob
+  - chuckha
+  - timothysc
+approvers:
+  - luxas
+  - jbeda
+  - mikedanese
+  - pipejakob
+  - timothysc

+ 2 - 0
kubernetes-v1.15.4/build/debs/kubeadm.conf

@@ -0,0 +1,2 @@
+# Load br_netfilter module at boot
+br_netfilter

+ 12 - 0
kubernetes-v1.15.4/build/debs/kubelet.service

@@ -0,0 +1,12 @@
+[Unit]
+Description=kubelet: The Kubernetes Node Agent
+Documentation=http://kubernetes.io/docs/
+
+[Service]
+ExecStart=/usr/bin/kubelet
+# Always restart the kubelet when it exits; StartLimitInterval=0 disables the
+# start rate limit so systemd never gives up, retrying every 10 seconds.
+Restart=always
+StartLimitInterval=0
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target

+ 30 - 0
kubernetes-v1.15.4/build/debs/postinst

@@ -0,0 +1,30 @@
+#!/bin/sh
+# Post-installation maintainer script for the kubeadm deb package.
+# see: dh_installdeb(1)
+
+set -o errexit
+set -o nounset
+
+case "$1" in
+    configure)
+        # because kubeadm package adds kubelet drop-ins, we must daemon-reload
+        # and restart kubelet now. restarting kubelet is ok because kubelet
+        # postinst configure step auto-starts it.
+        # Failures are deliberately ignored (|| true) so package installation
+        # still succeeds on systems where systemd is not running.
+        systemctl daemon-reload 2>/dev/null || true
+        systemctl restart kubelet 2>/dev/null || true
+    ;;
+
+    abort-upgrade|abort-remove|abort-deconfigure)
+        # Nothing to undo for these maintainer-script states.
+    ;;
+
+    *)
+        echo "postinst called with unknown argument \`$1'" >&2
+        exit 1
+    ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0

+ 103 - 0
kubernetes-v1.15.4/build/go.bzl

@@ -0,0 +1,103 @@
+# Copyright 2019 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_test")
+
+# Defines several go_binary rules to work around a Bazel issue which makes
+# the pure attribute on go_binary not configurable.
+# The name provided will have cgo enabled if targeting Linux, and will
+# be a pure go binary otherwise. Additionally, if targeting Windows, the
+# output filename will have a .exe suffix.
+def go_binary_conditional_pure(name, tags = None, **kwargs):
+    """Declares go_binary rules for `name`: cgo on Linux, pure Go elsewhere.
+
+    Args:
+      name: base name of the binary; also the name of the alias users build.
+      tags: optional list of tags; "manual" is added to all generated rules.
+      **kwargs: forwarded to every underlying go_binary rule.
+    """
+
+    # Copy before appending so the caller's list is never mutated (the caller
+    # may pass a shared — or frozen, post-load — Starlark list).
+    tags = list(tags) if tags else []
+    tags.append("manual")
+
+    # cgo-enabled variant; chosen by the alias below when targeting Linux.
+    go_binary(
+        name = "_%s-cgo" % name,
+        out = name,
+        pure = "off",
+        tags = tags,
+        **kwargs
+    )
+
+    # Define a rule for both Unix and Windows exe suffixes.
+    [go_binary(
+        name = "_%s-pure" % out,
+        out = out,
+        pure = "on",
+        tags = tags,
+        **kwargs
+    ) for out in [name, name + ".exe"]]
+
+    # The real magic, where we work around the pure attribute not being
+    # configurable: select the appropriate go_binary rule above based on the
+    # configured platform. Note Windows selects the "_<name>.exe-pure" rule
+    # generated by the comprehension above, so the output carries a .exe suffix.
+    native.alias(
+        name = name,
+        actual = select({
+            "@io_bazel_rules_go//go/platform:linux": ":_%s-cgo" % name,
+            "@io_bazel_rules_go//go/platform:windows": ":_%s.exe-pure" % name,
+            "//conditions:default": ":_%s-pure" % name,
+        }),
+    )
+
+# Defines several go_test rules to work around a Bazel issue which makes
+# the pure attribute on go_test not configurable.
+# This also defines genrules to produce test binaries named ${out} and
+# ${out}.exe, and an alias named ${out}_binary which automatically selects
+# the correct filename suffix (i.e. with a .exe on Windows).
+def go_test_conditional_pure(name, out, tags = None, **kwargs):
+    """Declares go_test rules for `name`: cgo on Linux, pure Go elsewhere.
+
+    Also defines genrules producing test binaries named ${out} and ${out}.exe,
+    plus an alias ${out}_binary that picks the right suffix per platform.
+
+    Args:
+      name: name of the alias selecting between the cgo and pure variants.
+      out: filename for the copied test binary (".exe" appended on Windows).
+      tags: optional list of tags; "manual" is added to all generated rules.
+      **kwargs: forwarded to both underlying go_test rules.
+    """
+
+    # Copy before appending so the caller's list is never mutated (the caller
+    # may pass a shared — or frozen, post-load — Starlark list).
+    tags = list(tags) if tags else []
+    tags.append("manual")
+
+    # cgo-enabled variant; chosen by the alias below when targeting Linux.
+    go_test(
+        name = "_%s-cgo" % name,
+        pure = "off",
+        # testonly = False so non-test rules (presumably release packaging)
+        # may depend on the resulting binary.
+        testonly = False,
+        tags = tags,
+        **kwargs
+    )
+
+    go_test(
+        name = "_%s-pure" % name,
+        pure = "on",
+        testonly = False,
+        tags = tags,
+        **kwargs
+    )
+
+    native.alias(
+        name = name,
+        actual = select({
+            "@io_bazel_rules_go//go/platform:linux": ":_%s-cgo" % name,
+            "//conditions:default": ":_%s-pure" % name,
+        }),
+    )
+
+    # Copy the selected test binary to both ${out} and ${out}.exe.
+    [native.genrule(
+        name = "gen_%s" % o,
+        srcs = [name],
+        outs = [o],
+        cmd = "cp $< $@;",
+        output_to_bindir = True,
+        executable = True,
+        tags = tags,
+    ) for o in [out, out + ".exe"]]
+
+    # Alias resolving to the platform-appropriate copied binary.
+    native.alias(
+        name = "%s_binary" % out,
+        actual = select({
+            "@io_bazel_rules_go//go/platform:windows": ":gen_%s.exe" % out,
+            "//conditions:default": ":gen_%s" % out,
+        }),
+    )

+ 183 - 0
kubernetes-v1.15.4/build/kazel_generated.bzl

@@ -0,0 +1,183 @@
+# Copyright The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# #################################################
+# # # # # # # # # # # # # # # # # # # # # # # # # #
+# This file is autogenerated by kazel. DO NOT EDIT.
+# # # # # # # # # # # # # # # # # # # # # # # # # #
+# #################################################
+#
+# The go prefix passed to kazel
+go_prefix = "k8s.io/kubernetes"
+
+# The list of codegen tags kazel is configured to find
+kazel_configured_tags = ["openapi-gen"]
+
+# tags_values_pkgs is a dictionary mapping {k8s build tag: {tag value: [pkgs including that tag:value]}}
+tags_values_pkgs = {"openapi-gen": {
+    "false": [
+        "staging/src/k8s.io/api/admission/v1beta1",
+        "staging/src/k8s.io/api/core/v1",
+        "staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1",
+        "staging/src/k8s.io/apiserver/pkg/apis/example/v1",
+        "staging/src/k8s.io/apiserver/pkg/apis/example2/v1",
+    ],
+    "true": [
+        "cmd/cloud-controller-manager/app/apis/config/v1alpha1",
+        "pkg/apis/abac/v0",
+        "pkg/apis/abac/v1beta1",
+        "pkg/apis/auditregistration",
+        "pkg/version",
+        "staging/src/k8s.io/api/admissionregistration/v1beta1",
+        "staging/src/k8s.io/api/apps/v1",
+        "staging/src/k8s.io/api/apps/v1beta1",
+        "staging/src/k8s.io/api/apps/v1beta2",
+        "staging/src/k8s.io/api/auditregistration/v1alpha1",
+        "staging/src/k8s.io/api/authentication/v1",
+        "staging/src/k8s.io/api/authentication/v1beta1",
+        "staging/src/k8s.io/api/authorization/v1",
+        "staging/src/k8s.io/api/authorization/v1beta1",
+        "staging/src/k8s.io/api/autoscaling/v1",
+        "staging/src/k8s.io/api/autoscaling/v2beta1",
+        "staging/src/k8s.io/api/autoscaling/v2beta2",
+        "staging/src/k8s.io/api/batch/v1",
+        "staging/src/k8s.io/api/batch/v1beta1",
+        "staging/src/k8s.io/api/batch/v2alpha1",
+        "staging/src/k8s.io/api/certificates/v1beta1",
+        "staging/src/k8s.io/api/coordination/v1",
+        "staging/src/k8s.io/api/coordination/v1beta1",
+        "staging/src/k8s.io/api/core/v1",
+        "staging/src/k8s.io/api/events/v1beta1",
+        "staging/src/k8s.io/api/extensions/v1beta1",
+        "staging/src/k8s.io/api/imagepolicy/v1alpha1",
+        "staging/src/k8s.io/api/networking/v1",
+        "staging/src/k8s.io/api/networking/v1beta1",
+        "staging/src/k8s.io/api/node/v1alpha1",
+        "staging/src/k8s.io/api/node/v1beta1",
+        "staging/src/k8s.io/api/policy/v1beta1",
+        "staging/src/k8s.io/api/rbac/v1",
+        "staging/src/k8s.io/api/rbac/v1alpha1",
+        "staging/src/k8s.io/api/rbac/v1beta1",
+        "staging/src/k8s.io/api/scheduling/v1",
+        "staging/src/k8s.io/api/scheduling/v1alpha1",
+        "staging/src/k8s.io/api/scheduling/v1beta1",
+        "staging/src/k8s.io/api/settings/v1alpha1",
+        "staging/src/k8s.io/api/storage/v1",
+        "staging/src/k8s.io/api/storage/v1alpha1",
+        "staging/src/k8s.io/api/storage/v1beta1",
+        "staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1",
+        "staging/src/k8s.io/apimachinery/pkg/api/resource",
+        "staging/src/k8s.io/apimachinery/pkg/apis/meta/v1",
+        "staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1",
+        "staging/src/k8s.io/apimachinery/pkg/runtime",
+        "staging/src/k8s.io/apimachinery/pkg/util/intstr",
+        "staging/src/k8s.io/apimachinery/pkg/version",
+        "staging/src/k8s.io/apiserver/pkg/apis/audit/v1",
+        "staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1",
+        "staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1",
+        "staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1",
+        "staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1",
+        "staging/src/k8s.io/client-go/pkg/version",
+        "staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1",
+        "staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1",
+        "staging/src/k8s.io/kube-controller-manager/config/v1alpha1",
+        "staging/src/k8s.io/kube-proxy/config/v1alpha1",
+        "staging/src/k8s.io/kube-scheduler/config/v1alpha1",
+        "staging/src/k8s.io/kubelet/config/v1beta1",
+        "staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1",
+        "staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta2",
+        "staging/src/k8s.io/metrics/pkg/apis/external_metrics/v1beta1",
+        "staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1",
+        "staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1",
+        "staging/src/k8s.io/node-api/pkg/apis/node/v1alpha1",
+    ],
+}}
+
+# tags_pkgs_values is a dictionary mapping {k8s build tag: {pkg: [tag values in pkg]}}
+tags_pkgs_values = {"openapi-gen": {
+    "cmd/cloud-controller-manager/app/apis/config/v1alpha1": ["true"],
+    "pkg/apis/abac/v0": ["true"],
+    "pkg/apis/abac/v1beta1": ["true"],
+    "pkg/apis/auditregistration": ["true"],
+    "pkg/version": ["true"],
+    "staging/src/k8s.io/api/admission/v1beta1": ["false"],
+    "staging/src/k8s.io/api/admissionregistration/v1beta1": ["true"],
+    "staging/src/k8s.io/api/apps/v1": ["true"],
+    "staging/src/k8s.io/api/apps/v1beta1": ["true"],
+    "staging/src/k8s.io/api/apps/v1beta2": ["true"],
+    "staging/src/k8s.io/api/auditregistration/v1alpha1": ["true"],
+    "staging/src/k8s.io/api/authentication/v1": ["true"],
+    "staging/src/k8s.io/api/authentication/v1beta1": ["true"],
+    "staging/src/k8s.io/api/authorization/v1": ["true"],
+    "staging/src/k8s.io/api/authorization/v1beta1": ["true"],
+    "staging/src/k8s.io/api/autoscaling/v1": ["true"],
+    "staging/src/k8s.io/api/autoscaling/v2beta1": ["true"],
+    "staging/src/k8s.io/api/autoscaling/v2beta2": ["true"],
+    "staging/src/k8s.io/api/batch/v1": ["true"],
+    "staging/src/k8s.io/api/batch/v1beta1": ["true"],
+    "staging/src/k8s.io/api/batch/v2alpha1": ["true"],
+    "staging/src/k8s.io/api/certificates/v1beta1": ["true"],
+    "staging/src/k8s.io/api/coordination/v1": ["true"],
+    "staging/src/k8s.io/api/coordination/v1beta1": ["true"],
+    "staging/src/k8s.io/api/core/v1": [
+        "false",
+        "true",
+    ],
+    "staging/src/k8s.io/api/events/v1beta1": ["true"],
+    "staging/src/k8s.io/api/extensions/v1beta1": ["true"],
+    "staging/src/k8s.io/api/imagepolicy/v1alpha1": ["true"],
+    "staging/src/k8s.io/api/networking/v1": ["true"],
+    "staging/src/k8s.io/api/networking/v1beta1": ["true"],
+    "staging/src/k8s.io/api/node/v1alpha1": ["true"],
+    "staging/src/k8s.io/api/node/v1beta1": ["true"],
+    "staging/src/k8s.io/api/policy/v1beta1": ["true"],
+    "staging/src/k8s.io/api/rbac/v1": ["true"],
+    "staging/src/k8s.io/api/rbac/v1alpha1": ["true"],
+    "staging/src/k8s.io/api/rbac/v1beta1": ["true"],
+    "staging/src/k8s.io/api/scheduling/v1": ["true"],
+    "staging/src/k8s.io/api/scheduling/v1alpha1": ["true"],
+    "staging/src/k8s.io/api/scheduling/v1beta1": ["true"],
+    "staging/src/k8s.io/api/settings/v1alpha1": ["true"],
+    "staging/src/k8s.io/api/storage/v1": ["true"],
+    "staging/src/k8s.io/api/storage/v1alpha1": ["true"],
+    "staging/src/k8s.io/api/storage/v1beta1": ["true"],
+    "staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1": ["true"],
+    "staging/src/k8s.io/apimachinery/pkg/api/resource": ["true"],
+    "staging/src/k8s.io/apimachinery/pkg/apis/meta/v1": ["true"],
+    "staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1": ["true"],
+    "staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1": ["false"],
+    "staging/src/k8s.io/apimachinery/pkg/runtime": ["true"],
+    "staging/src/k8s.io/apimachinery/pkg/util/intstr": ["true"],
+    "staging/src/k8s.io/apimachinery/pkg/version": ["true"],
+    "staging/src/k8s.io/apiserver/pkg/apis/audit/v1": ["true"],
+    "staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1": ["true"],
+    "staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1": ["true"],
+    "staging/src/k8s.io/apiserver/pkg/apis/example/v1": ["false"],
+    "staging/src/k8s.io/apiserver/pkg/apis/example2/v1": ["false"],
+    "staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1": ["true"],
+    "staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1": ["true"],
+    "staging/src/k8s.io/client-go/pkg/version": ["true"],
+    "staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1": ["true"],
+    "staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1": ["true"],
+    "staging/src/k8s.io/kube-controller-manager/config/v1alpha1": ["true"],
+    "staging/src/k8s.io/kube-proxy/config/v1alpha1": ["true"],
+    "staging/src/k8s.io/kube-scheduler/config/v1alpha1": ["true"],
+    "staging/src/k8s.io/kubelet/config/v1beta1": ["true"],
+    "staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1": ["true"],
+    "staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta2": ["true"],
+    "staging/src/k8s.io/metrics/pkg/apis/external_metrics/v1beta1": ["true"],
+    "staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1": ["true"],
+    "staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1": ["true"],
+    "staging/src/k8s.io/node-api/pkg/apis/node/v1alpha1": ["true"],
+}}

+ 648 - 0
kubernetes-v1.15.4/build/lib/release.sh

@@ -0,0 +1,648 @@
+#!/usr/bin/env bash
+
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file creates release artifacts (tar files, container images) that are
+# ready to distribute to install or distribute to end users.
+
+###############################################################################
+# Most of the ::release:: namespace functions have been moved to
+# github.com/kubernetes/release.  Have a look in that repo and specifically in
+# lib/releaselib.sh for ::release::-related functionality.
+###############################################################################
+
+# This is where the final release artifacts are created locally
+readonly RELEASE_STAGE="${LOCAL_OUTPUT_ROOT}/release-stage"
+readonly RELEASE_TARS="${LOCAL_OUTPUT_ROOT}/release-tars"
+readonly RELEASE_IMAGES="${LOCAL_OUTPUT_ROOT}/release-images"
+
+KUBE_BUILD_HYPERKUBE=${KUBE_BUILD_HYPERKUBE:-y}
+KUBE_BUILD_CONFORMANCE=${KUBE_BUILD_CONFORMANCE:-y}
+KUBE_BUILD_PULL_LATEST_IMAGES=${KUBE_BUILD_PULL_LATEST_IMAGES:-y}
+
+# The mondo test tarball is deprecated as of Kubernetes 1.14, and the default
+# will be set to 'n' in a future release.
+# See KEP sig-testing/20190118-breaking-apart-the-kubernetes-test-tarball
+KUBE_BUILD_MONDO_TEST_TARBALL=${KUBE_BUILD_MONDO_TEST_TARBALL:-y}
+
+# Validate a ci version
+#
+# Globals:
+#   None
+# Arguments:
+#   version
+# Returns:
+#   If version is a valid ci version
+# Sets:                    (e.g. for '1.2.3-alpha.4.56+abcdef12345678')
+#   VERSION_MAJOR          (e.g. '1')
+#   VERSION_MINOR          (e.g. '2')
+#   VERSION_PATCH          (e.g. '3')
+#   VERSION_PRERELEASE     (e.g. 'alpha')
+#   VERSION_PRERELEASE_REV (e.g. '4')
+#   VERSION_BUILD_INFO     (e.g. '.56+abcdef12345678')
+#   VERSION_COMMITS        (e.g. '56')
+function kube::release::parse_and_validate_ci_version() {
+  # Accept things like "v1.2.3-alpha.4.56+abcdef12345678" or "v1.2.3-beta.4"
+  local -r version_regex="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)-([a-zA-Z0-9]+)\\.(0|[1-9][0-9]*)(\\.(0|[1-9][0-9]*)\\+[0-9a-f]{7,40})?$"
+  local -r version="${1-}"
+  [[ "${version}" =~ ${version_regex} ]] || {
+    kube::log::error "Invalid ci version: '${version}', must match regex ${version_regex}"
+    return 1
+  }
+  # Export each capture group (see the "Sets:" list in the header comment).
+  VERSION_MAJOR="${BASH_REMATCH[1]}"
+  VERSION_MINOR="${BASH_REMATCH[2]}"
+  VERSION_PATCH="${BASH_REMATCH[3]}"
+  VERSION_PRERELEASE="${BASH_REMATCH[4]}"
+  VERSION_PRERELEASE_REV="${BASH_REMATCH[5]}"
+  # Group 6 keeps its leading '.' and '+' (e.g. '.56+abcdef12345678');
+  # groups 6 and 7 are empty when the optional build-info part is absent.
+  VERSION_BUILD_INFO="${BASH_REMATCH[6]}"
+  VERSION_COMMITS="${BASH_REMATCH[7]}"
+}
+
+# ---------------------------------------------------------------------------
+# Build final release artifacts
+function kube::release::clean_cruft() {
+  # Clean out cruft left in the staging tree by editors and the OS.
+  find "${RELEASE_STAGE}" -name '*~' -exec rm {} \;    # editor backup files
+  find "${RELEASE_STAGE}" -name '#*#' -exec rm {} \;   # emacs auto-save files
+  find "${RELEASE_STAGE}" -name '.DS*' -exec rm {} \;  # macOS Finder metadata
+}
+
+function kube::release::package_tarballs() {
+  # Builds every release tarball in three phases.  Within a phase the
+  # package_* helpers run as background jobs ('&'); kube::util::wait-for-jobs
+  # is the barrier between phases, and any job failure aborts with status 1.
+  # Clean out any old releases
+  rm -rf "${RELEASE_STAGE}" "${RELEASE_TARS}" "${RELEASE_IMAGES}"
+  mkdir -p "${RELEASE_TARS}"
+  kube::release::package_src_tarball &
+  kube::release::package_client_tarballs &
+  kube::release::package_kube_manifests_tarball &
+  kube::util::wait-for-jobs || { kube::log::error "previous tarball phase failed"; return 1; }
+
+  # _node and _server tarballs depend on _src tarball
+  kube::release::package_node_tarballs &
+  kube::release::package_server_tarballs &
+  kube::util::wait-for-jobs || { kube::log::error "previous tarball phase failed"; return 1; }
+
+  kube::release::package_final_tarball & # _final depends on some of the previous phases
+  kube::release::package_test_tarballs & # _test doesn't depend on anything
+  kube::util::wait-for-jobs || { kube::log::error "previous tarball phase failed"; return 1; }
+}
+
+# Package the source code we built, for compliance/licensing/audit/yadda.
+function kube::release::package_src_tarball() {
+  local -r src_tarball="${RELEASE_TARS}/kubernetes-src.tar.gz"
+  kube::log::status "Building tarball: src"
+  if [[ "${KUBE_GIT_TREE_STATE-}" == "clean" ]]; then
+    # Clean git tree: archive exactly what HEAD contains.
+    git archive -o "${src_tarball}" HEAD
+  else
+    # Dirty or unknown tree: tar the working directory instead, pruning
+    # build output (./_*) and .git/.config/.gsutil metadata directories.
+    # NOTE(review): the unquoted $(...) relies on shell word splitting, so
+    # top-level paths containing whitespace would break here.
+    local source_files=(
+      $(cd "${KUBE_ROOT}" && find . -mindepth 1 -maxdepth 1 \
+        -not \( \
+          \( -path ./_\*        -o \
+             -path ./.git\*     -o \
+             -path ./.config\* -o \
+             -path ./.gsutil\*    \
+          \) -prune \
+        \))
+    )
+    # --transform renames the top-level '.' to 'kubernetes' inside the tar.
+    "${TAR}" czf "${src_tarball}" --transform 's|^\.|kubernetes|' -C "${KUBE_ROOT}" "${source_files[@]}"
+  fi
+}
+
+# Package up all of the cross compiled clients. Over time this should grow into
+# a full SDK
+function kube::release::package_client_tarballs() {
+   # Find all of the built client binaries
+  local platform platforms
+  # Platform dirs look like "<os>/<arch>"; glob them from the output tree.
+  platforms=($(cd "${LOCAL_OUTPUT_BINPATH}" ; echo */*))
+  for platform in "${platforms[@]}"; do
+    local platform_tag=${platform/\//-} # Replace a "/" for a "-"
+    kube::log::status "Starting tarball: client $platform_tag"
+
+    # Stage and tar each platform in a background subshell so all platforms
+    # are packaged in parallel; wait-for-jobs below collects the results.
+    (
+      local release_stage="${RELEASE_STAGE}/client/${platform_tag}/kubernetes"
+      rm -rf "${release_stage}"
+      mkdir -p "${release_stage}/client/bin"
+
+      # Windows binaries carry a .exe suffix, so they have their own list.
+      local client_bins=("${KUBE_CLIENT_BINARIES[@]}")
+      if [[ "${platform%/*}" == "windows" ]]; then
+        client_bins=("${KUBE_CLIENT_BINARIES_WIN[@]}")
+      fi
+
+      # This fancy expression will expand to prepend a path
+      # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
+      # client_bins array.
+      cp "${client_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
+        "${release_stage}/client/bin/"
+
+      kube::release::clean_cruft
+
+      local package_name="${RELEASE_TARS}/kubernetes-client-${platform_tag}.tar.gz"
+      kube::release::create_tarball "${package_name}" "${release_stage}/.."
+    ) &
+  done
+
+  kube::log::status "Waiting on tarballs"
+  kube::util::wait-for-jobs || { kube::log::error "client tarball creation failed"; exit 1; }
+}
+
+# Package up all of the node binaries
+function kube::release::package_node_tarballs() {
+  local platform
+  for platform in "${KUBE_NODE_PLATFORMS[@]}"; do
+    local platform_tag=${platform/\//-} # Replace a "/" for a "-"
+    # NOTE(review): arch is currently only referenced by the commented-out
+    # TODO below.
+    local arch=$(basename "${platform}")
+    kube::log::status "Building tarball: node $platform_tag"
+
+    local release_stage="${RELEASE_STAGE}/node/${platform_tag}/kubernetes"
+    rm -rf "${release_stage}"
+    mkdir -p "${release_stage}/node/bin"
+
+    # Windows binaries carry a .exe suffix, so they have their own list.
+    local node_bins=("${KUBE_NODE_BINARIES[@]}")
+    if [[ "${platform%/*}" == "windows" ]]; then
+      node_bins=("${KUBE_NODE_BINARIES_WIN[@]}")
+    fi
+    # This fancy expression will expand to prepend a path
+    # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
+    # node_bins array.
+    cp "${node_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
+      "${release_stage}/node/bin/"
+
+    # TODO: Docker images here
+    # kube::release::create_docker_images_for_server "${release_stage}/server/bin" "${arch}"
+
+    # Include the client binaries here too as they are useful debugging tools.
+    local client_bins=("${KUBE_CLIENT_BINARIES[@]}")
+    if [[ "${platform%/*}" == "windows" ]]; then
+      client_bins=("${KUBE_CLIENT_BINARIES_WIN[@]}")
+    fi
+    # This fancy expression will expand to prepend a path
+    # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
+    # client_bins array.
+    cp "${client_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
+      "${release_stage}/node/bin/"
+
+    # Ship licensing and source alongside the binaries (the src tarball was
+    # produced by an earlier phase; see package_tarballs).
+    cp "${KUBE_ROOT}/Godeps/LICENSES" "${release_stage}/"
+
+    cp "${RELEASE_TARS}/kubernetes-src.tar.gz" "${release_stage}/"
+
+    kube::release::clean_cruft
+
+    local package_name="${RELEASE_TARS}/kubernetes-node-${platform_tag}.tar.gz"
+    kube::release::create_tarball "${package_name}" "${release_stage}/.."
+  done
+}
+
+# Package up all of the server binaries in docker images
+function kube::release::build_server_images() {
+  # Builds the per-arch server Docker images, staging the server binaries
+  # under ${RELEASE_STAGE}/server/<platform_tag>/kubernetes/server/bin first.
+  # That staging directory is reused later by package_server_tarballs.
+  # Clean out any old images
+  rm -rf "${RELEASE_IMAGES}"
+  local platform
+  for platform in "${KUBE_SERVER_PLATFORMS[@]}"; do
+    local platform_tag=${platform/\//-} # Replace a "/" for a "-"
+    local arch=$(basename "${platform}")
+    kube::log::status "Building images: $platform_tag"
+
+    local release_stage="${RELEASE_STAGE}/server/${platform_tag}/kubernetes"
+    rm -rf "${release_stage}"
+    mkdir -p "${release_stage}/server/bin"
+
+    # This fancy expression will expand to prepend a path
+    # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
+    # KUBE_SERVER_IMAGE_BINARIES array.
+    cp "${KUBE_SERVER_IMAGE_BINARIES[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
+      "${release_stage}/server/bin/"
+
+    # if we are building hyperkube, we also need to copy that binary
+    # (KUBE_BUILD_HYPERKUBE defaults to 'y' at the top of this file).
+    if [[ "${KUBE_BUILD_HYPERKUBE}" =~ [yY] ]]; then
+      cp "${LOCAL_OUTPUT_BINPATH}/${platform}/hyperkube" "${release_stage}/server/bin"
+    fi
+
+    kube::release::create_docker_images_for_server "${release_stage}/server/bin" "${arch}"
+  done
+}
+
+# Package up all of the server binaries
+function kube::release::package_server_tarballs() {
+  # Builds the server images first (which also stages the server binaries),
+  # then adds client binaries, licensing, and source, and tars each platform.
+  kube::release::build_server_images
+  local platform
+  for platform in "${KUBE_SERVER_PLATFORMS[@]}"; do
+    local platform_tag=${platform/\//-} # Replace a "/" for a "-"
+    local arch=$(basename "${platform}")
+    kube::log::status "Building tarball: server $platform_tag"
+
+    # NOTE: this directory was setup in kube::release::build_server_images
+    local release_stage="${RELEASE_STAGE}/server/${platform_tag}/kubernetes"
+    mkdir -p "${release_stage}/addons"
+
+    # This fancy expression will expand to prepend a path
+    # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
+    # KUBE_SERVER_BINARIES array.
+    cp "${KUBE_SERVER_BINARIES[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
+      "${release_stage}/server/bin/"
+
+    # Include the client binaries here too as they are useful debugging tools.
+    local client_bins=("${KUBE_CLIENT_BINARIES[@]}")
+    if [[ "${platform%/*}" == "windows" ]]; then
+      client_bins=("${KUBE_CLIENT_BINARIES_WIN[@]}")
+    fi
+    # This fancy expression will expand to prepend a path
+    # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
+    # client_bins array.
+    cp "${client_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
+      "${release_stage}/server/bin/"
+
+    # Ship licensing and source alongside the binaries (the src tarball was
+    # produced by an earlier phase; see package_tarballs).
+    cp "${KUBE_ROOT}/Godeps/LICENSES" "${release_stage}/"
+
+    cp "${RELEASE_TARS}/kubernetes-src.tar.gz" "${release_stage}/"
+
+    kube::release::clean_cruft
+
+    local package_name="${RELEASE_TARS}/kubernetes-server-${platform_tag}.tar.gz"
+    kube::release::create_tarball "${package_name}" "${release_stage}/.."
+  done
+}
+
+# Print the MD5 digest (hex) of the file named by $1.
+# macOS ships `md5`; Linux ships `md5sum`.  Use POSIX `command -v` instead
+# of the non-standard `which` to probe for the tool.
+function kube::release::md5() {
+  if command -v md5 >/dev/null 2>&1; then
+    md5 -q "$1"
+  else
+    md5sum "$1" | awk '{ print $1 }'
+  fi
+}
+
+# Print the SHA-1 digest (hex) of the file named by $1.
+# Linux ships `sha1sum`; macOS ships `shasum`.  Use POSIX `command -v`
+# instead of the non-standard `which` to probe for the tool.
+function kube::release::sha1() {
+  if command -v sha1sum >/dev/null 2>&1; then
+    sha1sum "$1" | awk '{ print $1 }'
+  else
+    shasum -a1 "$1" | awk '{ print $1 }'
+  fi
+}
+
+# Build the hyperkube image for one architecture and, when a save
+# directory is supplied, export it with `docker save`.
+#   $1 - target architecture (e.g. amd64)
+#   $2 - docker registry prefix
+#   $3 - image version tag
+#   $4 - optional directory in which to save the image tarball
+function kube::release::build_hyperkube_image() {
+  local -r target_arch="$1"
+  local -r image_registry="$2"
+  local -r image_version="$3"
+  local -r output_dir="${4-}"
+
+  kube::log::status "Building hyperkube image for arch: ${target_arch}"
+  ARCH="${target_arch}" REGISTRY="${image_registry}" VERSION="${image_version}" \
+    make -C cluster/images/hyperkube/ build >/dev/null
+
+  local -r image_tag="${image_registry}/hyperkube-${target_arch}:${image_version}"
+  if [[ -n "${output_dir}" ]]; then
+    "${DOCKER[@]}" save "${image_tag}" > "${output_dir}/hyperkube-${target_arch}.tar"
+  fi
+  # Remove the local image; failures (e.g. image already gone) are ignored.
+  kube::log::status "Deleting hyperkube image ${image_tag}"
+  "${DOCKER[@]}" rmi "${image_tag}" &>/dev/null || true
+}
+
+# Build the conformance image for one architecture and, when a save
+# directory is supplied, export it with `docker save`.
+#   $1 - target architecture (e.g. amd64)
+#   $2 - docker registry prefix
+#   $3 - image version tag
+#   $4 - optional directory in which to save the image tarball
+function kube::release::build_conformance_image() {
+  local -r target_arch="$1"
+  local -r image_registry="$2"
+  local -r image_version="$3"
+  local -r output_dir="${4-}"
+
+  kube::log::status "Building conformance image for arch: ${target_arch}"
+  ARCH="${target_arch}" REGISTRY="${image_registry}" VERSION="${image_version}" \
+    make -C cluster/images/conformance/ build >/dev/null
+
+  local -r image_tag="${image_registry}/conformance-${target_arch}:${image_version}"
+  if [[ -n "${output_dir}" ]]; then
+    "${DOCKER[@]}" save "${image_tag}" > "${output_dir}/conformance-${target_arch}.tar"
+  fi
+  # Remove the local image; failures (e.g. image already gone) are ignored.
+  kube::log::status "Deleting conformance image ${image_tag}"
+  "${DOCKER[@]}" rmi "${image_tag}" &>/dev/null || true
+}
+
+# This builds all the release docker images (One docker image per binary)
+# Args:
+#  $1 - binary_dir, the directory to save the tared images to.
+#  $2 - arch, architecture for which we are building docker images.
+function kube::release::create_docker_images_for_server() {
+  # Create a sub-shell so that we don't pollute the outer environment
+  (
+    local binary_dir="$1"
+    local arch="$2"
+    local binary_name
+    # Word-splitting here is intentional: the helper emits a whitespace
+    # separated list of "binary,base_image" pairs (shellcheck SC2207).
+    local binaries=($(kube::build::get_docker_wrapped_binaries "${arch}"))
+    local images_dir="${RELEASE_IMAGES}/${arch}"
+    mkdir -p "${images_dir}"
+
+    local -r docker_registry="k8s.gcr.io"
+    # Docker tags cannot contain '+'
+    local docker_tag="${KUBE_GIT_VERSION/+/_}"
+    if [[ -z "${docker_tag}" ]]; then
+      kube::log::error "git version information missing; cannot create Docker tag"
+      return 1
+    fi
+
+    for wrappable in "${binaries[@]}"; do
+
+      # Split the "binary,base_image" pair into the positional parameters
+      # $1/$2 by temporarily setting IFS to a comma.
+      # NOTE(review): `set -- $wrappable` would be safer should an entry
+      # ever begin with a dash.
+      local oldifs=$IFS
+      IFS=","
+      set $wrappable
+      IFS=$oldifs
+
+      local binary_name="$1"
+      local base_image="$2"
+      local docker_build_path="${binary_dir}/${binary_name}.dockerbuild"
+      local docker_file_path="${docker_build_path}/Dockerfile"
+      local binary_file_path="${binary_dir}/${binary_name}"
+      local docker_image_tag="${docker_registry}"
+      if [[ ${arch} == "amd64" ]]; then
+        # If we are building a amd64 docker image, preserve the original
+        # image name
+        docker_image_tag+="/${binary_name}:${docker_tag}"
+      else
+        # If we are building a docker image for another architecture,
+        # append the arch in the image tag
+        docker_image_tag+="/${binary_name}-${arch}:${docker_tag}"
+      fi
+
+
+      # Each image is built in a background subshell so the builds for the
+      # different binaries run in parallel; see wait-for-jobs below.
+      kube::log::status "Starting docker build for image: ${binary_name}-${arch}"
+      (
+        rm -rf "${docker_build_path}"
+        mkdir -p "${docker_build_path}"
+        # Hard-link (not copy) the binary into the build context.
+        ln "${binary_dir}/${binary_name}" "${docker_build_path}/${binary_name}"
+        ln "${KUBE_ROOT}/build/nsswitch.conf" "${docker_build_path}/nsswitch.conf"
+        chmod 0644 "${docker_build_path}/nsswitch.conf"
+        cat <<EOF > "${docker_file_path}"
+FROM ${base_image}
+COPY ${binary_name} /usr/local/bin/${binary_name}
+EOF
+        # ensure /etc/nsswitch.conf exists so go's resolver respects /etc/hosts
+        if [[ "${base_image}" =~ busybox ]]; then
+          echo "COPY nsswitch.conf /etc/" >> "${docker_file_path}"
+        fi
+
+        # provide `--pull` argument to `docker build` if `KUBE_BUILD_PULL_LATEST_IMAGES`
+        # is set to y or Y; otherwise try to build the image without forcefully
+        # pulling the latest base image.
+        local -a docker_build_opts=()
+        if [[ "${KUBE_BUILD_PULL_LATEST_IMAGES}" =~ [yY] ]]; then
+            docker_build_opts+=("--pull")
+        fi
+        "${DOCKER[@]}" build "${docker_build_opts[@]}" -q -t "${docker_image_tag}" "${docker_build_path}" >/dev/null
+        "${DOCKER[@]}" save "${docker_image_tag}" > "${binary_dir}/${binary_name}.tar"
+        echo "${docker_tag}" > "${binary_dir}/${binary_name}.docker_tag"
+        rm -rf "${docker_build_path}"
+        ln "${binary_dir}/${binary_name}.tar" "${images_dir}/"
+
+        # If we are building an official/alpha/beta release we want to keep
+        # docker images and tag them appropriately.
+        if [[ -n "${KUBE_DOCKER_IMAGE_TAG-}" && -n "${KUBE_DOCKER_REGISTRY-}" ]]; then
+          local release_docker_image_tag="${KUBE_DOCKER_REGISTRY}/${binary_name}-${arch}:${KUBE_DOCKER_IMAGE_TAG}"
+          # Only rmi and tag if name is different
+          if [[ $docker_image_tag != $release_docker_image_tag ]]; then
+            kube::log::status "Tagging docker image ${docker_image_tag} as ${release_docker_image_tag}"
+            "${DOCKER[@]}" rmi "${release_docker_image_tag}" 2>/dev/null || true
+            "${DOCKER[@]}" tag "${docker_image_tag}" "${release_docker_image_tag}" 2>/dev/null
+          fi
+        else
+          # not a release
+          kube::log::status "Deleting docker image ${docker_image_tag}"
+          "${DOCKER[@]}" rmi "${docker_image_tag}" &>/dev/null || true
+        fi
+      ) &
+    done
+
+    # Hyperkube/conformance images also build in the background when enabled.
+    if [[ "${KUBE_BUILD_HYPERKUBE}" =~ [yY] ]]; then
+      kube::release::build_hyperkube_image "${arch}" "${docker_registry}" \
+        "${docker_tag}" "${images_dir}" &
+    fi
+    if [[ "${KUBE_BUILD_CONFORMANCE}" =~ [yY] ]]; then
+      kube::release::build_conformance_image "${arch}" "${docker_registry}" \
+        "${docker_tag}" "${images_dir}" &
+    fi
+
+    # Wait for every background build started above; fail if any failed.
+    kube::util::wait-for-jobs || { kube::log::error "previous Docker build failed"; return 1; }
+    kube::log::status "Docker builds done"
+  )
+
+}
+
+# This will pack kube-system manifests files for distros such as COS.
+# Stages GCE control-plane manifests, configure/health-monitor helper
+# scripts, and all cluster addons under gci-trusty/, then tars the result
+# as kubernetes-manifests.tar.gz.
+function kube::release::package_kube_manifests_tarball() {
+  kube::log::status "Building tarball: manifests"
+
+  local src_dir="${KUBE_ROOT}/cluster/gce/manifests"
+
+  local release_stage="${RELEASE_STAGE}/manifests/kubernetes"
+  rm -rf "${release_stage}"
+
+  local dst_dir="${release_stage}/gci-trusty"
+  mkdir -p "${dst_dir}"
+  cp "${src_dir}/kube-proxy.manifest" "${dst_dir}/"
+  cp "${src_dir}/cluster-autoscaler.manifest" "${dst_dir}/"
+  cp "${src_dir}/etcd.manifest" "${dst_dir}"
+  cp "${src_dir}/kube-scheduler.manifest" "${dst_dir}"
+  cp "${src_dir}/kube-apiserver.manifest" "${dst_dir}"
+  cp "${src_dir}/abac-authz-policy.jsonl" "${dst_dir}"
+  cp "${src_dir}/kube-controller-manager.manifest" "${dst_dir}"
+  cp "${src_dir}/kube-addon-manager.yaml" "${dst_dir}"
+  cp "${src_dir}/glbc.manifest" "${dst_dir}"
+  cp "${src_dir}/etcd-empty-dir-cleanup.yaml" "${dst_dir}/"
+  local internal_manifest
+  # NOTE(review): the regex "^internal-*" matches "internal" followed by
+  # ZERO or more dashes, so it also matches e.g. "internalfoo"; confirm
+  # whether "^internal-" was intended.
+  for internal_manifest in $(ls "${src_dir}" | grep "^internal-*"); do
+    cp "${src_dir}/${internal_manifest}" "${dst_dir}"
+  done
+  cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${dst_dir}/gci-configure-helper.sh"
+  if [[ -e "${KUBE_ROOT}/cluster/gce/gci/gke-internal-configure-helper.sh" ]]; then
+    cp "${KUBE_ROOT}/cluster/gce/gci/gke-internal-configure-helper.sh" "${dst_dir}/"
+  fi
+  cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${dst_dir}/health-monitor.sh"
+  local objects
+  objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo)
+  # ${objects} is intentionally unquoted: tar needs each filename as a
+  # separate argument.  Copy the addon tree via a tar pipe to preserve paths.
+  tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${dst_dir}"
+  # Merge GCE-specific addons with general purpose addons.
+  local gce_objects
+  gce_objects=$(cd "${KUBE_ROOT}/cluster/gce/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) \( -not -name \*demo\* \))
+  if [[ -n "${gce_objects}" ]]; then
+    tar c -C "${KUBE_ROOT}/cluster/gce/addons" ${gce_objects} | tar x -C "${dst_dir}"
+  fi
+
+  kube::release::clean_cruft
+
+  local package_name="${RELEASE_TARS}/kubernetes-manifests.tar.gz"
+  kube::release::create_tarball "${package_name}" "${release_stage}/.."
+}
+
+# Builds tarballs for each test platform containing the appropriate binaries.
+function kube::release::package_test_platform_tarballs() {
+  local platform
+  rm -rf "${RELEASE_STAGE}/test"
+  # KUBE_TEST_SERVER_PLATFORMS is a subset of KUBE_TEST_PLATFORMS,
+  # so process it first.
+  for platform in "${KUBE_TEST_SERVER_PLATFORMS[@]}"; do
+    local platform_tag=${platform/\//-} # Replace a "/" for a "-"
+    local release_stage="${RELEASE_STAGE}/test/${platform_tag}/kubernetes"
+    mkdir -p "${release_stage}/test/bin"
+    # This fancy expression will expand to prepend a path
+    # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
+    # KUBE_TEST_SERVER_BINARIES array.
+    cp "${KUBE_TEST_SERVER_BINARIES[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
+      "${release_stage}/test/bin/"
+  done
+  # Stage the client-side test binaries and create each platform tarball in
+  # a background subshell so the tarballs build in parallel.
+  for platform in "${KUBE_TEST_PLATFORMS[@]}"; do
+    (
+      local platform_tag=${platform/\//-} # Replace a "/" for a "-"
+      kube::log::status "Starting tarball: test $platform_tag"
+      local release_stage="${RELEASE_STAGE}/test/${platform_tag}/kubernetes"
+      mkdir -p "${release_stage}/test/bin"
+
+      local test_bins=("${KUBE_TEST_BINARIES[@]}")
+      if [[ "${platform%/*}" == "windows" ]]; then
+        test_bins=("${KUBE_TEST_BINARIES_WIN[@]}")
+      fi
+      # This fancy expression will expand to prepend a path
+      # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
+      # test_bins array.
+      cp "${test_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
+        "${release_stage}/test/bin/"
+
+      local package_name="${RELEASE_TARS}/kubernetes-test-${platform_tag}.tar.gz"
+      kube::release::create_tarball "${package_name}" "${release_stage}/.."
+    ) &
+  done
+
+  # Block until every background tarball job above has finished.
+  kube::log::status "Waiting on test tarballs"
+  kube::util::wait-for-jobs || { kube::log::error "test tarball creation failed"; exit 1; }
+}
+
+
+# This is the stuff you need to run tests from the binary distribution.
+# Builds the per-platform test tarballs, the portable tarball (scripts and
+# test images), and optionally the deprecated all-platform "mondo" tarball.
+function kube::release::package_test_tarballs() {
+  kube::release::package_test_platform_tarballs
+
+  kube::log::status "Building tarball: test portable"
+
+  local release_stage="${RELEASE_STAGE}/test/kubernetes"
+  rm -rf "${release_stage}"
+  mkdir -p "${release_stage}"
+
+  # First add test image files and other portable sources so we can create
+  # the portable test tarball.
+  mkdir -p "${release_stage}/test/images"
+  cp -fR "${KUBE_ROOT}/test/images" "${release_stage}/test/"
+  # Copy the portable file list via a tar pipe to preserve directory paths.
+  tar c "${KUBE_TEST_PORTABLE[@]}" | tar x -C "${release_stage}"
+
+  kube::release::clean_cruft
+
+  local portable_tarball_name="${RELEASE_TARS}/kubernetes-test-portable.tar.gz"
+  kube::release::create_tarball "${portable_tarball_name}" "${release_stage}/.."
+
+  # The mondo tarball bundles binaries for ALL platforms; only built when
+  # KUBE_BUILD_MONDO_TEST_TARBALL is y/Y.
+  if [[ "${KUBE_BUILD_MONDO_TEST_TARBALL}" =~ [yY] ]]; then
+    kube::log::status "Building tarball: test mondo (deprecated by KEP sig-testing/20190118-breaking-apart-the-kubernetes-test-tarball)"
+    local platform
+    for platform in "${KUBE_TEST_PLATFORMS[@]}"; do
+      local test_bins=("${KUBE_TEST_BINARIES[@]}")
+      if [[ "${platform%/*}" == "windows" ]]; then
+        test_bins=("${KUBE_TEST_BINARIES_WIN[@]}")
+      fi
+      mkdir -p "${release_stage}/platforms/${platform}"
+      # This fancy expression will expand to prepend a path
+      # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
+      # test_bins array.
+      cp "${test_bins[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
+        "${release_stage}/platforms/${platform}"
+    done
+    for platform in "${KUBE_TEST_SERVER_PLATFORMS[@]}"; do
+      mkdir -p "${release_stage}/platforms/${platform}"
+      # This fancy expression will expand to prepend a path
+      # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
+      # KUBE_TEST_SERVER_BINARIES array.
+      cp "${KUBE_TEST_SERVER_BINARIES[@]/#/${LOCAL_OUTPUT_BINPATH}/${platform}/}" \
+        "${release_stage}/platforms/${platform}"
+    done
+
+    cat <<EOF > "${release_stage}/DEPRECATION_NOTICE"
+The mondo test tarball containing binaries for all platforms is
+DEPRECATED as of Kubernetes 1.14.
+
+Users of this tarball should migrate to using the platform-specific
+tarballs in combination with the "portable" tarball which contains
+scripts, test images, and other manifests.
+
+For more details, please see KEP
+sig-testing/20190118-breaking-apart-the-kubernetes-test-tarball.
+EOF
+
+    kube::release::clean_cruft
+
+    local package_name="${RELEASE_TARS}/kubernetes-test.tar.gz"
+    kube::release::create_tarball "${package_name}" "${release_stage}/.."
+  fi
+}
+
+# This is all the platform-independent stuff you need to run/install kubernetes.
+# Arch-specific binaries will need to be downloaded separately (possibly by
+# using the bundled cluster/get-kube-binaries.sh script).
+# Included in this tarball:
+#   - Cluster spin up/down scripts and configs for various cloud providers
+#   - Tarballs for manifest configs that are ready to be uploaded
+#   - Examples (which may or may not still work)
+#   - The remnants of the docs/ directory
+function kube::release::package_final_tarball() {
+  kube::log::status "Building tarball: final"
+
+  # This isn't a "full" tarball anymore, but the release lib still expects
+  # artifacts under "full/kubernetes/"
+  local release_stage="${RELEASE_STAGE}/full/kubernetes"
+  rm -rf "${release_stage}"
+  mkdir -p "${release_stage}"
+
+  # client/ and server/ contain only READMEs pointing users at
+  # get-kube-binaries.sh; the binaries themselves ship separately.
+  mkdir -p "${release_stage}/client"
+  cat <<EOF > "${release_stage}/client/README"
+Client binaries are no longer included in the Kubernetes final tarball.
+
+Run cluster/get-kube-binaries.sh to download client and server binaries.
+EOF
+
+  # We want everything in /cluster.
+  cp -R "${KUBE_ROOT}/cluster" "${release_stage}/"
+
+  mkdir -p "${release_stage}/server"
+  cp "${RELEASE_TARS}/kubernetes-manifests.tar.gz" "${release_stage}/server/"
+  cat <<EOF > "${release_stage}/server/README"
+Server binary tarballs are no longer included in the Kubernetes final tarball.
+
+Run cluster/get-kube-binaries.sh to download client and server binaries.
+EOF
+
+  # Include hack/lib as a dependency for the cluster/ scripts
+  mkdir -p "${release_stage}/hack"
+  cp -R "${KUBE_ROOT}/hack/lib" "${release_stage}/hack/"
+
+  cp -R "${KUBE_ROOT}/docs" "${release_stage}/"
+  cp "${KUBE_ROOT}/README.md" "${release_stage}/"
+  cp "${KUBE_ROOT}/Godeps/LICENSES" "${release_stage}/"
+
+  # Record the exact version this tarball was built from.
+  echo "${KUBE_GIT_VERSION}" > "${release_stage}/version"
+
+  kube::release::clean_cruft
+
+  local package_name="${RELEASE_TARS}/kubernetes.tar.gz"
+  kube::release::create_tarball "${package_name}" "${release_stage}/.."
+}
+
+# Build a release tarball.  $1 is the output tar name.  $2 is the base directory
+# of the files to be packaged.  This assumes that ${2}/kubernetes is what is
+# being packaged.
+function kube::release::create_tarball() {
+  kube::build::ensure_tar
+
+  local tarfile=$1
+  local stagingdir=$2
+
+  # Keep all options (--owner/--group normalize ownership to root) BEFORE the
+  # "kubernetes" operand: GNU tar only permits options after operands when
+  # not running in POSIX mode (POSIXLY_CORRECT unset).
+  "${TAR}" czf "${tarfile}" --owner=0 --group=0 -C "${stagingdir}" kubernetes
+}

+ 31 - 0
kubernetes-v1.15.4/build/make-build-image.sh

@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+# Copyright 2014 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build the docker image necessary for building Kubernetes
+#
+# This script will package the parts of the repo that we need to build
+# Kubernetes into a tar file and put it in the right place in the output
+# directory.  It will then copy over the Dockerfile and build the kube-build
+# image.
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBE_ROOT="$(dirname "${BASH_SOURCE[0]}")/.."
+source "${KUBE_ROOT}/build/common.sh"
+
+# Verify the local build prerequisites, then build the kube-build image.
+kube::build::verify_prereqs
+kube::build::build_image

+ 26 - 0
kubernetes-v1.15.4/build/make-clean.sh

@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+# Copyright 2014 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Clean out the output directory on the docker host.
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+source "${KUBE_ROOT}/build/common.sh"
+
+# NOTE(review): the "false" argument presumably relaxes the prerequisite
+# checks for cleaning — confirm against kube::build::verify_prereqs.
+kube::build::verify_prereqs false
+kube::build::clean

+ 2 - 0
kubernetes-v1.15.4/build/nsswitch.conf

@@ -0,0 +1,2 @@
+# ensure go's non-cgo resolver respects /etc/hosts
+hosts: files dns

+ 21 - 0
kubernetes-v1.15.4/build/openapi.bzl

@@ -0,0 +1,21 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A project wanting to generate openapi code for vendored
+# k8s.io/kubernetes will need to set the following variables in
+# //build/openapi.bzl in their project and customize the go prefix:
+#
+# openapi_vendor_prefix = "vendor/k8s.io/kubernetes/"
+
+# Empty by default: we are building k8s.io/kubernetes itself, not a vendored copy.
+openapi_vendor_prefix = ""

+ 27 - 0
kubernetes-v1.15.4/build/package-tarballs.sh

@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# Complete the release with the standard env
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+source "${KUBE_ROOT}/build/common.sh"
+source "${KUBE_ROOT}/build/lib/release.sh"
+
+# Resolve the git version vars, then build every release tarball.
+kube::build::ensure_tar
+kube::version::get_version_vars
+kube::release::package_tarballs

+ 3 - 0
kubernetes-v1.15.4/build/pause/.gitignore

@@ -0,0 +1,3 @@
+/.container-*
+/.push-*
+/bin

+ 8 - 0
kubernetes-v1.15.4/build/pause/CHANGELOG.md

@@ -0,0 +1,8 @@
+# 3.1
+
+* The pause container gains a signal handler to clean up orphaned zombie processes. ([#36853](https://prs.k8s.io/36853), [@verb](https://github.com/verb))
+* `pause -v` will return build information for the pause binary. ([#56762](https://prs.k8s.io/56762), [@verb](https://github.com/verb))
+
+# 3.0
+
+* The pause container was rewritten entirely in C. ([#23009](https://prs.k8s.io/23009), [@uluyol](https://github.com/uluyol))

+ 18 - 0
kubernetes-v1.15.4/build/pause/Dockerfile

@@ -0,0 +1,18 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Minimal image: just the statically-linked pause binary, no base layers.
+FROM scratch
+ARG ARCH
+# The per-arch binary is produced under bin/ by this directory's Makefile.
+ADD bin/pause-${ARCH} /pause
+ENTRYPOINT ["/pause"]

+ 109 - 0
kubernetes-v1.15.4/build/pause/Makefile

@@ -0,0 +1,109 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: all push container clean orphan all-push push-manifest
+
+include ../../hack/make-rules/Makefile.manifest
+
+REGISTRY ?= staging-k8s.gcr.io
+IMAGE = $(REGISTRY)/pause
+IMAGE_WITH_ARCH = $(IMAGE)-$(ARCH)
+
+TAG = 3.1
+# Git revision; combined with TAG into the binary's `pause -v` version string.
+REV = $(shell git describe --contains --always --match='v*')
+
+# Architectures supported: amd64, arm, arm64, ppc64le and s390x
+ARCH ?= amd64
+
+ALL_ARCH = amd64 arm arm64 ppc64le s390x
+
+# -static so the binary runs in a FROM-scratch image.
+CFLAGS = -Os -Wall -Werror -static -DVERSION=v$(TAG)-$(REV)
+KUBE_CROSS_IMAGE ?= k8s.gcr.io/kube-cross
+KUBE_CROSS_VERSION ?= $(shell cat ../build-image/cross/VERSION)
+
+BIN = pause
+SRCS = pause.c
+
+# TRIPLE selects the cross-compiler prefix for the target architecture.
+ifeq ($(ARCH),amd64)
+	TRIPLE ?= x86_64-linux-gnu
+endif
+
+ifeq ($(ARCH),arm)
+	TRIPLE ?= arm-linux-gnueabihf
+endif
+
+ifeq ($(ARCH),arm64)
+	TRIPLE ?= aarch64-linux-gnu
+endif
+
+ifeq ($(ARCH),ppc64le)
+	TRIPLE ?= powerpc64le-linux-gnu
+endif
+
+ifeq ($(ARCH),s390x)
+	TRIPLE ?= s390x-linux-gnu
+endif
+
+# If you want to build AND push all containers, see the 'all-push' rule.
+all: all-container
+
+all-push: all-push-images push-manifest
+
+# Push a multi-arch manifest list referencing the per-arch images.
+push-manifest: manifest-tool
+	manifest-tool push from-args --platforms $(call join_platforms,$(ALL_ARCH)) --template $(IMAGE)-ARCH:$(TAG) --target $(IMAGE):$(TAG)
+
+sub-container-%:
+	$(MAKE) ARCH=$* container
+
+sub-push-%:
+	$(MAKE) ARCH=$* push
+
+all-container: $(addprefix sub-container-,$(ALL_ARCH))
+
+all-push-images: $(addprefix sub-push-,$(ALL_ARCH))
+
+build: bin/$(BIN)-$(ARCH)
+
+# Cross-compile and strip inside the kube-cross container so no local
+# toolchain is required; runs as the invoking user to keep file ownership.
+bin/$(BIN)-$(ARCH): $(SRCS)
+	mkdir -p bin
+	docker run --rm -u $$(id -u):$$(id -g) -v $$(pwd):/build \
+		$(KUBE_CROSS_IMAGE):$(KUBE_CROSS_VERSION) \
+		/bin/bash -c "\
+			cd /build && \
+			$(TRIPLE)-gcc $(CFLAGS) -o $@ $^ && \
+			$(TRIPLE)-strip $@"
+
+container: .container-$(ARCH)
+.container-$(ARCH): bin/$(BIN)-$(ARCH)
+	docker build --pull -t $(IMAGE_WITH_ARCH):$(TAG) --build-arg ARCH=$(ARCH) .
+	touch $@
+
+push: .push-$(ARCH)
+.push-$(ARCH): .container-$(ARCH)
+	docker push $(IMAGE_WITH_ARCH):$(TAG)
+	touch $@
+
+# Useful for testing, not automatically included in container image
+orphan: bin/orphan-$(ARCH)
+bin/orphan-$(ARCH): orphan.c
+	mkdir -p bin
+	docker run -u $$(id -u):$$(id -g) -v $$(pwd):/build \
+		$(KUBE_CROSS_IMAGE):$(KUBE_CROSS_VERSION) \
+		/bin/bash -c "\
+			cd /build && \
+			$(TRIPLE)-gcc $(CFLAGS) -o $@ $^ && \
+			$(TRIPLE)-strip $@"
+
+clean:
+	rm -rf .container-* .push-* bin/

+ 36 - 0
kubernetes-v1.15.4/build/pause/orphan.c

@@ -0,0 +1,36 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/* Creates a zombie to be reaped by init. Useful for testing. */
+
+#include <stdio.h>
+#include <unistd.h>
+
+int main() {
+  pid_t pid;
+  pid = fork();
+  if (pid == 0) {
+    /* Child: busy-wait until the parent exits (ppid drops to 1 or the
+     * subreaper's pid), then exit so a zombie is left for init to reap. */
+    while (getppid() > 1)
+      ;
+    printf("Child exiting: pid=%d ppid=%d\n", getpid(), getppid());
+    return 0;
+  } else if (pid > 0) {
+    /* Parent exits immediately, orphaning the child. */
+    printf("Parent exiting: pid=%d ppid=%d\n", getpid(), getppid());
+    return 0;
+  }
+  /* fork() failed. */
+  perror("Could not create child");
+  return 1;
+}

+ 68 - 0
kubernetes-v1.15.4/build/pause/pause.c

@@ -0,0 +1,68 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#define STRINGIFY(x) #x
+#define VERSION_STRING(x) STRINGIFY(x)
+
+#ifndef VERSION
+#define VERSION HEAD
+#endif
+
+/* Handler for SIGINT/SIGTERM: log the received signal and exit cleanly. */
+static void sigdown(int signo) {
+  psignal(signo, "Shutting down, got signal");
+  exit(0);
+}
+
+/* SIGCHLD handler: reap every exited child without blocking. */
+static void sigreap(int signo) {
+  (void)signo; /* unused; parameter required by the sa_handler signature */
+  while (waitpid(-1, NULL, WNOHANG) > 0)
+    ;
+}
+
+int main(int argc, char **argv) {
+  int i;
+  /* `pause -v` (case-insensitive) prints the compiled-in version and exits. */
+  for (i = 1; i < argc; ++i) {
+    if (!strcasecmp(argv[i], "-v")) {
+      printf("pause.c %s\n", VERSION_STRING(VERSION));
+      return 0;
+    }
+  }
+
+  if (getpid() != 1)
+    /* Not an error because pause sees use outside of infra containers. */
+    fprintf(stderr, "Warning: pause should be the first process\n");
+
+  /* Exit cleanly on SIGINT/SIGTERM; reap children on SIGCHLD.
+   * SA_NOCLDSTOP: don't invoke the handler for stopped/continued children. */
+  if (sigaction(SIGINT, &(struct sigaction){.sa_handler = sigdown}, NULL) < 0)
+    return 1;
+  if (sigaction(SIGTERM, &(struct sigaction){.sa_handler = sigdown}, NULL) < 0)
+    return 2;
+  if (sigaction(SIGCHLD, &(struct sigaction){.sa_handler = sigreap,
+                                             .sa_flags = SA_NOCLDSTOP},
+                NULL) < 0)
+    return 3;
+
+  /* Block forever; pause() only returns after a signal handler runs. */
+  for (;;)
+    pause();
+  fprintf(stderr, "Error: infinite loop terminated\n");
+  return 42;
+}

+ 189 - 0
kubernetes-v1.15.4/build/platforms.bzl

@@ -0,0 +1,189 @@
+# Copyright 2019 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("@bazel_skylib//lib:new_sets.bzl", "sets")
+load("@bazel_skylib//lib:types.bzl", "types")
+
+# KUBE_SERVER_PLATFORMS in hack/lib/golang.sh
+SERVER_PLATFORMS = {
+    "linux": [
+        "amd64",
+        "arm",
+        "arm64",
+        "ppc64le",
+        "s390x",
+    ],
+}
+
+# KUBE_NODE_PLATFORMS in hack/lib/golang.sh
+NODE_PLATFORMS = {
+    "linux": [
+        "amd64",
+        "arm",
+        "arm64",
+        "ppc64le",
+        "s390x",
+    ],
+    "windows": [
+        "amd64",
+    ],
+}
+
+# KUBE_CLIENT_PLATFORMS in hack/lib/golang.sh
+CLIENT_PLATFORMS = {
+    "linux": [
+        "386",
+        "amd64",
+        "arm",
+        "arm64",
+        "ppc64le",
+        "s390x",
+    ],
+    "darwin": [
+        "386",
+        "amd64",
+    ],
+    "windows": [
+        "386",
+        "amd64",
+    ],
+}
+
+# KUBE_TEST_PLATFORMS in hack/lib/golang.sh
+TEST_PLATFORMS = {
+    "linux": [
+        "amd64",
+        "arm",
+        "arm64",
+        "s390x",
+        "ppc64le",
+    ],
+    "darwin": [
+        "amd64",
+    ],
+    "windows": [
+        "amd64",
+    ],
+}
+
+# Helper which produces the ALL_PLATFORMS dictionary, composed of the union of
+# CLIENT, NODE, SERVER, and TEST platforms
+def _all_platforms():
+    all_platforms = {}
+    # Union the per-OS arch lists using skylib sets to de-duplicate, then
+    # convert each set back to a plain list for consumers.
+    for platforms in [CLIENT_PLATFORMS, NODE_PLATFORMS, SERVER_PLATFORMS, TEST_PLATFORMS]:
+        for os, archs in platforms.items():
+            all_platforms[os] = sets.union(
+                all_platforms.setdefault(os, sets.make()),
+                sets.make(archs),
+            )
+    for os, archs in all_platforms.items():
+        all_platforms[os] = sets.to_list(archs)
+    return all_platforms
+
+ALL_PLATFORMS = _all_platforms()
+
+def go_platform_constraint(os, arch):
+    # The rules_go constraint label for an (os, arch) pair.
+    return "@io_bazel_rules_go//go/platform:{}_{}".format(os, arch)
+
+# Helper to for_platforms which updates the select() dictionary.
+# d is the dictionary being updated.
+# value is the value to set for each item of platforms, which should
+# be a single platform category dictionary (e.g. SERVER_PLATFORMS).
+# only_os selects one of the OSes in platforms.
+def _update_dict_for_platform_category(d, value, platforms, only_os = None):
+    if not value:
+        return
+    for os, arches in platforms.items():
+        if only_os and os != only_os:
+            continue
+        for arch in arches:
+            constraint = go_platform_constraint(os, arch)
+            fmt_args = {"OS": os, "ARCH": arch}
+            if types.is_list(value):
+                # Format all items in the list, and hope there are no duplicates
+                d.setdefault(constraint, []).extend(
+                    [v.format(**fmt_args) for v in value],
+                )
+            else:
+                # Don't overwrite existing value
+                if constraint in d:
+                    # fail() does not printf-format its arguments (extra args
+                    # are just concatenated), so interpolate explicitly.
+                    fail("duplicate entry for constraint %s" % constraint)
+                if types.is_dict(value):
+                    # Format dictionary values only
+                    d[constraint] = {
+                        dict_key: dict_value.format(**fmt_args)
+                        for dict_key, dict_value in value.items()
+                    }
+                else:
+                    # Hopefully this is just a string
+                    d[constraint] = value.format(**fmt_args)
+
+# for_platforms returns a dictionary to be used with select().
+# select() is used for configurable attributes (most attributes, notably
+# excluding output filenames), and takes a dictionary mapping a condition
+# to a value for that attribute.
+# select() is described in more detail in the Bazel documentation:
+# https://docs.bazel.build/versions/master/be/functions.html#select
+#
+# One notable condition is the target platform (os and arch).
+# Kubernetes binaries generally target particular platform categories,
+# such as client binaries like kubectl, or node binaries like kubelet.
+# Additionally, some build artifacts need specific configurations such as
+# the appropriate arch-specific base image.
+#
+# This macro produces a dictionary where each of the platform categories
+# (client, node, server, test, all) is enumerated and filled in
+# with the provided arguments as the values.
+#
+# For example, a filegroup might want to include one binary for all client
+# platforms and another binary for server platforms. The client and server
+# platform lists have some shared items but also some disjoint items.
+# The client binary can be provided in for_client and the server binary provided
+# in for_server; this macro will then return a select() dictionary that
+# includes the appropriate binaries based on the configured platform.
+#
+# Another example selecting the appropriate base image for a docker container.
+# One can use select(for_platforms(for_server="base-image-{ARCH}//image"))
+# to have the appropriate arch-specific image selected.
+#
+# The for_platform arguments can be lists, dictionaries, or strings, but
+# they should all be the same type for a given call.
+# The tokens {OS} and {ARCH} will be substituted with the corresponding values,
+# but if a dictionary is provided, only the dictionary values will be formatted.
+#
+# If default is provided, a default condition will be added with the provided
+# value.
+# only_os can be used to select a single OS from a platform category that lists
+# multiple OSes. For example, it doesn't make sense to build debs or RPMs for
+# anything besides Linux, so you might supply only_os="linux" for those rules.
+#
+# For a complete example, consult something like the release-tars target in
+# build/release-tars/BUILD.
+def for_platforms(
+        for_client = None,
+        for_node = None,
+        for_server = None,
+        for_test = None,
+        for_all = None,
+        default = None,
+        only_os = None):
+    # Layer each platform category's value into the select() dictionary;
+    # the helper fails on duplicate constraints except for list values,
+    # which are extended.
+    d = {}
+    if default != None:
+        d["//conditions:default"] = default
+    _update_dict_for_platform_category(d, for_client, CLIENT_PLATFORMS, only_os)
+    _update_dict_for_platform_category(d, for_node, NODE_PLATFORMS, only_os)
+    _update_dict_for_platform_category(d, for_server, SERVER_PLATFORMS, only_os)
+    _update_dict_for_platform_category(d, for_test, TEST_PLATFORMS, only_os)
+    _update_dict_for_platform_category(d, for_all, ALL_PLATFORMS, only_os)
+    return d

+ 42 - 0
kubernetes-v1.15.4/build/release-images.sh

@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build Kubernetes release images. This will build the server target binaries
+# and wrap them in Docker images; see `make release` for full releases.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+source "${KUBE_ROOT}/build/common.sh"
+source "${KUBE_ROOT}/build/lib/release.sh"
+
+CMD_TARGETS="${KUBE_SERVER_IMAGE_TARGETS[*]}"
+if [[ "${KUBE_BUILD_HYPERKUBE}" =~ [yY] ]]; then
+    CMD_TARGETS="${CMD_TARGETS} cmd/hyperkube"
+fi
+if [[ "${KUBE_BUILD_CONFORMANCE}" =~ [yY] ]]; then
+    CMD_TARGETS="${CMD_TARGETS} ${KUBE_CONFORMANCE_IMAGE_TARGETS[*]}"
+fi
+
+kube::build::verify_prereqs
+kube::build::build_image
+kube::build::run_build_command make all WHAT="${CMD_TARGETS}" KUBE_BUILD_PLATFORMS="${KUBE_SERVER_PLATFORMS[*]}"
+
+kube::build::copy_output
+
+kube::release::build_server_images

+ 49 - 0
kubernetes-v1.15.4/build/release-in-a-container.sh

@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# Complete the release with the standard env
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+
+# Check and error if not "in-a-container"
+if [[ ! -f /.dockerenv ]]; then
+  echo
+  echo "'make release-in-a-container' can only be used from a docker container."
+  echo
+  exit 1
+fi
+
+# Other dependencies: Your container should contain docker
+if ! type -p docker >/dev/null 2>&1; then
+  echo
+  echo "'make release-in-a-container' requires a container with" \
+       "docker installed."
+  echo
+  exit 1
+fi
+
+
+# First run make cross-in-a-container
+make cross-in-a-container
+
+# at the moment only make test is supported.
+if [[ $KUBE_RELEASE_RUN_TESTS =~ ^[yY]$ ]]; then
+  make test
+fi
+
+"${KUBE_ROOT}/build/package-tarballs.sh"

+ 321 - 0
kubernetes-v1.15.4/build/release-tars/BUILD

@@ -0,0 +1,321 @@
+package(default_visibility = ["//visibility:public"])
+
+load(
+    "//build:platforms.bzl",
+    "CLIENT_PLATFORMS",
+    "NODE_PLATFORMS",
+    "SERVER_PLATFORMS",
+    "TEST_PLATFORMS",
+    "for_platforms",
+    "go_platform_constraint",
+)
+load("@io_k8s_repo_infra//defs:build.bzl", "release_filegroup")
+load("@io_k8s_repo_infra//defs:pkg.bzl", "pkg_tar")
+load("//staging:repos_generated.bzl", "staging_repos")
+
+# Bazel doesn't make the output filename
+# (such as kubernetes-server-{OS}-{ARCH}.tar.gz) configurable, so we instead
+# create rules for all platforms and tag them manual.
+# We then select the correct set of platform-specific tarballs in this filegroup
+# using a select() statement.
+# Thus the release-tars target always selects the correct set of tarballs
+# for the configured platform being built.
+release_filegroup(
+    name = "release-tars",
+    conditioned_srcs = for_platforms(
+        for_all = [
+            ":kubernetes.tar.gz",
+            ":kubernetes-src.tar.gz",
+        ],
+        for_client = [":kubernetes-client-{OS}-{ARCH}.tar.gz"],
+        for_node = [":kubernetes-node-{OS}-{ARCH}.tar.gz"],
+        for_server = [
+            ":kubernetes-server-{OS}-{ARCH}.tar.gz",
+            ":kubernetes-manifests.tar.gz",
+        ],
+        for_test = [
+            ":kubernetes-test-portable.tar.gz",
+            ":kubernetes-test-{OS}-{ARCH}.tar.gz",
+            # TODO(ixdy): remove once the "mondo-test" tarball is deprecated.
+            # It isn't really mondo under Bazel anyway.
+            ":kubernetes-test.tar.gz",
+        ],
+    ),
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+)
+
+pkg_tar(
+    name = "kubernetes-src",
+    srcs = ["//:all-srcs"],
+    extension = "tar.gz",
+    package_dir = "kubernetes",
+    strip_prefix = "//",
+    symlinks = {
+        "kubernetes/vendor/%s" % repo: "../../staging/src/%s" % repo
+        for repo in staging_repos
+    },
+    tags = [
+        "manual",
+        "no-cache",
+    ],
+)
+
+# Included in node and server tarballs.
+filegroup(
+    name = "license-targets",
+    srcs = [
+        ":kubernetes-src.tar.gz",
+        "//:Godeps/LICENSES",
+    ],
+    visibility = ["//visibility:private"],
+)
+
+pkg_tar(
+    name = "_client-bin",
+    srcs = ["//build:client-targets"],
+    mode = "0755",
+    package_dir = "client/bin",
+    tags = [
+        "manual",
+        "no-cache",
+    ],
+    visibility = ["//visibility:private"],
+)
+
+[[pkg_tar(
+    name = "kubernetes-client-%s-%s" % (os, arch),
+    extension = "tar.gz",
+    package_dir = "kubernetes",
+    tags = [
+        "manual",
+        "no-cache",
+    ],
+    deps = select({go_platform_constraint(os, arch): [":_client-bin"]}),
+) for arch in archs] for os, archs in CLIENT_PLATFORMS.items()]
+
+pkg_tar(
+    name = "_node-bin",
+    srcs = [
+        "//build:client-targets",
+        "//build:node-targets",
+    ],
+    mode = "0755",
+    package_dir = "node/bin",
+    tags = [
+        "manual",
+        "no-cache",
+    ],
+    visibility = ["//visibility:private"],
+)
+
+[[pkg_tar(
+    name = "kubernetes-node-%s-%s" % (os, arch),
+    srcs = [":license-targets"],
+    extension = "tar.gz",
+    mode = "0644",
+    package_dir = "kubernetes",
+    tags = [
+        "manual",
+        "no-cache",
+    ],
+    deps = select({go_platform_constraint(os, arch): [":_node-bin"]}),
+) for arch in archs] for os, archs in NODE_PLATFORMS.items()]
+
+pkg_tar(
+    name = "_server-bin",
+    srcs = [
+        "//build:client-targets",
+        "//build:docker-artifacts",
+        "//build:node-targets",
+        "//build:server-targets",
+    ],
+    mode = "0755",
+    package_dir = "server/bin",
+    tags = [
+        "manual",
+        "no-cache",
+    ],
+    visibility = ["//visibility:private"],
+)
+
+genrule(
+    name = "dummy",
+    outs = [".dummy"],
+    cmd = "touch $@",
+    visibility = ["//visibility:private"],
+)
+
+# Some of the startup scripts fail if there isn't an addons/ directory in the server tarball.
+pkg_tar(
+    name = "_server-addons",
+    srcs = [
+        ":.dummy",
+    ],
+    package_dir = "addons",
+    tags = [
+        "manual",
+        "no-cache",
+    ],
+    visibility = ["//visibility:private"],
+)
+
+[[pkg_tar(
+    name = "kubernetes-server-%s-%s" % (os, arch),
+    srcs = [":license-targets"],
+    extension = "tar.gz",
+    mode = "0644",
+    package_dir = "kubernetes",
+    tags = [
+        "manual",
+        "no-cache",
+    ],
+    deps = select({
+        go_platform_constraint(os, arch): [
+            ":_server-addons",
+            ":_server-bin",
+        ],
+    }),
+) for arch in archs] for os, archs in SERVER_PLATFORMS.items()]
+
+# The mondo test tarball is deprecated.
+pkg_tar(
+    name = "_test-mondo-bin",
+    srcs = ["//build:test-targets"],
+    mode = "0755",
+    package_dir = select(for_platforms(
+        for_test = "platforms/{OS}/{ARCH}",
+    )),
+    tags = [
+        "manual",
+        "no-cache",
+    ],
+    visibility = ["//visibility:private"],
+)
+
+genrule(
+    name = "kubernetes-test-mondo-deprecation",
+    outs = ["DEPRECATION_NOTICE"],
+    cmd = """cat <<EOF >$@
+The mondo test tarball containing binaries for all platforms is
+DEPRECATED as of Kubernetes 1.14.
+
+Users of this tarball should migrate to using the platform-specific
+tarballs in combination with the "portable" tarball which contains
+scripts, test images, and other manifests.
+
+For more details, please see KEP
+sig-testing/20190118-breaking-apart-the-kubernetes-test-tarball.
+EOF
+""",
+    visibility = ["//visibility:private"],
+)
+
+# The mondo test tarball is deprecated.
+# This one was never really correct, anyway, since we can't include
+# binaries from multiple platforms in a single tarball.
+pkg_tar(
+    name = "kubernetes-test",
+    srcs = [
+        ":kubernetes-test-mondo-deprecation",
+        "//build:test-portable-targets",
+    ],
+    extension = "tar.gz",
+    package_dir = "kubernetes",
+    remap_paths = {
+        "build/release-tars/DEPRECATION_NOTICE": "DEPRECATION_NOTICE",
+    },
+    strip_prefix = "//",
+    tags = [
+        "manual",
+        "no-cache",
+    ],
+    deps = select(for_platforms(
+        for_test = [":_test-mondo-bin"],
+    )),
+)
+
+pkg_tar(
+    name = "kubernetes-test-portable",
+    srcs = ["//build:test-portable-targets"],
+    extension = "tar.gz",
+    package_dir = "kubernetes",
+    strip_prefix = "//",
+    tags = ["no-cache"],
+)
+
+pkg_tar(
+    name = "_test-bin",
+    srcs = ["//build:test-targets"],
+    mode = "0755",
+    package_dir = "test/bin",
+    tags = [
+        "manual",
+        "no-cache",
+    ],
+    visibility = ["//visibility:private"],
+)
+
+[[pkg_tar(
+    name = "kubernetes-test-%s-%s" % (os, arch),
+    extension = "tar.gz",
+    package_dir = "kubernetes",
+    strip_prefix = "//",
+    tags = [
+        "manual",
+        "no-cache",
+    ],
+    deps = select({go_platform_constraint(os, arch): [":_test-bin"]}),
+) for arch in archs] for os, archs in TEST_PLATFORMS.items()]
+
+pkg_tar(
+    name = "_full_server",
+    srcs = [
+        ":kubernetes-manifests.tar.gz",
+    ],
+    package_dir = "server",
+    tags = [
+        "manual",
+        "no-cache",
+    ],
+    visibility = ["//visibility:private"],
+)
+
+pkg_tar(
+    name = "kubernetes",
+    srcs = [
+        "//:Godeps/LICENSES",
+        "//:README.md",
+        "//:version",
+        "//cluster:all-srcs",
+        "//docs:all-srcs",
+        "//hack/lib:all-srcs",
+    ],
+    extension = "tar.gz",
+    package_dir = "kubernetes",
+    strip_prefix = "//",
+    tags = ["no-cache"],
+    deps = [
+        ":_full_server",
+    ],
+)
+
+pkg_tar(
+    name = "kubernetes-manifests",
+    extension = "tar.gz",
+    tags = ["no-cache"],
+    deps = [
+        "//cluster:manifests",
+    ],
+)

+ 45 - 0
kubernetes-v1.15.4/build/release.sh

@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+# Copyright 2014 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build a Kubernetes release.  This will build the binaries, create the Docker
+# images and other build artifacts.
+#
+# For pushing these artifacts publicly to Google Cloud Storage or to a registry
+# please refer to the kubernetes/release repo at
+# https://github.com/kubernetes/release.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+source "${KUBE_ROOT}/build/common.sh"
+source "${KUBE_ROOT}/build/lib/release.sh"
+
+KUBE_RELEASE_RUN_TESTS=${KUBE_RELEASE_RUN_TESTS-y}
+
+kube::build::verify_prereqs
+kube::build::build_image
+kube::build::run_build_command make cross
+
+if [[ $KUBE_RELEASE_RUN_TESTS =~ ^[yY]$ ]]; then
+  kube::build::run_build_command make test
+  kube::build::run_build_command make test-integration
+fi
+
+kube::build::copy_output
+
+kube::release::package_tarballs

+ 79 - 0
kubernetes-v1.15.4/build/root/.bazelrc

@@ -0,0 +1,79 @@
+startup --expand_configs_in_place
+
+# Show us information about failures.
+build --verbose_failures
+test --test_output=errors
+
+# Include git version info
+build --workspace_status_command hack/print-workspace-status.sh
+
+# Make /tmp hermetic
+build --sandbox_tmpfs_path=/tmp
+
+# Ensure that Bazel never runs as root, which can cause unit tests to fail.
+# This flag requires Bazel 0.5.0+
+build --sandbox_fake_username
+
+# Enable go race detection.
+build:unit --features=race
+test:unit --features=race
+
+test:unit --build_tests_only
+test:unit --test_tag_filters=-e2e,-integration
+test:unit --flaky_test_attempts=3
+
+test:integration --local_test_jobs 4
+test:integration --test_tag_filters=integration
+
+# Darwin and Windows only cross-compile pure Go
+build:cross:darwin_386 --platforms=@io_bazel_rules_go//go/toolchain:darwin_386
+build:cross:darwin_amd64 --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64
+build:cross:windows_386 --platforms=@io_bazel_rules_go//go/toolchain:windows_386
+build:cross:windows_amd64 --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64
+
+# We enable cgo cross-compilation for Linux, but need to use our custom crosstool.
+build:repo_infra_crosstool --crosstool_top=@io_k8s_repo_infra//tools:toolchain --compiler=gcc
+build:cross:linux_386 --config=repo_infra_crosstool --platforms=@io_bazel_rules_go//go/toolchain:linux_386
+build:cross:linux_amd64 --config=repo_infra_crosstool --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 --cpu=amd64
+build:cross:linux_arm --config=repo_infra_crosstool --platforms=@io_bazel_rules_go//go/toolchain:linux_arm --cpu=arm
+build:cross:linux_arm64 --config=repo_infra_crosstool --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64 --cpu=arm64
+build:cross:linux_ppc64le --config=repo_infra_crosstool --platforms=@io_bazel_rules_go//go/toolchain:linux_ppc64le --cpu=ppc64le
+build:cross:linux_s390x --config=repo_infra_crosstool --platforms=@io_bazel_rules_go//go/toolchain:linux_s390x --cpu=s390x
+
+# --config=remote-cache enables a remote bazel cache
+# Note needs a --remote_instance_name=projects/PROJ/instances/default_instance flag
+build:remote-cache --remote_cache=remotebuildexecution.googleapis.com
+build:remote-cache --tls_enabled=true
+build:remote-cache --remote_timeout=3600
+build:remote-cache --auth_enabled=true
+
+# --config=remote adds remote execution to the --config=remote-cache
+# Note needs a --remote_instance_name=projects/PROJ/instances/default_instance flag
+build:remote --config=remote-cache
+build:remote --remote_executor=remotebuildexecution.googleapis.com
+build:remote --jobs=500
+build:remote --host_javabase=@rbe_default//java:jdk
+build:remote --javabase=@rbe_default//java:jdk
+build:remote --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
+build:remote --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
+build:remote --crosstool_top=@rbe_default//cc:toolchain
+build:remote --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
+build:remote --extra_toolchains=@rbe_default//config:cc-toolchain
+build:remote --extra_execution_platforms=:rbe_with_network
+build:remote --host_platform=:rbe_with_network
+build:remote --platforms=:rbe_with_network
+build:remote --spawn_strategy=remote
+build:remote --strategy=Javac=remote
+build:remote --strategy=Closure=remote
+build:remote --strategy=Genrule=remote
+build:remote --define=EXECUTOR=remote
+
+# tar locally due to https://github.com/bazelbuild/bazel/issues/8462
+build:remote --strategy=PackageTar=sandboxed
+
+
+# --config=ci-instance adds a remote instance name
+build:ci-instance --remote_instance_name=projects/k8s-prow-builds/instances/default_instance
+
+# The config we want to use in CI
+build:ci --config=remote --config=ci-instance

+ 14 - 0
kubernetes-v1.15.4/build/root/.kazelcfg.json

@@ -0,0 +1,14 @@
+{
+	"GoPrefix": "k8s.io/kubernetes",
+	"SkippedPaths": [
+		"^_.*",
+		"/_",
+		"^third_party/etcd.*"
+	],
+	"AddSourcesRules": true,
+	"K8sCodegenBzlFile": "build/kazel_generated.bzl",
+	"K8sCodegenBoilerplateFile": "hack/boilerplate/boilerplate.generatebzl.txt",
+	"K8sCodegenTags": [
+		"openapi-gen"
+	]
+}

+ 125 - 0
kubernetes-v1.15.4/build/root/BUILD.root

@@ -0,0 +1,125 @@
+# gazelle:build_file_name BUILD,BUILD.bazel
+
+# gazelle:exclude _artifacts
+# gazelle:exclude _gopath
+# gazelle:exclude _output
+# gazelle:exclude _tmp
+
+# gazelle:prefix k8s.io/kubernetes
+
+# Disable proto rules, since the Go sources are currently generated by
+# hack/update-generated-protobuf.sh and checked into the repo.
+# gazelle:proto disable_global
+
+package(default_visibility = ["//visibility:public"])
+
+load("@io_k8s_repo_infra//defs:build.bzl", "gcs_upload")
+load("//build:platforms.bzl", "for_platforms")
+
+filegroup(
+    name = "_binary-artifacts-and-hashes",
+    srcs = select(for_platforms(
+        for_client = ["//build:client-targets-and-hashes"],
+        for_node = [
+            "//build:node-targets-and-hashes",
+            "//build/debs:debs-and-hashes",
+        ],
+        for_server = [
+            "//build:docker-artifacts-and-hashes",
+            "//build:server-targets-and-hashes",
+        ],
+    )),
+    visibility = ["//visibility:private"],
+)
+
+# TODO: collect all relevant docker_push targets into one target that can be run:
+#   //build:push-server-images
+#   //cluster/images/conformance:push-conformance
+#   //cluster/images/hyperkube:push-hyperkube
+gcs_upload(
+    name = "push-build",
+    data = [
+        ":_binary-artifacts-and-hashes",
+        "//build/release-tars:release-tars-and-hashes",
+        "//cluster/gce/gci:gcs-release-artifacts-and-hashes",
+        "//cluster/gce/windows:gcs-release-artifacts-and-hashes",
+    ],
+    tags = ["manual"],
+    # Use for_platforms to format the upload path based on the configured
+    # platform (os/arch).
+    # For example, this will turn into something like
+    # upload_paths = select({
+    #    "@io_bazel_rules_go//go/platform:windows_386": {
+    #        ...,"//:binary-artifacts-and-hashes": "bin/windows/386"},
+    #    "@io_bazel_rules_go//go/platform:linux_ppc64le": {
+    #        ...,"//:binary-artifacts-and-hashes": "bin/linux/ppc64le"},
+    #})
+    # and bazel will select the correct entry.
+    upload_paths = select(for_platforms(for_all = {
+        "//build/release-tars:release-tars-and-hashes": "",
+        "//cluster/gce/gci:gcs-release-artifacts-and-hashes": "extra/gce",
+        "//cluster/gce/windows:gcs-release-artifacts-and-hashes": "extra/gce/windows",
+        "//:_binary-artifacts-and-hashes": "bin/{OS}/{ARCH}",
+    })),
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(
+        ["**"],
+        exclude = [
+            "bazel-*/**",
+            "_*/**",
+            ".config/**",
+            ".git/**",
+            ".gsutil/**",
+            ".make/**",
+        ],
+    ),
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [
+        ":package-srcs",
+        "//api/openapi-spec:all-srcs",
+        "//build:all-srcs",
+        "//cluster:all-srcs",
+        "//cmd:all-srcs",
+        "//docs:all-srcs",
+        "//hack:all-srcs",
+        "//pkg:all-srcs",
+        "//plugin:all-srcs",
+        "//staging:all-srcs",
+        "//test:all-srcs",
+        "//third_party:all-srcs",
+        "//translations:all-srcs",
+        "//vendor:all-srcs",
+    ],
+    tags = ["automanaged"],
+)
+
+genrule(
+    name = "save_git_version",
+    outs = ["version"],
+    cmd = "grep ^STABLE_BUILD_SCM_REVISION bazel-out/stable-status.txt | awk '{print $$2}' >$@",
+    stamp = 1,
+)
+
+platform(
+    name = "rbe_with_network",
+    parents = ["@rbe_default//config:platform"],
+    # https://cloud.google.com/remote-build-execution/docs/remote-execution-environment#remote_execution_properties
+    remote_execution_properties = """
+      properties: {
+        name: "dockerNetwork"
+        value: "standard"
+      }
+      properties: {
+        name: "dockerPrivileged"
+        value: "true"
+      }
+      {PARENT_REMOTE_EXECUTION_PROPERTIES}
+    """,
+)

+ 639 - 0
kubernetes-v1.15.4/build/root/Makefile

@@ -0,0 +1,639 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DBG_MAKEFILE ?=
+ifeq ($(DBG_MAKEFILE),1)
+    $(warning ***** starting Makefile for goal(s) "$(MAKECMDGOALS)")
+    $(warning ***** $(shell date))
+else
+    # If we're not debugging the Makefile, don't echo recipes.
+    MAKEFLAGS += -s
+endif
+
+
+# Old-skool build tools.
+#
+# Commonly used targets (see each target for more information):
+#   all: Build code.
+#   test: Run tests.
+#   clean: Clean up.
+
+# It's necessary to set this because some environments don't link sh -> bash.
+SHELL := /bin/bash
+
+# We don't need make's built-in rules.
+MAKEFLAGS += --no-builtin-rules
+.SUFFIXES:
+
+# Constants used throughout.
+.EXPORT_ALL_VARIABLES:
+OUT_DIR ?= _output
+BIN_DIR := $(OUT_DIR)/bin
+PRJ_SRC_PATH := k8s.io/kubernetes
+GENERATED_FILE_PREFIX := zz_generated.
+
+# Metadata for driving the build lives here.
+META_DIR := .make
+
+ifdef KUBE_GOFLAGS
+$(info KUBE_GOFLAGS is now deprecated. Please use GOFLAGS instead.)
+ifndef GOFLAGS
+GOFLAGS := $(KUBE_GOFLAGS)
+unexport KUBE_GOFLAGS
+else
+$(error Both KUBE_GOFLAGS and GOFLAGS are set. Please use just GOFLAGS)
+endif
+endif
+
+# Extra options for the release or quick-release options:
+KUBE_RELEASE_RUN_TESTS := $(KUBE_RELEASE_RUN_TESTS)
+KUBE_FASTBUILD := $(KUBE_FASTBUILD)
+
+# This controls the verbosity of the build.  Higher numbers mean more output.
+KUBE_VERBOSE ?= 1
+
+define ALL_HELP_INFO
+# Build code.
+#
+# Args:
+#   WHAT: Directory names to build.  If any of these directories has a 'main'
+#     package, the build will produce executable files under $(OUT_DIR)/go/bin.
+#     If not specified, "everything" will be built.
+#   GOFLAGS: Extra flags to pass to 'go' when building.
+#   GOLDFLAGS: Extra linking flags passed to 'go' when building.
+#   GOGCFLAGS: Additional go compile flags passed to 'go' when building.
+#
+# Example:
+#   make
+#   make all
+#   make all WHAT=cmd/kubelet GOFLAGS=-v
+#   make all GOLDFLAGS=""
+#     Note: Specify GOLDFLAGS as an empty string for building unstripped binaries, which allows
+#           you to use code debugging tools like delve. When GOLDFLAGS is unspecified, it defaults
+#           to "-s -w" which strips debug information. Other flags that can be used for GOLDFLAGS 
+#           are documented at https://golang.org/cmd/link/
+endef
+.PHONY: all
+ifeq ($(PRINT_HELP),y)
+all:
+	@echo "$$ALL_HELP_INFO"
+else
+all: generated_files
+	hack/make-rules/build.sh $(WHAT)
+endif
+
+define GINKGO_HELP_INFO
+# Build ginkgo
+#
+# Example:
+# make ginkgo
+endef
+.PHONY: ginkgo
+ifeq ($(PRINT_HELP),y)
+ginkgo:
+	@echo "$$GINKGO_HELP_INFO"
+else
+ginkgo:
+	hack/make-rules/build.sh vendor/github.com/onsi/ginkgo/ginkgo
+endif
+
+define VERIFY_HELP_INFO
+# Runs all the presubmission verifications.
+#
+# Args:
+#   BRANCH: Branch to be passed to verify-vendor.sh script.
+#   WHAT: List of checks to run
+#
+# Example:
+#   make verify
+#   make verify BRANCH=branch_x
+#   make verify WHAT="bazel typecheck"
+endef
+.PHONY: verify
+ifeq ($(PRINT_HELP),y)
+verify:
+	@echo "$$VERIFY_HELP_INFO"
+else
+verify:
+	KUBE_VERIFY_GIT_BRANCH=$(BRANCH) hack/make-rules/verify.sh
+endif
+
+define QUICK_VERIFY_HELP_INFO
+# Runs only the presubmission verifications that aren't slow.
+#
+# Example:
+#   make quick-verify
+endef
+.PHONY: quick-verify
+ifeq ($(PRINT_HELP),y)
+quick-verify:
+	@echo "$$QUICK_VERIFY_HELP_INFO"
+else
+quick-verify:
+	QUICK=true SILENT=false hack/make-rules/verify.sh
+endif
+
+define UPDATE_HELP_INFO
+# Runs all the generated updates.
+#
+# Example:
+# make update
+endef
+.PHONY: update
+ifeq ($(PRINT_HELP),y)
+update:
+	@echo "$$UPDATE_HELP_INFO"
+else
+update: generated_files
+	CALLED_FROM_MAIN_MAKEFILE=1 hack/make-rules/update.sh
+endif
+
+define CHECK_TEST_HELP_INFO
+# Build and run tests.
+#
+# Args:
+#   WHAT: Directory names to test.  All *_test.go files under these
+#     directories will be run.  If not specified, "everything" will be tested.
+#   TESTS: Same as WHAT.
+#   KUBE_COVER: Whether to run tests with code coverage. Set to 'y' to enable coverage collection.
+#   GOFLAGS: Extra flags to pass to 'go' when building.
+#   GOLDFLAGS: Extra linking flags to pass to 'go' when building.
+#   GOGCFLAGS: Additional go compile flags passed to 'go' when building.
+#
+# Example:
+#   make check
+#   make test
+#   make check WHAT=./pkg/kubelet GOFLAGS=-v
+endef
+.PHONY: check test
+ifeq ($(PRINT_HELP),y)
+check test:
+	@echo "$$CHECK_TEST_HELP_INFO"
+else
+check test: generated_files
+	hack/make-rules/test.sh $(WHAT) $(TESTS)
+endif
+
+define TEST_IT_HELP_INFO
+# Build and run integration tests.
+#
+# Args:
+#   WHAT: Directory names to test.  All *_test.go files under these
+#     directories will be run.  If not specified, "everything" will be tested.
+#
+# Example:
+#   make test-integration
+endef
+.PHONY: test-integration
+ifeq ($(PRINT_HELP),y)
+test-integration:
+	@echo "$$TEST_IT_HELP_INFO"
+else
+test-integration: generated_files
+	hack/make-rules/test-integration.sh $(WHAT)
+endif
+
+define TEST_E2E_HELP_INFO
+# Build and run end-to-end tests.
+#
+# Example:
+#   make test-e2e
+endef
+.PHONY: test-e2e
+ifeq ($(PRINT_HELP),y)
+test-e2e:
+	@echo "$$TEST_E2E_HELP_INFO"
+else
+test-e2e: ginkgo generated_files
+	go run hack/e2e.go -- --build --up --test --down
+endif
+
+define TEST_E2E_NODE_HELP_INFO
+# Build and run node end-to-end tests.
+#
+# Args:
+#  FOCUS: Regexp that matches the tests to be run.  Defaults to "".
+#  SKIP: Regexp that matches the tests that need to be skipped.  Defaults
+#    to "".
+#  RUN_UNTIL_FAILURE: If true, pass --untilItFails to ginkgo so tests are run
+#    repeatedly until they fail.  Defaults to false.
+#  REMOTE: If true, run the tests on a remote host instance on GCE.  Defaults
+#    to false.
+#  IMAGES: For REMOTE=true only.  Comma delimited list of images for creating
+#    remote hosts to run tests against.  Defaults to a recent image.
+#  LIST_IMAGES: If true, don't run tests.  Just output the list of available
+#    images for testing.  Defaults to false.
+#  HOSTS: For REMOTE=true only.  Comma delimited list of running gce hosts to
+#    run tests against.  Defaults to "".
+#  DELETE_INSTANCES: For REMOTE=true only.  Delete any instances created as
+#    part of this test run.  Defaults to false.
+#  ARTIFACTS: For REMOTE=true only.  Local directory to scp test artifacts into
+#    from the remote hosts.  Defaults to "/tmp/_artifacts".
+#  REPORT: For REMOTE=false only.  Local directory to write junit xml results
+#    to.  Defaults to "/tmp/".
+#  CLEANUP: For REMOTE=true only.  If false, do not stop processes or delete
+#    test files on remote hosts.  Defaults to true.
+#  IMAGE_PROJECT: For REMOTE=true only.  Project containing images provided to
+#  IMAGES.  Defaults to "kubernetes-node-e2e-images".
+#  INSTANCE_PREFIX: For REMOTE=true only.  Instances created from images will
+#    have the name "${INSTANCE_PREFIX}-${IMAGE_NAME}".  Defaults to "test".
+#  INSTANCE_METADATA: For REMOTE=true and running on GCE only.
+#  GUBERNATOR: For REMOTE=true only. Produce link to Gubernator to view logs.
+#	 Defaults to false.
+#  PARALLELISM: The number of ginkgo nodes to run.  Defaults to 8.
+#  RUNTIME: Container runtime to use (eg. docker, remote).
+#    Defaults to "docker".
+#  CONTAINER_RUNTIME_ENDPOINT: remote container endpoint to connect to.
+#   Used when RUNTIME is set to "remote".
+#  IMAGE_SERVICE_ENDPOINT: remote image endpoint to connect to, to prepull images.
+#   Used when RUNTIME is set to "remote".
+#  IMAGE_CONFIG_FILE: path to a file containing image configuration.
+#  SYSTEM_SPEC_NAME: The name of the system spec to be used for validating the
+#    image in the node conformance test. The specs are located at
+#    test/e2e_node/system/specs/. For example, "SYSTEM_SPEC_NAME=gke" will use
+#    the spec at test/e2e_node/system/specs/gke.yaml. If unspecified, the
+#    default built-in spec (system.DefaultSpec) will be used.
+#
+# Example:
+#   make test-e2e-node FOCUS=Kubelet SKIP=container
+#   make test-e2e-node REMOTE=true DELETE_INSTANCES=true
+#   make test-e2e-node TEST_ARGS='--kubelet-flags="--cgroups-per-qos=true"'
+# Build and run tests.
+endef
+.PHONY: test-e2e-node
+ifeq ($(PRINT_HELP),y)
+test-e2e-node:
+	@echo "$$TEST_E2E_NODE_HELP_INFO"
+else
+test-e2e-node: ginkgo generated_files
+	hack/make-rules/test-e2e-node.sh
+endif
+
+define TEST_E2E_KUBEADM_HELP_INFO
+# Build and run kubeadm end-to-end tests.
+#
+# Args:
+#  FOCUS: Regexp that matches the tests to be run.  Defaults to "".
+#  SKIP: Regexp that matches the tests that need to be skipped.  Defaults
+#    to "".
+#  RUN_UNTIL_FAILURE: If true, pass --untilItFails to ginkgo so tests are run
+#    repeatedly until they fail. Defaults to false.
+#  ARTIFACTS: Local directory to save test artifacts into. Defaults to "/tmp/_artifacts".
+#  PARALLELISM: The number of ginkgo nodes to run.  If empty ginkgo default
+#    parallelism (cores - 1) is used
+#  BUILD: Build kubeadm end-to-end tests. Defaults to true.
+#
+# Example:
+#   make test-e2e-kubeadm 
+#   make test-e2e-kubeadm FOCUS=kubeadm-config 
+#   make test-e2e-kubeadm SKIP=kubeadm-config
+#
+# Build and run tests.
+endef
+.PHONY: test-e2e-kubeadm
+ifeq ($(PRINT_HELP),y)
+test-e2e-kubeadm:
+	@echo "$$TEST_E2E_KUBEADM_HELP_INFO"
+else
+test-e2e-kubeadm: 
+	hack/make-rules/test-e2e-kubeadm.sh
+endif
+
+define TEST_CMD_HELP_INFO
+# Build and run cmdline tests.
+#
+# Args:
+#   WHAT: List of tests to run, check test/cmd/legacy-script.sh for names.
+#     For example, WHAT=deployment will run run_deployment_tests function.
+# Example:
+#   make test-cmd
+#   make test-cmd WHAT="deployment impersonation"
+endef
+.PHONY: test-cmd
+ifeq ($(PRINT_HELP),y)
+test-cmd:
+	@echo "$$TEST_CMD_HELP_INFO"
+else
+test-cmd: generated_files
+	hack/make-rules/test-cmd.sh
+endif
+
+define CLEAN_HELP_INFO
+# Remove all build artifacts.
+#
+# Example:
+#   make clean
+#
+# TODO(thockin): call clean_generated when we stop committing generated code.
+endef
+.PHONY: clean
+ifeq ($(PRINT_HELP),y)
+clean:
+	@echo "$$CLEAN_HELP_INFO"
+else
+clean: clean_meta
+	build/make-clean.sh
+	hack/make-rules/clean.sh
+endif
+
+define CLEAN_META_HELP_INFO
+# Remove make-related metadata files.
+#
+# Example:
+#   make clean_meta
+endef
+.PHONY: clean_meta
+ifeq ($(PRINT_HELP),y)
+clean_meta:
+	@echo "$$CLEAN_META_HELP_INFO"
+else
+clean_meta:
+	rm -rf $(META_DIR)
+endif
+
+define CLEAN_GENERATED_HELP_INFO
+# Remove all auto-generated artifacts. Generated artifacts in staging folder should not be removed as they are not
+# generated using generated_files.
+#
+# Example:
+#   make clean_generated
+endef
+.PHONY: clean_generated
+ifeq ($(PRINT_HELP),y)
+clean_generated:
+	@echo "$$CLEAN_GENERATED_HELP_INFO"
+else
+clean_generated:
+	find . -type f -name $(GENERATED_FILE_PREFIX)\* | grep -v "[.]/staging/.*" | xargs rm -f
+endif
+
+define VET_HELP_INFO
+# Run 'go vet'.
+#
+# Args:
+#   WHAT: Directory names to vet.  All *.go files under these
+#     directories will be vetted.  If not specified, "everything" will be
+#     vetted.
+#
+# Example:
+#   make vet
+#   make vet WHAT=./pkg/kubelet
+endef
+.PHONY: vet
+ifeq ($(PRINT_HELP),y)
+vet:
+	@echo "$$VET_HELP_INFO"
+else
+vet: generated_files
+	CALLED_FROM_MAIN_MAKEFILE=1 hack/make-rules/vet.sh $(WHAT)
+endif
+
+define RELEASE_HELP_INFO
+# Build a release
+# Use the 'release-in-a-container' target to build the release when already in
+# a container vs. creating a new container to build in using the 'release'
+# target.  Useful for running in GCB.
+#
+# Example:
+#   make release
+#   make release-in-a-container
+endef
+.PHONY: release release-in-a-container
+ifeq ($(PRINT_HELP),y)
+release release-in-a-container:
+	@echo "$$RELEASE_HELP_INFO"
+else
+release:
+	build/release.sh
+release-in-a-container:
+	build/release-in-a-container.sh
+endif
+
+define RELEASE_IMAGES_HELP_INFO
+# Build release images
+#
+# Args:
+#   KUBE_BUILD_HYPERKUBE: Whether to build hyperkube image as well. Set to 'n' to skip.
+#   KUBE_BUILD_CONFORMANCE: Whether to build conformance testing image as well. Set to 'n' to skip.
+#
+# Example:
+#   make release-images
+endef
+.PHONY: release-images
+ifeq ($(PRINT_HELP),y)
+release-images:
+	@echo "$$RELEASE_IMAGES_HELP_INFO"
+else
+release-images:
+	build/release-images.sh
+endif
+
+define RELEASE_SKIP_TESTS_HELP_INFO
+# Build a release, but skip tests
+#
+# Args:
+#   KUBE_RELEASE_RUN_TESTS: Whether to run tests. Set to 'y' to run tests anyways.
+#   KUBE_FASTBUILD: Whether to cross-compile for other architectures. Set to 'false' to do so.
+#
+# Example:
+#   make release-skip-tests
+#   make quick-release
+endef
+.PHONY: release-skip-tests quick-release
+ifeq ($(PRINT_HELP),y)
+release-skip-tests quick-release:
+	@echo "$$RELEASE_SKIP_TESTS_HELP_INFO"
+else
+release-skip-tests quick-release: KUBE_RELEASE_RUN_TESTS = n
+release-skip-tests quick-release: KUBE_FASTBUILD = true
+release-skip-tests quick-release:
+	build/release.sh
+endif
+
+define QUICK_RELEASE_IMAGES_HELP_INFO
+# Build release images, but only for linux/amd64
+#
+# Args:
+#   KUBE_FASTBUILD: Whether to cross-compile for other architectures. Set to 'false' to do so.
+#   KUBE_BUILD_HYPERKUBE: Whether to build hyperkube image as well. Set to 'n' to skip.
+#   KUBE_BUILD_CONFORMANCE: Whether to build conformance testing image as well. Set to 'n' to skip.
+#
+# Example:
+#   make quick-release-images
+endef
+.PHONY: quick-release-images
+ifeq ($(PRINT_HELP),y)
+quick-release-images:
+	@echo "$$QUICK_RELEASE_IMAGES_HELP_INFO"
+else
+quick-release-images: KUBE_FASTBUILD = true
+quick-release-images:
+	build/release-images.sh
+endif
+
+define PACKAGE_HELP_INFO
+# Package tarballs
+# Use the 'package-tarballs' target to run the final packaging steps of
+# a release.
+#
+# Example:
+#   make package-tarballs
+endef
+.PHONY: package package-tarballs
+ifeq ($(PRINT_HELP),y)
+package package-tarballs:
+	@echo "$$PACKAGE_HELP_INFO"
+else
+package package-tarballs:
+	build/package-tarballs.sh
+endif
+
+define CROSS_HELP_INFO
+# Cross-compile for all platforms
+# Use the 'cross-in-a-container' target to cross build when already in
+# a container vs. creating a new container to build from (build-image)
+# Useful for running in GCB.
+#
+# Example:
+#   make cross
+#   make cross-in-a-container
+endef
+.PHONY: cross cross-in-a-container
+ifeq ($(PRINT_HELP),y)
+cross cross-in-a-container:
+	@echo "$$CROSS_HELP_INFO"
+else
+cross:
+	hack/make-rules/cross.sh
+cross-in-a-container: KUBE_OUTPUT_SUBPATH = $(OUT_DIR)/dockerized
+cross-in-a-container:
+ifeq (,$(wildcard /.dockerenv))
+	@echo -e "\nThe 'cross-in-a-container' target can only be used from within a docker container.\n"
+else
+	hack/make-rules/cross.sh
+endif
+endif
+
+define CMD_HELP_INFO
+# Add rules for all directories in cmd/
+#
+# Example:
+#   make kubectl kube-proxy
+endef
+#TODO: make EXCLUDE_TARGET auto-generated when there are other files in cmd/
+EXCLUDE_TARGET=BUILD OWNERS
+.PHONY: $(filter-out %$(EXCLUDE_TARGET),$(notdir $(abspath $(wildcard cmd/*/))))
+ifeq ($(PRINT_HELP),y)
+$(filter-out %$(EXCLUDE_TARGET),$(notdir $(abspath $(wildcard cmd/*/)))):
+	@echo "$$CMD_HELP_INFO"
+else
+$(filter-out %$(EXCLUDE_TARGET),$(notdir $(abspath $(wildcard cmd/*/)))): generated_files
+	hack/make-rules/build.sh cmd/$@
+endif
+
+define GENERATED_FILES_HELP_INFO
+# Produce auto-generated files needed for the build.
+#
+# Example:
+#   make generated_files
+endef
+.PHONY: generated_files
+ifeq ($(PRINT_HELP),y)
+generated_files:
+	@echo "$$GENERATED_FILES_HELP_INFO"
+else
+generated_files:
+	$(MAKE) -f Makefile.generated_files $@ CALLED_FROM_MAIN_MAKEFILE=1
+endif
+
+define HELP_INFO
+# Print make targets and help info
+#
+# Example:
+# make help
+endef
+.PHONY: help
+ifeq ($(PRINT_HELP),y)
+help:
+	@echo "$$HELP_INFO"
+else
+help:
+	hack/make-rules/make-help.sh
+endif
+
+# Non-dockerized bazel rules.
+# NOTE: bazel-test-integration is defined below alongside these targets, so it
+# is declared phony here too; otherwise a file named 'bazel-test-integration'
+# would shadow the target.
+.PHONY: bazel-build bazel-test bazel-test-integration bazel-release
+
+ifeq ($(PRINT_HELP),y)
+define BAZEL_BUILD_HELP_INFO
+# Build with bazel
+#
+# Example:
+# make bazel-build
+endef
+bazel-build:
+	@echo "$$BAZEL_BUILD_HELP_INFO"
+else
+# Some things in vendor don't build due to empty target lists for cross-platform rules.
+bazel-build:
+	bazel build -- //... -//vendor/...
+endif
+
+
+ifeq ($(PRINT_HELP),y)
+define BAZEL_TEST_HELP_INFO
+# Test with bazel
+#
+# Example:
+# make bazel-test
+endef
+bazel-test:
+	@echo "$$BAZEL_TEST_HELP_INFO"
+else
+# //hack:verify-all is a manual target.
+# Some things in vendor don't build due to empty target lists for cross-platform rules.
+bazel-test:
+	bazel test --config=unit -- \
+	  //... \
+	  //hack:verify-all \
+	  -//vendor/...
+endif
+
+ifeq ($(PRINT_HELP),y)
+define BAZEL_TEST_INTEGRATION_HELP_INFO
+# Integration test with bazel
+#
+# Example:
+# make bazel-test-integration
+endef
+bazel-test-integration:
+	@echo "$$BAZEL_TEST_INTEGRATION_HELP_INFO"
+else
+# Use the '--config=integration' key=value form for consistency with the
+# '--config=unit' invocation in bazel-test above (both spellings are
+# equivalent to bazel).
+bazel-test-integration:
+	bazel test --config=integration //test/integration/...
+endif
+
+ifeq ($(PRINT_HELP),y)
+define BAZEL_RELEASE_HELP_INFO
+# Build release tars with bazel
+#
+# Example:
+# make bazel-release
+endef
+bazel-release:
+	@echo "$$BAZEL_RELEASE_HELP_INFO"
+else
+bazel-release:
+	bazel build //build/release-tars
+endif

+ 527 - 0
kubernetes-v1.15.4/build/root/Makefile.generated_files

@@ -0,0 +1,527 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Don't allow users to call this directly.  There are too many variables this
+# assumes to inherit from the main Makefile.  This is not a user-facing file.
+ifeq ($(CALLED_FROM_MAIN_MAKEFILE),)
+    $(error Please use the main Makefile, e.g. `make generated_files`)
+endif
+
+# Don't allow an implicit 'all' rule.  This is not a user-facing file.
+ifeq ($(MAKECMDGOALS),)
+    $(error This Makefile requires an explicit rule to be specified)
+endif
+
+ifeq ($(DBG_MAKEFILE),1)
+    $(warning ***** starting Makefile.generated_files for goal(s) "$(MAKECMDGOALS)")
+    $(warning ***** $(shell date))
+endif
+
+
+# It's necessary to set this because some environments don't link sh -> bash.
+SHELL := /bin/bash
+
+# This rule collects all the generated file sets into a single rule.  Other
+# rules should depend on this to ensure generated files are rebuilt.
+.PHONY: generated_files
+generated_files: gen_deepcopy gen_defaulter gen_conversion gen_openapi gen_bindata
+
+#
+# Helper logic to calculate Go's dependency DAG ourselves.
+#
+
+# This is a file that will be emitted by the go2make tool, containing a
+# variable for each Go package in the project (including deps) which lists all
+# of the transitive deps of that package.  Each variable is named the same as
+# the package - for example the variable for `k8s.io/kubernetes/pkg/api` is
+# $(k8s.io/kubernetes/pkg/api).  This is roughly the same DAG that the Go
+# compiler uses.  These variables can be used to figure out if, for example,
+# generated code needs to be regenerated.
+GO_PKGDEPS_FILE = go-pkgdeps.mk
+
+# Include the Go package dependencies file.  This will cause the rule of
+# the same name to be considered and if it is updated, make will restart and
+# reload the updated deps.
+sinclude $(META_DIR)/$(GO_PKGDEPS_FILE)
+
+# Update the set of Go deps for our project.  This will let us determine if
+# we really need to do expensive codegen.  We use FORCE because it is not a
+# PHONY file, but we do want it to be re-evaluated every time make is run.  The
+# file will only be touched if it actually changes.
+$(META_DIR)/$(GO_PKGDEPS_FILE): FORCE
+	if [[ "$(DBG_CODEGEN)" == 1 ]]; then          \
+	    echo "DBG: calculating Go dependencies";  \
+	fi
+	hack/run-in-gopath.sh go install ./hack/make-rules/helpers/go2make
+	hack/run-in-gopath.sh go2make                     \
+	    k8s.io/kubernetes/...                         \
+	    --prune  k8s.io/kubernetes/staging            \
+	    --prune  k8s.io/kubernetes/vendor             \
+	    k8s.io/kubernetes/vendor/k8s.io/...           \
+	    github.com/jteeuwen/go-bindata/go-bindata/... \
+	    > $@.tmp
+	if ! cmp -s $@.tmp $@; then                       \
+	    if [[ "$(DBG_CODEGEN)" == 1 ]]; then          \
+	        echo "DBG: $(GO_PKGDEPS_FILE) changed";   \
+	    fi;                                           \
+	    cat $@.tmp > $@;                              \
+	fi
+	rm -f $@.tmp
+
+# FORCE is an empty, always-out-of-date phony prerequisite: any target that
+# lists it (e.g. the $(GO_PKGDEPS_FILE) rule above) is re-evaluated on every
+# make invocation, even though it is a real (non-PHONY) file.
+.PHONY: FORCE
+FORCE:
+
+#
+# Helper logic to find which directories need codegen as quickly as possible.
+#
+
+# This variable holds a list of every directory that contains Go files in this
+# project.  Other rules and variables can use this as a starting point to
+# reduce filesystem accesses.
+ifeq ($(DBG_MAKEFILE),1)
+    $(warning ***** finding all *.go dirs)
+endif
+ALL_GO_DIRS := $(shell                                                   \
+    hack/make-rules/helpers/cache_go_dirs.sh $(META_DIR)/all_go_dirs.mk  \
+)
+
+# Generate a list of all files that have a `+k8s:` comment-tag.  This will be
+# used to derive lists of files/dirs for generation tools.
+ifeq ($(DBG_MAKEFILE),1)
+    $(warning ***** finding all +k8s: tags)
+endif
+ALL_K8S_TAG_FILES := $(shell                             \
+    find $(ALL_GO_DIRS) -maxdepth 1 -type f -name \*.go  \
+        | xargs grep --color=never -l '^// *+k8s:'       \
+)
+
+
+#
+# Code generation logic.
+#
+
+
+# Deep-copy generation
+#
+# Any package that wants deep-copy functions generated must include a
+# comment-tag in column 0 of one file of the form:
+#     // +k8s:deepcopy-gen=<VALUE>
+#
+# The <VALUE> may be one of:
+#     generate: generate deep-copy functions into the package
+#     register: generate deep-copy functions and register them with a
+#               scheme
+
+# The result file, in each pkg, of deep-copy generation.
+DEEPCOPY_BASENAME := $(GENERATED_FILE_PREFIX)deepcopy
+DEEPCOPY_FILENAME := $(DEEPCOPY_BASENAME).go
+
+# The tool used to generate deep copies.
+DEEPCOPY_GEN := $(BIN_DIR)/deepcopy-gen
+
+# Find all the directories that request deep-copy generation.
+ifeq ($(DBG_MAKEFILE),1)
+    $(warning ***** finding all +k8s:deepcopy-gen tags)
+endif
+DEEPCOPY_DIRS := $(shell                                             \
+    grep --color=never -l '+k8s:deepcopy-gen=' $(ALL_K8S_TAG_FILES)  \
+        | xargs -n1 dirname                                          \
+        | LC_ALL=C sort -u                                           \
+)
+DEEPCOPY_FILES := $(addsuffix /$(DEEPCOPY_FILENAME), $(DEEPCOPY_DIRS))
+
+# Reset the list of packages that need generation.
+# NOTE: these $(shell ...) calls run at Makefile parse time, so the .todo
+# file is emptied on every make invocation before any rule appends to it.
+$(shell mkdir -p $$(dirname $(META_DIR)/$(DEEPCOPY_GEN)))
+$(shell rm -f $(META_DIR)/$(DEEPCOPY_GEN).todo)
+
+# This rule aggregates the set of files to generate and then generates them all
+# in a single run of the tool.
+# FIX: drop the stray trailing '\' after the final 'fi' — it continued the
+# recipe onto the following (blank) line, unlike the gen_defaulter and
+# gen_conversion recipes, and would silently swallow any line added after it.
+.PHONY: gen_deepcopy
+gen_deepcopy: $(DEEPCOPY_GEN) $(META_DIR)/$(DEEPCOPY_GEN).todo
+	if [[ -s $(META_DIR)/$(DEEPCOPY_GEN).todo ]]; then                 \
+	    pkgs=$$(cat $(META_DIR)/$(DEEPCOPY_GEN).todo | paste -sd, -);  \
+	    if [[ "$(DBG_CODEGEN)" == 1 ]]; then                           \
+	        echo "DBG: running $(DEEPCOPY_GEN) for $$pkgs";            \
+	    fi;                                                            \
+	    ./hack/run-in-gopath.sh $(DEEPCOPY_GEN)                        \
+	        --v $(KUBE_VERBOSE)                                        \
+	        --logtostderr                                              \
+	        -i "$$pkgs"                                                \
+	        --bounding-dirs $(PRJ_SRC_PATH),"k8s.io/api"               \
+	        -O $(DEEPCOPY_BASENAME)                                    \
+	        "$$@";                                                     \
+	fi
+
+# For each dir in DEEPCOPY_DIRS, this establishes a dependency between the
+# output file and the input files that should trigger a rebuild.
+#
+# Note that this is a deps-only statement, not a full rule (see below).  This
+# has to be done in a distinct step because wildcards don't work in static
+# pattern rules.
+#
+# The '$(eval)' is needed because this has a different RHS for each LHS, and
+# would otherwise produce results that make can't parse.
+$(foreach dir, $(DEEPCOPY_DIRS), $(eval                     \
+    $(dir)/$(DEEPCOPY_FILENAME): $($(PRJ_SRC_PATH)/$(dir))  \
+))
+
+# How to regenerate deep-copy code.  This is a little slow to run, so we batch
+# it up and trigger the batch from the 'generated_files' target.
+$(META_DIR)/$(DEEPCOPY_GEN).todo: $(DEEPCOPY_FILES)
+
+$(DEEPCOPY_FILES): $(DEEPCOPY_GEN)
+	if [[ "$(DBG_CODEGEN)" == 1 ]]; then        \
+	    echo "DBG: deepcopy needed $(@D): $?";  \
+	    ls -lf --full-time $@ $? || true;       \
+	fi
+	echo $(PRJ_SRC_PATH)/$(@D) >> $(META_DIR)/$(DEEPCOPY_GEN).todo
+
+# How to build the generator tool.  The deps for this are defined in
+# the $(GO_PKGDEPS_FILE), above.
+#
+# A word on the need to touch: This rule might trigger if, for example, a
+# non-Go file was added or deleted from a directory on which this depends.
+# This target needs to be reconsidered, but Go realizes it doesn't actually
+# have to be rebuilt.  In that case, make will forever see the dependency as
+# newer than the binary, and try to "rebuild" it over and over.  So we touch
+# it, and make is happy.
+$(DEEPCOPY_GEN): $(k8s.io/kubernetes/vendor/k8s.io/code-generator/cmd/deepcopy-gen)
+	KUBE_BUILD_PLATFORMS="" hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/deepcopy-gen
+	touch $@
+
+
+# Defaulter generation
+#
+# Any package that wants defaulter functions generated must include a
+# comment-tag in column 0 of one file of the form:
+#     // +k8s:defaulter-gen=<VALUE>
+#
+# The <VALUE> depends on context:
+#     on types:
+#       true:  always generate a defaulter for this type
+#       false: never generate a defaulter for this type
+#     on functions:
+#       covers: if the function name matches SetDefault_NAME, instructs
+#               the generator not to recurse
+#     on packages:
+#       FIELDNAME: any object with a field of this name is a candidate
+#                  for having a defaulter generated
+
+# The result file, in each pkg, of defaulter generation.
+DEFAULTER_BASENAME := $(GENERATED_FILE_PREFIX)defaults
+DEFAULTER_FILENAME := $(DEFAULTER_BASENAME).go
+
+# The tool used to generate defaulters.
+DEFAULTER_GEN := $(BIN_DIR)/defaulter-gen
+
+# All directories that request any form of defaulter generation.
+ifeq ($(DBG_MAKEFILE),1)
+    $(warning ***** finding all +k8s:defaulter-gen tags)
+endif
+DEFAULTER_DIRS := $(shell                                            \
+    grep --color=never -l '+k8s:defaulter-gen=' $(ALL_K8S_TAG_FILES) \
+        | xargs -n1 dirname                                          \
+        | LC_ALL=C sort -u                                           \
+)
+
+DEFAULTER_FILES := $(addsuffix /$(DEFAULTER_FILENAME), $(DEFAULTER_DIRS))
+
+# Reset the list of packages that need generation.
+$(shell mkdir -p $$(dirname $(META_DIR)/$(DEFAULTER_GEN)))
+$(shell rm -f $(META_DIR)/$(DEFAULTER_GEN).todo)
+
+# This rule aggregates the set of files to generate and then generates them all
+# in a single run of the tool.
+.PHONY: gen_defaulter
+gen_defaulter: $(DEFAULTER_GEN) $(META_DIR)/$(DEFAULTER_GEN).todo
+	if [[ -s $(META_DIR)/$(DEFAULTER_GEN).todo ]]; then                 \
+	    pkgs=$$(cat $(META_DIR)/$(DEFAULTER_GEN).todo | paste -sd, -);  \
+	    if [[ "$(DBG_CODEGEN)" == 1 ]]; then                            \
+	        echo "DBG: running $(DEFAULTER_GEN) for $$pkgs";            \
+	    fi;                                                             \
+	    ./hack/run-in-gopath.sh $(DEFAULTER_GEN)                        \
+	        --v $(KUBE_VERBOSE)                                         \
+	        --logtostderr                                               \
+	        -i "$$pkgs"                                                 \
+	        --extra-peer-dirs $$(echo $(addprefix $(PRJ_SRC_PATH)/, $(DEFAULTER_DIRS)) | sed 's/ /,/g') \
+	        -O $(DEFAULTER_BASENAME)                                    \
+	        "$$@";                                                      \
+	fi
+
+# For each dir in DEFAULTER_DIRS, this establishes a dependency between the
+# output file and the input files that should trigger a rebuild.
+#
+# Note that this is a deps-only statement, not a full rule (see below for that).
+#
+# The '$(eval)' is needed because this has a different RHS for each LHS, and
+# would otherwise produce results that make can't parse.
+$(foreach dir, $(DEFAULTER_DIRS), $(eval                     \
+    $(dir)/$(DEFAULTER_FILENAME): $($(PRJ_SRC_PATH)/$(dir))  \
+))
+
+# How to regenerate defaulter code.  This is a little slow to run, so we batch
+# it up and trigger the batch from the 'generated_files' target.
+$(META_DIR)/$(DEFAULTER_GEN).todo: $(DEFAULTER_FILES)
+
+$(DEFAULTER_FILES): $(DEFAULTER_GEN)
+	if [[ "$(DBG_CODEGEN)" == 1 ]]; then         \
+	    echo "DBG: defaulter needed $(@D): $?";  \
+	    ls -lf --full-time $@ $? || true;        \
+	fi
+	echo $(PRJ_SRC_PATH)/$(@D) >> $(META_DIR)/$(DEFAULTER_GEN).todo
+
+# How to build the generator tool.  The deps for this are defined in
+# the $(GO_PKGDEPS_FILE), above.
+#
+# A word on the need to touch: This rule might trigger if, for example, a
+# non-Go file was added or deleted from a directory on which this depends.
+# This target needs to be reconsidered, but Go realizes it doesn't actually
+# have to be rebuilt.  In that case, make will forever see the dependency as
+# newer than the binary, and try to "rebuild" it over and over.  So we touch
+# it, and make is happy.
+$(DEFAULTER_GEN): $(k8s.io/kubernetes/vendor/k8s.io/code-generator/cmd/defaulter-gen)
+	KUBE_BUILD_PLATFORMS="" hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/defaulter-gen
+	touch $@
+
+
+# Conversion generation
+
+# Any package that wants conversion functions generated into it must
+# include one or more comment-tags in its `doc.go` file, of the form:
+#     // +k8s:conversion-gen=<INTERNAL_TYPES_DIR>
+#
+# The INTERNAL_TYPES_DIR is a project-local path to another directory
+# which should be considered when evaluating peer types for
+# conversions.  An optional additional comment of the form
+#     // +k8s:conversion-gen-external-types=<EXTERNAL_TYPES_DIR>
+#
+# identifies where to find the external types; if there is no such
+# comment then the external types are sought in the package where the
+# `k8s:conversion` tag is found.
+#
+# Conversions, in both directions, are generated for every type name
+# that is defined in both an internal types package and the external
+# types package.
+#
+# TODO: it might be better in the long term to make peer-types explicit in the
+# IDL.
+
+# The result file, in each pkg, of conversion generation.
+CONVERSION_BASENAME := $(GENERATED_FILE_PREFIX)conversion
+CONVERSION_FILENAME := $(CONVERSION_BASENAME).go
+
+# The tool used to generate conversions.
+CONVERSION_GEN := $(BIN_DIR)/conversion-gen
+
+# The name of the metadata file listing conversion peers for each pkg.
+CONVERSIONS_META := conversions.mk
+
+# All directories that request any form of conversion generation.
+ifeq ($(DBG_MAKEFILE),1)
+    $(warning ***** finding all +k8s:conversion-gen tags)
+endif
+CONVERSION_DIRS := $(shell                                              \
+    grep --color=never '^// *+k8s:conversion-gen=' $(ALL_K8S_TAG_FILES) \
+        | cut -f1 -d:                                                   \
+        | xargs -n1 dirname                                             \
+        | LC_ALL=C sort -u                                              \
+)
+
+CONVERSION_FILES := $(addsuffix /$(CONVERSION_FILENAME), $(CONVERSION_DIRS))
+CONVERSION_EXTRA_PEER_DIRS := k8s.io/kubernetes/pkg/apis/core,k8s.io/kubernetes/pkg/apis/core/v1,k8s.io/api/core/v1
+
+# Reset the list of packages that need generation.
+$(shell mkdir -p $$(dirname $(META_DIR)/$(CONVERSION_GEN)))
+$(shell rm -f $(META_DIR)/$(CONVERSION_GEN).todo)
+
+# This rule aggregates the set of files to generate and then generates them all
+# in a single run of the tool.
+.PHONY: gen_conversion
+gen_conversion: $(CONVERSION_GEN) $(META_DIR)/$(CONVERSION_GEN).todo
+	if [[ -s $(META_DIR)/$(CONVERSION_GEN).todo ]]; then                 \
+	    pkgs=$$(cat $(META_DIR)/$(CONVERSION_GEN).todo | paste -sd, -);  \
+	    if [[ "$(DBG_CODEGEN)" == 1 ]]; then                             \
+	        echo "DBG: running $(CONVERSION_GEN) for $$pkgs";            \
+	    fi;                                                              \
+	    ./hack/run-in-gopath.sh $(CONVERSION_GEN)                        \
+	        --extra-peer-dirs $(CONVERSION_EXTRA_PEER_DIRS)              \
+	        --v $(KUBE_VERBOSE)                                          \
+	        --logtostderr                                                \
+	        -i "$$pkgs"                                                  \
+	        -O $(CONVERSION_BASENAME)                                    \
+	        "$$@";                                                       \
+	fi
+
+# For each dir in CONVERSION_DIRS, this establishes a dependency between the
+# output file and the input files that should trigger a rebuild.
+#
+# Note that this is a deps-only statement, not a full rule (see below for that).
+#
+# The '$(eval)' is needed because this has a different RHS for each LHS, and
+# would otherwise produce results that make can't parse.
+$(foreach dir, $(CONVERSION_DIRS), $(eval                     \
+    $(dir)/$(CONVERSION_FILENAME): $($(PRJ_SRC_PATH)/$(dir))  \
+))
+
+# How to regenerate conversion code.  This is a little slow to run, so we batch
+# it up and trigger the batch from the 'generated_files' target.
+$(META_DIR)/$(CONVERSION_GEN).todo: $(CONVERSION_FILES)
+
+$(CONVERSION_FILES): $(CONVERSION_GEN)
+	if [[ "$(DBG_CODEGEN)" == 1 ]]; then          \
+	    echo "DBG: conversion needed $(@D): $?";  \
+	    ls -lf --full-time $@ $? || true;         \
+	fi
+	echo $(PRJ_SRC_PATH)/$(@D) >> $(META_DIR)/$(CONVERSION_GEN).todo
+
+# How to build the generator tool.  The deps for this are defined in
+# the $(GO_PKGDEPS_FILE), above.
+#
+# A word on the need to touch: This rule might trigger if, for example, a
+# non-Go file was added or deleted from a directory on which this depends.
+# This target needs to be reconsidered, but Go realizes it doesn't actually
+# have to be rebuilt.  In that case, make will forever see the dependency as
+# newer than the binary, and try to rebuild it over and over.  So we touch it,
+# and make is happy.
+$(CONVERSION_GEN): $(k8s.io/kubernetes/vendor/k8s.io/code-generator/cmd/conversion-gen)
+	KUBE_BUILD_PLATFORMS="" hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/conversion-gen
+	touch $@
+
+
+# OpenAPI generation
+#
+# Any package that wants open-api functions generated must include a
+# comment-tag in column 0 of one file of the form:
+#     // +k8s:openapi-gen=true
+#
+# The result file, in each pkg, of open-api generation.
+OPENAPI_BASENAME := $(GENERATED_FILE_PREFIX)openapi
+OPENAPI_FILENAME := $(OPENAPI_BASENAME).go
+OPENAPI_OUTPUT_PKG := pkg/generated/openapi
+CRD_OPENAPI_OUTPUT_PKG := staging/src/k8s.io/apiextensions-apiserver/pkg/generated/openapi
+BOILERPLATE_FILENAME := vendor/k8s.io/code-generator/hack/boilerplate.go.txt
+REPORT_FILENAME := $(OUT_DIR)/violations.report
+IGNORED_REPORT_FILENAME := $(OUT_DIR)/ignored_violations.report
+KNOWN_VIOLATION_FILENAME := api/api-rules/violation_exceptions.list
+# When UPDATE_API_KNOWN_VIOLATIONS is set to be true, let the generator to write
+# updated API violations to the known API violation exceptions list.
+ifeq ($(UPDATE_API_KNOWN_VIOLATIONS),true)
+    REPORT_FILENAME:=$(KNOWN_VIOLATION_FILENAME)
+    # When UPDATE_API_KNOWN_VIOLATIONS is set to be true, touch the exceptions
+    # list so that the OPENAPI_OUTFILE target re-run instead of being cached.
+    $(shell touch $(KNOWN_VIOLATION_FILENAME))
+endif
+API_RULE_CHECK_FAILURE_MESSAGE := "ERROR: \n\t API rule check failed. Reported violations differ from known violations. Please read api/api-rules/README.md to resolve the failure. \n"
+
+# The tool used to generate open apis.
+OPENAPI_GEN := $(BIN_DIR)/openapi-gen
+
+# Find all the directories that request open-api generation.
+ifeq ($(DBG_MAKEFILE),1)
+    $(warning ***** finding all +k8s:openapi-gen tags)
+endif
+OPENAPI_DIRS := $(shell                                             \
+    grep --color=never -l '+k8s:openapi-gen=' $(ALL_K8S_TAG_FILES)  \
+        | xargs -n1 dirname                                         \
+        | LC_ALL=C sort -u                                          \
+)
+
+OPENAPI_OUTFILE := $(OPENAPI_OUTPUT_PKG)/$(OPENAPI_FILENAME)
+CRD_OPENAPI_OUTFILE := $(CRD_OPENAPI_OUTPUT_PKG)/$(OPENAPI_FILENAME)
+
+# This rule is the user-friendly entrypoint for openapi generation.
+.PHONY: gen_openapi
+gen_openapi: $(OPENAPI_OUTFILE) $(OPENAPI_GEN) $(CRD_OPENAPI_OUTFILE)
+
+# For each dir in OPENAPI_DIRS, this establishes a dependency between the
+# output file and the input files that should trigger a rebuild.
+#
+# Note that this is a deps-only statement, not a full rule (see below for that).
+#
+# The '$(eval)' is needed because this has a different RHS for each LHS, and
+# would otherwise produce results that make can't parse.
+$(foreach dir, $(OPENAPI_DIRS), $(eval             \
+    $(OPENAPI_OUTFILE): $($(PRJ_SRC_PATH)/$(dir))  \
+))
+
+# How to regenerate open-api code.  This emits a single file for all results.
+# The Make rule fails if generated API rule violation report differs from the checked-in
+# violation file, and prints error message to request developer to fix either the API
+# source code, or the known API rule violation file.
+$(OPENAPI_OUTFILE): $(OPENAPI_GEN) $(KNOWN_VIOLATION_FILENAME)
+	./hack/run-in-gopath.sh $(OPENAPI_GEN)                                          \
+	    --v $(KUBE_VERBOSE)                                                         \
+	    --logtostderr                                                               \
+	    -i $$(echo $(addprefix $(PRJ_SRC_PATH)/, $(OPENAPI_DIRS)) | sed 's/ /,/g')  \
+	    -p $(PRJ_SRC_PATH)/$(OPENAPI_OUTPUT_PKG)                                    \
+	    -O $(OPENAPI_BASENAME)                                                      \
+	    -h $(BOILERPLATE_FILENAME)                                                  \
+	    -r $(REPORT_FILENAME)                                                       \
+	    "$$@";                                                                      \
+	diff $(REPORT_FILENAME) $(KNOWN_VIOLATION_FILENAME) ||                          \
+	(echo -e $(API_RULE_CHECK_FAILURE_MESSAGE); exit 1)
+
+# TODO(roycaihw): move the automation to apiextensions-apiserver
+$(CRD_OPENAPI_OUTFILE): $(OPENAPI_GEN)
+	./hack/run-in-gopath.sh $(OPENAPI_GEN)                                          \
+	    --v $(KUBE_VERBOSE)                                                         \
+	    --logtostderr                                                               \
+	    -i "k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/api/autoscaling/v1"         \
+	    -p $(PRJ_SRC_PATH)/$(CRD_OPENAPI_OUTPUT_PKG)                                \
+	    -O $(OPENAPI_BASENAME)                                                      \
+	    -h $(BOILERPLATE_FILENAME)                                                  \
+	    -r $(IGNORED_REPORT_FILENAME)                                               \
+	    "$$@"
+
+# How to build the generator tool.  The deps for this are defined in
+# the $(GO_PKGDEPS_FILE), above.
+#
+# A word on the need to touch: This rule might trigger if, for example, a
+# non-Go file was added or deleted from a directory on which this depends.
+# This target needs to be reconsidered, but Go realizes it doesn't actually
+# have to be rebuilt.  In that case, make will forever see the dependency as
+# newer than the binary, and try to "rebuild" it over and over.  So we touch
+# it, and make is happy.
+$(OPENAPI_GEN): $(k8s.io/kubernetes/vendor/k8s.io/kube-openapi/cmd/openapi-gen)
+	KUBE_BUILD_PLATFORMS="" hack/make-rules/build.sh ./vendor/k8s.io/kube-openapi/cmd/openapi-gen
+	touch $@
+
+
+# bindata generation
+#
+
+# The tool used to generate bindata files.
+BINDATA_GEN := $(BIN_DIR)/go-bindata
+
+# A wrapper script that generates all bindata files.  It is fast enough that we
+# don't care.
+BINDATA_SCRIPT := hack/generate-bindata.sh
+
+# This rule is the user-friendly entrypoint for bindata generation.
+.PHONY: gen_bindata
+gen_bindata: $(BINDATA_GEN) FORCE
+	./hack/run-in-gopath.sh $(BINDATA_SCRIPT)
+
+# How to build the generator tool.  The deps for this are defined in
+# the $(GO_PKGDEPS_FILE), above.
+#
+# A word on the need to touch: This rule might trigger if, for example, a
+# non-Go file was added or deleted from a directory on which this depends.
+# This target needs to be reconsidered, but Go realizes it doesn't actually
+# have to be rebuilt.  In that case, make will forever see the dependency as
+# newer than the binary, and try to rebuild it over and over.  So we touch it,
+# and make is happy.
+$(BINDATA_GEN): $(k8s.io/kubernetes/vendor/github.com/jteeuwen/go-bindata/go-bindata)
+	KUBE_BUILD_PLATFORMS="" hack/make-rules/build.sh ./vendor/github.com/jteeuwen/go-bindata/go-bindata
+	touch $@

+ 89 - 0
kubernetes-v1.15.4/build/root/WORKSPACE

@@ -0,0 +1,89 @@
+workspace(name = "io_k8s_kubernetes")
+
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
+load("//build:workspace_mirror.bzl", "mirror")
+
+http_archive(
+    name = "bazel_toolchains",
+    sha256 = "3a6ffe6dd91ee975f5d5bc5c50b34f58e3881dfac59a7b7aba3323bd8f8571a8",
+    strip_prefix = "bazel-toolchains-92dd8a7",
+    urls = [
+        "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/92dd8a7.tar.gz",
+        "https://github.com/bazelbuild/bazel-toolchains/archive/92dd8a7.tar.gz",
+    ],
+)
+
+load("@bazel_toolchains//rules:rbe_repo.bzl", "rbe_autoconfig")
+
+rbe_autoconfig(
+    name = "rbe_default",
+    base_container_digest = "sha256:677c1317f14c6fd5eba2fd8ec645bfdc5119f64b3e5e944e13c89e0525cc8ad1",
+    digest = "sha256:b7c2e7a18968b9df2db43eda722c5ae592aafbf774ba2766074a9c96926743d8",
+    registry = "gcr.io",
+    repository = "k8s-testimages/bazel-krte",
+    # tag = "latest",
+)
+
+http_archive(
+    name = "bazel_skylib",
+    sha256 = "eb5c57e4c12e68c0c20bc774bfbc60a568e800d025557bc4ea022c6479acc867",
+    strip_prefix = "bazel-skylib-0.6.0",
+    urls = mirror("https://github.com/bazelbuild/bazel-skylib/archive/0.6.0.tar.gz"),
+)
+
+load("@bazel_skylib//lib:versions.bzl", "versions")
+
+versions.check(minimum_bazel_version = "0.23.0")
+
+http_archive(
+    name = "io_k8s_repo_infra",
+    sha256 = "4a8384320fba401cbf21fef177aa113ed8fe35952ace98e00b796cac87ae7868",
+    strip_prefix = "repo-infra-df02ded38f9506e5bbcbf21702034b4fef815f2f",
+    urls = mirror("https://github.com/kubernetes/repo-infra/archive/df02ded38f9506e5bbcbf21702034b4fef815f2f.tar.gz"),
+)
+
+http_archive(
+    name = "io_bazel_rules_go",
+    sha256 = "f635b285d7e902ac7327637edbba98a4f110e8202c8f4fb49d2f6ecd837f704a",
+    urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.18.9/rules_go-0.18.9.tar.gz"),
+)
+
+load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
+
+go_rules_dependencies()
+
+go_register_toolchains(
+    go_version = "1.12.9",
+)
+
+http_archive(
+    name = "io_bazel_rules_docker",
+    sha256 = "aed1c249d4ec8f703edddf35cbe9dfaca0b5f5ea6e4cd9e83e99f3b0d1136c3d",
+    strip_prefix = "rules_docker-0.7.0",
+    urls = mirror("https://github.com/bazelbuild/rules_docker/archive/v0.7.0.tar.gz"),
+)
+
+load(
+    "@io_bazel_rules_docker//repositories:repositories.bzl",
+    container_repositories = "repositories",
+)
+
+container_repositories()
+
+load("@io_bazel_rules_docker//container:container.bzl", "container_pull")
+
+container_pull(
+    name = "official_busybox",
+    digest = "sha256:5e8e0509e829bb8f990249135a36e81a3ecbe94294e7a185cc14616e5fad96bd",
+    registry = "index.docker.io",
+    repository = "library/busybox",
+    tag = "latest",  # ignored, but kept here for documentation
+)
+
+load("//build:workspace.bzl", "release_dependencies")
+
+release_dependencies()
+
+load("//build:workspace_mirror.bzl", "export_urls")
+
+export_urls("workspace_urls")

+ 11 - 0
kubernetes-v1.15.4/build/rpms/10-kubeadm.conf

@@ -0,0 +1,11 @@
+# Note: This dropin only works with kubeadm and kubelet v1.11+
+[Service]
+Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
+Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
+# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
+EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
+# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
+# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
+EnvironmentFile=-/etc/sysconfig/kubelet
+ExecStart=
+ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS

+ 2 - 0
kubernetes-v1.15.4/build/rpms/50-kubeadm.conf

@@ -0,0 +1,2 @@
+# The file is provided as part of the kubeadm package
+net.ipv4.ip_forward = 1

+ 128 - 0
kubernetes-v1.15.4/build/rpms/BUILD

@@ -0,0 +1,128 @@
+package(default_visibility = ["//visibility:public"])
+
+load("//build:platforms.bzl", "CLIENT_PLATFORMS", "NODE_PLATFORMS", "for_platforms")
+load("//build:workspace.bzl", "CNI_VERSION", "CRI_TOOLS_VERSION")
+load("@io_k8s_repo_infra//defs:build.bzl", "release_filegroup")
+load("@io_k8s_repo_infra//defs:rpm.bzl", "pkg_rpm_for_goarch")
+
+release_filegroup(
+    name = "rpms",
+    conditioned_srcs = for_platforms(
+        default = [],
+        for_client = [":kubectl.rpm"],
+        for_node = [
+            ":cri-tools.rpm",
+            ":kubeadm.rpm",
+            ":kubelet.rpm",
+            ":kubernetes-cni.rpm",
+        ],
+        only_os = "linux",
+    ),
+    tags = ["manual"],
+    visibility = ["//visibility:public"],
+)
+
+# Create genrules to copy the arch-specific RPMs to RPMs without the arch in their filename.
+genrule(
+    name = "kubectl",
+    srcs = select(for_platforms(
+        for_client = [":kubectl-{ARCH}.rpm"],
+        only_os = "linux",
+    )),
+    outs = ["kubectl.rpm"],
+    cmd = "cp $< $@",
+    output_to_bindir = True,
+)
+
+[genrule(
+    name = pkg,
+    srcs = select(for_platforms(
+        for_client = [":%s-{ARCH}.rpm" % pkg],
+        only_os = "linux",
+    )),
+    outs = ["%s.rpm" % pkg],
+    cmd = "cp $< $@",
+    output_to_bindir = True,
+) for pkg in [
+    "cri-tools",
+    "kubeadm",
+    "kubelet",
+    "kubernetes-cni",
+]]
+
+[pkg_rpm_for_goarch(
+    name = "kubectl",
+    changelog = "//:CHANGELOG.md",
+    data = [
+        "//cmd/kubectl",
+    ],
+    goarch = arch,
+    spec_file = "kubectl.spec",
+    tags = ["manual"],
+    version_file = "//build:os_package_version",
+) for arch in CLIENT_PLATFORMS["linux"]]
+
+[pkg_rpm_for_goarch(
+    name = "kubelet",
+    changelog = "//:CHANGELOG.md",
+    data = [
+        "kubelet.service",
+        "//cmd/kubelet",
+    ],
+    goarch = arch,
+    spec_file = "kubelet.spec",
+    tags = ["manual"],
+    version_file = "//build:os_package_version",
+) for arch in NODE_PLATFORMS["linux"]]
+
+[pkg_rpm_for_goarch(
+    name = "kubeadm",
+    changelog = "//:CHANGELOG.md",
+    data = [
+        "10-kubeadm.conf",
+        "50-kubeadm.conf",
+        "kubeadm.conf",
+        "kubelet.env",
+        "//cmd/kubeadm",
+    ],
+    goarch = arch,
+    spec_file = "kubeadm.spec",
+    tags = ["manual"],
+    version_file = "//build:os_package_version",
+) for arch in NODE_PLATFORMS["linux"]]
+
+[pkg_rpm_for_goarch(
+    name = "kubernetes-cni",
+    changelog = "//:CHANGELOG.md",
+    data = [
+        "@kubernetes_cni_{GOARCH}//file",
+    ],
+    goarch = arch,
+    spec_file = "kubernetes-cni.spec",
+    tags = ["manual"],
+    version = CNI_VERSION,
+) for arch in NODE_PLATFORMS["linux"]]
+
+[pkg_rpm_for_goarch(
+    name = "cri-tools",
+    data = [
+        "@cri_tools_{GOARCH}//file",
+    ],
+    goarch = arch,
+    spec_file = "cri-tools.spec",
+    tags = ["manual"],
+    version = CRI_TOOLS_VERSION,
+) for arch in NODE_PLATFORMS["linux"]]
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+)

+ 15 - 0
kubernetes-v1.15.4/build/rpms/OWNERS

@@ -0,0 +1,15 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+  - luxas
+  - jbeda
+  - mikedanese
+  - pipejakob
+  - chuckha
+  - timothysc
+approvers:
+  - luxas
+  - jbeda
+  - mikedanese
+  - pipejakob
+  - timothysc

+ 0 - 0
kubernetes-v1.15.4/build/rpms/cri-tools.spec


Some files were not shown because too many files changed in this diff