boilerplate.py

#!/usr/bin/env python

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import argparse
import datetime
import difflib
import glob
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
    "filenames",
    help="list of files to check, all files if unspecified",
    nargs='*')

rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
    "--rootdir", default=rootdir, help="root directory to examine")

default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
    "--boilerplate-dir", default=default_boilerplate_dir)

parser.add_argument(
    "-v", "--verbose",
    help="give verbose output regarding why a file does not pass",
    action="store_true")

args = parser.parse_args()
# Verbose diagnostics go to stderr; otherwise they are discarded.
verbose_out = sys.stderr if args.verbose else open(os.devnull, "w")

def get_refs():
    refs = {}

    for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
        extension = os.path.basename(path).split(".")[1]

        ref_file = open(path, 'r')
        ref = ref_file.read().splitlines()
        ref_file.close()
        refs[extension] = ref

    return refs
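# Illustrative note: assuming reference files such as boilerplate.go.txt and
# boilerplate.py.txt are present in --boilerplate-dir, get_refs() returns a
# dict keyed by extension, e.g. {"go": [...], "py": [...], "generatego": [...]},
# where each value is the list of lines of the corresponding
# boilerplate.<extension>.txt file.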

def is_generated_file(filename, data, regexs):
    # Files on the explicit skip list contain "DO NOT EDIT" but are maintained
    # by hand, so they are not treated as generated.
    for d in skipped_ungenerated_files:
        if d in filename:
            return False

    p = regexs["generated"]
    return p.search(data)

def file_passes(filename, refs, regexs):
    try:
        f = open(filename, 'r')
    except Exception as exc:
        print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
        return False

    data = f.read()
    f.close()

    # determine if the file is automatically generated
    generated = is_generated_file(filename, data, regexs)

    basename = os.path.basename(filename)
    extension = file_extension(filename)
    if generated:
        if extension == "go":
            extension = "generatego"
        elif extension == "bzl":
            extension = "generatebzl"

    if extension != "":
        ref = refs[extension]
    else:
        ref = refs[basename]

    # remove extra content from the top of files
    if extension == "go" or extension == "generatego":
        p = regexs["go_build_constraints"]
        (data, found) = p.subn("", data, 1)
    elif extension in ["sh", "py"]:
        p = regexs["shebang"]
        (data, found) = p.subn("", data, 1)

    data = data.splitlines()

    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        print('File %s smaller than reference (%d < %d)' %
              (filename, len(data), len(ref)),
              file=verbose_out)
        return False

    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]

    p = regexs["year"]
    for d in data:
        if p.search(d):
            if generated:
                print('File %s has the YEAR field, but it should not be in a generated file' %
                      filename, file=verbose_out)
            else:
                print('File %s has the YEAR field, but is missing the year' %
                      filename, file=verbose_out)
            return False

    if not generated:
        # Replace the copyright year (2014 through the current year, per
        # regexs["date"]) with "YEAR" so it can be compared to the reference.
        p = regexs["date"]
        for i, d in enumerate(data):
            (data[i], found) = p.subn('YEAR', d)
            if found != 0:
                break

    # if we don't match the reference at this point, fail
    if ref != data:
        print("Header in %s does not match reference, diff:" %
              filename, file=verbose_out)
        if args.verbose:
            print(file=verbose_out)
            for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
                print(line, file=verbose_out)
            print(file=verbose_out)
        return False

    return True

def file_extension(filename):
    return os.path.splitext(filename)[1].split(".")[-1].lower()

skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
                "vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test",
                "staging/src/k8s.io/kubectl/pkg/generated/bindata.go"]

# list of files that contain 'DO NOT EDIT' but are not generated
skipped_ungenerated_files = [
    'hack/lib/swagger.sh', 'hack/boilerplate/boilerplate.py']

def normalize_files(files):
    newfiles = []
    for pathname in files:
        if any(x in pathname for x in skipped_dirs):
            continue
        newfiles.append(pathname)
    for i, pathname in enumerate(newfiles):
        if not os.path.isabs(pathname):
            newfiles[i] = os.path.join(args.rootdir, pathname)
    return newfiles

def get_files(extensions):
    files = []
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # don't visit certain dirs. This is just a performance improvement
            # as we would prune these later in normalize_files(). But doing it
            # cuts down the amount of filesystem walking we do and cuts down
            # the size of the file list
            for d in skipped_dirs:
                if d in dirs:
                    dirs.remove(d)

            for name in walkfiles:
                pathname = os.path.join(root, name)
                files.append(pathname)

    files = normalize_files(files)

    outfiles = []
    for pathname in files:
        basename = os.path.basename(pathname)
        extension = file_extension(pathname)
        if extension in extensions or basename in extensions:
            outfiles.append(pathname)
    return outfiles

def get_dates():
    years = datetime.datetime.now().year
    return '(%s)' % '|'.join((str(year) for year in range(2014, years + 1)))

def get_regexs():
    regexs = {}
    # Search for "YEAR" which exists in the boilerplate, but shouldn't appear in the real thing
    regexs["year"] = re.compile('YEAR')
    # get_dates() returns a regex matching any year from 2014 through the current
    # year, e.g. "(2014|2015|2016|2017|2018)"; copyright holder names can be anything
    regexs["date"] = re.compile(get_dates())
    # strip the "// +build ..." build constraints from the top of Go files
    regexs["go_build_constraints"] = re.compile(
        r"^(// \+build.*\n)+\n", re.MULTILINE)
    # strip the "#!..." shebang line from scripts
    regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
    # Search for the marker used by generated files
    regexs["generated"] = re.compile('DO NOT EDIT')
    return regexs

def main():
    regexs = get_regexs()
    refs = get_refs()
    filenames = get_files(refs.keys())

    for filename in filenames:
        if not file_passes(filename, refs, regexs):
            print(filename, file=sys.stdout)

    return 0


if __name__ == "__main__":
    sys.exit(main())
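
# Example invocations (a sketch; paths assume this script lives at
# hack/boilerplate/boilerplate.py inside a Kubernetes-style repository, and
# "pkg/some_file.go" is a hypothetical path used only for illustration):
#
#   hack/boilerplate/boilerplate.py                      # check every file under --rootdir
#   hack/boilerplate/boilerplate.py -v pkg/some_file.go  # explain why one file fails
#   hack/boilerplate/boilerplate.py --rootdir /path/to/repo --boilerplate-dir /path/to/repo/hack/boilerplate
#
# Files that fail the check are printed to stdout, one path per line; no
# output means every checked file carries the expected license header.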