boilerplate.py

#!/usr/bin/env python

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import argparse
import datetime
import difflib
import glob
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
    "filenames",
    help="list of files to check, all files if unspecified",
    nargs='*')

rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
    "--rootdir", default=rootdir, help="root directory to examine")

default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
    "--boilerplate-dir", default=default_boilerplate_dir)

parser.add_argument(
    "-v", "--verbose",
    help="give verbose output regarding why a file does not pass",
    action="store_true")

args = parser.parse_args()

verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
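

# get_refs loads the reference boilerplate headers from boilerplate.<extension>.txt
# files in the boilerplate directory and returns them keyed by file extension.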
def get_refs():
    refs = {}

    for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
        extension = os.path.basename(path).split(".")[1]

        ref_file = open(path, 'r')
        ref = ref_file.read().splitlines()
        ref_file.close()
        refs[extension] = ref

    return refs
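

# is_generated_file reports whether a file is auto-generated: it contains the
# "DO NOT EDIT" marker and is not listed in skipped_ungenerated_files.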
def is_generated_file(filename, data, regexs):
    for d in skipped_ungenerated_files:
        if d in filename:
            return False

    p = regexs["generated"]
    return p.search(data)
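

# file_passes checks whether the header of a single file matches the reference
# boilerplate for its extension (or basename), after stripping Go build
# constraints or shebang lines and normalizing the copyright year.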
def file_passes(filename, refs, regexs):
    try:
        f = open(filename, 'r')
    except Exception as exc:
        print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
        return False

    data = f.read()
    f.close()

    # determine if the file is automatically generated
    generated = is_generated_file(filename, data, regexs)

    basename = os.path.basename(filename)
    extension = file_extension(filename)
    if generated:
        if extension == "go":
            extension = "generatego"
        elif extension == "bzl":
            extension = "generatebzl"

    if extension != "":
        ref = refs[extension]
    else:
        ref = refs[basename]

    # remove extra content from the top of files
    if extension == "go" or extension == "generatego":
        p = regexs["go_build_constraints"]
        (data, found) = p.subn("", data, 1)
    elif extension == "sh":
        p = regexs["shebang"]
        (data, found) = p.subn("", data, 1)

    data = data.splitlines()

    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        print('File %s smaller than reference (%d < %d)' %
              (filename, len(data), len(ref)),
              file=verbose_out)
        return False

    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]

    p = regexs["year"]
    for d in data:
        if p.search(d):
            if generated:
                print('File %s has the YEAR field, but it should not be in a generated file' % filename, file=verbose_out)
            else:
                print('File %s has the YEAR field, but is missing the year' % filename, file=verbose_out)
            return False
    if not generated:
        # Replace the copyright year (any year matched by the "date" regex) with "YEAR"
        p = regexs["date"]
        for i, d in enumerate(data):
            (data[i], found) = p.subn('YEAR', d)
            if found != 0:
                break
    # if we don't match the reference at this point, fail
    if ref != data:
        print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
        if args.verbose:
            print(file=verbose_out)
            for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
                print(line, file=verbose_out)
            print(file=verbose_out)
        return False

    return True
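

# file_extension returns the lower-cased extension of a filename, without the dot.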
def file_extension(filename):
    return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
                "vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test",
                "pkg/kubectl/generated/bindata.go"]
# files that contain 'DO NOT EDIT' but are not generated
skipped_ungenerated_files = ['hack/lib/swagger.sh', 'hack/boilerplate/boilerplate.py']
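

# normalize_files drops paths under skipped_dirs and makes the remaining
# relative paths absolute with respect to --rootdir.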
def normalize_files(files):
    newfiles = []
    for pathname in files:
        if any(x in pathname for x in skipped_dirs):
            continue
        newfiles.append(pathname)
    for i, pathname in enumerate(newfiles):
        if not os.path.isabs(pathname):
            newfiles[i] = os.path.join(args.rootdir, pathname)
    return newfiles
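

# get_files returns the files to check: the filenames given on the command line,
# or every file under --rootdir whose extension or basename has a reference header.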
def get_files(extensions):
    files = []
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # don't visit certain dirs. This is just a performance improvement
            # as we would prune these later in normalize_files(). But doing it
            # cuts down the amount of filesystem walking we do and cuts down
            # the size of the file list
            for d in skipped_dirs:
                if d in dirs:
                    dirs.remove(d)

            for name in walkfiles:
                pathname = os.path.join(root, name)
                files.append(pathname)

    files = normalize_files(files)
    outfiles = []
    for pathname in files:
        basename = os.path.basename(pathname)
        extension = file_extension(pathname)
        if extension in extensions or basename in extensions:
            outfiles.append(pathname)
    return outfiles
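

# get_dates builds a regex alternation of every year from 2014 through the current year.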
def get_dates():
    years = datetime.datetime.now().year
    return '(%s)' % '|'.join((str(year) for year in range(2014, years + 1)))
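

# get_regexs compiles the regular expressions used to normalize file headers
# before comparing them against the reference boilerplate.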
def get_regexs():
    regexs = {}
    # Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
    regexs["year"] = re.compile('YEAR')
    # get_dates returns a regex matching any year from 2014 through the current
    # year, e.g. "(2014|2015|2016|2017|2018)"; company holder names can be anything
    regexs["date"] = re.compile(get_dates())
    # strip "// +build ..." build constraints
    regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
    # strip #!.* shebang lines from shell scripts
    regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
    # Search for generated files
    regexs["generated"] = re.compile('DO NOT EDIT')
    return regexs
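

# main prints the path of every file whose header does not match the
# reference boilerplate and always exits 0.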
def main():
    regexs = get_regexs()
    refs = get_refs()
    filenames = get_files(refs.keys())

    for filename in filenames:
        if not file_passes(filename, refs, regexs):
            print(filename, file=sys.stdout)

    return 0


if __name__ == "__main__":
    sys.exit(main())