main.go

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// do a fast type check of kubernetes code, for all platforms.
package main

import (
	"flag"
	"fmt"
	"go/ast"
	"go/build"
	"go/parser"
	"go/token"
	"go/types"
	"io"
	"log"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/crypto/ssh/terminal"

	srcimporter "k8s.io/kubernetes/third_party/go-srcimporter"
)

var (
	verbose   = flag.Bool("verbose", false, "print more information")
	cross     = flag.Bool("cross", true, "build for all platforms")
	platforms = flag.String("platform", "", "comma-separated list of platforms to typecheck")
	timings   = flag.Bool("time", false, "output times taken for each phase")
	defuses   = flag.Bool("defuse", false, "output defs/uses")
	serial    = flag.Bool("serial", false, "don't type check platforms in parallel")

	isTerminal = terminal.IsTerminal(int(os.Stdout.Fd()))
	logPrefix  = ""

	// When processed in order, windows and darwin are early to make
	// interesting OS-based errors happen earlier.
	crossPlatforms = []string{
		"linux/amd64", "windows/386",
		"darwin/amd64", "linux/arm",
		"linux/386", "windows/amd64",
		"linux/arm64", "linux/ppc64le",
		"linux/s390x", "darwin/386",
	}
	darwinPlatString  = "darwin/386,darwin/amd64"
	windowsPlatString = "windows/386,windows/amd64"
)

type analyzer struct {
	fset      *token.FileSet // positions are relative to fset
	conf      types.Config
	ctx       build.Context
	failed    bool
	platform  string
	donePaths map[string]interface{}
	errors    []string
}
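
// newAnalyzer builds an analyzer for a single "GOOS/GOARCH" platform string,
// with a types.Config that collects errors instead of stopping at the first one.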
func newAnalyzer(platform string) *analyzer {
	ctx := build.Default
	platSplit := strings.Split(platform, "/")
	ctx.GOOS, ctx.GOARCH = platSplit[0], platSplit[1]
	ctx.CgoEnabled = true
	a := &analyzer{
		platform:  platform,
		fset:      token.NewFileSet(),
		ctx:       ctx,
		donePaths: make(map[string]interface{}),
	}
	a.conf = types.Config{
		FakeImportC: true,
		Error:       a.handleError,
		Sizes:       types.SizesFor("gc", a.ctx.GOARCH),
	}
	a.conf.Importer = srcimporter.New(
		&a.ctx, a.fset, make(map[string]*types.Package))
	if *verbose {
		fmt.Printf("context: %#v\n", ctx)
	}
	return a
}
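
// handleError records a type-checking error, skipping a few known-noisy
// messages, and prints it immediately when running in serial mode.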
func (a *analyzer) handleError(err error) {
	if e, ok := err.(types.Error); ok {
		// useful for some ignores:
		// path := e.Fset.Position(e.Pos).String()
		ignore := false
		// TODO(rmmh): read ignores from a file, so this code can
		// be Kubernetes-agnostic. Unused ignores should be treated as
		// errors, to ensure coverage isn't overly broad.
		if strings.Contains(e.Msg, "GetOpenAPIDefinitions") {
			// TODO(rmmh): figure out why this happens.
			// cmd/kube-apiserver/app/server.go:392:70
			// test/integration/framework/master_utils.go:131:84
			ignore = true
		}
		if ignore {
			if *verbose {
				fmt.Println("ignoring error:", err)
			}
			return
		}
	}
	a.errors = append(a.errors, err.Error())
	if *serial {
		fmt.Fprintf(os.Stderr, "%sERROR(%s) %s\n", logPrefix, a.platform, err)
	}
	a.failed = true
}
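
// dumpAndResetErrors returns the errors accumulated so far and clears the
// list, so each directory's results can be reported separately.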
func (a *analyzer) dumpAndResetErrors() []string {
	es := a.errors
	a.errors = nil
	return es
}

// collect parses and type-checks every package found in dir, skipping
// directories that have already been processed.
func (a *analyzer) collect(dir string) {
	if _, ok := a.donePaths[dir]; ok {
		return
	}
	a.donePaths[dir] = nil
	// Create the AST by parsing src.
	fs, err := parser.ParseDir(a.fset, dir, nil, parser.AllErrors)
	if err != nil {
		fmt.Println(logPrefix+"ERROR(syntax)", err)
		a.failed = true
		return
	}
	if len(fs) > 1 && *verbose {
		fmt.Println("multiple packages in dir:", dir)
	}
	for _, p := range fs {
		// returns first error, but a.handleError deals with it
		files := a.filterFiles(p.Files)
		if *verbose {
			fmt.Printf("path: %s package: %s files: ", dir, p.Name)
			for _, f := range files {
				fname := filepath.Base(a.fset.File(f.Pos()).Name())
				fmt.Printf("%s ", fname)
			}
			fmt.Printf("\n")
		}
		a.typeCheck(dir, files)
	}
}

// filterFiles restricts a list of files to only those that should be built by
// the current platform. This includes both build suffixes (_windows.go) and build
// tags ("// +build !linux" at the beginning).
func (a *analyzer) filterFiles(fs map[string]*ast.File) []*ast.File {
	files := []*ast.File{}
	for _, f := range fs {
		fpath := a.fset.File(f.Pos()).Name()
		dir, name := filepath.Split(fpath)
		matches, err := a.ctx.MatchFile(dir, name)
		if err != nil {
			fmt.Fprintf(os.Stderr, "%sERROR reading %s: %s\n", logPrefix, fpath, err)
			a.failed = true
			continue
		}
		if matches {
			files = append(files, f)
		}
	}
	return files
}
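
// typeCheck runs the type checker over the filtered files of a single
// directory, recursing into any vendored packages they import.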
func (a *analyzer) typeCheck(dir string, files []*ast.File) error {
	info := types.Info{
		Defs: make(map[*ast.Ident]types.Object),
		Uses: make(map[*ast.Ident]types.Object),
	}
	// NOTE: this type check does a *recursive* import, but srcimporter
	// doesn't do a full type check (ignores function bodies)-- this has
	// some additional overhead.
	//
	// This means that we need to ensure that typeCheck runs on all
	// code we will be compiling.
	//
	// TODO(rmmh): Customize our forked srcimporter to do this better.
	pkg, err := a.conf.Check(dir, a.fset, files, &info)
	if err != nil {
		return err // type error
	}
	// A significant fraction of vendored code only compiles on Linux,
	// but it's only imported by code that has build-guards for Linux.
	// Track vendored code to type-check it in a second pass.
	for _, imp := range pkg.Imports() {
		if strings.HasPrefix(imp.Path(), "k8s.io/kubernetes/vendor/") {
			vendorPath := imp.Path()[len("k8s.io/kubernetes/"):]
			if *verbose {
				fmt.Println("recursively checking vendor path:", vendorPath)
			}
			a.collect(vendorPath)
		}
	}
	if *defuses {
		for id, obj := range info.Defs {
			fmt.Printf("%s: %q defines %v\n",
				a.fset.Position(id.Pos()), id.Name, obj)
		}
		for id, obj := range info.Uses {
			fmt.Printf("%s: %q uses %v\n",
				a.fset.Position(id.Pos()), id.Name, obj)
		}
	}
	return nil
}
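
// collector accumulates the list of directories to type-check.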
type collector struct {
	dirs []string
}

// handlePath walks the filesystem recursively, collecting directories,
// ignoring some unneeded directories (hidden/vendored) that are handled
// specially later.
func (c *collector) handlePath(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if info.IsDir() {
		// Ignore hidden directories (.git, .cache, etc)
		if len(path) > 1 && path[0] == '.' ||
			// Staging code is symlinked from vendor/k8s.io, and uses import
			// paths as if it were inside of vendor/. It fails typechecking
			// inside of staging/, but works when typechecked as part of vendor/.
			path == "staging" ||
			// OS-specific vendor code tends to be imported by OS-specific
			// packages. We recursively typecheck imported vendored packages for
			// each OS, but don't typecheck everything for every OS.
			path == "vendor" ||
			path == "_output" ||
			// This is a weird one. /testdata/ is *mostly* ignored by Go,
			// and this translates to kubernetes/vendor not working.
			// edit/record.go doesn't compile without gopkg.in/yaml.v2
			// in $GOSRC/$GOROOT (both typecheck and the shell script).
			path == "pkg/kubectl/cmd/testdata/edit" {
			return filepath.SkipDir
		}
		c.dirs = append(c.dirs, path)
	}
	return nil
}
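
// analyzerResult holds the errors found for one directory on one platform.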
type analyzerResult struct {
	platform string
	dir      string
	errors   []string
}
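
// dedupeErrors collects results from every platform and prints each distinct
// error once, annotated with the platforms it occurred on.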
func dedupeErrors(out io.Writer, results chan analyzerResult, nDirs, nPlatforms int) {
	pkgRes := make(map[string][]analyzerResult)
	for done := 0; done < nDirs; {
		res := <-results
		pkgRes[res.dir] = append(pkgRes[res.dir], res)
		if len(pkgRes[res.dir]) != nPlatforms {
			continue // expect more results for dir
		}
		done++
		// Collect list of platforms for each error
		errPlats := map[string][]string{}
		for _, res := range pkgRes[res.dir] {
			for _, err := range res.errors {
				errPlats[err] = append(errPlats[err], res.platform)
			}
		}
		// Print each error (in the same order!) once.
		for _, res := range pkgRes[res.dir] {
			for _, err := range res.errors {
				if errPlats[err] == nil {
					continue // already printed
				}
				sort.Strings(errPlats[err])
				plats := strings.Join(errPlats[err], ",")
				if len(errPlats[err]) == len(crossPlatforms) {
					plats = "all"
				} else if plats == darwinPlatString {
					plats = "darwin"
				} else if plats == windowsPlatString {
					plats = "windows"
				}
				fmt.Fprintf(out, "%sERROR(%s) %s\n", logPrefix, plats, err)
				delete(errPlats, err)
			}
		}
		delete(pkgRes, res.dir)
	}
}

func main() {
	flag.Parse()
	args := flag.Args()
	if *verbose {
		*serial = true // to avoid confusing interleaved logs
	}
	if len(args) == 0 {
		args = append(args, ".")
	}
	c := collector{}
	for _, arg := range args {
		err := filepath.Walk(arg, c.handlePath)
		if err != nil {
			log.Fatalf("Error walking: %v", err)
		}
	}
	sort.Strings(c.dirs)
	ps := crossPlatforms[:]
	if *platforms != "" {
		ps = strings.Split(*platforms, ",")
	} else if !*cross {
		ps = ps[:1]
	}
	fmt.Println("type-checking: ", strings.Join(ps, ", "))
	var wg sync.WaitGroup
	var processedDirs int64
	var currentWork int64 // (dir_index << 8) | platform_index
	statuses := make([]int, len(ps))
	var results chan analyzerResult
	if !*serial {
		results = make(chan analyzerResult)
		wg.Add(1)
		go func() {
			dedupeErrors(os.Stderr, results, len(c.dirs), len(ps))
			wg.Done()
		}()
	}
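	// Run one analyzer per platform, either serially or in its own goroutine,
	// streaming per-directory results to the dedupe goroutine when parallel.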
	for i, p := range ps {
		wg.Add(1)
		fn := func(i int, p string) {
			start := time.Now()
			a := newAnalyzer(p)
			for n, dir := range c.dirs {
				a.collect(dir)
				atomic.AddInt64(&processedDirs, 1)
				atomic.StoreInt64(&currentWork, int64(n<<8|i))
				if results != nil {
					results <- analyzerResult{p, dir, a.dumpAndResetErrors()}
				}
			}
			if a.failed {
				statuses[i] = 1
			}
			if *timings {
				fmt.Printf("%s took %.1fs\n", p, time.Since(start).Seconds())
			}
			wg.Done()
		}
		if *serial {
			fn(i, p)
		} else {
			go fn(i, p)
		}
	}
	if isTerminal {
		logPrefix = "\r" // clear status bar when printing
		// Display a status bar so devs can estimate completion times.
		wg.Add(1)
		go func() {
			total := len(ps) * len(c.dirs)
			for proc := 0; ; proc = int(atomic.LoadInt64(&processedDirs)) {
				work := atomic.LoadInt64(&currentWork)
				dir := c.dirs[work>>8]
				platform := ps[work&0xFF]
				if len(dir) > 80 {
					dir = dir[:80]
				}
				fmt.Printf("\r%d/%d \033[2m%-13s\033[0m %-80s", proc, total, platform, dir)
				if proc == total {
					fmt.Println()
					break
				}
				time.Sleep(50 * time.Millisecond)
			}
			wg.Done()
		}()
	}
	wg.Wait()
	for _, status := range statuses {
		if status != 0 {
			os.Exit(status)
		}
	}
}