/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// do a fast type check of kubernetes code, for all platforms.
package main

import (
	"flag"
	"fmt"
	"go/ast"
	"go/build"
	"go/parser"
	"go/token"
	"go/types"
	"io"
	"log"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/crypto/ssh/terminal"

	srcimporter "k8s.io/kubernetes/third_party/go-srcimporter"
)

var (
	verbose    = flag.Bool("verbose", false, "print more information")
	cross      = flag.Bool("cross", true, "build for all platforms")
	platforms  = flag.String("platform", "", "comma-separated list of platforms to typecheck")
	timings    = flag.Bool("time", false, "output times taken for each phase")
	defuses    = flag.Bool("defuse", false, "output defs/uses")
	serial     = flag.Bool("serial", false, "don't type check platforms in parallel")
	skipTest   = flag.Bool("skip-test", false, "don't type check test code")
	tags       = flag.String("tags", "", "comma-separated list of build tags to apply in addition to go's defaults")
	ignoreDirs = flag.String("ignore-dirs", "", "comma-separated list of directories to ignore in addition to the default hardcoded list including staging, vendor, and hidden dirs")

	isTerminal = terminal.IsTerminal(int(os.Stdout.Fd()))
	logPrefix  = ""

	// When processed in order, windows and darwin are early to make
	// interesting OS-based errors happen earlier.
	crossPlatforms = []string{
		"linux/amd64", "windows/386",
		"darwin/amd64", "linux/arm",
		"linux/386", "windows/amd64",
		"linux/arm64", "linux/ppc64le",
		"linux/s390x", "darwin/386",
	}
	darwinPlatString  = "darwin/386,darwin/amd64"
	windowsPlatString = "windows/386,windows/amd64"

	// directories we always ignore
	standardIgnoreDirs = []string{
		// Staging code is symlinked from vendor/k8s.io, and uses import
		// paths as if it were inside of vendor/. It fails typechecking
		// inside of staging/, but works when typechecked as part of vendor/.
		"staging",
		// OS-specific vendor code tends to be imported by OS-specific
		// packages. We recursively typecheck imported vendored packages for
		// each OS, but don't typecheck everything for every OS.
		"vendor",
		"_output",
		// This is a weird one. /testdata/ is *mostly* ignored by Go,
		// and this translates to kubernetes/vendor not working.
		// edit/record.go doesn't compile without gopkg.in/yaml.v2
		// in $GOSRC/$GOROOT (both typecheck and the shell script).
		"pkg/kubectl/cmd/testdata/edit",
	}
)
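
// Illustrative invocations (the "test/typecheck/main.go" path is an assumption
// about where this file lives; positional arguments are directories to walk,
// and "pkg/kubelet" below is just an example directory):
//
//	go run test/typecheck/main.go                           # all of crossPlatforms, starting at "."
//	go run test/typecheck/main.go -cross=false pkg/kubelet  # first platform (linux/amd64) only
//	go run test/typecheck/main.go -platform=windows/386,windows/amd64 -time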

type analyzer struct {
	fset      *token.FileSet // positions are relative to fset
	conf      types.Config
	ctx       build.Context
	failed    bool
	platform  string
	donePaths map[string]interface{}
	errors    []string
}

func newAnalyzer(platform string) *analyzer {
	ctx := build.Default
	platSplit := strings.Split(platform, "/")
	ctx.GOOS, ctx.GOARCH = platSplit[0], platSplit[1]
	ctx.CgoEnabled = true
	if *tags != "" {
		tagsSplit := strings.Split(*tags, ",")
		ctx.BuildTags = append(ctx.BuildTags, tagsSplit...)
	}
	// add selinux tag explicitly
	ctx.BuildTags = append(ctx.BuildTags, "selinux")

	a := &analyzer{
		platform:  platform,
		fset:      token.NewFileSet(),
		ctx:       ctx,
		donePaths: make(map[string]interface{}),
	}
	a.conf = types.Config{
		FakeImportC: true,
		Error:       a.handleError,
		Sizes:       types.SizesFor("gc", a.ctx.GOARCH),
	}

	a.conf.Importer = srcimporter.New(
		&a.ctx, a.fset, make(map[string]*types.Package))

	if *verbose {
		fmt.Printf("context: %#v\n", ctx)
	}

	return a
}

func (a *analyzer) handleError(err error) {
	a.errors = append(a.errors, err.Error())
	if *serial {
		fmt.Fprintf(os.Stderr, "%sERROR(%s) %s\n", logPrefix, a.platform, err)
	}
	a.failed = true
}

func (a *analyzer) dumpAndResetErrors() []string {
	es := a.errors
	a.errors = nil
	return es
}

// collect parses every Go package found in dir and type checks it, skipping
// directories that have already been processed.
func (a *analyzer) collect(dir string) {
	if _, ok := a.donePaths[dir]; ok {
		return
	}
	a.donePaths[dir] = nil

	// Create the AST by parsing src.
	fs, err := parser.ParseDir(a.fset, dir, nil, parser.AllErrors)
	if err != nil {
		fmt.Println(logPrefix+"ERROR(syntax)", err)
		a.failed = true
		return
	}

	if len(fs) > 1 && *verbose {
		fmt.Println("multiple packages in dir:", dir)
	}

	for _, p := range fs {
		// returns first error, but a.handleError deals with it
		files := a.filterFiles(p.Files)
		if *verbose {
			fmt.Printf("path: %s package: %s files: ", dir, p.Name)
			for _, f := range files {
				fname := filepath.Base(a.fset.File(f.Pos()).Name())
				fmt.Printf("%s ", fname)
			}
			fmt.Printf("\n")
		}
		a.typeCheck(dir, files)
	}
}

// filterFiles restricts a list of files to only those that should be built by
// the current platform. This includes both build suffixes (_windows.go) and build
// tags ("// +build !linux" at the beginning).
func (a *analyzer) filterFiles(fs map[string]*ast.File) []*ast.File {
	files := []*ast.File{}
	for _, f := range fs {
		fpath := a.fset.File(f.Pos()).Name()
		if *skipTest && strings.HasSuffix(fpath, "_test.go") {
			continue
		}
		dir, name := filepath.Split(fpath)
		matches, err := a.ctx.MatchFile(dir, name)
		if err != nil {
			fmt.Fprintf(os.Stderr, "%sERROR reading %s: %s\n", logPrefix, fpath, err)
			a.failed = true
			continue
		}
		if matches {
			files = append(files, f)
		}
	}
	return files
}
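
// Illustrative example of the filtering above (file names are hypothetical):
// with the analyzer's context set to windows/amd64, ctx.MatchFile rejects
// foo_linux.go by its file-name suffix and rejects any file whose header
// carries a "// +build linux" constraint, so neither reaches the type checker
// for that platform.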

func (a *analyzer) typeCheck(dir string, files []*ast.File) error {
	info := types.Info{
		Defs: make(map[*ast.Ident]types.Object),
		Uses: make(map[*ast.Ident]types.Object),
	}

	// NOTE: this type check does a *recursive* import, but srcimporter
	// doesn't do a full type check (it ignores function bodies) -- this has
	// some additional overhead.
	//
	// This means that we need to ensure that typeCheck runs on all
	// code we will be compiling.
	//
	// TODO(rmmh): Customize our forked srcimporter to do this better.
	pkg, err := a.conf.Check(dir, a.fset, files, &info)
	if err != nil {
		return err // type error
	}

	// A significant fraction of vendored code only compiles on Linux,
	// but it's only imported by code that has build-guards for Linux.
	// Track vendored code to type-check it in a second pass.
	for _, imp := range pkg.Imports() {
		if strings.HasPrefix(imp.Path(), "k8s.io/kubernetes/vendor/") {
			vendorPath := imp.Path()[len("k8s.io/kubernetes/"):]
			if *verbose {
				fmt.Println("recursively checking vendor path:", vendorPath)
			}
			a.collect(vendorPath)
		}
	}

	if *defuses {
		for id, obj := range info.Defs {
			fmt.Printf("%s: %q defines %v\n",
				a.fset.Position(id.Pos()), id.Name, obj)
		}
		for id, obj := range info.Uses {
			fmt.Printf("%s: %q uses %v\n",
				a.fset.Position(id.Pos()), id.Name, obj)
		}
	}

	return nil
}
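
// Illustrative example of the vendor handling above: an import path such as
// "k8s.io/kubernetes/vendor/github.com/pkg/errors" (a hypothetical vendored
// package) is rewritten to the repo-relative directory
// "vendor/github.com/pkg/errors" and fed back through collect(), so vendored
// code still gets type checked per platform even though the top-level walk
// skips the vendor/ directory.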

type collector struct {
	dirs       []string
	ignoreDirs []string
}

// handlePath is a filepath.Walk callback that collects directories to check,
// skipping some unneeded directories (hidden/vendored) that are handled
// specially later.
func (c *collector) handlePath(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if info.IsDir() {
		// Ignore hidden directories (.git, .cache, etc)
		if len(path) > 1 && path[0] == '.' {
			return filepath.SkipDir
		}
		for _, dir := range c.ignoreDirs {
			if path == dir {
				return filepath.SkipDir
			}
		}
		c.dirs = append(c.dirs, path)
	}
	return nil
}

type analyzerResult struct {
	platform string
	dir      string
	errors   []string
}

// dedupeErrors waits for each directory's results from every platform, then
// prints each distinct error once, annotated with the platforms it occurred on.
func dedupeErrors(out io.Writer, results chan analyzerResult, nDirs, nPlatforms int) {
	pkgRes := make(map[string][]analyzerResult)
	for done := 0; done < nDirs; {
		res := <-results
		pkgRes[res.dir] = append(pkgRes[res.dir], res)
		if len(pkgRes[res.dir]) != nPlatforms {
			continue // expect more results for dir
		}
		done++

		// Collect list of platforms for each error
		errPlats := map[string][]string{}
		for _, res := range pkgRes[res.dir] {
			for _, err := range res.errors {
				errPlats[err] = append(errPlats[err], res.platform)
			}
		}

		// Print each error (in the same order!) once.
		for _, res := range pkgRes[res.dir] {
			for _, err := range res.errors {
				if errPlats[err] == nil {
					continue // already printed
				}
				sort.Strings(errPlats[err])
				plats := strings.Join(errPlats[err], ",")
				if len(errPlats[err]) == len(crossPlatforms) {
					plats = "all"
				} else if plats == darwinPlatString {
					plats = "darwin"
				} else if plats == windowsPlatString {
					plats = "windows"
				}
				fmt.Fprintf(out, "%sERROR(%s) %s\n", logPrefix, plats, err)
				delete(errPlats, err)
			}
		}
		delete(pkgRes, res.dir)
	}
}
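
// Illustrative output of dedupeErrors (the error text is hypothetical): if the
// same error for a directory is reported only by windows/386 and windows/amd64,
// it is printed once as
//
//	ERROR(windows) pkg/foo/bar.go:12:3: undeclared name: baz
//
// and an error seen on every platform in crossPlatforms is printed as ERROR(all).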

func main() {
	flag.Parse()
	args := flag.Args()

	if *verbose {
		*serial = true // to avoid confusing interleaved logs
	}

	if len(args) == 0 {
		args = append(args, ".")
	}

	c := collector{
		ignoreDirs: append([]string(nil), standardIgnoreDirs...),
	}
	if *ignoreDirs != "" {
		c.ignoreDirs = append(c.ignoreDirs, strings.Split(*ignoreDirs, ",")...)
	}

	for _, arg := range args {
		err := filepath.Walk(arg, c.handlePath)
		if err != nil {
			log.Fatalf("Error walking: %v", err)
		}
	}
	sort.Strings(c.dirs)

	ps := crossPlatforms[:]
	if *platforms != "" {
		ps = strings.Split(*platforms, ",")
	} else if !*cross {
		ps = ps[:1]
	}

	fmt.Println("type-checking: ", strings.Join(ps, ", "))

	var wg sync.WaitGroup
	var processedDirs int64
	var currentWork int64 // (dir_index << 8) | platform_index
	statuses := make([]int, len(ps))
	var results chan analyzerResult
	if !*serial {
		results = make(chan analyzerResult)
		wg.Add(1)
		go func() {
			dedupeErrors(os.Stderr, results, len(c.dirs), len(ps))
			wg.Done()
		}()
	}

	for i, p := range ps {
		wg.Add(1)
		fn := func(i int, p string) {
			start := time.Now()
			a := newAnalyzer(p)
			for n, dir := range c.dirs {
				a.collect(dir)
				atomic.AddInt64(&processedDirs, 1)
				atomic.StoreInt64(&currentWork, int64(n<<8|i))
				if results != nil {
					results <- analyzerResult{p, dir, a.dumpAndResetErrors()}
				}
			}
			if a.failed {
				statuses[i] = 1
			}
			if *timings {
				fmt.Printf("%s took %.1fs\n", p, time.Since(start).Seconds())
			}
			wg.Done()
		}
		if *serial {
			fn(i, p)
		} else {
			go fn(i, p)
		}
	}
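
	// Worked example of the currentWork encoding (values are illustrative):
	// with directory index n=5 and platform index i=2, the worker stores
	// 5<<8|2 = 0x0502; the status goroutine below decodes the directory as
	// c.dirs[0x0502>>8] = c.dirs[5] and the platform as ps[0x0502&0xFF] = ps[2].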

	if isTerminal {
		logPrefix = "\r" // clear status bar when printing
		// Display a status bar so devs can estimate completion times.
		wg.Add(1)
		go func() {
			total := len(ps) * len(c.dirs)
			for proc := 0; ; proc = int(atomic.LoadInt64(&processedDirs)) {
				work := atomic.LoadInt64(&currentWork)
				dir := c.dirs[work>>8]
				platform := ps[work&0xFF]
				if len(dir) > 80 {
					dir = dir[:80]
				}
				fmt.Printf("\r%d/%d \033[2m%-13s\033[0m %-80s", proc, total, platform, dir)
				if proc == total {
					fmt.Println()
					break
				}
				time.Sleep(50 * time.Millisecond)
			}
			wg.Done()
		}()
	}

	wg.Wait()

	for _, status := range statuses {
		if status != 0 {
			os.Exit(status)
		}
	}
}