runner.go

package lint

/*
Parallelism

Runner implements parallel processing of packages by spawning one
goroutine per package in the dependency graph, without any semaphores.
Each goroutine initially waits on the completion of all of its
dependencies, thus establishing correct order of processing. Once all
dependencies finish processing, the goroutine will load the package
from export data or source – this loading is guarded by a semaphore,
sized according to the number of CPU cores. This way, we only have as
many packages occupying memory and CPU resources as there are actual
cores to process them.

This combination of unbounded goroutines but bounded package loading
means that if we have many parallel, independent subgraphs, they will
all execute in parallel, while not wasting resources for long linear
chains or trying to process more subgraphs in parallel than the system
can handle.

A minimal, illustrative sketch of this scheme follows the import block
below.
*/

import (
	"bytes"
	"encoding/gob"
	"encoding/hex"
	"fmt"
	"go/ast"
	"go/token"
	"go/types"
	"reflect"
	"regexp"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/packages"
	"golang.org/x/tools/go/types/objectpath"
	"honnef.co/go/tools/config"
	"honnef.co/go/tools/facts"
	"honnef.co/go/tools/internal/cache"
	"honnef.co/go/tools/loader"
)

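// parallelismSketch is a minimal, self-contained sketch of the scheduling
// scheme described in the package comment above. The node type and all
// names here are hypothetical and the function is not used by the runner;
// it only illustrates the idea: one goroutine per node, each waiting for
// its dependencies, with the expensive "load" phase bounded by a semaphore
// sized to the number of CPU cores.
func parallelismSketch() {
	type node struct {
		deps []*node
		done chan struct{}
	}

	// Two independent leaves and one node that depends on both.
	a := &node{done: make(chan struct{})}
	b := &node{done: make(chan struct{})}
	c := &node{deps: []*node{a, b}, done: make(chan struct{})}
	nodes := []*node{a, b, c}

	sem := make(chan struct{}, runtime.GOMAXPROCS(-1))
	var wg sync.WaitGroup
	wg.Add(len(nodes))
	for _, n := range nodes {
		n := n
		go func() {
			defer wg.Done()
			defer close(n.done)
			// Establish ordering: wait for every dependency to finish.
			for _, dep := range n.deps {
				<-dep.done
			}
			// Bound the number of packages being loaded and analyzed at once.
			sem <- struct{}{}
			defer func() { <-sem }()
			// The expensive per-package work (loading, running analyzers)
			// would happen here.
		}()
	}
	wg.Wait()
}
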
// If enabled, abuse of the go/analysis API will lead to panics
const sanityCheck = true

// OPT(dh): for a dependency tree A->B->C->D, if we have cached data
// for B, there should be no need to load C and D individually. Go's
// export data for B contains all the data we need on types, and our
// fact cache could store the union of B, C and D in B.
//
// This may change unused's behavior, however, as it may observe fewer
// interfaces from transitive dependencies.

// A Package is a package in the import graph, together with the state
// the runner accumulates for it: analysis results, facts, problems,
// ignore directives and errors.
type Package struct {
	dependents uint64

	*packages.Package
	Imports    []*Package
	initial    bool
	fromSource bool
	hash       string
	done       chan struct{}

	resultsMu sync.Mutex
	// results maps analyzer IDs to analyzer results
	results []*result

	cfg      *config.Config
	gen      map[string]facts.Generator
	problems []Problem
	ignores  []Ignore
	errs     []error

	// these slices are indexed by analysis
	facts    []map[types.Object][]analysis.Fact
	pkgFacts [][]analysis.Fact

	canClearTypes bool
}

func (pkg *Package) decUse() {
	// Decrement the dependents count by adding the two's complement of 1.
	// AddUint64 returns the new value, so checking it here is race-free,
	// whereas a separate load could observe another goroutine's decrement.
	ret := atomic.AddUint64(&pkg.dependents, ^uint64(0))
	if ret == 0 {
		// nobody depends on this package anymore
		if pkg.canClearTypes {
			pkg.Types = nil
		}
		pkg.facts = nil
		pkg.pkgFacts = nil
		for _, imp := range pkg.Imports {
			imp.decUse()
		}
	}
}

type result struct {
	v     interface{}
	err   error
	ready chan struct{}
}

// A Runner runs analyzers on packages, loading them from export data or
// source and caching facts between runs.
type Runner struct {
	ld    loader.Loader
	cache *cache.Cache

	analyzerIDs analyzerIDs

	// limits parallelism of loading packages
	loadSem chan struct{}

	goVersion int
	stats     *Stats
}

type analyzerIDs struct {
	m map[*analysis.Analyzer]int
}

func (ids analyzerIDs) get(a *analysis.Analyzer) int {
	id, ok := ids.m[a]
	if !ok {
		panic(fmt.Sprintf("no analyzer ID for %s", a.Name))
	}
	return id
}

// A Fact is a serialized analysis fact. Path is the objectpath of the
// object the fact is attached to; it is empty for package facts.
type Fact struct {
	Path string
	Fact analysis.Fact
}

type analysisAction struct {
	analyzer        *analysis.Analyzer
	analyzerID      int
	pkg             *Package
	newPackageFacts []analysis.Fact
	problems        []Problem

	pkgFacts map[*types.Package][]analysis.Fact
}

func (ac *analysisAction) String() string {
	return fmt.Sprintf("%s @ %s", ac.analyzer, ac.pkg)
}

func (ac *analysisAction) allObjectFacts() []analysis.ObjectFact {
	out := make([]analysis.ObjectFact, 0, len(ac.pkg.facts[ac.analyzerID]))
	for obj, facts := range ac.pkg.facts[ac.analyzerID] {
		for _, fact := range facts {
			out = append(out, analysis.ObjectFact{
				Object: obj,
				Fact:   fact,
			})
		}
	}
	return out
}

func (ac *analysisAction) allPackageFacts() []analysis.PackageFact {
	out := make([]analysis.PackageFact, 0, len(ac.pkgFacts))
	for pkg, facts := range ac.pkgFacts {
		for _, fact := range facts {
			out = append(out, analysis.PackageFact{
				Package: pkg,
				Fact:    fact,
			})
		}
	}
	return out
}

func (ac *analysisAction) importObjectFact(obj types.Object, fact analysis.Fact) bool {
	if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
		panic("analysis doesn't export any facts")
	}
	for _, f := range ac.pkg.facts[ac.analyzerID][obj] {
		if reflect.TypeOf(f) == reflect.TypeOf(fact) {
			reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem())
			return true
		}
	}
	return false
}

func (ac *analysisAction) importPackageFact(pkg *types.Package, fact analysis.Fact) bool {
	if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
		panic("analysis doesn't export any facts")
	}
	for _, f := range ac.pkgFacts[pkg] {
		if reflect.TypeOf(f) == reflect.TypeOf(fact) {
			reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem())
			return true
		}
	}
	return false
}

func (ac *analysisAction) exportObjectFact(obj types.Object, fact analysis.Fact) {
	if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
		panic("analysis doesn't export any facts")
	}
	ac.pkg.facts[ac.analyzerID][obj] = append(ac.pkg.facts[ac.analyzerID][obj], fact)
}

func (ac *analysisAction) exportPackageFact(fact analysis.Fact) {
	if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
		panic("analysis doesn't export any facts")
	}
	ac.pkgFacts[ac.pkg.Types] = append(ac.pkgFacts[ac.pkg.Types], fact)
	ac.newPackageFacts = append(ac.newPackageFacts, fact)
}

func (ac *analysisAction) report(pass *analysis.Pass, d analysis.Diagnostic) {
	p := Problem{
		Pos:     DisplayPosition(pass.Fset, d.Pos),
		End:     DisplayPosition(pass.Fset, d.End),
		Message: d.Message,
		Check:   pass.Analyzer.Name,
	}
	ac.problems = append(ac.problems, p)
}

// runAnalysis runs the analysis described by ac, memoizing the result on
// the package so that each analyzer executes at most once per package.
// Concurrent callers for the same analyzer wait on the result's ready
// channel instead of recomputing it.
func (r *Runner) runAnalysis(ac *analysisAction) (ret interface{}, err error) {
	ac.pkg.resultsMu.Lock()
	res := ac.pkg.results[r.analyzerIDs.get(ac.analyzer)]
	if res != nil {
		ac.pkg.resultsMu.Unlock()
		<-res.ready
		return res.v, res.err
	} else {
		res = &result{
			ready: make(chan struct{}),
		}
		ac.pkg.results[r.analyzerIDs.get(ac.analyzer)] = res
		ac.pkg.resultsMu.Unlock()

		defer func() {
			res.v = ret
			res.err = err
			close(res.ready)
		}()

		pass := new(analysis.Pass)
		*pass = analysis.Pass{
			Analyzer: ac.analyzer,
			Fset:     ac.pkg.Fset,
			Files:    ac.pkg.Syntax,
			// type information may be nil or may be populated. if it is
			// nil, it will get populated later.
			Pkg:               ac.pkg.Types,
			TypesInfo:         ac.pkg.TypesInfo,
			TypesSizes:        ac.pkg.TypesSizes,
			ResultOf:          map[*analysis.Analyzer]interface{}{},
			ImportObjectFact:  ac.importObjectFact,
			ImportPackageFact: ac.importPackageFact,
			ExportObjectFact:  ac.exportObjectFact,
			ExportPackageFact: ac.exportPackageFact,
			Report: func(d analysis.Diagnostic) {
				ac.report(pass, d)
			},
			AllObjectFacts:  ac.allObjectFacts,
			AllPackageFacts: ac.allPackageFacts,
		}

		if !ac.pkg.initial {
			// Don't report problems in dependencies
			pass.Report = func(analysis.Diagnostic) {}
		}
		return r.runAnalysisUser(pass, ac)
	}
}

func (r *Runner) loadCachedFacts(a *analysis.Analyzer, pkg *Package) ([]Fact, bool) {
	if len(a.FactTypes) == 0 {
		return nil, true
	}

	var facts []Fact
	// Look in the cache for facts
	aID, err := passActionID(pkg, a)
	if err != nil {
		return nil, false
	}
	aID = cache.Subkey(aID, "facts")
	b, _, err := r.cache.GetBytes(aID)
	if err != nil {
		// No cached facts, analyse this package like a user-provided one, but ignore diagnostics
		return nil, false
	}

	if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&facts); err != nil {
		// Cached facts are broken, analyse this package like a user-provided one, but ignore diagnostics
		return nil, false
	}
	return facts, true
}

type dependencyError struct {
	dep string
	err error
}

func (err dependencyError) nested() dependencyError {
	if o, ok := err.err.(dependencyError); ok {
		return o.nested()
	}
	return err
}

func (err dependencyError) Error() string {
	if o, ok := err.err.(dependencyError); ok {
		return o.Error()
	}
	return fmt.Sprintf("error running dependency %s: %s", err.dep, err.err)
}

func (r *Runner) makeAnalysisAction(a *analysis.Analyzer, pkg *Package) *analysisAction {
	aid := r.analyzerIDs.get(a)
	ac := &analysisAction{
		analyzer:   a,
		analyzerID: aid,
		pkg:        pkg,
	}

	if len(a.FactTypes) == 0 {
		return ac
	}

	// Merge all package facts of dependencies
	ac.pkgFacts = map[*types.Package][]analysis.Fact{}
	seen := map[*Package]struct{}{}
	var dfs func(*Package)
	dfs = func(pkg *Package) {
		if _, ok := seen[pkg]; ok {
			return
		}
		seen[pkg] = struct{}{}
		s := pkg.pkgFacts[aid]
		ac.pkgFacts[pkg.Types] = s[0:len(s):len(s)]
		for _, imp := range pkg.Imports {
			dfs(imp)
		}
	}
	dfs(pkg)

	return ac
}

// analyses that we always want to run, even if they're not being run
// explicitly or as dependencies. these are necessary for the inner
// workings of the runner.
var injectedAnalyses = []*analysis.Analyzer{facts.Generated, config.Analyzer}

func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (interface{}, error) {
	if !ac.pkg.fromSource {
		panic(fmt.Sprintf("internal error: %s was not loaded from source", ac.pkg))
	}

	// User-provided package, analyse it
	// First analyze it with dependencies
	for _, req := range ac.analyzer.Requires {
		acReq := r.makeAnalysisAction(req, ac.pkg)
		ret, err := r.runAnalysis(acReq)
		if err != nil {
			// We couldn't run a dependency, no point in going on
			return nil, dependencyError{req.Name, err}
		}

		pass.ResultOf[req] = ret
	}

	// Then with this analyzer
	ret, err := ac.analyzer.Run(pass)
	if err != nil {
		return nil, err
	}

	if len(ac.analyzer.FactTypes) > 0 {
		// Merge new facts into the package and persist them.
		var facts []Fact
		for _, fact := range ac.newPackageFacts {
			id := r.analyzerIDs.get(ac.analyzer)
			ac.pkg.pkgFacts[id] = append(ac.pkg.pkgFacts[id], fact)
			facts = append(facts, Fact{"", fact})
		}
		for obj, afacts := range ac.pkg.facts[ac.analyzerID] {
			if obj.Pkg() != ac.pkg.Package.Types {
				continue
			}
			path, err := objectpath.For(obj)
			if err != nil {
				continue
			}
			for _, fact := range afacts {
				facts = append(facts, Fact{string(path), fact})
			}
		}

		buf := &bytes.Buffer{}
		if err := gob.NewEncoder(buf).Encode(facts); err != nil {
			return nil, err
		}
		aID, err := passActionID(ac.pkg, ac.analyzer)
		if err != nil {
			return nil, err
		}
		aID = cache.Subkey(aID, "facts")
		if err := r.cache.PutBytes(aID, buf.Bytes()); err != nil {
			return nil, err
		}
	}

	return ret, nil
}

// NewRunner returns a new runner that uses the default analysis cache
// and records statistics in stats.
func NewRunner(stats *Stats) (*Runner, error) {
	cache, err := cache.Default()
	if err != nil {
		return nil, err
	}

	return &Runner{
		cache: cache,
		stats: stats,
	}, nil
}

// Run loads packages corresponding to patterns and analyses them with
// analyzers. It returns the loaded packages, which contain reported
// diagnostics as well as extracted ignore directives.
//
// Note that diagnostics have not been filtered at this point yet, to
// accommodate cumulative analyses that require additional steps to
// produce diagnostics.
func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analysis.Analyzer, hasCumulative bool) ([]*Package, error) {
	r.analyzerIDs = analyzerIDs{m: map[*analysis.Analyzer]int{}}
	id := 0
	seen := map[*analysis.Analyzer]struct{}{}
	var dfs func(a *analysis.Analyzer)
	dfs = func(a *analysis.Analyzer) {
		if _, ok := seen[a]; ok {
			return
		}
		seen[a] = struct{}{}
		r.analyzerIDs.m[a] = id
		id++
		for _, f := range a.FactTypes {
			gob.Register(f)
		}
		for _, req := range a.Requires {
			dfs(req)
		}
	}
	for _, a := range analyzers {
		if v := a.Flags.Lookup("go"); v != nil {
			v.Value.Set(fmt.Sprintf("1.%d", r.goVersion))
		}
		dfs(a)
	}
	for _, a := range injectedAnalyses {
		dfs(a)
	}

	var dcfg packages.Config
	if cfg != nil {
		dcfg = *cfg
	}

	atomic.StoreUint32(&r.stats.State, StateGraph)
	initialPkgs, err := r.ld.Graph(dcfg, patterns...)
	if err != nil {
		return nil, err
	}

	defer r.cache.Trim()

	var allPkgs []*Package
	m := map[*packages.Package]*Package{}
	packages.Visit(initialPkgs, nil, func(l *packages.Package) {
		m[l] = &Package{
			Package:  l,
			results:  make([]*result, len(r.analyzerIDs.m)),
			facts:    make([]map[types.Object][]analysis.Fact, len(r.analyzerIDs.m)),
			pkgFacts: make([][]analysis.Fact, len(r.analyzerIDs.m)),
			done:     make(chan struct{}),
			// every package needs itself
			dependents:    1,
			canClearTypes: !hasCumulative,
		}
		allPkgs = append(allPkgs, m[l])
		for i := range m[l].facts {
			m[l].facts[i] = map[types.Object][]analysis.Fact{}
		}
		for _, err := range l.Errors {
			m[l].errs = append(m[l].errs, err)
		}
		for _, v := range l.Imports {
			m[v].dependents++
			m[l].Imports = append(m[l].Imports, m[v])
		}

		m[l].hash, err = packageHash(m[l])
		if err != nil {
			m[l].errs = append(m[l].errs, err)
		}
	})

	pkgs := make([]*Package, len(initialPkgs))
	for i, l := range initialPkgs {
		pkgs[i] = m[l]
		pkgs[i].initial = true
	}

	atomic.StoreUint32(&r.stats.InitialPackages, uint32(len(initialPkgs)))
	atomic.StoreUint32(&r.stats.TotalPackages, uint32(len(allPkgs)))
	atomic.StoreUint32(&r.stats.State, StateProcessing)

	var wg sync.WaitGroup
	wg.Add(len(allPkgs))
	r.loadSem = make(chan struct{}, runtime.GOMAXPROCS(-1))
	atomic.StoreUint32(&r.stats.TotalWorkers, uint32(cap(r.loadSem)))
	for _, pkg := range allPkgs {
		pkg := pkg
		go func() {
			r.processPkg(pkg, analyzers)

			if pkg.initial {
				atomic.AddUint32(&r.stats.ProcessedInitialPackages, 1)
			}
			atomic.AddUint32(&r.stats.Problems, uint32(len(pkg.problems)))
			wg.Done()
		}()
	}
	wg.Wait()

	return pkgs, nil
}

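// runSketch outlines how a driver might use NewRunner and Run. It is an
// illustrative sketch only – the Stats value, the packages.Config and the
// hasCumulative flag are placeholder choices, not requirements of the
// API – and it is not called anywhere in this package.
func runSketch(analyzers []*analysis.Analyzer) ([]*Package, error) {
	stats := &Stats{}
	r, err := NewRunner(stats)
	if err != nil {
		return nil, err
	}
	cfg := &packages.Config{
		Tests: true, // also load and analyze test packages
	}
	// The returned packages carry unfiltered diagnostics and the ignore
	// directives extracted from their sources; filtering happens later.
	return r.Run(cfg, []string{"./..."}, analyzers, false)
}
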
var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?`)

func parsePos(pos string) (token.Position, int, error) {
	if pos == "-" || pos == "" {
		return token.Position{}, 0, nil
	}
	parts := posRe.FindStringSubmatch(pos)
	if parts == nil {
		return token.Position{}, 0, fmt.Errorf("malformed position %q", pos)
	}
	file := parts[1]
	line, _ := strconv.Atoi(parts[2])
	col, _ := strconv.Atoi(parts[3])
	return token.Position{
		Filename: file,
		Line:     line,
		Column:   col,
	}, len(parts[0]), nil
}

// loadPkg loads a Go package. If the package is in the set of initial
// packages, it will be loaded from source, otherwise it will be
// loaded from export data. In the case that the package was loaded
// from export data, cached facts will also be loaded.
//
// Currently, only cached facts for this package will be loaded, not
// for any of its dependencies.
func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error {
	if pkg.Types != nil {
		panic(fmt.Sprintf("internal error: %s has already been loaded", pkg.Package))
	}

	// Load type information
	if pkg.initial {
		// Load package from source
		pkg.fromSource = true
		return r.ld.LoadFromSource(pkg.Package)
	}

	// Load package from export data
	if err := r.ld.LoadFromExport(pkg.Package); err != nil {
		// We asked Go to give us up to date export data, yet
		// we can't load it. There must be something wrong.
		//
		// Attempt loading from source. This should fail (because
		// otherwise there would be export data); we just want to
		// get the compile errors. If loading from source succeeds
		// we discard the result, anyway. Otherwise we'll fail
		// when trying to reload from export data later.
		//
		// FIXME(dh): we no longer reload from export data, so
		// theoretically we should be able to continue
		pkg.fromSource = true
		if err := r.ld.LoadFromSource(pkg.Package); err != nil {
			return err
		}
		// Make sure this package can't be imported successfully
		pkg.Package.Errors = append(pkg.Package.Errors, packages.Error{
			Pos:  "-",
			Msg:  fmt.Sprintf("could not load export data: %s", err),
			Kind: packages.ParseError,
		})
		return fmt.Errorf("could not load export data: %s", err)
	}

	failed := false
	seen := make([]bool, len(r.analyzerIDs.m))
	var dfs func(*analysis.Analyzer)
	dfs = func(a *analysis.Analyzer) {
		if seen[r.analyzerIDs.get(a)] {
			return
		}
		seen[r.analyzerIDs.get(a)] = true

		if len(a.FactTypes) > 0 {
			facts, ok := r.loadCachedFacts(a, pkg)
			if !ok {
				failed = true
				return
			}

			for _, f := range facts {
				if f.Path == "" {
					// This is a package fact
					pkg.pkgFacts[r.analyzerIDs.get(a)] = append(pkg.pkgFacts[r.analyzerIDs.get(a)], f.Fact)
					continue
				}
				obj, err := objectpath.Object(pkg.Types, objectpath.Path(f.Path))
				if err != nil {
					// Be lenient about these errors. For example, when
					// analysing io/ioutil from source, we may get a fact
					// for methods on the devNull type, and objectpath
					// will happily create a path for them. However, when
					// we later load io/ioutil from export data, the path
					// no longer resolves.
					//
					// If an exported type embeds the unexported type,
					// then (part of) the unexported type will become part
					// of the type information and our path will resolve
					// again.
					continue
				}
				pkg.facts[r.analyzerIDs.get(a)][obj] = append(pkg.facts[r.analyzerIDs.get(a)][obj], f.Fact)
			}
		}

		for _, req := range a.Requires {
			dfs(req)
		}
	}
	for _, a := range analyzers {
		dfs(a)
	}

	if failed {
		pkg.fromSource = true
		// XXX we added facts to the maps, we need to get rid of those
		return r.ld.LoadFromSource(pkg.Package)
	}

	return nil
}

type analysisError struct {
	analyzer *analysis.Analyzer
	pkg      *Package
	err      error
}

func (err analysisError) Error() string {
	return fmt.Sprintf("error running analyzer %s on %s: %s", err.analyzer, err.pkg, err.err)
}

// processPkg processes a package. This involves loading the package,
// either from export data or from source. For packages loaded from
// source, the provided analyzers will be run on the package.
func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) {
	defer func() {
		// Clear information we no longer need. Make sure to do this
		// when returning from processPkg so that we clear
		// dependencies, not just initial packages.
		pkg.TypesInfo = nil
		pkg.Syntax = nil
		pkg.results = nil

		atomic.AddUint32(&r.stats.ProcessedPackages, 1)
		pkg.decUse()
		close(pkg.done)
	}()

	// Ensure all packages have the generated map and config. This is
	// required by internals of the runner. Analyses that themselves
	// make use of either have an explicit dependency so that other
	// runners work correctly, too.
	analyzers = append(analyzers[0:len(analyzers):len(analyzers)], injectedAnalyses...)

	if len(pkg.errs) != 0 {
		return
	}

	for _, imp := range pkg.Imports {
		<-imp.done
		if len(imp.errs) > 0 {
			if imp.initial {
				// Don't print the error of the dependency since it's
				// an initial package and we're already printing the
				// error.
				pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s", imp, pkg))
			} else {
				var s string
				for _, err := range imp.errs {
					s += "\n\t" + err.Error()
				}
				pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s: %s", imp, pkg, s))
			}
			return
		}
	}
	if pkg.PkgPath == "unsafe" {
		pkg.Types = types.Unsafe
		return
	}

	r.loadSem <- struct{}{}
	atomic.AddUint32(&r.stats.ActiveWorkers, 1)
	defer func() {
		<-r.loadSem
		atomic.AddUint32(&r.stats.ActiveWorkers, ^uint32(0))
	}()
	if err := r.loadPkg(pkg, analyzers); err != nil {
		pkg.errs = append(pkg.errs, err)
		return
	}

	// A package's object facts are the union of all of its dependencies' facts.
	for _, imp := range pkg.Imports {
		for ai, m := range imp.facts {
			for obj, facts := range m {
				pkg.facts[ai][obj] = facts[0:len(facts):len(facts)]
			}
		}
	}

	if !pkg.fromSource {
		// Nothing left to do for the package.
		return
	}

	// Run analyses on initial packages and those missing facts
	var wg sync.WaitGroup
	wg.Add(len(analyzers))
	errs := make([]error, len(analyzers))
	var acs []*analysisAction
	for i, a := range analyzers {
		i := i
		a := a
		ac := r.makeAnalysisAction(a, pkg)
		acs = append(acs, ac)
		go func() {
			defer wg.Done()
			// Only initial packages and packages with missing
			// facts will have been loaded from source.
			if pkg.initial || r.hasFacts(a) {
				if _, err := r.runAnalysis(ac); err != nil {
					errs[i] = analysisError{a, pkg, err}
					return
				}
			}
		}()
	}
	wg.Wait()

	depErrors := map[dependencyError]int{}
	for _, err := range errs {
		if err == nil {
			continue
		}
		switch err := err.(type) {
		case analysisError:
			switch err := err.err.(type) {
			case dependencyError:
				depErrors[err.nested()]++
			default:
				pkg.errs = append(pkg.errs, err)
			}
		default:
			pkg.errs = append(pkg.errs, err)
		}
	}
	for err, count := range depErrors {
		pkg.errs = append(pkg.errs,
			fmt.Errorf("could not run %s@%s, preventing %d analyzers from running: %s", err.dep, pkg, count, err.err))
	}

	// We can't process ignores at this point because `unused` needs
	// to see more than one package to make its decision.
	ignores, problems := parseDirectives(pkg.Package)
	pkg.ignores = append(pkg.ignores, ignores...)
	pkg.problems = append(pkg.problems, problems...)
	for _, ac := range acs {
		pkg.problems = append(pkg.problems, ac.problems...)
	}

	if pkg.initial {
		// Only initial packages have these analyzers run, and only
		// initial packages need these.
		if pkg.results[r.analyzerIDs.get(config.Analyzer)].v != nil {
			pkg.cfg = pkg.results[r.analyzerIDs.get(config.Analyzer)].v.(*config.Config)
		}
		pkg.gen = pkg.results[r.analyzerIDs.get(facts.Generated)].v.(map[string]facts.Generator)
	}

	// In a previous version of the code, we would throw away all type
	// information and reload it from export data. That was
	// nonsensical. The *types.Package doesn't keep any information
	// live that export data wouldn't also. We only need to discard
	// the AST and the TypesInfo maps; that happens after we return
	// from processPkg.
}

// hasFacts reports whether an analysis exports any facts. An analysis
// that has a transitive dependency that exports facts is considered
// to be exporting facts.
func (r *Runner) hasFacts(a *analysis.Analyzer) bool {
	ret := false
	seen := make([]bool, len(r.analyzerIDs.m))
	var dfs func(*analysis.Analyzer)
	dfs = func(a *analysis.Analyzer) {
		if seen[r.analyzerIDs.get(a)] {
			return
		}
		seen[r.analyzerIDs.get(a)] = true
		if len(a.FactTypes) > 0 {
			ret = true
		}
		for _, req := range a.Requires {
			if ret {
				break
			}
			dfs(req)
		}
	}
	dfs(a)
	return ret
}

func parseDirective(s string) (cmd string, args []string) {
	if !strings.HasPrefix(s, "//lint:") {
		return "", nil
	}
	s = strings.TrimPrefix(s, "//lint:")
	fields := strings.Split(s, " ")
	return fields[0], fields[1:]
}

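// parseDirectiveExample shows the directive format that parseDirective
// handles. The function and the sample comment text are illustrative
// only and are not used by the runner.
func parseDirectiveExample() {
	cmd, args := parseDirective("//lint:ignore SA4006 this value is never used")
	_ = cmd  // "ignore"
	_ = args // ["SA4006", "this", "value", "is", "never", "used"]
}
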
// parseDirectives extracts all linter directives from the source
// files of the package. Malformed directives are returned as problems.
func parseDirectives(pkg *packages.Package) ([]Ignore, []Problem) {
	var ignores []Ignore
	var problems []Problem

	for _, f := range pkg.Syntax {
		found := false
	commentLoop:
		for _, cg := range f.Comments {
			for _, c := range cg.List {
				if strings.Contains(c.Text, "//lint:") {
					found = true
					break commentLoop
				}
			}
		}
		if !found {
			continue
		}
		cm := ast.NewCommentMap(pkg.Fset, f, f.Comments)
		for node, cgs := range cm {
			for _, cg := range cgs {
				for _, c := range cg.List {
					if !strings.HasPrefix(c.Text, "//lint:") {
						continue
					}
					cmd, args := parseDirective(c.Text)
					switch cmd {
					case "ignore", "file-ignore":
						if len(args) < 2 {
							p := Problem{
								Pos:      DisplayPosition(pkg.Fset, c.Pos()),
								Message:  "malformed linter directive; missing the required reason field?",
								Severity: Error,
								Check:    "compile",
							}
							problems = append(problems, p)
							continue
						}
					default:
						// unknown directive, ignore
						continue
					}
					checks := strings.Split(args[0], ",")
					pos := DisplayPosition(pkg.Fset, node.Pos())
					var ig Ignore
					switch cmd {
					case "ignore":
						ig = &LineIgnore{
							File:   pos.Filename,
							Line:   pos.Line,
							Checks: checks,
							Pos:    c.Pos(),
						}
					case "file-ignore":
						ig = &FileIgnore{
							File:   pos.Filename,
							Checks: checks,
						}
					}
					ignores = append(ignores, ig)
				}
			}
		}
	}

	return ignores, problems
}

// packageHash computes a package's hash. The hash is based on all Go
// files that make up the package, as well as the hashes of imported
// packages.
func packageHash(pkg *Package) (string, error) {
	key := cache.NewHash("package hash")
	fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
	for _, f := range pkg.CompiledGoFiles {
		h, err := cache.FileHash(f)
		if err != nil {
			return "", err
		}
		fmt.Fprintf(key, "file %s %x\n", f, h)
	}

	imps := make([]*Package, len(pkg.Imports))
	copy(imps, pkg.Imports)
	sort.Slice(imps, func(i, j int) bool {
		return imps[i].PkgPath < imps[j].PkgPath
	})
	for _, dep := range imps {
		if dep.PkgPath == "unsafe" {
			continue
		}

		fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, dep.hash)
	}
	h := key.Sum()
	return hex.EncodeToString(h[:]), nil
}

// passActionID computes an ActionID for an analysis pass.
func passActionID(pkg *Package, analyzer *analysis.Analyzer) (cache.ActionID, error) {
	key := cache.NewHash("action ID")
	fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
	fmt.Fprintf(key, "pkghash %s\n", pkg.hash)
	fmt.Fprintf(key, "analyzer %s\n", analyzer.Name)

	return key.Sum(), nil
}