Update module spf13/viper to v1.7.0 (#494)
Update module spf13/viper to v1.7.0

Reviewed-on: https://kolaente.dev/vikunja/api/pulls/494
vendor/honnef.co/go/tools/lint/generated.go (vendored, 44 lines deleted)
@@ -1,44 +0,0 @@
package lint

import (
    "bufio"
    "bytes"
    "io"
    "os"
)

var (
    // used by cgo before Go 1.11
    oldCgo = []byte("// Created by cgo - DO NOT EDIT")
    prefix = []byte("// Code generated ")
    suffix = []byte(" DO NOT EDIT.")
    nl     = []byte("\n")
    crnl   = []byte("\r\n")
)

func isGenerated(path string) bool {
    f, err := os.Open(path)
    if err != nil {
        return false
    }
    defer f.Close()
    br := bufio.NewReader(f)
    for {
        s, err := br.ReadBytes('\n')
        if err != nil && err != io.EOF {
            return false
        }
        s = bytes.TrimSuffix(s, crnl)
        s = bytes.TrimSuffix(s, nl)
        if bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) {
            return true
        }
        if bytes.Equal(s, oldCgo) {
            return true
        }
        if err == io.EOF {
            break
        }
    }
    return false
}
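For context, the markers this removed helper scanned for follow Go's documented convention for generated files: a line matching `// Code generated ... DO NOT EDIT.`, plus the older cgo form `// Created by cgo - DO NOT EDIT`. A typical header looks like this (the generator name is only an example):

    // Code generated by stringer -type=Severity; DO NOT EDIT.

    package lint

In this release the detection moved out of this file; the facts-based replacements (Generator, IsGenerated) appear in the lintdsl changes further down.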
vendor/honnef.co/go/tools/lint/lint.go (vendored, 712 lines changed)
@@ -4,34 +4,53 @@ package lint // import "honnef.co/go/tools/lint"
import (
    "bytes"
    "fmt"
    "go/ast"
    "go/scanner"
    "go/token"
    "go/types"
    "io"
    "os"
    "path/filepath"
    "runtime"
    "sort"
    "strings"
    "sync"
    "time"
    "sync/atomic"
    "unicode"

    "golang.org/x/tools/go/ast/inspector"
    "golang.org/x/tools/go/analysis"
    "golang.org/x/tools/go/packages"
    "honnef.co/go/tools/config"
    "honnef.co/go/tools/ssa"
    "honnef.co/go/tools/ssa/ssautil"
)

type Job struct {
    Pkg       *Pkg
    GoVersion int
type Documentation struct {
    Title      string
    Text       string
    Since      string
    NonDefault bool
    Options    []string
}

    check    Check
    problems []Problem

    duration time.Duration
func (doc *Documentation) String() string {
    b := &strings.Builder{}
    fmt.Fprintf(b, "%s\n\n", doc.Title)
    if doc.Text != "" {
        fmt.Fprintf(b, "%s\n\n", doc.Text)
    }
    fmt.Fprint(b, "Available since\n ")
    if doc.Since == "" {
        fmt.Fprint(b, "unreleased")
    } else {
        fmt.Fprintf(b, "%s", doc.Since)
    }
    if doc.NonDefault {
        fmt.Fprint(b, ", non-default")
    }
    fmt.Fprint(b, "\n")
    if len(doc.Options) > 0 {
        fmt.Fprintf(b, "\nOptions\n")
        for _, opt := range doc.Options {
            fmt.Fprintf(b, " %s", opt)
        }
        fmt.Fprint(b, "\n")
    }
    return b.String()
}

type Ignore interface {
@@ -42,17 +61,18 @@ type LineIgnore struct {
    File    string
    Line    int
    Checks  []string
    matched bool
    pos     token.Pos
    Matched bool
    Pos     token.Pos
}

func (li *LineIgnore) Match(p Problem) bool {
    if p.Position.Filename != li.File || p.Position.Line != li.Line {
    pos := p.Pos
    if pos.Filename != li.File || pos.Line != li.Line {
        return false
    }
    for _, c := range li.Checks {
        if m, _ := filepath.Match(c, p.Check); m {
            li.matched = true
            li.Matched = true
            return true
        }
    }
@@ -61,7 +81,7 @@ func (li *LineIgnore) Match(p Problem) bool {

func (li *LineIgnore) String() string {
    matched := "not matched"
    if li.matched {
    if li.Matched {
        matched = "matched"
    }
    return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched)
@@ -73,7 +93,7 @@ type FileIgnore struct {
}

func (fi *FileIgnore) Match(p Problem) bool {
    if p.Position.Filename != fi.File {
    if p.Pos.Filename != fi.File {
        return false
    }
    for _, c := range fi.Checks {
@@ -84,43 +104,6 @@ func (fi *FileIgnore) Match(p Problem) bool {
    return false
}

type GlobIgnore struct {
    Pattern string
    Checks  []string
}

func (gi *GlobIgnore) Match(p Problem) bool {
    if gi.Pattern != "*" {
        pkgpath := p.Package.Types.Path()
        if strings.HasSuffix(pkgpath, "_test") {
            pkgpath = pkgpath[:len(pkgpath)-len("_test")]
        }
        name := filepath.Join(pkgpath, filepath.Base(p.Position.Filename))
        if m, _ := filepath.Match(gi.Pattern, name); !m {
            return false
        }
    }
    for _, c := range gi.Checks {
        if m, _ := filepath.Match(c, p.Check); m {
            return true
        }
    }
    return false
}

type Program struct {
    SSA             *ssa.Program
    InitialPackages []*Pkg
    AllPackages     []*packages.Package
    AllFunctions    []*ssa.Function
}

func (prog *Program) Fset() *token.FileSet {
    return prog.InitialPackages[0].Fset
}

type Func func(*Job)

type Severity uint8

const (
@@ -131,367 +114,245 @@ const (

// Problem represents a problem in some source code.
type Problem struct {
    Position token.Position // position in source file
    Text     string         // the prose that describes the problem
    Pos      token.Position
    End      token.Position
    Message  string
    Check    string
    Package  *Pkg
    Severity Severity
}

func (p *Problem) String() string {
    if p.Check == "" {
        return p.Text
    }
    return fmt.Sprintf("%s (%s)", p.Text, p.Check)
}

type Checker interface {
    Name() string
    Prefix() string
    Init(*Program)
    Checks() []Check
}

type Check struct {
    Fn              Func
    ID              string
    FilterGenerated bool
    Doc             string
    return fmt.Sprintf("%s (%s)", p.Message, p.Check)
}

// A Linter lints Go source code.
type Linter struct {
    Checkers      []Checker
    Ignores       []Ignore
    GoVersion     int
    ReturnIgnored bool
    Config        config.Config

    MaxConcurrentJobs int
    PrintStats        bool

    automaticIgnores []Ignore
    Checkers           []*analysis.Analyzer
    CumulativeCheckers []CumulativeChecker
    GoVersion          int
    Config             config.Config
    Stats              Stats
}

func (l *Linter) ignore(p Problem) bool {
    ignored := false
    for _, ig := range l.automaticIgnores {
        // We cannot short-circuit these, as we want to record, for
        // each ignore, whether it matched or not.
        if ig.Match(p) {
            ignored = true
        }
type CumulativeChecker interface {
    Analyzer() *analysis.Analyzer
    Result() []types.Object
    ProblemObject(*token.FileSet, types.Object) Problem
}

func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error) {
    var allAnalyzers []*analysis.Analyzer
    allAnalyzers = append(allAnalyzers, l.Checkers...)
    for _, cum := range l.CumulativeCheckers {
        allAnalyzers = append(allAnalyzers, cum.Analyzer())
    }
    if ignored {
        // no need to execute other ignores if we've already had a
        // match.
        return true
    }
    for _, ig := range l.Ignores {
        // We can short-circuit here, as we aren't tracking any
        // information.
        if ig.Match(p) {
            return true

    // The -checks command line flag overrules all configuration
    // files, which means that for `-checks="foo"`, no check other
    // than foo can ever be reported to the user. Make use of this
    // fact to cull the list of analyses we need to run.

    // replace "inherit" with "all", as we don't want to base the
    // list of all checks on the default configuration, which
    // disables certain checks.
    checks := make([]string, len(l.Config.Checks))
    copy(checks, l.Config.Checks)
    for i, c := range checks {
        if c == "inherit" {
            checks[i] = "all"
        }
    }

    return false
}

func (j *Job) File(node Positioner) *ast.File {
    return j.Pkg.tokenFileMap[j.Pkg.Fset.File(node.Pos())]
}

func parseDirective(s string) (cmd string, args []string) {
    if !strings.HasPrefix(s, "//lint:") {
        return "", nil
    }
    s = strings.TrimPrefix(s, "//lint:")
    fields := strings.Split(s, " ")
    return fields[0], fields[1:]
}
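To make the directive grammar concrete: parseDirective splits on spaces after the `//lint:` prefix, so the first field is the command and the remaining fields are its arguments. A sketch of what that looks like in user code (the check names and reasons are invented):

    //lint:ignore SA4006 this assignment is intentional in the example
    x := compute()

    //lint:file-ignore ST1000 this file predates the package-comment rule

For the first directive, parseDirective returns cmd "ignore" with args ["SA4006", "this", "assignment", ...]; the handling below recombines args[0] as a comma-separated check list and rejects directives with fewer than two fields, which is why a reason is required.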

type PerfStats struct {
    PackageLoading time.Duration
    SSABuild       time.Duration
    OtherInitWork  time.Duration
    CheckerInits   map[string]time.Duration
    Jobs           []JobStat
}

type JobStat struct {
    Job      string
    Duration time.Duration
}

func (stats *PerfStats) Print(w io.Writer) {
    fmt.Fprintln(w, "Package loading:", stats.PackageLoading)
    fmt.Fprintln(w, "SSA build:", stats.SSABuild)
    fmt.Fprintln(w, "Other init work:", stats.OtherInitWork)

    fmt.Fprintln(w, "Checker inits:")
    for checker, d := range stats.CheckerInits {
        fmt.Fprintf(w, "\t%s: %s\n", checker, d)
    }
    fmt.Fprintln(w)

    fmt.Fprintln(w, "Jobs:")
    sort.Slice(stats.Jobs, func(i, j int) bool {
        return stats.Jobs[i].Duration < stats.Jobs[j].Duration
    })
    var total time.Duration
    for _, job := range stats.Jobs {
        fmt.Fprintf(w, "\t%s: %s\n", job.Job, job.Duration)
        total += job.Duration
    }
    fmt.Fprintf(w, "\tTotal: %s\n", total)
}

func (l *Linter) Lint(initial []*packages.Package, stats *PerfStats) []Problem {
    allPkgs := allPackages(initial)
    t := time.Now()
    ssaprog, _ := ssautil.Packages(allPkgs, ssa.GlobalDebug)
    ssaprog.Build()
    if stats != nil {
        stats.SSABuild = time.Since(t)
    }
    runtime.GC()

    t = time.Now()
    pkgMap := map[*ssa.Package]*Pkg{}
    var pkgs []*Pkg
    for _, pkg := range initial {
        ssapkg := ssaprog.Package(pkg.Types)
        var cfg config.Config
        if len(pkg.GoFiles) != 0 {
            path := pkg.GoFiles[0]
            dir := filepath.Dir(path)
            var err error
            // OPT(dh): we're rebuilding the entire config tree for
            // each package. for example, if we check a/b/c and
            // a/b/c/d, we'll process a, a/b, a/b/c, a, a/b, a/b/c,
            // a/b/c/d – we should cache configs per package and only
            // load the new levels.
            cfg, err = config.Load(dir)
            if err != nil {
                // FIXME(dh): we couldn't load the config, what are we
                // supposed to do? probably tell the user somehow
            }
            cfg = cfg.Merge(l.Config)
    allowed := FilterChecks(allAnalyzers, checks)
    var allowedAnalyzers []*analysis.Analyzer
    for _, c := range l.Checkers {
        if allowed[c.Name] {
            allowedAnalyzers = append(allowedAnalyzers, c)
        }

        pkg := &Pkg{
            SSA:          ssapkg,
            Package:      pkg,
            Config:       cfg,
            Generated:    map[string]bool{},
            tokenFileMap: map[*token.File]*ast.File{},
        }
        pkg.Inspector = inspector.New(pkg.Syntax)
        for _, f := range pkg.Syntax {
            tf := pkg.Fset.File(f.Pos())
            pkg.tokenFileMap[tf] = f

            path := DisplayPosition(pkg.Fset, f.Pos()).Filename
            pkg.Generated[path] = isGenerated(path)
        }
        pkgMap[ssapkg] = pkg
        pkgs = append(pkgs, pkg)
    }

    prog := &Program{
        SSA:             ssaprog,
        InitialPackages: pkgs,
        AllPackages:     allPkgs,
    }

    for fn := range ssautil.AllFunctions(ssaprog) {
        prog.AllFunctions = append(prog.AllFunctions, fn)
        if fn.Pkg == nil {
            continue
        }
        if pkg, ok := pkgMap[fn.Pkg]; ok {
            pkg.InitialFunctions = append(pkg.InitialFunctions, fn)
    hasCumulative := false
    for _, cum := range l.CumulativeCheckers {
        a := cum.Analyzer()
        if allowed[a.Name] {
            hasCumulative = true
            allowedAnalyzers = append(allowedAnalyzers, a)
        }
    }

    var out []Problem
    l.automaticIgnores = nil
    for _, pkg := range initial {
        for _, f := range pkg.Syntax {
            found := false
        commentLoop:
            for _, cg := range f.Comments {
                for _, c := range cg.List {
                    if strings.Contains(c.Text, "//lint:") {
                        found = true
                        break commentLoop
    r, err := NewRunner(&l.Stats)
    if err != nil {
        return nil, err
    }
    r.goVersion = l.GoVersion

    pkgs, err := r.Run(cfg, patterns, allowedAnalyzers, hasCumulative)
    if err != nil {
        return nil, err
    }

    tpkgToPkg := map[*types.Package]*Package{}
    for _, pkg := range pkgs {
        tpkgToPkg[pkg.Types] = pkg

        for _, e := range pkg.errs {
            switch e := e.(type) {
            case types.Error:
                p := Problem{
                    Pos:      e.Fset.PositionFor(e.Pos, false),
                    Message:  e.Msg,
                    Severity: Error,
                    Check:    "compile",
                }
                pkg.problems = append(pkg.problems, p)
            case packages.Error:
                msg := e.Msg
                if len(msg) != 0 && msg[0] == '\n' {
                    // TODO(dh): See https://github.com/golang/go/issues/32363
                    msg = msg[1:]
                }

                var pos token.Position
                if e.Pos == "" {
                    // Under certain conditions (malformed package
                    // declarations, multiple packages in the same
                    // directory), go list emits an error on stderr
                    // instead of JSON. Those errors do not have
                    // associated position information in
                    // go/packages.Error, even though the output on
                    // stderr may contain it.
                    if p, n, err := parsePos(msg); err == nil {
                        if abs, err := filepath.Abs(p.Filename); err == nil {
                            p.Filename = abs
                        }
                        pos = p
                        msg = msg[n+2:]
                    }
                } else {
                    var err error
                    pos, _, err = parsePos(e.Pos)
                    if err != nil {
                        panic(fmt.Sprintf("internal error: %s", e))
                    }
                }
            }
            if !found {
                continue
            }
            cm := ast.NewCommentMap(pkg.Fset, f, f.Comments)
            for node, cgs := range cm {
                for _, cg := range cgs {
                    for _, c := range cg.List {
                        if !strings.HasPrefix(c.Text, "//lint:") {
                            continue
                        }
                        cmd, args := parseDirective(c.Text)
                        switch cmd {
                        case "ignore", "file-ignore":
                            if len(args) < 2 {
                                // FIXME(dh): this causes duplicated warnings when using megacheck
                                p := Problem{
                                    Position: DisplayPosition(prog.Fset(), c.Pos()),
                                    Text:     "malformed linter directive; missing the required reason field?",
                                    Check:    "",
                                    Package:  nil,
                                }
                                out = append(out, p)
                                continue
                            }
                        default:
                            // unknown directive, ignore
                            continue
                        }
                        checks := strings.Split(args[0], ",")
                        pos := DisplayPosition(prog.Fset(), node.Pos())
                        var ig Ignore
                        switch cmd {
                        case "ignore":
                            ig = &LineIgnore{
                                File:   pos.Filename,
                                Line:   pos.Line,
                                Checks: checks,
                                pos:    c.Pos(),
                            }
                        case "file-ignore":
                            ig = &FileIgnore{
                                File:   pos.Filename,
                                Checks: checks,
                            }
                        }
                        l.automaticIgnores = append(l.automaticIgnores, ig)
                p := Problem{
                    Pos:      pos,
                    Message:  msg,
                    Severity: Error,
                    Check:    "compile",
                }
                pkg.problems = append(pkg.problems, p)
            case scanner.ErrorList:
                for _, e := range e {
                    p := Problem{
                        Pos:      e.Pos,
                        Message:  e.Msg,
                        Severity: Error,
                        Check:    "compile",
                    }
                    pkg.problems = append(pkg.problems, p)
                }
            }
        }
    }

    if stats != nil {
        stats.OtherInitWork = time.Since(t)
    }

    for _, checker := range l.Checkers {
        t := time.Now()
        checker.Init(prog)
        if stats != nil {
            stats.CheckerInits[checker.Name()] = time.Since(t)
        }
    }

    var jobs []*Job
    var allChecks []string

    var wg sync.WaitGroup
    for _, checker := range l.Checkers {
        for _, check := range checker.Checks() {
            allChecks = append(allChecks, check.ID)
            if check.Fn == nil {
                continue
            }
            for _, pkg := range pkgs {
                j := &Job{
                    Pkg:       pkg,
                    check:     check,
                    GoVersion: l.GoVersion,
            case error:
                p := Problem{
                    Pos:      token.Position{},
                    Message:  e.Error(),
                    Severity: Error,
                    Check:    "compile",
                }
                jobs = append(jobs, j)
                wg.Add(1)
                go func(check Check, j *Job) {
                    t := time.Now()
                    check.Fn(j)
                    j.duration = time.Since(t)
                    wg.Done()
                }(check, j)
                pkg.problems = append(pkg.problems, p)
            }
        }
    }

    wg.Wait()

    for _, j := range jobs {
        if stats != nil {
            stats.Jobs = append(stats.Jobs, JobStat{j.check.ID, j.duration})
        }
        for _, p := range j.problems {
            if p.Package == nil {
                panic(fmt.Sprintf("internal error: problem at position %s has nil package", p.Position))
            }
            allowedChecks := FilterChecks(allChecks, p.Package.Config.Checks)

            if l.ignore(p) {
                p.Severity = Ignored
            }
            // TODO(dh): support globs in check white/blacklist
            // OPT(dh): this approach doesn't actually disable checks,
            // it just discards their results. For the moment, that's
            // fine. None of our checks are super expensive. In the
            // future, we may want to provide opt-in expensive
            // analysis, which shouldn't run at all. It may be easiest
            // to implement this in the individual checks.
            if (l.ReturnIgnored || p.Severity != Ignored) && allowedChecks[p.Check] {
                out = append(out, p)
            }
        }
    }

    for _, ig := range l.automaticIgnores {
        ig, ok := ig.(*LineIgnore)
        if !ok {
            continue
        }
        if ig.matched {
            continue
        }

        couldveMatched := false
        for _, pkg := range pkgs {
            for _, f := range pkg.tokenFileMap {
                if prog.Fset().Position(f.Pos()).Filename != ig.File {
    atomic.StoreUint32(&r.stats.State, StateCumulative)
    var problems []Problem
    for _, cum := range l.CumulativeCheckers {
        for _, res := range cum.Result() {
            pkg := tpkgToPkg[res.Pkg()]
            allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
            if allowedChecks[cum.Analyzer().Name] {
                pos := DisplayPosition(pkg.Fset, res.Pos())
                // FIXME(dh): why are we ignoring generated files
                // here? Surely this is specific to 'unused', not all
                // cumulative checkers
                if _, ok := pkg.gen[pos.Filename]; ok {
                    continue
                }
                allowedChecks := FilterChecks(allChecks, pkg.Config.Checks)
                for _, c := range ig.Checks {
                    if !allowedChecks[c] {
                        continue
                    }
                    couldveMatched = true
                    break
                p := cum.ProblemObject(pkg.Fset, res)
                problems = append(problems, p)
            }
        }
    }

    for _, pkg := range pkgs {
        for _, ig := range pkg.ignores {
            for i := range pkg.problems {
                p := &pkg.problems[i]
                if ig.Match(*p) {
                    p.Severity = Ignored
                }
            }
            for i := range problems {
                p := &problems[i]
                if ig.Match(*p) {
                    p.Severity = Ignored
                }
                break
            }
        }

        if !couldveMatched {
            // The ignored checks were disabled for the containing package.
            // Don't flag the ignore for not having matched.
            continue
        if pkg.cfg == nil {
            // The package failed to load, otherwise we would have a
            // valid config. Pass through all errors.
            problems = append(problems, pkg.problems...)
        } else {
            for _, p := range pkg.problems {
                allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
                allowedChecks["compile"] = true
                if allowedChecks[p.Check] {
                    problems = append(problems, p)
                }
            }
        }
        p := Problem{
            Position: DisplayPosition(prog.Fset(), ig.pos),
            Text:     "this linter directive didn't match anything; should it be removed?",
            Check:    "",
            Package:  nil,

        for _, ig := range pkg.ignores {
            ig, ok := ig.(*LineIgnore)
            if !ok {
                continue
            }
            if ig.Matched {
                continue
            }

            couldveMatched := false
            allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
            for _, c := range ig.Checks {
                if !allowedChecks[c] {
                    continue
                }
                couldveMatched = true
                break
            }

            if !couldveMatched {
                // The ignored checks were disabled for the containing package.
                // Don't flag the ignore for not having matched.
                continue
            }
            p := Problem{
                Pos:     DisplayPosition(pkg.Fset, ig.Pos),
                Message: "this linter directive didn't match anything; should it be removed?",
                Check:   "",
            }
            problems = append(problems, p)
        }
        out = append(out, p)
    }

    sort.Slice(out, func(i int, j int) bool {
        pi, pj := out[i].Position, out[j].Position
    if len(problems) == 0 {
        return nil, nil
    }

    sort.Slice(problems, func(i, j int) bool {
        pi := problems[i].Pos
        pj := problems[j].Pos

        if pi.Filename != pj.Filename {
            return pi.Filename < pj.Filename
@@ -503,32 +364,22 @@ func (l *Linter) Lint(initial []*packages.Package, stats *PerfStats) []Problem {
        return pi.Column < pj.Column
    }

        return out[i].Text < out[j].Text
        return problems[i].Message < problems[j].Message
    })

    if l.PrintStats && stats != nil {
        stats.Print(os.Stderr)
    }

    if len(out) < 2 {
        return out
    }

    uniq := make([]Problem, 0, len(out))
    uniq = append(uniq, out[0])
    prev := out[0]
    for _, p := range out[1:] {
        if prev.Position == p.Position && prev.Text == p.Text {
            continue
    var out []Problem
    out = append(out, problems[0])
    for i, p := range problems[1:] {
        // We may encounter duplicate problems because one file
        // can be part of many packages.
        if problems[i] != p {
            out = append(out, p)
        }
        prev = p
        uniq = append(uniq, p)
    }

    return uniq
    return out, nil
}

func FilterChecks(allChecks []string, checks []string) map[string]bool {
func FilterChecks(allChecks []*analysis.Analyzer, checks []string) map[string]bool {
    // OPT(dh): this entire computation could be cached per package
    allowedChecks := map[string]bool{}

@@ -541,7 +392,7 @@ func FilterChecks(allChecks []string, checks []string) map[string]bool {
        if check == "*" || check == "all" {
            // Match all
            for _, c := range allChecks {
                allowedChecks[c] = b
                allowedChecks[c.Name] = b
            }
        } else if strings.HasSuffix(check, "*") {
            // Glob
@@ -549,17 +400,17 @@ func FilterChecks(allChecks []string, checks []string) map[string]bool {
            isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1

            for _, c := range allChecks {
                idx := strings.IndexFunc(c, func(r rune) bool { return unicode.IsNumber(r) })
                idx := strings.IndexFunc(c.Name, func(r rune) bool { return unicode.IsNumber(r) })
                if isCat {
                    // Glob is S*, which should match S1000 but not SA1000
                    cat := c[:idx]
                    cat := c.Name[:idx]
                    if prefix == cat {
                        allowedChecks[c] = b
                        allowedChecks[c.Name] = b
                    }
                } else {
                    // Glob is S1*
                    if strings.HasPrefix(c, prefix) {
                        allowedChecks[c] = b
                    if strings.HasPrefix(c.Name, prefix) {
                        allowedChecks[c.Name] = b
                    }
                }
            }
@@ -571,28 +422,18 @@ func FilterChecks(allChecks []string, checks []string) map[string]bool {
    return allowedChecks
}
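A hedged sketch of driving the new signature (not part of the diff; the analyzers are stand-ins, and the disable semantics of a `-` prefix are inferred from the `b` flag visible in the surrounding hunks):

    package main

    import (
        "fmt"

        "golang.org/x/tools/go/analysis"
        "honnef.co/go/tools/lint"
    )

    func main() {
        s1000 := &analysis.Analyzer{Name: "S1000"}
        sa1000 := &analysis.Analyzer{Name: "SA1000"}
        // "all" enables every check, then "-SA1*" disables the SA1 checks.
        allowed := lint.FilterChecks([]*analysis.Analyzer{s1000, sa1000}, []string{"all", "-SA1*"})
        fmt.Println(allowed["S1000"], allowed["SA1000"]) // expected: true false
    }

The category logic shown above is what makes a glob like `S*` match S1000 without also matching SA1000: when the glob prefix contains no digits, it is compared against the check's letter category only.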

// Pkg represents a package being linted.
type Pkg struct {
    SSA              *ssa.Package
    InitialFunctions []*ssa.Function
    *packages.Package
    Config    config.Config
    Inspector *inspector.Inspector
    // TODO(dh): this map should probably map from *ast.File, not string
    Generated map[string]bool

    tokenFileMap map[*token.File]*ast.File
}

type Positioner interface {
    Pos() token.Pos
}

func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position {
    if p == token.NoPos {
        return token.Position{}
    }

    // Only use the adjusted position if it points to another Go file.
    // This means we'll point to the original file for cgo files, but
    // we won't point to a YACC grammar file.

    pos := fset.PositionFor(p, false)
    adjPos := fset.PositionFor(p, true)

@@ -602,34 +443,6 @@ func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position {
    return pos
}

func (j *Job) Errorf(n Positioner, format string, args ...interface{}) *Problem {
    pos := DisplayPosition(j.Pkg.Fset, n.Pos())
    if j.Pkg.Generated[pos.Filename] && j.check.FilterGenerated {
        return nil
    }
    problem := Problem{
        Position: pos,
        Text:     fmt.Sprintf(format, args...),
        Check:    j.check.ID,
        Package:  j.Pkg,
    }
    j.problems = append(j.problems, problem)
    return &j.problems[len(j.problems)-1]
}

func allPackages(pkgs []*packages.Package) []*packages.Package {
    var out []*packages.Package
    packages.Visit(
        pkgs,
        func(pkg *packages.Package) bool {
            out = append(out, pkg)
            return true
        },
        nil,
    )
    return out
}

var bufferPool = &sync.Pool{
    New: func() interface{} {
        buf := bytes.NewBuffer(nil)
@@ -670,8 +483,7 @@ func writePackage(buf *bytes.Buffer, pkg *types.Package) {
    if pkg == nil {
        return
    }
    var s string
    s = pkg.Path()
    s := pkg.Path()
    if s != "" {
        buf.WriteString(s)
        buf.WriteByte('.')
vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go (vendored, 121 lines changed)
@@ -4,6 +4,7 @@ package lintdsl

import (
    "bytes"
    "flag"
    "fmt"
    "go/ast"
    "go/constant"
@@ -12,6 +13,8 @@ import (
    "go/types"
    "strings"

    "golang.org/x/tools/go/analysis"
    "honnef.co/go/tools/facts"
    "honnef.co/go/tools/lint"
    "honnef.co/go/tools/ssa"
)
@@ -71,16 +74,6 @@ func IsPointerLike(T types.Type) bool {
    return false
}

func IsGenerated(f *ast.File) bool {
    comments := f.Comments
    if len(comments) > 0 {
        comment := comments[0].Text()
        return strings.Contains(comment, "Code generated by") ||
            strings.Contains(comment, "DO NOT EDIT")
    }
    return false
}

func IsIdent(expr ast.Expr, ident string) bool {
    id, ok := expr.(*ast.Ident)
    return ok && id.Name == ident
@@ -103,26 +96,26 @@ func IsZero(expr ast.Expr) bool {
    return IsIntLiteral(expr, "0")
}

func IsOfType(j *lint.Job, expr ast.Expr, name string) bool {
    return IsType(j.Pkg.TypesInfo.TypeOf(expr), name)
func IsOfType(pass *analysis.Pass, expr ast.Expr, name string) bool {
    return IsType(pass.TypesInfo.TypeOf(expr), name)
}

func IsInTest(j *lint.Job, node lint.Positioner) bool {
func IsInTest(pass *analysis.Pass, node lint.Positioner) bool {
    // FIXME(dh): this doesn't work for global variables with
    // initializers
    f := j.Pkg.Fset.File(node.Pos())
    f := pass.Fset.File(node.Pos())
    return f != nil && strings.HasSuffix(f.Name(), "_test.go")
}

func IsInMain(j *lint.Job, node lint.Positioner) bool {
func IsInMain(pass *analysis.Pass, node lint.Positioner) bool {
    if node, ok := node.(packager); ok {
        return node.Package().Pkg.Name() == "main"
    }
    return j.Pkg.Types.Name() == "main"
    return pass.Pkg.Name() == "main"
}

func SelectorName(j *lint.Job, expr *ast.SelectorExpr) string {
    info := j.Pkg.TypesInfo
func SelectorName(pass *analysis.Pass, expr *ast.SelectorExpr) string {
    info := pass.TypesInfo
    sel := info.Selections[expr]
    if sel == nil {
        if x, ok := expr.X.(*ast.Ident); ok {
@@ -138,16 +131,16 @@ func SelectorName(pass *analysis.Pass, expr *ast.SelectorExpr) string {
    return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name())
}

func IsNil(j *lint.Job, expr ast.Expr) bool {
    return j.Pkg.TypesInfo.Types[expr].IsNil()
func IsNil(pass *analysis.Pass, expr ast.Expr) bool {
    return pass.TypesInfo.Types[expr].IsNil()
}

func BoolConst(j *lint.Job, expr ast.Expr) bool {
    val := j.Pkg.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val()
func BoolConst(pass *analysis.Pass, expr ast.Expr) bool {
    val := pass.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val()
    return constant.BoolVal(val)
}

func IsBoolConst(j *lint.Job, expr ast.Expr) bool {
func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool {
    // We explicitly don't support typed bools because more often than
    // not, custom bool types are used as binary enums and the
    // explicit comparison is desired.
@@ -156,7 +149,7 @@ func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool {
    if !ok {
        return false
    }
    obj := j.Pkg.TypesInfo.ObjectOf(ident)
    obj := pass.TypesInfo.ObjectOf(ident)
    c, ok := obj.(*types.Const)
    if !ok {
        return false
@@ -171,8 +164,8 @@ func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool {
    return true
}

func ExprToInt(j *lint.Job, expr ast.Expr) (int64, bool) {
    tv := j.Pkg.TypesInfo.Types[expr]
func ExprToInt(pass *analysis.Pass, expr ast.Expr) (int64, bool) {
    tv := pass.TypesInfo.Types[expr]
    if tv.Value == nil {
        return 0, false
    }
@@ -182,8 +175,8 @@ func ExprToInt(pass *analysis.Pass, expr ast.Expr) (int64, bool) {
    return constant.Int64Val(tv.Value)
}

func ExprToString(j *lint.Job, expr ast.Expr) (string, bool) {
    val := j.Pkg.TypesInfo.Types[expr].Value
func ExprToString(pass *analysis.Pass, expr ast.Expr) (string, bool) {
    val := pass.TypesInfo.Types[expr].Value
    if val == nil {
        return "", false
    }
@@ -212,20 +205,21 @@ func DereferenceR(T types.Type) types.Type {
    return T
}

func IsGoVersion(j *lint.Job, minor int) bool {
    return j.GoVersion >= minor
func IsGoVersion(pass *analysis.Pass, minor int) bool {
    version := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter).Get().(int)
    return version >= minor
}

func CallNameAST(j *lint.Job, call *ast.CallExpr) string {
func CallNameAST(pass *analysis.Pass, call *ast.CallExpr) string {
    switch fun := call.Fun.(type) {
    case *ast.SelectorExpr:
        fn, ok := j.Pkg.TypesInfo.ObjectOf(fun.Sel).(*types.Func)
        fn, ok := pass.TypesInfo.ObjectOf(fun.Sel).(*types.Func)
        if !ok {
            return ""
        }
        return lint.FuncName(fn)
    case *ast.Ident:
        obj := j.Pkg.TypesInfo.ObjectOf(fun)
        obj := pass.TypesInfo.ObjectOf(fun)
        switch obj := obj.(type) {
        case *types.Func:
            return lint.FuncName(obj)
@@ -239,35 +233,35 @@ func CallNameAST(pass *analysis.Pass, call *ast.CallExpr) string {
    }
}

func IsCallToAST(j *lint.Job, node ast.Node, name string) bool {
func IsCallToAST(pass *analysis.Pass, node ast.Node, name string) bool {
    call, ok := node.(*ast.CallExpr)
    if !ok {
        return false
    }
    return CallNameAST(j, call) == name
    return CallNameAST(pass, call) == name
}

func IsCallToAnyAST(j *lint.Job, node ast.Node, names ...string) bool {
func IsCallToAnyAST(pass *analysis.Pass, node ast.Node, names ...string) bool {
    for _, name := range names {
        if IsCallToAST(j, node, name) {
        if IsCallToAST(pass, node, name) {
            return true
        }
    }
    return false
}

func Render(j *lint.Job, x interface{}) string {
func Render(pass *analysis.Pass, x interface{}) string {
    var buf bytes.Buffer
    if err := printer.Fprint(&buf, j.Pkg.Fset, x); err != nil {
    if err := printer.Fprint(&buf, pass.Fset, x); err != nil {
        panic(err)
    }
    return buf.String()
}

func RenderArgs(j *lint.Job, args []ast.Expr) string {
func RenderArgs(pass *analysis.Pass, args []ast.Expr) string {
    var ss []string
    for _, arg := range args {
        ss = append(ss, Render(j, arg))
        ss = append(ss, Render(pass, arg))
    }
    return strings.Join(ss, ", ")
}
@@ -359,3 +353,48 @@ func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field
    }
    return out
}

func File(pass *analysis.Pass, node lint.Positioner) *ast.File {
    pass.Fset.PositionFor(node.Pos(), true)
    m := pass.ResultOf[facts.TokenFile].(map[*token.File]*ast.File)
    return m[pass.Fset.File(node.Pos())]
}

// IsGenerated reports whether pos is in a generated file. It ignores
// //line directives.
func IsGenerated(pass *analysis.Pass, pos token.Pos) bool {
    _, ok := Generator(pass, pos)
    return ok
}

// Generator returns the generator that generated the file containing
// pos. It ignores //line directives.
func Generator(pass *analysis.Pass, pos token.Pos) (facts.Generator, bool) {
    file := pass.Fset.PositionFor(pos, false).Filename
    m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
    g, ok := m[file]
    return g, ok
}

func ReportfFG(pass *analysis.Pass, pos token.Pos, f string, args ...interface{}) {
    file := lint.DisplayPosition(pass.Fset, pos).Filename
    m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
    if _, ok := m[file]; ok {
        return
    }
    pass.Reportf(pos, f, args...)
}

func ReportNodef(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) {
    msg := fmt.Sprintf(format, args...)
    pass.Report(analysis.Diagnostic{Pos: node.Pos(), End: node.End(), Message: msg})
}

func ReportNodefFG(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) {
    file := lint.DisplayPosition(pass.Fset, node.Pos()).Filename
    m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
    if _, ok := m[file]; ok {
        return
    }
    ReportNodef(pass, node, format, args...)
}
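To illustrate the new pass-based helpers (a sketch, not part of the diff; the analyzer name and its check logic are invented), a check now receives an *analysis.Pass and reports through the lintdsl wrappers. ReportNodefFG reads pass.ResultOf[facts.Generated], so an analyzer using it must list facts.Generated in its Requires:

    package example

    import (
        "go/ast"

        "golang.org/x/tools/go/analysis"
        "honnef.co/go/tools/facts"
        "honnef.co/go/tools/lint/lintdsl"
    )

    // EX1000 is a hypothetical check built on the new helpers.
    var ExampleAnalyzer = &analysis.Analyzer{
        Name:     "EX1000",
        Doc:      "flags empty else branches (illustrative only)",
        Requires: []*analysis.Analyzer{facts.Generated},
        Run: func(pass *analysis.Pass) (interface{}, error) {
            for _, f := range pass.Files {
                ast.Inspect(f, func(n ast.Node) bool {
                    ifstmt, ok := n.(*ast.IfStmt)
                    if !ok || ifstmt.Else == nil {
                        return true
                    }
                    if blk, ok := ifstmt.Else.(*ast.BlockStmt); ok && len(blk.List) == 0 {
                        // silently dropped when the file is generated
                        lintdsl.ReportNodefFG(pass, blk, "empty else branch")
                    }
                    return true
                })
            }
            return nil, nil
        },
    }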
@@ -51,7 +51,7 @@ type Text struct {
}

func (o Text) Format(p lint.Problem) {
    fmt.Fprintf(o.W, "%v: %s\n", relativePositionString(p.Position), p.String())
    fmt.Fprintf(o.W, "%v: %s\n", relativePositionString(p.Pos), p.String())
}

type JSON struct {
@@ -80,16 +80,22 @@ func (o JSON) Format(p lint.Problem) {
        Code     string   `json:"code"`
        Severity string   `json:"severity,omitempty"`
        Location location `json:"location"`
        End      location `json:"end"`
        Message  string   `json:"message"`
    }{
        Code:     p.Check,
        Severity: severity(p.Severity),
        Location: location{
            File:   p.Position.Filename,
            Line:   p.Position.Line,
            Column: p.Position.Column,
            File:   p.Pos.Filename,
            Line:   p.Pos.Line,
            Column: p.Pos.Column,
        },
        Message: p.Text,
        End: location{
            File:   p.End.Filename,
            Line:   p.End.Line,
            Column: p.End.Column,
        },
        Message: p.Message,
    }
    _ = json.NewEncoder(o.W).Encode(jp)
}
@@ -102,20 +108,21 @@ type Stylish struct {
}

func (o *Stylish) Format(p lint.Problem) {
    if p.Position.Filename == "" {
        p.Position.Filename = "-"
    pos := p.Pos
    if pos.Filename == "" {
        pos.Filename = "-"
    }

    if p.Position.Filename != o.prevFile {
    if pos.Filename != o.prevFile {
        if o.prevFile != "" {
            o.tw.Flush()
            fmt.Fprintln(o.W)
        }
        fmt.Fprintln(o.W, p.Position.Filename)
        o.prevFile = p.Position.Filename
        fmt.Fprintln(o.W, pos.Filename)
        o.prevFile = pos.Filename
        o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0)
    }
    fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", p.Position.Line, p.Position.Column, p.Check, p.Text)
    fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", pos.Line, pos.Column, p.Check, p.Message)
}

func (o *Stylish) Stats(total, errors, warnings int) {
vendor/honnef.co/go/tools/lint/lintutil/stats.go (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
// +build !aix,!android,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris

package lintutil

import "os"

var infoSignals = []os.Signal{}
vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
// +build darwin dragonfly freebsd netbsd openbsd

package lintutil

import (
    "os"
    "syscall"
)

var infoSignals = []os.Signal{syscall.SIGINFO}
vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
// +build aix android linux solaris

package lintutil

import (
    "os"
    "syscall"
)

var infoSignals = []os.Signal{syscall.SIGUSR1}
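These three new files select the signal that triggers the progress dump wired up in util.go below: SIGINFO on the BSDs (commonly bound to Ctrl+T in the terminal), SIGUSR1 on Linux and related platforms, and no signal elsewhere. In practice one would run something like `kill -USR1 <staticcheck-pid>` on Linux to print the counters while a long lint is running.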
vendor/honnef.co/go/tools/lint/lintutil/util.go (vendored, 332 lines changed)
@@ -8,29 +8,70 @@
package lintutil // import "honnef.co/go/tools/lint/lintutil"

import (
    "crypto/sha256"
    "errors"
    "flag"
    "fmt"
    "go/build"
    "go/token"
    "io"
    "log"
    "os"
    "os/signal"
    "regexp"
    "runtime"
    "runtime/debug"
    "runtime/pprof"
    "strconv"
    "strings"
    "time"
    "sync/atomic"

    "honnef.co/go/tools/config"
    "honnef.co/go/tools/internal/cache"
    "honnef.co/go/tools/lint"
    "honnef.co/go/tools/lint/lintutil/format"
    "honnef.co/go/tools/version"

    "golang.org/x/tools/go/analysis"
    "golang.org/x/tools/go/buildutil"
    "golang.org/x/tools/go/packages"
)

func NewVersionFlag() flag.Getter {
    tags := build.Default.ReleaseTags
    v := tags[len(tags)-1][2:]
    version := new(VersionFlag)
    if err := version.Set(v); err != nil {
        panic(fmt.Sprintf("internal error: %s", err))
    }
    return version
}

type VersionFlag int

func (v *VersionFlag) String() string {
    return fmt.Sprintf("1.%d", *v)

}

func (v *VersionFlag) Set(s string) error {
    if len(s) < 3 {
        return errors.New("invalid Go version")
    }
    if s[0] != '1' {
        return errors.New("invalid Go version")
    }
    if s[1] != '.' {
        return errors.New("invalid Go version")
    }
    i, err := strconv.Atoi(s[2:])
    *v = VersionFlag(i)
    return err
}

func (v *VersionFlag) Get() interface{} {
    return int(*v)
}
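A small usage illustration of the now-exported flag type (hypothetical driver code):

    package main

    import (
        "fmt"

        "honnef.co/go/tools/lint/lintutil"
    )

    func main() {
        var v lintutil.VersionFlag
        if err := v.Set("1.12"); err != nil { // parses "1.<minor>"
            panic(err)
        }
        fmt.Println(v.Get(), v.String()) // 12 1.12
    }

Exporting VersionFlag (and adding NewVersionFlag) lets external drivers register the same `-go` flag that IsGoVersion reads via pass.Analyzer.Flags in the lintdsl changes above.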

func usage(name string, flags *flag.FlagSet) func() {
    return func() {
        fmt.Fprintf(os.Stderr, "Usage of %s:\n", name)
@@ -43,48 +84,6 @@ func usage(name string, flags *flag.FlagSet) func() {
    }
}

func parseIgnore(s string) ([]lint.Ignore, error) {
    var out []lint.Ignore
    if len(s) == 0 {
        return nil, nil
    }
    for _, part := range strings.Fields(s) {
        p := strings.Split(part, ":")
        if len(p) != 2 {
            return nil, errors.New("malformed ignore string")
        }
        path := p[0]
        checks := strings.Split(p[1], ",")
        out = append(out, &lint.GlobIgnore{Pattern: path, Checks: checks})
    }
    return out, nil
}

type versionFlag int

func (v *versionFlag) String() string {
    return fmt.Sprintf("1.%d", *v)
}

func (v *versionFlag) Set(s string) error {
    if len(s) < 3 {
        return errors.New("invalid Go version")
    }
    if s[0] != '1' {
        return errors.New("invalid Go version")
    }
    if s[1] != '.' {
        return errors.New("invalid Go version")
    }
    i, err := strconv.Atoi(s[2:])
    *v = versionFlag(i)
    return err
}

func (v *versionFlag) Get() interface{} {
    return int(*v)
}

type list []string

func (list *list) String() string {
@@ -105,17 +104,16 @@ func FlagSet(name string) *flag.FlagSet {
    flags := flag.NewFlagSet("", flag.ExitOnError)
    flags.Usage = usage(name, flags)
    flags.String("tags", "", "List of `build tags`")
    flags.String("ignore", "", "Deprecated: use linter directives instead")
    flags.Bool("tests", true, "Include tests")
    flags.Bool("version", false, "Print version and exit")
    flags.Bool("show-ignored", false, "Don't filter ignored problems")
    flags.String("f", "text", "Output `format` (valid choices are 'stylish', 'text' and 'json')")
    flags.String("explain", "", "Print description of `check`")

    flags.Int("debug.max-concurrent-jobs", 0, "Number of jobs to run concurrently")
    flags.Bool("debug.print-stats", false, "Print debug statistics")
    flags.String("debug.cpuprofile", "", "Write CPU profile to `file`")
    flags.String("debug.memprofile", "", "Write memory profile to `file`")
    flags.Bool("debug.version", false, "Print detailed version information about this program")
    flags.Bool("debug.no-compile-errors", false, "Don't print compile errors")

    checks := list{"inherit"}
    fail := list{"all"}
@@ -124,7 +122,7 @@ func FlagSet(name string) *flag.FlagSet {

    tags := build.Default.ReleaseTags
    v := tags[len(tags)-1][2:]
    version := new(versionFlag)
    version := new(VersionFlag)
    if err := version.Set(v); err != nil {
        panic(fmt.Sprintf("internal error: %s", err))
    }
@@ -133,24 +131,17 @@ func FlagSet(name string) *flag.FlagSet {
    return flags
}

func findCheck(cs []lint.Checker, check string) (lint.Check, bool) {
func findCheck(cs []*analysis.Analyzer, check string) (*analysis.Analyzer, bool) {
    for _, c := range cs {
        for _, cc := range c.Checks() {
            if cc.ID == check {
                return cc, true
            }
        if c.Name == check {
            return c, true
        }
    }
    return lint.Check{}, false
    return nil, false
}

func ProcessFlagSet(cs []lint.Checker, fs *flag.FlagSet) {
    if _, ok := os.LookupEnv("GOGC"); !ok {
        debug.SetGCPercent(50)
    }

func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *flag.FlagSet) {
    tags := fs.Lookup("tags").Value.(flag.Getter).Get().(string)
    ignore := fs.Lookup("ignore").Value.(flag.Getter).Get().(string)
    tests := fs.Lookup("tests").Value.(flag.Getter).Get().(bool)
    goVersion := fs.Lookup("go").Value.(flag.Getter).Get().(int)
    formatter := fs.Lookup("f").Value.(flag.Getter).Get().(string)
@@ -158,10 +149,10 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *flag.FlagSet) {
    showIgnored := fs.Lookup("show-ignored").Value.(flag.Getter).Get().(bool)
    explain := fs.Lookup("explain").Value.(flag.Getter).Get().(string)

    maxConcurrentJobs := fs.Lookup("debug.max-concurrent-jobs").Value.(flag.Getter).Get().(int)
    printStats := fs.Lookup("debug.print-stats").Value.(flag.Getter).Get().(bool)
    cpuProfile := fs.Lookup("debug.cpuprofile").Value.(flag.Getter).Get().(string)
    memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string)
    debugVersion := fs.Lookup("debug.version").Value.(flag.Getter).Get().(bool)
    debugNoCompile := fs.Lookup("debug.no-compile-errors").Value.(flag.Getter).Get().(bool)

    cfg := config.Config{}
    cfg.Checks = *fs.Lookup("checks").Value.(*list)
@@ -188,13 +179,32 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *flag.FlagSet) {
        pprof.StartCPUProfile(f)
    }

    if debugVersion {
        version.Verbose()
        exit(0)
    }

    if printVersion {
        version.Print()
        exit(0)
    }

    // Validate that the tags argument is well-formed. go/packages
    // doesn't detect malformed build flags and returns unhelpful
    // errors.
    tf := buildutil.TagsFlag{}
    if err := tf.Set(tags); err != nil {
        fmt.Fprintln(os.Stderr, fmt.Errorf("invalid value %q for flag -tags: %s", tags, err))
        exit(1)
    }

    if explain != "" {
        check, ok := findCheck(cs, explain)
        var haystack []*analysis.Analyzer
        haystack = append(haystack, cs...)
        for _, cum := range cums {
            haystack = append(haystack, cum.Analyzer())
        }
        check, ok := findCheck(haystack, explain)
        if !ok {
            fmt.Fprintln(os.Stderr, "Couldn't find check", explain)
            exit(1)
@@ -207,16 +217,11 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *flag.FlagSet) {
        exit(0)
    }

    ps, err := Lint(cs, fs.Args(), &Options{
        Tags:          strings.Fields(tags),
        LintTests:     tests,
        Ignores:       ignore,
        GoVersion:     goVersion,
        ReturnIgnored: showIgnored,
        Config:        cfg,

        MaxConcurrentJobs: maxConcurrentJobs,
        PrintStats:        printStats,
    ps, err := Lint(cs, cums, fs.Args(), &Options{
        Tags:      tags,
        LintTests: tests,
        GoVersion: goVersion,
        Config:    cfg,
    })
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
@@ -243,15 +248,22 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *flag.FlagSet) {
    )

    fail := *fs.Lookup("fail").Value.(*list)
    var allChecks []string
    for _, p := range ps {
        allChecks = append(allChecks, p.Check)
    analyzers := make([]*analysis.Analyzer, len(cs), len(cs)+len(cums))
    copy(analyzers, cs)
    for _, cum := range cums {
        analyzers = append(analyzers, cum.Analyzer())
    }

    shouldExit := lint.FilterChecks(allChecks, fail)
    shouldExit := lint.FilterChecks(analyzers, fail)
    shouldExit["compile"] = true

    total = len(ps)
    for _, p := range ps {
        if p.Check == "compile" && debugNoCompile {
            continue
        }
        if p.Severity == lint.Ignored && !showIgnored {
            continue
        }
        if shouldExit[p.Check] {
            errors++
        } else {
@@ -266,80 +278,97 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *flag.FlagSet) {
    if errors > 0 {
        exit(1)
    }
    exit(0)
}

type Options struct {
    Config config.Config

    Tags          []string
    LintTests     bool
    Ignores       string
    GoVersion     int
    ReturnIgnored bool

    MaxConcurrentJobs int
    PrintStats        bool
    Tags      string
    LintTests bool
    GoVersion int
}

func Lint(cs []lint.Checker, paths []string, opt *Options) ([]lint.Problem, error) {
    stats := lint.PerfStats{
        CheckerInits: map[string]time.Duration{},
func computeSalt() ([]byte, error) {
    if version.Version != "devel" {
        return []byte(version.Version), nil
    }
    p, err := os.Executable()
    if err != nil {
        return nil, err
    }
    f, err := os.Open(p)
    if err != nil {
        return nil, err
    }
    defer f.Close()
    h := sha256.New()
    if _, err := io.Copy(h, f); err != nil {
        return nil, err
    }
    return h.Sum(nil), nil
}

func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string, opt *Options) ([]lint.Problem, error) {
    salt, err := computeSalt()
    if err != nil {
        return nil, fmt.Errorf("could not compute salt for cache: %s", err)
    }
    cache.SetSalt(salt)

    if opt == nil {
        opt = &Options{}
    }
    ignores, err := parseIgnore(opt.Ignores)
    if err != nil {
        return nil, err
    }

    conf := &packages.Config{
        Mode:  packages.LoadAllSyntax,
        Tests: opt.LintTests,
        BuildFlags: []string{
            "-tags=" + strings.Join(opt.Tags, " "),
        },
    }

    t := time.Now()
    if len(paths) == 0 {
        paths = []string{"."}
    }
    pkgs, err := packages.Load(conf, paths...)
    if err != nil {
        return nil, err
    }
    stats.PackageLoading = time.Since(t)
    runtime.GC()

    var problems []lint.Problem
    workingPkgs := make([]*packages.Package, 0, len(pkgs))
    for _, pkg := range pkgs {
        if pkg.IllTyped {
            problems = append(problems, compileErrors(pkg)...)
        } else {
            workingPkgs = append(workingPkgs, pkg)
        }
    }

    if len(workingPkgs) == 0 {
        return problems, nil
    }

    l := &lint.Linter{
        Checkers:      cs,
        Ignores:       ignores,
        GoVersion:     opt.GoVersion,
        ReturnIgnored: opt.ReturnIgnored,
        Config:        opt.Config,

        MaxConcurrentJobs: opt.MaxConcurrentJobs,
        PrintStats:        opt.PrintStats,
        Checkers:           cs,
        CumulativeCheckers: cums,
        GoVersion:          opt.GoVersion,
        Config:             opt.Config,
    }
    cfg := &packages.Config{}
    if opt.LintTests {
        cfg.Tests = true
    }
    if opt.Tags != "" {
        cfg.BuildFlags = append(cfg.BuildFlags, "-tags", opt.Tags)
    }
    problems = append(problems, l.Lint(workingPkgs, &stats)...)

    return problems, nil
    printStats := func() {
        // Individual stats are read atomically, but overall there
        // is no synchronisation. For printing rough progress
        // information, this doesn't matter.
        switch atomic.LoadUint32(&l.Stats.State) {
        case lint.StateInitializing:
            fmt.Fprintln(os.Stderr, "Status: initializing")
        case lint.StateGraph:
            fmt.Fprintln(os.Stderr, "Status: loading package graph")
        case lint.StateProcessing:
            fmt.Fprintf(os.Stderr, "Packages: %d/%d initial, %d/%d total; Workers: %d/%d; Problems: %d\n",
                atomic.LoadUint32(&l.Stats.ProcessedInitialPackages),
                atomic.LoadUint32(&l.Stats.InitialPackages),
                atomic.LoadUint32(&l.Stats.ProcessedPackages),
                atomic.LoadUint32(&l.Stats.TotalPackages),
                atomic.LoadUint32(&l.Stats.ActiveWorkers),
                atomic.LoadUint32(&l.Stats.TotalWorkers),
                atomic.LoadUint32(&l.Stats.Problems),
            )
        case lint.StateCumulative:
            fmt.Fprintln(os.Stderr, "Status: processing cumulative checkers")
        }
    }
    if len(infoSignals) > 0 {
        ch := make(chan os.Signal, 1)
        signal.Notify(ch, infoSignals...)
        defer signal.Stop(ch)
        go func() {
            for range ch {
                printStats()
            }
        }()
    }

    return l.Lint(cfg, paths)
}

var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?$`)
@@ -361,34 +390,3 @@ func parsePos(pos string) token.Position {
        Column: col,
    }
}

func compileErrors(pkg *packages.Package) []lint.Problem {
    if !pkg.IllTyped {
        return nil
    }
    if len(pkg.Errors) == 0 {
        // transitively ill-typed
        var ps []lint.Problem
        for _, imp := range pkg.Imports {
            ps = append(ps, compileErrors(imp)...)
        }
        return ps
    }
    var ps []lint.Problem
    for _, err := range pkg.Errors {
        p := lint.Problem{
            Position: parsePos(err.Pos),
            Text:     err.Msg,
            Check:    "compile",
        }
        ps = append(ps, p)
    }
    return ps
}

func ProcessArgs(name string, cs []lint.Checker, args []string) {
    flags := FlagSet(name)
    flags.Parse(args)

    ProcessFlagSet(cs, flags)
}
vendor/honnef.co/go/tools/lint/runner.go (vendored, new file, 970 lines)
@@ -0,0 +1,970 @@
package lint

/*
Parallelism

Runner implements parallel processing of packages by spawning one
goroutine per package in the dependency graph, without any semaphores.
Each goroutine initially waits on the completion of all of its
dependencies, thus establishing correct order of processing. Once all
dependencies finish processing, the goroutine will load the package
from export data or source – this loading is guarded by a semaphore,
sized according to the number of CPU cores. This way, we only have as
many packages occupying memory and CPU resources as there are actual
cores to process them.

This combination of unbounded goroutines but bounded package loading
means that if we have many parallel, independent subgraphs, they will
all execute in parallel, while not wasting resources for long linear
chains or trying to process more subgraphs in parallel than the system
can handle.

*/
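A minimal standalone sketch of the scheduling pattern described in that comment (illustrative only, with a toy dependency graph; the real Runner adds caching, facts, and error handling):

    package main

    import (
        "fmt"
        "runtime"
        "sync"
    )

    type node struct {
        name string
        deps []*node
        done chan struct{}
    }

    func main() {
        // Toy graph: c depends on b, b depends on a.
        a := &node{name: "a", done: make(chan struct{})}
        b := &node{name: "b", deps: []*node{a}, done: make(chan struct{})}
        c := &node{name: "c", deps: []*node{b}, done: make(chan struct{})}

        // One goroutine per node, but actual work bounded by GOMAXPROCS.
        loadSem := make(chan struct{}, runtime.GOMAXPROCS(0))
        var wg sync.WaitGroup
        for _, n := range []*node{a, b, c} {
            wg.Add(1)
            go func(n *node) {
                defer wg.Done()
                for _, dep := range n.deps {
                    <-dep.done // wait for all dependencies first
                }
                loadSem <- struct{}{} // acquire the load semaphore
                fmt.Println("processing", n.name)
                <-loadSem     // release it
                close(n.done) // signal our dependents
            }(n)
        }
        wg.Wait()
    }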
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/packages"
|
||||
"golang.org/x/tools/go/types/objectpath"
|
||||
"honnef.co/go/tools/config"
|
||||
"honnef.co/go/tools/facts"
|
||||
"honnef.co/go/tools/internal/cache"
|
||||
"honnef.co/go/tools/loader"
|
||||
)
|
||||

// If enabled, abuse of the go/analysis API will lead to panics
const sanityCheck = true

// OPT(dh): for a dependency tree A->B->C->D, if we have cached data
// for B, there should be no need to load C and D individually. Go's
// export data for B contains all the data we need on types, and our
// fact cache could store the union of B, C and D in B.
//
// This may change unused's behavior, however, as it may observe fewer
// interfaces from transitive dependencies.

type Package struct {
	dependents uint64

	*packages.Package
	Imports    []*Package
	initial    bool
	fromSource bool
	hash       string
	done       chan struct{}

	resultsMu sync.Mutex
	// results maps analyzer IDs to analyzer results
	results []*result

	cfg      *config.Config
	gen      map[string]facts.Generator
	problems []Problem
	ignores  []Ignore
	errs     []error

	// these slices are indexed by analysis
	facts    []map[types.Object][]analysis.Fact
	pkgFacts [][]analysis.Fact

	canClearTypes bool
}

func (pkg *Package) decUse() {
	// Use AddUint64's return value; a separate atomic load would race
	// with other goroutines decrementing the counter concurrently.
	if atomic.AddUint64(&pkg.dependents, ^uint64(0)) == 0 {
		// nobody depends on this package anymore
		if pkg.canClearTypes {
			pkg.Types = nil
		}
		pkg.facts = nil
		pkg.pkgFacts = nil

		for _, imp := range pkg.Imports {
			imp.decUse()
		}
	}
}

type result struct {
	v     interface{}
	err   error
	ready chan struct{}
}

type Runner struct {
	ld    loader.Loader
	cache *cache.Cache

	analyzerIDs analyzerIDs

	// limits parallelism of loading packages
	loadSem chan struct{}

	goVersion int
	stats     *Stats
}

type analyzerIDs struct {
	m map[*analysis.Analyzer]int
}

func (ids analyzerIDs) get(a *analysis.Analyzer) int {
	id, ok := ids.m[a]
	if !ok {
		panic(fmt.Sprintf("no analyzer ID for %s", a.Name))
	}
	return id
}

type Fact struct {
	Path string
	Fact analysis.Fact
}

type analysisAction struct {
	analyzer        *analysis.Analyzer
	analyzerID      int
	pkg             *Package
	newPackageFacts []analysis.Fact
	problems        []Problem

	pkgFacts map[*types.Package][]analysis.Fact
}

func (ac *analysisAction) String() string {
	return fmt.Sprintf("%s @ %s", ac.analyzer, ac.pkg)
}

func (ac *analysisAction) allObjectFacts() []analysis.ObjectFact {
	out := make([]analysis.ObjectFact, 0, len(ac.pkg.facts[ac.analyzerID]))
	for obj, facts := range ac.pkg.facts[ac.analyzerID] {
		for _, fact := range facts {
			out = append(out, analysis.ObjectFact{
				Object: obj,
				Fact:   fact,
			})
		}
	}
	return out
}

func (ac *analysisAction) allPackageFacts() []analysis.PackageFact {
	out := make([]analysis.PackageFact, 0, len(ac.pkgFacts))
	for pkg, facts := range ac.pkgFacts {
		for _, fact := range facts {
			out = append(out, analysis.PackageFact{
				Package: pkg,
				Fact:    fact,
			})
		}
	}
	return out
}

func (ac *analysisAction) importObjectFact(obj types.Object, fact analysis.Fact) bool {
	if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
		panic("analysis doesn't export any facts")
	}
	for _, f := range ac.pkg.facts[ac.analyzerID][obj] {
		if reflect.TypeOf(f) == reflect.TypeOf(fact) {
			reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem())
			return true
		}
	}
	return false
}

func (ac *analysisAction) importPackageFact(pkg *types.Package, fact analysis.Fact) bool {
	if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
		panic("analysis doesn't export any facts")
	}
	for _, f := range ac.pkgFacts[pkg] {
		if reflect.TypeOf(f) == reflect.TypeOf(fact) {
			reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem())
			return true
		}
	}
	return false
}

func (ac *analysisAction) exportObjectFact(obj types.Object, fact analysis.Fact) {
	if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
		panic("analysis doesn't export any facts")
	}
	ac.pkg.facts[ac.analyzerID][obj] = append(ac.pkg.facts[ac.analyzerID][obj], fact)
}

func (ac *analysisAction) exportPackageFact(fact analysis.Fact) {
	if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
		panic("analysis doesn't export any facts")
	}
	ac.pkgFacts[ac.pkg.Types] = append(ac.pkgFacts[ac.pkg.Types], fact)
	ac.newPackageFacts = append(ac.newPackageFacts, fact)
}

func (ac *analysisAction) report(pass *analysis.Pass, d analysis.Diagnostic) {
	p := Problem{
		Pos:     DisplayPosition(pass.Fset, d.Pos),
		End:     DisplayPosition(pass.Fset, d.End),
		Message: d.Message,
		Check:   pass.Analyzer.Name,
	}
	ac.problems = append(ac.problems, p)
}

func (r *Runner) runAnalysis(ac *analysisAction) (ret interface{}, err error) {
	ac.pkg.resultsMu.Lock()
	res := ac.pkg.results[r.analyzerIDs.get(ac.analyzer)]
	if res != nil {
		ac.pkg.resultsMu.Unlock()
		<-res.ready
		return res.v, res.err
	}
	res = &result{
		ready: make(chan struct{}),
	}
	ac.pkg.results[r.analyzerIDs.get(ac.analyzer)] = res
	ac.pkg.resultsMu.Unlock()

	defer func() {
		res.v = ret
		res.err = err
		close(res.ready)
	}()

	pass := new(analysis.Pass)
	*pass = analysis.Pass{
		Analyzer: ac.analyzer,
		Fset:     ac.pkg.Fset,
		Files:    ac.pkg.Syntax,
		// type information may be nil or may be populated. if it is
		// nil, it will get populated later.
		Pkg:               ac.pkg.Types,
		TypesInfo:         ac.pkg.TypesInfo,
		TypesSizes:        ac.pkg.TypesSizes,
		ResultOf:          map[*analysis.Analyzer]interface{}{},
		ImportObjectFact:  ac.importObjectFact,
		ImportPackageFact: ac.importPackageFact,
		ExportObjectFact:  ac.exportObjectFact,
		ExportPackageFact: ac.exportPackageFact,
		Report: func(d analysis.Diagnostic) {
			ac.report(pass, d)
		},
		AllObjectFacts:  ac.allObjectFacts,
		AllPackageFacts: ac.allPackageFacts,
	}

	if !ac.pkg.initial {
		// Don't report problems in dependencies
		pass.Report = func(analysis.Diagnostic) {}
	}
	return r.runAnalysisUser(pass, ac)
}

func (r *Runner) loadCachedFacts(a *analysis.Analyzer, pkg *Package) ([]Fact, bool) {
	if len(a.FactTypes) == 0 {
		return nil, true
	}

	var facts []Fact
	// Look in the cache for facts
	aID, err := passActionID(pkg, a)
	if err != nil {
		return nil, false
	}
	aID = cache.Subkey(aID, "facts")
	b, _, err := r.cache.GetBytes(aID)
	if err != nil {
		// No cached facts, analyse this package like a user-provided one, but ignore diagnostics
		return nil, false
	}

	if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&facts); err != nil {
		// Cached facts are broken, analyse this package like a user-provided one, but ignore diagnostics
		return nil, false
	}
	return facts, true
}

type dependencyError struct {
	dep string
	err error
}

func (err dependencyError) nested() dependencyError {
	if o, ok := err.err.(dependencyError); ok {
		return o.nested()
	}
	return err
}

func (err dependencyError) Error() string {
	if o, ok := err.err.(dependencyError); ok {
		return o.Error()
	}
	return fmt.Sprintf("error running dependency %s: %s", err.dep, err.err)
}

func (r *Runner) makeAnalysisAction(a *analysis.Analyzer, pkg *Package) *analysisAction {
	aid := r.analyzerIDs.get(a)
	ac := &analysisAction{
		analyzer:   a,
		analyzerID: aid,
		pkg:        pkg,
	}

	if len(a.FactTypes) == 0 {
		return ac
	}

	// Merge all package facts of dependencies
	ac.pkgFacts = map[*types.Package][]analysis.Fact{}
	seen := map[*Package]struct{}{}
	var dfs func(*Package)
	dfs = func(pkg *Package) {
		if _, ok := seen[pkg]; ok {
			return
		}
		seen[pkg] = struct{}{}
		s := pkg.pkgFacts[aid]
		// Use a full slice expression so that appending to the merged
		// view cannot write into the dependency's backing array.
		ac.pkgFacts[pkg.Types] = s[0:len(s):len(s)]
		for _, imp := range pkg.Imports {
			dfs(imp)
		}
	}
	dfs(pkg)

	return ac
}

// analyses that we always want to run, even if they're not being run
// explicitly or as dependencies. These are necessary for the inner
// workings of the runner.
var injectedAnalyses = []*analysis.Analyzer{facts.Generated, config.Analyzer}

func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (interface{}, error) {
	if !ac.pkg.fromSource {
		panic(fmt.Sprintf("internal error: %s was not loaded from source", ac.pkg))
	}

	// User-provided package, analyse it
	// First analyse it with dependencies
	for _, req := range ac.analyzer.Requires {
		acReq := r.makeAnalysisAction(req, ac.pkg)
		ret, err := r.runAnalysis(acReq)
		if err != nil {
			// We couldn't run a dependency, no point in going on
			return nil, dependencyError{req.Name, err}
		}

		pass.ResultOf[req] = ret
	}

	// Then with this analyzer
	ret, err := ac.analyzer.Run(pass)
	if err != nil {
		return nil, err
	}

	if len(ac.analyzer.FactTypes) > 0 {
		// Merge new facts into the package and persist them.
		var facts []Fact
		for _, fact := range ac.newPackageFacts {
			id := r.analyzerIDs.get(ac.analyzer)
			ac.pkg.pkgFacts[id] = append(ac.pkg.pkgFacts[id], fact)
			facts = append(facts, Fact{"", fact})
		}
		for obj, afacts := range ac.pkg.facts[ac.analyzerID] {
			if obj.Pkg() != ac.pkg.Package.Types {
				continue
			}
			path, err := objectpath.For(obj)
			if err != nil {
				continue
			}
			for _, fact := range afacts {
				facts = append(facts, Fact{string(path), fact})
			}
		}

		buf := &bytes.Buffer{}
		if err := gob.NewEncoder(buf).Encode(facts); err != nil {
			return nil, err
		}
		aID, err := passActionID(ac.pkg, ac.analyzer)
		if err != nil {
			return nil, err
		}
		aID = cache.Subkey(aID, "facts")
		if err := r.cache.PutBytes(aID, buf.Bytes()); err != nil {
			return nil, err
		}
	}

	return ret, nil
}

func NewRunner(stats *Stats) (*Runner, error) {
	cache, err := cache.Default()
	if err != nil {
		return nil, err
	}

	return &Runner{
		cache: cache,
		stats: stats,
	}, nil
}

// Run loads packages corresponding to patterns and analyses them with
// analyzers. It returns the loaded packages, which contain reported
// diagnostics as well as extracted ignore directives.
//
// Note that diagnostics have not been filtered at this point yet, to
// accommodate cumulative analyses that require additional steps to
// produce diagnostics.
func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analysis.Analyzer, hasCumulative bool) ([]*Package, error) {
	r.analyzerIDs = analyzerIDs{m: map[*analysis.Analyzer]int{}}
	id := 0
	seen := map[*analysis.Analyzer]struct{}{}
	var dfs func(a *analysis.Analyzer)
	dfs = func(a *analysis.Analyzer) {
		if _, ok := seen[a]; ok {
			return
		}
		seen[a] = struct{}{}
		r.analyzerIDs.m[a] = id
		id++
		for _, f := range a.FactTypes {
			gob.Register(f)
		}
		for _, req := range a.Requires {
			dfs(req)
		}
	}
	for _, a := range analyzers {
		if v := a.Flags.Lookup("go"); v != nil {
			v.Value.Set(fmt.Sprintf("1.%d", r.goVersion))
		}
		dfs(a)
	}
	for _, a := range injectedAnalyses {
		dfs(a)
	}

	var dcfg packages.Config
	if cfg != nil {
		dcfg = *cfg
	}

	atomic.StoreUint32(&r.stats.State, StateGraph)
	initialPkgs, err := r.ld.Graph(dcfg, patterns...)
	if err != nil {
		return nil, err
	}

	defer r.cache.Trim()

	var allPkgs []*Package
	m := map[*packages.Package]*Package{}
	packages.Visit(initialPkgs, nil, func(l *packages.Package) {
		m[l] = &Package{
			Package:  l,
			results:  make([]*result, len(r.analyzerIDs.m)),
			facts:    make([]map[types.Object][]analysis.Fact, len(r.analyzerIDs.m)),
			pkgFacts: make([][]analysis.Fact, len(r.analyzerIDs.m)),
			done:     make(chan struct{}),
			// every package needs itself
			dependents:    1,
			canClearTypes: !hasCumulative,
		}
		allPkgs = append(allPkgs, m[l])
		for i := range m[l].facts {
			m[l].facts[i] = map[types.Object][]analysis.Fact{}
		}
		for _, err := range l.Errors {
			m[l].errs = append(m[l].errs, err)
		}
		for _, v := range l.Imports {
			m[v].dependents++
			m[l].Imports = append(m[l].Imports, m[v])
		}

		m[l].hash, err = packageHash(m[l])
		if err != nil {
			m[l].errs = append(m[l].errs, err)
		}
	})

	pkgs := make([]*Package, len(initialPkgs))
	for i, l := range initialPkgs {
		pkgs[i] = m[l]
		pkgs[i].initial = true
	}

	atomic.StoreUint32(&r.stats.InitialPackages, uint32(len(initialPkgs)))
	atomic.StoreUint32(&r.stats.TotalPackages, uint32(len(allPkgs)))
	atomic.StoreUint32(&r.stats.State, StateProcessing)

	var wg sync.WaitGroup
	wg.Add(len(allPkgs))
	r.loadSem = make(chan struct{}, runtime.GOMAXPROCS(-1))
	atomic.StoreUint32(&r.stats.TotalWorkers, uint32(cap(r.loadSem)))
	for _, pkg := range allPkgs {
		pkg := pkg
		go func() {
			r.processPkg(pkg, analyzers)

			if pkg.initial {
				atomic.AddUint32(&r.stats.ProcessedInitialPackages, 1)
			}
			atomic.AddUint32(&r.stats.Problems, uint32(len(pkg.problems)))
			wg.Done()
		}()
	}
	wg.Wait()

	return pkgs, nil
}
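
// Editor's usage sketch (hypothetical; not part of the upstream file):
// how a caller might wire NewRunner and Run together. The "./..."
// pattern and the collection of unfiltered problems are assumptions
// for illustration; real callers still have to apply the ignore
// directives collected in pkg.ignores.
func exampleRunAll(analyzers []*analysis.Analyzer) ([]Problem, error) {
	r, err := NewRunner(&Stats{})
	if err != nil {
		return nil, err
	}
	pkgs, err := r.Run(nil, []string{"./..."}, analyzers, false)
	if err != nil {
		return nil, err
	}
	var ps []Problem
	for _, pkg := range pkgs {
		// Diagnostics are still unfiltered at this point (see the doc
		// comment on Run).
		ps = append(ps, pkg.problems...)
	}
	return ps, nil
}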

var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?`)

func parsePos(pos string) (token.Position, int, error) {
	if pos == "-" || pos == "" {
		return token.Position{}, 0, nil
	}
	parts := posRe.FindStringSubmatch(pos)
	if parts == nil {
		return token.Position{}, 0, fmt.Errorf("malformed position %q", pos)
	}
	file := parts[1]
	line, _ := strconv.Atoi(parts[2])
	col, _ := strconv.Atoi(parts[3])
	return token.Position{
		Filename: file,
		Line:     line,
		Column:   col,
	}, len(parts[0]), nil
}
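
// Editor's sketch (hypothetical; not part of the upstream file):
// parsePos turns the position strings found in packages.Error into a
// token.Position, also reporting how many bytes of the input the
// position prefix consumed.
func exampleParsePos() (token.Position, error) {
	pos, consumed, err := parsePos("foo.go:12:3")
	_ = consumed // 11, the length of "foo.go:12:3"
	return pos, err // {Filename: "foo.go", Line: 12, Column: 3}
}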

// loadPkg loads a Go package. If the package is in the set of initial
// packages, it will be loaded from source, otherwise it will be
// loaded from export data. In the case that the package was loaded
// from export data, cached facts will also be loaded.
//
// Currently, only cached facts for this package will be loaded, not
// for any of its dependencies.
func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error {
	if pkg.Types != nil {
		panic(fmt.Sprintf("internal error: %s has already been loaded", pkg.Package))
	}

	// Load type information
	if pkg.initial {
		// Load package from source
		pkg.fromSource = true
		return r.ld.LoadFromSource(pkg.Package)
	}

	// Load package from export data
	if err := r.ld.LoadFromExport(pkg.Package); err != nil {
		// We asked Go to give us up to date export data, yet
		// we can't load it. There must be something wrong.
		//
		// Attempt loading from source. This should fail (because
		// otherwise there would be export data); we just want to
		// get the compile errors. If loading from source succeeds
		// we discard the result, anyway. Otherwise we'll fail
		// when trying to reload from export data later.
		//
		// FIXME(dh): we no longer reload from export data, so
		// theoretically we should be able to continue
		pkg.fromSource = true
		if err := r.ld.LoadFromSource(pkg.Package); err != nil {
			return err
		}
		// Make sure this package can't be imported successfully
		pkg.Package.Errors = append(pkg.Package.Errors, packages.Error{
			Pos:  "-",
			Msg:  fmt.Sprintf("could not load export data: %s", err),
			Kind: packages.ParseError,
		})
		return fmt.Errorf("could not load export data: %s", err)
	}

	failed := false
	seen := make([]bool, len(r.analyzerIDs.m))
	var dfs func(*analysis.Analyzer)
	dfs = func(a *analysis.Analyzer) {
		if seen[r.analyzerIDs.get(a)] {
			return
		}
		seen[r.analyzerIDs.get(a)] = true

		if len(a.FactTypes) > 0 {
			facts, ok := r.loadCachedFacts(a, pkg)
			if !ok {
				failed = true
				return
			}

			for _, f := range facts {
				if f.Path == "" {
					// This is a package fact
					pkg.pkgFacts[r.analyzerIDs.get(a)] = append(pkg.pkgFacts[r.analyzerIDs.get(a)], f.Fact)
					continue
				}
				obj, err := objectpath.Object(pkg.Types, objectpath.Path(f.Path))
				if err != nil {
					// Be lenient about these errors. For example, when
					// analysing io/ioutil from source, we may get a fact
					// for methods on the devNull type, and objectpath
					// will happily create a path for them. However, when
					// we later load io/ioutil from export data, the path
					// no longer resolves.
					//
					// If an exported type embeds the unexported type,
					// then (part of) the unexported type will become part
					// of the type information and our path will resolve
					// again.
					continue
				}
				pkg.facts[r.analyzerIDs.get(a)][obj] = append(pkg.facts[r.analyzerIDs.get(a)][obj], f.Fact)
			}
		}

		for _, req := range a.Requires {
			dfs(req)
		}
	}
	for _, a := range analyzers {
		dfs(a)
	}

	if failed {
		pkg.fromSource = true
		// XXX we added facts to the maps, we need to get rid of those
		return r.ld.LoadFromSource(pkg.Package)
	}

	return nil
}

type analysisError struct {
	analyzer *analysis.Analyzer
	pkg      *Package
	err      error
}

func (err analysisError) Error() string {
	return fmt.Sprintf("error running analyzer %s on %s: %s", err.analyzer, err.pkg, err.err)
}

// processPkg processes a package. This involves loading the package,
// either from export data or from source. For packages loaded from
// source, the provided analyzers will be run on the package.
func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) {
	defer func() {
		// Clear information we no longer need. Make sure to do this
		// when returning from processPkg so that we clear
		// dependencies, not just initial packages.
		pkg.TypesInfo = nil
		pkg.Syntax = nil
		pkg.results = nil

		atomic.AddUint32(&r.stats.ProcessedPackages, 1)
		pkg.decUse()
		close(pkg.done)
	}()

	// Ensure all packages have the generated map and config. This is
	// required by internals of the runner. Analyses that themselves
	// make use of either have an explicit dependency so that other
	// runners work correctly, too.
	analyzers = append(analyzers[0:len(analyzers):len(analyzers)], injectedAnalyses...)

	if len(pkg.errs) != 0 {
		return
	}

	for _, imp := range pkg.Imports {
		<-imp.done
		if len(imp.errs) > 0 {
			if imp.initial {
				// Don't print the error of the dependency since it's
				// an initial package and we're already printing the
				// error.
				pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s", imp, pkg))
			} else {
				var s string
				for _, err := range imp.errs {
					s += "\n\t" + err.Error()
				}
				pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s: %s", imp, pkg, s))
			}
			return
		}
	}
	if pkg.PkgPath == "unsafe" {
		pkg.Types = types.Unsafe
		return
	}

	r.loadSem <- struct{}{}
	atomic.AddUint32(&r.stats.ActiveWorkers, 1)
	defer func() {
		<-r.loadSem
		atomic.AddUint32(&r.stats.ActiveWorkers, ^uint32(0))
	}()
	if err := r.loadPkg(pkg, analyzers); err != nil {
		pkg.errs = append(pkg.errs, err)
		return
	}

	// A package's object facts are the union of those of all of its
	// dependencies.
	for _, imp := range pkg.Imports {
		for ai, m := range imp.facts {
			for obj, facts := range m {
				pkg.facts[ai][obj] = facts[0:len(facts):len(facts)]
			}
		}
	}

	if !pkg.fromSource {
		// Nothing left to do for the package.
		return
	}

	// Run analyses on initial packages and those missing facts
	var wg sync.WaitGroup
	wg.Add(len(analyzers))
	errs := make([]error, len(analyzers))
	var acs []*analysisAction
	for i, a := range analyzers {
		i := i
		a := a
		ac := r.makeAnalysisAction(a, pkg)
		acs = append(acs, ac)
		go func() {
			defer wg.Done()
			// Only initial packages and packages with missing
			// facts will have been loaded from source.
			if pkg.initial || r.hasFacts(a) {
				if _, err := r.runAnalysis(ac); err != nil {
					errs[i] = analysisError{a, pkg, err}
					return
				}
			}
		}()
	}
	wg.Wait()

	depErrors := map[dependencyError]int{}
	for _, err := range errs {
		if err == nil {
			continue
		}
		switch err := err.(type) {
		case analysisError:
			switch err := err.err.(type) {
			case dependencyError:
				depErrors[err.nested()]++
			default:
				pkg.errs = append(pkg.errs, err)
			}
		default:
			pkg.errs = append(pkg.errs, err)
		}
	}
	for err, count := range depErrors {
		pkg.errs = append(pkg.errs,
			fmt.Errorf("could not run %s@%s, preventing %d analyzers from running: %s", err.dep, pkg, count, err.err))
	}

	// We can't process ignores at this point because `unused` needs
	// to see more than one package to make its decision.
	ignores, problems := parseDirectives(pkg.Package)
	pkg.ignores = append(pkg.ignores, ignores...)
	pkg.problems = append(pkg.problems, problems...)
	for _, ac := range acs {
		pkg.problems = append(pkg.problems, ac.problems...)
	}

	if pkg.initial {
		// Only initial packages have these analyzers run, and only
		// initial packages need these.
		if pkg.results[r.analyzerIDs.get(config.Analyzer)].v != nil {
			pkg.cfg = pkg.results[r.analyzerIDs.get(config.Analyzer)].v.(*config.Config)
		}
		pkg.gen = pkg.results[r.analyzerIDs.get(facts.Generated)].v.(map[string]facts.Generator)
	}

	// In a previous version of the code, we would throw away all type
	// information and reload it from export data. That was
	// nonsensical. The *types.Package doesn't keep any information
	// live that export data wouldn't also. We only need to discard
	// the AST and the TypesInfo maps; that happens after we return
	// from processPkg.
}

// hasFacts reports whether an analysis exports any facts. An analysis
// that has a transitive dependency that exports facts is considered
// to be exporting facts.
func (r *Runner) hasFacts(a *analysis.Analyzer) bool {
	ret := false
	seen := make([]bool, len(r.analyzerIDs.m))
	var dfs func(*analysis.Analyzer)
	dfs = func(a *analysis.Analyzer) {
		if seen[r.analyzerIDs.get(a)] {
			return
		}
		seen[r.analyzerIDs.get(a)] = true
		if len(a.FactTypes) > 0 {
			ret = true
		}
		for _, req := range a.Requires {
			if ret {
				break
			}
			dfs(req)
		}
	}
	dfs(a)
	return ret
}

func parseDirective(s string) (cmd string, args []string) {
	if !strings.HasPrefix(s, "//lint:") {
		return "", nil
	}
	s = strings.TrimPrefix(s, "//lint:")
	fields := strings.Split(s, " ")
	return fields[0], fields[1:]
}
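
// Editor's sketch (hypothetical; not part of the upstream file): a
// directive such as
//
//	//lint:ignore SA4006 this value is deliberately unused
//
// is split by parseDirective into the command and its space-separated
// arguments; the first argument carries the comma-separated check
// names and the remainder is the reason.
func exampleParseDirective() (string, []string) {
	cmd, args := parseDirective("//lint:ignore SA4006 this value is deliberately unused")
	// cmd == "ignore"
	// args == []string{"SA4006", "this", "value", "is", "deliberately", "unused"}
	return cmd, args
}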

// parseDirectives extracts all linter directives from the source
// files of the package. Malformed directives are returned as problems.
func parseDirectives(pkg *packages.Package) ([]Ignore, []Problem) {
	var ignores []Ignore
	var problems []Problem

	for _, f := range pkg.Syntax {
		found := false
	commentLoop:
		for _, cg := range f.Comments {
			for _, c := range cg.List {
				if strings.Contains(c.Text, "//lint:") {
					found = true
					break commentLoop
				}
			}
		}
		if !found {
			continue
		}
		cm := ast.NewCommentMap(pkg.Fset, f, f.Comments)
		for node, cgs := range cm {
			for _, cg := range cgs {
				for _, c := range cg.List {
					if !strings.HasPrefix(c.Text, "//lint:") {
						continue
					}
					cmd, args := parseDirective(c.Text)
					switch cmd {
					case "ignore", "file-ignore":
						if len(args) < 2 {
							p := Problem{
								Pos:      DisplayPosition(pkg.Fset, c.Pos()),
								Message:  "malformed linter directive; missing the required reason field?",
								Severity: Error,
								Check:    "compile",
							}
							problems = append(problems, p)
							continue
						}
					default:
						// unknown directive, ignore
						continue
					}
					checks := strings.Split(args[0], ",")
					pos := DisplayPosition(pkg.Fset, node.Pos())
					var ig Ignore
					switch cmd {
					case "ignore":
						ig = &LineIgnore{
							File:   pos.Filename,
							Line:   pos.Line,
							Checks: checks,
							Pos:    c.Pos(),
						}
					case "file-ignore":
						ig = &FileIgnore{
							File:   pos.Filename,
							Checks: checks,
						}
					}
					ignores = append(ignores, ig)
				}
			}
		}
	}

	return ignores, problems
}

// packageHash computes a package's hash. The hash is based on all Go
// files that make up the package, as well as the hashes of imported
// packages.
func packageHash(pkg *Package) (string, error) {
	key := cache.NewHash("package hash")
	fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
	for _, f := range pkg.CompiledGoFiles {
		h, err := cache.FileHash(f)
		if err != nil {
			return "", err
		}
		fmt.Fprintf(key, "file %s %x\n", f, h)
	}

	imps := make([]*Package, len(pkg.Imports))
	copy(imps, pkg.Imports)
	sort.Slice(imps, func(i, j int) bool {
		return imps[i].PkgPath < imps[j].PkgPath
	})
	for _, dep := range imps {
		if dep.PkgPath == "unsafe" {
			continue
		}

		fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, dep.hash)
	}
	h := key.Sum()
	return hex.EncodeToString(h[:]), nil
}
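
// Editor's note (illustrative, not part of the upstream file): because
// each import contributes its own hash, the key is effectively
//
//	hash(P) = H(pkgpath, file hashes..., hash(dep) for each sorted dep)
//
// so a change anywhere in the transitive dependency graph invalidates
// the cached results of every package above it.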

// passActionID computes an ActionID for an analysis pass.
func passActionID(pkg *Package, analyzer *analysis.Analyzer) (cache.ActionID, error) {
	key := cache.NewHash("action ID")
	fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
	fmt.Fprintf(key, "pkghash %s\n", pkg.hash)
	fmt.Fprintf(key, "analyzer %s\n", analyzer.Name)

	return key.Sum(), nil
}
20
vendor/honnef.co/go/tools/lint/stats.go
vendored
Normal file
@ -0,0 +1,20 @@
package lint

const (
	StateInitializing = 0
	StateGraph        = 1
	StateProcessing   = 2
	StateCumulative   = 3
)

type Stats struct {
	State uint32

	InitialPackages          uint32
	TotalPackages            uint32
	ProcessedPackages        uint32
	ProcessedInitialPackages uint32
	Problems                 uint32
	ActiveWorkers            uint32
	TotalWorkers             uint32
}
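
// Editor's note (illustrative, not part of the upstream file): the
// runner updates these counters with sync/atomic while analysis is
// running, so a progress reporter should read them atomically too,
// e.g.
//
//	processed := atomic.LoadUint32(&stats.ProcessedPackages)
//	total := atomic.LoadUint32(&stats.TotalPackages)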