
Update and fix staticcheck

kolaente
2020-05-29 22:15:21 +02:00
parent aae1bc3cab
commit a525787ab7
100 changed files with 12353 additions and 7912 deletions

View File

@ -3,6 +3,7 @@ package lint // import "honnef.co/go/tools/lint"
import (
"bytes"
"encoding/gob"
"fmt"
"go/scanner"
"go/token"
@ -17,6 +18,7 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
"honnef.co/go/tools/config"
"honnef.co/go/tools/internal/cache"
)
type Documentation struct {
@ -62,7 +64,7 @@ type LineIgnore struct {
Line int
Checks []string
Matched bool
Pos token.Pos
Pos token.Position
}
func (li *LineIgnore) Match(p Problem) bool {
@ -119,6 +121,21 @@ type Problem struct {
Message string
Check string
Severity Severity
Related []Related
}
type Related struct {
Pos token.Position
End token.Position
Message string
}
func (p Problem) Equal(o Problem) bool {
return p.Pos == o.Pos &&
p.End == o.End &&
p.Message == o.Message &&
p.Check == o.Check &&
p.Severity == o.Severity
}
func (p *Problem) String() string {
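One likely reason for the new Equal method is visible in the struct above: Problem now carries a Related slice, and Go structs containing slices cannot be compared with ==, so the deduplication pass later in Lint switches from != to an explicit field-by-field comparison. A minimal illustration of the language rule, with hypothetical types:

type problem struct {
	message string
	related []string // a slice field makes the struct non-comparable
}

func equal(a, b problem) bool {
	// return a == b // compile error: struct containing []string cannot be compared
	return a.message == b.message // compare fields explicitly instead
}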
@ -132,6 +149,7 @@ type Linter struct {
GoVersion int
Config config.Config
Stats Stats
RepeatAnalyzers uint
}
type CumulativeChecker interface {
@ -184,6 +202,7 @@ func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error
return nil, err
}
r.goVersion = l.GoVersion
r.repeatAnalyzers = l.RepeatAnalyzers
pkgs, err := r.Run(cfg, patterns, allowedAnalyzers, hasCumulative)
if err != nil {
@ -264,10 +283,12 @@ func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error
}
atomic.StoreUint32(&r.stats.State, StateCumulative)
var problems []Problem
for _, cum := range l.CumulativeCheckers {
for _, res := range cum.Result() {
pkg := tpkgToPkg[res.Pkg()]
if pkg == nil {
panic(fmt.Sprintf("analyzer %s flagged object %s in package %s, a package that we aren't tracking", cum.Analyzer(), res, res.Pkg()))
}
allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
if allowedChecks[cum.Analyzer().Name] {
pos := DisplayPosition(pkg.Fset, res.Pos())
@ -278,21 +299,51 @@ func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error
continue
}
p := cum.ProblemObject(pkg.Fset, res)
problems = append(problems, p)
pkg.problems = append(pkg.problems, p)
}
}
}
for _, pkg := range pkgs {
if !pkg.fromSource {
// Don't cache packages that we loaded from the cache
continue
}
cpkg := cachedPackage{
Problems: pkg.problems,
Ignores: pkg.ignores,
Config: pkg.cfg,
}
buf := &bytes.Buffer{}
if err := gob.NewEncoder(buf).Encode(cpkg); err != nil {
return nil, err
}
id := cache.Subkey(pkg.actionID, "data "+r.problemsCacheKey)
if err := r.cache.PutBytes(id, buf.Bytes()); err != nil {
return nil, err
}
}
var problems []Problem
// Deduplicate line ignores. When U1000 processes a package and
// its test variant, it will only emit a single problem for an
// unused object, not two problems. We will, however, have two
// line ignores, one per package. Without deduplication, one line
// ignore will be marked as matched, while the other one won't,
// subsequently reporting a "this linter directive didn't match
// anything" error.
ignores := map[token.Position]Ignore{}
for _, pkg := range pkgs {
for _, ig := range pkg.ignores {
for i := range pkg.problems {
p := &pkg.problems[i]
if ig.Match(*p) {
p.Severity = Ignored
if lig, ok := ig.(*LineIgnore); ok {
ig = ignores[lig.Pos]
if ig == nil {
ignores[lig.Pos] = lig
ig = lig
}
}
for i := range problems {
p := &problems[i]
for i := range pkg.problems {
p := &pkg.problems[i]
if ig.Match(*p) {
p.Severity = Ignored
}
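For context, this is the kind of directive being deduplicated, written as a staticcheck user would (check ID and reason are hypothetical). When a package and its test variant are both linted, the single comment yields one LineIgnore per package, and the position-keyed map above collapses the pair:

//lint:ignore U1000 kept around for the v2 API
func unusedHelper() {}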
@ -318,6 +369,7 @@ func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error
if !ok {
continue
}
ig = ignores[ig.Pos].(*LineIgnore)
if ig.Matched {
continue
}
@ -338,7 +390,7 @@ func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error
continue
}
p := Problem{
Pos: DisplayPosition(pkg.Fset, ig.Pos),
Pos: ig.Pos,
Message: "this linter directive didn't match anything; should it be removed?",
Check: "",
}
@ -372,7 +424,7 @@ func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error
for i, p := range problems[1:] {
// We may encounter duplicate problems because one file
// can be part of many packages.
if problems[i] != p {
if !problems[i].Equal(p) {
out = append(out, p)
}
}
@ -422,10 +474,6 @@ func FilterChecks(allChecks []*analysis.Analyzer, checks []string) map[string]bo
return allowedChecks
}
type Positioner interface {
Pos() token.Pos
}
func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position {
if p == token.NoPos {
return token.Position{}

View File

@ -4,283 +4,14 @@ package lintdsl
import (
"bytes"
"flag"
"fmt"
"go/ast"
"go/constant"
"go/printer"
"go/token"
"go/types"
"strings"
"go/format"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/facts"
"honnef.co/go/tools/lint"
"honnef.co/go/tools/ssa"
"honnef.co/go/tools/pattern"
)
type packager interface {
Package() *ssa.Package
}
func CallName(call *ssa.CallCommon) string {
if call.IsInvoke() {
return ""
}
switch v := call.Value.(type) {
case *ssa.Function:
fn, ok := v.Object().(*types.Func)
if !ok {
return ""
}
return lint.FuncName(fn)
case *ssa.Builtin:
return v.Name()
}
return ""
}
func IsCallTo(call *ssa.CallCommon, name string) bool { return CallName(call) == name }
func IsType(T types.Type, name string) bool { return types.TypeString(T, nil) == name }
func FilterDebug(instr []ssa.Instruction) []ssa.Instruction {
var out []ssa.Instruction
for _, ins := range instr {
if _, ok := ins.(*ssa.DebugRef); !ok {
out = append(out, ins)
}
}
return out
}
func IsExample(fn *ssa.Function) bool {
if !strings.HasPrefix(fn.Name(), "Example") {
return false
}
f := fn.Prog.Fset.File(fn.Pos())
if f == nil {
return false
}
return strings.HasSuffix(f.Name(), "_test.go")
}
func IsPointerLike(T types.Type) bool {
switch T := T.Underlying().(type) {
case *types.Interface, *types.Chan, *types.Map, *types.Signature, *types.Pointer:
return true
case *types.Basic:
return T.Kind() == types.UnsafePointer
}
return false
}
func IsIdent(expr ast.Expr, ident string) bool {
id, ok := expr.(*ast.Ident)
return ok && id.Name == ident
}
// isBlank returns whether id is the blank identifier "_".
// If id == nil, the answer is false.
func IsBlank(id ast.Expr) bool {
ident, _ := id.(*ast.Ident)
return ident != nil && ident.Name == "_"
}
func IsIntLiteral(expr ast.Expr, literal string) bool {
lit, ok := expr.(*ast.BasicLit)
return ok && lit.Kind == token.INT && lit.Value == literal
}
// Deprecated: use IsIntLiteral instead
func IsZero(expr ast.Expr) bool {
return IsIntLiteral(expr, "0")
}
func IsOfType(pass *analysis.Pass, expr ast.Expr, name string) bool {
return IsType(pass.TypesInfo.TypeOf(expr), name)
}
func IsInTest(pass *analysis.Pass, node lint.Positioner) bool {
// FIXME(dh): this doesn't work for global variables with
// initializers
f := pass.Fset.File(node.Pos())
return f != nil && strings.HasSuffix(f.Name(), "_test.go")
}
func IsInMain(pass *analysis.Pass, node lint.Positioner) bool {
if node, ok := node.(packager); ok {
return node.Package().Pkg.Name() == "main"
}
return pass.Pkg.Name() == "main"
}
func SelectorName(pass *analysis.Pass, expr *ast.SelectorExpr) string {
info := pass.TypesInfo
sel := info.Selections[expr]
if sel == nil {
if x, ok := expr.X.(*ast.Ident); ok {
pkg, ok := info.ObjectOf(x).(*types.PkgName)
if !ok {
// This shouldn't happen
return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name)
}
return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name)
}
panic(fmt.Sprintf("unsupported selector: %v", expr))
}
return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name())
}
func IsNil(pass *analysis.Pass, expr ast.Expr) bool {
return pass.TypesInfo.Types[expr].IsNil()
}
func BoolConst(pass *analysis.Pass, expr ast.Expr) bool {
val := pass.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val()
return constant.BoolVal(val)
}
func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool {
// We explicitly don't support typed bools because more often than
// not, custom bool types are used as binary enums and the
// explicit comparison is desired.
ident, ok := expr.(*ast.Ident)
if !ok {
return false
}
obj := pass.TypesInfo.ObjectOf(ident)
c, ok := obj.(*types.Const)
if !ok {
return false
}
basic, ok := c.Type().(*types.Basic)
if !ok {
return false
}
if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool {
return false
}
return true
}
func ExprToInt(pass *analysis.Pass, expr ast.Expr) (int64, bool) {
tv := pass.TypesInfo.Types[expr]
if tv.Value == nil {
return 0, false
}
if tv.Value.Kind() != constant.Int {
return 0, false
}
return constant.Int64Val(tv.Value)
}
func ExprToString(pass *analysis.Pass, expr ast.Expr) (string, bool) {
val := pass.TypesInfo.Types[expr].Value
if val == nil {
return "", false
}
if val.Kind() != constant.String {
return "", false
}
return constant.StringVal(val), true
}
// Dereference returns a pointer's element type; otherwise it returns
// T.
func Dereference(T types.Type) types.Type {
if p, ok := T.Underlying().(*types.Pointer); ok {
return p.Elem()
}
return T
}
// DereferenceR returns a pointer's element type; otherwise it returns
// T. If the element type is itself a pointer, DereferenceR will be
// applied recursively.
func DereferenceR(T types.Type) types.Type {
if p, ok := T.Underlying().(*types.Pointer); ok {
return DereferenceR(p.Elem())
}
return T
}
func IsGoVersion(pass *analysis.Pass, minor int) bool {
version := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter).Get().(int)
return version >= minor
}
func CallNameAST(pass *analysis.Pass, call *ast.CallExpr) string {
switch fun := call.Fun.(type) {
case *ast.SelectorExpr:
fn, ok := pass.TypesInfo.ObjectOf(fun.Sel).(*types.Func)
if !ok {
return ""
}
return lint.FuncName(fn)
case *ast.Ident:
obj := pass.TypesInfo.ObjectOf(fun)
switch obj := obj.(type) {
case *types.Func:
return lint.FuncName(obj)
case *types.Builtin:
return obj.Name()
default:
return ""
}
default:
return ""
}
}
func IsCallToAST(pass *analysis.Pass, node ast.Node, name string) bool {
call, ok := node.(*ast.CallExpr)
if !ok {
return false
}
return CallNameAST(pass, call) == name
}
func IsCallToAnyAST(pass *analysis.Pass, node ast.Node, names ...string) bool {
for _, name := range names {
if IsCallToAST(pass, node, name) {
return true
}
}
return false
}
func Render(pass *analysis.Pass, x interface{}) string {
var buf bytes.Buffer
if err := printer.Fprint(&buf, pass.Fset, x); err != nil {
panic(err)
}
return buf.String()
}
func RenderArgs(pass *analysis.Pass, args []ast.Expr) string {
var ss []string
for _, arg := range args {
ss = append(ss, Render(pass, arg))
}
return strings.Join(ss, ", ")
}
func Preamble(f *ast.File) string {
cutoff := f.Package
if f.Doc != nil {
cutoff = f.Doc.Pos()
}
var out []string
for _, cmt := range f.Comments {
if cmt.Pos() >= cutoff {
break
}
out = append(out, cmt.Text())
}
return strings.Join(out, "\n")
}
func Inspect(node ast.Node, fn func(node ast.Node) bool) {
if node == nil {
return
@ -288,113 +19,40 @@ func Inspect(node ast.Node, fn func(node ast.Node) bool) {
ast.Inspect(node, fn)
}
func GroupSpecs(fset *token.FileSet, specs []ast.Spec) [][]ast.Spec {
if len(specs) == 0 {
return nil
func Match(pass *analysis.Pass, q pattern.Pattern, node ast.Node) (*pattern.Matcher, bool) {
// Note that we ignore q.Relevant; callers of Match usually use
// AST inspectors that already filter on nodes we're interested
// in.
m := &pattern.Matcher{TypesInfo: pass.TypesInfo}
ok := m.Match(q.Root, node)
return m, ok
}
func MatchAndEdit(pass *analysis.Pass, before, after pattern.Pattern, node ast.Node) (*pattern.Matcher, []analysis.TextEdit, bool) {
m, ok := Match(pass, before, node)
if !ok {
return m, nil, false
}
groups := make([][]ast.Spec, 1)
groups[0] = append(groups[0], specs[0])
r := pattern.NodeToAST(after.Root, m.State)
buf := &bytes.Buffer{}
format.Node(buf, pass.Fset, r)
edit := []analysis.TextEdit{{
Pos: node.Pos(),
End: node.End(),
NewText: buf.Bytes(),
}}
return m, edit, true
}
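A sketch of how a checker might consume MatchAndEdit, assuming before and after are patterns built elsewhere (package qualifier omitted); the returned edits slot directly into a go/analysis suggested fix:

m, edits, ok := MatchAndEdit(pass, before, after, node)
if ok {
	_ = m // matcher state, e.g. captured bindings
	pass.Report(analysis.Diagnostic{
		Pos:     node.Pos(),
		End:     node.End(),
		Message: "this expression can be simplified",
		SuggestedFixes: []analysis.SuggestedFix{{
			Message:   "simplify expression",
			TextEdits: edits,
		}},
	})
}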
for _, spec := range specs[1:] {
g := groups[len(groups)-1]
if fset.PositionFor(spec.Pos(), false).Line-1 !=
fset.PositionFor(g[len(g)-1].End(), false).Line {
groups = append(groups, nil)
}
groups[len(groups)-1] = append(groups[len(groups)-1], spec)
func Selector(x, sel string) *ast.SelectorExpr {
return &ast.SelectorExpr{
X: &ast.Ident{Name: x},
Sel: &ast.Ident{Name: sel},
}
return groups
}
func IsObject(obj types.Object, name string) bool {
var path string
if pkg := obj.Pkg(); pkg != nil {
path = pkg.Path() + "."
}
return path+obj.Name() == name
}
type Field struct {
Var *types.Var
Tag string
Path []int
}
// FlattenFields recursively flattens T and embedded structs,
// returning a list of fields. If multiple fields with the same name
// exist, all will be returned.
func FlattenFields(T *types.Struct) []Field {
return flattenFields(T, nil, nil)
}
func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field {
if seen == nil {
seen = map[types.Type]bool{}
}
if seen[T] {
return nil
}
seen[T] = true
var out []Field
for i := 0; i < T.NumFields(); i++ {
field := T.Field(i)
tag := T.Tag(i)
np := append(path[:len(path):len(path)], i)
if field.Anonymous() {
if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok {
out = append(out, flattenFields(s, np, seen)...)
}
} else {
out = append(out, Field{field, tag, np})
}
}
return out
}
func File(pass *analysis.Pass, node lint.Positioner) *ast.File {
pass.Fset.PositionFor(node.Pos(), true)
m := pass.ResultOf[facts.TokenFile].(map[*token.File]*ast.File)
return m[pass.Fset.File(node.Pos())]
}
// IsGenerated reports whether pos is in a generated file. It ignores
// //line directives.
func IsGenerated(pass *analysis.Pass, pos token.Pos) bool {
_, ok := Generator(pass, pos)
return ok
}
// Generator returns the generator that generated the file containing
// pos. It ignores //line directives.
func Generator(pass *analysis.Pass, pos token.Pos) (facts.Generator, bool) {
file := pass.Fset.PositionFor(pos, false).Filename
m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
g, ok := m[file]
return g, ok
}
func ReportfFG(pass *analysis.Pass, pos token.Pos, f string, args ...interface{}) {
file := lint.DisplayPosition(pass.Fset, pos).Filename
m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
if _, ok := m[file]; ok {
return
}
pass.Reportf(pos, f, args...)
}
func ReportNodef(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
pass.Report(analysis.Diagnostic{Pos: node.Pos(), End: node.End(), Message: msg})
}
func ReportNodefFG(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) {
file := lint.DisplayPosition(pass.Fset, node.Pos()).Filename
m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
if _, ok := m[file]; ok {
return
}
ReportNodef(pass, node, format, args...)
// ExhaustiveTypeSwitch panics when called. It can be used to ensure
// that type switches are exhaustive.
func ExhaustiveTypeSwitch(v interface{}) {
panic(fmt.Sprintf("internal error: unhandled case %T", v))
}
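Typical use, sketched with hypothetical cases: the default arm turns a silently skipped type into an immediate, descriptive panic:

switch node := node.(type) {
case *ast.Ident:
	// handle identifiers
case *ast.SelectorExpr:
	// handle selector expressions
default:
	ExhaustiveTypeSwitch(node)
}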

View File

@ -39,7 +39,7 @@ func relativePositionString(pos token.Position) string {
}
type Statter interface {
Stats(total, errors, warnings int)
Stats(total, errors, warnings, ignored int)
}
type Formatter interface {
@ -51,7 +51,10 @@ type Text struct {
}
func (o Text) Format(p lint.Problem) {
fmt.Fprintf(o.W, "%v: %s\n", relativePositionString(p.Pos), p.String())
fmt.Fprintf(o.W, "%s: %s\n", relativePositionString(p.Pos), p.String())
for _, r := range p.Related {
fmt.Fprintf(o.W, "\t%s: %s\n", relativePositionString(r.Pos), r.Message)
}
}
type JSON struct {
@ -76,12 +79,18 @@ func (o JSON) Format(p lint.Problem) {
Line int `json:"line"`
Column int `json:"column"`
}
jp := struct {
Code string `json:"code"`
Severity string `json:"severity,omitempty"`
type related struct {
Location location `json:"location"`
End location `json:"end"`
Message string `json:"message"`
}
jp := struct {
Code string `json:"code"`
Severity string `json:"severity,omitempty"`
Location location `json:"location"`
End location `json:"end"`
Message string `json:"message"`
Related []related `json:"related,omitempty"`
}{
Code: p.Check,
Severity: severity(p.Severity),
@ -97,6 +106,21 @@ func (o JSON) Format(p lint.Problem) {
},
Message: p.Message,
}
for _, r := range p.Related {
jp.Related = append(jp.Related, related{
Location: location{
File: r.Pos.Filename,
Line: r.Pos.Line,
Column: r.Pos.Column,
},
End: location{
File: r.End.Filename,
Line: r.End.Line,
Column: r.End.Column,
},
Message: r.Message,
})
}
_ = json.NewEncoder(o.W).Encode(jp)
}
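For reference, the shape this encoder produces, shown indented with hypothetical values; the encoder itself writes each problem as a single line:

{
  "code": "SA4006",
  "severity": "warning",
  "location": {"file": "foo.go", "line": 10, "column": 6},
  "end": {"file": "foo.go", "line": 10, "column": 9},
  "message": "this value of err is never used",
  "related": [
    {
      "location": {"file": "foo.go", "line": 12, "column": 2},
      "end": {"file": "foo.go", "line": 12, "column": 5},
      "message": "this return statement may be reached without reading err"
    }
  ]
}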
@ -123,13 +147,16 @@ func (o *Stylish) Format(p lint.Problem) {
o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0)
}
fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", pos.Line, pos.Column, p.Check, p.Message)
for _, r := range p.Related {
fmt.Fprintf(o.tw, " (%d, %d)\t\t %s\n", r.Pos.Line, r.Pos.Column, r.Message)
}
}
func (o *Stylish) Stats(total, errors, warnings int) {
func (o *Stylish) Stats(total, errors, warnings, ignored int) {
if o.tw != nil {
o.tw.Flush()
fmt.Fprintln(o.W)
}
fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings)\n",
total, errors, warnings)
fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings, %d ignored)\n",
total, errors, warnings, ignored)
}

View File

@ -23,7 +23,9 @@ import (
"runtime/pprof"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"honnef.co/go/tools/config"
"honnef.co/go/tools/internal/cache"
@ -114,6 +116,8 @@ func FlagSet(name string) *flag.FlagSet {
flags.String("debug.memprofile", "", "Write memory profile to `file`")
flags.Bool("debug.version", false, "Print detailed version information about this program")
flags.Bool("debug.no-compile-errors", false, "Don't print compile errors")
flags.String("debug.measure-analyzers", "", "Write analysis measurements to `file`. `file` will be opened for appending if it already exists.")
flags.Uint("debug.repeat-analyzers", 0, "Run analyzers `num` times")
checks := list{"inherit"}
fail := list{"all"}
@ -153,6 +157,24 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *
memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string)
debugVersion := fs.Lookup("debug.version").Value.(flag.Getter).Get().(bool)
debugNoCompile := fs.Lookup("debug.no-compile-errors").Value.(flag.Getter).Get().(bool)
debugRepeat := fs.Lookup("debug.repeat-analyzers").Value.(flag.Getter).Get().(uint)
var measureAnalyzers func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration)
if path := fs.Lookup("debug.measure-analyzers").Value.(flag.Getter).Get().(string); path != "" {
f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
log.Fatal(err)
}
mu := &sync.Mutex{}
measureAnalyzers = func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration) {
mu.Lock()
defer mu.Unlock()
if _, err := fmt.Fprintf(f, "%s\t%s\t%d\n", analysis.Name, pkg.ID, d.Nanoseconds()); err != nil {
log.Println("error writing analysis measurements:", err)
}
}
}
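Each measurement appends one tab-separated record per (analyzer, package) pair: the analyzer name, the package ID, and the wall-clock duration in nanoseconds. A hypothetical sample:

SA4006	example.com/foo	1532100
SA4017	example.com/foo	987654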
cfg := config.Config{}
cfg.Checks = *fs.Lookup("checks").Value.(*list)
@ -218,10 +240,12 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *
}
ps, err := Lint(cs, cums, fs.Args(), &Options{
Tags: tags,
LintTests: tests,
GoVersion: goVersion,
Config: cfg,
Tags: tags,
LintTests: tests,
GoVersion: goVersion,
Config: cfg,
PrintAnalyzerMeasurement: measureAnalyzers,
RepeatAnalyzers: debugRepeat,
})
if err != nil {
fmt.Fprintln(os.Stderr, err)
@ -245,6 +269,7 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *
total int
errors int
warnings int
ignored int
)
fail := *fs.Lookup("fail").Value.(*list)
@ -262,6 +287,7 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *
continue
}
if p.Severity == lint.Ignored && !showIgnored {
ignored++
continue
}
if shouldExit[p.Check] {
@ -273,7 +299,7 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *
f.Format(p)
}
if f, ok := f.(format.Statter); ok {
f.Stats(total, errors, warnings)
f.Stats(total, errors, warnings, ignored)
}
if errors > 0 {
exit(1)
@ -284,9 +310,11 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *
type Options struct {
Config config.Config
Tags string
LintTests bool
GoVersion int
Tags string
LintTests bool
GoVersion int
PrintAnalyzerMeasurement func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration)
RepeatAnalyzers uint
}
func computeSalt() ([]byte, error) {
@ -325,7 +353,9 @@ func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string
CumulativeCheckers: cums,
GoVersion: opt.GoVersion,
Config: opt.Config,
RepeatAnalyzers: opt.RepeatAnalyzers,
}
l.Stats.PrintAnalyzerMeasurement = opt.PrintAnalyzerMeasurement
cfg := &packages.Config{}
if opt.LintTests {
cfg.Tests = true
@ -368,7 +398,8 @@ func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string
}()
}
return l.Lint(cfg, paths)
ps, err := l.Lint(cfg, paths)
return ps, err
}
var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?$`)
@ -390,3 +421,24 @@ func parsePos(pos string) token.Position {
Column: col,
}
}
func InitializeAnalyzers(docs map[string]*lint.Documentation, analyzers map[string]*analysis.Analyzer) map[string]*analysis.Analyzer {
out := make(map[string]*analysis.Analyzer, len(analyzers))
for k, v := range analyzers {
vc := *v
out[k] = &vc
vc.Name = k
doc, ok := docs[k]
if !ok {
panic(fmt.Sprintf("missing documentation for check %s", k))
}
vc.Doc = doc.String()
if vc.Flags.Usage == nil {
fs := flag.NewFlagSet("", flag.PanicOnError)
fs.Var(NewVersionFlag(), "go", "Target Go version")
vc.Flags = *fs
}
}
return out
}
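A sketch of the intended call pattern, with a hypothetical check ID and analyzer; because each entry is copied (vc := *v), one base analyzer value can safely back several named checks:

var baseAnalyzer *analysis.Analyzer // defined elsewhere

checks := InitializeAnalyzers(
	map[string]*lint.Documentation{
		"SA9999": {Title: "An example check"},
	},
	map[string]*analysis.Analyzer{
		"SA9999": baseAnalyzer,
	},
)
_ = checks["SA9999"] // a named, documented copy of baseAnalyzer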

View File

@ -1,6 +1,30 @@
package lint
/*
Package loading
Conceptually, package loading in the runner can be imagined as a
graph-shaped work list. We iteratively pop off leaf nodes (packages
that have no unloaded dependencies) and load data from export data,
our cache, or source.
Specifically, non-initial packages are loaded from export data and the
fact cache if possible, otherwise from source. Initial packages are
loaded from export data, the fact cache and the (problems, ignores,
config) cache if possible, otherwise from source.
The appeal of this approach is that it is both simple to implement and
easily parallelizable. Each leaf node can be processed independently,
and new leaf nodes appear as their dependencies are being processed.
The downside of this approach, however, is that we're doing more work
than necessary. Imagine an initial package A, which has the following
dependency chain: A->B->C->D in the current implementation, we will
load all 4 packages. However, if package A can be loaded fully from
cached information, then none of its dependencies are necessary, and
we could avoid loading them.
Parallelism
Runner implements parallel processing of packages by spawning one
@ -19,6 +43,34 @@ all execute in parallel, while not wasting resources for long linear
chains or trying to process more subgraphs in parallel than the system
can handle.
Caching
We make use of several caches. These caches are Go's export data, our
facts cache, and our (problems, ignores, config) cache.
Initial packages will either be loaded from a combination of all three
caches, or from source. Non-initial packages will either be loaded
from a combination of export data and facts cache, or from source.
The facts cache is separate from the (problems, ignores, config) cache
because when we process non-initial packages, we generate facts, but
we discard problems and ignores.
The facts cache is keyed by (package, analyzer), whereas the
(problems, ignores, config) cache is keyed by (package, list of
analyses). The difference between the two exists because there are
only a handful of analyses that produce facts, but hundreds of
analyses that don't. Creating one cache entry per fact-generating
analysis is feasible; creating one cache entry per normal analysis has
significant performance and storage overheads.
The downside of keying by the list of analyses is, naturally, that a
change in the list of analyses changes the cache key. `staticcheck -checks
A` and `staticcheck -checks A,B` will therefore need their own cache
entries and not reuse each other's work. This problem does not affect
the facts cache.
*/
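As a concrete illustration of the work list described above, a minimal serial sketch with hypothetical types; the real runner processes leaf packages concurrently, one goroutine per package, rather than in passes:

package main

import "fmt"

type node struct {
	name    string
	deps    int     // dependencies not yet loaded
	parents []*node // packages that import this one
}

func main() {
	// The A->B->C->D chain from the example above.
	d := &node{name: "D"}
	c := &node{name: "C", deps: 1}
	b := &node{name: "B", deps: 1}
	a := &node{name: "A", deps: 1}
	d.parents = []*node{c}
	c.parents = []*node{b}
	b.parents = []*node{a}

	queue := []*node{a, b, c, d}
	for len(queue) > 0 {
		var next []*node
		for _, n := range queue {
			if n.deps > 0 {
				next = append(next, n) // not a leaf yet, revisit
				continue
			}
			fmt.Println("load", n.name) // from export data, cache, or source
			for _, p := range n.parents {
				p.deps-- // one fewer unloaded dependency
			}
		}
		queue = next
	}
	// Prints: load D, load C, load B, load A
}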
import (
@ -37,6 +89,7 @@ import (
"strings"
"sync"
"sync/atomic"
"time"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
@ -47,6 +100,11 @@ import (
"honnef.co/go/tools/loader"
)
func init() {
gob.Register(&FileIgnore{})
gob.Register(&LineIgnore{})
}
// If enabled, abuse of the go/analysis API will lead to panics
const sanityCheck = true
@ -58,21 +116,43 @@ const sanityCheck = true
// This may change unused's behavior, however, as it may observe fewer
// interfaces from transitive dependencies.
// OPT(dh): every single package will have the same value for
// canClearTypes. We could move the Package.decUse method to runner to
// eliminate this field. This is probably not worth it, though. There
// are only thousands of packages, so the field only takes up
// kilobytes of memory.
// OPT(dh): do we really need the Package.gen field? it's based
// trivially on pkg.results and merely caches the result of a type
// assertion. How often do we actually use the field?
type Package struct {
// dependents is initially set to 1 plus the number of packages
// that directly import this package. It is atomically decreased
// by 1 every time a dependent has been processed or when the
// package itself has been processed. Once the value reaches zero,
// the package is no longer needed.
dependents uint64
*packages.Package
Imports []*Package
initial bool
Imports []*Package
initial bool
// fromSource is set to true for packages that have been loaded
// from source. This is the case for initial packages, packages
// with missing export data, and packages with no cached facts.
fromSource bool
hash string
done chan struct{}
// hash stores the package hash, as computed by packageHash
hash string
actionID cache.ActionID
done chan struct{}
resultsMu sync.Mutex
// results maps analyzer IDs to analyzer results
// results maps analyzer IDs to analyzer results. It is
// implemented as a deduplicating concurrent cache.
results []*result
cfg *config.Config
cfg *config.Config
// gen maps file names to the code generator that created them
gen map[string]facts.Generator
problems []Problem
ignores []Ignore
@ -82,12 +162,22 @@ type Package struct {
facts []map[types.Object][]analysis.Fact
pkgFacts [][]analysis.Fact
// canClearTypes is set to true if we can discard type
// information after the package and its dependents have been
// processed. This is the case when no cumulative checkers are
// being run.
canClearTypes bool
}
type cachedPackage struct {
Problems []Problem
Ignores []Ignore
Config *config.Config
}
func (pkg *Package) decUse() {
atomic.AddUint64(&pkg.dependents, ^uint64(0))
if atomic.LoadUint64(&pkg.dependents) == 0 {
ret := atomic.AddUint64(&pkg.dependents, ^uint64(0))
if ret == 0 {
// nobody depends on this package anymore
if pkg.canClearTypes {
pkg.Types = nil
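The two-line change in decUse above is a standard atomic refcount idiom worth spelling out: with a separate LoadUint64, two goroutines can both observe zero after their decrements and run the cleanup twice, whereas branching on the value returned by AddUint64 guarantees exactly one goroutine sees the final decrement. In isolation:

// release decrements *refs and runs cleanup exactly once, in
// whichever goroutine performed the final decrement.
func release(refs *uint64, cleanup func()) {
	if atomic.AddUint64(refs, ^uint64(0)) == 0 {
		cleanup()
	}
}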
@ -108,16 +198,16 @@ type result struct {
}
type Runner struct {
ld loader.Loader
cache *cache.Cache
cache *cache.Cache
goVersion int
stats *Stats
repeatAnalyzers uint
analyzerIDs analyzerIDs
analyzerIDs analyzerIDs
problemsCacheKey string
// limits parallelism of loading packages
loadSem chan struct{}
goVersion int
stats *Stats
}
type analyzerIDs struct {
@ -225,6 +315,13 @@ func (ac *analysisAction) report(pass *analysis.Pass, d analysis.Diagnostic) {
Message: d.Message,
Check: pass.Analyzer.Name,
}
for _, r := range d.Related {
p.Related = append(p.Related, Related{
Pos: DisplayPosition(pass.Fset, r.Pos),
End: DisplayPosition(pass.Fset, r.End),
Message: r.Message,
})
}
ac.problems = append(ac.problems, p)
}
@ -278,6 +375,21 @@ func (r *Runner) runAnalysis(ac *analysisAction) (ret interface{}, err error) {
}
}
func (r *Runner) loadCachedPackage(pkg *Package, analyzers []*analysis.Analyzer) (cachedPackage, bool) {
// OPT(dh): we can cache this computation; it'll be the same for all packages
id := cache.Subkey(pkg.actionID, "data "+r.problemsCacheKey)
b, _, err := r.cache.GetBytes(id)
if err != nil {
return cachedPackage{}, false
}
var cpkg cachedPackage
if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&cpkg); err != nil {
return cachedPackage{}, false
}
return cpkg, true
}
func (r *Runner) loadCachedFacts(a *analysis.Analyzer, pkg *Package) ([]Fact, bool) {
if len(a.FactTypes) == 0 {
return nil, true
@ -285,10 +397,7 @@ func (r *Runner) loadCachedFacts(a *analysis.Analyzer, pkg *Package) ([]Fact, bo
var facts []Fact
// Look in the cache for facts
aID, err := passActionID(pkg, a)
if err != nil {
return nil, false
}
aID := passActionID(pkg, a)
aID = cache.Subkey(aID, "facts")
b, _, err := r.cache.GetBytes(aID)
if err != nil {
@ -378,9 +487,15 @@ func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (inter
}
// Then with this analyzer
ret, err := ac.analyzer.Run(pass)
if err != nil {
return nil, err
var ret interface{}
for i := uint(0); i < r.repeatAnalyzers+1; i++ {
var err error
t := time.Now()
ret, err = ac.analyzer.Run(pass)
r.stats.MeasureAnalyzer(ac.analyzer, ac.pkg, time.Since(t))
if err != nil {
return nil, err
}
}
if len(ac.analyzer.FactTypes) > 0 {
@ -404,16 +519,7 @@ func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (inter
}
}
buf := &bytes.Buffer{}
if err := gob.NewEncoder(buf).Encode(facts); err != nil {
return nil, err
}
aID, err := passActionID(ac.pkg, ac.analyzer)
if err != nil {
return nil, err
}
aID = cache.Subkey(aID, "facts")
if err := r.cache.PutBytes(aID, buf.Bytes()); err != nil {
if err := r.cacheData(facts, ac.pkg, ac.analyzer, "facts"); err != nil {
return nil, err
}
}
@ -421,6 +527,19 @@ func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (inter
return ret, nil
}
func (r *Runner) cacheData(v interface{}, pkg *Package, a *analysis.Analyzer, subkey string) error {
buf := &bytes.Buffer{}
if err := gob.NewEncoder(buf).Encode(v); err != nil {
return err
}
aID := passActionID(pkg, a)
aID = cache.Subkey(aID, subkey)
if err := r.cache.PutBytes(aID, buf.Bytes()); err != nil {
return err
}
return nil
}
func NewRunner(stats *Stats) (*Runner, error) {
cache, err := cache.Default()
if err != nil {
@ -438,9 +557,17 @@ func NewRunner(stats *Stats) (*Runner, error) {
// diagnostics as well as extracted ignore directives.
//
// Note that diagnostics have not been filtered at this point yet, to
// accomodate cumulative analyzes that require additional steps to
// accommodate cumulative analyses that require additional steps to
// produce diagnostics.
func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analysis.Analyzer, hasCumulative bool) ([]*Package, error) {
checkerNames := make([]string, len(analyzers))
for i, a := range analyzers {
checkerNames[i] = a.Name
}
sort.Strings(checkerNames)
r.problemsCacheKey = strings.Join(checkerNames, " ")
var allAnalyzers []*analysis.Analyzer
r.analyzerIDs = analyzerIDs{m: map[*analysis.Analyzer]int{}}
id := 0
seen := map[*analysis.Analyzer]struct{}{}
@ -450,6 +577,7 @@ func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analy
return
}
seen[a] = struct{}{}
allAnalyzers = append(allAnalyzers, a)
r.analyzerIDs.m[a] = id
id++
for _, f := range a.FactTypes {
@ -468,6 +596,11 @@ func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analy
for _, a := range injectedAnalyses {
dfs(a)
}
// Run all analyzers on all packages (subject to further
// restrictions enforced later). This guarantees that if analyzer
// A1 depends on A2, and A2 has facts, then A2 will run on the
// dependencies of user-provided packages, even though A1 won't.
analyzers = allAnalyzers
var dcfg packages.Config
if cfg != nil {
@ -475,11 +608,10 @@ func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analy
}
atomic.StoreUint32(&r.stats.State, StateGraph)
initialPkgs, err := r.ld.Graph(dcfg, patterns...)
initialPkgs, err := loader.Graph(dcfg, patterns...)
if err != nil {
return nil, err
}
defer r.cache.Trim()
var allPkgs []*Package
@ -507,7 +639,8 @@ func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analy
m[l].Imports = append(m[l].Imports, m[v])
}
m[l].hash, err = packageHash(m[l])
m[l].hash, err = r.packageHash(m[l])
m[l].actionID = packageActionID(m[l])
if err != nil {
m[l].errs = append(m[l].errs, err)
}
@ -564,27 +697,36 @@ func parsePos(pos string) (token.Position, int, error) {
}, len(parts[0]), nil
}
// loadPkg loads a Go package. If the package is in the set of initial
// packages, it will be loaded from source, otherwise it will be
// loaded from export data. In the case that the package was loaded
// from export data, cached facts will also be loaded.
//
// Currently, only cached facts for this package will be loaded, not
// for any of its dependencies.
// loadPkg loads a Go package. It may be loaded from a combination of
// caches, or from source.
func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error {
if pkg.Types != nil {
panic(fmt.Sprintf("internal error: %s has already been loaded", pkg.Package))
}
// Load type information
if pkg.initial {
// Load package from source
pkg.fromSource = true
return r.ld.LoadFromSource(pkg.Package)
// Try to load cached package
cpkg, ok := r.loadCachedPackage(pkg, analyzers)
if ok {
pkg.problems = cpkg.Problems
pkg.ignores = cpkg.Ignores
pkg.cfg = cpkg.Config
} else {
pkg.fromSource = true
return loader.LoadFromSource(pkg.Package)
}
}
// At this point we're either working with a non-initial package,
// or we managed to load cached problems for the package. We still
// need export data and facts.
// OPT(dh): we don't need type information for this package if no
// other package depends on it. this may be the case for initial
// packages.
// Load package from export data
if err := r.ld.LoadFromExport(pkg.Package); err != nil {
if err := loader.LoadFromExport(pkg.Package); err != nil {
// We asked Go to give us up-to-date export data, yet
// we can't load it. There must be something wrong.
//
@ -597,7 +739,7 @@ func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error {
// FIXME(dh): we no longer reload from export data, so
// theoretically we should be able to continue
pkg.fromSource = true
if err := r.ld.LoadFromSource(pkg.Package); err != nil {
if err := loader.LoadFromSource(pkg.Package); err != nil {
return err
}
// Make sure this package can't be imported successfully
@ -658,13 +800,14 @@ func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error {
dfs(a)
}
if failed {
pkg.fromSource = true
// XXX we added facts to the maps, we need to get rid of those
return r.ld.LoadFromSource(pkg.Package)
if !failed {
return nil
}
return nil
// We failed to load some cached facts
pkg.fromSource = true
// XXX we added facts to the maps, we need to get rid of those
return loader.LoadFromSource(pkg.Package)
}
type analysisError struct {
@ -695,7 +838,7 @@ func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) {
}()
// Ensure all packages have the generated map and config. This is
// required by interna of the runner. Analyses that themselves
// required by internals of the runner. Analyses that themselves
// make use of either have an explicit dependency so that other
// runners work correctly, too.
analyzers = append(analyzers[0:len(analyzers):len(analyzers)], injectedAnalyses...)
@ -766,7 +909,7 @@ func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) {
defer wg.Done()
// Only initial packages and packages with missing
// facts will have been loaded from source.
if pkg.initial || r.hasFacts(a) {
if pkg.initial || len(a.FactTypes) > 0 {
if _, err := r.runAnalysis(ac); err != nil {
errs[i] = analysisError{a, pkg, err}
return
@ -800,6 +943,8 @@ func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) {
// We can't process ignores at this point because `unused` needs
// to see more than one package to make its decision.
//
// OPT(dh): can't we guard this block of code by pkg.initial?
ignores, problems := parseDirectives(pkg.Package)
pkg.ignores = append(pkg.ignores, ignores...)
pkg.problems = append(pkg.problems, problems...)
@ -824,32 +969,6 @@ func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) {
// from processPkg.
}
// hasFacts reports whether an analysis exports any facts. An analysis
// that has a transitive dependency that exports facts is considered
// to be exporting facts.
func (r *Runner) hasFacts(a *analysis.Analyzer) bool {
ret := false
seen := make([]bool, len(r.analyzerIDs.m))
var dfs func(*analysis.Analyzer)
dfs = func(a *analysis.Analyzer) {
if seen[r.analyzerIDs.get(a)] {
return
}
seen[r.analyzerIDs.get(a)] = true
if len(a.FactTypes) > 0 {
ret = true
}
for _, req := range a.Requires {
if ret {
break
}
dfs(req)
}
}
dfs(a)
return ret
}
func parseDirective(s string) (cmd string, args []string) {
if !strings.HasPrefix(s, "//lint:") {
return "", nil
@ -912,7 +1031,7 @@ func parseDirectives(pkg *packages.Package) ([]Ignore, []Problem) {
File: pos.Filename,
Line: pos.Line,
Checks: checks,
Pos: c.Pos(),
Pos: DisplayPosition(pkg.Fset, c.Pos()),
}
case "file-ignore":
ig = &FileIgnore{
@ -932,9 +1051,10 @@ func parseDirectives(pkg *packages.Package) ([]Ignore, []Problem) {
// packageHash computes a package's hash. The hash is based on all Go
// files that make up the package, as well as the hashes of imported
// packages.
func packageHash(pkg *Package) (string, error) {
func (r *Runner) packageHash(pkg *Package) (string, error) {
key := cache.NewHash("package hash")
fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
fmt.Fprintf(key, "go %d\n", r.goVersion)
for _, f := range pkg.CompiledGoFiles {
h, err := cache.FileHash(f)
if err != nil {
@ -943,6 +1063,28 @@ func packageHash(pkg *Package) (string, error) {
fmt.Fprintf(key, "file %s %x\n", f, h)
}
// Actually load the configuration to calculate its hash. This
// will take into consideration inheritance of configuration
// files, as well as the default configuration.
//
// OPT(dh): doing this means we'll load the config twice: once for
// computing the hash, and once when analyzing the package from
// source.
cdir := config.Dir(pkg.GoFiles)
if cdir == "" {
fmt.Fprintf(key, "file %s %x\n", config.ConfigName, [cache.HashSize]byte{})
} else {
cfg, err := config.Load(cdir)
if err != nil {
return "", err
}
h := cache.NewHash(config.ConfigName)
if _, err := h.Write([]byte(cfg.String())); err != nil {
return "", err
}
fmt.Fprintf(key, "file %s %x\n", config.ConfigName, h.Sum())
}
imps := make([]*Package, len(pkg.Imports))
copy(imps, pkg.Imports)
sort.Slice(imps, func(i, j int) bool {
@ -959,12 +1101,14 @@ func packageHash(pkg *Package) (string, error) {
return hex.EncodeToString(h[:]), nil
}
// passActionID computes an ActionID for an analysis pass.
func passActionID(pkg *Package, analyzer *analysis.Analyzer) (cache.ActionID, error) {
key := cache.NewHash("action ID")
func packageActionID(pkg *Package) cache.ActionID {
key := cache.NewHash("package ID")
fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
fmt.Fprintf(key, "pkghash %s\n", pkg.hash)
fmt.Fprintf(key, "analyzer %s\n", analyzer.Name)
return key.Sum(), nil
return key.Sum()
}
// passActionID computes an ActionID for an analysis pass.
func passActionID(pkg *Package, analyzer *analysis.Analyzer) cache.ActionID {
return cache.Subkey(pkg.actionID, fmt.Sprintf("analyzer %s", analyzer.Name))
}
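Taken together, the cache keys used in this file form a small hierarchy, summarized here with the names defined above:

// pkgID  := packageActionID(pkg)  // "package ID" hash of pkgpath + package hash
// passID := passActionID(pkg, a)  // Subkey(pkgID, "analyzer "+a.Name)
// facts entry:    cache.Subkey(passID, "facts")
// problems entry: cache.Subkey(pkgID, "data "+problemsCacheKey) // sorted checker names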

View File

@ -1,5 +1,11 @@
package lint
import (
"time"
"golang.org/x/tools/go/analysis"
)
const (
StateInitializing = 0
StateGraph = 1
@ -17,4 +23,16 @@ type Stats struct {
Problems uint32
ActiveWorkers uint32
TotalWorkers uint32
PrintAnalyzerMeasurement func(*analysis.Analyzer, *Package, time.Duration)
}
type AnalysisMeasurementKey struct {
Analysis string
Pkg string
}
func (s *Stats) MeasureAnalyzer(analysis *analysis.Analyzer, pkg *Package, d time.Duration) {
if s.PrintAnalyzerMeasurement != nil {
s.PrintAnalyzerMeasurement(analysis, pkg, d)
}
}