[chore]: Bump mvdan.cc/xurls/v2 from 2.5.0 to 2.6.0 (#3643)

Bumps [mvdan.cc/xurls/v2](https://github.com/mvdan/xurls) from 2.5.0 to 2.6.0.
- [Release notes](https://github.com/mvdan/xurls/releases)
- [Commits](https://github.com/mvdan/xurls/compare/v2.5.0...v2.6.0)

---
updated-dependencies:
- dependency-name: mvdan.cc/xurls/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
dependabot[bot] 2025-01-14 13:11:20 +00:00 committed by GitHub
parent 4d423102c1
commit b95498b8c2
GPG key ID: B5690EEEBB952194
86 changed files with 5284 additions and 1449 deletions
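
For context on the bumped dependency: xurls extracts URLs from plain text using precompiled regular expressions. A minimal usage sketch of the v2 API follows (standalone; the sample text is invented and not part of this change):

```go
package main

import (
	"fmt"

	"mvdan.cc/xurls/v2"
)

func main() {
	// Strict matches only URLs that carry a scheme; Relaxed also
	// matches bare domains such as "example.com/docs".
	strict := xurls.Strict()
	relaxed := xurls.Relaxed()

	text := "See https://github.com/mvdan/xurls and example.com/docs for details."
	fmt.Println(strict.FindAllString(text, -1))  // [https://github.com/mvdan/xurls]
	fmt.Println(relaxed.FindAllString(text, -1)) // [https://github.com/mvdan/xurls example.com/docs]
}
```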

go.mod (8 changed lines)

@ -99,7 +99,7 @@ require (
gopkg.in/mcuadros/go-syslog.v2 v2.3.0
gopkg.in/yaml.v3 v3.0.1
modernc.org/sqlite v0.0.0-00010101000000-000000000000
mvdan.cc/xurls/v2 v2.5.0
mvdan.cc/xurls/v2 v2.6.0
)
require (
@ -205,7 +205,7 @@ require (
github.com/puzpuzpuz/xsync/v3 v3.4.0 // indirect
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a // indirect
github.com/rs/xid v1.6.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
@ -233,10 +233,10 @@ require (
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.12.0 // indirect
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect
golang.org/x/mod v0.18.0 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/tools v0.22.0 // indirect
golang.org/x/tools v0.28.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/grpc v1.66.1 // indirect

go.sum generated (16 changed lines)

@ -480,8 +480,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a h1:w3tdWGKbLGBPtR/8/oO74W6hmz0qE5q0z9aqSAewaaM=
github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a/go.mod h1:S8kfXMp+yh77OxPD4fdM6YUknrZpQxLhvxzS4gDHENY=
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@ -709,8 +709,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -864,8 +864,8 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1004,8 +1004,8 @@ modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
mvdan.cc/xurls/v2 v2.5.0 h1:lyBNOm8Wo71UknhUs4QTFUNNMyxy2JEIaKKo0RWOh+8=
mvdan.cc/xurls/v2 v2.5.0/go.mod h1:yQgaGQ1rFtJUzkmKiHYSSfuQxqfYmd//X6PxvholpeE=
mvdan.cc/xurls/v2 v2.6.0 h1:3NTZpeTxYVWNSokW3MKeyVkz/j7uYXYiMtXRUfmjbgI=
mvdan.cc/xurls/v2 v2.6.0/go.mod h1:bCvEZ1XvdA6wDnxY7jPPjEmigDtvtvPXAD/Exa9IMSk=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=

vendor/golang.org/x/mod/LICENSE generated vendored (4 changed lines)

@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.


@ -506,7 +506,6 @@ func CheckFilePath(path string) error {
"PRN",
"AUX",
"NUL",
"COM0",
"COM1",
"COM2",
"COM3",
@ -516,7 +515,6 @@ func CheckFilePath(path string) error {
"COM7",
"COM8",
"COM9",
"LPT0",
"LPT1",
"LPT2",
"LPT3",

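The CheckFilePath hunk above (from golang.org/x/mod's module package) drops COM0 and LPT0 from the list of reserved Windows device names. A small sketch of how that check behaves, with illustrative paths only:

```go
package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// Ordinary relative paths pass the check.
	fmt.Println(module.CheckFilePath("docs/readme.md")) // <nil>

	// Path elements matching reserved Windows device names (AUX, COM1, ...)
	// are rejected; per the hunk above, COM0 and LPT0 are no longer
	// treated as reserved.
	fmt.Println(module.CheckFilePath("aux/notes.txt")) // non-nil error
}
```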
vendor/golang.org/x/tools/LICENSE generated vendored (4 changed lines)

@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.


@ -106,8 +106,21 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod
// Does augmented child strictly contain [start, end)?
if augPos <= start && end <= augEnd {
_, isToken := child.(tokenNode)
return isToken || visit(child)
if is[tokenNode](child) {
return true
}
// childrenOf elides the FuncType node beneath FuncDecl.
// Add it back here for TypeParams, Params, Results,
// all FieldLists. But we don't add it back for the "func" token
// even though it is in the tree at FuncDecl.Type.Func.
if decl, ok := node.(*ast.FuncDecl); ok {
if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv {
path = append(path, decl.Type)
}
}
return visit(child)
}
// Does [start, end) overlap multiple children?
@ -313,6 +326,8 @@ func childrenOf(n ast.Node) []ast.Node {
//
// As a workaround, we inline the case for FuncType
// here and order things correctly.
// We also need to insert the elided FuncType just
// before the 'visit' recursion.
//
children = nil // discard ast.Walk(FuncDecl) info subtrees
children = append(children, tok(n.Type.Func, len("func")))
@ -632,3 +647,8 @@ func NodeDescription(n ast.Node) string {
}
panic(fmt.Sprintf("unexpected node type: %T", n))
}
func is[T any](x any) bool {
_, ok := x.(T)
return ok
}


@ -344,7 +344,12 @@ func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (r
}
// UsesImport reports whether a given import is used.
// The provided File must have been parsed with syntactic object resolution
// (not using go/parser.SkipObjectResolution).
func UsesImport(f *ast.File, path string) (used bool) {
if f.Scope == nil {
panic("file f was not parsed with syntactic object resolution")
}
spec := importSpec(f, path)
if spec == nil {
return


@ -7,12 +7,5 @@
import "go/ast"
// Unparen returns e with any enclosing parentheses stripped.
func Unparen(e ast.Expr) ast.Expr {
for {
p, ok := e.(*ast.ParenExpr)
if !ok {
return e
}
e = p.X
}
}
// Deprecated: use [ast.Unparen].
func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) }


@ -2,22 +2,64 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gcexportdata provides functions for locating, reading, and
// writing export data files containing type information produced by the
// gc compiler. This package supports go1.7 export data format and all
// later versions.
// Package gcexportdata provides functions for reading and writing
// export data, which is a serialized description of the API of a Go
// package including the names, kinds, types, and locations of all
// exported declarations.
//
// Although it might seem convenient for this package to live alongside
// go/types in the standard library, this would cause version skew
// problems for developer tools that use it, since they must be able to
// consume the outputs of the gc compiler both before and after a Go
// update such as from Go 1.7 to Go 1.8. Because this package lives in
// golang.org/x/tools, sites can update their version of this repo some
// time before the Go 1.8 release and rebuild and redeploy their
// developer tools, which will then be able to consume both Go 1.7 and
// Go 1.8 export data files, so they will work before and after the
// Go update. (See discussion at https://golang.org/issue/15651.)
package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
// The standard Go compiler (cmd/compile) writes an export data file
// for each package it compiles, which it later reads when compiling
// packages that import the earlier one. The compiler must thus
// contain logic to both write and read export data.
// (See the "Export" section in the cmd/compile/README file.)
//
// The [Read] function in this package can read files produced by the
// compiler, producing [go/types] data structures. As a matter of
// policy, Read supports export data files produced by only the last
// two Go releases plus tip; see https://go.dev/issue/68898. The
// export data files produced by the compiler contain additional
// details related to generics, inlining, and other optimizations that
// cannot be decoded by the [Read] function.
//
// In files written by the compiler, the export data is not at the
// start of the file. Before calling Read, use [NewReader] to locate
// the desired portion of the file.
//
// The [Write] function in this package encodes the exported API of a
// Go package ([types.Package]) as a file. Such files can be later
// decoded by Read, but cannot be consumed by the compiler.
//
// # Future changes
//
// Although Read supports the formats written by both Write and the
// compiler, the two are quite different, and there is an open
// proposal (https://go.dev/issue/69491) to separate these APIs.
//
// Under that proposal, this package would ultimately provide only the
// Read operation for compiler export data, which must be defined in
// this module (golang.org/x/tools), not in the standard library, to
// avoid version skew for developer tools that need to read compiler
// export data both before and after a Go release, such as from Go
// 1.23 to Go 1.24. Because this package lives in the tools module,
// clients can update their version of the module some time before the
// Go 1.24 release and rebuild and redeploy their tools, which will
// then be able to consume both Go 1.23 and Go 1.24 export data files,
// so they will work before and after the Go update. (See discussion
// at https://go.dev/issue/15651.)
//
// The operations to import and export [go/types] data structures
// would be defined in the go/types package as Import and Export.
// [Write] would (eventually) delegate to Export,
// and [Read], when it detects a file produced by Export,
// would delegate to Import.
//
// # Deprecations
//
// The [NewImporter] and [Find] functions are deprecated and should
// not be used in new code. The [WriteBundle] and [ReadBundle]
// functions are experimental, and there is an open proposal to
// deprecate them (https://go.dev/issue/69573).
package gcexportdata
import (
"bufio"
@ -64,24 +106,18 @@ func Find(importPath, srcDir string) (filename, path string) {
// additional trailing data beyond the end of the export data.
func NewReader(r io.Reader) (io.Reader, error) {
buf := bufio.NewReader(r)
_, size, err := gcimporter.FindExportData(buf)
size, err := gcimporter.FindExportData(buf)
if err != nil {
return nil, err
}
if size >= 0 {
// We were given an archive and found the __.PKGDEF in it.
// This tells us the size of the export data, and we don't
// need to return the entire file.
return &io.LimitedReader{
R: buf,
N: size,
}, nil
} else {
// We were given an object file. As such, we don't know how large
// the export data is and must return the entire file.
return buf, nil
}
// We were given an archive and found the __.PKGDEF in it.
// This tells us the size of the export data, and we don't
// need to return the entire file.
return &io.LimitedReader{
R: buf,
N: size,
}, nil
}
// readAll works the same way as io.ReadAll, but avoids allocations and copies
@ -100,6 +136,11 @@ func readAll(r io.Reader) ([]byte, error) {
// Read reads export data from in, decodes it, and returns type
// information for the package.
//
// Read is capable of reading export data produced by [Write] at the
// same source code version, or by the last two Go releases (plus tip)
// of the standard Go compiler. Reading files from older compilers may
// produce an error.
//
// The package path (effectively its linker symbol prefix) is
// specified by path, since unlike the package name, this information
// may not be recorded in the export data.
@ -128,14 +169,26 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
// (from "version"). Select appropriate importer.
if len(data) > 0 {
switch data[0] {
case 'v', 'c', 'd': // binary, till go1.10
case 'v', 'c', 'd':
// binary, produced by cmd/compile till go1.10
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
case 'i': // indexed, till go1.19
case 'i':
// indexed, produced by cmd/compile till go1.19,
// and also by [Write].
//
// If proposal #69491 is accepted, go/types
// serialization will be implemented by
// types.Export, to which Write would eventually
// delegate (explicitly dropping any pretence at
// inter-version Write-Read compatibility).
// This [Read] function would delegate to types.Import
// when it detects that the file was produced by Export.
_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
return pkg, err
case 'u': // unified, from go1.20
case 'u':
// unified, produced by cmd/compile since go1.20
_, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
return pkg, err

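To make the Write/Read contract described in the new package documentation concrete, here is a minimal round-trip sketch (the package path example.com/demo and the source snippet are invented for illustration):

```go
package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Type-check a tiny package from source (it imports nothing,
	// so no importer is needed).
	const src = `package demo
func Hello() string { return "hi" }`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	var conf types.Config
	pkg, err := conf.Check("example.com/demo", fset, []*ast.File{file}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Write serializes the package's exported API; Read decodes it again.
	var buf bytes.Buffer
	if err := gcexportdata.Write(&buf, fset, pkg); err != nil {
		log.Fatal(err)
	}
	imports := make(map[string]*types.Package)
	pkg2, err := gcexportdata.Read(&buf, token.NewFileSet(), imports, "example.com/demo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg2.Path(), pkg2.Scope().Names()) // example.com/demo [Hello]
}
```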

@ -1,54 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package packagesdriver fetches type sizes for go/packages and go/analysis.
package packagesdriver
import (
"context"
"fmt"
"strings"
"golang.org/x/tools/internal/gocommand"
)
// TODO(adonovan): move back into go/packages.
func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) {
inv.Verb = "list"
inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}
stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
var goarch, compiler string
if rawErr != nil {
rawErrMsg := rawErr.Error()
if strings.Contains(rawErrMsg, "cannot find main module") ||
strings.Contains(rawErrMsg, "go.mod file not found") {
// User's running outside of a module.
// All bets are off. Get GOARCH and guess compiler is gc.
// TODO(matloob): Is this a problem in practice?
inv.Verb = "env"
inv.Args = []string{"GOARCH"}
envout, enverr := gocmdRunner.Run(ctx, inv)
if enverr != nil {
return "", "", enverr
}
goarch = strings.TrimSpace(envout.String())
compiler = "gc"
} else if friendlyErr != nil {
return "", "", friendlyErr
} else {
// This should be unreachable, but be defensive
// in case RunRaw's error results are inconsistent.
return "", "", rawErr
}
} else {
fields := strings.Fields(stdout.String())
if len(fields) < 2 {
return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
stdout.String(), stderr.String())
}
goarch = fields[0]
compiler = fields[1]
}
return compiler, goarch, nil
}


@ -64,7 +64,7 @@
The Load function can be configured by passing a pointer to a Config as
the first argument. A nil Config is equivalent to the zero Config, which
causes Load to run in LoadFiles mode, collecting minimal information.
causes Load to run in [LoadFiles] mode, collecting minimal information.
See the documentation for type Config for details.
As noted earlier, the Config.Mode controls the amount of detail
@ -72,14 +72,14 @@
for details.
Most tools should pass their command-line arguments (after any flags)
uninterpreted to [Load], so that it can interpret them
uninterpreted to Load, so that it can interpret them
according to the conventions of the underlying build system.
See the Example function for typical usage.
# The driver protocol
[Load] may be used to load Go packages even in Go projects that use
Load may be used to load Go packages even in Go projects that use
alternative build systems, by installing an appropriate "driver"
program for the build system and specifying its location in the
GOPACKAGESDRIVER environment variable.
@ -97,6 +97,15 @@
is written to the driver's standard input. The driver must write a
JSON-encoded [DriverResponse] message to its standard output. (This
message differs from the JSON schema produced by 'go list'.)
The value of the PWD environment variable seen by the driver process
is the preferred name of its working directory. (The working directory
may have other aliases due to symbolic links; see the comment on the
Dir field of [exec.Cmd] for related information.)
When the driver process emits in its response the name of a file
that is a descendant of this directory, it must use an absolute path
that has the value of PWD as a prefix, to ensure that the returned
filenames satisfy the original query.
*/
package packages // import "golang.org/x/tools/go/packages"

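The doc.go changes above describe Load, the Need bits, and the driver protocol; a minimal caller sketch (the "./..." pattern and the Mode combination are only examples):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	// One plausible Mode combination; requesting more bits means more
	// work per package, as the LoadMode documentation explains.
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports |
			packages.NeedTypes | packages.NeedSyntax | packages.NeedTypesInfo,
	}
	// Patterns are passed through uninterpreted to the underlying
	// build system (or GOPACKAGESDRIVER driver).
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	if packages.PrintErrors(pkgs) > 0 {
		os.Exit(1)
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, "parsed files:", len(pkg.Syntax))
	}
}
```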

@ -13,6 +13,7 @@
"fmt"
"os"
"os/exec"
"slices"
"strings"
)
@ -79,10 +80,10 @@ type DriverResponse struct {
// driver is the type for functions that query the build system for the
// packages named by the patterns.
type driver func(cfg *Config, patterns ...string) (*DriverResponse, error)
type driver func(cfg *Config, patterns []string) (*DriverResponse, error)
// findExternalDriver returns the file path of a tool that supplies
// the build system package structure, or "" if not found."
// the build system package structure, or "" if not found.
// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
func findExternalDriver(cfg *Config) driver {
@ -103,7 +104,7 @@ func findExternalDriver(cfg *Config) driver {
return nil
}
}
return func(cfg *Config, words ...string) (*DriverResponse, error) {
return func(cfg *Config, patterns []string) (*DriverResponse, error) {
req, err := json.Marshal(DriverRequest{
Mode: cfg.Mode,
Env: cfg.Env,
@ -117,7 +118,7 @@ func findExternalDriver(cfg *Config) driver {
buf := new(bytes.Buffer)
stderr := new(bytes.Buffer)
cmd := exec.CommandContext(cfg.Context, tool, words...)
cmd := exec.CommandContext(cfg.Context, tool, patterns...)
cmd.Dir = cfg.Dir
// The cwd gets resolved to the real path. On Darwin, where
// /tmp is a symlink, this breaks anything that expects the
@ -131,7 +132,7 @@ func findExternalDriver(cfg *Config) driver {
// command.
//
// (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go)
cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir)
cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir)
cmd.Stdin = bytes.NewReader(req)
cmd.Stdout = buf
cmd.Stderr = stderr
@ -150,7 +151,3 @@ func findExternalDriver(cfg *Config) driver {
return &response, nil
}
}
// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)].
// TODO(adonovan): use go1.21 slices.Clip.
func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] }


@ -21,7 +21,6 @@
"sync"
"unicode"
"golang.org/x/tools/go/internal/packagesdriver"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/packagesinternal"
)
@ -81,6 +80,12 @@ type golistState struct {
cfg *Config
ctx context.Context
runner *gocommand.Runner
// overlay is the JSON file that encodes the Config.Overlay
// mapping, used by 'go list -overlay=...'.
overlay string
envOnce sync.Once
goEnvError error
goEnv map[string]string
@ -128,7 +133,10 @@ func (state *golistState) mustGetEnv() map[string]string {
// goListDriver uses the go list command to interpret the patterns and produce
// the build system package structure.
// See driver for more details.
func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) {
//
// overlay is the JSON file that encodes the cfg.Overlay
// mapping, used by 'go list -overlay=...'
func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) {
// Make sure that any asynchronous go commands are killed when we return.
parentCtx := cfg.Context
if parentCtx == nil {
@ -143,13 +151,15 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error
cfg: cfg,
ctx: ctx,
vendorDirs: map[string]bool{},
overlay: overlay,
runner: runner,
}
// Fill in response.Sizes asynchronously if necessary.
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
errCh := make(chan error)
go func() {
compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner)
compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner)
response.dr.Compiler = compiler
response.dr.Arch = arch
errCh <- err
@ -495,13 +505,14 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse
pkg := &Package{
Name: p.Name,
ID: p.ImportPath,
Dir: p.Dir,
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
OtherFiles: absJoin(p.Dir, otherFiles(p)...),
EmbedFiles: absJoin(p.Dir, p.EmbedFiles),
EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns),
IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles),
forTest: p.ForTest,
ForTest: p.ForTest,
depsErrors: p.DepsErrors,
Module: p.Module,
}
@ -682,7 +693,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
// getGoVersion returns the effective minor version of the go command.
func (state *golistState) getGoVersion() (int, error) {
state.goVersionOnce.Do(func() {
state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner)
})
return state.goVersion, state.goVersionError
}
@ -752,7 +763,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
}
}
addFields("Name", "ImportPath", "Error") // These fields are always needed
if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 {
if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles",
"CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles",
"SwigFiles", "SwigCXXFiles", "SysoFiles")
@ -760,7 +771,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
addFields("TestGoFiles", "XTestGoFiles")
}
}
if cfg.Mode&NeedTypes != 0 {
if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
// CompiledGoFiles seems to be required for the test case TestCgoNoSyntax,
// even when -compiled isn't passed in.
// TODO(#52435): Should we make the test ask for -compiled, or automatically
@ -785,7 +796,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
// Request Dir in the unlikely case Export is not absolute.
addFields("Dir", "Export")
}
if cfg.Mode&needInternalForTest != 0 {
if cfg.Mode&NeedForTest != 0 {
addFields("ForTest")
}
if cfg.Mode&needInternalDepsErrors != 0 {
@ -841,7 +852,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation {
Env: cfg.Env,
Logf: cfg.Logf,
WorkingDir: cfg.Dir,
Overlay: cfg.goListOverlayFile,
Overlay: state.overlay,
}
}
@ -852,11 +863,8 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
inv := state.cfgInvocation()
inv.Verb = verb
inv.Args = args
gocmdRunner := cfg.gocmdRunner
if gocmdRunner == nil {
gocmdRunner = &gocommand.Runner{}
}
stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv)
stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv)
if err != nil {
// Check for 'go' executable not being found.
if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
@ -880,6 +888,12 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
return nil, friendlyErr
}
// Return an error if 'go list' failed due to missing tools in
// $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606).
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) {
return nil, friendlyErr
}
// Is there an error running the C compiler in cgo? This will be reported in the "Error" field
// and should be suppressed by go list -e.
//
@ -1024,3 +1038,44 @@ func cmdDebugStr(cmd *exec.Cmd) string {
}
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
}
// getSizesForArgs queries 'go list' for the appropriate
// Compiler and GOARCH arguments to pass to [types.SizesFor].
func getSizesForArgs(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) {
inv.Verb = "list"
inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}
stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
var goarch, compiler string
if rawErr != nil {
rawErrMsg := rawErr.Error()
if strings.Contains(rawErrMsg, "cannot find main module") ||
strings.Contains(rawErrMsg, "go.mod file not found") {
// User's running outside of a module.
// All bets are off. Get GOARCH and guess compiler is gc.
// TODO(matloob): Is this a problem in practice?
inv.Verb = "env"
inv.Args = []string{"GOARCH"}
envout, enverr := gocmdRunner.Run(ctx, inv)
if enverr != nil {
return "", "", enverr
}
goarch = strings.TrimSpace(envout.String())
compiler = "gc"
} else if friendlyErr != nil {
return "", "", friendlyErr
} else {
// This should be unreachable, but be defensive
// in case RunRaw's error results are inconsistent.
return "", "", rawErr
}
} else {
fields := strings.Fields(stdout.String())
if len(fields) < 2 {
return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
stdout.String(), stderr.String())
}
goarch = fields[0]
compiler = fields[1]
}
return compiler, goarch, nil
}


@ -9,49 +9,47 @@
"strings"
)
var allModes = []LoadMode{
NeedName,
NeedFiles,
NeedCompiledGoFiles,
NeedImports,
NeedDeps,
NeedExportFile,
NeedTypes,
NeedSyntax,
NeedTypesInfo,
NeedTypesSizes,
var modes = [...]struct {
mode LoadMode
name string
}{
{NeedName, "NeedName"},
{NeedFiles, "NeedFiles"},
{NeedCompiledGoFiles, "NeedCompiledGoFiles"},
{NeedImports, "NeedImports"},
{NeedDeps, "NeedDeps"},
{NeedExportFile, "NeedExportFile"},
{NeedTypes, "NeedTypes"},
{NeedSyntax, "NeedSyntax"},
{NeedTypesInfo, "NeedTypesInfo"},
{NeedTypesSizes, "NeedTypesSizes"},
{NeedForTest, "NeedForTest"},
{NeedModule, "NeedModule"},
{NeedEmbedFiles, "NeedEmbedFiles"},
{NeedEmbedPatterns, "NeedEmbedPatterns"},
}
var modeStrings = []string{
"NeedName",
"NeedFiles",
"NeedCompiledGoFiles",
"NeedImports",
"NeedDeps",
"NeedExportFile",
"NeedTypes",
"NeedSyntax",
"NeedTypesInfo",
"NeedTypesSizes",
}
func (mod LoadMode) String() string {
m := mod
if m == 0 {
func (mode LoadMode) String() string {
if mode == 0 {
return "LoadMode(0)"
}
var out []string
for i, x := range allModes {
if x > m {
break
}
if (m & x) != 0 {
out = append(out, modeStrings[i])
m = m ^ x
// named bits
for _, item := range modes {
if (mode & item.mode) != 0 {
mode ^= item.mode
out = append(out, item.name)
}
}
if m != 0 {
out = append(out, "Unknown")
// unnamed residue
if mode != 0 {
if out == nil {
return fmt.Sprintf("LoadMode(%#x)", int(mode))
}
out = append(out, fmt.Sprintf("%#x", int(mode)))
}
return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|"))
if len(out) == 1 {
return out[0]
}
return "(" + strings.Join(out, "|") + ")"
}

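Given the reworked String method above, stringifying LoadMode values behaves roughly as follows (outputs inferred from the code in this hunk):

```go
package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	// A single named bit prints bare; combinations print parenthesized
	// and pipe-separated; zero prints as LoadMode(0).
	fmt.Println(packages.NeedName)                      // NeedName
	fmt.Println(packages.NeedName | packages.NeedFiles) // (NeedName|NeedFiles)
	fmt.Println(packages.LoadMode(0))                   // LoadMode(0)
}
```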

@ -16,13 +16,13 @@
"go/scanner"
"go/token"
"go/types"
"io"
"log"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"golang.org/x/sync/errgroup"
@ -31,7 +31,6 @@
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/packagesinternal"
"golang.org/x/tools/internal/typesinternal"
"golang.org/x/tools/internal/versions"
)
// A LoadMode controls the amount of detail to return when loading.
@ -44,20 +43,33 @@
// ID and Errors (if present) will always be filled.
// [Load] may return more information than requested.
//
// The Mode flag is a union of several bits named NeedName,
// NeedFiles, and so on, each of which determines whether
// a given field of Package (Name, Files, etc) should be
// populated.
//
// For convenience, we provide named constants for the most
// common combinations of Need flags:
//
// [LoadFiles] lists of files in each package
// [LoadImports] ... plus imports
// [LoadTypes] ... plus type information
// [LoadSyntax] ... plus type-annotated syntax
// [LoadAllSyntax] ... for all dependencies
//
// Unfortunately there are a number of open bugs related to
// interactions among the LoadMode bits:
// - https://github.com/golang/go/issues/48226
// - https://github.com/golang/go/issues/56633
// - https://github.com/golang/go/issues/56677
// - https://github.com/golang/go/issues/58726
// - https://github.com/golang/go/issues/63517
// - https://github.com/golang/go/issues/56633
// - https://github.com/golang/go/issues/56677
// - https://github.com/golang/go/issues/58726
// - https://github.com/golang/go/issues/63517
type LoadMode int
const (
// NeedName adds Name and PkgPath.
NeedName LoadMode = 1 << iota
// NeedFiles adds GoFiles and OtherFiles.
// NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles
NeedFiles
// NeedCompiledGoFiles adds CompiledGoFiles.
@ -76,10 +88,10 @@
// NeedTypes adds Types, Fset, and IllTyped.
NeedTypes
// NeedSyntax adds Syntax.
// NeedSyntax adds Syntax and Fset.
NeedSyntax
// NeedTypesInfo adds TypesInfo.
// NeedTypesInfo adds TypesInfo and Fset.
NeedTypesInfo
// NeedTypesSizes adds TypesSizes.
@ -88,9 +100,10 @@
// needInternalDepsErrors adds the internal deps errors field for use by gopls.
needInternalDepsErrors
// needInternalForTest adds the internal forTest field.
// NeedForTest adds ForTest.
//
// Tests must also be set on the context for this field to be populated.
needInternalForTest
NeedForTest
// typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
// Modifies CompiledGoFiles and Types, and has no effect on its own.
@ -104,27 +117,24 @@
// NeedEmbedPatterns adds EmbedPatterns.
NeedEmbedPatterns
// Be sure to update loadmode_string.go when adding new items!
)
const (
// Deprecated: LoadFiles exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
// LoadFiles loads the name and file names for the initial packages.
LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
// Deprecated: LoadImports exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
// LoadImports loads the name, file names, and import mapping for the initial packages.
LoadImports = LoadFiles | NeedImports
// Deprecated: LoadTypes exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
// LoadTypes loads exported type information for the initial packages.
LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
// Deprecated: LoadSyntax exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
// LoadSyntax loads typed syntax for the initial packages.
LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
// Deprecated: LoadAllSyntax exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
// LoadAllSyntax loads typed syntax for the initial packages and all dependencies.
LoadAllSyntax = LoadSyntax | NeedDeps
// Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile.
@ -134,13 +144,7 @@
// A Config specifies details about how packages should be loaded.
// The zero value is a valid configuration.
//
// Calls to Load do not modify this struct.
//
// TODO(adonovan): #67702: this is currently false: in fact,
// calls to [Load] do not modify the public fields of this struct, but
// may modify hidden fields, so concurrent calls to [Load] must not
// use the same Config. But perhaps we should reestablish the
// documented invariant.
// Calls to [Load] do not modify this struct.
type Config struct {
// Mode controls the level of information returned for each package.
Mode LoadMode
@ -171,19 +175,10 @@ type Config struct {
//
Env []string
// gocmdRunner guards go command calls from concurrency errors.
gocmdRunner *gocommand.Runner
// BuildFlags is a list of command-line flags to be passed through to
// the build system's query tool.
BuildFlags []string
// modFile will be used for -modfile in go command invocations.
modFile string
// modFlag will be used for -modfile in go command invocations.
modFlag string
// Fset provides source position information for syntax trees and types.
// If Fset is nil, Load will use a new fileset, but preserve Fset's value.
Fset *token.FileSet
@ -230,21 +225,24 @@ type Config struct {
// drivers may vary in their level of support for overlays.
Overlay map[string][]byte
// goListOverlayFile is the JSON file that encodes the Overlay
// mapping, used by 'go list -overlay=...'
goListOverlayFile string
// -- Hidden configuration fields only for use in x/tools --
// modFile will be used for -modfile in go command invocations.
modFile string
// modFlag will be used for -modfile in go command invocations.
modFlag string
}
// Load loads and returns the Go packages named by the given patterns.
//
// Config specifies loading options;
// nil behaves the same as an empty Config.
// The cfg parameter specifies loading options; nil behaves the same as an empty [Config].
//
// The [Config.Mode] field is a set of bits that determine what kinds
// of information should be computed and returned. Modes that require
// more information tend to be slower. See [LoadMode] for details
// and important caveats. Its zero value is equivalent to
// NeedName | NeedFiles | NeedCompiledGoFiles.
// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles].
//
// Each call to Load returns a new set of [Package] instances.
// The Packages and their Imports form a directed acyclic graph.
@ -261,7 +259,7 @@ type Config struct {
// Errors associated with a particular package are recorded in the
// corresponding Package's Errors list, and do not cause Load to
// return an error. Clients may need to handle such errors before
// proceeding with further analysis. The PrintErrors function is
// proceeding with further analysis. The [PrintErrors] function is
// provided for convenient display of all errors.
func Load(cfg *Config, patterns ...string) ([]*Package, error) {
ld := newLoader(cfg)
@ -324,21 +322,24 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro
} else if !response.NotHandled {
return response, true, nil
}
// (fall through)
// not handled: fall through
}
// go list fallback
//
// Write overlays once, as there are many calls
// to 'go list' (one per chunk plus others too).
overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
if err != nil {
return nil, false, err
}
defer cleanupOverlay()
cfg.goListOverlayFile = overlay
response, err := callDriverOnChunks(goListDriver, cfg, chunks)
var runner gocommand.Runner // (shared across many 'go list' calls)
driver := func(cfg *Config, patterns []string) (*DriverResponse, error) {
return goListDriver(cfg, &runner, overlayFile, patterns)
}
response, err := callDriverOnChunks(driver, cfg, chunks)
if err != nil {
return nil, false, err
}
@ -376,16 +377,14 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) {
func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) {
if len(chunks) == 0 {
return driver(cfg)
return driver(cfg, nil)
}
responses := make([]*DriverResponse, len(chunks))
errNotHandled := errors.New("driver returned NotHandled")
var g errgroup.Group
for i, chunk := range chunks {
i := i
chunk := chunk
g.Go(func() (err error) {
responses[i], err = driver(cfg, chunk...)
responses[i], err = driver(cfg, chunk)
if responses[i] != nil && responses[i].NotHandled {
err = errNotHandled
}
@ -435,6 +434,12 @@ type Package struct {
// PkgPath is the package path as used by the go/types package.
PkgPath string
// Dir is the directory associated with the package, if it exists.
//
// For packages listed by the go command, this is the directory containing
// the package files.
Dir string
// Errors contains any errors encountered querying the metadata
// of the package, or while parsing or type-checking its files.
Errors []Error
@ -522,8 +527,8 @@ type Package struct {
// -- internal --
// forTest is the package under test, if any.
forTest string
// ForTest is the package under test, if any.
ForTest string
// depsErrors is the DepsErrors field from the go list response, if any.
depsErrors []*packagesinternal.PackageError
@ -552,9 +557,6 @@ type ModuleError struct {
}
func init() {
packagesinternal.GetForTest = func(p interface{}) string {
return p.(*Package).forTest
}
packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError {
return p.(*Package).depsErrors
}
@ -566,7 +568,6 @@ func init() {
}
packagesinternal.TypecheckCgo = int(typecheckCgo)
packagesinternal.DepsErrors = int(needInternalDepsErrors)
packagesinternal.ForTest = int(needInternalForTest)
}
// An Error describes a problem with a package's metadata, syntax, or types.
@ -682,18 +683,19 @@ func (p *Package) String() string { return p.ID }
// loaderPackage augments Package with state used during the loading phase
type loaderPackage struct {
*Package
importErrors map[string]error // maps each bad import to its error
loadOnce sync.Once
color uint8 // for cycle detection
needsrc bool // load from source (Mode >= LoadTypes)
needtypes bool // type information is either requested or depended on
initial bool // package was matched by a pattern
goVersion int // minor version number of go command on PATH
importErrors map[string]error // maps each bad import to its error
preds []*loaderPackage // packages that import this one
unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded
color uint8 // for cycle detection
needsrc bool // load from source (Mode >= LoadTypes)
needtypes bool // type information is either requested or depended on
initial bool // package was matched by a pattern
goVersion int // minor version number of go command on PATH
}
// loader holds the working state of a single call to load.
type loader struct {
pkgs map[string]*loaderPackage
pkgs map[string]*loaderPackage // keyed by Package.ID
Config
sizes types.Sizes // non-nil if needed by mode
parseCache map[string]*parseValue
@ -739,9 +741,6 @@ func newLoader(cfg *Config) *loader {
if ld.Config.Env == nil {
ld.Config.Env = os.Environ()
}
if ld.Config.gocmdRunner == nil {
ld.Config.gocmdRunner = &gocommand.Runner{}
}
if ld.Context == nil {
ld.Context = context.Background()
}
@ -755,7 +754,7 @@ func newLoader(cfg *Config) *loader {
ld.requestedMode = ld.Mode
ld.Mode = impliedLoadMode(ld.Mode)
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
if ld.Fset == nil {
ld.Fset = token.NewFileSet()
}
@ -764,6 +763,7 @@ func newLoader(cfg *Config) *loader {
// because we load source if export data is missing.
if ld.ParseFile == nil {
ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
// We implicitly promise to keep doing ast.Object resolution. :(
const mode = parser.AllErrors | parser.ParseComments
return parser.ParseFile(fset, filename, src, mode)
}
@ -795,7 +795,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
// This package needs type information if the caller requested types and the package is
// either a root, or it's a non-root and the user requested dependencies ...
needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
// This package needs source if the call requested source (or types info, which implies source)
// and the package is either a root, or it's a non-root and the user requested dependencies...
needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
@ -820,9 +820,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
}
}
if ld.Mode&NeedImports != 0 {
// Materialize the import graph.
// Materialize the import graph if it is needed (NeedImports),
// or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}).
var leaves []*loaderPackage // packages with no unfinished successors
if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
const (
white = 0 // new
grey = 1 // in progress
@ -841,63 +842,76 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
// dependency on a package that does. These are the only packages
// for which we load source code.
var stack []*loaderPackage
var visit func(lpkg *loaderPackage) bool
visit = func(lpkg *loaderPackage) bool {
switch lpkg.color {
case black:
return lpkg.needsrc
case grey:
var visit func(from, lpkg *loaderPackage) bool
visit = func(from, lpkg *loaderPackage) bool {
if lpkg.color == grey {
panic("internal error: grey node")
}
lpkg.color = grey
stack = append(stack, lpkg) // push
stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
lpkg.Imports = make(map[string]*Package, len(stubs))
for importPath, ipkg := range stubs {
var importErr error
imp := ld.pkgs[ipkg.ID]
if imp == nil {
// (includes package "C" when DisableCgo)
importErr = fmt.Errorf("missing package: %q", ipkg.ID)
} else if imp.color == grey {
importErr = fmt.Errorf("import cycle: %s", stack)
}
if importErr != nil {
if lpkg.importErrors == nil {
lpkg.importErrors = make(map[string]error)
if lpkg.color == white {
lpkg.color = grey
stack = append(stack, lpkg) // push
stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
lpkg.Imports = make(map[string]*Package, len(stubs))
for importPath, ipkg := range stubs {
var importErr error
imp := ld.pkgs[ipkg.ID]
if imp == nil {
// (includes package "C" when DisableCgo)
importErr = fmt.Errorf("missing package: %q", ipkg.ID)
} else if imp.color == grey {
importErr = fmt.Errorf("import cycle: %s", stack)
}
lpkg.importErrors[importPath] = importErr
continue
if importErr != nil {
if lpkg.importErrors == nil {
lpkg.importErrors = make(map[string]error)
}
lpkg.importErrors[importPath] = importErr
continue
}
if visit(lpkg, imp) {
lpkg.needsrc = true
}
lpkg.Imports[importPath] = imp.Package
}
if visit(imp) {
lpkg.needsrc = true
// -- postorder --
// Complete type information is required for the
// immediate dependencies of each source package.
if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
for _, ipkg := range lpkg.Imports {
ld.pkgs[ipkg.ID].needtypes = true
}
}
lpkg.Imports[importPath] = imp.Package
// NeedTypeSizes causes TypeSizes to be set even
// on packages for which types aren't needed.
if ld.Mode&NeedTypesSizes != 0 {
lpkg.TypesSizes = ld.sizes
}
// Add packages with no imports directly to the queue of leaves.
if len(lpkg.Imports) == 0 {
leaves = append(leaves, lpkg)
}
stack = stack[:len(stack)-1] // pop
lpkg.color = black
}
// Complete type information is required for the
// immediate dependencies of each source package.
if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
for _, ipkg := range lpkg.Imports {
ld.pkgs[ipkg.ID].needtypes = true
}
// Add edge from predecessor.
if from != nil {
from.unfinishedSuccs.Add(+1) // incref
lpkg.preds = append(lpkg.preds, from)
}
// NeedTypeSizes causes TypeSizes to be set even
// on packages for which types aren't needed.
if ld.Mode&NeedTypesSizes != 0 {
lpkg.TypesSizes = ld.sizes
}
stack = stack[:len(stack)-1] // pop
lpkg.color = black
return lpkg.needsrc
}
// For each initial package, create its import DAG.
for _, lpkg := range initial {
visit(lpkg)
visit(nil, lpkg)
}
} else {
@ -910,16 +924,45 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
// Load type data and syntax if needed, starting at
// the initial packages (roots of the import DAG).
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
var wg sync.WaitGroup
for _, lpkg := range initial {
wg.Add(1)
go func(lpkg *loaderPackage) {
ld.loadRecursive(lpkg)
wg.Done()
}(lpkg)
if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
// We avoid using g.SetLimit to limit concurrency as
// it makes g.Go stop accepting work, which prevents
// workers from enqueuing, and thus finishing, and thus
// allowing the group to make progress: deadlock.
//
// Instead we use the ioLimit and cpuLimit semaphores.
g, _ := errgroup.WithContext(ld.Context)
// enqueues adds a package to the type-checking queue.
// It must have no unfinished successors.
var enqueue func(*loaderPackage)
enqueue = func(lpkg *loaderPackage) {
g.Go(func() error {
// Parse and type-check.
ld.loadPackage(lpkg)
// Notify each waiting predecessor,
// and enqueue it when it becomes a leaf.
for _, pred := range lpkg.preds {
if pred.unfinishedSuccs.Add(-1) == 0 { // decref
enqueue(pred)
}
}
return nil
})
}
// Load leaves first, adding new packages
// to the queue as they become leaves.
for _, leaf := range leaves {
enqueue(leaf)
}
if err := g.Wait(); err != nil {
return nil, err // cancelled
}
wg.Wait()
}
// If the context is done, return its error and
@ -961,12 +1004,14 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
}
if ld.requestedMode&NeedTypes == 0 {
ld.pkgs[i].Types = nil
ld.pkgs[i].Fset = nil
ld.pkgs[i].IllTyped = false
}
if ld.requestedMode&NeedSyntax == 0 {
ld.pkgs[i].Syntax = nil
}
if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 {
ld.pkgs[i].Fset = nil
}
if ld.requestedMode&NeedTypesInfo == 0 {
ld.pkgs[i].TypesInfo = nil
}
@ -981,31 +1026,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
return result, nil
}
// loadRecursive loads the specified package and its dependencies,
// recursively, in parallel, in topological order.
// It is atomic and idempotent.
// Precondition: ld.Mode&NeedTypes.
func (ld *loader) loadRecursive(lpkg *loaderPackage) {
lpkg.loadOnce.Do(func() {
// Load the direct dependencies, in parallel.
var wg sync.WaitGroup
for _, ipkg := range lpkg.Imports {
imp := ld.pkgs[ipkg.ID]
wg.Add(1)
go func(imp *loaderPackage) {
ld.loadRecursive(imp)
wg.Done()
}(imp)
}
wg.Wait()
ld.loadPackage(lpkg)
})
}
// loadPackage loads the specified package.
// loadPackage loads/parses/typechecks the specified package.
// It must be called only once per Package,
// after immediate dependencies are loaded.
// Precondition: ld.Mode & NeedTypes.
// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0.
func (ld *loader) loadPackage(lpkg *loaderPackage) {
if lpkg.PkgPath == "unsafe" {
// Fill in the blanks to avoid surprises.
@ -1041,6 +1065,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
if !lpkg.needtypes && !lpkg.needsrc {
return
}
// TODO(adonovan): this condition looks wrong:
// I think it should be lpkg.needtypes && !lpg.needsrc,
// so that NeedSyntax without NeedTypes can be satisfied by export data.
if !lpkg.needsrc {
if err := ld.loadFromExportData(lpkg); err != nil {
lpkg.Errors = append(lpkg.Errors, Error{
@ -1146,7 +1174,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
}
lpkg.Syntax = files
if ld.Config.Mode&NeedTypes == 0 {
if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 {
return
}
@ -1157,16 +1185,20 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
return
}
lpkg.TypesInfo = &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
Uses: make(map[*ast.Ident]types.Object),
Implicits: make(map[ast.Node]types.Object),
Instances: make(map[*ast.Ident]types.Instance),
Scopes: make(map[ast.Node]*types.Scope),
Selections: make(map[*ast.SelectorExpr]*types.Selection),
// Populate TypesInfo only if needed, as it
// causes the type checker to work much harder.
if ld.Config.Mode&NeedTypesInfo != 0 {
lpkg.TypesInfo = &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
Uses: make(map[*ast.Ident]types.Object),
Implicits: make(map[ast.Node]types.Object),
Instances: make(map[*ast.Ident]types.Instance),
Scopes: make(map[ast.Node]*types.Scope),
Selections: make(map[*ast.SelectorExpr]*types.Selection),
FileVersions: make(map[*ast.File]string),
}
}
versions.InitFileVersions(lpkg.TypesInfo)
lpkg.TypesSizes = ld.sizes
importer := importerFunc(func(path string) (*types.Package, error) {
@ -1219,6 +1251,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
}
}
// Type-checking is CPU intensive.
cpuLimit <- unit{} // acquire a token
defer func() { <-cpuLimit }() // release a token
typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
lpkg.importErrors = nil // no longer needed
@ -1283,8 +1319,11 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
// We use a counting semaphore to limit
// the number of parallel I/O calls per process.
var ioLimit = make(chan bool, 20)
// the number of parallel I/O calls or CPU threads per process.
var (
ioLimit = make(chan unit, 20)
cpuLimit = make(chan unit, runtime.GOMAXPROCS(0))
)
func (ld *loader) parseFile(filename string) (*ast.File, error) {
ld.parseCacheMu.Lock()
@ -1301,20 +1340,28 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) {
var src []byte
for f, contents := range ld.Config.Overlay {
// TODO(adonovan): Inefficient for large overlays.
// Do an exact name-based map lookup
// (for nonexistent files) followed by a
// FileID-based map lookup (for existing ones).
if sameFile(f, filename) {
src = contents
break
}
}
var err error
if src == nil {
ioLimit <- true // wait
ioLimit <- unit{} // acquire a token
src, err = os.ReadFile(filename)
<-ioLimit // signal
<-ioLimit // release a token
}
if err != nil {
v.err = err
} else {
// Parsing is CPU intensive.
cpuLimit <- unit{} // acquire a token
v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
<-cpuLimit // release a token
}
close(v.ready)
@ -1329,18 +1376,21 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) {
// Because files are scanned in parallel, the token.Pos
// positions of the resulting ast.Files are not ordered.
func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
var wg sync.WaitGroup
n := len(filenames)
parsed := make([]*ast.File, n)
errors := make([]error, n)
for i, file := range filenames {
wg.Add(1)
go func(i int, filename string) {
var (
n = len(filenames)
parsed = make([]*ast.File, n)
errors = make([]error, n)
)
var g errgroup.Group
for i, filename := range filenames {
// This creates goroutines unnecessarily in the
// cache-hit case, but that case is uncommon.
g.Go(func() error {
parsed[i], errors[i] = ld.parseFile(filename)
wg.Done()
}(i, file)
return nil
})
}
wg.Wait()
g.Wait()
// Eliminate nils, preserving order.
var o int
@ -1499,6 +1549,10 @@ func impliedLoadMode(loadMode LoadMode) LoadMode {
// All these things require knowing the import graph.
loadMode |= NeedImports
}
if loadMode&NeedTypes != 0 {
// Types require the GoVersion from Module.
loadMode |= NeedModule
}
return loadMode
}
@ -1507,4 +1561,4 @@ func usesExportData(cfg *Config) bool {
return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
}
var _ interface{} = io.Discard // assert build toolchain is go1.16 or later
type unit struct{}

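The loader changes above replace per-package goroutine recursion with leaves-first scheduling plus buffered-channel semaphores (ioLimit, cpuLimit). A standalone sketch of that token-semaphore pattern (the worker body is a placeholder):

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

// unit and cpuLimit mirror the pattern in the hunk above: a buffered
// channel of empty structs whose capacity bounds concurrency.
type unit struct{}

var cpuLimit = make(chan unit, runtime.GOMAXPROCS(0))

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			cpuLimit <- unit{}            // acquire a token
			defer func() { <-cpuLimit }() // release a token
			fmt.Println("working on task", i) // placeholder for CPU-bound work
		}(i)
	}
	wg.Wait()
}
```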

@ -49,11 +49,20 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
// PrintErrors returns the number of errors printed.
func PrintErrors(pkgs []*Package) int {
var n int
errModules := make(map[*Module]bool)
Visit(pkgs, nil, func(pkg *Package) {
for _, err := range pkg.Errors {
fmt.Fprintln(os.Stderr, err)
n++
}
// Print pkg.Module.Error once if present.
mod := pkg.Module
if mod != nil && mod.Error != nil && !errModules[mod] {
errModules[mod] = true
fmt.Fprintln(os.Stderr, mod.Error.Err)
n++
}
})
return n
}


@ -51,7 +51,7 @@
//
// PO package->object Package.Scope.Lookup
// OT object->type Object.Type
// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa]
// TO type->object Type.{At,Field,Method,Obj} [AFMO]
//
// All valid paths start with a package and end at an object
@ -63,8 +63,8 @@
// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
// - The only OT operator is Object.Type,
// which we encode as '.' because dot cannot appear in an identifier.
// - The TT operators are encoded as [EKPRUTC];
// one of these (TypeParam) requires an integer operand,
// - The TT operators are encoded as [EKPRUTrCa];
// two of these ({,Recv}TypeParams) require an integer operand,
// which is encoded as a string of decimal digits.
// - The TO operators are encoded as [AFMO];
// three of these (At,Field,Method) require an integer operand,
@ -98,19 +98,21 @@
opType = '.' // .Type() (Object)
// type->type operators
opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
opKey = 'K' // .Key() (Map)
opParams = 'P' // .Params() (Signature)
opResults = 'R' // .Results() (Signature)
opUnderlying = 'U' // .Underlying() (Named)
opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
opConstraint = 'C' // .Constraint() (TypeParam)
opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
opKey = 'K' // .Key() (Map)
opParams = 'P' // .Params() (Signature)
opResults = 'R' // .Results() (Signature)
opUnderlying = 'U' // .Underlying() (Named)
opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature)
opConstraint = 'C' // .Constraint() (TypeParam)
opRhs = 'a' // .Rhs() (Alias)
// type->object operators
opAt = 'A' // .At(i) (Tuple)
opField = 'F' // .Field(i) (Struct)
opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
opObj = 'O' // .Obj() (Named, TypeParam)
opAt = 'A' // .At(i) (Tuple)
opField = 'F' // .Field(i) (Struct)
opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
opObj = 'O' // .Obj() (Named, TypeParam)
)
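A small round-trip sketch of this encoding, assuming the standard gc importer can load "fmt": For encodes an object as a Path relative to its package, and Object decodes it again.

package main

import (
	"fmt"
	"go/importer"
	"log"

	"golang.org/x/tools/go/types/objectpath"
)

func main() {
	pkg, err := importer.Default().Import("fmt")
	if err != nil {
		log.Fatal(err)
	}
	obj := pkg.Scope().Lookup("Stringer")

	p, err := objectpath.For(obj)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p) // a package-level object's path is simply its name, e.g. "Stringer"

	back, err := objectpath.Object(pkg, p)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back == obj) // true: the path round-trips within the same package
}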
// For is equivalent to new(Encoder).For(obj).
@ -226,7 +228,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
// Reject obviously non-viable cases.
switch obj := obj.(type) {
case *types.TypeName:
if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok {
if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok {
// With the exception of type parameters, only package-level type names
// have a path.
return "", fmt.Errorf("no path for %v", obj)
@ -278,21 +280,26 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
path = append(path, opType)
T := o.Type()
if tname.IsAlias() {
// type alias
if r := find(obj, T, path, nil); r != nil {
if alias, ok := T.(*types.Alias); ok {
if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil {
return Path(r), nil
}
} else {
if named, _ := T.(*types.Named); named != nil {
if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil {
// generic named type
return Path(r), nil
}
if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil {
return Path(r), nil
}
} else if tname.IsAlias() {
// legacy alias
if r := find(obj, T, path); r != nil {
return Path(r), nil
}
} else if named, ok := T.(*types.Named); ok {
// defined (named) type
if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil {
if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil {
return Path(r), nil
}
if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil {
return Path(r), nil
}
}
@ -305,7 +312,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
if _, ok := o.(*types.TypeName); !ok {
if o.Exported() {
// exported non-type (const, var, func)
if r := find(obj, o.Type(), append(path, opType), nil); r != nil {
if r := find(obj, o.Type(), append(path, opType)); r != nil {
return Path(r), nil
}
}
@ -313,7 +320,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
}
// Inspect declared methods of defined types.
if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok {
if T, ok := types.Unalias(o.Type()).(*types.Named); ok {
path = append(path, opType)
// The method index here is always with respect
// to the underlying go/types data structures,
@ -325,7 +332,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
if m == obj {
return Path(path2), nil // found declared method
}
if r := find(obj, m.Type(), append(path2, opType), nil); r != nil {
if r := find(obj, m.Type(), append(path2, opType)); r != nil {
return Path(r), nil
}
}
@ -440,43 +447,64 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
//
// The seen map is used to short circuit cycles through type parameters. If
// nil, it will be allocated as necessary.
func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte {
//
// The seenMethods map is used internally to short circuit cycles through
// interface methods, such as occur in the following example:
//
// type I interface { f() interface{I} }
//
// See golang/go#68046 for details.
func find(obj types.Object, T types.Type, path []byte) []byte {
return (&finder{obj: obj}).find(T, path)
}
// finder closes over search state for a call to find.
type finder struct {
obj types.Object // the sought object
seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters
seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces
}
func (f *finder) find(T types.Type, path []byte) []byte {
switch T := T.(type) {
case *aliases.Alias:
return find(obj, aliases.Unalias(T), path, seen)
case *types.Alias:
return f.find(types.Unalias(T), path)
case *types.Basic, *types.Named:
// Named types belonging to pkg were handled already,
// so T must belong to another package. No path.
return nil
case *types.Pointer:
return find(obj, T.Elem(), append(path, opElem), seen)
return f.find(T.Elem(), append(path, opElem))
case *types.Slice:
return find(obj, T.Elem(), append(path, opElem), seen)
return f.find(T.Elem(), append(path, opElem))
case *types.Array:
return find(obj, T.Elem(), append(path, opElem), seen)
return f.find(T.Elem(), append(path, opElem))
case *types.Chan:
return find(obj, T.Elem(), append(path, opElem), seen)
return f.find(T.Elem(), append(path, opElem))
case *types.Map:
if r := find(obj, T.Key(), append(path, opKey), seen); r != nil {
if r := f.find(T.Key(), append(path, opKey)); r != nil {
return r
}
return find(obj, T.Elem(), append(path, opElem), seen)
return f.find(T.Elem(), append(path, opElem))
case *types.Signature:
if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil {
if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil {
return r
}
if r := find(obj, T.Params(), append(path, opParams), seen); r != nil {
if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil {
return r
}
return find(obj, T.Results(), append(path, opResults), seen)
if r := f.find(T.Params(), append(path, opParams)); r != nil {
return r
}
return f.find(T.Results(), append(path, opResults))
case *types.Struct:
for i := 0; i < T.NumFields(); i++ {
fld := T.Field(i)
path2 := appendOpArg(path, opField, i)
if fld == obj {
if fld == f.obj {
return path2 // found field var
}
if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil {
if r := f.find(fld.Type(), append(path2, opType)); r != nil {
return r
}
}
@ -485,10 +513,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
for i := 0; i < T.Len(); i++ {
v := T.At(i)
path2 := appendOpArg(path, opAt, i)
if v == obj {
if v == f.obj {
return path2 // found param/result var
}
if r := find(obj, v.Type(), append(path2, opType), seen); r != nil {
if r := f.find(v.Type(), append(path2, opType)); r != nil {
return r
}
}
@ -496,28 +524,35 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
case *types.Interface:
for i := 0; i < T.NumMethods(); i++ {
m := T.Method(i)
if f.seenMethods[m] {
return nil
}
path2 := appendOpArg(path, opMethod, i)
if m == obj {
if m == f.obj {
return path2 // found interface method
}
if r := find(obj, m.Type(), append(path2, opType), seen); r != nil {
if f.seenMethods == nil {
f.seenMethods = make(map[*types.Func]bool)
}
f.seenMethods[m] = true
if r := f.find(m.Type(), append(path2, opType)); r != nil {
return r
}
}
return nil
case *types.TypeParam:
name := T.Obj()
if name == obj {
return append(path, opObj)
}
if seen[name] {
if f.seenTParamNames[name] {
return nil
}
if seen == nil {
seen = make(map[*types.TypeName]bool)
if name == f.obj {
return append(path, opObj)
}
seen[name] = true
if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil {
if f.seenTParamNames == nil {
f.seenTParamNames = make(map[*types.TypeName]bool)
}
f.seenTParamNames[name] = true
if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil {
return r
}
return nil
@ -525,11 +560,15 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
panic(T)
}
func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte {
func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte {
return (&finder{obj: obj}).findTypeParam(list, path, op)
}
func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte {
for i := 0; i < list.Len(); i++ {
tparam := list.At(i)
path2 := appendOpArg(path, opTypeParam, i)
if r := find(obj, tparam, path2, seen); r != nil {
path2 := appendOpArg(path, op, i)
if r := f.find(tparam, path2); r != nil {
return r
}
}
@ -580,10 +619,10 @@ type hasObj interface {
code := suffix[0]
suffix = suffix[1:]
// Codes [AFM] have an integer operand.
// Codes [AFMTr] have an integer operand.
var index int
switch code {
case opAt, opField, opMethod, opTypeParam:
case opAt, opField, opMethod, opTypeParam, opRecvTypeParam:
rest := strings.TrimLeft(suffix, "0123456789")
numerals := suffix[:len(suffix)-len(rest)]
suffix = rest
@ -616,7 +655,7 @@ type hasObj interface {
// Inv: t != nil, obj == nil
t = aliases.Unalias(t)
t = types.Unalias(t)
switch code {
case opElem:
hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
@ -653,6 +692,16 @@ type hasObj interface {
}
t = named.Underlying()
case opRhs:
if alias, ok := t.(*types.Alias); ok {
t = aliases.Rhs(alias)
} else if false && aliases.Enabled() {
// The Enabled check is too expensive, so for now we
// simply assume that aliases are not enabled.
// TODO(adonovan): replace with "if true {" when go1.24 is assured.
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t)
}
case opTypeParam:
hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
if !ok {
@ -664,6 +713,17 @@ type hasObj interface {
}
t = tparams.At(index)
case opRecvTypeParam:
sig, ok := t.(*types.Signature) // Signature
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
}
rtparams := sig.RecvTypeParams()
if n := rtparams.Len(); index >= n {
return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
}
t = rtparams.At(index)
case opConstraint:
tparam, ok := t.(*types.TypeParam)
if !ok {
@ -725,6 +785,10 @@ type hasObj interface {
}
}
if obj == nil {
panic(p) // path does not end in an object-valued operator
}
if obj.Pkg() != pkg {
return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
}

vendor/golang.org/x/tools/go/types/typeutil/callee.go generated vendored Normal file

@ -0,0 +1,68 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeutil
import (
"go/ast"
"go/types"
"golang.org/x/tools/internal/typeparams"
)
// Callee returns the named target of a function call, if any:
// a function, method, builtin, or variable.
//
// Functions and methods may potentially have type parameters.
func Callee(info *types.Info, call *ast.CallExpr) types.Object {
fun := ast.Unparen(call.Fun)
// Look through type instantiation if necessary.
isInstance := false
switch fun.(type) {
case *ast.IndexExpr, *ast.IndexListExpr:
// When extracting the callee from an *IndexExpr, we need to check that
// it is a *types.Func and not a *types.Var.
// Example: Don't match a slice m within the expression `m[0]()`.
isInstance = true
fun, _, _, _ = typeparams.UnpackIndexExpr(fun)
}
var obj types.Object
switch fun := fun.(type) {
case *ast.Ident:
obj = info.Uses[fun] // type, var, builtin, or declared func
case *ast.SelectorExpr:
if sel, ok := info.Selections[fun]; ok {
obj = sel.Obj() // method or field
} else {
obj = info.Uses[fun.Sel] // qualified identifier?
}
}
if _, ok := obj.(*types.TypeName); ok {
return nil // T(x) is a conversion, not a call
}
// A Func is required to match instantiations.
if _, ok := obj.(*types.Func); isInstance && !ok {
return nil // Was not a Func.
}
return obj
}
// StaticCallee returns the target (function or method) of a static function
// call, if any. It returns nil for calls to builtins.
//
// Note: for calls of instantiated functions and methods, StaticCallee returns
// the corresponding generic function or method on the generic type.
func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
return f
}
return nil
}
func interfaceMethod(f *types.Func) bool {
recv := f.Type().(*types.Signature).Recv()
return recv != nil && types.IsInterface(recv.Type())
}
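A sketch of Callee and StaticCallee on a type-checked file; the source string, the package path "p", and the use of the default importer are assumptions of the example:

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	const src = `package p

import "fmt"

func f() { fmt.Println("hi") }
`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	// Callee needs Uses and Selections to resolve identifiers and selectors.
	info := &types.Info{
		Uses:       make(map[*ast.Ident]types.Object),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}
	conf := types.Config{Importer: importer.Default()}
	if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
		log.Fatal(err)
	}
	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			if fn := typeutil.StaticCallee(info, call); fn != nil {
				fmt.Println("static call to", fn.FullName()) // fmt.Println
			}
		}
		return true
	})
}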

vendor/golang.org/x/tools/go/types/typeutil/imports.go generated vendored Normal file

@ -0,0 +1,30 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeutil
import "go/types"
// Dependencies returns all dependencies of the specified packages.
//
// Dependent packages appear in topological order: if package P imports
// package Q, Q appears earlier than P in the result.
// The algorithm follows import statements in the order they
// appear in the source code, so the result is a total order.
func Dependencies(pkgs ...*types.Package) []*types.Package {
var result []*types.Package
seen := make(map[*types.Package]bool)
var visit func(pkgs []*types.Package)
visit = func(pkgs []*types.Package) {
for _, p := range pkgs {
if !seen[p] {
seen[p] = true
visit(p.Imports())
result = append(result, p)
}
}
}
visit(pkgs)
return result
}
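A brief sketch, assuming the default gc importer exposes the import graph of the loaded package; the choice of net/http is arbitrary:

package main

import (
	"fmt"
	"go/importer"
	"log"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	pkg, err := importer.Default().Import("net/http")
	if err != nil {
		log.Fatal(err)
	}
	for _, dep := range typeutil.Dependencies(pkg) {
		fmt.Println(dep.Path()) // each dependency prints before net/http itself
	}
}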

vendor/golang.org/x/tools/go/types/typeutil/map.go generated vendored Normal file

@ -0,0 +1,517 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package typeutil defines various utilities for types, such as Map,
// a mapping from types.Type to any values.
package typeutil // import "golang.org/x/tools/go/types/typeutil"
import (
"bytes"
"fmt"
"go/types"
"reflect"
"golang.org/x/tools/internal/typeparams"
)
// Map is a hash-table-based mapping from types (types.Type) to
// arbitrary any values. The concrete types that implement
// the Type interface are pointers. Since they are not canonicalized,
// == cannot be used to check for equivalence, and thus we cannot
// simply use a Go map.
//
// Just as with map[K]V, a nil *Map is a valid empty map.
//
// Not thread-safe.
type Map struct {
hasher Hasher // shared by many Maps
table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
length int // number of map entries
}
// entry is an entry (key/value association) in a hash bucket.
type entry struct {
key types.Type
value any
}
// SetHasher sets the hasher used by Map.
//
// All Hashers are functionally equivalent but contain internal state
// used to cache the results of hashing previously seen types.
//
// A single Hasher created by MakeHasher() may be shared among many
// Maps. This is recommended if the instances have many keys in
// common, as it will amortize the cost of hash computation.
//
// A Hasher may grow without bound as new types are seen. Even when a
// type is deleted from the map, the Hasher never shrinks, since other
// types in the map may reference the deleted type indirectly.
//
// Hashers are not thread-safe, and read-only operations such as
// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
// read-lock) is require around all Map operations if a shared
// hasher is accessed from multiple threads.
//
// If SetHasher is not called, the Map will create a private hasher at
// the first call to Insert.
func (m *Map) SetHasher(hasher Hasher) {
m.hasher = hasher
}
// Delete removes the entry with the given key, if any.
// It returns true if the entry was found.
func (m *Map) Delete(key types.Type) bool {
if m != nil && m.table != nil {
hash := m.hasher.Hash(key)
bucket := m.table[hash]
for i, e := range bucket {
if e.key != nil && types.Identical(key, e.key) {
// We can't compact the bucket as it
// would disturb iterators.
bucket[i] = entry{}
m.length--
return true
}
}
}
return false
}
// At returns the map entry for the given key.
// The result is nil if the entry is not present.
func (m *Map) At(key types.Type) any {
if m != nil && m.table != nil {
for _, e := range m.table[m.hasher.Hash(key)] {
if e.key != nil && types.Identical(key, e.key) {
return e.value
}
}
}
return nil
}
// Set sets the map entry for key to val,
// and returns the previous entry, if any.
func (m *Map) Set(key types.Type, value any) (prev any) {
if m.table != nil {
hash := m.hasher.Hash(key)
bucket := m.table[hash]
var hole *entry
for i, e := range bucket {
if e.key == nil {
hole = &bucket[i]
} else if types.Identical(key, e.key) {
prev = e.value
bucket[i].value = value
return
}
}
if hole != nil {
*hole = entry{key, value} // overwrite deleted entry
} else {
m.table[hash] = append(bucket, entry{key, value})
}
} else {
if m.hasher.memo == nil {
m.hasher = MakeHasher()
}
hash := m.hasher.Hash(key)
m.table = map[uint32][]entry{hash: {entry{key, value}}}
}
m.length++
return
}
// Len returns the number of map entries.
func (m *Map) Len() int {
if m != nil {
return m.length
}
return 0
}
// Iterate calls function f on each entry in the map in unspecified order.
//
// If f should mutate the map, Iterate provides the same guarantees as
// Go maps: if f deletes a map entry that Iterate has not yet reached,
// f will not be invoked for it, but if f inserts a map entry that
// Iterate has not yet reached, whether or not f will be invoked for
// it is unspecified.
func (m *Map) Iterate(f func(key types.Type, value any)) {
if m != nil {
for _, bucket := range m.table {
for _, e := range bucket {
if e.key != nil {
f(e.key, e.value)
}
}
}
}
}
// Keys returns a new slice containing the set of map keys.
// The order is unspecified.
func (m *Map) Keys() []types.Type {
keys := make([]types.Type, 0, m.Len())
m.Iterate(func(key types.Type, _ any) {
keys = append(keys, key)
})
return keys
}
func (m *Map) toString(values bool) string {
if m == nil {
return "{}"
}
var buf bytes.Buffer
fmt.Fprint(&buf, "{")
sep := ""
m.Iterate(func(key types.Type, value any) {
fmt.Fprint(&buf, sep)
sep = ", "
fmt.Fprint(&buf, key)
if values {
fmt.Fprintf(&buf, ": %q", value)
}
})
fmt.Fprint(&buf, "}")
return buf.String()
}
// String returns a string representation of the map's entries.
// Values are printed using fmt.Sprintf("%v", v).
// Order is unspecified.
func (m *Map) String() string {
return m.toString(true)
}
// KeysString returns a string representation of the map's key set.
// Order is unspecified.
func (m *Map) KeysString() string {
return m.toString(false)
}
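A short usage sketch of Map: keys are compared with types.Identical rather than ==, so structurally identical types constructed independently address the same entry.

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map // the zero value is ready to use
	m.Set(types.Typ[types.Int], "an int")
	m.Set(types.NewSlice(types.Typ[types.Int]), "a slice of int")

	// A freshly built, structurally identical key finds the same entry,
	// even though it is a distinct pointer value.
	k := types.NewSlice(types.Typ[types.Int])
	fmt.Println(m.At(k)) // "a slice of int"
	fmt.Println(m.Len()) // 2
}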
////////////////////////////////////////////////////////////////////////
// Hasher
// A Hasher maps each type to its hash value.
// For efficiency, a hasher uses memoization; thus its memory
// footprint grows monotonically over time.
// Hashers are not thread-safe.
// Hashers have reference semantics.
// Call MakeHasher to create a Hasher.
type Hasher struct {
memo map[types.Type]uint32
// ptrMap records pointer identity.
ptrMap map[any]uint32
// sigTParams holds type parameters from the signature being hashed.
// Signatures are considered identical modulo renaming of type parameters, so
// within the scope of a signature type the identity of the signature's type
// parameters is just their index.
//
// Since the language does not currently support referring to uninstantiated
// generic types or functions, and instantiated signatures do not have type
// parameter lists, we should never encounter a second non-empty type
// parameter list when hashing a generic signature.
sigTParams *types.TypeParamList
}
// MakeHasher returns a new Hasher instance.
func MakeHasher() Hasher {
return Hasher{
memo: make(map[types.Type]uint32),
ptrMap: make(map[any]uint32),
sigTParams: nil,
}
}
// Hash computes a hash value for the given type t such that
// Identical(t, t') => Hash(t) == Hash(t').
func (h Hasher) Hash(t types.Type) uint32 {
hash, ok := h.memo[t]
if !ok {
hash = h.hashFor(t)
h.memo[t] = hash
}
return hash
}
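An illustrative sketch of the contract stated above: identical types hash to the same value even when they are distinct pointers.

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	h := typeutil.MakeHasher()
	t1 := types.NewSlice(types.Typ[types.Int])
	t2 := types.NewSlice(types.Typ[types.Int])
	fmt.Println(t1 == t2)                 // false: distinct pointers
	fmt.Println(types.Identical(t1, t2))  // true: structurally identical
	fmt.Println(h.Hash(t1) == h.Hash(t2)) // true: Identical implies equal hashes
}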
// hashString computes the Fowler-Noll-Vo hash of s.
func hashString(s string) uint32 {
var h uint32
for i := 0; i < len(s); i++ {
h ^= uint32(s[i])
h *= 16777619
}
return h
}
// hashFor computes the hash of t.
func (h Hasher) hashFor(t types.Type) uint32 {
// See Identical for rationale.
switch t := t.(type) {
case *types.Basic:
return uint32(t.Kind())
case *types.Alias:
return h.Hash(types.Unalias(t))
case *types.Array:
return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
case *types.Slice:
return 9049 + 2*h.Hash(t.Elem())
case *types.Struct:
var hash uint32 = 9059
for i, n := 0, t.NumFields(); i < n; i++ {
f := t.Field(i)
if f.Anonymous() {
hash += 8861
}
hash += hashString(t.Tag(i))
hash += hashString(f.Name()) // (ignore f.Pkg)
hash += h.Hash(f.Type())
}
return hash
case *types.Pointer:
return 9067 + 2*h.Hash(t.Elem())
case *types.Signature:
var hash uint32 = 9091
if t.Variadic() {
hash *= 8863
}
// Use a separate hasher for types inside of the signature, where type
// parameter identity is modified to be (index, constraint). We must use a
// new memo for this hasher as type identity may be affected by this
// masking. For example, in func[T any](*T), the identity of *T depends on
// whether we are mapping the argument in isolation, or recursively as part
// of hashing the signature.
//
// We should never encounter a generic signature while hashing another
// generic signature, but defensively set sigTParams only if it is
// currently unset.
tparams := t.TypeParams()
if h.sigTParams == nil && tparams.Len() != 0 {
h = Hasher{
// There may be something more efficient than discarding the existing
// memo, but it would require detecting whether types are 'tainted' by
// references to type parameters.
memo: make(map[types.Type]uint32),
// Re-using ptrMap ensures that pointer identity is preserved in this
// hasher.
ptrMap: h.ptrMap,
sigTParams: tparams,
}
}
for i := 0; i < tparams.Len(); i++ {
tparam := tparams.At(i)
hash += 7 * h.Hash(tparam.Constraint())
}
return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
case *types.Union:
return h.hashUnion(t)
case *types.Interface:
// Interfaces are identical if they have the same set of methods, with
// identical names and types, and they have the same set of type
// restrictions. See go/types.identical for more details.
var hash uint32 = 9103
// Hash methods.
for i, n := 0, t.NumMethods(); i < n; i++ {
// Method order is not significant.
// Ignore m.Pkg().
m := t.Method(i)
// Use shallow hash on method signature to
// avoid anonymous interface cycles.
hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type())
}
// Hash type restrictions.
terms, err := typeparams.InterfaceTermSet(t)
// If err != nil, t has invalid type restrictions.
if err == nil {
hash += h.hashTermSet(terms)
}
return hash
case *types.Map:
return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
case *types.Chan:
return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
case *types.Named:
hash := h.hashPtr(t.Obj())
targs := t.TypeArgs()
for i := 0; i < targs.Len(); i++ {
targ := targs.At(i)
hash += 2 * h.Hash(targ)
}
return hash
case *types.TypeParam:
return h.hashTypeParam(t)
case *types.Tuple:
return h.hashTuple(t)
}
panic(fmt.Sprintf("%T: %v", t, t))
}
func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
// See go/types.identicalTypes for rationale.
n := tuple.Len()
hash := 9137 + 2*uint32(n)
for i := 0; i < n; i++ {
hash += 3 * h.Hash(tuple.At(i).Type())
}
return hash
}
func (h Hasher) hashUnion(t *types.Union) uint32 {
// Hash type restrictions.
terms, err := typeparams.UnionTermSet(t)
// If err != nil, t has invalid type restrictions. Fall back on a non-zero
// hash.
if err != nil {
return 9151
}
return h.hashTermSet(terms)
}
func (h Hasher) hashTermSet(terms []*types.Term) uint32 {
hash := 9157 + 2*uint32(len(terms))
for _, term := range terms {
// term order is not significant.
termHash := h.Hash(term.Type())
if term.Tilde() {
termHash *= 9161
}
hash += 3 * termHash
}
return hash
}
// hashTypeParam returns a hash of the type parameter t, with a hash value
// depending on whether t is contained in h.sigTParams.
//
// If h.sigTParams is set and contains t, then we are in the process of hashing
// a signature, and the hash value of t must depend only on t's index and
// constraint: signatures are considered identical modulo type parameter
// renaming. To avoid infinite recursion, we only hash the type parameter
// index, and rely on types.Identical to handle signatures where constraints
// are not identical.
//
// Otherwise the hash of t depends only on t's pointer identity.
func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 {
if h.sigTParams != nil {
i := t.Index()
if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) {
return 9173 + 3*uint32(i)
}
}
return h.hashPtr(t.Obj())
}
// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that
// pointer values are not dependent on the GC.
func (h Hasher) hashPtr(ptr any) uint32 {
if hash, ok := h.ptrMap[ptr]; ok {
return hash
}
hash := uint32(reflect.ValueOf(ptr).Pointer())
h.ptrMap[ptr] = hash
return hash
}
// shallowHash computes a hash of t without looking at any of its
// element Types, to avoid potential anonymous cycles in the types of
// interface methods.
//
// When an unnamed non-empty interface type appears anywhere among the
// arguments or results of an interface method, there is a potential
// for endless recursion. Consider:
//
// type X interface { m() []*interface { X } }
//
// The problem is that the Methods of the interface in m's result type
// include m itself; there is no mention of the named type X that
// might help us break the cycle.
// (See comment in go/types.identical, case *Interface, for more.)
func (h Hasher) shallowHash(t types.Type) uint32 {
// t is the type of an interface method (Signature),
// its params or results (Tuples), or their immediate
// elements (mostly Slice, Pointer, Basic, Named),
// so there's no need to optimize anything else.
switch t := t.(type) {
case *types.Alias:
return h.shallowHash(types.Unalias(t))
case *types.Signature:
var hash uint32 = 604171
if t.Variadic() {
hash *= 971767
}
// The Signature/Tuple recursion is always finite
// and invariably shallow.
return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results())
case *types.Tuple:
n := t.Len()
hash := 9137 + 2*uint32(n)
for i := 0; i < n; i++ {
hash += 53471161 * h.shallowHash(t.At(i).Type())
}
return hash
case *types.Basic:
return 45212177 * uint32(t.Kind())
case *types.Array:
return 1524181 + 2*uint32(t.Len())
case *types.Slice:
return 2690201
case *types.Struct:
return 3326489
case *types.Pointer:
return 4393139
case *types.Union:
return 562448657
case *types.Interface:
return 2124679 // no recursion here
case *types.Map:
return 9109
case *types.Chan:
return 9127
case *types.Named:
return h.hashPtr(t.Obj())
case *types.TypeParam:
return h.hashPtr(t.Obj())
}
panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
}


@ -0,0 +1,71 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements a cache of method sets.
package typeutil
import (
"go/types"
"sync"
)
// A MethodSetCache records the method set of each type T for which
// MethodSet(T) is called so that repeat queries are fast.
// The zero value is a ready-to-use cache instance.
type MethodSetCache struct {
mu sync.Mutex
named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
others map[types.Type]*types.MethodSet // all other types
}
// MethodSet returns the method set of type T. It is thread-safe.
//
// If cache is nil, this function is equivalent to types.NewMethodSet(T).
// Utility functions can thus expose an optional *MethodSetCache
// parameter to clients that care about performance.
func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
if cache == nil {
return types.NewMethodSet(T)
}
cache.mu.Lock()
defer cache.mu.Unlock()
switch T := types.Unalias(T).(type) {
case *types.Named:
return cache.lookupNamed(T).value
case *types.Pointer:
if N, ok := types.Unalias(T.Elem()).(*types.Named); ok {
return cache.lookupNamed(N).pointer
}
}
// all other types
// (The map uses pointer equivalence, not type identity.)
mset := cache.others[T]
if mset == nil {
mset = types.NewMethodSet(T)
if cache.others == nil {
cache.others = make(map[types.Type]*types.MethodSet)
}
cache.others[T] = mset
}
return mset
}
func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
if cache.named == nil {
cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
}
// Avoid recomputing mset(*T) for each distinct Pointer
// instance whose underlying type is a named type.
msets, ok := cache.named[named]
if !ok {
msets.value = types.NewMethodSet(named)
msets.pointer = types.NewMethodSet(types.NewPointer(named))
cache.named[named] = msets
}
return msets
}
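A minimal sketch: the zero MethodSetCache is usable directly, and repeated queries for the same type are answered from the cache.

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var cache typeutil.MethodSetCache // zero value is ready to use
	T := types.Universe.Lookup("error").Type()
	mset := cache.MethodSet(T) // computed once, then cached
	for i := 0; i < mset.Len(); i++ {
		fmt.Println(mset.At(i)) // e.g. method (error) Error() string
	}
}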

vendor/golang.org/x/tools/go/types/typeutil/ui.go generated vendored Normal file

@ -0,0 +1,53 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeutil
// This file defines utilities for user interfaces that display types.
import (
"go/types"
)
// IntuitiveMethodSet returns the intuitive method set of a type T,
// which is the set of methods you can call on an addressable value of
// that type.
//
// The result always contains MethodSet(T), and is exactly MethodSet(T)
// for interface types and for pointer-to-concrete types.
// For all other concrete types T, the result additionally
// contains each method belonging to *T if there is no identically
// named method on T itself.
//
// This corresponds to user intuition about method sets;
// this function is intended only for user interfaces.
//
// The order of the result is as for types.MethodSet(T).
func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
isPointerToConcrete := func(T types.Type) bool {
ptr, ok := types.Unalias(T).(*types.Pointer)
return ok && !types.IsInterface(ptr.Elem())
}
var result []*types.Selection
mset := msets.MethodSet(T)
if types.IsInterface(T) || isPointerToConcrete(T) {
for i, n := 0, mset.Len(); i < n; i++ {
result = append(result, mset.At(i))
}
} else {
// T is some other concrete type.
// Report methods of T and *T, preferring those of T.
pmset := msets.MethodSet(types.NewPointer(T))
for i, n := 0, pmset.Len(); i < n; i++ {
meth := pmset.At(i)
if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
meth = m
}
result = append(result, meth)
}
}
return result
}
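A hedged sketch of the difference from types.NewMethodSet, assuming the default gc importer: for a concrete non-pointer type such as bytes.Buffer, whose methods all have pointer receivers, the intuitive method set also reports the *T methods.

package main

import (
	"fmt"
	"go/importer"
	"log"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	pkg, err := importer.Default().Import("bytes")
	if err != nil {
		log.Fatal(err)
	}
	T := pkg.Scope().Lookup("Buffer").Type() // bytes.Buffer, not *bytes.Buffer
	var cache typeutil.MethodSetCache
	for _, sel := range typeutil.IntuitiveMethodSet(T, &cache) {
		fmt.Println(sel.Obj().Name()) // Read, Write, String, ... (pointer methods included)
	}
}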


@ -22,11 +22,17 @@
// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled
// function is expensive and should be called once per task (e.g.
// package import), not once per call to NewAlias.
func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName {
//
// Precondition: enabled || len(tparams)==0.
// If materialized aliases are disabled, there must not be any type parameters.
func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName {
if enabled {
tname := types.NewTypeName(pos, pkg, name, nil)
newAlias(tname, rhs)
SetTypeParams(types.NewAlias(tname, rhs), tparams)
return tname
}
if len(tparams) > 0 {
panic("cannot create an alias with type parameters when gotypesalias is not enabled")
}
return types.NewTypeName(pos, pkg, name, rhs)
}
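Since this package is internal, here is a hedged illustration of the same idea in terms of the public go/types API (Go 1.22 or newer): a materialized alias is a distinct *types.Alias whose right-hand side is recovered by types.Unalias.

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// Roughly what `type A = int` becomes when aliases are materialized.
	tname := types.NewTypeName(token.NoPos, nil, "A", nil)
	alias := types.NewAlias(tname, types.Typ[types.Int])

	fmt.Println(tname.IsAlias())                              // true
	fmt.Println(types.Unalias(alias) == types.Typ[types.Int]) // true: the Rhs chain resolves to int
}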


@ -1,31 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.22
// +build !go1.22
package aliases
import (
"go/types"
)
// Alias is a placeholder for a go/types.Alias for <=1.21.
// It will never be created by go/types.
type Alias struct{}
func (*Alias) String() string { panic("unreachable") }
func (*Alias) Underlying() types.Type { panic("unreachable") }
func (*Alias) Obj() *types.TypeName { panic("unreachable") }
func Rhs(alias *Alias) types.Type { panic("unreachable") }
// Unalias returns the type t for go <=1.21.
func Unalias(t types.Type) types.Type { return t }
func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") }
// Enabled reports whether [NewAlias] should create [types.Alias] types.
//
// Before go1.22, this function always returns false.
func Enabled() bool { return false }


@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.22
// +build go1.22
package aliases
import (
@ -14,31 +11,51 @@
"go/types"
)
// Alias is an alias of types.Alias.
type Alias = types.Alias
// Rhs returns the type on the right-hand side of the alias declaration.
func Rhs(alias *Alias) types.Type {
func Rhs(alias *types.Alias) types.Type {
if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok {
return alias.Rhs() // go1.23+
}
// go1.22's Alias didn't have the Rhs method,
// so Unalias is the best we can do.
return Unalias(alias)
return types.Unalias(alias)
}
// Unalias is a wrapper of types.Unalias.
func Unalias(t types.Type) types.Type { return types.Unalias(t) }
// TypeParams returns the type parameter list of the alias.
func TypeParams(alias *types.Alias) *types.TypeParamList {
if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok {
return alias.TypeParams() // go1.23+
}
return nil
}
// newAlias is an internal alias around types.NewAlias.
// Direct usage is discouraged at the moment.
// Try to use NewAlias instead.
func newAlias(tname *types.TypeName, rhs types.Type) *Alias {
a := types.NewAlias(tname, rhs)
// TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect.
Unalias(a)
return a
// SetTypeParams sets the type parameters of the alias type.
func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) {
if alias, ok := any(alias).(interface {
SetTypeParams(tparams []*types.TypeParam)
}); ok {
alias.SetTypeParams(tparams) // go1.23+
} else if len(tparams) > 0 {
panic("cannot set type parameters of an Alias type in go1.22")
}
}
// TypeArgs returns the type arguments used to instantiate the Alias type.
func TypeArgs(alias *types.Alias) *types.TypeList {
if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok {
return alias.TypeArgs() // go1.23+
}
return nil // empty (go1.22)
}
// Origin returns the generic Alias type of which alias is an instance.
// If alias is not an instance of a generic alias, Origin returns alias.
func Origin(alias *types.Alias) *types.Alias {
if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok {
return alias.Origin() // go1.23+
}
return alias // not an instance of a generic alias (go1.22)
}
// Enabled reports whether [NewAlias] should create [types.Alias] types.
@ -56,7 +73,7 @@ func Enabled() bool {
// many tests. Therefore any attempt to cache the result
// is just incorrect.
fset := token.NewFileSet()
f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0)
f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution)
pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
_, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias)
return enabled


@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir {
return 0
}
}
var predeclOnce sync.Once
var predecl []types.Type // initialized lazily
func predeclared() []types.Type {
predeclOnce.Do(func() {
// initialize lazily to be sure that all
// elements have been initialized before
predecl = []types.Type{ // basic types
types.Typ[types.Bool],
types.Typ[types.Int],
types.Typ[types.Int8],
types.Typ[types.Int16],
types.Typ[types.Int32],
types.Typ[types.Int64],
types.Typ[types.Uint],
types.Typ[types.Uint8],
types.Typ[types.Uint16],
types.Typ[types.Uint32],
types.Typ[types.Uint64],
types.Typ[types.Uintptr],
types.Typ[types.Float32],
types.Typ[types.Float64],
types.Typ[types.Complex64],
types.Typ[types.Complex128],
types.Typ[types.String],
// basic type aliases
types.Universe.Lookup("byte").Type(),
types.Universe.Lookup("rune").Type(),
// error
types.Universe.Lookup("error").Type(),
// untyped types
types.Typ[types.UntypedBool],
types.Typ[types.UntypedInt],
types.Typ[types.UntypedRune],
types.Typ[types.UntypedFloat],
types.Typ[types.UntypedComplex],
types.Typ[types.UntypedString],
types.Typ[types.UntypedNil],
// package unsafe
types.Typ[types.UnsafePointer],
// invalid type
types.Typ[types.Invalid], // only appears in packages with errors
// used internally by gc; never used by this package or in .a files
anyType{},
}
predecl = append(predecl, additionalPredeclared()...)
})
return predecl
}
type anyType struct{}
func (t anyType) Underlying() types.Type { return t }
func (t anyType) String() string { return "any" }


@ -39,12 +39,15 @@ func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
}
// FindExportData positions the reader r at the beginning of the
// export data section of an underlying GC-created object/archive
// export data section of an underlying cmd/compile created archive
// file by reading from it. The reader must be positioned at the
// start of the file before calling this function. The hdr result
// is the string before the export data, either "$$" or "$$B".
// The size result is the length of the export data in bytes, or -1 if not known.
func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
// start of the file before calling this function.
// The size result is the length of the export data in bytes.
//
// This function is needed by [gcexportdata.Read], which must
// accept inputs produced by the last two releases of cmd/compile,
// plus tip.
func FindExportData(r *bufio.Reader) (size int64, err error) {
// Read first line to make sure this is an object file.
line, err := r.ReadSlice('\n')
if err != nil {
@ -52,28 +55,33 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
return
}
if string(line) == "!<arch>\n" {
// Archive file. Scan to __.PKGDEF.
var name string
if name, size, err = readGopackHeader(r); err != nil {
return
}
// First entry should be __.PKGDEF.
if name != "__.PKGDEF" {
err = fmt.Errorf("go archive is missing __.PKGDEF")
return
}
// Read first line of __.PKGDEF data, so that line
// is once again the first line of the input.
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
size -= int64(len(line))
// Is the first line an archive file signature?
if string(line) != "!<arch>\n" {
err = fmt.Errorf("not the start of an archive file (%q)", line)
return
}
// Archive file. Scan to __.PKGDEF.
var name string
if name, size, err = readGopackHeader(r); err != nil {
return
}
arsize := size
// First entry should be __.PKGDEF.
if name != "__.PKGDEF" {
err = fmt.Errorf("go archive is missing __.PKGDEF")
return
}
// Read first line of __.PKGDEF data, so that line
// is once again the first line of the input.
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
size -= int64(len(line))
// Now at __.PKGDEF in archive or still at beginning of file.
// Either way, line should begin with "go object ".
if !strings.HasPrefix(string(line), "go object ") {
@ -81,8 +89,8 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
return
}
// Skip over object header to export data.
// Begins after first line starting with $$.
// Skip over object headers to get to the export data section header "$$B\n".
// Object headers are lines that do not start with '$'.
for line[0] != '$' {
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
@ -90,9 +98,18 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
}
size -= int64(len(line))
}
hdr = string(line)
// Check for the binary export data section header "$$B\n".
hdr := string(line)
if hdr != "$$B\n" {
err = fmt.Errorf("unknown export data header: %q", hdr)
return
}
// TODO(taking): Remove end-of-section marker "\n$$\n" from size.
if size < 0 {
size = -1
err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
return
}
return
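FindExportData itself is internal; the supported entry point built on it is golang.org/x/tools/go/gcexportdata. A hedged sketch of typical use follows; note that Find can return an empty filename on recent toolchains, which no longer install .a files for the standard library, in which case go/packages with NeedExportFile is the usual way to locate export data.

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}
	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r, err := gcexportdata.NewReader(f) // skips the archive/object headers
	if err != nil {
		log.Fatal(err)
	}
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), len(pkg.Scope().Names()))
}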


@ -161,6 +161,8 @@ func FindPkg(path, srcDir string) (filename, id string) {
// Import imports a gc-generated package given its import path and srcDir, adds
// the corresponding package object to the packages map, and returns the object.
// The packages map must contain all packages already imported.
//
// TODO(taking): Import is only used in tests. Move to gcimporter_test.
func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
var rc io.ReadCloser
var filename, id string
@ -210,53 +212,50 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
}
defer rc.Close()
var hdr string
var size int64
buf := bufio.NewReader(rc)
if hdr, size, err = FindExportData(buf); err != nil {
if size, err = FindExportData(buf); err != nil {
return
}
switch hdr {
case "$$B\n":
var data []byte
data, err = io.ReadAll(buf)
if err != nil {
break
}
// TODO(gri): allow clients of go/importer to provide a FileSet.
// Or, define a new standard go/types/gcexportdata package.
fset := token.NewFileSet()
// Select appropriate importer.
if len(data) > 0 {
switch data[0] {
case 'v', 'c', 'd': // binary, till go1.10
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
case 'i': // indexed, till go1.19
_, pkg, err := IImportData(fset, packages, data[1:], id)
return pkg, err
case 'u': // unified, from go1.20
_, pkg, err := UImportData(fset, packages, data[1:size], id)
return pkg, err
default:
l := len(data)
if l > 10 {
l = 10
}
return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
}
}
default:
err = fmt.Errorf("unknown export data header: %q", hdr)
var data []byte
data, err = io.ReadAll(buf)
if err != nil {
return
}
if len(data) == 0 {
return nil, fmt.Errorf("no data to load a package from for path %s", id)
}
return
// TODO(gri): allow clients of go/importer to provide a FileSet.
// Or, define a new standard go/types/gcexportdata package.
fset := token.NewFileSet()
// Select appropriate importer.
switch data[0] {
case 'v', 'c', 'd':
// binary: emitted by cmd/compile till go1.10; obsolete.
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
case 'i':
// indexed: emitted by cmd/compile till go1.19;
// now used only for serializing go/types.
// See https://github.com/golang/go/issues/69491.
_, pkg, err := IImportData(fset, packages, data[1:], id)
return pkg, err
case 'u':
// unified: emitted by cmd/compile since go1.20.
_, pkg, err := UImportData(fset, packages, data[1:size], id)
return pkg, err
default:
l := len(data)
if l > 10 {
l = 10
}
return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
}
}
type byPath []*types.Package


@ -2,9 +2,227 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Indexed binary package export.
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
// see that file for specification of the format.
// Indexed package export.
//
// The indexed export data format is an evolution of the previous
// binary export data format. Its chief contribution is introducing an
// index table, which allows efficient random access of individual
// declarations and inline function bodies. In turn, this allows
// avoiding unnecessary work for compilation units that import large
// packages.
//
//
// The top-level data format is structured as:
//
// Header struct {
// Tag byte // 'i'
// Version uvarint
// StringSize uvarint
// DataSize uvarint
// }
//
// Strings [StringSize]byte
// Data [DataSize]byte
//
// MainIndex []struct{
// PkgPath stringOff
// PkgName stringOff
// PkgHeight uvarint
//
// Decls []struct{
// Name stringOff
// Offset declOff
// }
// }
//
// Fingerprint [8]byte
//
// uvarint means a uint64 written out using uvarint encoding.
//
// []T means a uvarint followed by that many T objects. In other
// words:
//
// Len uvarint
// Elems [Len]T
//
// stringOff means a uvarint that indicates an offset within the
// Strings section. At that offset is another uvarint, followed by
// that many bytes, which form the string value.
//
// declOff means a uvarint that indicates an offset within the Data
// section where the associated declaration can be found.
//
//
// There are five kinds of declarations, distinguished by their first
// byte:
//
// type Var struct {
// Tag byte // 'V'
// Pos Pos
// Type typeOff
// }
//
// type Func struct {
// Tag byte // 'F' or 'G'
// Pos Pos
// TypeParams []typeOff // only present if Tag == 'G'
// Signature Signature
// }
//
// type Const struct {
// Tag byte // 'C'
// Pos Pos
// Value Value
// }
//
// type Type struct {
// Tag byte // 'T' or 'U'
// Pos Pos
// TypeParams []typeOff // only present if Tag == 'U'
// Underlying typeOff
//
// Methods []struct{ // omitted if Underlying is an interface type
// Pos Pos
// Name stringOff
// Recv Param
// Signature Signature
// }
// }
//
// type Alias struct {
// Tag byte // 'A' or 'B'
// Pos Pos
// TypeParams []typeOff // only present if Tag == 'B'
// Type typeOff
// }
//
// // "Automatic" declaration of each typeparam
// type TypeParam struct {
// Tag byte // 'P'
// Pos Pos
// Implicit bool
// Constraint typeOff
// }
//
// typeOff means a uvarint that either indicates a predeclared type,
// or an offset into the Data section. If the uvarint is less than
// predeclReserved, then it indicates the index into the predeclared
// types list (see predeclared in bexport.go for order). Otherwise,
// subtracting predeclReserved yields the offset of a type descriptor.
//
// Value means a type, kind, and type-specific value. See
// (*exportWriter).value for details.
//
//
// There are twelve kinds of type descriptors, distinguished by an itag:
//
// type DefinedType struct {
// Tag itag // definedType
// Name stringOff
// PkgPath stringOff
// }
//
// type PointerType struct {
// Tag itag // pointerType
// Elem typeOff
// }
//
// type SliceType struct {
// Tag itag // sliceType
// Elem typeOff
// }
//
// type ArrayType struct {
// Tag itag // arrayType
// Len uint64
// Elem typeOff
// }
//
// type ChanType struct {
// Tag itag // chanType
// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
// Elem typeOff
// }
//
// type MapType struct {
// Tag itag // mapType
// Key typeOff
// Elem typeOff
// }
//
// type FuncType struct {
// Tag itag // signatureType
// PkgPath stringOff
// Signature Signature
// }
//
// type StructType struct {
// Tag itag // structType
// PkgPath stringOff
// Fields []struct {
// Pos Pos
// Name stringOff
// Type typeOff
// Embedded bool
// Note stringOff
// }
// }
//
// type InterfaceType struct {
// Tag itag // interfaceType
// PkgPath stringOff
// Embeddeds []struct {
// Pos Pos
// Type typeOff
// }
// Methods []struct {
// Pos Pos
// Name stringOff
// Signature Signature
// }
// }
//
// // Reference to a type param declaration
// type TypeParamType struct {
// Tag itag // typeParamType
// Name stringOff
// PkgPath stringOff
// }
//
// // Instantiation of a generic type (like List[T2] or List[int])
// type InstanceType struct {
// Tag itag // instanceType
// Pos pos
// TypeArgs []typeOff
// BaseType typeOff
// }
//
// type UnionType struct {
// Tag itag // unionType
// Terms []struct {
// tilde bool
// Type typeOff
// }
// }
//
//
//
// type Signature struct {
// Params []Param
// Results []Param
// Variadic bool // omitted if Results is empty
// }
//
// type Param struct {
// Pos Pos
// Name stringOff
// Type typOff
// }
//
//
// Pos encodes a file:line:column triple, incorporating a simple delta
// encoding scheme within a data object. See exportWriter.pos for
// details.
package gcimporter
@ -24,11 +242,30 @@
"golang.org/x/tools/go/types/objectpath"
"golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/tokeninternal"
)
// IExportShallow encodes "shallow" export data for the specified package.
//
// For types, we use "shallow" export data. Historically, the Go
// compiler always produced a summary of the types for a given package
// that included types from other packages that it indirectly
// referenced: "deep" export data. This had the advantage that the
// compiler (and analogous tools such as gopls) need only load one
// file per direct import. However, it meant that the files tended to
// get larger based on the level of the package in the import
// graph. For example, higher-level packages in the kubernetes module
// have over 1MB of "deep" export data, even when they have almost no
// content of their own, merely because they mention a major type that
// references many others. In pathological cases the export data was
// 300x larger than the source for a package due to this quadratic
// growth.
//
// "Shallow" export data means that the serialized types describe only
// a single package. If those types mention types from other packages,
// the type checker may need to request additional packages beyond
// just the direct imports. Type information for the entire transitive
// closure of imports is provided (lazily) by the DAG.
//
// No promises are made about the encoding other than that it can be decoded by
// the same version of IExportShallow. If you plan to save export data in the
// file system, be sure to include a cryptographic digest of the executable in
@ -51,8 +288,8 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc)
}
// IImportShallow decodes "shallow" types.Package data encoded by
// IExportShallow in the same executable. This function cannot import data from
// cmd/compile or gcexportdata.Write.
// [IExportShallow] in the same executable. This function cannot import data
// from cmd/compile or gcexportdata.Write.
//
// The importer calls getPackages to obtain package symbols for all
// packages mentioned in the export data, including the one being
@ -223,7 +460,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64)
// Sort the set of needed offsets. Duplicates are harmless.
sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
lines := tokeninternal.GetLines(file) // byte offset of each line start
lines := file.Lines() // byte offset of each line start
w.uint64(uint64(len(lines)))
// Rather than record the entire array of line start offsets,
@ -507,13 +744,13 @@ func (p *iexporter) doDecl(obj types.Object) {
case *types.TypeName:
t := obj.Type()
if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok {
if tparam, ok := types.Unalias(t).(*types.TypeParam); ok {
w.tag(typeParamTag)
w.pos(obj.Pos())
constraint := tparam.Constraint()
if p.version >= iexportVersionGo1_18 {
implicit := false
if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil {
if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil {
implicit = iface.IsImplicit()
}
w.bool(implicit)
@ -523,9 +760,22 @@ func (p *iexporter) doDecl(obj types.Object) {
}
if obj.IsAlias() {
w.tag(aliasTag)
alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled
var tparams *types.TypeParamList
if materialized {
tparams = aliases.TypeParams(alias)
}
if tparams.Len() == 0 {
w.tag(aliasTag)
} else {
w.tag(genericAliasTag)
}
w.pos(obj.Pos())
if alias, ok := t.(*aliases.Alias); ok {
if tparams.Len() > 0 {
w.tparamList(obj.Name(), tparams, obj.Pkg())
}
if materialized {
// Preserve materialized aliases,
// even of non-exported types.
t = aliases.Rhs(alias)
@ -744,8 +994,14 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
}()
}
switch t := t.(type) {
case *aliases.Alias:
// TODO(adonovan): support parameterized aliases, following *types.Named.
case *types.Alias:
if targs := aliases.TypeArgs(t); targs.Len() > 0 {
w.startType(instanceType)
w.pos(t.Obj().Pos())
w.typeList(targs, pkg)
w.typ(aliases.Origin(t), pkg)
return
}
w.startType(aliasType)
w.qualifiedType(t.Obj())
@ -854,7 +1110,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
for i := 0; i < n; i++ {
ft := t.EmbeddedType(i)
tPkg := pkg
if named, _ := aliases.Unalias(ft).(*types.Named); named != nil {
if named, _ := types.Unalias(ft).(*types.Named); named != nil {
w.pos(named.Obj().Pos())
} else {
w.pos(token.NoPos)


@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Indexed package import.
// See cmd/compile/internal/gc/iexport.go for the export data format.
// See iexport.go for the export data format.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
@ -53,6 +53,7 @@ func (r *intReader) uint64() uint64 {
iexportVersionPosCol = 1
iexportVersionGo1_18 = 2
iexportVersionGenerics = 2
iexportVersion = iexportVersionGenerics
iexportVersionCurrent = 2
)
@ -540,7 +541,7 @@ func canReuse(def *types.Named, rhs types.Type) bool {
if def == nil {
return true
}
iface, _ := aliases.Unalias(rhs).(*types.Interface)
iface, _ := types.Unalias(rhs).(*types.Interface)
if iface == nil {
return true
}
@ -557,19 +558,28 @@ type importReader struct {
prevColumn int64
}
// markBlack is redefined in iimport_go123.go, to work around golang/go#69912.
//
// If TypeNames are not marked black (in the sense of go/types cycle
// detection), they may be mutated when dot-imported. Fix this by punching a
// hole through the type, when compiling with Go 1.23. (The bug has been fixed
// for 1.24, but the fix was not worth back-porting).
var markBlack = func(name *types.TypeName) {}
func (r *importReader) obj(name string) {
tag := r.byte()
pos := r.pos()
switch tag {
case aliasTag:
case aliasTag, genericAliasTag:
var tparams []*types.TypeParam
if tag == genericAliasTag {
tparams = r.tparamList()
}
typ := r.typ()
// TODO(adonovan): support generic aliases:
// if tag == genericAliasTag {
// tparams := r.tparamList()
// alias.SetTypeParams(tparams)
// }
r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ))
obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams)
markBlack(obj) // workaround for golang/go#69912
r.declare(obj)
case constTag:
typ, val := r.value()
@ -589,6 +599,9 @@ func (r *importReader) obj(name string) {
// declaration before recursing.
obj := types.NewTypeName(pos, r.currPkg, name, nil)
named := types.NewNamed(obj, nil, nil)
markBlack(obj) // workaround for golang/go#69912
// Declare obj before calling r.tparamList, so the new type name is recognized
// if used in the constraint of one of its own typeparams (see #48280).
r.declare(obj)
@ -615,7 +628,7 @@ func (r *importReader) obj(name string) {
if targs.Len() > 0 {
rparams = make([]*types.TypeParam, targs.Len())
for i := range rparams {
rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam)
rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam)
}
}
msig := r.signature(recv, rparams, nil)
@ -645,7 +658,7 @@ func (r *importReader) obj(name string) {
}
constraint := r.typ()
if implicit {
iface, _ := aliases.Unalias(constraint).(*types.Interface)
iface, _ := types.Unalias(constraint).(*types.Interface)
if iface == nil {
errorf("non-interface constraint marked implicit")
}
@ -852,7 +865,7 @@ func (r *importReader) typ() types.Type {
}
func isInterface(t types.Type) bool {
_, ok := aliases.Unalias(t).(*types.Interface)
_, ok := types.Unalias(t).(*types.Interface)
return ok
}
@ -862,7 +875,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
func (r *importReader) doType(base *types.Named) (res types.Type) {
k := r.kind()
if debug {
r.p.trace("importing type %d (base: %s)", k, base)
r.p.trace("importing type %d (base: %v)", k, base)
r.p.indent++
defer func() {
r.p.indent--
@ -959,7 +972,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) {
methods[i] = method
}
typ := newInterface(methods, embeddeds)
typ := types.NewInterfaceType(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
@ -1051,7 +1064,7 @@ func (r *importReader) tparamList() []*types.TypeParam {
for i := range xs {
// Note: the standard library importer is tolerant of nil types here,
// though would panic in SetTypeParams.
xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam)
xs[i] = types.Unalias(r.typ()).(*types.TypeParam)
}
return xs
}

View file

@ -0,0 +1,53 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.22 && !go1.24
package gcimporter
import (
"go/token"
"go/types"
"unsafe"
)
// TODO(rfindley): delete this workaround once go1.24 is assured.
func init() {
// Update markBlack so that it correctly sets the color
// of imported TypeNames.
//
// See the doc comment for markBlack for details.
type color uint32
const (
white color = iota
black
grey
)
type object struct {
_ *types.Scope
_ token.Pos
_ *types.Package
_ string
_ types.Type
_ uint32
color_ color
_ token.Pos
}
type typeName struct {
object
}
// If the size of types.TypeName changes, this will fail to compile.
const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{}))
var _ [-delta * delta]int
markBlack = func(obj *types.TypeName) {
type uP = unsafe.Pointer
var ptr *typeName
*(*uP)(uP(&ptr)) = uP(obj)
ptr.color_ = black
}
}
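The negative-array-length line above is a compile-time size assertion. Here is a standalone, hedged sketch of the same pattern with made-up struct names, just to show how the guard trips if the mirrored layout ever drifts:

package main

import "unsafe"

// mirror is a hand-maintained copy of actual's layout. If the two ever
// diverge in size, delta becomes non-zero and the array type below gets a
// negative length, which fails to compile.
type mirror struct{ a, b int64 }
type actual struct{ a, b int64 }

const delta = int64(unsafe.Sizeof(mirror{})) - int64(unsafe.Sizeof(actual{}))

var _ [-delta * delta]int

func main() {}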

View file

@ -1,22 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.11
// +build !go1.11
package gcimporter
import "go/types"
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
named := make([]*types.Named, len(embeddeds))
for i, e := range embeddeds {
var ok bool
named[i], ok = e.(*types.Named)
if !ok {
panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
}
}
return types.NewInterface(methods, named)
}

View file

@ -1,14 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.11
// +build go1.11
package gcimporter
import "go/types"
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
return types.NewInterfaceType(methods, embeddeds)
}

View file

@ -0,0 +1,91 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gcimporter
import (
"go/types"
"sync"
)
// predecl is a cache for the predeclared types in types.Universe.
//
// Cache a distinct result based on the runtime value of any.
// The pointer value of the any type varies based on GODEBUG settings.
var predeclMu sync.Mutex
var predecl map[types.Type][]types.Type
func predeclared() []types.Type {
anyt := types.Universe.Lookup("any").Type()
predeclMu.Lock()
defer predeclMu.Unlock()
if pre, ok := predecl[anyt]; ok {
return pre
}
if predecl == nil {
predecl = make(map[types.Type][]types.Type)
}
decls := []types.Type{ // basic types
types.Typ[types.Bool],
types.Typ[types.Int],
types.Typ[types.Int8],
types.Typ[types.Int16],
types.Typ[types.Int32],
types.Typ[types.Int64],
types.Typ[types.Uint],
types.Typ[types.Uint8],
types.Typ[types.Uint16],
types.Typ[types.Uint32],
types.Typ[types.Uint64],
types.Typ[types.Uintptr],
types.Typ[types.Float32],
types.Typ[types.Float64],
types.Typ[types.Complex64],
types.Typ[types.Complex128],
types.Typ[types.String],
// basic type aliases
types.Universe.Lookup("byte").Type(),
types.Universe.Lookup("rune").Type(),
// error
types.Universe.Lookup("error").Type(),
// untyped types
types.Typ[types.UntypedBool],
types.Typ[types.UntypedInt],
types.Typ[types.UntypedRune],
types.Typ[types.UntypedFloat],
types.Typ[types.UntypedComplex],
types.Typ[types.UntypedString],
types.Typ[types.UntypedNil],
// package unsafe
types.Typ[types.UnsafePointer],
// invalid type
types.Typ[types.Invalid], // only appears in packages with errors
// used internally by gc; never used by this package or in .a files
anyType{},
// comparable
types.Universe.Lookup("comparable").Type(),
// any
anyt,
}
predecl[anyt] = decls
return decls
}
type anyType struct{}
func (t anyType) Underlying() types.Type { return t }
func (t anyType) String() string { return "any" }
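The caching described in the comment above (one result per runtime value of any) can be illustrated with a small in-package test; the test name and file placement are assumptions, not part of this change:

package gcimporter // hypothetical test file, e.g. predeclared_test.go

import "testing"

func TestPredeclaredIsCached(t *testing.T) {
	a := predeclared()
	b := predeclared()
	if len(a) == 0 || len(b) == 0 {
		t.Fatal("predeclared returned an empty slice")
	}
	// Both calls run under the same GODEBUG settings, so they should hand
	// back the same cached slice (same backing array).
	if &a[0] != &b[0] {
		t.Error("expected predeclared() to reuse the cached result")
	}
}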

View file

@ -1,34 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gcimporter
import "go/types"
const iexportVersion = iexportVersionGenerics
// additionalPredeclared returns additional predeclared types in go.1.18.
func additionalPredeclared() []types.Type {
return []types.Type{
// comparable
types.Universe.Lookup("comparable").Type(),
// any
types.Universe.Lookup("any").Type(),
}
}
// See cmd/compile/internal/types.SplitVargenSuffix.
func splitVargenSuffix(name string) (base, suffix string) {
i := len(name)
for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
i--
}
const dot = "·"
if i >= len(dot) && name[i-len(dot):i] == dot {
i -= len(dot)
return name[:i], name[i:]
}
return name, ""
}

View file

@ -1,10 +0,0 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.unified
// +build !goexperiment.unified
package gcimporter
const unifiedIR = false

View file

@ -1,10 +0,0 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.unified
// +build goexperiment.unified
package gcimporter
const unifiedIR = true

View file

@ -52,8 +52,7 @@ func (pr *pkgReader) later(fn func()) {
// See cmd/compile/internal/noder.derivedInfo.
type derivedInfo struct {
idx pkgbits.Index
needed bool
idx pkgbits.Index
}
// See cmd/compile/internal/noder.typeInfo.
@ -110,13 +109,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st
r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
pkg := r.pkg()
r.Bool() // has init
if r.Version().Has(pkgbits.HasInit) {
r.Bool()
}
for i, n := 0, r.Len(); i < n; i++ {
// As if r.obj(), but avoiding the Scope.Lookup call,
// to avoid eager loading of imports.
r.Sync(pkgbits.SyncObject)
assert(!r.Bool())
if r.Version().Has(pkgbits.DerivedFuncInstance) {
assert(!r.Bool())
}
r.p.objIdx(r.Reloc(pkgbits.RelocObj))
assert(r.Len() == 0)
}
@ -165,7 +168,7 @@ type readerDict struct {
// tparams is a slice of the constructed TypeParams for the element.
tparams []*types.TypeParam
// devived is a slice of types derived from tparams, which may be
// derived is a slice of types derived from tparams, which may be
// instantiated while reading the current element.
derived []derivedInfo
derivedTypes []types.Type // lazily instantiated from derived
@ -471,7 +474,9 @@ func (r *reader) param() *types.Var {
func (r *reader) obj() (types.Object, []types.Type) {
r.Sync(pkgbits.SyncObject)
assert(!r.Bool())
if r.Version().Has(pkgbits.DerivedFuncInstance) {
assert(!r.Bool())
}
pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
obj := pkgScope(pkg).Lookup(name)
@ -525,8 +530,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
case pkgbits.ObjAlias:
pos := r.pos()
var tparams []*types.TypeParam
if r.Version().Has(pkgbits.AliasTypeParamNames) {
tparams = r.typeParamNames()
}
typ := r.typ()
declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ))
declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams))
case pkgbits.ObjConst:
pos := r.pos()
@ -553,7 +562,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
// If the underlying type is an interface, we need to
// duplicate its methods so we can replace the receiver
// parameter's type (#49906).
if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
methods := make([]*types.Func, iface.NumExplicitMethods())
for i := range methods {
fn := iface.ExplicitMethod(i)
@ -632,7 +641,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
dict.derived = make([]derivedInfo, r.Len())
dict.derivedTypes = make([]types.Type, len(dict.derived))
for i := range dict.derived {
dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)}
if r.Version().Has(pkgbits.DerivedInfoNeeded) {
assert(!r.Bool())
}
}
pr.retireReader(r)
@ -726,3 +738,17 @@ func pkgScope(pkg *types.Package) *types.Scope {
}
return types.Universe
}
// See cmd/compile/internal/types.SplitVargenSuffix.
func splitVargenSuffix(name string) (base, suffix string) {
i := len(name)
for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
i--
}
const dot = "·"
if i >= len(dot) && name[i-len(dot):i] == dot {
i -= len(dot)
return name[:i], name[i:]
}
return name, ""
}
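A hedged illustration of the vargen split on a few inputs; the test name and cases are made up, assuming in-package access to the unexported helper:

package gcimporter // hypothetical test file

import "testing"

func TestSplitVargenSuffix(t *testing.T) {
	for _, tc := range []struct{ in, base, suffix string }{
		{"T·2", "T", "·2"}, // local type with a vargen suffix
		{"pair·10", "pair", "·10"},
		{"T", "T", ""}, // no suffix
	} {
		base, suffix := splitVargenSuffix(tc.in)
		if base != tc.base || suffix != tc.suffix {
			t.Errorf("splitVargenSuffix(%q) = (%q, %q), want (%q, %q)",
				tc.in, base, suffix, tc.base, tc.suffix)
		}
	}
}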

View file

@ -16,7 +16,6 @@
"os"
"os/exec"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
@ -200,12 +199,14 @@ func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io
return
}
func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
log := i.Logf
if log == nil {
log = func(string, ...interface{}) {}
// logf logs if i.Logf is non-nil.
func (i *Invocation) logf(format string, args ...any) {
if i.Logf != nil {
i.Logf(format, args...)
}
}
func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
goArgs := []string{i.Verb}
appendModFile := func() {
@ -248,16 +249,13 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
cmd.Stdout = stdout
cmd.Stderr = stderr
// cmd.WaitDelay was added only in go1.20 (see #50436).
if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
// https://go.dev/issue/59541: don't wait forever copying stderr
// after the command has exited.
// After CL 484741 we copy stdout manually, so we'll stop reading that as
// soon as ctx is done. However, we also don't want to wait around forever
// for stderr. Give a much-longer-than-reasonable delay and then assume that
// something has wedged in the kernel or runtime.
waitDelay.Set(reflect.ValueOf(30 * time.Second))
}
// https://go.dev/issue/59541: don't wait forever copying stderr
// after the command has exited.
// After CL 484741 we copy stdout manually, so we'll stop reading that as
// soon as ctx is done. However, we also don't want to wait around forever
// for stderr. Give a much-longer-than-reasonable delay and then assume that
// something has wedged in the kernel or runtime.
cmd.WaitDelay = 30 * time.Second
// The cwd gets resolved to the real path. On Darwin, where
// /tmp is a symlink, this breaks anything that expects the
@ -277,7 +275,12 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
cmd.Dir = i.WorkingDir
}
defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
debugStr := cmdDebugStr(cmd)
i.logf("starting %v", debugStr)
start := time.Now()
defer func() {
i.logf("%s for %v", time.Since(start), debugStr)
}()
return runCmdContext(ctx, cmd)
}
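The block above now sets exec.Cmd.WaitDelay directly, since the field exists from Go 1.20 onward. A minimal standalone sketch of the same pattern (the command and delay are illustrative only):

package main

import (
	"context"
	"os/exec"
	"time"
)

func main() {
	cmd := exec.CommandContext(context.Background(), "go", "env", "GOROOT")
	// Don't wait forever copying output after the command has exited; after a
	// generous delay, assume something has wedged and give up.
	cmd.WaitDelay = 30 * time.Second
	_ = cmd.Run()
}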
@ -514,7 +517,7 @@ func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(),
for k, v := range overlay {
// Use a unique basename for each file (001-foo.go),
// to avoid creating nested directories.
base := fmt.Sprintf("%d-%s.go", 1+len(overlays), filepath.Base(k))
base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k))
filename := filepath.Join(dir, base)
err := os.WriteFile(filename, v, 0666)
if err != nil {

View file

@ -90,18 +90,6 @@ type ImportFix struct {
Relevance float64 // see pkg
}
// An ImportInfo represents a single import statement.
type ImportInfo struct {
ImportPath string // import path, e.g. "crypto/rand".
Name string // import name, e.g. "crand", or "" if none.
}
// A packageInfo represents what's known about a package.
type packageInfo struct {
name string // real package name, if known.
exports map[string]bool // known exports.
}
// parseOtherFiles parses all the Go files in srcDir except filename, including
// test files if filename looks like a test.
//
@ -130,7 +118,7 @@ func parseOtherFiles(ctx context.Context, fset *token.FileSet, srcDir, filename
continue
}
f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0)
f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, parser.SkipObjectResolution)
if err != nil {
continue
}
@ -161,8 +149,8 @@ func addGlobals(f *ast.File, globals map[string]bool) {
// collectReferences builds a map of selector expressions, from
// left hand side (X) to a set of right hand sides (Sel).
func collectReferences(f *ast.File) references {
refs := references{}
func collectReferences(f *ast.File) References {
refs := References{}
var visitor visitFn
visitor = func(node ast.Node) ast.Visitor {
@ -232,7 +220,7 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
allFound := true
for right := range syms {
if !pkgInfo.exports[right] {
if !pkgInfo.Exports[right] {
allFound = false
break
}
@ -245,11 +233,6 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
return nil
}
// references is a set of references found in a Go file. The first map key is the
// left hand side of a selector expression, the second key is the right hand
// side, and the value should always be true.
type references map[string]map[string]bool
// A pass contains all the inputs and state necessary to fix a file's imports.
// It can be modified in some ways during use; see comments below.
type pass struct {
@ -257,27 +240,29 @@ type pass struct {
fset *token.FileSet // fset used to parse f and its siblings.
f *ast.File // the file being fixed.
srcDir string // the directory containing f.
env *ProcessEnv // the environment to use for go commands, etc.
loadRealPackageNames bool // if true, load package names from disk rather than guessing them.
otherFiles []*ast.File // sibling files.
logf func(string, ...any)
source Source // the environment to use for go commands, etc.
loadRealPackageNames bool // if true, load package names from disk rather than guessing them.
otherFiles []*ast.File // sibling files.
goroot string
// Intermediate state, generated by load.
existingImports map[string][]*ImportInfo
allRefs references
missingRefs references
allRefs References
missingRefs References
// Inputs to fix. These can be augmented between successive fix calls.
lastTry bool // indicates that this is the last call and fix should clean up as best it can.
candidates []*ImportInfo // candidate imports in priority order.
knownPackages map[string]*packageInfo // information about all known packages.
knownPackages map[string]*PackageInfo // information about all known packages.
}
// loadPackageNames saves the package names for everything referenced by imports.
func (p *pass) loadPackageNames(imports []*ImportInfo) error {
if p.env.Logf != nil {
p.env.Logf("loading package names for %v packages", len(imports))
func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) error {
if p.logf != nil {
p.logf("loading package names for %v packages", len(imports))
defer func() {
p.env.Logf("done loading package names for %v packages", len(imports))
p.logf("done loading package names for %v packages", len(imports))
}()
}
var unknown []string
@ -288,20 +273,17 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error {
unknown = append(unknown, imp.ImportPath)
}
resolver, err := p.env.GetResolver()
if err != nil {
return err
}
names, err := resolver.loadPackageNames(unknown, p.srcDir)
names, err := p.source.LoadPackageNames(ctx, p.srcDir, unknown)
if err != nil {
return err
}
// TODO(rfindley): revisit this. Why do we need to store known packages with
// no exports? The inconsistent data is confusing.
for path, name := range names {
p.knownPackages[path] = &packageInfo{
name: name,
exports: map[string]bool{},
p.knownPackages[path] = &PackageInfo{
Name: name,
Exports: map[string]bool{},
}
}
return nil
@ -329,8 +311,8 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
return imp.Name
}
known := p.knownPackages[imp.ImportPath]
if known != nil && known.name != "" {
return withoutVersion(known.name)
if known != nil && known.Name != "" {
return withoutVersion(known.Name)
}
return ImportPathToAssumedName(imp.ImportPath)
}
@ -338,9 +320,9 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
// load reads in everything necessary to run a pass, and reports whether the
// file already has all the imports it needs. It fills in p.missingRefs with the
// file's missing symbols, if any, or removes unused imports if not.
func (p *pass) load() ([]*ImportFix, bool) {
p.knownPackages = map[string]*packageInfo{}
p.missingRefs = references{}
func (p *pass) load(ctx context.Context) ([]*ImportFix, bool) {
p.knownPackages = map[string]*PackageInfo{}
p.missingRefs = References{}
p.existingImports = map[string][]*ImportInfo{}
// Load basic information about the file in question.
@ -363,10 +345,10 @@ func (p *pass) load() ([]*ImportFix, bool) {
// f's imports by the identifier they introduce.
imports := collectImports(p.f)
if p.loadRealPackageNames {
err := p.loadPackageNames(append(imports, p.candidates...))
err := p.loadPackageNames(ctx, append(imports, p.candidates...))
if err != nil {
if p.env.Logf != nil {
p.env.Logf("loading package names: %v", err)
if p.logf != nil {
p.logf("loading package names: %v", err)
}
return nil, false
}
@ -536,9 +518,10 @@ func (p *pass) assumeSiblingImportsValid() {
// We have the stdlib in memory; no need to guess.
rights = symbolNameSet(m)
}
p.addCandidate(imp, &packageInfo{
// TODO(rfindley): we should set package name here, for consistency.
p.addCandidate(imp, &PackageInfo{
// no name; we already know it.
exports: rights,
Exports: rights,
})
}
}
@ -547,14 +530,14 @@ func (p *pass) assumeSiblingImportsValid() {
// addCandidate adds a candidate import to p, and merges in the information
// in pkg.
func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
func (p *pass) addCandidate(imp *ImportInfo, pkg *PackageInfo) {
p.candidates = append(p.candidates, imp)
if existing, ok := p.knownPackages[imp.ImportPath]; ok {
if existing.name == "" {
existing.name = pkg.name
if existing.Name == "" {
existing.Name = pkg.Name
}
for export := range pkg.exports {
existing.exports[export] = true
for export := range pkg.Exports {
existing.Exports[export] = true
}
} else {
p.knownPackages[imp.ImportPath] = pkg
@ -563,7 +546,14 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
// fixImports adds and removes imports from f so that all its references are
// satisfied and there are no unused imports.
func fixImports(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
//
// This is declared as a variable rather than a function so goimports can
// easily be extended by adding a file with an init function.
//
// DO NOT REMOVE: used internally at Google.
var fixImports = fixImportsDefault
func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
fixes, err := getFixes(context.Background(), fset, f, filename, env)
if err != nil {
return err
@ -575,21 +565,42 @@ func fixImports(fset *token.FileSet, f *ast.File, filename string, env *ProcessE
// getFixes gets the import fixes that need to be made to f in order to fix the imports.
// It does not modify the ast.
func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) {
source, err := NewProcessEnvSource(env, filename, f.Name.Name)
if err != nil {
return nil, err
}
goEnv, err := env.goEnv()
if err != nil {
return nil, err
}
return getFixesWithSource(ctx, fset, f, filename, goEnv["GOROOT"], env.logf, source)
}
func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, goroot string, logf func(string, ...any), source Source) ([]*ImportFix, error) {
// This logic is defensively duplicated from getFixes.
abs, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
srcDir := filepath.Dir(abs)
if env.Logf != nil {
env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir)
if logf != nil {
logf("fixImports(filename=%q), srcDir=%q ...", filename, abs, srcDir)
}
// First pass: looking only at f, and using the naive algorithm to
// derive package names from import paths, see if the file is already
// complete. We can't add any imports yet, because we don't know
// if missing references are actually package vars.
p := &pass{fset: fset, f: f, srcDir: srcDir, env: env}
if fixes, done := p.load(); done {
p := &pass{
fset: fset,
f: f,
srcDir: srcDir,
logf: logf,
goroot: goroot,
source: source,
}
if fixes, done := p.load(ctx); done {
return fixes, nil
}
@ -601,7 +612,7 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
// Second pass: add information from other files in the same package,
// like their package vars and imports.
p.otherFiles = otherFiles
if fixes, done := p.load(); done {
if fixes, done := p.load(ctx); done {
return fixes, nil
}
@ -614,10 +625,17 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
// Third pass: get real package names where we had previously used
// the naive algorithm.
p = &pass{fset: fset, f: f, srcDir: srcDir, env: env}
p = &pass{
fset: fset,
f: f,
srcDir: srcDir,
logf: logf,
goroot: goroot,
source: p.source, // safe to reuse, as it's just a wrapper around env
}
p.loadRealPackageNames = true
p.otherFiles = otherFiles
if fixes, done := p.load(); done {
if fixes, done := p.load(ctx); done {
return fixes, nil
}
@ -831,7 +849,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
return true
},
dirFound: func(pkg *pkg) bool {
return pkgIsCandidate(filename, references{searchPkg: nil}, pkg)
return pkgIsCandidate(filename, References{searchPkg: nil}, pkg)
},
packageNameLoaded: func(pkg *pkg) bool {
return pkg.packageName == searchPkg
@ -1014,16 +1032,26 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) {
// already know the view type.
if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
e.resolver = newGopathResolver(e)
e.logf("created gopath resolver")
} else if r, err := newModuleResolver(e, e.ModCache); err != nil {
e.resolverErr = err
e.logf("failed to create module resolver: %v", err)
} else {
e.resolver = Resolver(r)
e.logf("created module resolver")
}
}
return e.resolver, e.resolverErr
}
// logf logs if e.Logf is non-nil.
func (e *ProcessEnv) logf(format string, args ...any) {
if e.Logf != nil {
e.Logf(format, args...)
}
}
// buildContext returns the build.Context to use for matching files.
//
// TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform
@ -1072,11 +1100,7 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string)
return e.GocmdRunner.Run(ctx, inv)
}
func addStdlibCandidates(pass *pass, refs references) error {
goenv, err := pass.env.goEnv()
if err != nil {
return err
}
func addStdlibCandidates(pass *pass, refs References) error {
localbase := func(nm string) string {
ans := path.Base(nm)
if ans[0] == 'v' {
@ -1091,13 +1115,13 @@ func addStdlibCandidates(pass *pass, refs references) error {
}
add := func(pkg string) {
// Prevent self-imports.
if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir {
if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.goroot, "src", pkg) == pass.srcDir {
return
}
exports := symbolNameSet(stdlib.PackageSymbols[pkg])
pass.addCandidate(
&ImportInfo{ImportPath: pkg},
&packageInfo{name: localbase(pkg), exports: exports})
&PackageInfo{Name: localbase(pkg), Exports: exports})
}
for left := range refs {
if left == "rand" {
@ -1127,8 +1151,8 @@ type Resolver interface {
// scan works with callback to search for packages. See scanCallback for details.
scan(ctx context.Context, callback *scanCallback) error
// loadExports returns the set of exported symbols in the package at dir.
// loadExports may be called concurrently.
// loadExports returns the package name and set of exported symbols in the
// package at dir. loadExports may be called concurrently.
loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error)
// scoreImportPath returns the relevance for an import path.
@ -1161,101 +1185,22 @@ type scanCallback struct {
exportsLoaded func(pkg *pkg, exports []stdlib.Symbol)
}
func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error {
func addExternalCandidates(ctx context.Context, pass *pass, refs References, filename string) error {
ctx, done := event.Start(ctx, "imports.addExternalCandidates")
defer done()
var mu sync.Mutex
found := make(map[string][]pkgDistance)
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true // We want everything.
},
dirFound: func(pkg *pkg) bool {
return pkgIsCandidate(filename, refs, pkg)
},
packageNameLoaded: func(pkg *pkg) bool {
if _, want := refs[pkg.packageName]; !want {
return false
}
if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName {
// The candidate is in the same directory and has the
// same package name. Don't try to import ourselves.
return false
}
if !canUse(filename, pkg.dir) {
return false
}
mu.Lock()
defer mu.Unlock()
found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)})
return false // We'll do our own loading after we sort.
},
}
resolver, err := pass.env.GetResolver()
results, err := pass.source.ResolveReferences(ctx, filename, refs)
if err != nil {
return err
}
if err = resolver.scan(ctx, callback); err != nil {
return err
}
// Search for imports matching potential package references.
type result struct {
imp *ImportInfo
pkg *packageInfo
}
results := make(chan result, len(refs))
ctx, cancel := context.WithCancel(ctx)
var wg sync.WaitGroup
defer func() {
cancel()
wg.Wait()
}()
var (
firstErr error
firstErrOnce sync.Once
)
for pkgName, symbols := range refs {
wg.Add(1)
go func(pkgName string, symbols map[string]bool) {
defer wg.Done()
found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols)
if err != nil {
firstErrOnce.Do(func() {
firstErr = err
cancel()
})
return
}
if found == nil {
return // No matching package.
}
imp := &ImportInfo{
ImportPath: found.importPathShort,
}
pkg := &packageInfo{
name: pkgName,
exports: symbols,
}
results <- result{imp, pkg}
}(pkgName, symbols)
}
go func() {
wg.Wait()
close(results)
}()
for result := range results {
for _, result := range results {
if result == nil {
continue
}
// Don't offer completions that would shadow predeclared
// names, such as github.com/coreos/etcd/error.
if types.Universe.Lookup(result.pkg.name) != nil { // predeclared
if types.Universe.Lookup(result.Package.Name) != nil { // predeclared
// Ideally we would skip this candidate only
// if the predeclared name is actually
// referenced by the file, but that's a lot
@ -1264,9 +1209,9 @@ type result struct {
// user before long.
continue
}
pass.addCandidate(result.imp, result.pkg)
pass.addCandidate(result.Import, result.Package)
}
return firstErr
return nil
}
// notIdentifier reports whether ch is an invalid identifier character.
@ -1608,11 +1553,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
}
fullFile := filepath.Join(dir, fi.Name())
// Legacy ast.Object resolution is needed here.
f, err := parser.ParseFile(fset, fullFile, nil, 0)
if err != nil {
if env.Logf != nil {
env.Logf("error parsing %v: %v", fullFile, err)
}
env.logf("error parsing %v: %v", fullFile, err)
continue
}
if f.Name.Name == "documentation" {
@ -1648,9 +1592,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
}
sortSymbols(exports)
if env.Logf != nil {
env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports)
}
env.logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports)
return pkgName, exports, nil
}
@ -1660,25 +1602,39 @@ func sortSymbols(syms []stdlib.Symbol) {
})
}
// findImport searches for a package with the given symbols.
// If no package is found, findImport returns ("", false, nil)
func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
// A symbolSearcher searches for a package with a set of symbols, among a set
// of candidates. See [symbolSearcher.search].
//
// The search occurs within the scope of a single file, with context captured
// in srcDir and xtest.
type symbolSearcher struct {
logf func(string, ...any)
srcDir string // directory containing the file
xtest bool // if set, the file being fixed is an x_test file
loadExports func(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error)
}
// search searches the provided candidates for a package containing all
// exported symbols.
//
// If successful, returns the resulting package.
func (s *symbolSearcher) search(ctx context.Context, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
// Sort the candidates by their import package length,
// assuming that shorter package names are better than long
// ones. Note that this sorts by the de-vendored name, so
// there's no "penalty" for vendoring.
sort.Sort(byDistanceOrImportPathShortLength(candidates))
if pass.env.Logf != nil {
if s.logf != nil {
for i, c := range candidates {
pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
s.logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
}
}
resolver, err := pass.env.GetResolver()
if err != nil {
return nil, err
}
// Collect exports for packages with matching names.
// Arrange rescv so that we can await results in order of relevance
// and exit as soon as we find the first match.
//
// Search with bounded concurrency, returning as soon as the first result
// among rescv is non-nil.
rescv := make([]chan *pkg, len(candidates))
for i := range candidates {
rescv[i] = make(chan *pkg, 1)
@ -1686,6 +1642,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
const maxConcurrentPackageImport = 4
loadExportsSem := make(chan struct{}, maxConcurrentPackageImport)
// Ensure that all work is completed at exit.
ctx, cancel := context.WithCancel(ctx)
var wg sync.WaitGroup
defer func() {
@ -1693,6 +1650,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
wg.Wait()
}()
// Start the search.
wg.Add(1)
go func() {
defer wg.Done()
@ -1703,55 +1661,67 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
return
}
i := i
c := c
wg.Add(1)
go func(c pkgDistance, resc chan<- *pkg) {
go func() {
defer func() {
<-loadExportsSem
wg.Done()
}()
if pass.env.Logf != nil {
pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
if s.logf != nil {
s.logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
}
// If we're an x_test, load the package under test's test variant.
includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir
_, exports, err := resolver.loadExports(ctx, c.pkg, includeTest)
pkg, err := s.searchOne(ctx, c, symbols)
if err != nil {
if pass.env.Logf != nil {
pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
if s.logf != nil && ctx.Err() == nil {
s.logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
}
resc <- nil
return
pkg = nil
}
exportsMap := make(map[string]bool, len(exports))
for _, sym := range exports {
exportsMap[sym.Name] = true
}
// If it doesn't have the right
// symbols, send nil to mean no match.
for symbol := range symbols {
if !exportsMap[symbol] {
resc <- nil
return
}
}
resc <- c.pkg
}(c, rescv[i])
rescv[i] <- pkg // may be nil
}()
}
}()
// Await the first (best) result.
for _, resc := range rescv {
pkg := <-resc
if pkg == nil {
continue
select {
case r := <-resc:
if r != nil {
return r, nil
}
case <-ctx.Done():
return nil, ctx.Err()
}
return pkg, nil
}
return nil, nil
}
func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols map[string]bool) (*pkg, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
// If we're considering the package under test from an x_test, load the
// test variant.
includeTest := s.xtest && c.pkg.dir == s.srcDir
_, exports, err := s.loadExports(ctx, c.pkg, includeTest)
if err != nil {
return nil, err
}
exportsMap := make(map[string]bool, len(exports))
for _, sym := range exports {
exportsMap[sym.Name] = true
}
for symbol := range symbols {
if !exportsMap[symbol] {
return nil, nil // no match
}
}
return c.pkg, nil
}
// pkgIsCandidate reports whether pkg is a candidate for satisfying a
// reference to package pkgIdent from the file named by filename.
@ -1764,65 +1734,31 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
// filename is the file being formatted.
// pkgIdent is the package being searched for, like "client" (if
// searching for "client.New")
func pkgIsCandidate(filename string, refs references, pkg *pkg) bool {
func pkgIsCandidate(filename string, refs References, pkg *pkg) bool {
// Check "internal" and "vendor" visibility:
if !canUse(filename, pkg.dir) {
return false
}
// Speed optimization to minimize disk I/O:
// the last two components on disk must contain the
// package name somewhere.
//
// This permits mismatch naming like directory
// "go-foo" being package "foo", or "pkg.v3" being "pkg",
// or directory "google.golang.org/api/cloudbilling/v1"
// being package "cloudbilling", but doesn't
// permit a directory "foo" to be package
// "bar", which is strongly discouraged
// anyway. There's no reason goimports needs
// to be slow just to accommodate that.
// Use the matchesPath heuristic to filter to package paths that could
// reasonably match a dangling reference.
//
// This permits mismatch naming like directory "go-foo" being package "foo",
// or "pkg.v3" being "pkg", or directory
// "google.golang.org/api/cloudbilling/v1" being package "cloudbilling", but
// doesn't permit a directory "foo" to be package "bar", which is strongly
// discouraged anyway. There's no reason goimports needs to be slow just to
// accommodate that.
for pkgIdent := range refs {
lastTwo := lastTwoComponents(pkg.importPathShort)
if strings.Contains(lastTwo, pkgIdent) {
return true
}
if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) {
lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
if strings.Contains(lastTwo, pkgIdent) {
return true
}
}
}
return false
}
func hasHyphenOrUpperASCII(s string) bool {
for i := 0; i < len(s); i++ {
b := s[i]
if b == '-' || ('A' <= b && b <= 'Z') {
if matchesPath(pkgIdent, pkg.importPathShort) {
return true
}
}
return false
}
func lowerASCIIAndRemoveHyphen(s string) (ret string) {
buf := make([]byte, 0, len(s))
for i := 0; i < len(s); i++ {
b := s[i]
switch {
case b == '-':
continue
case 'A' <= b && b <= 'Z':
buf = append(buf, b+('a'-'A'))
default:
buf = append(buf, b)
}
}
return string(buf)
}
// canUse reports whether the package in dir is usable from filename,
// respecting the Go "internal" and "vendor" visibility rules.
func canUse(filename, dir string) bool {
@ -1863,19 +1799,84 @@ func canUse(filename, dir string) bool {
return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal")
}
// lastTwoComponents returns at most the last two path components
// of v, using either / or \ as the path separator.
func lastTwoComponents(v string) string {
// matchesPath reports whether ident may match a potential package name
// referred to by path, using heuristics to filter out unidiomatic package
// names.
//
// Specifically, it checks whether either of the last two '/'- or '\'-delimited
// path segments matches the identifier. The segment-matching heuristic must
// allow for various conventions around segment naming, including go-foo,
// foo-go, and foo.v3. To handle all of these, matching considers both (1) the
// entire segment, ignoring '-' and '.', as well as (2) the last subsegment
// separated by '-' or '.'. So the segment foo-go matches all of the following
// identifiers: foo, go, and foogo. All matches are case insensitive (for ASCII
// identifiers).
//
// See the docstring for [pkgIsCandidate] for an explanation of how this
// heuristic filters potential candidate packages.
func matchesPath(ident, path string) bool {
// Ignore case, for ASCII.
lowerIfASCII := func(b byte) byte {
if 'A' <= b && b <= 'Z' {
return b + ('a' - 'A')
}
return b
}
// match reports whether path[start:end] matches ident, ignoring [.-].
match := func(start, end int) bool {
ii := len(ident) - 1 // current byte in ident
pi := end - 1 // current byte in path
for ; pi >= start && ii >= 0; pi-- {
pb := path[pi]
if pb == '-' || pb == '.' {
continue
}
pb = lowerIfASCII(pb)
ib := lowerIfASCII(ident[ii])
if pb != ib {
return false
}
ii--
}
return ii < 0 && pi < start // all bytes matched
}
// segmentEnd and subsegmentEnd hold the end points of the current segment
// and subsegment intervals.
segmentEnd := len(path)
subsegmentEnd := len(path)
// Count slashes; we only care about the last two segments.
nslash := 0
for i := len(v) - 1; i >= 0; i-- {
if v[i] == '/' || v[i] == '\\' {
for i := len(path) - 1; i >= 0; i-- {
switch b := path[i]; b {
// TODO(rfindley): we handle backslashes here only because the previous
// heuristic handled backslashes. This is perhaps overly defensive, but is
// the result of many lessons regarding Chesterton's fence and the
// goimports codebase.
//
// However, this function is only ever called with something called an
// 'importPath'. Is it possible that this is a real import path, and
// therefore we need only consider forward slashes?
case '/', '\\':
if match(i+1, segmentEnd) || match(i+1, subsegmentEnd) {
return true
}
nslash++
if nslash == 2 {
return v[i:]
return false // did not match above
}
segmentEnd, subsegmentEnd = i, i // reset
case '-', '.':
if match(i+1, subsegmentEnd) {
return true
}
subsegmentEnd = i
}
}
return v
return match(0, segmentEnd) || match(0, subsegmentEnd)
}
type visitFn func(node ast.Node) ast.Visitor
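The matchesPath heuristic above is easiest to see with concrete cases. A hedged, in-package test sketch (the test name and cases are illustrative, not part of this change), mirroring the examples in the doc comment:

package imports // hypothetical test file

import "testing"

func TestMatchesPathHeuristic(t *testing.T) {
	cases := []struct {
		ident, path string
		want        bool
	}{
		{"foo", "github.com/user/foo-go", true},   // subsegment "foo" of "foo-go"
		{"go", "github.com/user/foo-go", true},    // last subsegment "go"
		{"foogo", "github.com/user/foo-go", true}, // whole segment, ignoring '-'
		{"pkg", "example.com/pkg.v3", true},       // subsegment before '.'
		{"cloudbilling", "google.golang.org/api/cloudbilling/v1", true}, // penultimate segment
		{"bar", "example.com/x/foo", false},       // no segment matches
	}
	for _, c := range cases {
		if got := matchesPath(c.ident, c.path); got != c.want {
			t.Errorf("matchesPath(%q, %q) = %v, want %v", c.ident, c.path, got, c.want)
		}
	}
}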

View file

@ -47,7 +47,14 @@ type Options struct {
// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env.
func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
fileSet := token.NewFileSet()
file, adjust, err := parse(fileSet, filename, src, opt)
var parserMode parser.Mode
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
}
file, adjust, err := parse(fileSet, filename, src, parserMode, opt.Fragment)
if err != nil {
return nil, err
}
@ -66,17 +73,19 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e
//
// Note that filename's directory influences which imports can be chosen,
// so it is important that filename be accurate.
func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
func FixImports(ctx context.Context, filename string, src []byte, goroot string, logf func(string, ...any), source Source) (fixes []*ImportFix, err error) {
ctx, done := event.Start(ctx, "imports.FixImports")
defer done()
fileSet := token.NewFileSet()
file, _, err := parse(fileSet, filename, src, opt)
// TODO(rfindley): these default values for ParseComments and AllErrors were
// extracted from gopls, but are they even needed?
file, _, err := parse(fileSet, filename, src, parser.ParseComments|parser.AllErrors, true)
if err != nil {
return nil, err
}
return getFixes(ctx, fileSet, file, filename, opt.Env)
return getFixesWithSource(ctx, fileSet, file, filename, goroot, logf, source)
}
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
@ -86,7 +95,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
// Don't use parse() -- we don't care about fragments or statement lists
// here, and we need to work with unparseable files.
fileSet := token.NewFileSet()
parserMode := parser.Mode(0)
parserMode := parser.SkipObjectResolution
if opt.Comments {
parserMode |= parser.ParseComments
}
@ -114,7 +123,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
// formatted file, and returns the postprocessed result.
func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
mergeImports(file)
sortImports(opt.LocalPrefix, fset.File(file.Pos()), file)
sortImports(opt.LocalPrefix, fset.File(file.FileStart), file)
var spacesBefore []string // import paths we need spaces before
for _, impSection := range astutil.Imports(fset, file) {
// Within each block of contiguous imports, see if any
@ -164,13 +173,9 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori
// parse parses src, which was read from filename,
// as a Go source file or statement list.
func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
parserMode := parser.Mode(0)
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
func parse(fset *token.FileSet, filename string, src []byte, parserMode parser.Mode, fragment bool) (*ast.File, func(orig, src []byte) []byte, error) {
if parserMode&parser.SkipObjectResolution != 0 {
panic("legacy ast.Object resolution is required")
}
// Try as whole source file.
@ -181,7 +186,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast
// If the error is that the source file didn't begin with a
// package line and we accept fragmented input, fall through to
// try as a source fragment. Stop and return on any other error.
if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
if !fragment || !strings.Contains(err.Error(), "expected 'package'") {
return nil, nil, err
}

View file

@ -245,7 +245,10 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe
// 2. Use this to separate module cache scanning from other scanning.
func gomodcacheForEnv(goenv map[string]string) string {
if gmc := goenv["GOMODCACHE"]; gmc != "" {
return gmc
// golang/go#67156: ensure that the module cache is clean, since it is
// assumed as a prefix to directories scanned by gopathwalk, which are
// themselves clean.
return filepath.Clean(gmc)
}
gopaths := filepath.SplitList(goenv["GOPATH"])
if len(gopaths) == 0 {
@ -265,9 +268,7 @@ func (r *ModuleResolver) initAllMods() error {
return err
}
if mod.Dir == "" {
if r.env.Logf != nil {
r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path)
}
r.env.logf("module %v has not been downloaded and will be ignored", mod.Path)
// Can't do anything with a module that's not downloaded.
continue
}
@ -742,8 +743,8 @@ func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
subdir := ""
if dir != root.Path {
subdir = dir[len(root.Path)+len("/"):]
if prefix := root.Path + string(filepath.Separator); strings.HasPrefix(dir, prefix) {
subdir = dir[len(prefix):]
}
importPath := filepath.ToSlash(subdir)
if strings.HasPrefix(importPath, "vendor/") {
@ -766,9 +767,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
}
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
if err != nil {
if r.env.Logf != nil {
r.env.Logf("decoding module cache path %q: %v", subdir, err)
}
r.env.logf("decoding module cache path %q: %v", subdir, err)
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),

63
vendor/golang.org/x/tools/internal/imports/source.go generated vendored Normal file
View file

@ -0,0 +1,63 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import "context"
// These types document the APIs below.
//
// TODO(rfindley): consider making these defined types rather than aliases.
type (
ImportPath = string
PackageName = string
Symbol = string
// References is a set of references found in a Go file. The first map key is the
// left hand side of a selector expression, the second key is the right hand
// side, and the value should always be true.
References = map[PackageName]map[Symbol]bool
)
// A Result satisfies a missing import.
//
// The Import field describes the missing import spec, and the Package field
// summarizes the package exports.
type Result struct {
Import *ImportInfo
Package *PackageInfo
}
// An ImportInfo represents a single import statement.
type ImportInfo struct {
ImportPath string // import path, e.g. "crypto/rand".
Name string // import name, e.g. "crand", or "" if none.
}
// A PackageInfo represents what's known about a package.
type PackageInfo struct {
Name string // package name in the package declaration, if known
Exports map[string]bool // set of names of known package-level symbols
}
// A Source provides imports to satisfy unresolved references in the file being
// fixed.
type Source interface {
// LoadPackageNames queries PackageName information for the requested import
// paths, when operating from the provided srcDir.
//
// TODO(rfindley): try to refactor to remove this operation.
LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error)
// ResolveReferences asks the Source for the best package name to satisfy
// each of the missing references, in the context of fixing the given
// filename.
//
// Returns a [Result] for each package name that could be satisfied, where
// each Result describes an import providing the required symbols. Package
// names may be omitted from the results if no candidates satisfy all missing
// references for that name. It is up to each data source to select the best
// result for each entry in the missing map.
ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error)
}
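To make the contract concrete, here is a minimal sketch of a Source backed by a fixed in-memory table. The type name and table layout are assumptions for illustration only; the real implementations are ProcessEnvSource and IndexSource, which appear further down in this change:

package imports // hypothetical file; a sketch, not part of this change

import "context"

// staticSource is a toy Source backed by a hard-coded table.
type staticSource struct {
	// pkgs maps a package name to its import path and exported names.
	pkgs map[PackageName]struct {
		path    ImportPath
		exports []Symbol
	}
}

func (s *staticSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) {
	names := make(map[ImportPath]PackageName)
	for name, info := range s.pkgs {
		for _, p := range paths {
			if p == info.path {
				names[p] = name
			}
		}
	}
	return names, nil
}

func (s *staticSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) {
	var results []*Result
	for name, syms := range missing {
		info, ok := s.pkgs[name]
		if !ok {
			continue // no candidate; this package name is simply omitted
		}
		exports := make(map[string]bool, len(info.exports))
		for _, sym := range info.exports {
			exports[sym] = true
		}
		// Offer a candidate only if it covers every missing symbol.
		covered := true
		for sym := range syms {
			if !exports[sym] {
				covered = false
				break
			}
		}
		if covered {
			results = append(results, &Result{
				Import:  &ImportInfo{ImportPath: info.path},
				Package: &PackageInfo{Name: name, Exports: exports},
			})
		}
	}
	return results, nil
}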

View file

@ -0,0 +1,129 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import (
"context"
"path/filepath"
"strings"
"sync"
"golang.org/x/sync/errgroup"
"golang.org/x/tools/internal/gopathwalk"
)
// ProcessEnvSource implements the [Source] interface using the legacy
// [ProcessEnv] abstraction.
type ProcessEnvSource struct {
env *ProcessEnv
srcDir string
filename string
pkgName string
}
// NewProcessEnvSource returns a [ProcessEnvSource] wrapping the given
// env, to be used for fixing imports in the file with name filename in package
// named pkgName.
func NewProcessEnvSource(env *ProcessEnv, filename, pkgName string) (*ProcessEnvSource, error) {
abs, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
srcDir := filepath.Dir(abs)
return &ProcessEnvSource{
env: env,
srcDir: srcDir,
filename: filename,
pkgName: pkgName,
}, nil
}
func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, unknown []string) (map[string]string, error) {
r, err := s.env.GetResolver()
if err != nil {
return nil, err
}
return r.loadPackageNames(unknown, srcDir)
}
func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) ([]*Result, error) {
var mu sync.Mutex
found := make(map[string][]pkgDistance)
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true // We want everything.
},
dirFound: func(pkg *pkg) bool {
return pkgIsCandidate(filename, refs, pkg)
},
packageNameLoaded: func(pkg *pkg) bool {
if _, want := refs[pkg.packageName]; !want {
return false
}
if pkg.dir == s.srcDir && s.pkgName == pkg.packageName {
// The candidate is in the same directory and has the
// same package name. Don't try to import ourselves.
return false
}
if !canUse(filename, pkg.dir) {
return false
}
mu.Lock()
defer mu.Unlock()
found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(s.srcDir, pkg.dir)})
return false // We'll do our own loading after we sort.
},
}
resolver, err := s.env.GetResolver()
if err != nil {
return nil, err
}
if err := resolver.scan(ctx, callback); err != nil {
return nil, err
}
g, ctx := errgroup.WithContext(ctx)
searcher := symbolSearcher{
logf: s.env.logf,
srcDir: s.srcDir,
xtest: strings.HasSuffix(s.pkgName, "_test"),
loadExports: resolver.loadExports,
}
var resultMu sync.Mutex
results := make(map[string]*Result, len(refs))
for pkgName, symbols := range refs {
g.Go(func() error {
found, err := searcher.search(ctx, found[pkgName], pkgName, symbols)
if err != nil {
return err
}
if found == nil {
return nil // No matching package.
}
imp := &ImportInfo{
ImportPath: found.importPathShort,
}
pkg := &PackageInfo{
Name: pkgName,
Exports: symbols,
}
resultMu.Lock()
results[pkgName] = &Result{Import: imp, Package: pkg}
resultMu.Unlock()
return nil
})
}
if err := g.Wait(); err != nil {
return nil, err
}
var ans []*Result
for _, x := range results {
ans = append(ans, x)
}
return ans, nil
}

View file

@ -0,0 +1,103 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import (
"context"
"sync"
"time"
"golang.org/x/tools/internal/modindex"
)
// This code is here rather than in the modindex package
// to avoid import loops.
// IndexSource implements Source using the module cache index, so it covers
// only packages in the module cache.
//
// This is perhaps over-engineered: a new Index is read at first use, Update
// is called again after 15 minutes, and a new Index is read if the index
// changed. It is not clear the Mutex is needed.
type IndexSource struct {
modcachedir string
mutex sync.Mutex
ix *modindex.Index
expires time.Time
}
// NewIndexSource creates a new Source. It is called from NewView in cache/session.go.
func NewIndexSource(cachedir string) *IndexSource {
return &IndexSource{modcachedir: cachedir}
}
func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) {
// This is used by goimports to resolve the package names of imports of the
// current package, which is irrelevant for the module cache.
return nil, nil
}
func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) {
if err := s.maybeReadIndex(); err != nil {
return nil, err
}
var cs []modindex.Candidate
for pkg, nms := range missing {
for nm := range nms {
x := s.ix.Lookup(pkg, nm, false)
cs = append(cs, x...)
}
}
found := make(map[string]*Result)
for _, c := range cs {
var x *Result
if x = found[c.ImportPath]; x == nil {
x = &Result{
Import: &ImportInfo{
ImportPath: c.ImportPath,
Name: "",
},
Package: &PackageInfo{
Name: c.PkgName,
Exports: make(map[string]bool),
},
}
found[c.ImportPath] = x
}
x.Package.Exports[c.Name] = true
}
var ans []*Result
for _, x := range found {
ans = append(ans, x)
}
return ans, nil
}
func (s *IndexSource) maybeReadIndex() error {
s.mutex.Lock()
defer s.mutex.Unlock()
var readIndex bool
if time.Now().After(s.expires) {
ok, err := modindex.Update(s.modcachedir)
if err != nil {
return err
}
if ok {
readIndex = true
}
}
if readIndex || s.ix == nil {
ix, err := modindex.ReadIndex(s.modcachedir)
if err != nil {
return err
}
s.ix = ix
// for now refresh every 15 minutes
s.expires = time.Now().Add(time.Minute * 15)
}
return nil
}
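A hedged usage sketch wiring the index-backed Source into the exported FixImports entry point. The paths and file contents are made up, and because this lives in an internal package the snippet only compiles from inside golang.org/x/tools or a vendored copy of it:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/tools/internal/imports"
)

func main() {
	// Hypothetical input: a file with a dangling reference to yaml.Unmarshal.
	filename := "/tmp/demo/demo.go"
	src := []byte("package demo\n\nvar _ = yaml.Unmarshal\n")

	// Resolve references against the module cache index (GOMODCACHE assumed).
	source := imports.NewIndexSource("/home/user/go/pkg/mod")

	fixes, err := imports.FixImports(context.Background(), filename, src, "/usr/local/go", log.Printf, source)
	if err != nil {
		log.Fatal(err)
	}
	for _, fix := range fixes {
		fmt.Printf("%+v\n", fix)
	}
}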

View file

@ -0,0 +1,135 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"fmt"
"log"
"os"
"path/filepath"
"regexp"
"slices"
"strings"
"sync"
"time"
"golang.org/x/mod/semver"
"golang.org/x/tools/internal/gopathwalk"
)
type directory struct {
path Relpath
importPath string
version string // semantic version
syms []symbol
}
// byImportPath groups the directories by import path,
// sorting the ones with the same import path by semantic version,
// most recent first.
func byImportPath(dirs []Relpath) (map[string][]*directory, error) {
ans := make(map[string][]*directory) // key is import path
for _, d := range dirs {
ip, sv, err := DirToImportPathVersion(d)
if err != nil {
return nil, err
}
ans[ip] = append(ans[ip], &directory{
path: d,
importPath: ip,
version: sv,
})
}
for k, v := range ans {
semanticSort(v)
ans[k] = v
}
return ans, nil
}
// sort the directories by semantic version, latest first
func semanticSort(v []*directory) {
slices.SortFunc(v, func(l, r *directory) int {
if n := semver.Compare(l.version, r.version); n != 0 {
return -n // latest first
}
return strings.Compare(string(l.path), string(r.path))
})
}
// modCacheRegexp splits a relative module-cache path into module, module version, and package.
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
// DirToImportPathVersion computes import path and semantic version
func DirToImportPathVersion(dir Relpath) (string, string, error) {
m := modCacheRegexp.FindStringSubmatch(string(dir))
// m[1] is the module path
// m[2] is the version (major.minor.patch, optionally followed by a pre-release identifier)
// m[3] is the rest of the package path
if len(m) != 4 {
return "", "", fmt.Errorf("bad dir %s", dir)
}
if !semver.IsValid(m[2]) {
return "", "", fmt.Errorf("bad semantic version %s", m[2])
}
// ToSlash is required for Windows.
return filepath.ToSlash(m[1] + m[3]), m[2], nil
}
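A small, hedged illustration of the decomposition, using the gopkg.in/yaml.v1 cache entry that also appears in the index example in index.go; the test name and placement are assumptions:

package modindex // hypothetical test file

import "testing"

func TestDirToImportPathVersion(t *testing.T) {
	dir := Relpath("gopkg.in/yaml.v1@v1.0.0-20140924161607-9f9df34309c0")
	ip, vers, err := DirToImportPathVersion(dir)
	if err != nil {
		t.Fatal(err)
	}
	if ip != "gopkg.in/yaml.v1" || vers != "v1.0.0-20140924161607-9f9df34309c0" {
		t.Errorf("got (%q, %q)", ip, vers)
	}
}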
// a region controls what directories to look at, for
// updating the index incrementally, and for testing that.
// (For testing, one builds an index as of A, incrementally
// updates it to B, and compares the result to an index built
// as of B.)
type region struct {
onlyAfter, onlyBefore time.Time
sync.Mutex
ans []Relpath
}
func findDirs(root string, onlyAfter, onlyBefore time.Time) []Relpath {
roots := []gopathwalk.Root{{Path: root, Type: gopathwalk.RootModuleCache}}
// TODO(PJW): adjust concurrency
opts := gopathwalk.Options{ModulesEnabled: true, Concurrency: 1 /* ,Logf: log.Printf*/}
betw := &region{
onlyAfter: onlyAfter,
onlyBefore: onlyBefore,
}
gopathwalk.WalkSkip(roots, betw.addDir, betw.skipDir, opts)
return betw.ans
}
func (r *region) addDir(rt gopathwalk.Root, dir string) {
// do we need to check times?
r.Lock()
defer r.Unlock()
x := filepath.ToSlash(string(toRelpath(Abspath(rt.Path), dir)))
r.ans = append(r.ans, toRelpath(Abspath(rt.Path), x))
}
func (r *region) skipDir(_ gopathwalk.Root, dir string) bool {
// The cache directory is already ignored in gopathwalk.
if filepath.Base(dir) == "internal" {
return true
}
if strings.Contains(dir, "toolchain@") {
return true
}
// don't look inside @ directories whose modification time falls outside the window
if strings.Contains(filepath.Base(dir), "@") {
st, err := os.Stat(dir)
if err != nil {
log.Printf("can't stat dir %s %v", dir, err)
return true
}
if st.ModTime().Before(r.onlyAfter) {
return true
}
if st.ModTime().After(r.onlyBefore) {
return true
}
}
return false
}

262
vendor/golang.org/x/tools/internal/modindex/index.go generated vendored Normal file
View file

@ -0,0 +1,262 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"bufio"
"encoding/csv"
"errors"
"fmt"
"hash/crc64"
"io"
"io/fs"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"time"
)
/*
The on-disk index is a text file.
The first 3 lines are header information containing CurrentVersion,
the value of GOMODCACHE, and the validity date of the index.
(This is when the code started building the index.)
Following the header are sections of lines, one section for each
import path. These sections are sorted by package name.
The first line of each section, marked by a leading :, contains
the package name, the import path, the name of the directory relative
to GOMODCACHE, and its semantic version.
The rest of each section consists of one line per exported symbol.
The lines are sorted by the symbol's name and contain the name,
an indication of its lexical type (C, T, V, F), and if it is the
name of a function, information about the signature.
The fields in the section header lines are separated by commas, and
in the unlikely event this would be confusing, the csv package is used
to write (and read) them.
In the lines containing exported names, C=const, V=var, T=type, F=func.
If it is a func, the next field is the number of returned values,
followed by pairs consisting of formal parameter names and types.
All these fields are separated by spaces. Any spaces in a type
(e.g., chan struct{}) are replaced by $s on the disk. The $s are
turned back into spaces when read.
Here is an index header (the comments are not part of the index):
0 // version (of the index format)
/usr/local/google/home/pjw/go/pkg/mod // GOMODCACHE
2024-09-11 18:55:09 // validity date of the index
Here is an index section:
:yaml,gopkg.in/yaml.v1,gopkg.in/yaml.v1@v1.0.0-20140924161607-9f9df34309c0,v1.0.0-20140924161607-9f9df34309c0
Getter T
Marshal F 2 in interface{}
Setter T
Unmarshal F 1 in []byte out interface{}
The package name is yaml, the import path is gopkg.in/yaml.v1.
Getter and Setter are types, and Marshal and Unmarshal are functions.
The latter returns one value and has two arguments, 'in' and 'out'
whose types are []byte and interface{}.
*/
// CurrentVersion tells readers about the format of the index.
const CurrentVersion int = 0
// Index is returned by ReadIndex().
type Index struct {
Version int
Cachedir Abspath // The directory containing the module cache
Changed time.Time // The index is up to date as of Changed
Entries []Entry
}
// An Entry contains information for an import path.
type Entry struct {
Dir Relpath // directory in modcache
ImportPath string
PkgName string
Version string
//ModTime STime // is this useful?
Names []string // exported names and information
}
// ReadIndex reads the latest version of the on-disk index
// for the cache directory cd.
// It returns (nil, nil) if there is no index, but returns
// a non-nil error if the index exists but could not be read.
func ReadIndex(cachedir string) (*Index, error) {
cachedir, err := filepath.Abs(cachedir)
if err != nil {
return nil, err
}
cd := Abspath(cachedir)
dir, err := IndexDir()
if err != nil {
return nil, err
}
base := indexNameBase(cd)
iname := filepath.Join(dir, base)
buf, err := os.ReadFile(iname)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return nil, nil
}
return nil, fmt.Errorf("cannot read %s: %w", iname, err)
}
fname := filepath.Join(dir, string(buf))
fd, err := os.Open(fname)
if err != nil {
return nil, err
}
defer fd.Close()
r := bufio.NewReader(fd)
ix, err := readIndexFrom(cd, r)
if err != nil {
return nil, err
}
return ix, nil
}
func readIndexFrom(cd Abspath, bx io.Reader) (*Index, error) {
b := bufio.NewScanner(bx)
var ans Index
// header
ok := b.Scan()
if !ok {
return nil, fmt.Errorf("unexpected scan error")
}
l := b.Text()
var err error
ans.Version, err = strconv.Atoi(l)
if err != nil {
return nil, err
}
if ans.Version != CurrentVersion {
return nil, fmt.Errorf("got version %d, expected %d", ans.Version, CurrentVersion)
}
if ok := b.Scan(); !ok {
return nil, fmt.Errorf("scanner error reading cachedir")
}
ans.Cachedir = Abspath(b.Text())
if ok := b.Scan(); !ok {
return nil, fmt.Errorf("scanner error reading index creation time")
}
// TODO(pjw): need to check that this is the expected cachedir
// so the tag should be passed in to this function
ans.Changed, err = time.ParseInLocation(time.DateTime, b.Text(), time.Local)
if err != nil {
return nil, err
}
var curEntry *Entry
for b.Scan() {
v := b.Text()
if v[0] == ':' {
if curEntry != nil {
ans.Entries = append(ans.Entries, *curEntry)
}
// as directories may contain commas and quotes, they need to be read as csv.
rdr := strings.NewReader(v[1:])
cs := csv.NewReader(rdr)
flds, err := cs.Read()
if err != nil {
return nil, err
}
if len(flds) != 4 {
return nil, fmt.Errorf("header contains %d fields, not 4: %q", len(flds), v)
}
curEntry = &Entry{PkgName: flds[0], ImportPath: flds[1], Dir: toRelpath(cd, flds[2]), Version: flds[3]}
continue
}
curEntry.Names = append(curEntry.Names, v)
}
if curEntry != nil {
ans.Entries = append(ans.Entries, *curEntry)
}
if err := b.Err(); err != nil {
return nil, fmt.Errorf("scanner failed %v", err)
}
return &ans, nil
}
// write the index as a text file
func writeIndex(cachedir Abspath, ix *Index) error {
dir, err := IndexDir()
if err != nil {
return err
}
ipat := fmt.Sprintf("index-%d-*", CurrentVersion)
fd, err := os.CreateTemp(dir, ipat)
if err != nil {
return err // can this happen?
}
defer fd.Close()
if err := writeIndexToFile(ix, fd); err != nil {
return err
}
content := fd.Name()
content = filepath.Base(content)
base := indexNameBase(cachedir)
nm := filepath.Join(dir, base)
err = os.WriteFile(nm, []byte(content), 0666)
if err != nil {
return err
}
return nil
}
func writeIndexToFile(x *Index, fd *os.File) error {
cnt := 0
w := bufio.NewWriter(fd)
fmt.Fprintf(w, "%d\n", x.Version)
fmt.Fprintf(w, "%s\n", x.Cachedir)
// round the time down
tm := x.Changed.Add(-time.Second / 2)
fmt.Fprintf(w, "%s\n", tm.Format(time.DateTime))
for _, e := range x.Entries {
if e.ImportPath == "" {
continue // shouldn't happen
}
// PJW: maybe always write these headers as csv?
if strings.ContainsAny(string(e.Dir), ",\"") {
log.Printf("DIR: %s", e.Dir)
cw := csv.NewWriter(w)
cw.Write([]string{":" + e.PkgName, e.ImportPath, string(e.Dir), e.Version})
cw.Flush()
} else {
fmt.Fprintf(w, ":%s,%s,%s,%s\n", e.PkgName, e.ImportPath, e.Dir, e.Version)
}
for _, x := range e.Names {
fmt.Fprintf(w, "%s\n", x)
cnt++
}
}
if err := w.Flush(); err != nil {
return err
}
return nil
}
// tests can override this
var IndexDir = indexDir
// indexDir computes the directory containing the index.
func indexDir() (string, error) {
dir, err := os.UserCacheDir()
if err != nil {
return "", fmt.Errorf("cannot open UserCacheDir, %w", err)
}
return filepath.Join(dir, "go", "imports"), nil
}
// return the base name of the file containing the name of the current index
func indexNameBase(cachedir Abspath) string {
// crc64 is a way to convert path names into 16 hex digits.
h := crc64.Checksum([]byte(cachedir), crc64.MakeTable(crc64.ECMA))
fname := fmt.Sprintf("index-name-%d-%016x", CurrentVersion, h)
return fname
}
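
A hedged, self-contained sketch of the section-header parsing described above (the header line is the example from the format comment; this is an illustration, not the vendored package itself):

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	// A section header line from the on-disk index, as shown in the format comment.
	line := ":yaml,gopkg.in/yaml.v1,gopkg.in/yaml.v1@v1.0.0-20140924161607-9f9df34309c0,v1.0.0-20140924161607-9f9df34309c0"
	// Strip the leading ':' and read one CSV record, as readIndexFrom does.
	flds, err := csv.NewReader(strings.NewReader(line[1:])).Read()
	if err != nil || len(flds) != 4 {
		panic("malformed section header")
	}
	// flds: package name, import path, directory relative to GOMODCACHE, semantic version.
	fmt.Println(flds[0], flds[1], flds[3]) // yaml gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0
}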

145
vendor/golang.org/x/tools/internal/modindex/lookup.go generated vendored Normal file
View file

@ -0,0 +1,145 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"slices"
"strconv"
"strings"
)
type Candidate struct {
PkgName string
Name string
Dir string
ImportPath string
Type LexType
// information for Funcs
Results int16 // how many results
Sig []Field // arg names and types
}
type Field struct {
Arg, Type string
}
type LexType int8
const (
Const LexType = iota
Var
Type
Func
)
// Lookup finds all the symbols in the index with the given PkgName and name.
// If prefix is true, it finds all of these with name as a prefix.
func (ix *Index) Lookup(pkg, name string, prefix bool) []Candidate {
loc, ok := slices.BinarySearchFunc(ix.Entries, pkg, func(e Entry, pkg string) int {
return strings.Compare(e.PkgName, pkg)
})
if !ok {
return nil // didn't find the package
}
var ans []Candidate
// loc is the first entry for this package name, but there may be several
for i := loc; i < len(ix.Entries); i++ {
e := ix.Entries[i]
if e.PkgName != pkg {
break // end of sorted package names
}
nloc, ok := slices.BinarySearchFunc(e.Names, name, func(s string, name string) int {
if strings.HasPrefix(s, name) {
return 0
}
if s < name {
return -1
}
return 1
})
if !ok {
continue // didn't find the name, nor any symbols with name as a prefix
}
for j := nloc; j < len(e.Names); j++ {
nstr := e.Names[j]
// benchmarks show this makes a difference when there are a lot of possibilities
flds := fastSplit(nstr)
if !(flds[0] == name || prefix && strings.HasPrefix(flds[0], name)) {
// past range of matching Names
break
}
if len(flds) < 2 {
continue // should never happen
}
px := Candidate{
PkgName: pkg,
Name: flds[0],
Dir: string(e.Dir),
ImportPath: e.ImportPath,
Type: asLexType(flds[1][0]),
}
if flds[1] == "F" {
n, err := strconv.Atoi(flds[2])
if err != nil {
continue // should never happen
}
px.Results = int16(n)
if len(flds) >= 4 {
sig := strings.Split(flds[3], " ")
for i := 0; i < len(sig); i++ {
// $ cannot otherwise occur. Removing the spaces
// almost works, but not for types like chan struct{}.
sig[i] = strings.Replace(sig[i], "$", " ", -1)
}
px.Sig = toFields(sig)
}
}
ans = append(ans, px)
}
}
return ans
}
func toFields(sig []string) []Field {
ans := make([]Field, len(sig)/2)
for i := 0; i < len(ans); i++ {
ans[i] = Field{Arg: sig[2*i], Type: sig[2*i+1]}
}
return ans
}
// benchmarks show this is measurably better than strings.Split
func fastSplit(x string) []string {
ans := make([]string, 0, 4)
nxt := 0
start := 0
for i := 0; i < len(x); i++ {
if x[i] != ' ' {
continue
}
ans = append(ans, x[start:i])
nxt++
start = i + 1
if nxt >= 3 {
break
}
}
ans = append(ans, x[start:])
return ans
}
func asLexType(c byte) LexType {
switch c {
case 'C':
return Const
case 'V':
return Var
case 'T':
return Type
case 'F':
return Func
}
return -1
}
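
For illustration only (assuming the symbol-line format documented in index.go; strings.SplitN stands in for the package's fastSplit), decoding one name line works out to:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// One exported-name line: "<name> <kind> <results> <arg> <type> ...".
	line := "Unmarshal F 1 in []byte out interface{}"
	flds := strings.SplitN(line, " ", 4) // name, kind, result count, signature
	sig := strings.Split(flds[3], " ")
	for i := range sig {
		// $ stands in for spaces inside a type such as "chan struct{}".
		sig[i] = strings.ReplaceAll(sig[i], "$", " ")
	}
	fmt.Println(flds[0], flds[1], flds[2], sig) // Unmarshal F 1 [in []byte out interface{}]
}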

164
vendor/golang.org/x/tools/internal/modindex/modindex.go generated vendored Normal file
View file

@ -0,0 +1,164 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package modindex contains code for building and searching an index to
// the Go module cache. The directory containing the index, returned by
// IndexDir(), contains a file index-name-<ver> that contains the name
// of the current index. We believe writing that short file is atomic.
// ReadIndex reads that file to get the file name of the index.
// WriteIndex writes an index with a unique name and then
// writes that name into a new version of index-name-<ver>.
// (<ver> stands for the CurrentVersion of the index format.)
package modindex
import (
"path/filepath"
"slices"
"strings"
"time"
"golang.org/x/mod/semver"
)
// Create always creates a new index for the go module cache that is in cachedir.
func Create(cachedir string) error {
_, err := indexModCache(cachedir, true)
return err
}
// Update the index for the go module cache that is in cachedir.
// If there is no existing index it will build one.
// If there are changed directories since the last index, it will
// write a new one and return true. Otherwise it returns false.
func Update(cachedir string) (bool, error) {
return indexModCache(cachedir, false)
}
// indexModCache writes an index current as of when it is called.
// If clear is true the index is constructed from all of GOMODCACHE
// otherwise the index is constructed from the last previous index
// and the updates to the cache. It returns true if it wrote an index,
// false otherwise.
func indexModCache(cachedir string, clear bool) (bool, error) {
cachedir, err := filepath.Abs(cachedir)
if err != nil {
return false, err
}
cd := Abspath(cachedir)
future := time.Now().Add(24 * time.Hour) // safely in the future
ok, err := modindexTimed(future, cd, clear)
if err != nil {
return false, err
}
return ok, nil
}
// modindexTimed writes an index current as of onlyBefore.
// If clear is true the index is constructed from all of GOMODCACHE
// otherwise the index is constructed from the last previous index
// and all the updates to the cache before onlyBefore.
// It returns true if it wrote a new index, false if it wrote nothing.
func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
var curIndex *Index
if !clear {
var err error
curIndex, err = ReadIndex(string(cachedir))
if clear && err != nil {
return false, err
}
// TODO(pjw): check that most of those directories still exist
}
cfg := &work{
onlyBefore: onlyBefore,
oldIndex: curIndex,
cacheDir: cachedir,
}
if curIndex != nil {
cfg.onlyAfter = curIndex.Changed
}
if err := cfg.buildIndex(); err != nil {
return false, err
}
if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
// no changes from existing curIndex, don't write a new index
return false, nil
}
if err := cfg.writeIndex(); err != nil {
return false, err
}
return true, nil
}
type work struct {
onlyBefore time.Time // do not use directories later than this
onlyAfter time.Time // only interested in directories after this
// directories from before onlyAfter come from oldIndex
oldIndex *Index
newIndex *Index
cacheDir Abspath
}
func (w *work) buildIndex() error {
// The effective date of the new index should be at least
// slightly earlier than when the directories are scanned
// so set it now.
w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
if len(dirs) == 0 {
return nil
}
newdirs, err := byImportPath(dirs)
if err != nil {
return err
}
// for each import path it might occur only in newdirs,
// only in w.oldIndex, or in both.
// If it occurs in both, use the semantically later one
if w.oldIndex != nil {
for _, e := range w.oldIndex.Entries {
found, ok := newdirs[e.ImportPath]
if !ok {
w.newIndex.Entries = append(w.newIndex.Entries, e)
continue // use this one, there is no new one
}
if semver.Compare(found[0].version, e.Version) > 0 {
// use the new one
} else {
// use the old one, forget the new one
w.newIndex.Entries = append(w.newIndex.Entries, e)
delete(newdirs, e.ImportPath)
}
}
}
// get symbol information for all the new directories
getSymbols(w.cacheDir, newdirs)
// assemble the new index entries
for k, v := range newdirs {
d := v[0]
pkg, names := processSyms(d.syms)
if pkg == "" {
continue // PJW: does this ever happen?
}
entry := Entry{
PkgName: pkg,
Dir: d.path,
ImportPath: k,
Version: d.version,
Names: names,
}
w.newIndex.Entries = append(w.newIndex.Entries, entry)
}
// sort the entries in the new index
slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
return n
}
return strings.Compare(l.ImportPath, r.ImportPath)
})
return nil
}
func (w *work) writeIndex() error {
return writeIndex(w.cacheDir, w.newIndex)
}
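
The merge in buildIndex keeps whichever entry has the semantically later version; here is a minimal standalone sketch of that comparison using golang.org/x/mod/semver (the version strings are examples only):

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	oldVer := "v1.0.0" // version recorded in the old index entry
	newVer := "v1.1.0" // version of a freshly scanned cache directory
	if semver.Compare(newVer, oldVer) > 0 {
		fmt.Println("index the new directory") // this branch: v1.1.0 is later
	} else {
		fmt.Println("keep the old entry")
	}
}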

189
vendor/golang.org/x/tools/internal/modindex/symbols.go generated vendored Normal file
View file

@ -0,0 +1,189 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
"go/types"
"os"
"path/filepath"
"slices"
"strings"
"golang.org/x/sync/errgroup"
)
// The name of a symbol contains information about the symbol:
// <name> T for types
// <name> C for consts
// <name> V for vars
// and for funcs: <name> F <num of return values> (<arg-name> <arg-type>)*
// any spaces in <arg-type> are replaced by $s so that the fields
// of the name are space separated
type symbol struct {
pkg string // name of the symbol's package
name string // declared name
kind string // T, C, V, or F
sig string // signature information, for F
}
// find the symbols for the best directories
func getSymbols(cd Abspath, dirs map[string][]*directory) {
var g errgroup.Group
g.SetLimit(-1) // maybe throttle this some day
for _, vv := range dirs {
// throttling some day?
d := vv[0]
g.Go(func() error {
thedir := filepath.Join(string(cd), string(d.path))
mode := parser.SkipObjectResolution
fi, err := os.ReadDir(thedir)
if err != nil {
return nil // log this someday?
}
for _, fx := range fi {
if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
continue
}
fname := filepath.Join(thedir, fx.Name())
tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
if err != nil {
continue // ignore errors, someday log them?
}
d.syms = append(d.syms, getFileExports(tr)...)
}
return nil
})
}
g.Wait()
}
func getFileExports(f *ast.File) []symbol {
pkg := f.Name.Name
if pkg == "main" {
return nil
}
var ans []symbol
// should we look for //go:build ignore?
for _, decl := range f.Decls {
switch decl := decl.(type) {
case *ast.FuncDecl:
if decl.Recv != nil {
// ignore methods, as we are completing package selections
continue
}
name := decl.Name.Name
dtype := decl.Type
// not looking at dtype.TypeParams. That is, treating
// generic functions just like non-generic ones.
sig := dtype.Params
kind := "F"
result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
for _, x := range sig.List {
// This code creates a string representing the type.
// TODO(pjw): it may be fragile:
// 1. x.Type could be nil, perhaps in ill-formed code
// 2. ExprString might someday change incompatibly to
// include struct tags, which can be arbitrary strings
if x.Type == nil {
// Can this happen without a parse error? (Files with parse
// errors are ignored in getSymbols)
continue // maybe report this someday
}
tp := types.ExprString(x.Type)
if len(tp) == 0 {
// Can this happen?
continue // maybe report this someday
}
// This is only safe if ExprString never returns anything with a $
// The only place a $ can occur seems to be in a struct tag, which
// can be an arbitrary string literal, and ExprString does not presently
// print struct tags. So for this to happen the type of a formal parameter
// has to be an explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
// would have to show the struct tag. Even testing for this case seems
// a waste of effort, but let's not ignore such pathologies.
if strings.Contains(tp, "$") {
continue
}
tp = strings.Replace(tp, " ", "$", -1)
if len(x.Names) == 0 {
result = append(result, "_")
result = append(result, tp)
} else {
for _, y := range x.Names {
result = append(result, y.Name)
result = append(result, tp)
}
}
}
sigs := strings.Join(result, " ")
if s := newsym(pkg, name, kind, sigs); s != nil {
ans = append(ans, *s)
}
case *ast.GenDecl:
switch decl.Tok {
case token.CONST, token.VAR:
tp := "V"
if decl.Tok == token.CONST {
tp = "C"
}
for _, sp := range decl.Specs {
for _, x := range sp.(*ast.ValueSpec).Names {
if s := newsym(pkg, x.Name, tp, ""); s != nil {
ans = append(ans, *s)
}
}
}
case token.TYPE:
for _, sp := range decl.Specs {
if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, "T", ""); s != nil {
ans = append(ans, *s)
}
}
}
}
}
return ans
}
func newsym(pkg, name, kind, sig string) *symbol {
if len(name) == 0 || !ast.IsExported(name) {
return nil
}
sym := symbol{pkg: pkg, name: name, kind: kind, sig: sig}
return &sym
}
// return the package name and the index name lines for the symbols.
// If there are multiple packages, choose one arbitrarily;
// the returned slice is sorted lexicographically.
func processSyms(syms []symbol) (string, []string) {
if len(syms) == 0 {
return "", nil
}
slices.SortFunc(syms, func(l, r symbol) int {
return strings.Compare(l.name, r.name)
})
pkg := syms[0].pkg
var names []string
for _, s := range syms {
var nx string
if s.pkg == pkg {
if s.sig != "" {
nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
} else {
nx = fmt.Sprintf("%s %s", s.name, s.kind)
}
names = append(names, nx)
} else {
continue // PJW: do we want to keep track of these?
}
}
return pkg, names
}
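
A hedged sketch of how one exported function turns into an index name line, simplified from getFileExports above (a hypothetical main package; the source string is an example):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"strings"
)

func main() {
	src := `package yaml
func Unmarshal(in []byte, out interface{}) error { return nil }`
	f, err := parser.ParseFile(token.NewFileSet(), "x.go", src, parser.SkipObjectResolution)
	if err != nil {
		panic(err)
	}
	for _, d := range f.Decls {
		fd, ok := d.(*ast.FuncDecl)
		if !ok || fd.Recv != nil {
			continue // methods are skipped, as in getFileExports
		}
		parts := []string{fmt.Sprint(fd.Type.Results.NumFields())}
		for _, p := range fd.Type.Params.List {
			// spaces inside a type are replaced by $ so the line stays space-separated
			tp := strings.ReplaceAll(types.ExprString(p.Type), " ", "$")
			for _, n := range p.Names {
				parts = append(parts, n.Name, tp)
			}
		}
		fmt.Printf("%s F %s\n", fd.Name.Name, strings.Join(parts, " ")) // Unmarshal F 1 in []byte out interface{}
	}
}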

25
vendor/golang.org/x/tools/internal/modindex/types.go generated vendored Normal file
View file

@ -0,0 +1,25 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"strings"
)
// Some special types to distinguish the various kinds of directory
// names; it's easy to get confused.
type Abspath string // absolute paths
type Relpath string // paths with GOMODCACHE prefix removed
func toRelpath(cachedir Abspath, s string) Relpath {
if strings.HasPrefix(s, string(cachedir)) {
if s == string(cachedir) {
return Relpath("")
}
return Relpath(s[len(cachedir)+1:])
}
return Relpath(s)
}
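
A tiny standalone sketch of the Abspath/Relpath convention (example paths only): a Relpath is the directory with the GOMODCACHE prefix and its trailing separator stripped, as toRelpath does.

package main

import (
	"fmt"
	"strings"
)

func main() {
	cachedir := "/home/user/go/pkg/mod" // example GOMODCACHE
	dir := "/home/user/go/pkg/mod/gopkg.in/yaml.v1@v1.0.0"
	rel := dir
	if strings.HasPrefix(dir, cachedir) && dir != cachedir {
		rel = dir[len(cachedir)+1:] // drop the prefix and the following separator
	}
	fmt.Println(rel) // gopkg.in/yaml.v1@v1.0.0
}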

View file

@ -5,7 +5,6 @@
// Package packagesinternal exposes internal-only fields from go/packages.
package packagesinternal
var GetForTest = func(p interface{}) string { return "" }
var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
type PackageError struct {
@ -16,7 +15,6 @@ type PackageError struct {
var TypecheckCgo int
var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
var ForTest int // must be set as a LoadMode to call GetForTest
var SetModFlag = func(config interface{}, value string) {}
var SetModFile = func(config interface{}, value string) {}

View file

@ -21,10 +21,7 @@
// export data.
type PkgDecoder struct {
// version is the file format version.
version uint32
// aliases determines whether types.Aliases should be created
aliases bool
version Version
// sync indicates whether the file uses sync markers.
sync bool
@ -71,12 +68,9 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
// IR export data from input. pkgPath is the package path for the
// compilation unit that produced the export data.
//
// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
func NewPkgDecoder(pkgPath, input string) PkgDecoder {
pr := PkgDecoder{
pkgPath: pkgPath,
//aliases: aliases.Enabled(),
}
// TODO(mdempsky): Implement direct indexing of input string to
@ -84,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
r := strings.NewReader(input)
assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
var ver uint32
assert(binary.Read(r, binary.LittleEndian, &ver) == nil)
pr.version = Version(ver)
switch pr.version {
default:
panic(fmt.Errorf("unsupported version: %v", pr.version))
case 0:
// no flags
case 1:
if pr.version >= numVersions {
panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1))
}
if pr.version.Has(Flags) {
var flags uint32
assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
pr.sync = flags&flagSyncMarkers != 0
@ -106,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
assert(err == nil)
pr.elemData = input[pos:]
assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
const fingerprintSize = 8
assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1]))
return pr
}
@ -140,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
absIdx += int(pr.elemEndsEnds[k-1])
}
if absIdx >= int(pr.elemEndsEnds[k]) {
errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
}
return absIdx
}
@ -197,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
Idx: idx,
}
// TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
r.Data = *strings.NewReader(pr.DataIdx(k, idx))
r.Data.Reset(pr.DataIdx(k, idx))
r.Sync(SyncRelocs)
r.Relocs = make([]RelocEnt, r.Len())
for i := range r.Relocs {
@ -248,7 +243,7 @@ type Decoder struct {
func (r *Decoder) checkErr(err error) {
if err != nil {
errorf("unexpected decoding error: %w", err)
panicf("unexpected decoding error: %w", err)
}
}
@ -519,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
return path, name, tag
}
// Version reports the version of the bitstream.
func (w *Decoder) Version() Version { return w.common.version }

View file

@ -12,18 +12,15 @@
"io"
"math/big"
"runtime"
"strings"
)
// currentVersion is the current version number.
//
// - v0: initial prototype
//
// - v1: adds the flags uint32 word
const currentVersion uint32 = 1
// A PkgEncoder provides methods for encoding a package's Unified IR
// export data.
type PkgEncoder struct {
// version of the bitstream.
version Version
// elems holds the bitstream for previously encoded elements.
elems [numRelocs][]string
@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
// export data files, but can help diagnosing desync errors in
// higher-level Unified IR reader/writer code. If syncFrames is
// negative, then sync markers are omitted entirely.
func NewPkgEncoder(syncFrames int) PkgEncoder {
func NewPkgEncoder(version Version, syncFrames int) PkgEncoder {
return PkgEncoder{
version: version,
stringsIdx: make(map[string]Index),
syncFrames: syncFrames,
}
@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
assert(binary.Write(out, binary.LittleEndian, x) == nil)
}
writeUint32(currentVersion)
writeUint32(uint32(pw.version))
var flags uint32
if pw.SyncMarkers() {
flags |= flagSyncMarkers
if pw.version.Has(Flags) {
var flags uint32
if pw.SyncMarkers() {
flags |= flagSyncMarkers
}
writeUint32(flags)
}
writeUint32(flags)
// Write elemEndsEnds.
var sum uint32
@ -159,7 +159,7 @@ type Encoder struct {
// Flush finalizes the element's bitstream and returns its Index.
func (w *Encoder) Flush() Index {
var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
var sb strings.Builder
// Backup the data so we write the relocations at the front.
var tmp bytes.Buffer
@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index {
func (w *Encoder) checkErr(err error) {
if err != nil {
errorf("unexpected encoding error: %v", err)
panicf("unexpected encoding error: %v", err)
}
}
@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) {
// section (if not already present), and then writing a relocation
// into the element bitstream.
func (w *Encoder) String(s string) {
w.StringRef(w.p.StringIdx(s))
}
// StringRef writes a reference to the given index, which must be a
// previously encoded string value.
func (w *Encoder) StringRef(idx Index) {
w.Sync(SyncString)
w.Reloc(RelocString, w.p.StringIdx(s))
w.Reloc(RelocString, idx)
}
// Strings encodes and writes a variable-length slice of strings into
@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) {
func (w *Encoder) scalar(val constant.Value) {
switch v := constant.Val(val).(type) {
default:
errorf("unhandled %v (%v)", val, val.Kind())
panicf("unhandled %v (%v)", val, val.Kind())
case bool:
w.Code(ValBool)
w.Bool(v)
@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) {
b := v.Append(nil, 'p', -1)
w.String(string(b)) // TODO: More efficient encoding.
}
// Version reports the version of the bitstream.
func (w *Encoder) Version() Version { return w.p.version }

View file

@ -1,21 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.7
// +build !go1.7
// TODO(mdempsky): Remove after #44505 is resolved
package pkgbits
import "runtime"
func walkFrames(pcs []uintptr, visit frameVisitor) {
for _, pc := range pcs {
fn := runtime.FuncForPC(pc)
file, line := fn.FileLine(pc)
visit(file, line, fn.Name(), pc-fn.Entry())
}
}

View file

@ -1,28 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.7
// +build go1.7
package pkgbits
import "runtime"
// walkFrames calls visit for each call frame represented by pcs.
//
// pcs should be a slice of PCs, as returned by runtime.Callers.
func walkFrames(pcs []uintptr, visit frameVisitor) {
if len(pcs) == 0 {
return
}
frames := runtime.CallersFrames(pcs)
for {
frame, more := frames.Next()
visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
if !more {
return
}
}
}

View file

@ -12,6 +12,6 @@ func assert(b bool) {
}
}
func errorf(format string, args ...interface{}) {
func panicf(format string, args ...any) {
panic(fmt.Errorf(format, args...))
}

View file

@ -6,6 +6,7 @@
import (
"fmt"
"runtime"
"strings"
)
@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string {
type frameVisitor func(file string, line int, name string, offset uintptr)
// walkFrames calls visit for each call frame represented by pcs.
//
// pcs should be a slice of PCs, as returned by runtime.Callers.
func walkFrames(pcs []uintptr, visit frameVisitor) {
if len(pcs) == 0 {
return
}
frames := runtime.CallersFrames(pcs)
for {
frame, more := frames.Next()
visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
if !more {
return
}
}
}
// SyncMarker is an enum type that represents markers that may be
// written to export data to ensure the reader and writer stay
// synchronized.
@ -110,4 +129,8 @@ func fmtFrames(pcs ...uintptr) []string {
SyncStmtsEnd
SyncLabel
SyncOptLabel
SyncMultiExpr
SyncRType
SyncConvRTTI
)

View file

@ -74,11 +74,14 @@ func _() {
_ = x[SyncStmtsEnd-64]
_ = x[SyncLabel-65]
_ = x[SyncOptLabel-66]
_ = x[SyncMultiExpr-67]
_ = x[SyncRType-68]
_ = x[SyncConvRTTI-69]
}
const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI"
var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480}
func (i SyncMarker) String() string {
i -= 1

85
vendor/golang.org/x/tools/internal/pkgbits/version.go generated vendored Normal file
View file

@ -0,0 +1,85 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgbits
// Version indicates a version of a unified IR bitstream.
// Each Version indicates the addition, removal, or change of
// new data in the bitstream.
//
// These are serialized to disk and the interpretation remains fixed.
type Version uint32
const (
// V0: initial prototype.
//
// All data that is not assigned a Field is in version V0
// and has not been deprecated.
V0 Version = iota
// V1: adds the Flags uint32 word
V1
// V2: removes unused legacy fields and supports type parameters for aliases.
// - remove the legacy "has init" bool from the public root
// - remove obj's "derived func instance" bool
// - add a TypeParamNames field to ObjAlias
// - remove derived info "needed" bool
V2
numVersions = iota
)
// Field denotes a unit of data in the serialized unified IR bitstream.
// It is conceptually like a field in a structure.
//
// We only really need Fields when the data may or may not be present
// in a stream based on the Version of the bitstream.
//
// Unlike much of pkgbits, Fields are not serialized and
// can change values as needed.
type Field int
const (
// Flags is a uint32 in the header of a bitstream
// that indicates whether optional features are enabled.
Flags Field = iota
// Deprecated: HasInit was a bool indicating whether a package
// has any init functions.
HasInit
// Deprecated: DerivedFuncInstance was a bool indicating
// whether an object was a function instance.
DerivedFuncInstance
// ObjAlias has a list of TypeParamNames.
AliasTypeParamNames
// Deprecated: DerivedInfoNeeded was a bool indicating
// whether a type was a derived type.
DerivedInfoNeeded
numFields = iota
)
// introduced is the version a field was added.
var introduced = [numFields]Version{
Flags: V1,
AliasTypeParamNames: V2,
}
// removed is the version a field was removed in or 0 for fields
// that have not yet been deprecated.
// (So removed[f]-1 is the last version it is included in.)
var removed = [numFields]Version{
HasInit: V2,
DerivedFuncInstance: V2,
DerivedInfoNeeded: V2,
}
// Has reports whether field f is present in a bitstream at version v.
func (v Version) Has(f Field) bool {
return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
}
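
A minimal standalone sketch of the version gate that Has implements (the field versions are illustrative; removed == V0 means the field has never been removed):

package main

import "fmt"

type Version uint32

const (
	V0 Version = iota
	V1
	V2
)

// has mirrors Version.Has above: a field is present if the stream version is
// at least the version that introduced it and, unless the field was never
// removed (removed == V0), strictly below the version that removed it.
func has(v, introduced, removed Version) bool {
	return introduced <= v && (v < removed || removed == V0)
}

func main() {
	fmt.Println(has(V0, V1, V0)) // false: a V1 field (e.g. Flags) is absent from a V0 stream
	fmt.Println(has(V2, V1, V0)) // true:  the same field is present from V1 onward
	fmt.Println(has(V2, V0, V2)) // false: a field removed in V2 (e.g. HasInit) is gone again
}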

View file

@ -23,6 +23,7 @@
{"ErrWriteAfterClose", Var, 0},
{"ErrWriteTooLong", Var, 0},
{"FileInfoHeader", Func, 1},
{"FileInfoNames", Type, 23},
{"Format", Type, 10},
{"FormatGNU", Const, 10},
{"FormatPAX", Const, 10},
@ -820,6 +821,7 @@
{"(*ConnectionState).ExportKeyingMaterial", Method, 11},
{"(*Dialer).Dial", Method, 15},
{"(*Dialer).DialContext", Method, 15},
{"(*ECHRejectionError).Error", Method, 23},
{"(*QUICConn).Close", Method, 21},
{"(*QUICConn).ConnectionState", Method, 21},
{"(*QUICConn).HandleData", Method, 21},
@ -827,6 +829,7 @@
{"(*QUICConn).SendSessionTicket", Method, 21},
{"(*QUICConn).SetTransportParameters", Method, 21},
{"(*QUICConn).Start", Method, 21},
{"(*QUICConn).StoreSession", Method, 23},
{"(*SessionState).Bytes", Method, 21},
{"(AlertError).Error", Method, 21},
{"(ClientAuthType).String", Method, 15},
@ -877,6 +880,8 @@
{"Config.ClientSessionCache", Field, 3},
{"Config.CurvePreferences", Field, 3},
{"Config.DynamicRecordSizingDisabled", Field, 7},
{"Config.EncryptedClientHelloConfigList", Field, 23},
{"Config.EncryptedClientHelloRejectionVerify", Field, 23},
{"Config.GetCertificate", Field, 4},
{"Config.GetClientCertificate", Field, 8},
{"Config.GetConfigForClient", Field, 8},
@ -902,6 +907,7 @@
{"ConnectionState", Type, 0},
{"ConnectionState.CipherSuite", Field, 0},
{"ConnectionState.DidResume", Field, 1},
{"ConnectionState.ECHAccepted", Field, 23},
{"ConnectionState.HandshakeComplete", Field, 0},
{"ConnectionState.NegotiatedProtocol", Field, 0},
{"ConnectionState.NegotiatedProtocolIsMutual", Field, 0},
@ -925,6 +931,8 @@
{"ECDSAWithP384AndSHA384", Const, 8},
{"ECDSAWithP521AndSHA512", Const, 8},
{"ECDSAWithSHA1", Const, 10},
{"ECHRejectionError", Type, 23},
{"ECHRejectionError.RetryConfigList", Field, 23},
{"Ed25519", Const, 13},
{"InsecureCipherSuites", Func, 14},
{"Listen", Func, 0},
@ -943,6 +951,7 @@
{"ParseSessionState", Func, 21},
{"QUICClient", Func, 21},
{"QUICConfig", Type, 21},
{"QUICConfig.EnableSessionEvents", Field, 23},
{"QUICConfig.TLSConfig", Field, 21},
{"QUICConn", Type, 21},
{"QUICEncryptionLevel", Type, 21},
@ -954,16 +963,20 @@
{"QUICEvent.Data", Field, 21},
{"QUICEvent.Kind", Field, 21},
{"QUICEvent.Level", Field, 21},
{"QUICEvent.SessionState", Field, 23},
{"QUICEvent.Suite", Field, 21},
{"QUICEventKind", Type, 21},
{"QUICHandshakeDone", Const, 21},
{"QUICNoEvent", Const, 21},
{"QUICRejectedEarlyData", Const, 21},
{"QUICResumeSession", Const, 23},
{"QUICServer", Func, 21},
{"QUICSessionTicketOptions", Type, 21},
{"QUICSessionTicketOptions.EarlyData", Field, 21},
{"QUICSessionTicketOptions.Extra", Field, 23},
{"QUICSetReadSecret", Const, 21},
{"QUICSetWriteSecret", Const, 21},
{"QUICStoreSession", Const, 23},
{"QUICTransportParameters", Const, 21},
{"QUICTransportParametersRequired", Const, 21},
{"QUICWriteData", Const, 21},
@ -1036,6 +1049,8 @@
{"(*Certificate).Verify", Method, 0},
{"(*Certificate).VerifyHostname", Method, 0},
{"(*CertificateRequest).CheckSignature", Method, 5},
{"(*OID).UnmarshalBinary", Method, 23},
{"(*OID).UnmarshalText", Method, 23},
{"(*RevocationList).CheckSignatureFrom", Method, 19},
{"(CertificateInvalidError).Error", Method, 0},
{"(ConstraintViolationError).Error", Method, 0},
@ -1043,6 +1058,8 @@
{"(InsecureAlgorithmError).Error", Method, 6},
{"(OID).Equal", Method, 22},
{"(OID).EqualASN1OID", Method, 22},
{"(OID).MarshalBinary", Method, 23},
{"(OID).MarshalText", Method, 23},
{"(OID).String", Method, 22},
{"(PublicKeyAlgorithm).String", Method, 10},
{"(SignatureAlgorithm).String", Method, 6},
@ -1196,6 +1213,7 @@
{"ParseCertificates", Func, 0},
{"ParseDERCRL", Func, 0},
{"ParseECPrivateKey", Func, 1},
{"ParseOID", Func, 23},
{"ParsePKCS1PrivateKey", Func, 0},
{"ParsePKCS1PublicKey", Func, 10},
{"ParsePKCS8PrivateKey", Func, 0},
@ -2541,6 +2559,7 @@
{"PT_NOTE", Const, 0},
{"PT_NULL", Const, 0},
{"PT_OPENBSD_BOOTDATA", Const, 16},
{"PT_OPENBSD_NOBTCFI", Const, 23},
{"PT_OPENBSD_RANDOMIZE", Const, 16},
{"PT_OPENBSD_WXNEEDED", Const, 16},
{"PT_PAX_FLAGS", Const, 16},
@ -3620,13 +3639,16 @@
{"STT_COMMON", Const, 0},
{"STT_FILE", Const, 0},
{"STT_FUNC", Const, 0},
{"STT_GNU_IFUNC", Const, 23},
{"STT_HIOS", Const, 0},
{"STT_HIPROC", Const, 0},
{"STT_LOOS", Const, 0},
{"STT_LOPROC", Const, 0},
{"STT_NOTYPE", Const, 0},
{"STT_OBJECT", Const, 0},
{"STT_RELC", Const, 23},
{"STT_SECTION", Const, 0},
{"STT_SRELC", Const, 23},
{"STT_TLS", Const, 0},
{"STV_DEFAULT", Const, 0},
{"STV_HIDDEN", Const, 0},
@ -4544,11 +4566,14 @@
{"URLEncoding", Var, 0},
},
"encoding/binary": {
{"Append", Func, 23},
{"AppendByteOrder", Type, 19},
{"AppendUvarint", Func, 19},
{"AppendVarint", Func, 19},
{"BigEndian", Var, 0},
{"ByteOrder", Type, 0},
{"Decode", Func, 23},
{"Encode", Func, 23},
{"LittleEndian", Var, 0},
{"MaxVarintLen16", Const, 0},
{"MaxVarintLen32", Const, 0},
@ -5308,6 +5333,7 @@
{"ParenExpr.Rparen", Field, 0},
{"ParenExpr.X", Field, 0},
{"Pkg", Const, 0},
{"Preorder", Func, 23},
{"Print", Func, 0},
{"RECV", Const, 0},
{"RangeStmt", Type, 0},
@ -5898,7 +5924,12 @@
},
"go/types": {
{"(*Alias).Obj", Method, 22},
{"(*Alias).Origin", Method, 23},
{"(*Alias).Rhs", Method, 23},
{"(*Alias).SetTypeParams", Method, 23},
{"(*Alias).String", Method, 22},
{"(*Alias).TypeArgs", Method, 23},
{"(*Alias).TypeParams", Method, 23},
{"(*Alias).Underlying", Method, 22},
{"(*ArgumentError).Error", Method, 18},
{"(*ArgumentError).Unwrap", Method, 18},
@ -5943,6 +5974,7 @@
{"(*Func).Pkg", Method, 5},
{"(*Func).Pos", Method, 5},
{"(*Func).Scope", Method, 5},
{"(*Func).Signature", Method, 23},
{"(*Func).String", Method, 5},
{"(*Func).Type", Method, 5},
{"(*Info).ObjectOf", Method, 5},
@ -6992,6 +7024,12 @@
{"TempFile", Func, 0},
{"WriteFile", Func, 0},
},
"iter": {
{"Pull", Func, 23},
{"Pull2", Func, 23},
{"Seq", Type, 23},
{"Seq2", Type, 23},
},
"log": {
{"(*Logger).Fatal", Method, 0},
{"(*Logger).Fatalf", Method, 0},
@ -7222,11 +7260,16 @@
{"Writer", Type, 0},
},
"maps": {
{"All", Func, 23},
{"Clone", Func, 21},
{"Collect", Func, 23},
{"Copy", Func, 21},
{"DeleteFunc", Func, 21},
{"Equal", Func, 21},
{"EqualFunc", Func, 21},
{"Insert", Func, 23},
{"Keys", Func, 23},
{"Values", Func, 23},
},
"math": {
{"Abs", Func, 0},
@ -7617,6 +7660,7 @@
},
"math/rand/v2": {
{"(*ChaCha8).MarshalBinary", Method, 22},
{"(*ChaCha8).Read", Method, 23},
{"(*ChaCha8).Seed", Method, 22},
{"(*ChaCha8).Uint64", Method, 22},
{"(*ChaCha8).UnmarshalBinary", Method, 22},
@ -7636,6 +7680,7 @@
{"(*Rand).NormFloat64", Method, 22},
{"(*Rand).Perm", Method, 22},
{"(*Rand).Shuffle", Method, 22},
{"(*Rand).Uint", Method, 23},
{"(*Rand).Uint32", Method, 22},
{"(*Rand).Uint32N", Method, 22},
{"(*Rand).Uint64", Method, 22},
@ -7663,6 +7708,7 @@
{"Rand", Type, 22},
{"Shuffle", Func, 22},
{"Source", Type, 22},
{"Uint", Func, 23},
{"Uint32", Func, 22},
{"Uint32N", Func, 22},
{"Uint64", Func, 22},
@ -7743,6 +7789,7 @@
{"(*DNSError).Error", Method, 0},
{"(*DNSError).Temporary", Method, 0},
{"(*DNSError).Timeout", Method, 0},
{"(*DNSError).Unwrap", Method, 23},
{"(*Dialer).Dial", Method, 1},
{"(*Dialer).DialContext", Method, 7},
{"(*Dialer).MultipathTCP", Method, 21},
@ -7809,6 +7856,7 @@
{"(*TCPConn).RemoteAddr", Method, 0},
{"(*TCPConn).SetDeadline", Method, 0},
{"(*TCPConn).SetKeepAlive", Method, 0},
{"(*TCPConn).SetKeepAliveConfig", Method, 23},
{"(*TCPConn).SetKeepAlivePeriod", Method, 2},
{"(*TCPConn).SetLinger", Method, 0},
{"(*TCPConn).SetNoDelay", Method, 0},
@ -7922,6 +7970,7 @@
{"DNSError.IsTimeout", Field, 0},
{"DNSError.Name", Field, 0},
{"DNSError.Server", Field, 0},
{"DNSError.UnwrapErr", Field, 23},
{"DefaultResolver", Var, 8},
{"Dial", Func, 0},
{"DialIP", Func, 0},
@ -7937,6 +7986,7 @@
{"Dialer.DualStack", Field, 2},
{"Dialer.FallbackDelay", Field, 5},
{"Dialer.KeepAlive", Field, 3},
{"Dialer.KeepAliveConfig", Field, 23},
{"Dialer.LocalAddr", Field, 1},
{"Dialer.Resolver", Field, 8},
{"Dialer.Timeout", Field, 1},
@ -7989,10 +8039,16 @@
{"Interfaces", Func, 0},
{"InvalidAddrError", Type, 0},
{"JoinHostPort", Func, 0},
{"KeepAliveConfig", Type, 23},
{"KeepAliveConfig.Count", Field, 23},
{"KeepAliveConfig.Enable", Field, 23},
{"KeepAliveConfig.Idle", Field, 23},
{"KeepAliveConfig.Interval", Field, 23},
{"Listen", Func, 0},
{"ListenConfig", Type, 11},
{"ListenConfig.Control", Field, 11},
{"ListenConfig.KeepAlive", Field, 13},
{"ListenConfig.KeepAliveConfig", Field, 23},
{"ListenIP", Func, 0},
{"ListenMulticastUDP", Func, 0},
{"ListenPacket", Func, 0},
@ -8081,6 +8137,7 @@
{"(*Request).Context", Method, 7},
{"(*Request).Cookie", Method, 0},
{"(*Request).Cookies", Method, 0},
{"(*Request).CookiesNamed", Method, 23},
{"(*Request).FormFile", Method, 0},
{"(*Request).FormValue", Method, 0},
{"(*Request).MultipartReader", Method, 0},
@ -8148,7 +8205,9 @@
{"Cookie.HttpOnly", Field, 0},
{"Cookie.MaxAge", Field, 0},
{"Cookie.Name", Field, 0},
{"Cookie.Partitioned", Field, 23},
{"Cookie.Path", Field, 0},
{"Cookie.Quoted", Field, 23},
{"Cookie.Raw", Field, 0},
{"Cookie.RawExpires", Field, 0},
{"Cookie.SameSite", Field, 11},
@ -8225,7 +8284,9 @@
{"NoBody", Var, 8},
{"NotFound", Func, 0},
{"NotFoundHandler", Func, 0},
{"ParseCookie", Func, 23},
{"ParseHTTPVersion", Func, 0},
{"ParseSetCookie", Func, 23},
{"ParseTime", Func, 1},
{"Post", Func, 0},
{"PostForm", Func, 0},
@ -8252,6 +8313,7 @@
{"Request.Host", Field, 0},
{"Request.Method", Field, 0},
{"Request.MultipartForm", Field, 0},
{"Request.Pattern", Field, 23},
{"Request.PostForm", Field, 1},
{"Request.Proto", Field, 0},
{"Request.ProtoMajor", Field, 0},
@ -8453,6 +8515,7 @@
{"DefaultRemoteAddr", Const, 0},
{"NewRecorder", Func, 0},
{"NewRequest", Func, 7},
{"NewRequestWithContext", Func, 23},
{"NewServer", Func, 0},
{"NewTLSServer", Func, 0},
{"NewUnstartedServer", Func, 0},
@ -8917,6 +8980,7 @@
{"Chown", Func, 0},
{"Chtimes", Func, 0},
{"Clearenv", Func, 0},
{"CopyFS", Func, 23},
{"Create", Func, 0},
{"CreateTemp", Func, 16},
{"DevNull", Const, 0},
@ -9150,6 +9214,7 @@
{"IsLocal", Func, 20},
{"Join", Func, 0},
{"ListSeparator", Const, 0},
{"Localize", Func, 23},
{"Match", Func, 0},
{"Rel", Func, 0},
{"Separator", Const, 0},
@ -9232,6 +9297,8 @@
{"(Value).Pointer", Method, 0},
{"(Value).Recv", Method, 0},
{"(Value).Send", Method, 0},
{"(Value).Seq", Method, 23},
{"(Value).Seq2", Method, 23},
{"(Value).Set", Method, 0},
{"(Value).SetBool", Method, 0},
{"(Value).SetBytes", Method, 0},
@ -9314,6 +9381,7 @@
{"SelectSend", Const, 1},
{"SendDir", Const, 0},
{"Slice", Const, 0},
{"SliceAt", Func, 23},
{"SliceHeader", Type, 0},
{"SliceHeader.Cap", Field, 0},
{"SliceHeader.Data", Field, 0},
@ -9655,6 +9723,7 @@
{"BuildSetting", Type, 18},
{"BuildSetting.Key", Field, 18},
{"BuildSetting.Value", Field, 18},
{"CrashOptions", Type, 23},
{"FreeOSMemory", Func, 1},
{"GCStats", Type, 1},
{"GCStats.LastGC", Field, 1},
@ -9672,6 +9741,7 @@
{"PrintStack", Func, 0},
{"ReadBuildInfo", Func, 12},
{"ReadGCStats", Func, 1},
{"SetCrashOutput", Func, 23},
{"SetGCPercent", Func, 1},
{"SetMaxStack", Func, 2},
{"SetMaxThreads", Func, 2},
@ -9742,10 +9812,15 @@
{"WithRegion", Func, 11},
},
"slices": {
{"All", Func, 23},
{"AppendSeq", Func, 23},
{"Backward", Func, 23},
{"BinarySearch", Func, 21},
{"BinarySearchFunc", Func, 21},
{"Chunk", Func, 23},
{"Clip", Func, 21},
{"Clone", Func, 21},
{"Collect", Func, 23},
{"Compact", Func, 21},
{"CompactFunc", Func, 21},
{"Compare", Func, 21},
@ -9767,11 +9842,16 @@
{"MaxFunc", Func, 21},
{"Min", Func, 21},
{"MinFunc", Func, 21},
{"Repeat", Func, 23},
{"Replace", Func, 21},
{"Reverse", Func, 21},
{"Sort", Func, 21},
{"SortFunc", Func, 21},
{"SortStableFunc", Func, 21},
{"Sorted", Func, 23},
{"SortedFunc", Func, 23},
{"SortedStableFunc", Func, 23},
{"Values", Func, 23},
},
"sort": {
{"(Float64Slice).Len", Method, 0},
@ -9936,10 +10016,14 @@
{"TrimSpace", Func, 0},
{"TrimSuffix", Func, 1},
},
"structs": {
{"HostLayout", Type, 23},
},
"sync": {
{"(*Cond).Broadcast", Method, 0},
{"(*Cond).Signal", Method, 0},
{"(*Cond).Wait", Method, 0},
{"(*Map).Clear", Method, 23},
{"(*Map).CompareAndDelete", Method, 20},
{"(*Map).CompareAndSwap", Method, 20},
{"(*Map).Delete", Method, 9},
@ -9986,13 +10070,17 @@
{"(*Bool).Store", Method, 19},
{"(*Bool).Swap", Method, 19},
{"(*Int32).Add", Method, 19},
{"(*Int32).And", Method, 23},
{"(*Int32).CompareAndSwap", Method, 19},
{"(*Int32).Load", Method, 19},
{"(*Int32).Or", Method, 23},
{"(*Int32).Store", Method, 19},
{"(*Int32).Swap", Method, 19},
{"(*Int64).Add", Method, 19},
{"(*Int64).And", Method, 23},
{"(*Int64).CompareAndSwap", Method, 19},
{"(*Int64).Load", Method, 19},
{"(*Int64).Or", Method, 23},
{"(*Int64).Store", Method, 19},
{"(*Int64).Swap", Method, 19},
{"(*Pointer).CompareAndSwap", Method, 19},
@ -10000,18 +10088,24 @@
{"(*Pointer).Store", Method, 19},
{"(*Pointer).Swap", Method, 19},
{"(*Uint32).Add", Method, 19},
{"(*Uint32).And", Method, 23},
{"(*Uint32).CompareAndSwap", Method, 19},
{"(*Uint32).Load", Method, 19},
{"(*Uint32).Or", Method, 23},
{"(*Uint32).Store", Method, 19},
{"(*Uint32).Swap", Method, 19},
{"(*Uint64).Add", Method, 19},
{"(*Uint64).And", Method, 23},
{"(*Uint64).CompareAndSwap", Method, 19},
{"(*Uint64).Load", Method, 19},
{"(*Uint64).Or", Method, 23},
{"(*Uint64).Store", Method, 19},
{"(*Uint64).Swap", Method, 19},
{"(*Uintptr).Add", Method, 19},
{"(*Uintptr).And", Method, 23},
{"(*Uintptr).CompareAndSwap", Method, 19},
{"(*Uintptr).Load", Method, 19},
{"(*Uintptr).Or", Method, 23},
{"(*Uintptr).Store", Method, 19},
{"(*Uintptr).Swap", Method, 19},
{"(*Value).CompareAndSwap", Method, 17},
@ -10023,6 +10117,11 @@
{"AddUint32", Func, 0},
{"AddUint64", Func, 0},
{"AddUintptr", Func, 0},
{"AndInt32", Func, 23},
{"AndInt64", Func, 23},
{"AndUint32", Func, 23},
{"AndUint64", Func, 23},
{"AndUintptr", Func, 23},
{"Bool", Type, 19},
{"CompareAndSwapInt32", Func, 0},
{"CompareAndSwapInt64", Func, 0},
@ -10038,6 +10137,11 @@
{"LoadUint32", Func, 0},
{"LoadUint64", Func, 0},
{"LoadUintptr", Func, 0},
{"OrInt32", Func, 23},
{"OrInt64", Func, 23},
{"OrUint32", Func, 23},
{"OrUint64", Func, 23},
{"OrUintptr", Func, 23},
{"Pointer", Type, 19},
{"StoreInt32", Func, 0},
{"StoreInt64", Func, 0},
@ -16200,6 +16304,7 @@
{"WSAEACCES", Const, 2},
{"WSAECONNABORTED", Const, 9},
{"WSAECONNRESET", Const, 3},
{"WSAENOPROTOOPT", Const, 23},
{"WSAEnumProtocols", Func, 2},
{"WSAID_CONNECTEX", Var, 1},
{"WSAIoctl", Func, 0},
@ -17284,6 +17389,7 @@
{"Encode", Func, 0},
{"EncodeRune", Func, 0},
{"IsSurrogate", Func, 0},
{"RuneLen", Func, 23},
},
"unicode/utf8": {
{"AppendRune", Func, 18},
@ -17306,6 +17412,11 @@
{"ValidRune", Func, 1},
{"ValidString", Func, 0},
},
"unique": {
{"(Handle).Value", Method, 23},
{"Handle", Type, 23},
{"Make", Func, 23},
},
"unsafe": {
{"Add", Func, 0},
{"Alignof", Func, 0},

View file

@ -1,137 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// package tokeninternal provides access to some internal features of the token
// package.
package tokeninternal
import (
"fmt"
"go/token"
"sort"
"sync"
"unsafe"
)
// GetLines returns the table of line-start offsets from a token.File.
func GetLines(file *token.File) []int {
// token.File has a Lines method on Go 1.21 and later.
if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
return file.Lines()
}
// This declaration must match that of token.File.
// This creates a risk of dependency skew.
// For now we check that the size of the two
// declarations is the same, on the (fragile) assumption
// that future changes would add fields.
type tokenFile119 struct {
_ string
_ int
_ int
mu sync.Mutex // we're not complete monsters
lines []int
_ []struct{}
}
if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) {
panic("unexpected token.File size")
}
var ptr *tokenFile119
type uP = unsafe.Pointer
*(*uP)(uP(&ptr)) = uP(file)
ptr.mu.Lock()
defer ptr.mu.Unlock()
return ptr.lines
}
// AddExistingFiles adds the specified files to the FileSet if they
// are not already present. It panics if any pair of files in the
// resulting FileSet would overlap.
func AddExistingFiles(fset *token.FileSet, files []*token.File) {
// Punch through the FileSet encapsulation.
type tokenFileSet struct {
// This type remained essentially consistent from go1.16 to go1.21.
mutex sync.RWMutex
base int
files []*token.File
_ *token.File // changed to atomic.Pointer[token.File] in go1.19
}
// If the size of token.FileSet changes, this will fail to compile.
const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
var _ [-delta * delta]int
type uP = unsafe.Pointer
var ptr *tokenFileSet
*(*uP)(uP(&ptr)) = uP(fset)
ptr.mutex.Lock()
defer ptr.mutex.Unlock()
// Merge and sort.
newFiles := append(ptr.files, files...)
sort.Slice(newFiles, func(i, j int) bool {
return newFiles[i].Base() < newFiles[j].Base()
})
// Reject overlapping files.
// Discard adjacent identical files.
out := newFiles[:0]
for i, file := range newFiles {
if i > 0 {
prev := newFiles[i-1]
if file == prev {
continue
}
if prev.Base()+prev.Size()+1 > file.Base() {
panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
prev.Name(), prev.Base(), prev.Base()+prev.Size(),
file.Name(), file.Base(), file.Base()+file.Size()))
}
}
out = append(out, file)
}
newFiles = out
ptr.files = newFiles
// Advance FileSet.Base().
if len(newFiles) > 0 {
last := newFiles[len(newFiles)-1]
newBase := last.Base() + last.Size() + 1
if ptr.base < newBase {
ptr.base = newBase
}
}
}
// FileSetFor returns a new FileSet containing a sequence of new Files with
// the same base, size, and line as the input files, for use in APIs that
// require a FileSet.
//
// Precondition: the input files must be non-overlapping, and sorted in order
// of their Base.
func FileSetFor(files ...*token.File) *token.FileSet {
fset := token.NewFileSet()
for _, f := range files {
f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
lines := GetLines(f)
f2.SetLines(lines)
}
return fset
}
// CloneFileSet creates a new FileSet holding all files in fset. It does not
// create copies of the token.Files in fset: they are added to the resulting
// FileSet unmodified.
func CloneFileSet(fset *token.FileSet) *token.FileSet {
var files []*token.File
fset.Iterate(func(f *token.File) bool {
files = append(files, f)
return true
})
newFileSet := token.NewFileSet()
AddExistingFiles(newFileSet, files)
return newFileSet
}

140
vendor/golang.org/x/tools/internal/typeparams/common.go generated vendored Normal file
View file

@ -0,0 +1,140 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package typeparams contains common utilities for writing tools that
// interact with generic Go code, as introduced with Go 1.18. It
// supplements the standard library APIs. Notably, the StructuralTerms
// API computes a minimal representation of the structural
// restrictions on a type parameter.
//
// An external version of these APIs is available in the
// golang.org/x/exp/typeparams module.
package typeparams
import (
"go/ast"
"go/token"
"go/types"
)
// UnpackIndexExpr extracts data from AST nodes that represent index
// expressions.
//
// For an ast.IndexExpr, the resulting indices slice will contain exactly one
// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
// number of index expressions.
//
// For nodes that don't represent index expressions, the first return value of
// UnpackIndexExpr will be nil.
func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
switch e := n.(type) {
case *ast.IndexExpr:
return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
case *ast.IndexListExpr:
return e.X, e.Lbrack, e.Indices, e.Rbrack
}
return nil, token.NoPos, nil, token.NoPos
}
// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
// will panic.
func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
switch len(indices) {
case 0:
panic("empty indices")
case 1:
return &ast.IndexExpr{
X: x,
Lbrack: lbrack,
Index: indices[0],
Rbrack: rbrack,
}
default:
return &ast.IndexListExpr{
X: x,
Lbrack: lbrack,
Indices: indices,
Rbrack: rbrack,
}
}
}
// IsTypeParam reports whether t is a type parameter (or an alias of one).
func IsTypeParam(t types.Type) bool {
_, ok := types.Unalias(t).(*types.TypeParam)
return ok
}
// GenericAssignableTo is a generalization of types.AssignableTo that
// implements the following rule for uninstantiated generic types:
//
// If V and T are generic named types, then V is considered assignable to T if,
// for every possible instantiation of V[A_1, ..., A_N], the instantiation
// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
//
// If T has structural constraints, they must be satisfied by V.
//
// For example, consider the following type declarations:
//
// type Interface[T any] interface {
// Accept(T)
// }
//
// type Container[T any] struct {
// Element T
// }
//
// func (c Container[T]) Accept(t T) { c.Element = t }
//
// In this case, GenericAssignableTo reports that instantiations of Container
// are assignable to the corresponding instantiation of Interface.
func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool {
V = types.Unalias(V)
T = types.Unalias(T)
// If V and T are not both named, or do not have matching non-empty type
// parameter lists, fall back on types.AssignableTo.
VN, Vnamed := V.(*types.Named)
TN, Tnamed := T.(*types.Named)
if !Vnamed || !Tnamed {
return types.AssignableTo(V, T)
}
vtparams := VN.TypeParams()
ttparams := TN.TypeParams()
if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 {
return types.AssignableTo(V, T)
}
// V and T have the same (non-zero) number of type params. Instantiate both
// with the type parameters of V. This must always succeed for V, and will
// succeed for T if and only if the type set of each type parameter of V is a
// subset of the type set of the corresponding type parameter of T, meaning
// that every instantiation of V corresponds to a valid instantiation of T.
// Minor optimization: ensure we share a context across the two
// instantiations below.
if ctxt == nil {
ctxt = types.NewContext()
}
var targs []types.Type
for i := 0; i < vtparams.Len(); i++ {
targs = append(targs, vtparams.At(i))
}
vinst, err := types.Instantiate(ctxt, V, targs, true)
if err != nil {
panic("type parameters should satisfy their own constraints")
}
tinst, err := types.Instantiate(ctxt, T, targs, true)
if err != nil {
return false
}
return types.AssignableTo(vinst, tinst)
}
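
A hedged sketch of the AST shapes UnpackIndexExpr abstracts over: with two or more type arguments the parser produces *ast.IndexListExpr, while a single argument yields *ast.IndexExpr (the expression text is an example):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

func main() {
	expr, err := parser.ParseExpr("Pair[int, string]")
	if err != nil {
		panic(err)
	}
	if e, ok := expr.(*ast.IndexListExpr); ok {
		fmt.Println(len(e.Indices), "type arguments") // 2 type arguments
	}
}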

View file

@ -0,0 +1,150 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeparams
import (
"fmt"
"go/types"
)
// CoreType returns the core type of T or nil if T does not have a core type.
//
// See https://go.dev/ref/spec#Core_types for the definition of a core type.
func CoreType(T types.Type) types.Type {
U := T.Underlying()
if _, ok := U.(*types.Interface); !ok {
return U // for non-interface types, the core type is the underlying type
}
terms, err := NormalTerms(U)
if len(terms) == 0 || err != nil {
// len(terms) == 0 => the interface has an empty type set.
// err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
return nil // no core type.
}
U = terms[0].Type().Underlying()
var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
for identical = 1; identical < len(terms); identical++ {
if !types.Identical(U, terms[identical].Type().Underlying()) {
break
}
}
if identical == len(terms) {
// https://go.dev/ref/spec#Core_types
// "There is a single type U which is the underlying type of all types in the type set of T"
return U
}
ch, ok := U.(*types.Chan)
if !ok {
return nil // no core type as identical < len(terms) and U is not a channel.
}
// https://go.dev/ref/spec#Core_types
// "the type chan E if T contains only bidirectional channels, or the type chan<- E or
// <-chan E depending on the direction of the directional channels present."
for chans := identical; chans < len(terms); chans++ {
curr, ok := terms[chans].Type().Underlying().(*types.Chan)
if !ok {
return nil
}
if !types.Identical(ch.Elem(), curr.Elem()) {
return nil // channel elements are not identical.
}
if ch.Dir() == types.SendRecv {
// ch is bidirectional. We can safely always use curr's direction.
ch = curr
} else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
// ch and curr are not bidirectional and not the same direction.
return nil
}
}
return ch
}
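For a worked example of the rules above, the following runnable sketch (with the same caveat that this package is internal and the constraint names are invented) prints the core type of a few constraint interfaces.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typeparams" // internal: for illustration only
)

const src = `package p

type A interface{ ~[]int | []int }        // single underlying type []int
type B interface{ chan int | <-chan int } // channels with identical element type
type C interface{ int | string }          // neither case applies
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	for _, name := range []string{"A", "B", "C"} {
		T := pkg.Scope().Lookup(name).Type()
		fmt.Printf("CoreType(%s) = %v\n", name, typeparams.CoreType(T))
	}
	// CoreType(A) = []int       (all terms share the underlying type []int)
	// CoreType(B) = <-chan int  (the directional channel's direction wins)
	// CoreType(C) = <nil>       (no core type)
}
```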
// NormalTerms returns a slice of terms representing the normalized structural
// type restrictions of a type, if any.
//
// For all types other than *types.TypeParam, *types.Interface, and
// *types.Union, this is just a single term with Tilde() == false and
// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
// below.
//
// Structural type restrictions of a type parameter are created via
// non-interface types embedded in its constraint interface (directly, or via a
// chain of interface embeddings). For example, in the declaration type
// T[P interface{~int; m()}] int the structural restriction of the type
// parameter P is ~int.
//
// With interface embedding and unions, the specification of structural type
// restrictions may be arbitrarily complex. For example, consider the
// following:
//
// type A interface{ ~string|~[]byte }
//
// type B interface{ int|string }
//
// type C interface { ~string|~int }
//
// type T[P interface{ A|B; C }] int
//
// In this example, the structural type restriction of P is ~string|int: A|B
// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
// which when intersected with C (~string|~int) yields ~string|int.
//
// NormalTerms computes these expansions and reductions, producing a
// "normalized" form of the embeddings. A structural restriction is normalized
// if it is a single union containing no interface terms, and is minimal in the
// sense that removing any term changes the set of types satisfying the
// constraint. It is left as a proof for the reader that, modulo sorting, there
// is exactly one such normalized form.
//
// Because the minimal representation always takes this form, NormalTerms
// returns a slice of tilde terms corresponding to the terms of the union in
// the normalized structural restriction. An error is returned if the type is
// invalid, exceeds complexity bounds, or has an empty type set. In the latter
// case, NormalTerms returns ErrEmptyTypeSet.
//
// NormalTerms makes no guarantees about the order of terms, except that it
// is deterministic.
func NormalTerms(typ types.Type) ([]*types.Term, error) {
switch typ := typ.Underlying().(type) {
case *types.TypeParam:
return StructuralTerms(typ)
case *types.Union:
return UnionTermSet(typ)
case *types.Interface:
return InterfaceTermSet(typ)
default:
return []*types.Term{types.NewTerm(false, typ)}, nil
}
}
// Deref returns the type of the variable pointed to by t,
// if t's core type is a pointer; otherwise it returns t.
//
// Do not assume that Deref(T)==T implies T is not a pointer:
// consider "type T *T", for example.
//
// TODO(adonovan): ideally this would live in typesinternal, but that
// creates an import cycle. Move there when we melt this package down.
func Deref(t types.Type) types.Type {
if ptr, ok := CoreType(t).(*types.Pointer); ok {
return ptr.Elem()
}
return t
}
// MustDeref returns the type of the variable pointed to by t.
// It panics if t's core type is not a pointer.
//
// TODO(adonovan): ideally this would live in typesinternal, but that
// creates an import cycle. Move there when we melt this package down.
func MustDeref(t types.Type) types.Type {
if ptr, ok := CoreType(t).(*types.Pointer); ok {
return ptr.Elem()
}
panic(fmt.Sprintf("%v is not a pointer", t))
}

vendor/golang.org/x/tools/internal/typeparams/free.go

@ -0,0 +1,131 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeparams
import (
"go/types"
"golang.org/x/tools/internal/aliases"
)
// Free is a memoization of the set of free type parameters within a
// type. It makes a sequence of calls to [Free.Has] for overlapping
// types more efficient. The zero value is ready for use.
//
// NOTE: Adapted from go/types/infer.go. If it is later exported, factor.
type Free struct {
seen map[types.Type]bool
}
// Has reports whether the specified type has a free type parameter.
func (w *Free) Has(typ types.Type) (res bool) {
// detect cycles
if x, ok := w.seen[typ]; ok {
return x
}
if w.seen == nil {
w.seen = make(map[types.Type]bool)
}
w.seen[typ] = false
defer func() {
w.seen[typ] = res
}()
switch t := typ.(type) {
case nil, *types.Basic: // TODO(gri) should nil be handled here?
break
case *types.Alias:
if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() {
return true // This is an uninstantiated Alias.
}
// The expansion of an alias can have free type parameters,
// whether or not the alias itself has type parameters:
//
// func _[K comparable]() {
// type Set = map[K]bool // free(Set) = {K}
// type MapTo[V] = map[K]V // free(Map[foo]) = {V}
// }
//
// So, we must Unalias.
return w.Has(types.Unalias(t))
case *types.Array:
return w.Has(t.Elem())
case *types.Slice:
return w.Has(t.Elem())
case *types.Struct:
for i, n := 0, t.NumFields(); i < n; i++ {
if w.Has(t.Field(i).Type()) {
return true
}
}
case *types.Pointer:
return w.Has(t.Elem())
case *types.Tuple:
n := t.Len()
for i := 0; i < n; i++ {
if w.Has(t.At(i).Type()) {
return true
}
}
case *types.Signature:
// t.tparams may not be nil if we are looking at a signature
// of a generic function type (or an interface method) that is
// part of the type we're testing. We don't care about these type
// parameters.
// Similarly, the receiver of a method may declare (rather than
// use) type parameters, we don't care about those either.
// Thus, we only need to look at the input and result parameters.
return w.Has(t.Params()) || w.Has(t.Results())
case *types.Interface:
for i, n := 0, t.NumMethods(); i < n; i++ {
if w.Has(t.Method(i).Type()) {
return true
}
}
terms, err := InterfaceTermSet(t)
if err != nil {
return false // ill typed
}
for _, term := range terms {
if w.Has(term.Type()) {
return true
}
}
case *types.Map:
return w.Has(t.Key()) || w.Has(t.Elem())
case *types.Chan:
return w.Has(t.Elem())
case *types.Named:
args := t.TypeArgs()
if params := t.TypeParams(); params.Len() > args.Len() {
return true // this is an uninstantiated named type.
}
for i, n := 0, args.Len(); i < n; i++ {
if w.Has(args.At(i)) {
return true
}
}
return w.Has(t.Underlying()) // recurse for types local to parameterized functions
case *types.TypeParam:
return true
default:
panic(t) // unreachable
}
return false
}
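A small runnable sketch of Free.Has (the import of this internal package is for illustration only; the generic function F is invented for the example):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typeparams" // internal: for illustration only
)

const src = `package p

func F[T any](xs []T, n int) map[string]T { return nil }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	sig := pkg.Scope().Lookup("F").Type().(*types.Signature)

	var free typeparams.Free // zero value is ready for use; memoizes across calls
	fmt.Println(free.Has(sig.Params().At(0).Type()))  // true:  []T mentions T
	fmt.Println(free.Has(sig.Params().At(1).Type()))  // false: int has no type parameters
	fmt.Println(free.Has(sig.Results().At(0).Type())) // true:  map[string]T mentions T
}
```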


@ -0,0 +1,218 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeparams
import (
"errors"
"fmt"
"go/types"
"os"
"strings"
)
//go:generate go run copytermlist.go
const debug = false
var ErrEmptyTypeSet = errors.New("empty type set")
// StructuralTerms returns a slice of terms representing the normalized
// structural type restrictions of a type parameter, if any.
//
// Structural type restrictions of a type parameter are created via
// non-interface types embedded in its constraint interface (directly, or via a
// chain of interface embeddings). For example, in the declaration
//
// type T[P interface{~int; m()}] int
//
// the structural restriction of the type parameter P is ~int.
//
// With interface embedding and unions, the specification of structural type
// restrictions may be arbitrarily complex. For example, consider the
// following:
//
// type A interface{ ~string|~[]byte }
//
// type B interface{ int|string }
//
// type C interface { ~string|~int }
//
// type T[P interface{ A|B; C }] int
//
// In this example, the structural type restriction of P is ~string|int: A|B
// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
// which when intersected with C (~string|~int) yields ~string|int.
//
// StructuralTerms computes these expansions and reductions, producing a
// "normalized" form of the embeddings. A structural restriction is normalized
// if it is a single union containing no interface terms, and is minimal in the
// sense that removing any term changes the set of types satisfying the
// constraint. It is left as a proof for the reader that, modulo sorting, there
// is exactly one such normalized form.
//
// Because the minimal representation always takes this form, StructuralTerms
// returns a slice of tilde terms corresponding to the terms of the union in
// the normalized structural restriction. An error is returned if the
// constraint interface is invalid, exceeds complexity bounds, or has an empty
// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
//
// StructuralTerms makes no guarantees about the order of terms, except that it
// is deterministic.
func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) {
constraint := tparam.Constraint()
if constraint == nil {
return nil, fmt.Errorf("%s has nil constraint", tparam)
}
iface, _ := constraint.Underlying().(*types.Interface)
if iface == nil {
return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
}
return InterfaceTermSet(iface)
}
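The A|B; C example from the comment above can be reproduced end to end. A runnable sketch (the internal import is for illustration only):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typeparams" // internal: for illustration only
)

const src = `package p

type A interface{ ~string | ~[]byte }
type B interface{ int | string }
type C interface{ ~string | ~int }

type T[P interface{ A | B; C }] int
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	named := pkg.Scope().Lookup("T").Type().(*types.Named)
	P := named.TypeParams().At(0)

	terms, err := typeparams.StructuralTerms(P)
	if err != nil {
		panic(err)
	}
	for _, term := range terms {
		fmt.Println(term) // ~string and int, in unspecified (but deterministic) order
	}
}
```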
// InterfaceTermSet computes the normalized terms for a constraint interface,
// returning an error if the term set cannot be computed or is empty. In the
// latter case, the error will be ErrEmptyTypeSet.
//
// See the documentation of StructuralTerms for more information on
// normalization.
func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) {
return computeTermSet(iface)
}
// UnionTermSet computes the normalized terms for a union, returning an error
// if the term set cannot be computed or is empty. In the latter case, the
// error will be ErrEmptyTypeSet.
//
// See the documentation of StructuralTerms for more information on
// normalization.
func UnionTermSet(union *types.Union) ([]*types.Term, error) {
return computeTermSet(union)
}
func computeTermSet(typ types.Type) ([]*types.Term, error) {
tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
if err != nil {
return nil, err
}
if tset.terms.isEmpty() {
return nil, ErrEmptyTypeSet
}
if tset.terms.isAll() {
return nil, nil
}
var terms []*types.Term
for _, term := range tset.terms {
terms = append(terms, types.NewTerm(term.tilde, term.typ))
}
return terms, nil
}
// A termSet holds the normalized set of terms for a given type.
//
// The name termSet is intentionally distinct from 'type set': a type set is
// all types that implement a type (and includes method restrictions), whereas
// a term set just represents the structural restrictions on a type.
type termSet struct {
complete bool
terms termlist
}
func indentf(depth int, format string, args ...interface{}) {
fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
}
func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
if t == nil {
panic("nil type")
}
if debug {
indentf(depth, "%s", t.String())
defer func() {
if err != nil {
indentf(depth, "=> %s", err)
} else {
indentf(depth, "=> %s", res.terms.String())
}
}()
}
const maxTermCount = 100
if tset, ok := seen[t]; ok {
if !tset.complete {
return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
}
return tset, nil
}
// Mark the current type as seen to avoid infinite recursion.
tset := new(termSet)
defer func() {
tset.complete = true
}()
seen[t] = tset
switch u := t.Underlying().(type) {
case *types.Interface:
// The term set of an interface is the intersection of the term sets of its
// embedded types.
tset.terms = allTermlist
for i := 0; i < u.NumEmbeddeds(); i++ {
embedded := u.EmbeddedType(i)
if _, ok := embedded.Underlying().(*types.TypeParam); ok {
return nil, fmt.Errorf("invalid embedded type %T", embedded)
}
tset2, err := computeTermSetInternal(embedded, seen, depth+1)
if err != nil {
return nil, err
}
tset.terms = tset.terms.intersect(tset2.terms)
}
case *types.Union:
// The term set of a union is the union of term sets of its terms.
tset.terms = nil
for i := 0; i < u.Len(); i++ {
t := u.Term(i)
var terms termlist
switch t.Type().Underlying().(type) {
case *types.Interface:
tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
if err != nil {
return nil, err
}
terms = tset2.terms
case *types.TypeParam, *types.Union:
// A stand-alone type parameter or union is not permitted as union
// term.
return nil, fmt.Errorf("invalid union term %T", t)
default:
if t.Type() == types.Typ[types.Invalid] {
continue
}
terms = termlist{{t.Tilde(), t.Type()}}
}
tset.terms = tset.terms.union(terms)
if len(tset.terms) > maxTermCount {
return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
}
}
case *types.TypeParam:
panic("unreachable")
default:
// For all other types, the term set is just a single non-tilde term
// holding the type itself.
if u != types.Typ[types.Invalid] {
tset.terms = termlist{{false, t}}
}
}
return tset, nil
}
// under is a facade for the go/types internal function of the same name. It is
// used by typeterm.go.
func under(t types.Type) types.Type {
return t.Underlying()
}


@ -0,0 +1,163 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by copytermlist.go DO NOT EDIT.
package typeparams
import (
"bytes"
"go/types"
)
// A termlist represents the type set represented by the union
// t1 ∪ t2 ∪ ... ∪ tn of the type sets of the terms t1 to tn.
// A termlist is in normal form if all terms are disjoint.
// termlist operations don't require the operands to be in
// normal form.
type termlist []*term
// allTermlist represents the set of all types.
// It is in normal form.
var allTermlist = termlist{new(term)}
// String prints the termlist exactly (without normalization).
func (xl termlist) String() string {
if len(xl) == 0 {
return "∅"
}
var buf bytes.Buffer
for i, x := range xl {
if i > 0 {
buf.WriteString(" | ")
}
buf.WriteString(x.String())
}
return buf.String()
}
// isEmpty reports whether the termlist xl represents the empty set of types.
func (xl termlist) isEmpty() bool {
// If there's a non-nil term, the entire list is not empty.
// If the termlist is in normal form, this requires at most
// one iteration.
for _, x := range xl {
if x != nil {
return false
}
}
return true
}
// isAll reports whether the termlist xl represents the set of all types.
func (xl termlist) isAll() bool {
// If there's a 𝓤 term, the entire list is 𝓤.
// If the termlist is in normal form, this requires at most
// one iteration.
for _, x := range xl {
if x != nil && x.typ == nil {
return true
}
}
return false
}
// norm returns the normal form of xl.
func (xl termlist) norm() termlist {
// Quadratic algorithm, but good enough for now.
// TODO(gri) fix asymptotic performance
used := make([]bool, len(xl))
var rl termlist
for i, xi := range xl {
if xi == nil || used[i] {
continue
}
for j := i + 1; j < len(xl); j++ {
xj := xl[j]
if xj == nil || used[j] {
continue
}
if u1, u2 := xi.union(xj); u2 == nil {
// If we encounter a 𝓤 term, the entire list is 𝓤.
// Exit early.
// (Note that this is not just an optimization;
// if we continue, we may end up with a 𝓤 term
// and other terms and the result would not be
// in normal form.)
if u1.typ == nil {
return allTermlist
}
xi = u1
used[j] = true // xj is now unioned into xi - ignore it in future iterations
}
}
rl = append(rl, xi)
}
return rl
}
// union returns the union xl ∪ yl.
func (xl termlist) union(yl termlist) termlist {
return append(xl, yl...).norm()
}
// intersect returns the intersection xl ∩ yl.
func (xl termlist) intersect(yl termlist) termlist {
if xl.isEmpty() || yl.isEmpty() {
return nil
}
// Quadratic algorithm, but good enough for now.
// TODO(gri) fix asymptotic performance
var rl termlist
for _, x := range xl {
for _, y := range yl {
if r := x.intersect(y); r != nil {
rl = append(rl, r)
}
}
}
return rl.norm()
}
// equal reports whether xl and yl represent the same type set.
func (xl termlist) equal(yl termlist) bool {
// TODO(gri) this should be more efficient
return xl.subsetOf(yl) && yl.subsetOf(xl)
}
// includes reports whether t ∈ xl.
func (xl termlist) includes(t types.Type) bool {
for _, x := range xl {
if x.includes(t) {
return true
}
}
return false
}
// supersetOf reports whether y ⊆ xl.
func (xl termlist) supersetOf(y *term) bool {
for _, x := range xl {
if y.subsetOf(x) {
return true
}
}
return false
}
// subsetOf reports whether xl ⊆ yl.
func (xl termlist) subsetOf(yl termlist) bool {
if yl.isEmpty() {
return xl.isEmpty()
}
// each term x of xl must be a subset of yl
for _, x := range xl {
if !yl.supersetOf(x) {
return false // x is not a subset of yl
}
}
return true
}


@ -0,0 +1,169 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by copytermlist.go DO NOT EDIT.
package typeparams
import "go/types"
// A term describes elementary type sets:
//
// ∅: (*term)(nil) == ∅ // set of no types (empty set)
// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
// T: &term{false, T} == {T} // set of type T
// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
type term struct {
tilde bool // valid if typ != nil
typ types.Type
}
func (x *term) String() string {
switch {
case x == nil:
return "∅"
case x.typ == nil:
return "𝓤"
case x.tilde:
return "~" + x.typ.String()
default:
return x.typ.String()
}
}
// equal reports whether x and y represent the same type set.
func (x *term) equal(y *term) bool {
// easy cases
switch {
case x == nil || y == nil:
return x == y
case x.typ == nil || y.typ == nil:
return x.typ == y.typ
}
// ∅ ⊂ x, y ⊂ 𝓤
return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
}
// union returns the union x ∪ y: zero, one, or two non-nil terms.
func (x *term) union(y *term) (_, _ *term) {
// easy cases
switch {
case x == nil && y == nil:
return nil, nil // ∅ ∪ ∅ == ∅
case x == nil:
return y, nil // ∅ ∪ y == y
case y == nil:
return x, nil // x ∪ ∅ == x
case x.typ == nil:
return x, nil // 𝓤 ∪ y == 𝓤
case y.typ == nil:
return y, nil // x ∪ 𝓤 == 𝓤
}
// ∅ ⊂ x, y ⊂ 𝓤
if x.disjoint(y) {
return x, y // x ∪ y == (x, y) if x ∩ y == ∅
}
// x.typ == y.typ
// ~t ∪ ~t == ~t
// ~t ∪ T == ~t
// T ∪ ~t == ~t
// T ∪ T == T
if x.tilde || !y.tilde {
return x, nil
}
return y, nil
}
// intersect returns the intersection x ∩ y.
func (x *term) intersect(y *term) *term {
// easy cases
switch {
case x == nil || y == nil:
return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
case x.typ == nil:
return y // 𝓤 ∩ y == y
case y.typ == nil:
return x // x ∩ 𝓤 == x
}
// ∅ ⊂ x, y ⊂ 𝓤
if x.disjoint(y) {
return nil // x ∩ y == ∅ if x ∩ y == ∅
}
// x.typ == y.typ
// ~t ∩ ~t == ~t
// ~t ∩ T == T
// T ∩ ~t == T
// T ∩ T == T
if !x.tilde || y.tilde {
return x
}
return y
}
// includes reports whether t ∈ x.
func (x *term) includes(t types.Type) bool {
// easy cases
switch {
case x == nil:
return false // t ∈ ∅ == false
case x.typ == nil:
return true // t ∈ 𝓤 == true
}
// ∅ ⊂ x ⊂ 𝓤
u := t
if x.tilde {
u = under(u)
}
return types.Identical(x.typ, u)
}
// subsetOf reports whether x ⊆ y.
func (x *term) subsetOf(y *term) bool {
// easy cases
switch {
case x == nil:
return true // ∅ ⊆ y == true
case y == nil:
return false // x ⊆ ∅ == false since x != ∅
case y.typ == nil:
return true // x ⊆ 𝓤 == true
case x.typ == nil:
return false // 𝓤 ⊆ y == false since y != 𝓤
}
// ∅ ⊂ x, y ⊂ 𝓤
if x.disjoint(y) {
return false // x ⊆ y == false if x ∩ y == ∅
}
// x.typ == y.typ
// ~t ⊆ ~t == true
// ~t ⊆ T == false
// T ⊆ ~t == true
// T ⊆ T == true
return !x.tilde || y.tilde
}
// disjoint reports whether x ∩ y == ∅.
// x.typ and y.typ must not be nil.
func (x *term) disjoint(y *term) bool {
if debug && (x.typ == nil || y.typ == nil) {
panic("invalid argument(s)")
}
ux := x.typ
if y.tilde {
ux = under(ux)
}
uy := y.typ
if x.tilde {
uy = under(uy)
}
return !types.Identical(ux, uy)
}
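Because term and termlist are unexported, the set algebra above can only be exercised from inside this package (for example in a test). A sketch of how the operations behave on a few elementary sets:

```go
package typeparams

import (
	"fmt"
	"go/types"
)

// exampleTermAlgebra is an illustrative sketch only (it would have to live in
// this package, e.g. in a test file, since term and termlist are unexported).
func exampleTermAlgebra() {
	intT := types.Typ[types.Int]
	strT := types.Typ[types.String]

	tInt := &term{false, intT}     // {int}
	tTildeInt := &term{true, intT} // ~int: all types whose underlying type is int
	tStr := &term{false, strT}     // {string}

	u, _ := tInt.union(tTildeInt)
	fmt.Println(u)                         // ~int  (int ⊆ ~int, so the union collapses)
	fmt.Println(tInt.intersect(tTildeInt)) // int   (T ∩ ~t == T)
	fmt.Println(tInt.subsetOf(tTildeInt))  // true
	fmt.Println(tInt.disjoint(tStr))       // true  (int ∩ string == ∅)

	xl := termlist{tTildeInt, tStr}
	fmt.Println(xl)                           // ~int | string
	fmt.Println(xl.includes(intT))            // true
	fmt.Println(xl.intersect(termlist{tInt})) // int
}
```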


@ -0,0 +1,133 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typesinternal
import (
"fmt"
"go/types"
"golang.org/x/tools/go/types/typeutil"
)
// ForEachElement calls f for type T and each type reachable from its
// type through reflection. It does this by recursively stripping off
// type constructors; in addition, for each named type N, the type *N
// is added to the result as it may have additional methods.
//
// The caller must provide an initially empty set used to de-duplicate
// identical types, potentially across multiple calls to ForEachElement.
// (Its final value holds all the elements seen, matching the arguments
// passed to f.)
//
// TODO(adonovan): share/harmonize with go/callgraph/rta.
func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) {
var visit func(T types.Type, skip bool)
visit = func(T types.Type, skip bool) {
if !skip {
if seen, _ := rtypes.Set(T, true).(bool); seen {
return // de-dup
}
f(T) // notify caller of new element type
}
// Recursion over signatures of each method.
tmset := msets.MethodSet(T)
for i := 0; i < tmset.Len(); i++ {
sig := tmset.At(i).Type().(*types.Signature)
// It is tempting to call visit(sig, false)
// but, as noted in golang.org/cl/65450043,
// the Signature.Recv field is ignored by
// types.Identical and typeutil.Map, which
// is confusing at best.
//
// More importantly, the true signature rtype
// reachable from a method using reflection
// has no receiver but an extra ordinary parameter.
// For the Read method of io.Reader we want:
// func(Reader, []byte) (int, error)
// but here sig is:
// func([]byte) (int, error)
// with .Recv = Reader (though it is hard to
// notice because it doesn't affect Signature.String
// or types.Identical).
//
// TODO(adonovan): construct and visit the correct
// non-method signature with an extra parameter
// (though since unnamed func types have no methods
// there is essentially no actual demand for this).
//
// TODO(adonovan): document whether or not it is
// safe to skip non-exported methods (as RTA does).
visit(sig.Params(), true) // skip the Tuple
visit(sig.Results(), true) // skip the Tuple
}
switch T := T.(type) {
case *types.Alias:
visit(types.Unalias(T), skip) // emulates the pre-Alias behavior
case *types.Basic:
// nop
case *types.Interface:
// nop---handled by recursion over method set.
case *types.Pointer:
visit(T.Elem(), false)
case *types.Slice:
visit(T.Elem(), false)
case *types.Chan:
visit(T.Elem(), false)
case *types.Map:
visit(T.Key(), false)
visit(T.Elem(), false)
case *types.Signature:
if T.Recv() != nil {
panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv()))
}
visit(T.Params(), true) // skip the Tuple
visit(T.Results(), true) // skip the Tuple
case *types.Named:
// A pointer-to-named type can be derived from a named
// type via reflection. It may have methods too.
visit(types.NewPointer(T), false)
// Consider 'type T struct{S}' where S has methods.
// Reflection provides no way to get from T to struct{S},
// only to S, so the method set of struct{S} is unwanted,
// so set 'skip' flag during recursion.
visit(T.Underlying(), true) // skip the unnamed type
case *types.Array:
visit(T.Elem(), false)
case *types.Struct:
for i, n := 0, T.NumFields(); i < n; i++ {
// TODO(adonovan): document whether or not
// it is safe to skip non-exported fields.
visit(T.Field(i).Type(), false)
}
case *types.Tuple:
for i, n := 0, T.Len(); i < n; i++ {
visit(T.At(i).Type(), false)
}
case *types.TypeParam, *types.Union:
// forEachReachable must not be called on parameterized types.
panic(T)
default:
panic(T)
}
}
visit(T, false)
}
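A runnable sketch of ForEachElement over a small self-referential type (the internal import and the Node type are for illustration only):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
	"golang.org/x/tools/internal/typesinternal" // internal: for illustration only
)

const src = `package p

type Node struct {
	Name     string
	Children []*Node
}

func (n *Node) First() *Node { return n.Children[0] }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	T := pkg.Scope().Lookup("Node").Type()

	var rtypes typeutil.Map           // de-duplication set, shared across calls
	var msets typeutil.MethodSetCache // method-set cache
	typesinternal.ForEachElement(&rtypes, &msets, T, func(t types.Type) {
		fmt.Println(t) // p.Node, *p.Node, string, []*p.Node (each exactly once)
	})
}
```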


@ -838,7 +838,7 @@
// InvalidCap occurs when an argument to the cap built-in function is not of
// supported type.
//
// See https://golang.org/ref/spec#Lengthand_capacity for information on
// See https://golang.org/ref/spec#Length_and_capacity for information on
// which underlying types are supported as arguments to cap and len.
//
// Example:
@ -859,7 +859,7 @@
// InvalidCopy occurs when the arguments are not of slice type or do not
// have compatible type.
//
// See https://golang.org/ref/spec#Appendingand_copying_slices for more
// See https://golang.org/ref/spec#Appending_and_copying_slices for more
// information on the type requirements for the copy built-in.
//
// Example:
@ -897,7 +897,7 @@
// InvalidLen occurs when an argument to the len built-in function is not of
// supported type.
//
// See https://golang.org/ref/spec#Lengthand_capacity for information on
// See https://golang.org/ref/spec#Length_and_capacity for information on
// which underlying types are supported as arguments to cap and len.
//
// Example:
@ -914,7 +914,7 @@
// InvalidMake occurs when make is called with an unsupported type argument.
//
// See https://golang.org/ref/spec#Makingslices_maps_and_channels for
// See https://golang.org/ref/spec#Making_slices_maps_and_channels for
// information on the types that may be created using make.
//
// Example:


@ -6,8 +6,6 @@
import (
"go/types"
"golang.org/x/tools/internal/aliases"
)
// ReceiverNamed returns the named type (if any) associated with the
@ -15,11 +13,11 @@
// It also reports whether a Pointer was present.
func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
t := recv.Type()
if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
isPtr = true
t = ptr.Elem()
}
named, _ = aliases.Unalias(t).(*types.Named)
named, _ = types.Unalias(t).(*types.Named)
return
}
@ -36,7 +34,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
// indirection from the type, regardless of named types (analogous to
// a LOAD instruction).
func Unpointer(t types.Type) types.Type {
if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
return ptr.Elem()
}
return t


@ -11,6 +11,8 @@
"go/types"
"reflect"
"unsafe"
"golang.org/x/tools/internal/aliases"
)
func SetUsesCgo(conf *types.Config) bool {
@ -48,3 +50,72 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
}
return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true
}
// NameRelativeTo returns a types.Qualifier that qualifies members of
// all packages other than pkg, using only the package name.
// (By contrast, [types.RelativeTo] uses the complete package path,
// which is often excessive.)
//
// If pkg is nil, it is equivalent to [*types.Package.Name].
func NameRelativeTo(pkg *types.Package) types.Qualifier {
return func(other *types.Package) string {
if pkg != nil && pkg == other {
return "" // same package; unqualified
}
return other.Name()
}
}
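A short sketch of the difference from types.RelativeTo (the internal import and the package paths are for illustration only):

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typesinternal" // internal: for illustration only
)

func main() {
	pkgA := types.NewPackage("example.com/project/a", "a")
	pkgB := types.NewPackage("example.com/project/b", "b")
	obj := types.NewTypeName(token.NoPos, pkgB, "T", nil)
	named := types.NewNamed(obj, types.Typ[types.Int], nil)

	// Seen from package a, a type in package b is qualified by name only.
	fmt.Println(types.TypeString(named, typesinternal.NameRelativeTo(pkgA))) // b.T
	// types.RelativeTo uses the complete import path instead.
	fmt.Println(types.TypeString(named, types.RelativeTo(pkgA))) // example.com/project/b.T
	// Members of pkg itself are left unqualified.
	fmt.Println(types.TypeString(named, typesinternal.NameRelativeTo(pkgB))) // T
}
```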
// A NamedOrAlias is a [types.Type] that is named (as
// defined by the spec) and capable of bearing type parameters: it
// abstracts aliases ([types.Alias]) and defined types
// ([types.Named]).
//
// Every type declared by an explicit "type" declaration is a
// NamedOrAlias. (Built-in type symbols may additionally
// have type [types.Basic], which is not a NamedOrAlias,
// though the spec regards them as "named".)
//
// NamedOrAlias cannot expose the Origin method, because
// [types.Alias.Origin] and [types.Named.Origin] have different
// (covariant) result types; use [Origin] instead.
type NamedOrAlias interface {
types.Type
Obj() *types.TypeName
}
// TypeParams is a light shim around t.TypeParams().
// (go/types.Alias).TypeParams requires >= 1.23.
func TypeParams(t NamedOrAlias) *types.TypeParamList {
switch t := t.(type) {
case *types.Alias:
return aliases.TypeParams(t)
case *types.Named:
return t.TypeParams()
}
return nil
}
// TypeArgs is a light shim around t.TypeArgs().
// (go/types.Alias).TypeArgs requires >= 1.23.
func TypeArgs(t NamedOrAlias) *types.TypeList {
switch t := t.(type) {
case *types.Alias:
return aliases.TypeArgs(t)
case *types.Named:
return t.TypeArgs()
}
return nil
}
// Origin returns the generic type of the Named or Alias type t if it
// is instantiated, otherwise it returns t.
func Origin(t NamedOrAlias) NamedOrAlias {
switch t := t.(type) {
case *types.Alias:
return aliases.Origin(t)
case *types.Named:
return t.Origin()
}
return t
}


@ -0,0 +1,282 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typesinternal
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"strconv"
"strings"
)
// ZeroString returns the string representation of the "zero" value of the type t.
// This string can be used on the right-hand side of an assignment where the
// left-hand side has that explicit type.
// Exception: This does not apply to tuples. Their string representation is
// informational only and cannot be used in an assignment.
// When assigning to a wider type (such as 'any'), it's the caller's
// responsibility to handle any necessary type conversions.
// See [ZeroExpr] for a variant that returns an [ast.Expr].
func ZeroString(t types.Type, qf types.Qualifier) string {
switch t := t.(type) {
case *types.Basic:
switch {
case t.Info()&types.IsBoolean != 0:
return "false"
case t.Info()&types.IsNumeric != 0:
return "0"
case t.Info()&types.IsString != 0:
return `""`
case t.Kind() == types.UnsafePointer:
fallthrough
case t.Kind() == types.UntypedNil:
return "nil"
default:
panic(fmt.Sprint("ZeroString for unexpected type:", t))
}
case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
return "nil"
case *types.Named, *types.Alias:
switch under := t.Underlying().(type) {
case *types.Struct, *types.Array:
return types.TypeString(t, qf) + "{}"
default:
return ZeroString(under, qf)
}
case *types.Array, *types.Struct:
return types.TypeString(t, qf) + "{}"
case *types.TypeParam:
// Assumes func new is not shadowed.
return "*new(" + types.TypeString(t, qf) + ")"
case *types.Tuple:
// Tuples are not normal values.
// We currently format this as "(t[0], ..., t[n])". Could be something else.
components := make([]string, t.Len())
for i := 0; i < t.Len(); i++ {
components[i] = ZeroString(t.At(i).Type(), qf)
}
return "(" + strings.Join(components, ", ") + ")"
case *types.Union:
// Variables of these types cannot be created, so it makes
// no sense to ask for their zero value.
panic(fmt.Sprintf("invalid type for a variable: %v", t))
default:
panic(t) // unreachable.
}
}
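A few representative outputs (runnable sketch; the internal import is for illustration only):

```go
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/internal/typesinternal" // internal: for illustration only
)

func main() {
	fmt.Println(typesinternal.ZeroString(types.Typ[types.Int], nil))                    // 0
	fmt.Println(typesinternal.ZeroString(types.Typ[types.String], nil))                 // ""
	fmt.Println(typesinternal.ZeroString(types.NewSlice(types.Typ[types.Byte]), nil))   // nil
	fmt.Println(typesinternal.ZeroString(types.NewArray(types.Typ[types.Int], 3), nil)) // [3]int{}
	fmt.Println(typesinternal.ZeroString(types.NewPointer(types.Typ[types.Bool]), nil)) // nil
}
```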
// ZeroExpr returns the ast.Expr representation of the "zero" value of the type t.
// ZeroExpr is defined for types that are suitable for variables.
// It may panic for other types such as Tuple or Union.
// See [ZeroString] for a variant that returns a string.
func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
switch t := typ.(type) {
case *types.Basic:
switch {
case t.Info()&types.IsBoolean != 0:
return &ast.Ident{Name: "false"}
case t.Info()&types.IsNumeric != 0:
return &ast.BasicLit{Kind: token.INT, Value: "0"}
case t.Info()&types.IsString != 0:
return &ast.BasicLit{Kind: token.STRING, Value: `""`}
case t.Kind() == types.UnsafePointer:
fallthrough
case t.Kind() == types.UntypedNil:
return ast.NewIdent("nil")
default:
panic(fmt.Sprint("ZeroExpr for unexpected type:", t))
}
case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
return ast.NewIdent("nil")
case *types.Named, *types.Alias:
switch under := t.Underlying().(type) {
case *types.Struct, *types.Array:
return &ast.CompositeLit{
Type: TypeExpr(f, pkg, typ),
}
default:
return ZeroExpr(f, pkg, under)
}
case *types.Array, *types.Struct:
return &ast.CompositeLit{
Type: TypeExpr(f, pkg, typ),
}
case *types.TypeParam:
return &ast.StarExpr{ // *new(T)
X: &ast.CallExpr{
// Assumes func new is not shadowed.
Fun: ast.NewIdent("new"),
Args: []ast.Expr{
ast.NewIdent(t.Obj().Name()),
},
},
}
case *types.Tuple:
// Unlike ZeroString, there is no ast.Expr that can express a tuple as
// "(t[0], ..., t[n])".
panic(fmt.Sprintf("invalid type for a variable: %v", t))
case *types.Union:
// Variables of these types cannot be created, so it makes
// no sense to ask for their zero value.
panic(fmt.Sprintf("invalid type for a variable: %v", t))
default:
panic(t) // unreachable.
}
}
// IsZeroExpr uses simple syntactic heuristics to report whether expr
// is an obvious zero value, such as 0, "", nil, or false.
// It cannot do better without type information.
func IsZeroExpr(expr ast.Expr) bool {
switch e := expr.(type) {
case *ast.BasicLit:
return e.Value == "0" || e.Value == `""`
case *ast.Ident:
return e.Name == "nil" || e.Name == "false"
default:
return false
}
}
// TypeExpr returns syntax for the specified type. References to named types
// from packages other than pkg are qualified by an appropriate package name, as
// defined by the import environment of file.
// It may panic for types such as Tuple or Union.
func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
switch t := typ.(type) {
case *types.Basic:
switch t.Kind() {
case types.UnsafePointer:
// TODO(hxjiang): replace the implementation with types.Qualifier.
return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")}
default:
return ast.NewIdent(t.Name())
}
case *types.Pointer:
return &ast.UnaryExpr{
Op: token.MUL,
X: TypeExpr(f, pkg, t.Elem()),
}
case *types.Array:
return &ast.ArrayType{
Len: &ast.BasicLit{
Kind: token.INT,
Value: fmt.Sprintf("%d", t.Len()),
},
Elt: TypeExpr(f, pkg, t.Elem()),
}
case *types.Slice:
return &ast.ArrayType{
Elt: TypeExpr(f, pkg, t.Elem()),
}
case *types.Map:
return &ast.MapType{
Key: TypeExpr(f, pkg, t.Key()),
Value: TypeExpr(f, pkg, t.Elem()),
}
case *types.Chan:
dir := ast.ChanDir(t.Dir())
if t.Dir() == types.SendRecv {
dir = ast.SEND | ast.RECV
}
return &ast.ChanType{
Dir: dir,
Value: TypeExpr(f, pkg, t.Elem()),
}
case *types.Signature:
var params []*ast.Field
for i := 0; i < t.Params().Len(); i++ {
params = append(params, &ast.Field{
Type: TypeExpr(f, pkg, t.Params().At(i).Type()),
Names: []*ast.Ident{
{
Name: t.Params().At(i).Name(),
},
},
})
}
if t.Variadic() {
last := params[len(params)-1]
last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt}
}
var returns []*ast.Field
for i := 0; i < t.Results().Len(); i++ {
returns = append(returns, &ast.Field{
Type: TypeExpr(f, pkg, t.Results().At(i).Type()),
})
}
return &ast.FuncType{
Params: &ast.FieldList{
List: params,
},
Results: &ast.FieldList{
List: returns,
},
}
case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam}
switch t.Obj().Pkg() {
case pkg, nil:
return ast.NewIdent(t.Obj().Name())
}
pkgName := t.Obj().Pkg().Name()
// TODO(hxjiang): replace the implementation with types.Qualifier.
// If the file already imports the package under another name, use that.
for _, cand := range f.Imports {
if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() {
if cand.Name != nil && cand.Name.Name != "" {
pkgName = cand.Name.Name
}
}
}
if pkgName == "." {
return ast.NewIdent(t.Obj().Name())
}
return &ast.SelectorExpr{
X: ast.NewIdent(pkgName),
Sel: ast.NewIdent(t.Obj().Name()),
}
case *types.Struct:
return ast.NewIdent(t.String())
case *types.Interface:
return ast.NewIdent(t.String())
case *types.Union:
// TODO(hxjiang): handle the union through syntax (~A | ... | ~Z).
// Remove nil check when calling typesinternal.TypeExpr.
return nil
case *types.Tuple:
panic("invalid input type types.Tuple")
default:
panic("unreachable")
}
}
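A sketch of TypeExpr and ZeroExpr for unnamed types, printed back to source with go/printer (for unnamed types neither the *ast.File nor the *types.Package is consulted, so nil is passed for both; the internal import is for illustration only):

```go
package main

import (
	"fmt"
	"go/printer"
	"go/token"
	"go/types"
	"os"

	"golang.org/x/tools/internal/typesinternal" // internal: for illustration only
)

func main() {
	fset := token.NewFileSet()
	T := types.NewMap(types.Typ[types.String], types.NewSlice(types.Typ[types.Int]))

	expr := typesinternal.TypeExpr(nil, nil, T) // nil file/pkg: T mentions no named types
	printer.Fprint(os.Stdout, fset, expr)       // map[string][]int
	fmt.Println()

	zero := typesinternal.ZeroExpr(nil, nil, types.Typ[types.Float64])
	printer.Fprint(os.Stdout, fset, zero) // 0
	fmt.Println()
}
```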


@ -1,14 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package versions
// toolchain is maximum version (<1.22) that the go toolchain used
// to build the current tool is known to support.
//
// When a tool is built with >=1.22, the value of toolchain is unused.
//
// x/tools does not support building with go <1.18. So we take this
// as the minimum possible maximum.
var toolchain string = Go1_18


@ -1,14 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.19
// +build go1.19
package versions
func init() {
if Compare(toolchain, Go1_19) < 0 {
toolchain = Go1_19
}
}


@ -1,14 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.20
// +build go1.20
package versions
func init() {
if Compare(toolchain, Go1_20) < 0 {
toolchain = Go1_20
}
}


@ -1,14 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21
// +build go1.21
package versions
func init() {
if Compare(toolchain, Go1_21) < 0 {
toolchain = Go1_21
}
}


@ -5,15 +5,29 @@
package versions
import (
"go/ast"
"go/types"
)
// GoVersion returns the Go version of the type package.
// It returns zero if no version can be determined.
func GoVersion(pkg *types.Package) string {
// TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25.
if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok {
return pkg.GoVersion()
// FileVersion returns a file's Go version.
// The reported version is an unknown Future version if a
// version cannot be determined.
func FileVersion(info *types.Info, file *ast.File) string {
// In tools built with Go >= 1.22, the Go version of a file
// follows a cascade of sources:
// 1) types.Info.FileVersion, which follows the cascade:
// 1.a) file version (ast.File.GoVersion),
// 1.b) the package version (types.Config.GoVersion), or
// 2) is some unknown Future version.
//
// File versions require a valid package version to be provided to types
// in Config.GoVersion. Config.GoVersion is either from the package's module
// or the toolchain (go run). This value should be provided by go/packages
// or unitchecker.Config.GoVersion.
if v := info.FileVersions[file]; IsValid(v) {
return v
}
return ""
// Note: we could instead return runtime.Version() [if valid].
// This would act as a max version on what a tool can support.
return Future
}
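A sketch of the cascade described above (the internal import is for illustration only, and the exact value recorded in Info.FileVersions depends on the toolchain used to build the tool):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/versions" // internal: for illustration only
)

const src = `//go:build go1.21

package p
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	info := &types.Info{FileVersions: make(map[*ast.File]string)}
	conf := types.Config{GoVersion: "go1.22"}
	if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
		panic(err)
	}
	// Prints the effective version for f: its //go:build version if one was
	// recorded, otherwise the package version from Config.GoVersion,
	// otherwise versions.Future.
	fmt.Println(versions.FileVersion(info, f))
}
```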


@ -1,30 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.22
// +build !go1.22
package versions
import (
"go/ast"
"go/types"
)
// FileVersion returns a language version (<=1.21) derived from runtime.Version()
// or an unknown future version.
func FileVersion(info *types.Info, file *ast.File) string {
// In x/tools built with Go <= 1.21, we do not have Info.FileVersions
// available. We use a go version derived from the toolchain used to
// compile the tool by default.
// This will be <= go1.21. We take this as the maximum version that
// this tool can support.
//
// There are no features currently in x/tools that need to tell fine grained
// differences for versions <1.22.
return toolchain
}
// InitFileVersions is a noop when compiled with this Go version.
func InitFileVersions(*types.Info) {}


@ -1,41 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.22
// +build go1.22
package versions
import (
"go/ast"
"go/types"
)
// FileVersion returns a file's Go version.
// The reported version is an unknown Future version if a
// version cannot be determined.
func FileVersion(info *types.Info, file *ast.File) string {
// In tools built with Go >= 1.22, the Go version of a file
// follow a cascades of sources:
// 1) types.Info.FileVersion, which follows the cascade:
// 1.a) file version (ast.File.GoVersion),
// 1.b) the package version (types.Config.GoVersion), or
// 2) is some unknown Future version.
//
// File versions require a valid package version to be provided to types
// in Config.GoVersion. Config.GoVersion is either from the package's module
// or the toolchain (go run). This value should be provided by go/packages
// or unitchecker.Config.GoVersion.
if v := info.FileVersions[file]; IsValid(v) {
return v
}
// Note: we could instead return runtime.Version() [if valid].
// This would act as a max version on what a tool can support.
return Future
}
// InitFileVersions initializes info to record Go versions for Go files.
func InitFileVersions(info *types.Info) {
info.FileVersions = make(map[*ast.File]string)
}

vendor/modules.txt

@ -600,8 +600,8 @@ github.com/quasoft/memstore
# github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec
## explicit; go 1.12
github.com/remyoudompheng/bigfft
# github.com/rogpeppe/go-internal v1.12.0
## explicit; go 1.20
# github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a
## explicit; go 1.22.0
github.com/rogpeppe/go-internal/fmtsort
# github.com/rs/xid v1.6.0
## explicit; go 1.16
@ -1115,8 +1115,8 @@ golang.org/x/image/riff
golang.org/x/image/vp8
golang.org/x/image/vp8l
golang.org/x/image/webp
# golang.org/x/mod v0.18.0
## explicit; go 1.18
# golang.org/x/mod v0.22.0
## explicit; go 1.22.0
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
golang.org/x/mod/semver
@ -1174,13 +1174,13 @@ golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/width
# golang.org/x/tools v0.22.0
## explicit; go 1.19
# golang.org/x/tools v0.28.0
## explicit; go 1.22.0
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/go/gcexportdata
golang.org/x/tools/go/internal/packagesdriver
golang.org/x/tools/go/packages
golang.org/x/tools/go/types/objectpath
golang.org/x/tools/go/types/typeutil
golang.org/x/tools/imports
golang.org/x/tools/internal/aliases
golang.org/x/tools/internal/event
@ -1191,10 +1191,11 @@ golang.org/x/tools/internal/gcimporter
golang.org/x/tools/internal/gocommand
golang.org/x/tools/internal/gopathwalk
golang.org/x/tools/internal/imports
golang.org/x/tools/internal/modindex
golang.org/x/tools/internal/packagesinternal
golang.org/x/tools/internal/pkgbits
golang.org/x/tools/internal/stdlib
golang.org/x/tools/internal/tokeninternal
golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal
golang.org/x/tools/internal/versions
# google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1
@ -1365,8 +1366,8 @@ modernc.org/strutil
# modernc.org/token v1.1.0
## explicit
modernc.org/token
# mvdan.cc/xurls/v2 v2.5.0
## explicit; go 1.19
# mvdan.cc/xurls/v2 v2.6.0
## explicit; go 1.22.0
mvdan.cc/xurls/v2
# github.com/go-swagger/go-swagger => github.com/superseriousbusiness/go-swagger v0.31.0-gts-go1.23-fix
# modernc.org/sqlite => gitlab.com/NyaaaWhatsUpDoc/sqlite v1.34.2-concurrency-workaround


@ -2,7 +2,7 @@
[![Go Reference](https://pkg.go.dev/badge/mvdan.cc/xurls/v2.svg)](https://pkg.go.dev/mvdan.cc/xurls/v2)
Extract urls from text using regular expressions. Requires Go 1.19 or later.
Extract urls from text using regular expressions. Requires Go 1.22 or later.
```go
import "mvdan.cc/xurls/v2"
@ -18,8 +18,8 @@ func main() {
}
```
Since API is centered around [regexp.Regexp](https://golang.org/pkg/regexp/#Regexp),
many other methods are available, such as finding the [byte indexes](https://golang.org/pkg/regexp/#Regexp.FindAllIndex)
Since API is centered around [regexp.Regexp](https://pkg.go.dev/regexp#Regexp),
many other methods are available, such as finding the [byte indexes](https://pkg.go.dev/regexp#Regexp.FindAllIndex)
for all matches.
The regular expressions are compiled when the API is first called.


@ -24,6 +24,7 @@
`apt`,
`ar`,
`ark`,
`at`,
`attachment`,
`aw`,
`barion`,
@ -32,7 +33,9 @@
`bitcoin`,
`bitcoincash`,
`blob`,
`bluetooth`,
`bolo`,
`brid`,
`browserext`,
`cabal`,
`calculator`,
@ -59,6 +62,7 @@
`dat`,
`data`,
`dav`,
`dhttp`,
`diaspora`,
`dict`,
`did`,
@ -110,6 +114,7 @@
`ham`,
`hcap`,
`hcp`,
`hs20`,
`http`,
`https`,
`hxxp`,
@ -147,9 +152,11 @@
`ldap`,
`ldaps`,
`leaptofrogans`,
`lid`,
`lorawan`,
`lpa`,
`lvlt`,
`machineProvisioningProgressReporter`,
`magnet`,
`mailserver`,
`mailto`,
@ -179,6 +186,7 @@
`ms-help`,
`ms-infopath`,
`ms-inputapp`,
`ms-launchremotedesktop`,
`ms-lockscreencomponent-config`,
`ms-media-stream-id`,
`ms-meetnow`,
@ -187,9 +195,12 @@
`ms-newsandinterests`,
`ms-officeapp`,
`ms-people`,
`ms-personacard`,
`ms-project`,
`ms-powerpoint`,
`ms-publisher`,
`ms-recall`,
`ms-remotedesktop`,
`ms-remotedesktop-launch`,
`ms-restoretabcompanion`,
`ms-screenclip`,
@ -238,6 +249,8 @@
`mumble`,
`mupdate`,
`mvn`,
`mvrp`,
`mvrps`,
`news`,
`nfs`,
`ni`,
@ -250,6 +263,7 @@
`onenote`,
`onenote-cmd`,
`opaquelocktoken`,
`openid`,
`openpgp4fpr`,
`otpauth`,
`p1`,
@ -290,6 +304,7 @@
`sftp`,
`sgn`,
`shc`,
`shelter`,
`sieve`,
`simpleledger`,
`simplex`,
@ -321,12 +336,15 @@
`tag`,
`taler`,
`teamspeak`,
`teapot`,
`teapots`,
`tel`,
`teliaeid`,
`telnet`,
`tftp`,
`things`,
`thismessage`,
`thzp`,
`tip`,
`tn3270`,
`tool`,
@ -367,6 +385,8 @@
`xmlrpc.beep`,
`xmlrpc.beeps`,
`xmpp`,
`xftp`,
`xrcp`,
`xri`,
`ymsgr`,
`z39.50`,


@ -10,7 +10,6 @@
var TLDs = []string{
`aaa`,
`aarp`,
`abarth`,
`abb`,
`abbott`,
`abbvie`,
@ -45,7 +44,6 @@
`airtel`,
`akdn`,
`al`,
`alfaromeo`,
`alibaba`,
`alipay`,
`allfinanz`,
@ -96,7 +94,6 @@
`author`,
`auto`,
`autos`,
`avianca`,
`aw`,
`aws`,
`ax`,
@ -107,7 +104,6 @@
`baby`,
`baidu`,
`banamex`,
`bananarepublic`,
`band`,
`bank`,
`bar`,
@ -224,7 +220,6 @@
`cba`,
`cbn`,
`cbre`,
`cbs`,
`cc`,
`cd`,
`center`,
@ -253,7 +248,6 @@
`citi`,
`citic`,
`city`,
`cityeats`,
`ck`,
`cl`,
`claims`,
@ -274,7 +268,6 @@
`college`,
`cologne`,
`com`,
`comcast`,
`commbank`,
`community`,
`company`,
@ -287,7 +280,6 @@
`contact`,
`contractors`,
`cooking`,
`cookingchannel`,
`cool`,
`coop`,
`corsica`,
@ -314,7 +306,6 @@
`cymru`,
`cyou`,
`cz`,
`dabur`,
`dad`,
`dance`,
`data`,
@ -393,7 +384,6 @@
`esq`,
`estate`,
`et`,
`etisalat`,
`eu`,
`eurovision`,
`eus`,
@ -419,7 +409,6 @@
`ferrari`,
`ferrero`,
`fi`,
`fiat`,
`fidelity`,
`fido`,
`film`,
@ -445,7 +434,6 @@
`fo`,
`foo`,
`food`,
`foodnetwork`,
`football`,
`ford`,
`forex`,
@ -458,7 +446,6 @@
`fresenius`,
`frl`,
`frogans`,
`frontdoor`,
`frontier`,
`ftr`,
`fujitsu`,
@ -530,7 +517,6 @@
`gs`,
`gt`,
`gu`,
`guardian`,
`gucci`,
`guge`,
`guide`,
@ -551,7 +537,6 @@
`helsinki`,
`here`,
`hermes`,
`hgtv`,
`hiphop`,
`hisamitsu`,
`hitachi`,
@ -573,7 +558,6 @@
`host`,
`hosting`,
`hot`,
`hoteles`,
`hotels`,
`hotmail`,
`house`,
@ -661,7 +645,6 @@
`kia`,
`kids`,
`kim`,
`kinder`,
`kindle`,
`kitchen`,
`kiwi`,
@ -686,7 +669,6 @@
`lamborghini`,
`lamer`,
`lancaster`,
`lancia`,
`land`,
`landrover`,
`lanxess`,
@ -760,7 +742,6 @@
`markets`,
`marriott`,
`marshalls`,
`maserati`,
`mattel`,
`mba`,
`mc`,
@ -775,6 +756,7 @@
`memorial`,
`men`,
`menu`,
`merck`,
`merckmsd`,
`mg`,
`mh`,
@ -820,7 +802,6 @@
`mu`,
`museum`,
`music`,
`mutual`,
`mv`,
`mw`,
`mx`,
@ -830,7 +811,6 @@
`nab`,
`nagoya`,
`name`,
`natura`,
`navy`,
`nba`,
`nc`,
@ -861,7 +841,6 @@
`nl`,
`no`,
`nokia`,
`northwesternmutual`,
`norton`,
`now`,
`nowruz`,
@ -880,7 +859,6 @@
`okinawa`,
`olayan`,
`olayangroup`,
`oldnavy`,
`ollo`,
`om`,
`omega`,
@ -908,7 +886,6 @@
`partners`,
`parts`,
`party`,
`passagens`,
`pay`,
`pccw`,
`pe`,
@ -1009,7 +986,6 @@
`rio`,
`rip`,
`ro`,
`rocher`,
`rocks`,
`rodeo`,
`rogers`,
@ -1044,7 +1020,6 @@
`sbi`,
`sbs`,
`sc`,
`sca`,
`scb`,
`schaeffler`,
`schmidt`,
@ -1073,7 +1048,6 @@
`sh`,
`shangrila`,
`sharp`,
`shaw`,
`shell`,
`shia`,
`shiksha`,
@ -1082,7 +1056,6 @@
`shopping`,
`shouji`,
`show`,
`showtime`,
`si`,
`silk`,
`sina`,
@ -1180,7 +1153,6 @@
`tiaa`,
`tickets`,
`tienda`,
`tiffany`,
`tips`,
`tires`,
`tirol`,
@ -1210,7 +1182,6 @@
`trading`,
`training`,
`travel`,
`travelchannel`,
`travelers`,
`travelersinsurance`,
`trust`,
@ -1267,14 +1238,12 @@
`vlaanderen`,
`vn`,
`vodka`,
`volkswagen`,
`volvo`,
`vote`,
`voting`,
`voto`,
`voyage`,
`vu`,
`vuelos`,
`wales`,
`walmart`,
`walter`,
@ -1312,7 +1281,6 @@
`wtf`,
`xbox`,
`xerox`,
`xfinity`,
`xihuan`,
`xin`,
`xxx`,
@ -1361,7 +1329,6 @@
`ישראל`,
`קום`,
`ابوظبي`,
`اتصالات`,
`ارامكو`,
`الاردن`,
`البحرين`,


@ -2,6 +2,6 @@
package xurls
const allowedUcsChar = "¡-ᙿᚁ-\u1fff\u200b-‧\u202a-\u202e‰-⁞\u2060-\u2fff、-\ud7ff豈-\ufdcfﷰ-\uffef𐀀-\U0001fffd𠀀-\U0002fffd𰀀-\U0003fffd\U00040000-\U0004fffd\U00050000-\U0005fffd\U00060000-\U0006fffd\U00070000-\U0007fffd\U00080000-\U0008fffd\U00090000-\U0009fffd\U000a0000-\U000afffd\U000b0000-\U000bfffd\U000c0000-\U000cfffd\U000d0000-\U000dfffd\U000e1000-\U000efffd"
const allowedUcsChar = "¡-ᙿᚁ-\u1fff\u200b-‧\u202a-\u202e‰-⁞\u2060-\u2fff、-\ud7ff豈-ﷰ-\uffef𐀀-\U0001fffd𠀀-\U0002fffd𰀀-\U0003fffd\U00040000-\U0004fffd\U00050000-\U0005fffd\U00060000-\U0006fffd\U00070000-\U0007fffd\U00080000-\U0008fffd\U00090000-\U0009fffd\U000a0000-\U000afffd\U000b0000-\U000bfffd\U000c0000-\U000cfffd\U000d0000-\U000dfffd\U000e1000-\U000efffd"
const allowedUcsCharMinusPunc = "¢-¦¨-µ¸-¾À-ͽͿ-ΆΈ-ՙՠ-ֈ֊-ֿׁ-ׂׄ-ׇׅ-ײ\u05f5-؈؋؎-ؚ\u061c-\u061dؠ-٩ٮ-ۓە-ۿ\u070e-߶ߺ-\u082f\u083f-\u085d\u085f-ॣ०-९ॱ-ৼ৾-ੵ\u0a77-૯૱-\u0c76౸-ಃಅ-ෳ\u0df5-๎๐-๙\u0e5c-༃༓༕-྄྆-࿏࿕-࿘\u0fdb-၉ၐ-ჺჼ-፟፩-᙭ᙯ-ᙿᚁ-ᛪᛮ-᜴\u1737-៓ៗ៛-\u17ff᠆-\u1943᥆-\u1a1dᨠ-\u1a9fᪧ\u1aae-᭙᭡-\u1bfbᰀ-\u1c3a᱀-ᱽᲀ-Ჿ\u1cc8-᳔᳒-\u1fff\u200b-―‘-‟\u202a-\u202e-›‿-⁀⁄-⁆⁒⁔\u2060-\u2cf8⳽ⴀ-ⵯ\u2d71-ⷿ⸂-⸅⸉-⸊⸌-⸍⸗⸚⸜-⸝⸠-⸩ⸯ⸺-⸻⹀⹂⹐-⹑\u2e53-\u2fff〄-〼〾-ヺー-ꓽꔀ-ꘌꘐ-꙲ꙴ-꙽ꙿ-꛱\ua6f8-ꡳ\ua878-\ua8cd꣐-ꣷꣻꣽ-꤭ꤰ-\ua95eꥠ-꧀\ua9ce-\ua9ddꧠ-\uaa5bꩠ-ꫝꫠ-ꫯꫲ-ꯪ꯬-\ud7ff豈-\ufdcfﷰ-️︗-︘\ufe1a-︯︱-﹄﹇-﹈﹍-\ufe53-﹞﹢-\ufe67﹩\ufe6c-\uff00-----⦆「-」ヲ-\uffef𐀀-\U000100ff\U00010103-\U0001039e𐎠-𐏏𐏑-\U0001056e\U00010570-\U00010856𐡘-\U0001091e𐤠-\U0001093e\U00010940-\U00010a4f\U00010a59-𐩾𐪀-𐫯\U00010af7-\U00010b38𐭀-\U00010b98\U00010b9d-𐽔\U00010f5a-𑁆\U0001104e-𑂺\U000110bd\U000110c2-𑄿𑅄-𑅳𑅶-𑇄𑇉-𑇌𑇎-𑇚𑇜\U000111e0-𑈷𑈾-𑊨\U000112aa-𑑊𑑐-𑑙\U0001145c𑑞-𑓅𑓇-𑗀𑗘-𑙀𑙄-\U0001165f\U0001166d-𑜻𑜿-𑠺\U0001183c-𑥃\U00011947-𑧡𑧣-𑨾𑩇-𑪙𑪝\U00011aa3-𑱀\U00011c46-\U00011c6f𑱲-𑻶\U00011ef9-\U00011ffe𒀀-\U0001246f\U00012475-\U00016a6d\U00016a70-𖫴\U00016af6-𖬶𖬼-𖭃𖭅-𖺖\U00016e9b-𖿡𖿣-𛲞\U0001bca0-𝪆\U0001da8c-\U0001e95d\U0001e960-\U0001fffd𠀀-\U0002fffd𰀀-\U0003fffd\U00040000-\U0004fffd\U00050000-\U0005fffd\U00060000-\U0006fffd\U00070000-\U0007fffd\U00080000-\U0008fffd\U00090000-\U0009fffd\U000a0000-\U000afffd\U000b0000-\U000bfffd\U000c0000-\U000cfffd\U000d0000-\U000dfffd\U000e1000-\U000efffd"
const allowedUcsCharMinusPunc = "¢-¦¨-µ¸-¾À-ͽͿ-ΆΈ-ՙՠ-ֈ֊-ֿׁ-ׂׄ-ׇׅ-ײ\u05f5-؈؋؎-ؚ\u061cؠ-٩ٮ-ۓە-ۿ\u070e-߶ߺ-\u082f\u083f-\u085d\u085f-ॣ०-९ॱ-ৼ৾-ੵ\u0a77-૯૱-\u0c76౸-ಃಅ-ෳ\u0df5-๎๐-๙\u0e5c-༃༓༕-྄྆-࿏࿕-࿘\u0fdb-၉ၐ-ჺჼ-፟፩-᙭ᙯ-ᙿᚁ-ᛪᛮ-᜴\u1737-៓ៗ៛-\u17ff᠆-\u1943᥆-\u1a1dᨠ-\u1a9fᪧ\u1aae-᭙᭡-᭼\u1b7f-\u1bfbᰀ-\u1c3a᱀-ᱽᲀ-Ჿ\u1cc8-᳔᳒-\u1fff\u200b-―‘-‟\u202a-\u202e-›‿-⁀⁄-⁆⁒⁔\u2060-\u2cf8⳽ⴀ-ⵯ\u2d71-ⷿ⸂-⸅⸉-⸊⸌-⸍⸗⸚⸜-⸝⸠-⸩ⸯ⸺-⸻⹀⹂⹐-⹑-\u2fff〄-〼〾-ヺー-ꓽꔀ-ꘌꘐ-꙲ꙴ-꙽ꙿ-꛱\ua6f8-ꡳ\ua878-\ua8cd꣐-ꣷꣻꣽ-꤭ꤰ-\ua95eꥠ-꧀\ua9ce-\ua9ddꧠ-\uaa5bꩠ-ꫝꫠ-ꫯꫲ-ꯪ꯬-\ud7ff豈-ﷰ-️︗-︘\ufe1a-︯︱-﹄﹇-﹈﹍-\ufe53-﹞﹢-\ufe67﹩\ufe6c-\uff00-----⦆「-」ヲ-\uffef𐀀-\U000100ff\U00010103-\U0001039e𐎠-𐏏𐏑-\U0001056e𐕰-\U00010856𐡘-\U0001091e𐤠-\U0001093e\U00010940-\U00010a4f\U00010a59-𐩾𐪀-𐫯\U00010af7-\U00010b38𐭀-\U00010b98\U00010b9d-𐽔\U00010f5a-𐾅\U00010f8a-𑁆\U0001104e-𑂺\U000110bd𑃂-𑄿𑅄-𑅳𑅶-𑇄𑇉-𑇌𑇎-𑇚𑇜\U000111e0-𑈷𑈾-𑊨\U000112aa-𑑊𑑐-𑑙\U0001145c𑑞-𑓅𑓇-𑗀𑗘-𑙀𑙄-\U0001165f\U0001166d-𑚸\U000116ba-𑜻𑜿-𑠺\U0001183c-𑥃\U00011947-𑧡𑧣-𑨾𑩇-𑪙𑪝\U00011aa3-\U00011aff\U00011b0a-𑱀\U00011c46-\U00011c6f𑱲-𑻶\U00011ef9-𑽂𑽐-\U00011ffe𒀀-\U0001246f\U00012475-𒿰\U00012ff3-\U00016a6d𖩰-𖫴\U00016af6-𖬶𖬼-𖭃𖭅-𖺖\U00016e9b-𖿡𖿣-𛲞\U0001bca0-𝪆\U0001da8c-\U0001e95d\U0001e960-\U0001fffd𠀀-\U0002fffd𰀀-\U0003fffd\U00040000-\U0004fffd\U00050000-\U0005fffd\U00060000-\U0006fffd\U00070000-\U0007fffd\U00080000-\U0008fffd\U00090000-\U0009fffd\U000a0000-\U000afffd\U000b0000-\U000bfffd\U000c0000-\U000cfffd\U000d0000-\U000dfffd\U000e1000-\U000efffd"


@ -75,7 +75,7 @@
`)`
ipv6Addr = `(?:` + ipv6AddrMinusEmpty + `|::)`
ipAddrMinusEmpty = `(?:` + ipv6AddrMinusEmpty + `|\b` + ipv4Addr + `\b)`
port = `(?::[0-9]*)?`
port = `(?::[0-9]+)?`
)
// AnyScheme can be passed to StrictMatchingScheme to match any possibly valid
@ -89,8 +89,10 @@
`bitcoin`, // Bitcoin
`cid`, // Content-ID
`file`, // Files
`geo`, // Geographic location
`magnet`, // Torrent magnets
`mailto`, // Mail
`matrix`, // Matrix
`mid`, // Message-ID
`sms`, // SMS
`tel`, // Telephone
@ -163,7 +165,7 @@ func relaxedExp() string {
hostName := `(?:` + domain + `|\[` + ipv6Addr + `\]|\b` + ipv4Addr + `\b)`
webURL := hostName + port + `(?:/` + pathCont + `|/)?`
email := `[a-zA-Z0-9._%\-+]+@` + domain
email := `(?P<relaxedEmail>[a-zA-Z0-9._%\-+]+@` + domain + `)`
return strictExp() + `|` + webURL + `|` + email + `|` + ipv6AddrMinusEmpty
}
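The new relaxedEmail capture group makes it possible to tell schemeless email matches apart from other relaxed matches, for example (a sketch using the public API):

```go
package main

import (
	"fmt"

	"mvdan.cc/xurls/v2"
)

func main() {
	rx := xurls.Relaxed()
	emailGroup := rx.SubexpIndex("relaxedEmail")

	text := "see example.com or mail bob@example.com"
	for _, m := range rx.FindAllStringSubmatch(text, -1) {
		if m[emailGroup] != "" {
			fmt.Println("email:", m[0])
		} else {
			fmt.Println("url:  ", m[0])
		}
	}
	// url:   example.com
	// email: bob@example.com
}
```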
@ -178,7 +180,10 @@ func Strict() *regexp.Regexp {
}
// Relaxed produces a regexp that matches any URL matched by Strict, plus any
// URL with no scheme or email address.
// URL or email address with no scheme.
//
// Email addresses without a scheme match the `relaxedEmail` subexpression,
// which can be used to filter them as needed.
func Relaxed() *regexp.Regexp {
relaxedInit.Do(func() {
relaxedRe = regexp.MustCompile(relaxedExp())