Mirror of https://github.com/aquasecurity/trivy.git
@@ -519,6 +519,7 @@ func initScannerConfig(opts flag.Options, cacheClient cache.Cache) (ScannerConfi
         SBOMSources: opts.SBOMSources,
         RekorURL:    opts.RekorURL,
         Platform:    opts.Platform,
+        Slow:        opts.Slow,

         // For misconfiguration scanning
         MisconfScannerOption: configScannerOptions,
@@ -26,6 +26,7 @@ type Option struct {
     SBOMSources []string
     RekorURL    string
     Platform    string
+    Slow        bool // Lower CPU and memory

     MisconfScannerOption misconf.ScannerOption
     SecretScannerOption  analyzer.SecretScannerOption
@@ -64,7 +64,7 @@ func NewArtifact(img types.Image, c cache.ArtifactCache, opt artifact.Option) (a
     return Artifact{
         image:          img,
         cache:          c,
-        walker:         walker.NewLayerTar(opt.SkipFiles, opt.SkipDirs),
+        walker:         walker.NewLayerTar(opt.SkipFiles, opt.SkipDirs, opt.Slow),
         analyzer:       a,
         handlerManager: handlerManager,
@@ -206,9 +206,24 @@ func (a Artifact) inspect(ctx context.Context, missingImage string, layerKeys, b
     done := make(chan struct{})
     errCh := make(chan error)

+    limit := semaphore.NewWeighted(parallel)
+    if a.artifactOption.Slow {
+        // Inspect layers in series
+        limit = semaphore.NewWeighted(1)
+    }
+
     var osFound types.OS
     for _, k := range layerKeys {
+        if err := limit.Acquire(ctx, 1); err != nil {
+            return xerrors.Errorf("semaphore acquire: %w", err)
+        }
+
         go func(ctx context.Context, layerKey string) {
+            defer func() {
+                limit.Release(1)
+                done <- struct{}{}
+            }()
+
             layer := layerKeyMap[layerKey]

             // If it is a base layer, secret scanning should not be performed.
@@ -229,7 +244,6 @@ func (a Artifact) inspect(ctx context.Context, missingImage string, layerKeys, b
             if layerInfo.OS != nil {
                 osFound = *layerInfo.OS
             }
-            done <- struct{}{}
         }(ctx, k)
     }

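Aside: the two hunks above cap layer-inspection concurrency with a weighted semaphore and move the `done` signal into the goroutine's deferred cleanup. Below is a minimal, standalone sketch of that pattern (not Trivy code; the layer keys, worker body, and `parallel` value are placeholders):

    package main

    import (
        "context"
        "fmt"

        "golang.org/x/sync/semaphore"
    )

    func main() {
        ctx := context.Background()
        parallel := int64(5) // illustrative; Trivy defines its own constant
        slow := true

        limit := semaphore.NewWeighted(parallel)
        if slow {
            // Weight 1 degenerates to serial execution: lower peak CPU/memory.
            limit = semaphore.NewWeighted(1)
        }

        layers := []string{"sha256:aaa", "sha256:bbb", "sha256:ccc"} // dummy keys
        done := make(chan struct{})

        for _, k := range layers {
            if err := limit.Acquire(ctx, 1); err != nil { // blocks until a slot frees up
                panic(err)
            }
            go func(layerKey string) {
                defer func() {
                    limit.Release(1)
                    done <- struct{}{} // signal completion exactly once per layer
                }()
                fmt.Println("inspecting", layerKey)
            }(k)
        }

        for range layers {
            <-done // wait for every layer, mirroring the done channel in the diff
        }
    }

Releasing the semaphore before sending on `done` keeps the launching loop from stalling while workers wait to report completion.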
@@ -266,6 +280,10 @@ func (a Artifact) inspectLayer(ctx context.Context, layerInfo LayerInfo, disable
     opts := analyzer.AnalysisOptions{Offline: a.artifactOption.Offline}
     result := analyzer.NewAnalysisResult()
     limit := semaphore.NewWeighted(parallel)
+    if a.artifactOption.Slow {
+        // Analyze files in series
+        limit = semaphore.NewWeighted(1)
+    }

     // Walk a tar layer
     opqDirs, whFiles, err := a.walker.Walk(r, func(filePath string, info os.FileInfo, opener analyzer.Opener) error {
@@ -55,7 +55,7 @@ func NewArtifact(rootPath string, c cache.ArtifactCache, opt artifact.Option) (a
     return Artifact{
         rootPath:       filepath.Clean(rootPath),
         cache:          c,
-        walker:         walker.NewFS(buildAbsPaths(rootPath, opt.SkipFiles), buildAbsPaths(rootPath, opt.SkipDirs)),
+        walker:         walker.NewFS(buildAbsPaths(rootPath, opt.SkipFiles), buildAbsPaths(rootPath, opt.SkipDirs), opt.Slow),
         analyzer:       a,
         handlerManager: handlerManager,
@@ -79,6 +79,10 @@ func (a Artifact) Inspect(ctx context.Context) (types.ArtifactReference, error)
     var wg sync.WaitGroup
     result := analyzer.NewAnalysisResult()
     limit := semaphore.NewWeighted(parallel)
+    if a.artifactOption.Slow {
+        // Analyze files in series
+        limit = semaphore.NewWeighted(1)
+    }

     err := a.walker.Walk(a.rootPath, func(filePath string, info os.FileInfo, opener analyzer.Opener) error {
         directory := a.rootPath
@@ -1,6 +1,7 @@
 package walker

 import (
+    "io/fs"
     "os"
     "path/filepath"

@@ -8,15 +9,16 @@ import (
     "golang.org/x/xerrors"

     dio "github.com/aquasecurity/go-dep-parser/pkg/io"
+    "github.com/aquasecurity/trivy/pkg/log"
 )

 type FS struct {
     walker
 }

-func NewFS(skipFiles, skipDirs []string) FS {
+func NewFS(skipFiles, skipDirs []string, slow bool) FS {
     return FS{
-        walker: newWalker(skipFiles, skipDirs),
+        walker: newWalker(skipFiles, skipDirs, slow),
     }
 }

@@ -44,6 +46,18 @@ func (w FS) Walk(root string, fn WalkFunc) error {
         return nil
     }

+    if w.slow {
+        // In series: slow, with lower CPU/memory
+        return walkSlow(root, walkFn)
+    }
+
+    // In parallel: fast, with higher CPU/memory
+    return walkFast(root, walkFn)
+}
+
+type fastWalkFunc func(pathname string, fi os.FileInfo) error
+
+func walkFast(root string, walkFn fastWalkFunc) error {
     // error function called for every error encountered
     errorCallbackOption := swalker.WithErrorCallback(func(pathname string, err error) error {
         // ignore permission errors
@@ -56,12 +70,28 @@ func (w FS) Walk(root string, fn WalkFunc) error {

     // Multiple goroutines stat the filesystem concurrently. The provided
     // walkFn must be safe for concurrent use.
+    log.Logger.Debugf("Walk the file tree rooted at '%s' in parallel", root)
     if err := swalker.Walk(root, walkFn, errorCallbackOption); err != nil {
         return xerrors.Errorf("walk error: %w", err)
     }
     return nil
 }

+func walkSlow(root string, walkFn fastWalkFunc) error {
+    log.Logger.Debugf("Walk the file tree rooted at '%s' in series", root)
+    err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
+        info, err := d.Info()
+        if err != nil {
+            return xerrors.Errorf("file info error: %w", err)
+        }
+        return walkFn(path, info)
+    })
+    if err != nil {
+        return xerrors.Errorf("walk dir error: %w", err)
+    }
+    return nil
+}
+
 // fileOpener returns a function opening a file.
 func (w *walker) fileOpener(pathname string) func() (dio.ReadSeekCloserAt, error) {
     return func() (dio.ReadSeekCloserAt, error) {
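Aside: `walkSlow` above is built on `filepath.WalkDir`, which visits entries one at a time and defers the `os.FileInfo` lookup to `DirEntry.Info()`. A self-contained sketch of that standard-library API (the root path and callback are illustrative, not Trivy code):

    package main

    import (
        "fmt"
        "io/fs"
        "os"
        "path/filepath"
    )

    func main() {
        root := "." // placeholder root
        err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
            if err != nil {
                return err // an error reaching this entry; stop walking
            }
            info, err := d.Info() // lazily resolve os.FileInfo, as walkSlow does
            if err != nil {
                return err
            }
            fmt.Printf("%s (%d bytes)\n", path, info.Size())
            return nil
        })
        if err != nil {
            fmt.Fprintln(os.Stderr, "walk error:", err)
        }
    }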
@@ -79,7 +79,7 @@ func TestDir_Walk(t *testing.T) {
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            w := walker.NewFS(tt.fields.skipFiles, tt.fields.skipDirs)
+            w := walker.NewFS(tt.fields.skipFiles, tt.fields.skipDirs, true)

             err := w.Walk(tt.rootDir, tt.analyzeFn)
             if tt.wantErr != "" {
@@ -22,15 +22,23 @@ const (

 type LayerTar struct {
     walker
+    threshold int64
 }

-func NewLayerTar(skipFiles, skipDirs []string) LayerTar {
+func NewLayerTar(skipFiles, skipDirs []string, slow bool) LayerTar {
+    threshold := defaultSizeThreshold
+    if slow {
+        threshold = slowSizeThreshold
+    }
+
     return LayerTar{
-        walker: newWalker(skipFiles, skipDirs),
+        walker:    newWalker(skipFiles, skipDirs, slow),
+        threshold: threshold,
     }
 }

 func (w LayerTar) Walk(layer io.Reader, analyzeFn WalkFunc) ([]string, []string, error) {
     var opqDirs, whFiles, skipDirs []string
     tr := tar.NewReader(layer)
     for {
@@ -86,7 +94,7 @@ func (w LayerTar) Walk(layer io.Reader, analyzeFn WalkFunc) ([]string, []string,
 }

 func (w LayerTar) processFile(filePath string, tr *tar.Reader, fi fs.FileInfo, analyzeFn WalkFunc) error {
-    tf := newTarFile(fi.Size(), tr)
+    tf := newTarFile(fi.Size(), tr, w.threshold)
     defer func() {
         // nolint
         _ = tf.Clean()
@@ -120,14 +128,17 @@ type tarFile struct {
     size   int64
     reader io.Reader

+    threshold int64 // Files larger than this threshold are written to file without being read into memory.
+
     content  []byte // It will be populated if this file is small
     filePath string // It will be populated if this file is large
 }

-func newTarFile(size int64, r io.Reader) tarFile {
+func newTarFile(size int64, r io.Reader, threshold int64) tarFile {
     return tarFile{
         size:      size,
         reader:    r,
+        threshold: threshold,
     }
 }

@@ -137,7 +148,7 @@ func newTarFile(size int64, r io.Reader) tarFile {
 func (o *tarFile) Open() (dio.ReadSeekCloserAt, error) {
     o.once.Do(func() {
         // When the file is large, it will be written down to a temp file.
-        if o.size >= ThresholdSize {
+        if o.size >= o.threshold {
             f, err := os.CreateTemp("", "fanal-*")
             if err != nil {
                 o.err = xerrors.Errorf("failed to create the temp file: %w", err)
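Aside: the `threshold` plumbed through `newTarFile` decides whether a tar entry is buffered in memory or spilled to a temporary file, which is the knob slow mode tightens. A rough standalone sketch of that decision (the helper name, sizes, and return shape are made up for illustration):

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "os"
        "strings"
    )

    // bufferOrSpill keeps small inputs in memory and writes large ones to a temp
    // file, returning either the in-memory content or the temp file path.
    func bufferOrSpill(r io.Reader, size, threshold int64) (string, []byte, error) {
        if size >= threshold {
            f, err := os.CreateTemp("", "fanal-*") // same temp-file naming as the diff
            if err != nil {
                return "", nil, err
            }
            defer f.Close()
            if _, err := io.Copy(f, r); err != nil {
                return "", nil, err
            }
            return f.Name(), nil, nil // caller reopens (and later removes) the file
        }
        var buf bytes.Buffer
        if _, err := io.Copy(&buf, r); err != nil {
            return "", nil, err
        }
        return "", buf.Bytes(), nil
    }

    func main() {
        data := "hello"
        path, content, err := bufferOrSpill(strings.NewReader(data), int64(len(data)), 1<<10)
        fmt.Println(path, string(content), err) // small input stays in memory
    }

A lower threshold trades extra disk I/O for a smaller peak memory footprint, which matches the intent of the --slow flag.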
@@ -82,7 +82,7 @@ func TestLayerTar_Walk(t *testing.T) {
             f, err := os.Open("testdata/test.tar")
             require.NoError(t, err)

-            w := walker.NewLayerTar(tt.fields.skipFiles, tt.fields.skipDirs)
+            w := walker.NewLayerTar(tt.fields.skipFiles, tt.fields.skipDirs, true)

             gotOpqDirs, gotWhFiles, err := w.Walk(f, tt.analyzeFn)
             if tt.wantErr != "" {
@@ -15,16 +15,20 @@ var (
     SystemDirs = []string{"proc", "sys", "dev"}
 )

-const ThresholdSize = int64(200) << 20
+const (
+    defaultSizeThreshold = int64(200) << 20 // 200MB
+    slowSizeThreshold    = int64(10) << 10  // 10KB
+)

 type WalkFunc func(filePath string, info os.FileInfo, opener analyzer.Opener) error

 type walker struct {
     skipFiles []string
     skipDirs  []string
+    slow      bool
 }

-func newWalker(skipFiles, skipDirs []string) walker {
+func newWalker(skipFiles, skipDirs []string, slow bool) walker {
     var cleanSkipFiles, cleanSkipDirs []string
     for _, skipFile := range skipFiles {
         skipFile = filepath.ToSlash(filepath.Clean(skipFile))
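Aside: the thresholds use shift notation: `int64(200) << 20` is 200 x 2^20 bytes = 200 MiB, and (assuming the "10KB" comment reflects the intended slow-mode value) `int64(10) << 10` is 10 x 2^10 bytes = 10 KiB. A quick check of that arithmetic:

    package main

    import "fmt"

    func main() {
        fmt.Println(int64(200) << 20) // 209715200 bytes = 200 MiB
        fmt.Println(int64(10) << 10)  // 10240 bytes = 10 KiB
    }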
@@ -41,6 +45,7 @@ func newWalker(skipFiles, skipDirs []string) walker {
     return walker{
         skipFiles: cleanSkipFiles,
         skipDirs:  cleanSkipDirs,
+        slow:      slow,
     }
 }

@@ -38,6 +38,12 @@ var (
         Value:      []string{},
         Usage:      "specify config file patterns",
     }
+    SlowFlag = Flag{
+        Name:       "slow",
+        ConfigName: "scan.slow",
+        Value:      false,
+        Usage:      "scan over time with lower CPU and memory utilization",
+    }
     SBOMSourcesFlag = Flag{
         Name:       "sbom-sources",
         ConfigName: "scan.sbom-sources",
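Aside: once the flag is wired through ToOptions below, slow mode should be reachable from the command line or through the config key it declares; the image name here is only an example, and the YAML nesting assumes Trivy's usual mapping of ConfigName dot-paths onto the config file:

    trivy image --slow alpine:3.16

    # trivy.yaml
    scan:
      slow: true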
@@ -58,6 +64,7 @@ type ScanFlagGroup struct {
     OfflineScan    *Flag
     SecurityChecks *Flag
     FilePatterns   *Flag
+    Slow           *Flag
     SBOMSources    *Flag
     RekorURL       *Flag
 }
@@ -69,6 +76,7 @@ type ScanOptions struct {
     OfflineScan    bool
     SecurityChecks []string
     FilePatterns   []string
+    Slow           bool
     SBOMSources    []string
     RekorURL       string
 }
@@ -80,6 +88,7 @@ func NewScanFlagGroup() *ScanFlagGroup {
         OfflineScan:    &OfflineScanFlag,
         SecurityChecks: &SecurityChecksFlag,
         FilePatterns:   &FilePatternsFlag,
+        Slow:           &SlowFlag,
         SBOMSources:    &SBOMSourcesFlag,
         RekorURL:       &RekorURLFlag,
     }
@@ -90,7 +99,8 @@ func (f *ScanFlagGroup) Name() string {
 }

 func (f *ScanFlagGroup) Flags() []*Flag {
-    return []*Flag{f.SkipDirs, f.SkipFiles, f.OfflineScan, f.SecurityChecks, f.FilePatterns, f.SBOMSources, f.RekorURL}
+    return []*Flag{f.SkipDirs, f.SkipFiles, f.OfflineScan, f.SecurityChecks, f.FilePatterns,
+        f.Slow, f.SBOMSources, f.RekorURL}
 }

 func (f *ScanFlagGroup) ToOptions(args []string) (ScanOptions, error) {
@@ -115,6 +125,7 @@ func (f *ScanFlagGroup) ToOptions(args []string) (ScanOptions, error) {
         OfflineScan:    getBool(f.OfflineScan),
         SecurityChecks: securityChecks,
         FilePatterns:   getStringSlice(f.FilePatterns),
+        Slow:           getBool(f.Slow),
         SBOMSources:    sbomSources,
         RekorURL:       getString(f.RekorURL),
     }, nil