feat: add memory cache backend (#7048)

Signed-off-by: knqyf263 <knqyf263@gmail.com>
This commit is contained in:
Teppei Fukuda
2024-06-28 13:42:02 +04:00
committed by GitHub
parent 14d71ba63c
commit 55ccd06df4
16 changed files with 577 additions and 42 deletions

View File

@@ -11,7 +11,7 @@ The cache option is common to all scanners.
## Clear Caches
`trivy clean` subcommand removes caches.
```
```bash
$ trivy clean --scan-cache
```
@@ -31,31 +31,59 @@ See `trivy clean --help` for details.
## Cache Directory
Specify where the cache is stored with `--cache-dir`.
```
```bash
$ trivy --cache-dir /tmp/trivy/ image python:3.4-alpine3.9
```
## Cache Backend
## Scan Cache Backend
!!! warning "EXPERIMENTAL"
This feature might change without preserving backwards compatibility.
Trivy supports local filesystem and Redis as the cache backend. This option is useful especially for client/server mode.
Trivy utilizes a scan cache to store analysis results, such as package lists.
It supports three types of backends for this cache:
Two options:
- `fs`
- the cache path can be specified by `--cache-dir`
- `redis://`
- Local File System (`fs`)
- The cache path can be specified by `--cache-dir`
- Memory (`memory`)
- Redis (`redis://`)
- `redis://[HOST]:[PORT]`
- TTL can be configured via `--cache-ttl`
### Local File System
The local file system backend is the default choice for container and VM image scans.
When scanning container images, it stores analysis results on a per-layer basis, using layer IDs as keys.
This approach enables faster scans of the same container image or different images that share layers.
!!! note
Internally, this backend uses [BoltDB][boltdb], which has an important limitation: only one process can access the cache at a time.
Subsequent processes attempting to access the cache will block until the first process releases the lock.
For more details on this limitation, refer to the [troubleshooting guide][parallel-run].
### Memory
The memory backend stores analysis results in memory, which means the cache is discarded when the process ends.
This makes it useful in scenarios where caching is not required or desired.
It is the default for repository, filesystem, and SBOM scans, and can also be used for container image scans when caching is unnecessary.
To use the memory backend for a container image scan, you can use the following command:
```bash
$ trivy image debian:11 --cache-backend memory
```
### Redis
The Redis backend is particularly useful when you need to share the cache across multiple Trivy instances.
You can set up Trivy to use a Redis backend with a command like this:
```bash
$ trivy server --cache-backend redis://localhost:6379
```
This approach allows for centralized caching, which can be beneficial in distributed or high-concurrency environments.
If you want to use TLS with Redis, you can enable it by specifying the `--redis-tls` flag.
```shell
```bash
$ trivy server --cache-backend redis://localhost:6379 --redis-tls
```
@@ -72,6 +100,8 @@ $ trivy server --cache-backend redis://localhost:6379 \
[trivy-db]: ./db.md#vulnerability-database
[trivy-java-db]: ./db.md#java-index-database
[misconf-checks]: ../scanner/misconfiguration/check/builtin.md
[boltdb]: https://github.com/etcd-io/bbolt
[parallel-run]: https://aquasecurity.github.io/trivy/v0.52/docs/references/troubleshooting/#running-in-parallel-takes-same-time-as-series-run
[^1]: Downloaded when scanning for vulnerabilities
[^2]: Downloaded when scanning `jar/war/par/ear` files

View File

@@ -9,7 +9,7 @@ trivy config [flags] DIR
### Options
```
--cache-backend string cache backend (e.g. redis://localhost:6379) (default "fs")
--cache-backend string [EXPERIMENTAL] cache backend (e.g. redis://localhost:6379) (default "memory")
--cache-ttl duration cache TTL when using redis as cache backend
--cf-params strings specify paths to override the CloudFormation parameters files
--check-namespaces strings Rego namespaces

View File

@@ -19,7 +19,7 @@ trivy filesystem [flags] PATH
### Options
```
--cache-backend string cache backend (e.g. redis://localhost:6379) (default "fs")
--cache-backend string [EXPERIMENTAL] cache backend (e.g. redis://localhost:6379) (default "memory")
--cache-ttl duration cache TTL when using redis as cache backend
--cf-params strings specify paths to override the CloudFormation parameters files
--check-namespaces strings Rego namespaces

View File

@@ -34,7 +34,7 @@ trivy image [flags] IMAGE_NAME
### Options
```
--cache-backend string cache backend (e.g. redis://localhost:6379) (default "fs")
--cache-backend string [EXPERIMENTAL] cache backend (e.g. redis://localhost:6379) (default "fs")
--cache-ttl duration cache TTL when using redis as cache backend
--check-namespaces strings Rego namespaces
--checks-bundle-repository string OCI registry URL to retrieve checks bundle from (default "ghcr.io/aquasecurity/trivy-checks:0")

View File

@@ -30,7 +30,7 @@ trivy kubernetes [flags] [CONTEXT]
```
--burst int specify the maximum burst for throttle (default 10)
--cache-backend string cache backend (e.g. redis://localhost:6379) (default "fs")
--cache-backend string [EXPERIMENTAL] cache backend (e.g. redis://localhost:6379) (default "fs")
--cache-ttl duration cache TTL when using redis as cache backend
--check-namespaces strings Rego namespaces
--checks-bundle-repository string OCI registry URL to retrieve checks bundle from (default "ghcr.io/aquasecurity/trivy-checks:0")

View File

@@ -19,7 +19,7 @@ trivy repository [flags] (REPO_PATH | REPO_URL)
```
--branch string pass the branch name to be scanned
--cache-backend string cache backend (e.g. redis://localhost:6379) (default "fs")
--cache-backend string [EXPERIMENTAL] cache backend (e.g. redis://localhost:6379) (default "memory")
--cache-ttl duration cache TTL when using redis as cache backend
--cf-params strings specify paths to override the CloudFormation parameters files
--check-namespaces strings Rego namespaces

View File

@@ -22,7 +22,7 @@ trivy rootfs [flags] ROOTDIR
### Options
```
--cache-backend string cache backend (e.g. redis://localhost:6379) (default "fs")
--cache-backend string [EXPERIMENTAL] cache backend (e.g. redis://localhost:6379) (default "memory")
--cache-ttl duration cache TTL when using redis as cache backend
--cf-params strings specify paths to override the CloudFormation parameters files
--check-namespaces strings Rego namespaces

View File

@@ -20,7 +20,7 @@ trivy sbom [flags] SBOM_PATH
### Options
```
--cache-backend string cache backend (e.g. redis://localhost:6379) (default "fs")
--cache-backend string [EXPERIMENTAL] cache backend (e.g. redis://localhost:6379) (default "memory")
--cache-ttl duration cache TTL when using redis as cache backend
--compliance string compliance report to generate
--custom-headers strings custom headers in client mode

View File

@@ -20,7 +20,7 @@ trivy server [flags]
### Options
```
--cache-backend string cache backend (e.g. redis://localhost:6379) (default "fs")
--cache-backend string [EXPERIMENTAL] cache backend (e.g. redis://localhost:6379) (default "fs")
--cache-ttl duration cache TTL when using redis as cache backend
--db-repository string OCI repository to retrieve trivy-db from (default "ghcr.io/aquasecurity/trivy-db:2")
--download-db-only download/update vulnerability database but don't run a scan

View File

@@ -21,7 +21,7 @@ trivy vm [flags] VM_IMAGE
```
--aws-region string AWS region to scan
--cache-backend string cache backend (e.g. redis://localhost:6379) (default "fs")
--cache-backend string [EXPERIMENTAL] cache backend (e.g. redis://localhost:6379) (default "fs")
--cache-ttl duration cache TTL when using redis as cache backend
--checks-bundle-repository string OCI registry URL to retrieve checks bundle from (default "ghcr.io/aquasecurity/trivy-checks:0")
--compliance string compliance report to generate

8
pkg/cache/client.go vendored
View File

@@ -5,12 +5,15 @@ import (
"time"
"golang.org/x/xerrors"
"github.com/aquasecurity/trivy/pkg/log"
)
const (
TypeUnknown Type = "unknown"
TypeFS Type = "fs"
TypeRedis Type = "redis"
TypeMemory Type = "memory"
)
type Type string
@@ -33,6 +36,8 @@ func NewType(backend string) Type {
return TypeRedis
case backend == "fs", backend == "":
return TypeFS
case backend == "memory":
return TypeMemory
default:
return TypeUnknown
}
@@ -44,6 +49,7 @@ func New(opts Options) (Cache, func(), error) {
var cache Cache
t := NewType(opts.Backend)
log.Debug("Initializing scan cache...", log.String("type", string(t)))
switch t {
case TypeRedis:
redisCache, err := NewRedisCache(opts.Backend, opts.RedisCACert, opts.RedisCert, opts.RedisKey, opts.RedisTLS, opts.TTL)
@@ -58,6 +64,8 @@ func New(opts Options) (Cache, func(), error) {
return nil, cleanup, xerrors.Errorf("unable to initialize fs cache: %w", err)
}
cache = fsCache
case TypeMemory:
cache = NewMemoryCache()
default:
return nil, cleanup, xerrors.Errorf("unknown cache type: %s", t)
}

98
pkg/cache/memory.go vendored Normal file
View File

@@ -0,0 +1,98 @@
package cache
import (
"sync"
"golang.org/x/xerrors"
"github.com/aquasecurity/trivy/pkg/fanal/types"
)
// Compile-time check that MemoryCache implements the Cache interface.
var _ Cache = &MemoryCache{}

// MemoryCache is an in-memory scan cache backend. Entries live only for the
// lifetime of the process and are discarded when it exits, which makes it
// suitable when persistence of analysis results is not needed.
type MemoryCache struct {
	artifacts sync.Map // Map to store artifact information
	blobs     sync.Map // Map to store blob information
}
// NewMemoryCache returns an empty in-memory cache ready for use.
// The zero value of MemoryCache is equally usable; this constructor exists
// for symmetry with the other cache backends.
func NewMemoryCache() *MemoryCache {
	return new(MemoryCache)
}
// PutArtifact stores the artifact information in the memory cache.
// A later call with the same ID overwrites the previous entry.
func (c *MemoryCache) PutArtifact(artifactID string, artifactInfo types.ArtifactInfo) error {
	// sync.Map.Store is safe for concurrent use; last write wins.
	c.artifacts.Store(artifactID, artifactInfo)
	return nil // the in-memory backend cannot fail
}
// PutBlob stores the blob information in the memory cache.
// A later call with the same ID overwrites the previous entry.
func (c *MemoryCache) PutBlob(blobID string, blobInfo types.BlobInfo) error {
	// sync.Map.Store handles concurrent writers; no extra locking is needed.
	c.blobs.Store(blobID, blobInfo)
	return nil // the in-memory backend cannot fail
}
// DeleteBlobs removes the specified blobs from the memory cache.
// IDs that are not present are silently ignored.
func (c *MemoryCache) DeleteBlobs(blobIDs []string) error {
	for _, id := range blobIDs {
		c.blobs.Delete(id)
	}
	return nil // deletion of absent keys is a no-op, never an error
}
// GetArtifact retrieves the artifact information from the memory cache.
// It returns an error when the ID is absent or when the stored value has an
// unexpected type.
func (c *MemoryCache) GetArtifact(artifactID string) (types.ArtifactInfo, error) {
	v, found := c.artifacts.Load(artifactID)
	if !found {
		return types.ArtifactInfo{}, xerrors.Errorf("artifact (%s) not found in memory cache", artifactID)
	}
	a, valid := v.(types.ArtifactInfo)
	if !valid {
		// Should not happen: PutArtifact only ever stores types.ArtifactInfo.
		return types.ArtifactInfo{}, xerrors.Errorf("invalid type for artifact (%s) in memory cache", artifactID)
	}
	return a, nil
}
// GetBlob retrieves the blob information from the memory cache.
// It returns an error when the ID is absent or when the stored value has an
// unexpected type.
func (c *MemoryCache) GetBlob(blobID string) (types.BlobInfo, error) {
	v, found := c.blobs.Load(blobID)
	if !found {
		return types.BlobInfo{}, xerrors.Errorf("blob (%s) not found in memory cache", blobID)
	}
	b, valid := v.(types.BlobInfo)
	if !valid {
		// Should not happen: PutBlob only ever stores types.BlobInfo.
		return types.BlobInfo{}, xerrors.Errorf("invalid type for blob (%s) in memory cache", blobID)
	}
	return b, nil
}
// MissingBlobs reports whether the artifact is absent from the memory cache
// and which of the given blob IDs are absent. The returned slice is nil when
// no blobs are missing.
func (c *MemoryCache) MissingBlobs(artifactID string, blobIDs []string) (bool, []string, error) {
	_, artifactErr := c.GetArtifact(artifactID)
	missingArtifact := artifactErr != nil

	var missing []string
	for _, id := range blobIDs {
		if _, err := c.GetBlob(id); err != nil {
			missing = append(missing, id)
		}
	}
	return missingArtifact, missing, nil
}
// Close clears the artifact and blob information from the memory cache.
// The cache remains usable afterwards; it is simply emptied.
func (c *MemoryCache) Close() error {
	// Delete entries in place rather than assigning a fresh sync.Map:
	// `c.artifacts = sync.Map{}` copies a struct containing a mutex, which
	// trips `go vet` (copylocks) and would race with any concurrent users
	// holding references to the maps.
	c.artifacts.Range(func(key, _ interface{}) bool {
		c.artifacts.Delete(key)
		return true
	})
	c.blobs.Range(func(key, _ interface{}) bool {
		c.blobs.Delete(key)
		return true
	})
	return nil
}
// Clear clears the artifact and blob information from the memory cache.
// It is behaviorally identical to Close for this backend.
func (c *MemoryCache) Clear() error {
	// Delete entries in place rather than assigning a fresh sync.Map:
	// `c.artifacts = sync.Map{}` copies a struct containing a mutex, which
	// trips `go vet` (copylocks) and would race with any concurrent users
	// holding references to the maps.
	c.artifacts.Range(func(key, _ interface{}) bool {
		c.artifacts.Delete(key)
		return true
	})
	c.blobs.Range(func(key, _ interface{}) bool {
		c.blobs.Delete(key)
		return true
	})
	return nil
}

396
pkg/cache/memory_test.go vendored Normal file
View File

@@ -0,0 +1,396 @@
package cache_test
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/aquasecurity/trivy/pkg/cache"
"github.com/aquasecurity/trivy/pkg/fanal/types"
)
// TestMemoryCache_PutArtifact verifies that an artifact stored via PutArtifact
// can be read back unchanged with GetArtifact.
func TestMemoryCache_PutArtifact(t *testing.T) {
	tests := []struct {
		name         string
		artifactID   string
		artifactInfo types.ArtifactInfo
	}{
		{
			name:       "happy path",
			artifactID: "sha256:8652b9f0cb4c0599575e5a003f5906876e10c1ceb2ab9fe1786712dac14a50cf",
			artifactInfo: types.ArtifactInfo{
				SchemaVersion: 2,
				Architecture:  "amd64",
				Created:       time.Date(2020, 11, 14, 0, 20, 4, 0, time.UTC),
				DockerVersion: "19.03.12",
				OS:            "linux",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := cache.NewMemoryCache()

			err := c.PutArtifact(tt.artifactID, tt.artifactInfo)
			require.NoError(t, err)

			// Read the entry back and make sure it round-trips intact.
			got, err := c.GetArtifact(tt.artifactID)
			require.NoError(t, err)
			assert.Equal(t, tt.artifactInfo, got)
		})
	}
}
// TestMemoryCache_PutBlob verifies that a blob stored via PutBlob can be read
// back unchanged with GetBlob.
func TestMemoryCache_PutBlob(t *testing.T) {
	tests := []struct {
		name     string
		blobID   string
		blobInfo types.BlobInfo
	}{
		{
			name:   "happy path",
			blobID: "sha256:03901b4a2ea88eeaad62dbe59b072b28b6efa00491962b8741081c5df50c65e0",
			blobInfo: types.BlobInfo{
				SchemaVersion: 2,
				Digest:        "sha256:9d48c3bd43c520dc2784e868a780e976b207cbf493eaff8c6596eb871cbd9609",
				DiffID:        "sha256:03901b4a2ea88eeaad62dbe59b072b28b6efa00491962b8741081c5df50c65e0",
				OS: types.OS{
					Family: "alpine",
					Name:   "3.10.2",
				},
				PackageInfos: []types.PackageInfo{
					{
						FilePath: "lib/apk/db/installed",
						Packages: []types.Package{
							{
								Name:       "musl",
								Version:    "1.1.22-r3",
								SrcName:    "musl",
								SrcVersion: "1.1.22-r3",
							},
						},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := cache.NewMemoryCache()

			err := c.PutBlob(tt.blobID, tt.blobInfo)
			require.NoError(t, err)

			// Read the entry back and make sure it round-trips intact.
			got, err := c.GetBlob(tt.blobID)
			require.NoError(t, err)
			assert.Equal(t, tt.blobInfo, got)
		})
	}
}
// TestMemoryCache_GetArtifact covers both the hit path (an entry stored
// beforehand is returned intact) and the miss path (an unknown ID yields a
// "not found" error).
func TestMemoryCache_GetArtifact(t *testing.T) {
	tests := []struct {
		name         string
		artifactID   string
		artifactInfo types.ArtifactInfo
		wantErr      bool
	}{
		{
			name:       "happy path",
			artifactID: "sha256:8652b9f0cb4c0599575e5a003f5906876e10c1ceb2ab9fe1786712dac14a50cf",
			artifactInfo: types.ArtifactInfo{
				SchemaVersion: 2,
				Architecture:  "amd64",
				Created:       time.Date(2020, 11, 14, 0, 20, 4, 0, time.UTC),
				DockerVersion: "19.03.12",
				OS:            "linux",
			},
			wantErr: false,
		},
		{
			name:       "not found",
			artifactID: "sha256:nonexistent",
			wantErr:    true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := cache.NewMemoryCache()

			// Seed the cache only for the hit case; the miss case queries an
			// empty cache.
			if !tt.wantErr {
				err := c.PutArtifact(tt.artifactID, tt.artifactInfo)
				require.NoError(t, err)
			}

			got, err := c.GetArtifact(tt.artifactID)
			if tt.wantErr {
				require.ErrorContains(t, err, "not found in memory cache")
				return
			}
			require.NoError(t, err)
			assert.Equal(t, tt.artifactInfo, got)
		})
	}
}
// TestMemoryCache_GetBlob covers both the hit path (an entry stored beforehand
// is returned intact) and the miss path (an unknown ID yields a "not found"
// error).
func TestMemoryCache_GetBlob(t *testing.T) {
	tests := []struct {
		name     string
		blobID   string
		blobInfo types.BlobInfo
		wantErr  bool
	}{
		{
			name:   "happy path",
			blobID: "sha256:03901b4a2ea88eeaad62dbe59b072b28b6efa00491962b8741081c5df50c65e0",
			blobInfo: types.BlobInfo{
				SchemaVersion: 2,
				Digest:        "sha256:9d48c3bd43c520dc2784e868a780e976b207cbf493eaff8c6596eb871cbd9609",
				DiffID:        "sha256:03901b4a2ea88eeaad62dbe59b072b28b6efa00491962b8741081c5df50c65e0",
				OS: types.OS{
					Family: "alpine",
					Name:   "3.10.2",
				},
			},
			wantErr: false,
		},
		{
			name:    "not found",
			blobID:  "sha256:nonexistent",
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := cache.NewMemoryCache()

			// Seed the cache only for the hit case; the miss case queries an
			// empty cache.
			if !tt.wantErr {
				err := c.PutBlob(tt.blobID, tt.blobInfo)
				require.NoError(t, err)
			}

			got, err := c.GetBlob(tt.blobID)
			if tt.wantErr {
				require.ErrorContains(t, err, "not found in memory cache")
				return
			}
			require.NoError(t, err)
			assert.Equal(t, tt.blobInfo, got)
		})
	}
}
// TestMemoryCache_MissingBlobs checks that MissingBlobs correctly reports a
// missing artifact and the subset of blob IDs absent from the cache, across
// all four combinations of present/absent artifact and blobs.
func TestMemoryCache_MissingBlobs(t *testing.T) {
	tests := []struct {
		name                string
		artifactID          string
		blobIDs             []string
		putArtifact         bool     // seed the artifact before the call
		putBlobs            []string // blob IDs to seed before the call
		wantMissingArtifact bool
		wantMissingBlobIDs  []string // nil means "nothing missing"
	}{
		{
			name:       "missing both artifact and blob",
			artifactID: "sha256:artifact1",
			blobIDs: []string{
				"sha256:blob1",
				"sha256:blob2",
			},
			putArtifact:         false,
			putBlobs:            []string{},
			wantMissingArtifact: true,
			wantMissingBlobIDs: []string{
				"sha256:blob1",
				"sha256:blob2",
			},
		},
		{
			name:       "missing artifact only",
			artifactID: "sha256:artifact1",
			blobIDs: []string{
				"sha256:blob1",
				"sha256:blob2",
			},
			putArtifact: false,
			putBlobs: []string{
				"sha256:blob1",
				"sha256:blob2",
			},
			wantMissingArtifact: true,
			wantMissingBlobIDs:  nil,
		},
		{
			name:       "missing one blob",
			artifactID: "sha256:artifact1",
			blobIDs: []string{
				"sha256:blob1",
				"sha256:blob2",
			},
			putArtifact:         true,
			putBlobs:            []string{"sha256:blob1"},
			wantMissingArtifact: false,
			wantMissingBlobIDs:  []string{"sha256:blob2"},
		},
		{
			name:       "no missing blobs",
			artifactID: "sha256:artifact1",
			blobIDs: []string{
				"sha256:blob1",
				"sha256:blob2",
			},
			putArtifact: true,
			putBlobs: []string{
				"sha256:blob1",
				"sha256:blob2",
			},
			wantMissingArtifact: false,
			wantMissingBlobIDs:  nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := cache.NewMemoryCache()

			// Seed the cache as prescribed by the table entry.
			if tt.putArtifact {
				err := c.PutArtifact(tt.artifactID, types.ArtifactInfo{})
				require.NoError(t, err)
			}
			for _, blobID := range tt.putBlobs {
				err := c.PutBlob(blobID, types.BlobInfo{})
				require.NoError(t, err)
			}

			gotMissingArtifact, gotMissingBlobIDs, err := c.MissingBlobs(tt.artifactID, tt.blobIDs)
			require.NoError(t, err)
			assert.Equal(t, tt.wantMissingArtifact, gotMissingArtifact)
			assert.Equal(t, tt.wantMissingBlobIDs, gotMissingBlobIDs)
		})
	}
}
// TestMemoryCache_DeleteBlobs verifies that DeleteBlobs removes the given
// blobs and that subsequent lookups fail with a "not found" error.
func TestMemoryCache_DeleteBlobs(t *testing.T) {
	tests := []struct {
		name    string
		blobIDs []string
	}{
		{
			name: "delete existing blobs",
			blobIDs: []string{
				"sha256:blob1",
				"sha256:blob2",
			},
		},
		{
			name: "delete non-existing blobs",
			blobIDs: []string{
				"sha256:nonexistent1",
				"sha256:nonexistent2",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := cache.NewMemoryCache()

			// Put some blobs in the cache
			// NOTE(review): the setup inserts tt.blobIDs for every case, so
			// the "delete non-existing blobs" case actually deletes entries
			// that were just inserted — consider skipping the seeding there
			// to genuinely exercise the absent-key path.
			for _, blobID := range tt.blobIDs {
				err := c.PutBlob(blobID, types.BlobInfo{})
				require.NoError(t, err)
			}

			err := c.DeleteBlobs(tt.blobIDs)
			require.NoError(t, err)

			// Check that the blobs are no longer in the cache
			for _, blobID := range tt.blobIDs {
				_, err := c.GetBlob(blobID)
				require.Error(t, err)
				assert.Contains(t, err.Error(), "not found in memory cache")
			}
		})
	}
}
// TestMemoryCache_Clear ensures Clear removes every stored artifact and blob,
// leaving lookups to fail with "not found" errors.
func TestMemoryCache_Clear(t *testing.T) {
	t.Run("clear cache", func(t *testing.T) {
		const (
			artifactID = "sha256:artifact1"
			blobID     = "sha256:blob1"
		)

		c := cache.NewMemoryCache()
		require.NoError(t, c.PutArtifact(artifactID, types.ArtifactInfo{}))
		require.NoError(t, c.PutBlob(blobID, types.BlobInfo{}))

		require.NoError(t, c.Clear())

		// Both entries must be gone after clearing.
		_, err := c.GetArtifact(artifactID)
		require.Error(t, err)
		assert.Contains(t, err.Error(), "not found in memory cache")

		_, err = c.GetBlob(blobID)
		require.Error(t, err)
		assert.Contains(t, err.Error(), "not found in memory cache")
	})
}
// TestMemoryCache_Close ensures Close empties the cache, leaving lookups to
// fail with "not found" errors.
func TestMemoryCache_Close(t *testing.T) {
	t.Run("close cache", func(t *testing.T) {
		const (
			artifactID = "sha256:artifact1"
			blobID     = "sha256:blob1"
		)

		c := cache.NewMemoryCache()
		require.NoError(t, c.PutArtifact(artifactID, types.ArtifactInfo{}))
		require.NoError(t, c.PutBlob(blobID, types.BlobInfo{}))

		require.NoError(t, c.Close())

		// Both entries must be gone after closing.
		_, err := c.GetArtifact(artifactID)
		require.Error(t, err)
		assert.Contains(t, err.Error(), "not found in memory cache")

		_, err = c.GetBlob(blobID)
		require.Error(t, err)
		assert.Contains(t, err.Error(), "not found in memory cache")
	})
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/spf13/viper"
"golang.org/x/xerrors"
"github.com/aquasecurity/trivy/pkg/cache"
"github.com/aquasecurity/trivy/pkg/commands/artifact"
"github.com/aquasecurity/trivy/pkg/commands/clean"
"github.com/aquasecurity/trivy/pkg/commands/convert"
@@ -330,12 +331,6 @@ func NewImageCommand(globalFlags *flag.GlobalFlagGroup) *cobra.Command {
}
func NewFilesystemCommand(globalFlags *flag.GlobalFlagGroup) *cobra.Command {
reportFlagGroup := flag.NewReportFlagGroup()
reportFormat := flag.ReportFormatFlag.Clone()
reportFormat.Usage = "specify a compliance report format for the output" // @TODO: support --report summary for non compliance reports
reportFlagGroup.ReportFormat = reportFormat
reportFlagGroup.ExitOnEOL = nil // disable '--exit-on-eol'
fsFlags := &flag.Flags{
GlobalFlagGroup: globalFlags,
CacheFlagGroup: flag.NewCacheFlagGroup(),
@@ -346,12 +341,16 @@ func NewFilesystemCommand(globalFlags *flag.GlobalFlagGroup) *cobra.Command {
RemoteFlagGroup: flag.NewClientFlags(), // for client/server mode
RegistryFlagGroup: flag.NewRegistryFlagGroup(),
RegoFlagGroup: flag.NewRegoFlagGroup(),
ReportFlagGroup: reportFlagGroup,
ReportFlagGroup: flag.NewReportFlagGroup(),
ScanFlagGroup: flag.NewScanFlagGroup(),
SecretFlagGroup: flag.NewSecretFlagGroup(),
VulnerabilityFlagGroup: flag.NewVulnerabilityFlagGroup(),
}
fsFlags.CacheFlagGroup.CacheBackend.Default = string(cache.TypeMemory) // Use memory cache by default
fsFlags.ReportFlagGroup.ReportFormat.Usage = "specify a compliance report format for the output" // @TODO: support --report summary for non compliance reports
fsFlags.ReportFlagGroup.ExitOnEOL = nil // disable '--exit-on-eol'
cmd := &cobra.Command{
Use: "filesystem [flags] PATH",
Aliases: []string{"fs"},
@@ -405,10 +404,11 @@ func NewRootfsCommand(globalFlags *flag.GlobalFlagGroup) *cobra.Command {
SecretFlagGroup: flag.NewSecretFlagGroup(),
VulnerabilityFlagGroup: flag.NewVulnerabilityFlagGroup(),
}
rootfsFlags.ReportFlagGroup.ReportFormat = nil // TODO: support --report summary
rootfsFlags.ReportFlagGroup.Compliance = nil // disable '--compliance'
rootfsFlags.ReportFlagGroup.ReportFormat = nil // disable '--report'
rootfsFlags.ScanFlagGroup.IncludeDevDeps = nil // disable '--include-dev-deps'
rootfsFlags.ReportFlagGroup.ReportFormat = nil // TODO: support --report summary
rootfsFlags.ReportFlagGroup.Compliance = nil // disable '--compliance'
rootfsFlags.ReportFlagGroup.ReportFormat = nil // disable '--report'
rootfsFlags.ScanFlagGroup.IncludeDevDeps = nil // disable '--include-dev-deps'
rootfsFlags.CacheFlagGroup.CacheBackend.Default = string(cache.TypeMemory) // Use memory cache by default
cmd := &cobra.Command{
Use: "rootfs [flags] ROOTDIR",
@@ -469,6 +469,8 @@ func NewRepositoryCommand(globalFlags *flag.GlobalFlagGroup) *cobra.Command {
repoFlags.ReportFlagGroup.Compliance = nil // disable '--compliance'
repoFlags.ReportFlagGroup.ExitOnEOL = nil // disable '--exit-on-eol'
repoFlags.CacheFlagGroup.CacheBackend.Default = string(cache.TypeMemory) // Use memory cache by default
cmd := &cobra.Command{
Use: "repository [flags] (REPO_PATH | REPO_URL)",
Aliases: []string{"repo"},
@@ -651,15 +653,6 @@ func NewServerCommand(globalFlags *flag.GlobalFlagGroup) *cobra.Command {
}
func NewConfigCommand(globalFlags *flag.GlobalFlagGroup) *cobra.Command {
reportFlagGroup := flag.NewReportFlagGroup()
reportFlagGroup.DependencyTree = nil // disable '--dependency-tree'
reportFlagGroup.ListAllPkgs = nil // disable '--list-all-pkgs'
reportFlagGroup.ExitOnEOL = nil // disable '--exit-on-eol'
reportFlagGroup.ShowSuppressed = nil // disable '--show-suppressed'
reportFormat := flag.ReportFormatFlag.Clone()
reportFormat.Usage = "specify a compliance report format for the output" // @TODO: support --report summary for non compliance reports
reportFlagGroup.ReportFormat = reportFormat
scanFlags := &flag.ScanFlagGroup{
// Enable only '--skip-dirs' and '--skip-files' and disable other flags
SkipDirs: flag.SkipDirsFlag.Clone(),
@@ -678,10 +671,17 @@ func NewConfigCommand(globalFlags *flag.GlobalFlagGroup) *cobra.Command {
// disable unneeded flags
K8sVersion: flag.K8sVersionFlag.Clone(),
},
ReportFlagGroup: reportFlagGroup,
ReportFlagGroup: flag.NewReportFlagGroup(),
ScanFlagGroup: scanFlags,
}
configFlags.ReportFlagGroup.DependencyTree = nil // disable '--dependency-tree'
configFlags.ReportFlagGroup.ListAllPkgs = nil // disable '--list-all-pkgs'
configFlags.ReportFlagGroup.ExitOnEOL = nil // disable '--exit-on-eol'
configFlags.ReportFlagGroup.ShowSuppressed = nil // disable '--show-suppressed'
configFlags.ReportFlagGroup.ReportFormat.Usage = "specify a compliance report format for the output" // @TODO: support --report summary for non compliance reports
configFlags.CacheFlagGroup.CacheBackend.Default = string(cache.TypeMemory)
cmd := &cobra.Command{
Use: "config [flags] DIR",
Aliases: []string{"conf"},
@@ -1142,6 +1142,8 @@ func NewSBOMCommand(globalFlags *flag.GlobalFlagGroup) *cobra.Command {
LicenseFlagGroup: licenseFlagGroup,
}
sbomFlags.CacheFlagGroup.CacheBackend.Default = string(cache.TypeMemory) // Use memory cache by default
cmd := &cobra.Command{
Use: "sbom [flags] SBOM_PATH",
Short: "Scan SBOM for vulnerabilities and licenses",
@@ -1220,6 +1222,7 @@ func NewCleanCommand(globalFlags *flag.GlobalFlagGroup) *cobra.Command {
return cmd
}
func NewVersionCommand(globalFlags *flag.GlobalFlagGroup) *cobra.Command {
var versionFormat string
cmd := &cobra.Command{

View File

@@ -170,7 +170,7 @@ type Package struct {
SrcEpoch int `json:",omitempty"`
Licenses []string `json:",omitempty"`
Maintainer string `json:",omitempty"`
ExternalReferences []ExternalRef `json:"-"`
ExternalReferences []ExternalRef `json:"-" hash:"ignore"`
Modularitylabel string `json:",omitempty"` // only for Red Hat based distributions
BuildInfo *BuildInfo `json:",omitempty"` // only for Red Hat

View File

@@ -25,7 +25,7 @@ var (
Name: "cache-backend",
ConfigName: "cache.backend",
Default: "fs",
Usage: "cache backend (e.g. redis://localhost:6379)",
Usage: "[EXPERIMENTAL] cache backend (e.g. redis://localhost:6379)",
}
CacheTTLFlag = Flag[time.Duration]{
Name: "cache-ttl",