Upgrade config, UI, and command package patterns (#406)

* split and upgrade config processing

Signed-off-by: Alex Goodman <alex.goodman@anchore.com>

* upgrade UI organization

Signed-off-by: Alex Goodman <alex.goodman@anchore.com>

* expose logger writer

Signed-off-by: Alex Goodman <alex.goodman@anchore.com>

* add (unused) signal handler

Signed-off-by: Alex Goodman <alex.goodman@anchore.com>

* add (unused) event loop abstraction

Signed-off-by: Alex Goodman <alex.goodman@anchore.com>

* update aux commands to use Cobra RunE over Run

Signed-off-by: Alex Goodman <alex.goodman@anchore.com>

* upgrade root command to use new event loop and signal handler

Signed-off-by: Alex Goodman <alex.goodman@anchore.com>

* update CLI test to account for config representation

Signed-off-by: Alex Goodman <alex.goodman@anchore.com>

* update dependencies + fix linting

Signed-off-by: Alex Goodman <alex.goodman@anchore.com>

* decompose application config parse func + add missing config struct tags

Signed-off-by: Alex Goodman <alex.goodman@anchore.com>

* restore unparam lint exclusion for registry config

Signed-off-by: Alex Goodman <alex.goodman@anchore.com>
Alex Goodman committed 2021-10-01 13:03:50 -04:00 (committed by GitHub)
commit b1f3be4520 (parent 34b4885b87)
34 changed files with 1346 additions and 761 deletions


@ -6,28 +6,25 @@ import (
"github.com/anchore/grype/grype"
"github.com/anchore/grype/internal/config"
"github.com/anchore/grype/internal/format"
"github.com/anchore/grype/internal/log"
"github.com/anchore/grype/internal/logger"
"github.com/anchore/stereoscope"
"github.com/anchore/syft/syft"
"github.com/sirupsen/logrus"
"github.com/gookit/color"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/wagoodman/go-partybus"
"gopkg.in/yaml.v2"
)
var appConfig *config.Application
var log *logrus.Logger
var cliOnlyOpts config.CliOnlyOptions
var eventBus *partybus.Bus
var eventSubscription *partybus.Subscription
var (
appConfig *config.Application
eventBus *partybus.Bus
eventSubscription *partybus.Subscription
)
func init() {
setGlobalCliOptions()
// read in config and setup logger
cobra.OnInitialize(
initRootCmdConfigOptions,
initAppConfig,
initLogging,
logAppConfig,
@ -35,23 +32,6 @@ func init() {
)
}
func setGlobalCliOptions() {
// setup global CLI options (available on all CLI commands)
rootCmd.PersistentFlags().StringVarP(&cliOnlyOpts.ConfigPath, "config", "c", "", "application config file")
flag := "quiet"
rootCmd.PersistentFlags().BoolP(
flag, "q", false,
"suppress all logging output",
)
if err := viper.BindPFlag(flag, rootCmd.PersistentFlags().Lookup(flag)); err != nil {
fmt.Printf("unable to bind flag '%s': %+v", flag, err)
os.Exit(1)
}
rootCmd.PersistentFlags().CountVarP(&cliOnlyOpts.Verbosity, "verbose", "v", "increase verbosity (-v = info, -vv = debug)")
}
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Fprintln(os.Stderr, err.Error())
@ -59,8 +39,14 @@ func Execute() {
}
}
func initRootCmdConfigOptions() {
if err := bindRootConfigOptions(rootCmd.Flags()); err != nil {
panic(err)
}
}
func initAppConfig() {
cfg, err := config.LoadConfigFromFile(viper.GetViper(), &cliOnlyOpts)
cfg, err := config.LoadApplicationConfig(viper.GetViper(), persistentOpts)
if err != nil {
fmt.Printf("failed to load application config: \n\t%+v\n", err)
os.Exit(1)
@ -79,26 +65,19 @@ func initLogging() {
logWrapper := logger.NewLogrusLogger(cfg)
log = logWrapper.Logger
grype.SetLogger(logWrapper)
// add a structured field to all loggers of dependencies
syft.SetLogger(&logger.LogrusNestedLogger{
Logger: log.WithField("from-lib", "syft"),
Logger: logWrapper.Logger.WithField("from-lib", "syft"),
})
stereoscope.SetLogger(&logger.LogrusNestedLogger{
Logger: log.WithField("from-lib", "stereoscope"),
Logger: logWrapper.Logger.WithField("from-lib", "stereoscope"),
})
}
func logAppConfig() {
appCfgStr, err := yaml.Marshal(&appConfig)
if err != nil {
log.Debugf("Could not display application config: %+v", err)
} else {
log.Debugf("Application config:\n%+v", format.Magenta.Format(string(appCfgStr)))
}
log.Debugf("application config:\n%+v", color.Magenta.Sprint(appConfig.String()))
}
func initEventBus() {


@ -1,7 +1,13 @@
package cmd
import (
"context"
"os"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/spf13/cobra"
)
@ -44,7 +50,7 @@ $ grype completion fish > ~/.config/fish/completions/grype.fish
DisableFlagsInUseLine: true,
ValidArgs: []string{"bash", "fish", "zsh"},
Args: cobra.ExactValidArgs(1),
Run: func(cmd *cobra.Command, args []string) {
RunE: func(cmd *cobra.Command, args []string) error {
var err error
switch args[0] {
case "zsh":
@ -54,12 +60,52 @@ $ grype completion fish > ~/.config/fish/completions/grype.fish
case "fish":
err = cmd.Root().GenFishCompletion(os.Stdout, true)
}
if err != nil {
panic(err)
}
return err
},
}
func init() {
rootCmd.AddCommand(completionCmd)
}
func dockerImageValidArgsFunction(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
// Since we use ValidArgsFunction, Cobra will call this AFTER having parsed all flags and arguments provided
dockerImageRepoTags, err := listLocalDockerImages(toComplete)
if err != nil {
// Indicates that an error occurred and completions should be ignored
return []string{"completion failed"}, cobra.ShellCompDirectiveError
}
if len(dockerImageRepoTags) == 0 {
return []string{"no docker images found"}, cobra.ShellCompDirectiveError
}
// ShellCompDirectiveDefault indicates that the shell will perform its default behavior after completions have
// been provided (without implying other possible directives)
return dockerImageRepoTags, cobra.ShellCompDirectiveDefault
}
func listLocalDockerImages(prefix string) ([]string, error) {
var repoTags = make([]string, 0)
ctx := context.Background()
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return repoTags, err
}
// Only want to return tagged images
imageListArgs := filters.NewArgs()
imageListArgs.Add("dangling", "false")
images, err := cli.ImageList(ctx, types.ImageListOptions{All: false, Filters: imageListArgs})
if err != nil {
return repoTags, err
}
for _, image := range images {
// image may have multiple tags
for _, tag := range image.RepoTags {
if strings.HasPrefix(tag, prefix) {
repoTags = append(repoTags, tag)
}
}
}
return repoTags, nil
}


@ -2,7 +2,6 @@ package cmd
import (
"fmt"
"os"
"github.com/anchore/grype/grype/db"
"github.com/spf13/cobra"
@ -12,35 +11,27 @@ var dbCheckCmd = &cobra.Command{
Use: "check",
Short: "check to see if there is a database update available",
Args: cobra.ExactArgs(0),
Run: func(cmd *cobra.Command, args []string) {
ret := runDbCheckCmd(cmd, args)
if ret != 0 {
fmt.Println("Unable to check for vulnerability database updates")
}
os.Exit(ret)
},
RunE: runDbCheckCmd,
}
func init() {
dbCmd.AddCommand(dbCheckCmd)
}
func runDbCheckCmd(_ *cobra.Command, _ []string) int {
func runDbCheckCmd(_ *cobra.Command, _ []string) error {
dbCurator := db.NewCurator(appConfig.Db.ToCuratorConfig())
updateAvailable, _, err := dbCurator.IsUpdateAvailable()
if err != nil {
// TODO: should this be so fatal? we can certainly continue with a warning...
log.Errorf("unable to check for vulnerability database update: %+v", err)
return 1
return fmt.Errorf("unable to check for vulnerability database update: %+v", err)
}
if !updateAvailable {
fmt.Println("No update available")
return 0
return nil
}
fmt.Println("Update available!")
return 0
return nil
}


@ -2,7 +2,6 @@ package cmd
import (
"fmt"
"os"
"github.com/anchore/grype/grype/db"
"github.com/spf13/cobra"
@ -12,28 +11,20 @@ var dbDeleteCmd = &cobra.Command{
Use: "delete",
Short: "delete the vulnerability database",
Args: cobra.ExactArgs(0),
Run: func(cmd *cobra.Command, args []string) {
ret := runDbDeleteCmd(cmd, args)
if ret != 0 {
fmt.Println("Unable to delete vulnerability database")
}
os.Exit(ret)
},
RunE: runDbDeleteCmd,
}
func init() {
dbCmd.AddCommand(dbDeleteCmd)
}
func runDbDeleteCmd(_ *cobra.Command, _ []string) int {
func runDbDeleteCmd(_ *cobra.Command, _ []string) error {
dbCurator := db.NewCurator(appConfig.Db.ToCuratorConfig())
if err := dbCurator.Delete(); err != nil {
log.Errorf("unable to delete vulnerability database: %+v", err)
return 1
return fmt.Errorf("unable to delete vulnerability database: %+v", err)
}
fmt.Println("Vulnerability database deleted")
return 0
return nil
}


@ -2,7 +2,6 @@ package cmd
import (
"fmt"
"os"
"github.com/anchore/grype/internal"
@ -15,28 +14,20 @@ var dbImportCmd = &cobra.Command{
Short: "import a vulnerability database archive",
Long: fmt.Sprintf("import a vulnerability database archive from a local FILE.\nDB archives can be obtained from %q.", internal.DBUpdateURL),
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
ret := runDbImportCmd(cmd, args)
if ret != 0 {
fmt.Println("Unable to import vulnerability database")
}
os.Exit(ret)
},
RunE: runDbImportCmd,
}
func init() {
dbCmd.AddCommand(dbImportCmd)
}
func runDbImportCmd(_ *cobra.Command, args []string) int {
func runDbImportCmd(_ *cobra.Command, args []string) error {
dbCurator := db.NewCurator(appConfig.Db.ToCuratorConfig())
if err := dbCurator.ImportFrom(args[0]); err != nil {
log.Errorf("unable to import vulnerability database: %+v", err)
return 1
return fmt.Errorf("unable to import vulnerability database: %+v", err)
}
fmt.Println("Vulnerability database imported")
return 0
return nil
}


@ -2,7 +2,6 @@ package cmd
import (
"fmt"
"os"
"github.com/anchore/grype/grype/db"
@ -13,13 +12,7 @@ var statusCmd = &cobra.Command{
Use: "status",
Short: "display database status",
Args: cobra.ExactArgs(0),
Run: func(cmd *cobra.Command, args []string) {
err := runDbStatusCmd(cmd, args)
if err != nil {
log.Errorf(err.Error())
os.Exit(1)
}
},
RunE: runDbStatusCmd,
}
func init() {


@ -2,7 +2,6 @@ package cmd
import (
"fmt"
"os"
"github.com/anchore/grype/grype/db"
"github.com/spf13/cobra"
@ -12,33 +11,27 @@ var dbUpdateCmd = &cobra.Command{
Use: "update",
Short: "download the latest vulnerability database",
Args: cobra.ExactArgs(0),
Run: func(cmd *cobra.Command, args []string) {
ret := runDbUpdateCmd(cmd, args)
if ret != 0 {
fmt.Println("Unable to update vulnerability database")
}
os.Exit(ret)
},
RunE: runDbUpdateCmd,
}
func init() {
dbCmd.AddCommand(dbUpdateCmd)
}
func runDbUpdateCmd(_ *cobra.Command, _ []string) int {
func runDbUpdateCmd(_ *cobra.Command, _ []string) error {
dbCurator := db.NewCurator(appConfig.Db.ToCuratorConfig())
updated, err := dbCurator.Update()
if err != nil {
log.Errorf("unable to update vulnerability database: %+v", err)
return 1
fmt.Println("Unable to update vulnerability database")
return fmt.Errorf("unable to update vulnerability database: %+v", err)
}
if updated {
fmt.Println("Vulnerability database updated!")
return 0
return nil
}
fmt.Println("No vulnerability database update available")
return 0
return nil
}

cmd/event_loop.go (new file, 92 lines)

@ -0,0 +1,92 @@
package cmd
import (
"errors"
"os"
"github.com/anchore/grype/internal/log"
"github.com/anchore/grype/internal/ui"
"github.com/hashicorp/go-multierror"
"github.com/wagoodman/go-partybus"
)
// eventLoop listens to worker errors (from execution path), worker events (from a partybus subscription), and
// signal interrupts. It is responsible for handling each event relative to a given UI and for coordinating eventing
// until an eventual graceful exit.
// nolint:gocognit,funlen
func eventLoop(workerErrs <-chan error, signals <-chan os.Signal, subscription *partybus.Subscription, ux ui.UI, cleanupFn func()) error {
defer cleanupFn()
events := subscription.Events()
var err error
if ux, err = setupUI(subscription.Unsubscribe, ux); err != nil {
return err
}
var retErr error
var forceTeardown bool
for {
if workerErrs == nil && events == nil {
break
}
select {
case err, isOpen := <-workerErrs:
if !isOpen {
workerErrs = nil
continue
}
if err != nil {
// capture the error from the worker and unsubscribe to complete a graceful shutdown
retErr = multierror.Append(retErr, err)
if err := subscription.Unsubscribe(); err != nil {
retErr = multierror.Append(retErr, err)
}
}
case e, isOpen := <-events:
if !isOpen {
events = nil
continue
}
if err := ux.Handle(e); err != nil {
if errors.Is(err, partybus.ErrUnsubscribe) {
log.Warnf("unable to unsubscribe from the event bus")
events = nil
} else {
retErr = multierror.Append(retErr, err)
// TODO: should we unsubscribe? should we try to halt execution? or continue?
}
}
case <-signals:
// ignore further results from any event source and exit ASAP, but ensure that all cache is cleaned up.
// we ignore further errors since cleaning up the tmp directories will affect running catalogers that are
// reading/writing from/to their nested temp dirs. This is acceptable since we are bailing without result.
// TODO: a potential future improvement would be to pass context into workers with a cancel function that is tied
// to the event loop. In this way we can have a more controlled shutdown even at the most nested levels
// of processing.
events = nil
workerErrs = nil
forceTeardown = true
}
}
if err := ux.Teardown(forceTeardown); err != nil {
retErr = multierror.Append(retErr, err)
}
return retErr
}
func setupUI(unsubscribe func() error, ux ui.UI) (ui.UI, error) {
if err := ux.Setup(unsubscribe); err != nil {
// replace the existing UI with a (simpler) logger UI
ux = ui.NewLoggerUI()
if err := ux.Setup(unsubscribe); err != nil {
// something is very wrong, bail.
return ux, err
}
log.Errorf("unable to setup given UI, falling back to logger: %+v", err)
}
return ux, nil
}
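
For orientation, here is a minimal sketch of the worker contract this loop assumes (the helper name and the work callback are illustrative, not part of this change): failures are sent on the returned channel, the channel is closed once the worker is done, and a terminal event is published on the bus so the UI handler can trigger an unsubscribe. This mirrors what startWorker and the tests below do.

package cmd

import (
	"github.com/anchore/grype/grype/event"
	"github.com/wagoodman/go-partybus"
)

// exampleWorker is an illustrative sketch of a worker that eventLoop can consume.
func exampleWorker(bus *partybus.Bus, work func() error) <-chan error {
	errs := make(chan error)
	go func() {
		// closing the channel tells eventLoop there are no more worker errors
		defer close(errs)
		if err := work(); err != nil {
			errs <- err // collected by eventLoop into its returned multierror
			return
		}
		// the terminal event lets the UI handler unsubscribe, ending the loop gracefully
		bus.Publish(partybus.Event{Type: event.VulnerabilityScanningFinished})
	}()
	return errs
}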

cmd/event_loop_test.go (new file, 455 lines)

@ -0,0 +1,455 @@
package cmd
import (
"fmt"
"os"
"syscall"
"testing"
"time"
"github.com/anchore/grype/grype/event"
"github.com/anchore/grype/internal/ui"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/wagoodman/go-partybus"
)
var _ ui.UI = (*uiMock)(nil)
type uiMock struct {
t *testing.T
finalEvent partybus.Event
unsubscribe func() error
mock.Mock
}
func (u *uiMock) Setup(unsubscribe func() error) error {
u.t.Logf("UI Setup called")
u.unsubscribe = unsubscribe
return u.Called(unsubscribe).Error(0)
}
func (u *uiMock) Handle(event partybus.Event) error {
u.t.Logf("UI Handle called: %+v", event.Type)
if event == u.finalEvent {
assert.NoError(u.t, u.unsubscribe())
}
return u.Called(event).Error(0)
}
func (u *uiMock) Teardown(_ bool) error {
u.t.Logf("UI Teardown called")
return u.Called().Error(0)
}
func Test_eventLoop_gracefulExit(t *testing.T) {
test := func(t *testing.T) {
testBus := partybus.NewBus()
subscription := testBus.Subscribe()
t.Cleanup(testBus.Close)
finalEvent := partybus.Event{
Type: event.VulnerabilityScanningFinished,
}
worker := func() <-chan error {
ret := make(chan error)
go func() {
t.Log("worker running")
// send an empty item (which is ignored) ensuring we've entered the select statement,
// then close (a partial shutdown).
ret <- nil
t.Log("worker sent nothing")
close(ret)
t.Log("worker closed")
// do the other half of the shutdown
testBus.Publish(finalEvent)
t.Log("worker published final event")
}()
return ret
}
signaler := func() <-chan os.Signal {
return nil
}
ux := &uiMock{
t: t,
finalEvent: finalEvent,
}
// ensure the mock sees at least the final event
ux.On("Handle", finalEvent).Return(nil)
// ensure the mock sees basic setup/teardown events
ux.On("Setup", mock.AnythingOfType("func() error")).Return(nil)
ux.On("Teardown").Return(nil)
var cleanupCalled bool
cleanupFn := func() {
t.Log("cleanup called")
cleanupCalled = true
}
assert.NoError(t,
eventLoop(
worker(),
signaler(),
subscription,
ux,
cleanupFn,
),
)
assert.True(t, cleanupCalled, "cleanup function not called")
ux.AssertExpectations(t)
}
// if there is a bug, then there is a risk of the event loop never returning
testWithTimeout(t, 5*time.Second, test)
}
func Test_eventLoop_workerError(t *testing.T) {
test := func(t *testing.T) {
testBus := partybus.NewBus()
subscription := testBus.Subscribe()
t.Cleanup(testBus.Close)
workerErr := fmt.Errorf("worker error")
worker := func() <-chan error {
ret := make(chan error)
go func() {
t.Log("worker running")
// send an empty item (which is ignored) ensuring we've entered the select statement,
// then send the worker error and close (a partial shutdown).
ret <- nil
t.Log("worker sent nothing")
ret <- workerErr
t.Log("worker sent error")
close(ret)
t.Log("worker closed")
// note: NO final event is fired
}()
return ret
}
signaler := func() <-chan os.Signal {
return nil
}
ux := &uiMock{
t: t,
}
// ensure the mock sees basic setup/teardown events
ux.On("Setup", mock.AnythingOfType("func() error")).Return(nil)
ux.On("Teardown").Return(nil)
var cleanupCalled bool
cleanupFn := func() {
t.Log("cleanup called")
cleanupCalled = true
}
// ensure we see an error returned
assert.ErrorIs(t,
eventLoop(
worker(),
signaler(),
subscription,
ux,
cleanupFn,
),
workerErr,
"should have seen a worker error, but did not",
)
assert.True(t, cleanupCalled, "cleanup function not called")
ux.AssertExpectations(t)
}
// if there is a bug, then there is a risk of the event loop never returning
testWithTimeout(t, 5*time.Second, test)
}
func Test_eventLoop_unsubscribeError(t *testing.T) {
test := func(t *testing.T) {
testBus := partybus.NewBus()
subscription := testBus.Subscribe()
t.Cleanup(testBus.Close)
finalEvent := partybus.Event{
Type: event.VulnerabilityScanningFinished,
}
worker := func() <-chan error {
ret := make(chan error)
go func() {
t.Log("worker running")
// send an empty item (which is ignored) ensuring we've entered the select statement,
// then close (a partial shutdown).
ret <- nil
t.Log("worker sent nothing")
close(ret)
t.Log("worker closed")
// do the other half of the shutdown
testBus.Publish(finalEvent)
t.Log("worker published final event")
}()
return ret
}
signaler := func() <-chan os.Signal {
return nil
}
ux := &uiMock{
t: t,
finalEvent: finalEvent,
}
// ensure the mock sees at least the final event... note the unsubscribe error here
ux.On("Handle", finalEvent).Return(partybus.ErrUnsubscribe)
// ensure the mock sees basic setup/teardown events
ux.On("Setup", mock.AnythingOfType("func() error")).Return(nil)
ux.On("Teardown").Return(nil)
var cleanupCalled bool
cleanupFn := func() {
t.Log("cleanup called")
cleanupCalled = true
}
// unsubscribe errors should be handled and ignored, not propagated. We are additionally asserting that
// this case is handled as a controlled shutdown (this test should not timeout)
assert.NoError(t,
eventLoop(
worker(),
signaler(),
subscription,
ux,
cleanupFn,
),
)
assert.True(t, cleanupCalled, "cleanup function not called")
ux.AssertExpectations(t)
}
// if there is a bug, then there is a risk of the event loop never returning
testWithTimeout(t, 5*time.Second, test)
}
func Test_eventLoop_handlerError(t *testing.T) {
test := func(t *testing.T) {
testBus := partybus.NewBus()
subscription := testBus.Subscribe()
t.Cleanup(testBus.Close)
finalEvent := partybus.Event{
Type: event.VulnerabilityScanningFinished,
Error: fmt.Errorf("unable to create presenter"),
}
worker := func() <-chan error {
ret := make(chan error)
go func() {
t.Log("worker running")
// send an empty item (which is ignored) ensuring we've entered the select statement,
// then close (a partial shutdown).
ret <- nil
t.Log("worker sent nothing")
close(ret)
t.Log("worker closed")
// do the other half of the shutdown
testBus.Publish(finalEvent)
t.Log("worker published final event")
}()
return ret
}
signaler := func() <-chan os.Signal {
return nil
}
ux := &uiMock{
t: t,
finalEvent: finalEvent,
}
// ensure the mock sees at least the final event... note the event error is propagated
ux.On("Handle", finalEvent).Return(finalEvent.Error)
// ensure the mock sees basic setup/teardown events
ux.On("Setup", mock.AnythingOfType("func() error")).Return(nil)
ux.On("Teardown").Return(nil)
var cleanupCalled bool
cleanupFn := func() {
t.Log("cleanup called")
cleanupCalled = true
}
// handler errors SHOULD propagate out of the event loop. We are additionally asserting that this case is
// handled as a controlled shutdown (this test should not timeout)
assert.ErrorIs(t,
eventLoop(
worker(),
signaler(),
subscription,
ux,
cleanupFn,
),
finalEvent.Error,
"should have seen a event error, but did not",
)
assert.True(t, cleanupCalled, "cleanup function not called")
ux.AssertExpectations(t)
}
// if there is a bug, then there is a risk of the event loop never returning
testWithTimeout(t, 5*time.Second, test)
}
func Test_eventLoop_signalsStopExecution(t *testing.T) {
test := func(t *testing.T) {
testBus := partybus.NewBus()
subscription := testBus.Subscribe()
t.Cleanup(testBus.Close)
worker := func() <-chan error {
// the worker will never return work and the event loop will always be waiting...
return make(chan error)
}
signaler := func() <-chan os.Signal {
ret := make(chan os.Signal)
go func() {
ret <- syscall.SIGINT
// note: we do NOT close the channel to ensure the event loop does not depend on that behavior to exit
}()
return ret
}
ux := &uiMock{
t: t,
}
// ensure the mock sees basic setup/teardown events
ux.On("Setup", mock.AnythingOfType("func() error")).Return(nil)
ux.On("Teardown").Return(nil)
var cleanupCalled bool
cleanupFn := func() {
t.Log("cleanup called")
cleanupCalled = true
}
assert.NoError(t,
eventLoop(
worker(),
signaler(),
subscription,
ux,
cleanupFn,
),
)
assert.True(t, cleanupCalled, "cleanup function not called")
ux.AssertExpectations(t)
}
// if there is a bug, then there is a risk of the event loop never returning
testWithTimeout(t, 5*time.Second, test)
}
func Test_eventLoop_uiTeardownError(t *testing.T) {
test := func(t *testing.T) {
testBus := partybus.NewBus()
subscription := testBus.Subscribe()
t.Cleanup(testBus.Close)
finalEvent := partybus.Event{
Type: event.VulnerabilityScanningFinished,
}
worker := func() <-chan error {
ret := make(chan error)
go func() {
t.Log("worker running")
// send an empty item (which is ignored) ensuring we've entered the select statement,
// then close (a partial shutdown).
ret <- nil
t.Log("worker sent nothing")
close(ret)
t.Log("worker closed")
// do the other half of the shutdown
testBus.Publish(finalEvent)
t.Log("worker published final event")
}()
return ret
}
signaler := func() <-chan os.Signal {
return nil
}
ux := &uiMock{
t: t,
finalEvent: finalEvent,
}
teardownError := fmt.Errorf("sorry, dave, the UI doesn't want to be torn down")
// ensure the mock sees at least the final event (handled without error; the failure comes from teardown)
ux.On("Handle", finalEvent).Return(nil)
// ensure the mock sees basic setup/teardown events
ux.On("Setup", mock.AnythingOfType("func() error")).Return(nil)
ux.On("Teardown").Return(teardownError)
var cleanupCalled bool
cleanupFn := func() {
t.Log("cleanup called")
cleanupCalled = true
}
// ensure we see an error returned
assert.ErrorIs(t,
eventLoop(
worker(),
signaler(),
subscription,
ux,
cleanupFn,
),
teardownError,
"should have seen a UI teardown error, but did not",
)
assert.True(t, cleanupCalled, "cleanup function not called")
ux.AssertExpectations(t)
}
// if there is a bug, then there is a risk of the event loop never returning
testWithTimeout(t, 5*time.Second, test)
}
func testWithTimeout(t *testing.T, timeout time.Duration, test func(*testing.T)) {
done := make(chan bool)
go func() {
test(t)
done <- true
}()
select {
case <-time.After(timeout):
t.Fatal("test timed out")
case <-done:
}
}
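
Since the mock asserts it satisfies ui.UI (var _ ui.UI = (*uiMock)(nil)), the interface exercised by eventLoop can be reconstructed from the calls made here and in event_loop.go; this is a reconstruction only, and the real definition in internal/ui may differ in parameter names or documentation.

package ui

import "github.com/wagoodman/go-partybus"

// UI as implied by its usage in eventLoop and by uiMock above (a reconstruction, not the source).
type UI interface {
	Setup(unsubscribe func() error) error // prepare the UI; receives a callback to unsubscribe from the bus
	Handle(event partybus.Event) error    // react to a single bus event
	Teardown(force bool) error            // flush and stop the UI; force is true on signal-driven exits
}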


@ -1,17 +1,12 @@
package cmd
import (
"context"
"errors"
"fmt"
"os"
"runtime/pprof"
"strings"
"sync"
"github.com/anchore/grype/grype/db"
"github.com/anchore/grype/grype"
"github.com/anchore/grype/grype/db"
"github.com/anchore/grype/grype/event"
"github.com/anchore/grype/grype/grypeerr"
"github.com/anchore/grype/grype/match"
@ -20,24 +15,21 @@ import (
"github.com/anchore/grype/grype/vulnerability"
"github.com/anchore/grype/internal"
"github.com/anchore/grype/internal/bus"
"github.com/anchore/grype/internal/config"
"github.com/anchore/grype/internal/format"
"github.com/anchore/grype/internal/log"
"github.com/anchore/grype/internal/ui"
"github.com/anchore/grype/internal/version"
"github.com/anchore/stereoscope"
"github.com/anchore/syft/syft/source"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/pkg/profile"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/wagoodman/go-partybus"
)
const (
scopeFlag = "scope"
outputFlag = "output"
failOnFlag = "fail-on"
templateFlag = "template"
)
var persistentOpts = config.CliOnlyOptions{}
var (
rootCmd = &cobra.Command{
@ -64,104 +56,94 @@ You can also pipe in Syft JSON directly:
"appName": internal.ApplicationName,
}),
Args: validateRootArgs,
Run: func(cmd *cobra.Command, args []string) {
RunE: func(cmd *cobra.Command, args []string) error {
if appConfig.Dev.ProfileCPU {
f, err := os.Create("cpu.profile")
if err != nil {
log.Errorf("unable to create CPU profile: %+v", err)
} else {
err := pprof.StartCPUProfile(f)
if err != nil {
log.Errorf("unable to start CPU profile: %+v", err)
}
}
defer profile.Start(profile.CPUProfile).Stop()
} else if appConfig.Dev.ProfileMem {
defer profile.Start(profile.MemProfile).Stop()
}
err := runDefaultCmd(cmd, args)
if appConfig.Dev.ProfileCPU {
pprof.StopCPUProfile()
}
if err != nil {
var grypeErr grypeerr.ExpectedErr
if errors.As(err, &grypeErr) {
fmt.Fprintln(os.Stderr, format.Red.Format(grypeErr.Error()))
} else {
log.Errorf(err.Error())
}
os.Exit(1)
}
},
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
// Since we use ValidArgsFunction, Cobra will call this AFTER having parsed all flags and arguments provided
dockerImageRepoTags, err := listLocalDockerImages(toComplete)
if err != nil {
// Indicates that an error occurred and completions should be ignored
return []string{"completion failed"}, cobra.ShellCompDirectiveError
}
if len(dockerImageRepoTags) == 0 {
return []string{"no docker images found"}, cobra.ShellCompDirectiveError
}
// ShellCompDirectiveDefault indicates that the shell will perform its default behavior after completions have
// been provided (without implying other possible directives)
return dockerImageRepoTags, cobra.ShellCompDirectiveDefault
return rootExec(cmd, args)
},
ValidArgsFunction: dockerImageValidArgsFunction,
}
)
func validateRootArgs(cmd *cobra.Command, args []string) error {
// the user must specify at least one argument OR wait for input on stdin IF it is a pipe
if len(args) == 0 && !internal.IsPipedInput() {
// return an error with no message for the user, which will implicitly show the help text (but no specific error)
return fmt.Errorf("")
}
return cobra.MaximumNArgs(1)(cmd, args)
func init() {
setGlobalCliOptions()
setRootFlags(rootCmd.Flags())
}
func init() {
// setup CLI options specific to scanning an image
func setGlobalCliOptions() {
// setup global CLI options (available on all CLI commands)
rootCmd.PersistentFlags().StringVarP(&persistentOpts.ConfigPath, "config", "c", "", "application config file")
// scan options
flag := scopeFlag
rootCmd.Flags().StringP(
scopeFlag, "s", source.SquashedScope.String(),
flag := "quiet"
rootCmd.PersistentFlags().BoolP(
flag, "q", false,
"suppress all logging output",
)
if err := viper.BindPFlag(flag, rootCmd.PersistentFlags().Lookup(flag)); err != nil {
fmt.Printf("unable to bind flag '%s': %+v", flag, err)
os.Exit(1)
}
rootCmd.PersistentFlags().CountVarP(&persistentOpts.Verbosity, "verbose", "v", "increase verbosity (-v = info, -vv = debug)")
}
func setRootFlags(flags *pflag.FlagSet) {
flags.StringP(
"scope", "s", source.SquashedScope.String(),
fmt.Sprintf("selection of layers to analyze, options=%v", source.AllScopes),
)
if err := viper.BindPFlag(flag, rootCmd.Flags().Lookup(flag)); err != nil {
fmt.Printf("unable to bind flag '%s': %+v", flag, err)
os.Exit(1)
}
// output & formatting options
flag = outputFlag
rootCmd.Flags().StringP(
flag, "o", "",
flags.StringP(
"output", "o", "",
fmt.Sprintf("report output formatter, formats=%v", presenter.AvailableFormats),
)
if err := viper.BindPFlag(flag, rootCmd.Flags().Lookup(flag)); err != nil {
fmt.Printf("unable to bind flag '%s': %+v", flag, err)
os.Exit(1)
}
flag = templateFlag
rootCmd.Flags().StringP(flag, "t", "", "specify the path to a Go template file ("+
flags.StringP("template", "t", "", "specify the path to a Go template file ("+
"requires 'template' output to be selected)")
if err := viper.BindPFlag("output-template-file", rootCmd.Flags().Lookup(flag)); err != nil {
fmt.Printf("unable to bind flag '%s': %+v", flag, err)
os.Exit(1)
}
flag = failOnFlag
rootCmd.Flags().StringP(
flag, "f", "",
flags.StringP(
"fail-on", "f", "",
fmt.Sprintf("set the return code to 1 if a vulnerability is found with a severity >= the given severity, options=%v", vulnerability.AllSeverities),
)
if err := viper.BindPFlag("fail-on-severity", rootCmd.Flags().Lookup(flag)); err != nil {
fmt.Printf("unable to bind flag '%s': %+v", flag, err)
os.Exit(1)
}
func bindRootConfigOptions(flags *pflag.FlagSet) error {
if err := viper.BindPFlag("scope", flags.Lookup("scope")); err != nil {
return err
}
if err := viper.BindPFlag("output", flags.Lookup("output")); err != nil {
return err
}
if err := viper.BindPFlag("output-template-file", flags.Lookup("template")); err != nil {
return err
}
if err := viper.BindPFlag("fail-on-severity", flags.Lookup("fail-on")); err != nil {
return err
}
return nil
}
func rootExec(_ *cobra.Command, args []string) error {
// we may not be provided an image if the user is piping in SBOM input
var userInput string
if len(args) > 0 {
userInput = args[0]
}
return eventLoop(
startWorker(userInput, appConfig.FailOnSeverity),
setupSignals(),
eventSubscription,
ui.Select(appConfig.CliOptions.Verbosity > 0, appConfig.Quiet),
stereoscope.Cleanup,
)
}
// nolint:funlen
@ -250,16 +232,14 @@ func startWorker(userInput string, failOnSeverity *vulnerability.Severity) <-cha
return errs
}
func runDefaultCmd(_ *cobra.Command, args []string) error {
// we may not be provided an image if the user is piping in SBOM input
var userInput string
if len(args) > 0 {
userInput = args[0]
func validateRootArgs(cmd *cobra.Command, args []string) error {
// the user must specify at least one argument OR wait for input on stdin IF it is a pipe
if len(args) == 0 && !internal.IsPipedInput() {
// return an error with no message for the user, which will implicitly show the help text (but no specific error)
return fmt.Errorf("")
}
errs := startWorker(userInput, appConfig.FailOnSeverity)
ux := ui.Select(appConfig.CliOptions.Verbosity > 0, appConfig.Quiet)
return ux(errs, eventSubscription)
return cobra.MaximumNArgs(1)(cmd, args)
}
// hitSeverityThreshold indicates if there are any severities >= to the max allowable severity (which is optional)
@ -283,30 +263,3 @@ func hitSeverityThreshold(thresholdSeverity *vulnerability.Severity, matches mat
}
return false
}
func listLocalDockerImages(prefix string) ([]string, error) {
var repoTags = make([]string, 0)
ctx := context.Background()
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return repoTags, err
}
// Only want to return tagged images
imageListArgs := filters.NewArgs()
imageListArgs.Add("dangling", "false")
images, err := cli.ImageList(ctx, types.ImageListOptions{All: false, Filters: imageListArgs})
if err != nil {
return repoTags, err
}
for _, image := range images {
// image may have multiple tags
for _, tag := range image.RepoTags {
if strings.HasPrefix(tag, prefix) {
repoTags = append(repoTags, tag)
}
}
}
return repoTags, nil
}
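
The flag handling above separates definition from binding: setRootFlags registers the flags during package init, while bindRootConfigOptions maps them onto viper keys inside a cobra.OnInitialize hook (initRootCmdConfigOptions), which Cobra invokes after flag parsing and before the Run hooks. A condensed illustration of that ordering, using placeholder names not taken from this diff:

package cmd

import (
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

var exampleCmd = &cobra.Command{
	Use: "example",
	RunE: func(cmd *cobra.Command, args []string) error {
		// by the time RunE runs, the "output" viper key reflects the flag value
		// (or, if the flag is unset, any config-file/env value bound to the same key)
		_ = viper.GetString("output")
		return nil
	},
}

func init() {
	// 1. define the flag when the package loads
	exampleCmd.Flags().StringP("output", "o", "", "report output format")
	// 2. defer the viper binding until Cobra has parsed the command line
	cobra.OnInitialize(func() {
		_ = bindExampleFlags(exampleCmd.Flags())
	})
}

func bindExampleFlags(flags *pflag.FlagSet) error {
	// binding a flag to a viper key lets config-file and env values share it
	return viper.BindPFlag("output", flags.Lookup("output"))
}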

cmd/signals.go (new file, 20 lines)

@ -0,0 +1,20 @@
package cmd
import (
"os"
"os/signal"
"syscall"
)
func setupSignals() <-chan os.Signal {
c := make(chan os.Signal, 1) // Note: A buffered channel is recommended for this; see https://golang.org/pkg/os/signal/#Notify
interruptions := []os.Signal{
syscall.SIGINT,
syscall.SIGTERM,
}
signal.Notify(c, interruptions...)
return c
}


@ -16,7 +16,7 @@ var outputFormat string
var versionCmd = &cobra.Command{
Use: "version",
Short: "show the version",
Run: printVersion,
RunE: printVersion,
}
func init() {
@ -25,7 +25,7 @@ func init() {
rootCmd.AddCommand(versionCmd)
}
func printVersion(_ *cobra.Command, _ []string) {
func printVersion(_ *cobra.Command, _ []string) error {
versionInfo := version.FromBuild()
switch outputFormat {
case "text":
@ -54,11 +54,10 @@ func printVersion(_ *cobra.Command, _ []string) {
SchemaVersion: vulnerability.SchemaVersion,
})
if err != nil {
fmt.Printf("failed to show version information: %+v\n", err)
os.Exit(1)
return fmt.Errorf("failed to show version information: %+v", err)
}
default:
fmt.Printf("unsupported output format: %s\n", outputFormat)
os.Exit(1)
return fmt.Errorf("unsupported output format: %s", outputFormat)
}
return nil
}

go.mod (4 changed lines)

@ -17,18 +17,20 @@ require (
github.com/go-test/deep v1.0.7
github.com/google/go-cmp v0.4.1
github.com/google/uuid v1.1.1
github.com/gookit/color v1.2.7
github.com/gookit/color v1.4.2
github.com/hashicorp/go-getter v1.4.1
github.com/hashicorp/go-multierror v1.1.0
github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a
github.com/knqyf263/go-deb-version v0.0.0-20190517075300-09fca494f03d
github.com/mitchellh/go-homedir v1.1.0
github.com/olekukonko/tablewriter v0.0.4
github.com/pkg/profile v1.6.0
github.com/scylladb/go-set v1.0.2
github.com/sergi/go-diff v1.1.0
github.com/sirupsen/logrus v1.6.0
github.com/spf13/afero v1.3.2
github.com/spf13/cobra v1.0.1-0.20200909172742-8a63648dd905
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.7.0
github.com/stretchr/testify v1.7.0
github.com/wagoodman/go-partybus v0.0.0-20210627031916-db1f5573bbc5

go.sum (9 changed lines)

@ -417,8 +417,9 @@ github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsC
github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gookit/color v1.2.4/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg=
github.com/gookit/color v1.2.5/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg=
github.com/gookit/color v1.2.7 h1:4qePMNWZhrmbfYJDix+J4V2l0iVW+6jQGjicELlN14E=
github.com/gookit/color v1.2.7/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg=
github.com/gookit/color v1.4.2 h1:tXy44JFSFkKnELV6WaMo/lLfu/meqITX3iAV52do7lk=
github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw=
@ -659,6 +660,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
github.com/pkg/profile v1.6.0 h1:hUDfIISABYI59DyeB3OTay/HxSRwTQ8rB/H83k6r5dM=
github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@ -759,6 +762,7 @@ github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@ -823,6 +827,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 h1:QldyIu/L63oPpyvQmHgvgickp1Yw510KJOqX7H24mg8=
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@ -1009,6 +1015,7 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=


@ -0,0 +1,250 @@
package config
import (
"errors"
"fmt"
"path"
"reflect"
"strings"
"github.com/adrg/xdg"
"github.com/anchore/grype/grype/match"
"github.com/anchore/grype/grype/vulnerability"
"github.com/anchore/grype/internal"
"github.com/anchore/syft/syft/source"
"github.com/mitchellh/go-homedir"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
"gopkg.in/yaml.v2"
)
var ErrApplicationConfigNotFound = fmt.Errorf("application config not found")
type defaultValueLoader interface {
loadDefaultValues(*viper.Viper)
}
type parser interface {
parseConfigValues() error
}
type Application struct {
ConfigPath string `yaml:",omitempty" json:"configPath"` // the location where the application config was read from (either from -c or discovered while loading)
Output string `yaml:"output" json:"output" mapstructure:"output"` // -o, the Presenter hint string to use for report formatting
OutputTemplateFile string `yaml:"output-template-file" json:"output-template-file" mapstructure:"output-template-file"` // -t, the template file to use for formatting the final report
Quiet bool `yaml:"quiet" json:"quiet" mapstructure:"quiet"` // -q, indicates to not show any status output to stderr (ETUI or logging UI)
CheckForAppUpdate bool `yaml:"check-for-app-update" json:"check-for-app-update" mapstructure:"check-for-app-update"` // whether to check for an application update on start up or not
CliOptions CliOnlyOptions `yaml:"-" json:"-"`
ScopeOpt source.Scope `json:"-"`
Scope string `yaml:"scope" json:"scope" mapstructure:"scope"`
Log logging `yaml:"log" json:"log" mapstructure:"log"`
Db database `yaml:"db" json:"db" mapstructure:"db"`
Dev development `yaml:"dev" json:"dev" mapstructure:"dev"`
FailOn string `yaml:"fail-on-severity" json:"fail-on-severity" mapstructure:"fail-on-severity"`
FailOnSeverity *vulnerability.Severity `json:"-"`
Registry registry `yaml:"registry" json:"registry" mapstructure:"registry"`
Ignore []match.IgnoreRule `yaml:"ignore" json:"ignore" mapstructure:"ignore"`
}
func newApplicationConfig(v *viper.Viper, cliOpts CliOnlyOptions) *Application {
config := &Application{
CliOptions: cliOpts,
}
config.loadDefaultValues(v)
return config
}
func LoadApplicationConfig(v *viper.Viper, cliOpts CliOnlyOptions) (*Application, error) {
// the user may not have a config, and this is OK, we can use the default config + default cobra cli values instead
config := newApplicationConfig(v, cliOpts)
if err := readConfig(v, cliOpts.ConfigPath); err != nil && !errors.Is(err, ErrApplicationConfigNotFound) {
return nil, err
}
if err := v.Unmarshal(config); err != nil {
return nil, fmt.Errorf("unable to parse config: %w", err)
}
config.ConfigPath = v.ConfigFileUsed()
if err := config.parseConfigValues(); err != nil {
return nil, fmt.Errorf("invalid application config: %w", err)
}
return config, nil
}
// loadDefaultValues loads the default configuration values into the viper instance (before the config values are read and parsed).
func (cfg Application) loadDefaultValues(v *viper.Viper) {
// set the default values for primitive fields in this struct
v.SetDefault("check-for-app-update", true)
// for each field in the configuration struct, see if the field implements the defaultValueLoader interface and invoke it if it does
value := reflect.ValueOf(cfg)
for i := 0; i < value.NumField(); i++ {
// note: the defaultValueLoader method receiver is NOT a pointer receiver.
if loadable, ok := value.Field(i).Interface().(defaultValueLoader); ok {
// the field implements defaultValueLoader, call it
loadable.loadDefaultValues(v)
}
}
}
func (cfg *Application) parseConfigValues() error {
// parse application config options
for _, optionFn := range []func() error{
cfg.parseScopeOption,
cfg.parseLogLevelOption,
cfg.parseFailOnOption,
} {
if err := optionFn(); err != nil {
return err
}
}
// parse nested config options
// for each field in the configuration struct, see if the field implements the parser interface
// note: the app config is a pointer, so we need to grab the elements explicitly (to traverse the address)
value := reflect.ValueOf(cfg).Elem()
for i := 0; i < value.NumField(); i++ {
// note: since the interface method of parser is a pointer receiver we need to get the value of the field as a pointer.
if parsable, ok := value.Field(i).Addr().Interface().(parser); ok {
// the field implements parser, call it
if err := parsable.parseConfigValues(); err != nil {
return err
}
}
}
return nil
}
func (cfg *Application) parseScopeOption() error {
scopeOption := source.ParseScope(cfg.Scope)
if scopeOption == source.UnknownScope {
return fmt.Errorf("bad --scope value '%s'", cfg.Scope)
}
cfg.ScopeOpt = scopeOption
return nil
}
func (cfg *Application) parseLogLevelOption() error {
if cfg.Quiet {
// TODO: this is bad: quiet option trumps all other logging options (such as to a file on disk)
// we should be able to quiet the console logging and leave file logging alone...
// ... this will be an enhancement for later
cfg.Log.LevelOpt = logrus.PanicLevel
} else {
if cfg.Log.Level != "" {
if cfg.CliOptions.Verbosity > 0 {
return fmt.Errorf("cannot explicitly set log level (cfg file or env var) and use -v flag together")
}
lvl, err := logrus.ParseLevel(strings.ToLower(cfg.Log.Level))
if err != nil {
return fmt.Errorf("bad log level configured (%q): %w", cfg.Log.Level, err)
}
// set the log level explicitly
cfg.Log.LevelOpt = lvl
} else {
// set the log level implicitly
switch v := cfg.CliOptions.Verbosity; {
case v == 1:
cfg.Log.LevelOpt = logrus.InfoLevel
case v >= 2:
cfg.Log.LevelOpt = logrus.DebugLevel
default:
cfg.Log.LevelOpt = logrus.ErrorLevel
}
}
}
return nil
}
func (cfg *Application) parseFailOnOption() error {
if cfg.FailOn != "" {
failOnSeverity := vulnerability.ParseSeverity(cfg.FailOn)
if failOnSeverity == vulnerability.UnknownSeverity {
return fmt.Errorf("bad --fail-on severity value '%s'", cfg.FailOn)
}
cfg.FailOnSeverity = &failOnSeverity
}
return nil
}
func (cfg Application) String() string {
// yaml is pretty human friendly (at least when compared to json)
appCfgStr, err := yaml.Marshal(&cfg)
if err != nil {
return err.Error()
}
return string(appCfgStr)
}
// readConfig attempts to read the given config path from disk or discover an alternate store location
// nolint:funlen
func readConfig(v *viper.Viper, configPath string) error {
var err error
v.AutomaticEnv()
v.SetEnvPrefix(internal.ApplicationName)
// allow for nested options to be specified via environment variables
// e.g. pod.context = APPNAME_POD_CONTEXT
v.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_"))
// use explicitly the given user config
if configPath != "" {
v.SetConfigFile(configPath)
if err := v.ReadInConfig(); err != nil {
return fmt.Errorf("unable to read application config=%q : %w", configPath, err)
}
// don't fall through to other options if the config path was explicitly provided
return nil
}
// start searching for valid configs in order...
// 1. look for .<appname>.yaml (in the current directory)
v.AddConfigPath(".")
v.SetConfigName("." + internal.ApplicationName)
if err = v.ReadInConfig(); err == nil {
return nil
} else if !errors.As(err, &viper.ConfigFileNotFoundError{}) {
return fmt.Errorf("unable to parse config=%q: %w", v.ConfigFileUsed(), err)
}
// 2. look for .<appname>/config.yaml (in the current directory)
v.AddConfigPath("." + internal.ApplicationName)
v.SetConfigName("config")
if err = v.ReadInConfig(); err == nil {
return nil
} else if !errors.As(err, &viper.ConfigFileNotFoundError{}) {
return fmt.Errorf("unable to parse config=%q: %w", v.ConfigFileUsed(), err)
}
// 3. look for ~/.<appname>.yaml
home, err := homedir.Dir()
if err == nil {
v.AddConfigPath(home)
v.SetConfigName("." + internal.ApplicationName)
if err = v.ReadInConfig(); err == nil {
return nil
} else if !errors.As(err, &viper.ConfigFileNotFoundError{}) {
return fmt.Errorf("unable to parse config=%q: %w", v.ConfigFileUsed(), err)
}
}
// 4. look for <appname>/config.yaml in xdg locations (starting with xdg home config dir, then moving upwards)
v.AddConfigPath(path.Join(xdg.ConfigHome, internal.ApplicationName))
for _, dir := range xdg.ConfigDirs {
v.AddConfigPath(path.Join(dir, internal.ApplicationName))
}
v.SetConfigName("config")
if err = v.ReadInConfig(); err == nil {
return nil
} else if !errors.As(err, &viper.ConfigFileNotFoundError{}) {
return fmt.Errorf("unable to parse config=%q: %w", v.ConfigFileUsed(), err)
}
return ErrApplicationConfigNotFound
}
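
Because loadDefaultValues and parseConfigValues walk the Application struct with reflection, a new configuration section only needs to implement the two small interfaces above (defaultValueLoader on a value receiver, parser on a pointer receiver) and be added as a field with matching struct tags. A hypothetical section, purely as a sketch of that pattern:

package config

import "github.com/spf13/viper"

// exampleSection is a hypothetical nested config block (not part of this change).
type exampleSection struct {
	Enabled bool   `yaml:"enabled" json:"enabled" mapstructure:"enabled"`
	Path    string `yaml:"path" json:"path" mapstructure:"path"`
}

// value receiver: discovered by the reflect loop in Application.loadDefaultValues
func (cfg exampleSection) loadDefaultValues(v *viper.Viper) {
	v.SetDefault("example.enabled", false)
	v.SetDefault("example.path", "")
}

// pointer receiver: discovered via Addr() by the reflect loop in Application.parseConfigValues
func (cfg *exampleSection) parseConfigValues() error {
	// validate or normalize values after viper has unmarshaled them
	return nil
}

Adding a field such as Example exampleSection `yaml:"example" json:"example" mapstructure:"example"` to Application would then be the only remaining wiring.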


@ -0,0 +1,6 @@
package config
type CliOnlyOptions struct {
ConfigPath string
Verbosity int
}


@ -1,217 +0,0 @@
package config
import (
"fmt"
"path"
"strings"
"github.com/adrg/xdg"
"github.com/anchore/grype/grype/db"
"github.com/anchore/grype/grype/match"
"github.com/anchore/grype/grype/vulnerability"
"github.com/anchore/grype/internal"
"github.com/anchore/syft/syft/source"
"github.com/mitchellh/go-homedir"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
type CliOnlyOptions struct {
ConfigPath string
Verbosity int
}
type Application struct {
ConfigPath string
Output string `mapstructure:"output"`
OutputTemplateFile string `mapstructure:"output-template-file"`
ScopeOpt source.Scope `json:"-"`
Scope string `mapstructure:"scope"`
Quiet bool `mapstructure:"quiet"`
Log Logging `mapstructure:"log"`
CliOptions CliOnlyOptions
Db Database `mapstructure:"db"`
Dev Development `mapstructure:"dev"`
CheckForAppUpdate bool `mapstructure:"check-for-app-update"`
FailOn string `mapstructure:"fail-on-severity"`
FailOnSeverity *vulnerability.Severity `json:"-"`
Registry registry `yaml:"registry" json:"registry" mapstructure:"registry"`
Ignore []match.IgnoreRule `yaml:"ignore" json:"ignore" mapstructure:"ignore"`
}
type Logging struct {
Structured bool `mapstructure:"structured"`
LevelOpt logrus.Level `json:"-"`
Level string `mapstructure:"level"`
FileLocation string `mapstructure:"file"`
}
type Database struct {
Dir string `mapstructure:"cache-dir"`
UpdateURL string `mapstructure:"update-url"`
AutoUpdate bool `mapstructure:"auto-update"`
ValidateByHashOnStart bool `mapstructure:"validate-by-hash-on-start"`
}
type Development struct {
ProfileCPU bool `mapstructure:"profile-cpu"`
}
func (d Database) ToCuratorConfig() db.Config {
return db.Config{
DbRootDir: d.Dir,
ListingURL: d.UpdateURL,
ValidateByHashOnGet: d.ValidateByHashOnStart,
}
}
func setNonCliDefaultValues(v *viper.Viper) {
v.SetDefault("log.level", "")
v.SetDefault("log.file", "")
v.SetDefault("log.structured", false)
// e.g. ~/.cache/appname/db
v.SetDefault("db.cache-dir", path.Join(xdg.CacheHome, internal.ApplicationName, "db"))
v.SetDefault("db.update-url", internal.DBUpdateURL)
v.SetDefault("db.auto-update", true)
v.SetDefault("db.validate-by-hash-on-start", false)
v.SetDefault("dev.profile-cpu", false)
v.SetDefault("check-for-app-update", true)
v.SetDefault("registry.insecure-skip-tls-verify", false)
v.SetDefault("registry.insecure-use-http", false)
v.SetDefault("registry.auth", []RegistryCredentials{})
}
func LoadConfigFromFile(v *viper.Viper, cliOpts *CliOnlyOptions) (*Application, error) {
// the user may not have a config, and this is OK, we can use the default config + default cobra cli values instead
setNonCliDefaultValues(v)
if cliOpts != nil {
_ = readConfig(v, cliOpts.ConfigPath)
} else {
_ = readConfig(v, "")
}
config := &Application{
CliOptions: *cliOpts,
}
err := v.Unmarshal(config)
if err != nil {
return nil, fmt.Errorf("unable to parse config: %w", err)
}
config.ConfigPath = v.ConfigFileUsed()
err = config.Build()
if err != nil {
return nil, fmt.Errorf("invalid config: %w", err)
}
return config, nil
}
func (cfg *Application) Build() error {
// set the scope
scopeOption := source.ParseScope(cfg.Scope)
if scopeOption == source.UnknownScope {
return fmt.Errorf("bad --scope value '%s'", cfg.Scope)
}
cfg.ScopeOpt = scopeOption
if cfg.Quiet {
// TODO: this is bad: quiet option trumps all other logging options (such as to a file on disk)
// we should be able to quiet the console logging and leave file logging alone...
// ... this will be an enhancement for later
cfg.Log.LevelOpt = logrus.PanicLevel
} else {
if cfg.Log.Level != "" {
if cfg.CliOptions.Verbosity > 0 {
return fmt.Errorf("cannot explicitly set log level (cfg file or env var) and use -v flag together")
}
lvl, err := logrus.ParseLevel(strings.ToLower(cfg.Log.Level))
if err != nil {
return fmt.Errorf("bad log level configured (%q): %w", cfg.Log.Level, err)
}
// set the log level explicitly
cfg.Log.LevelOpt = lvl
} else {
// set the log level implicitly
switch v := cfg.CliOptions.Verbosity; {
case v == 1:
cfg.Log.LevelOpt = logrus.InfoLevel
case v >= 2:
cfg.Log.LevelOpt = logrus.DebugLevel
default:
cfg.Log.LevelOpt = logrus.ErrorLevel
}
}
}
// set the fail-on option
if cfg.FailOn != "" {
failOnSeverity := vulnerability.ParseSeverity(cfg.FailOn)
if failOnSeverity == vulnerability.UnknownSeverity {
return fmt.Errorf("bad --fail-on severity value '%s'", cfg.FailOn)
}
cfg.FailOnSeverity = &failOnSeverity
}
// finalize registry values
cfg.Registry.parseConfigValues()
return nil
}
func readConfig(v *viper.Viper, configPath string) error {
v.AutomaticEnv()
v.SetEnvPrefix(internal.ApplicationName)
// allow for nested options to be specified via environment variables
// e.g. pod.context = APPNAME_POD_CONTEXT
v.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_"))
// use explicitly the given user config
if configPath != "" {
v.SetConfigFile(configPath)
if err := v.ReadInConfig(); err == nil {
return nil
}
// don't fall through to other options if this fails
return fmt.Errorf("unable to read config: %v", configPath)
}
// start searching for valid configs in order...
// 1. look for .<appname>.yaml (in the current directory)
v.AddConfigPath(".")
v.SetConfigName(internal.ApplicationName)
if err := v.ReadInConfig(); err == nil {
return nil
}
// 2. look for .<appname>/config.yaml (in the current directory)
v.AddConfigPath("." + internal.ApplicationName)
v.SetConfigName("config")
if err := v.ReadInConfig(); err == nil {
return nil
}
// 3. look for ~/.<appname>.yaml
home, err := homedir.Dir()
if err == nil {
v.AddConfigPath(home)
v.SetConfigName("." + internal.ApplicationName)
if err := v.ReadInConfig(); err == nil {
return nil
}
}
// 4. look for <appname>/config.yaml in xdg locations (starting with xdg home config dir, then moving upwards)
v.AddConfigPath(path.Join(xdg.ConfigHome, internal.ApplicationName))
for _, dir := range xdg.ConfigDirs {
v.AddConfigPath(path.Join(dir, internal.ApplicationName))
}
v.SetConfigName("config")
if err := v.ReadInConfig(); err == nil {
return nil
}
return fmt.Errorf("application config not found")
}


@ -0,0 +1,32 @@
package config
import (
"path"
"github.com/adrg/xdg"
"github.com/anchore/grype/grype/db"
"github.com/anchore/grype/internal"
"github.com/spf13/viper"
)
type database struct {
Dir string `yaml:"cache-dir" json:"cache-dir" mapstructure:"cache-dir"`
UpdateURL string `yaml:"update-url" json:"update-url" mapstructure:"update-url"`
AutoUpdate bool `yaml:"auto-update" json:"auto-update" mapstructure:"auto-update"`
ValidateByHashOnStart bool `yaml:"validate-by-hash-on-start" json:"validate-by-hash-on-start" mapstructure:"validate-by-hash-on-start"`
}
func (cfg database) loadDefaultValues(v *viper.Viper) {
v.SetDefault("db.cache-dir", path.Join(xdg.CacheHome, internal.ApplicationName, "db"))
v.SetDefault("db.update-url", internal.DBUpdateURL)
v.SetDefault("db.auto-update", true)
v.SetDefault("db.validate-by-hash-on-start", false)
}
func (cfg database) ToCuratorConfig() db.Config {
return db.Config{
DbRootDir: cfg.Dir,
ListingURL: cfg.UpdateURL,
ValidateByHashOnGet: cfg.ValidateByHashOnStart,
}
}


@ -0,0 +1,13 @@
package config
import "github.com/spf13/viper"
type development struct {
ProfileCPU bool `yaml:"profile-cpu" json:"profile-cpu" mapstructure:"profile-cpu"`
ProfileMem bool `yaml:"profile-mem" json:"profile-mem" mapstructure:"profile-mem"`
}
func (cfg development) loadDefaultValues(v *viper.Viper) {
v.SetDefault("dev.profile-cpu", false)
v.SetDefault("dev.profile-mem", false)
}


@ -0,0 +1,20 @@
package config
import (
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
// logging contains all logging-related configuration options available to the user via the application config.
type logging struct {
Structured bool `yaml:"structured" json:"structured" mapstructure:"structured"` // show all log entries as JSON formatted strings
LevelOpt logrus.Level `yaml:"-" json:"-"` // the native log level object used by the logger
Level string `yaml:"level" json:"level" mapstructure:"level"` // the log level string hint
FileLocation string `yaml:"file" json:"file" mapstructure:"file"` // the file path to write logs to
}
func (cfg logging) loadDefaultValues(v *viper.Viper) {
v.SetDefault("log.level", "")
v.SetDefault("log.file", "")
v.SetDefault("log.structured", false)
}


@ -3,6 +3,8 @@ package config
import (
"os"
"github.com/spf13/viper"
"github.com/anchore/stereoscope/pkg/image"
)
@ -22,7 +24,14 @@ type registry struct {
Auth []RegistryCredentials `yaml:"auth" json:"auth" mapstructure:"auth"`
}
func (cfg *registry) parseConfigValues() {
func (cfg registry) loadDefaultValues(v *viper.Viper) {
v.SetDefault("registry.insecure-skip-tls-verify", false)
v.SetDefault("registry.insecure-use-http", false)
v.SetDefault("registry.auth", []RegistryCredentials{})
}
// nolint:unparam
func (cfg *registry) parseConfigValues() error {
// there may be additional credentials provided by env var that should be appended to the set of credentials
authority, username, password, token :=
os.Getenv("GRYPE_REGISTRY_AUTH_AUTHORITY"),
@ -41,6 +50,7 @@ func (cfg *registry) parseConfigValues() {
},
}, cfg.Auth...)
}
return nil
}
func hasNonEmptyCredentials(username, password, token string) bool {


@ -24,6 +24,7 @@ type LogrusConfig struct {
type LogrusLogger struct {
Config LogrusConfig
Logger *logrus.Logger
Output io.Writer
}
type LogrusNestedLogger struct {
@ -74,6 +75,7 @@ func NewLogrusLogger(cfg LogrusConfig) *LogrusLogger {
return &LogrusLogger{
Config: cfg,
Logger: appLogger,
Output: output,
}
}


@ -1,22 +0,0 @@
package common
import (
"fmt"
"os"
grypeEventParsers "github.com/anchore/grype/grype/event/parsers"
"github.com/wagoodman/go-partybus"
)
func VulnerabilityScanningFinishedHandler(event partybus.Event) error {
// show the report to stdout
pres, err := grypeEventParsers.ParseVulnerabilityScanningFinished(event)
if err != nil {
return fmt.Errorf("bad CatalogerFinished event: %w", err)
}
if err := pres.Present(os.Stdout); err != nil {
return fmt.Errorf("unable to show vulnerability report: %w", err)
}
return nil
}


@ -1,72 +0,0 @@
package common
import (
"strings"
"sync"
)
// TODO: move me to a common module (used in multiple repos)
const (
SpinnerCircleOutlineSet = "◜◠◯◎◉●◉◎◯◡◞"
SpinnerCircleSet = "◌◯◎◉●◉◎◯"
SpinnerDotSet = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
SpinnerHorizontalBarSet = "▉▊▋▌▍▎▏▎▍▌▋▊▉"
SpinnerVerticalBarSet = "▁▃▄▅▆▇█▇▆▅▄▃▁"
SpinnerDoubleBarSet = "▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▏▎▍▌▋▊▉█▇▆▅▄▃▂▁"
SpinnerArrowSet = "←↖↑↗→↘↓↙"
)
var SpinnerCircleDotSet = []string{
"⠈⠁",
"⠈⠑",
"⠈⠱",
"⠈⡱",
"⢀⡱",
"⢄⡱",
"⢄⡱",
"⢆⡱",
"⢎⡱",
"⢎⡰",
"⢎⡠",
"⢎⡀",
"⢎⠁",
"⠎⠁",
"⠊⠁",
}
type Spinner struct {
index int
charset []string
lock sync.Mutex
}
func NewSpinner(charset string) Spinner {
return Spinner{
charset: strings.Split(charset, ""),
}
}
func NewSpinnerFromSlice(charset []string) Spinner {
return Spinner{
charset: charset,
}
}
func (s *Spinner) Current() string {
s.lock.Lock()
defer s.lock.Unlock()
return s.charset[s.index]
}
func (s *Spinner) Next() string {
s.lock.Lock()
defer s.lock.Unlock()
c := s.charset[s.index]
s.index++
if s.index >= len(s.charset) {
s.index = 0
}
return c
}


@ -0,0 +1,40 @@
package components
import (
"strings"
"sync"
)
// TODO: move me to a common module (used in multiple repos)
const SpinnerDotSet = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
type Spinner struct {
index int
charset []string
lock sync.Mutex
}
func NewSpinner(charset string) Spinner {
return Spinner{
charset: strings.Split(charset, ""),
}
}
func (s *Spinner) Current() string {
s.lock.Lock()
defer s.lock.Unlock()
return s.charset[s.index]
}
func (s *Spinner) Next() string {
s.lock.Lock()
defer s.lock.Unlock()
c := s.charset[s.index]
s.index++
if s.index >= len(s.charset) {
s.index = 0
}
return c
}
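
A brief usage sketch for the relocated spinner (the consuming widget code is assumed for illustration):

package ui

import "github.com/anchore/grype/internal/ui/components"

// a widget keeps one spinner and pulls a fresh glyph on each render tick
var statusSpinner = components.NewSpinner(components.SpinnerDotSet)

func renderTick() string {
	return statusSpinner.Next() // returns the glyph at the current index, then advances (wrapping)
}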


@ -0,0 +1,149 @@
package ui
import (
"bytes"
"context"
"fmt"
"io"
"os"
"sync"
grypeEvent "github.com/anchore/grype/grype/event"
"github.com/anchore/grype/internal/log"
"github.com/anchore/grype/internal/logger"
"github.com/anchore/grype/ui"
"github.com/wagoodman/go-partybus"
"github.com/wagoodman/jotframe/pkg/frame"
)
// ephemeralTerminalUI provides an "ephemeral" terminal user interface to display the application state dynamically.
// The terminal is placed into raw mode and the cursor is manipulated to allow for a dynamic, multi-line
// UI (provided by the jotframe lib). For this reason, all other application mechanisms that write to the screen
// (such as logs) must be suppressed before starting; since bytes in the device and in application memory combine to
// form a shared state, bytes arriving at the screen from elsewhere will disrupt it.
//
// This UI is primarily driven by events from the event bus, creating a single-line terminal widget for each
// published element on the bus and typically polling that element for its latest state. This allows the UI to
// control the update frequency to the screen, provide "liveness" indications interpolated between bus events,
// and overall decouple bus events from screen interactions.
//
// By convention, all elements published on the bus should be treated as read-only, and publishers on the bus should
// attempt to enforce this when possible by wrapping complex objects with interfaces to prescribe interactions. Also by
// convention, each new event that the UI should respond to should be added either in this package as a handler function,
// or in the shared ui package as a function on the main handler object. All handler functions should finish
// processing their event before the ETUI exits (coordinated with a sync.WaitGroup).
type ephemeralTerminalUI struct {
unsubscribe func() error
handler *ui.Handler
waitGroup *sync.WaitGroup
frame *frame.Frame
logBuffer *bytes.Buffer
uiOutput *os.File
}
func NewEphemeralTerminalUI() UI {
return &ephemeralTerminalUI{
handler: ui.NewHandler(),
waitGroup: &sync.WaitGroup{},
uiOutput: os.Stderr,
}
}
func (h *ephemeralTerminalUI) Setup(unsubscribe func() error) error {
h.unsubscribe = unsubscribe
hideCursor(h.uiOutput)
// prep the logger to not clobber the screen from now on (logrus only)
h.logBuffer = bytes.NewBufferString("")
logWrapper, ok := log.Log.(*logger.LogrusLogger)
if ok {
logWrapper.Logger.SetOutput(h.logBuffer)
}
return h.openScreen()
}
func (h *ephemeralTerminalUI) Handle(event partybus.Event) error {
ctx := context.Background()
switch {
case h.handler.RespondsTo(event):
if err := h.handler.Handle(ctx, h.frame, event, h.waitGroup); err != nil {
log.Errorf("unable to show %s event: %+v", event.Type, err)
}
case event.Type == grypeEvent.AppUpdateAvailable:
if err := handleAppUpdateAvailable(ctx, h.frame, event, h.waitGroup); err != nil {
log.Errorf("unable to show %s event: %+v", event.Type, err)
}
case event.Type == grypeEvent.VulnerabilityScanningFinished:
// we need to close the screen now since signaling that the presenter is ready means that we
// are about to write bytes to stdout, so we should reset the terminal state first
h.closeScreen(false)
if err := handleVulnerabilityScanningFinished(event); err != nil {
log.Errorf("unable to show %s event: %+v", event.Type, err)
}
// this is the last expected event, stop listening to events
return h.unsubscribe()
}
return nil
}
func (h *ephemeralTerminalUI) openScreen() error {
config := frame.Config{
PositionPolicy: frame.PolicyFloatForward,
// send all UI output to stderr only, reserving stdout for the report output
Output: h.uiOutput,
}
fr, err := frame.New(config)
if err != nil {
return fmt.Errorf("failed to create the screen object: %w", err)
}
h.frame = fr
return nil
}
func (h *ephemeralTerminalUI) closeScreen(force bool) {
// we may have other background processes still displaying progress, wait for them to
// finish before discontinuing dynamic content and showing the final report
if !h.frame.IsClosed() {
if !force {
h.waitGroup.Wait()
}
h.frame.Close()
// TODO: there is a race condition within frame.Close() that sometimes leads to an extra blank line being output
frame.Close()
// only flush the log on close
h.flushLog()
}
}
func (h *ephemeralTerminalUI) flushLog() {
// flush any errors to the screen before the report
logWrapper, ok := log.Log.(*logger.LogrusLogger)
if ok {
fmt.Fprint(logWrapper.Output, h.logBuffer.String())
logWrapper.Logger.SetOutput(h.uiOutput)
} else {
fmt.Fprint(h.uiOutput, h.logBuffer.String())
}
}
func (h *ephemeralTerminalUI) Teardown(force bool) error {
h.closeScreen(force)
showCursor(h.uiOutput)
return nil
}
func hideCursor(output io.Writer) {
fmt.Fprint(output, "\x1b[?25l")
}
func showCursor(output io.Writer) {
fmt.Fprint(output, "\x1b[?25h")
}
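
The Setup and flushLog methods above implement a buffer-then-flush pattern so that log entries cannot clobber the raw-mode frame while it is live. A standalone sketch of the same idea (not grype code, just the pattern in isolation):

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New()
	buf := &bytes.Buffer{}

	log.SetOutput(buf) // suppress: entries accumulate off-screen while the UI owns the terminal
	log.Info("emitted while the dynamic UI is rendering")

	// ... dynamic UI would render here ...

	fmt.Fprint(os.Stderr, buf.String()) // replay captured entries once the frame is closed
	log.SetOutput(os.Stderr)            // restore normal logging
}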


@ -1,140 +0,0 @@
package etui
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"sync"
"github.com/hashicorp/go-multierror"
grypeEvent "github.com/anchore/grype/grype/event"
"github.com/anchore/grype/grype/grypeerr"
"github.com/anchore/grype/internal/log"
"github.com/anchore/grype/internal/logger"
"github.com/anchore/grype/internal/ui/common"
grypeUI "github.com/anchore/grype/ui"
"github.com/wagoodman/go-partybus"
"github.com/wagoodman/jotframe/pkg/frame"
)
// TODO: specify per-platform implementations with build tags
func setupScreen(output *os.File) *frame.Frame {
config := frame.Config{
PositionPolicy: frame.PolicyFloatForward,
// only report output to stderr, reserve report output for stdout
Output: output,
}
fr, err := frame.New(config)
if err != nil {
log.Errorf("failed to create screen object: %+v", err)
return nil
}
return fr
}
// nolint:funlen,gocognit
func OutputToEphemeralTUI(workerErrs <-chan error, subscription *partybus.Subscription) error {
output := os.Stderr
// prep the logger to not clobber the screen from now on (logrus only)
logBuffer := bytes.NewBufferString("")
logWrapper, ok := log.Log.(*logger.LogrusLogger)
if ok {
logWrapper.Logger.SetOutput(logBuffer)
}
// hide cursor
_, _ = fmt.Fprint(output, "\x1b[?25l")
// show cursor
defer fmt.Fprint(output, "\x1b[?25h")
fr := setupScreen(output)
if fr == nil {
return fmt.Errorf("unable to setup screen")
}
var isClosed bool
defer func() {
if !isClosed {
fr.Close()
frame.Close()
// flush any errors to the screen before the report
fmt.Fprint(output, logBuffer.String())
}
logWrapper, ok := log.Log.(*logger.LogrusLogger)
if ok {
logWrapper.Logger.SetOutput(output)
}
}()
var err error
var wg = &sync.WaitGroup{}
events := subscription.Events()
ctx := context.Background()
grypeUIHandler := grypeUI.NewHandler()
var errResult error
for {
select {
case err, ok := <-workerErrs:
if err != nil {
if errors.Is(err, grypeerr.ErrAboveSeverityThreshold) {
errResult = err
continue
}
return err
}
if !ok {
// worker completed
workerErrs = nil
}
case e, ok := <-events:
if !ok {
// event bus closed
events = nil
}
switch {
case grypeUIHandler.RespondsTo(e):
if err = grypeUIHandler.Handle(ctx, fr, e, wg); err != nil {
log.Errorf("unable to show %s event: %+v", e.Type, err)
}
case e.Type == grypeEvent.AppUpdateAvailable:
if err = appUpdateAvailableHandler(ctx, fr, e, wg); err != nil {
log.Errorf("unable to show %s event: %+v", e.Type, err)
}
case e.Type == grypeEvent.VulnerabilityScanningFinished:
// we may have other background processes still displaying progress, wait for them to
// finish before discontinuing dynamic content and showing the final report
wg.Wait()
fr.Close()
// TODO: there is a race condition within frame.Close() that sometimes leads to an extra blank line being output
frame.Close()
isClosed = true
// flush any errors to the screen before the report
fmt.Fprint(output, logBuffer.String())
if err := common.VulnerabilityScanningFinishedHandler(e); err != nil {
log.Errorf("unable to show %s event: %+v", e.Type, err)
errResult = multierror.Append(errResult, err)
}
// this is the last expected event
events = nil
}
case <-ctx.Done():
return grypeerr.NewExpectedErr("canceled: %w", ctx.Err())
}
if events == nil && workerErrs == nil {
break
}
}
return errResult
}


@ -1,9 +1,10 @@
package etui
package ui
import (
"context"
"fmt"
"io"
"os"
"sync"
grypeEventParsers "github.com/anchore/grype/grype/event/parsers"
@ -13,7 +14,20 @@ import (
"github.com/wagoodman/jotframe/pkg/frame"
)
func appUpdateAvailableHandler(_ context.Context, fr *frame.Frame, event partybus.Event, _ *sync.WaitGroup) error {
func handleVulnerabilityScanningFinished(event partybus.Event) error {
// show the report to stdout
pres, err := grypeEventParsers.ParseVulnerabilityScanningFinished(event)
if err != nil {
return fmt.Errorf("bad CatalogerFinished event: %w", err)
}
if err := pres.Present(os.Stdout); err != nil {
return fmt.Errorf("unable to show vulnerability report: %w", err)
}
return nil
}
func handleAppUpdateAvailable(_ context.Context, fr *frame.Frame, event partybus.Event, _ *sync.WaitGroup) error {
newVersion, err := grypeEventParsers.ParseAppUpdateAvailable(event)
if err != nil {
return fmt.Errorf("bad %s event: %w", event.Type, err)


@ -1,52 +0,0 @@
package ui
import (
"errors"
grypeEvent "github.com/anchore/grype/grype/event"
"github.com/anchore/grype/grype/grypeerr"
"github.com/anchore/grype/internal/log"
"github.com/anchore/grype/internal/ui/common"
"github.com/wagoodman/go-partybus"
)
func LoggerUI(workerErrs <-chan error, subscription *partybus.Subscription) error {
events := subscription.Events()
var errResult error
for {
select {
case err, ok := <-workerErrs:
if err != nil {
if errors.Is(err, grypeerr.ErrAboveSeverityThreshold) {
errResult = err
continue
}
return err
}
if !ok {
// worker completed
workerErrs = nil
}
case e, ok := <-events:
if !ok {
// event bus closed
events = nil
}
// ignore all events except for the final event
if e.Type == grypeEvent.VulnerabilityScanningFinished {
err := common.VulnerabilityScanningFinishedHandler(e)
if err != nil {
log.Errorf("unable to show %s event: %+v", e.Type, err)
}
// this is the last expected event
events = nil
}
}
if events == nil && workerErrs == nil {
break
}
}
return errResult
}

internal/ui/logger_ui.go (new file, 38 lines)

@ -0,0 +1,38 @@
package ui
import (
grypeEvent "github.com/anchore/grype/grype/event"
"github.com/anchore/grype/internal/log"
"github.com/wagoodman/go-partybus"
)
type loggerUI struct {
unsubscribe func() error
}
func NewLoggerUI() UI {
return &loggerUI{}
}
func (l *loggerUI) Setup(unsubscribe func() error) error {
l.unsubscribe = unsubscribe
return nil
}
func (l loggerUI) Handle(event partybus.Event) error {
// ignore all events except for the final event
if event.Type != grypeEvent.VulnerabilityScanningFinished {
return nil
}
if err := handleVulnerabilityScanningFinished(event); err != nil {
log.Warnf("unable to show vulnerability scanning finished event: %+v", err)
}
// this is the last expected event, stop listening to events
return l.unsubscribe()
}
func (l loggerUI) Teardown(_ bool) error {
return nil
}


@ -4,27 +4,25 @@ import (
"os"
"runtime"
"github.com/anchore/grype/internal"
"github.com/anchore/grype/internal/ui/etui"
"golang.org/x/crypto/ssh/terminal"
)
// TODO: build tags to exclude options from windows
// Select is responsible for determining the specific UI function given select user option, the current platform
// config values, and environment status (such as a TTY being present).
func Select(verbose, quiet bool) UI {
var ui UI
isStdinPiped := internal.IsPipedInput()
isStdoutATty := terminal.IsTerminal(int(os.Stdout.Fd()))
isStderrATty := terminal.IsTerminal(int(os.Stderr.Fd()))
notATerminal := !isStderrATty && !isStdoutATty
switch {
case runtime.GOOS == "windows" || verbose || quiet || notATerminal || !isStderrATty || isStdinPiped:
ui = LoggerUI
case runtime.GOOS == "windows" || verbose || quiet || notATerminal || !isStderrATty:
ui = NewLoggerUI()
default:
ui = etui.OutputToEphemeralTUI
ui = NewEphemeralTerminalUI()
}
return ui


@ -4,4 +4,8 @@ import (
"github.com/wagoodman/go-partybus"
)
type UI func(<-chan error, *partybus.Subscription) error
type UI interface {
Setup(unsubscribe func() error) error
partybus.Handler
Teardown(force bool) error
}
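
With UI now an interface rather than a function type, any implementation can be driven by the same loop. A hedged sketch (the runUI helper below is an assumption for illustration; grype's actual event loop wiring may differ):

package ui

import "github.com/wagoodman/go-partybus"

// runUI sets up the UI, feeds it every bus event, and tears it down when the
// event channel is closed (e.g. after the UI calls unsubscribe on the final event).
func runUI(u UI, subscription *partybus.Subscription) error {
	if err := u.Setup(subscription.Unsubscribe); err != nil {
		return err
	}
	defer func() {
		_ = u.Teardown(false)
	}()

	for e := range subscription.Events() {
		if err := u.Handle(e); err != nil {
			return err
		}
	}
	return nil
}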


@ -19,9 +19,9 @@ func TestJsonDescriptor(t *testing.T) {
"GRYPE_CHECK_FOR_APP_UPDATE": "false",
},
assertions: []traitAssertion{
assertInOutput(`"CheckForAppUpdate": false`), // app config
assertInOutput(`"db": {`), // db status
assertInOutput(`"built":`), // db status
assertInOutput(`"check-for-app-update": false`), // assert existence of the app config block
assertInOutput(`"db": {`), // assert existence of the db status block
assertInOutput(`"built":`), // assert existence of the db status block
},
},
}


@ -8,7 +8,7 @@ import (
"time"
grypeEventParsers "github.com/anchore/grype/grype/event/parsers"
"github.com/anchore/grype/internal/ui/common"
"github.com/anchore/grype/internal/ui/components"
syftUI "github.com/anchore/syft/ui"
"github.com/dustin/go-humanize"
"github.com/gookit/color"
@ -19,21 +19,21 @@ import (
)
const maxBarWidth = 50
const statusSet = common.SpinnerDotSet // SpinnerCircleOutlineSet
const completedStatus = "✔" // "●"
const statusSet = components.SpinnerDotSet // SpinnerCircleOutlineSet
const completedStatus = "✔" // "●"
const tileFormat = color.Bold
var auxInfoFormat = color.HEX("#777777")
var statusTitleTemplate = fmt.Sprintf(" %%s %%-%ds ", syftUI.StatusTitleColumn)
func startProcess() (format.Simple, *common.Spinner) {
func startProcess() (format.Simple, *components.Spinner) {
width, _ := frame.GetTerminalSize()
barWidth := int(0.25 * float64(width))
if barWidth > maxBarWidth {
barWidth = maxBarWidth
}
formatter := format.NewSimpleWithTheme(barWidth, format.HeavyNoBarTheme, format.ColorCompleted, format.ColorTodo)
spinner := common.NewSpinner(statusSet)
spinner := components.NewSpinner(statusSet)
return formatter, &spinner
}