package engine

import (
	"bytes"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	ahocorasick "github.com/petar-dambovaliev/aho-corasick"
	"golang.org/x/sync/errgroup"
	"google.golang.org/protobuf/proto"

	"github.com/trufflesecurity/trufflehog/v3/pkg/common"
	"github.com/trufflesecurity/trufflehog/v3/pkg/config"
	"github.com/trufflesecurity/trufflehog/v3/pkg/context"
	"github.com/trufflesecurity/trufflehog/v3/pkg/decoders"
	"github.com/trufflesecurity/trufflehog/v3/pkg/detectors"
	"github.com/trufflesecurity/trufflehog/v3/pkg/pb/detectorspb"
	"github.com/trufflesecurity/trufflehog/v3/pkg/pb/source_metadatapb"
	"github.com/trufflesecurity/trufflehog/v3/pkg/pb/sourcespb"
	"github.com/trufflesecurity/trufflehog/v3/pkg/sources"
)
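// Engine orchestrates a scan: sources place chunks on the chunks channel,
// detector workers decode and scan each chunk, and findings are emitted on
// the results channel.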
type Engine struct {
	concurrency     int
	chunks          chan *sources.Chunk
	results         chan detectors.ResultWithMetadata
	decoders        []decoders.Decoder
	detectors       map[bool][]detectors.Detector
	chunksScanned   uint64
	bytesScanned    uint64
	detectorAvgTime sync.Map
	sourcesWg       *errgroup.Group
	workersWg       sync.WaitGroup

	// filterUnverified is used to reduce the number of unverified results.
	// If there are multiple unverified results for the same chunk for the
	// same detector, only the first one will be kept.
	filterUnverified bool

	// prefilter is an Aho-Corasick matcher used for doing efficient string
	// matching given a set of words (keywords from the rules in the config).
	prefilter ahocorasick.AhoCorasick
}
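// EngineOption configures an Engine at construction time.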
type EngineOption func(*Engine)
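// WithConcurrency sets the number of concurrent detector worker goroutines.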
func WithConcurrency(concurrency int) EngineOption {
	return func(e *Engine) {
		e.concurrency = concurrency
	}
}
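// ignoreTag is an in-line marker: a result whose matching line contains this
// tag is dropped from the output (for sources that support line numbers).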
const ignoreTag = "trufflehog:ignore"
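// WithDetectors configures the engine to scan with the given detectors,
// verifying found credentials when verify is true.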
func WithDetectors(verify bool, d ...detectors.Detector) EngineOption {
	return func(e *Engine) {
		if e.detectors == nil {
			e.detectors = make(map[bool][]detectors.Detector)
		}
		if e.detectors[verify] == nil {
			e.detectors[true] = []detectors.Detector{}
			e.detectors[false] = []detectors.Detector{}
		}
		e.detectors[verify] = append(e.detectors[verify], d...)
	}
}
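// WithDecoders overrides the default set of decoders applied to each chunk.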
func WithDecoders(decoders ...decoders.Decoder) EngineOption {
	return func(e *Engine) {
		e.decoders = decoders
	}
}

// WithFilterUnverified sets the filterUnverified flag on the engine. If set
// to true, the engine will only return the first unverified result for a
// chunk for a detector.
func WithFilterUnverified(filter bool) EngineOption {
	return func(e *Engine) {
		e.filterUnverified = filter
	}
}
// WithFilterDetectors applies a filter to the configured list of detectors.
// If the filterFunc returns true, the detector will be included for scanning.
// This option applies to the detectors configured so far, so the order in
// which this option appears matters. All filtering happens before scanning.
func WithFilterDetectors(filterFunc func(detectors.Detector) bool) EngineOption {
	return func(e *Engine) {
		// If no detectors are configured, do nothing.
		if e.detectors == nil {
			return
		}
		e.detectors[true] = filterDetectors(filterFunc, e.detectors[true])
		e.detectors[false] = filterDetectors(filterFunc, e.detectors[false])
	}
}
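// filterDetectors returns the subset of input detectors for which filterFunc
// returns true.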
func filterDetectors(filterFunc func(detectors.Detector) bool, input []detectors.Detector) []detectors.Detector {
	var output []detectors.Detector
	for _, detector := range input {
		if filterFunc(detector) {
			output = append(output, detector)
		}
	}
	return output
}
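// Start creates an Engine, applies the given options, fills in defaults for
// anything left unset, and launches the detector workers. A typical
// invocation from caller code (illustrative only) looks like:
//
//	e := engine.Start(ctx,
//		engine.WithConcurrency(runtime.NumCPU()),
//		engine.WithDetectors(true, engine.DefaultDetectors()...),
//		engine.WithDecoders(decoders.DefaultDecoders()...),
//	)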
func Start(ctx context.Context, options ...EngineOption) *Engine {
	e := &Engine{
		chunks:          make(chan *sources.Chunk),
		results:         make(chan detectors.ResultWithMetadata),
		detectorAvgTime: sync.Map{},
	}

	for _, option := range options {
		option(e)
	}

	// Set defaults.
	if e.concurrency == 0 {
		numCPU := runtime.NumCPU()
		ctx.Logger().Info("No concurrency specified, defaulting to max", "cpu", numCPU)
		e.concurrency = numCPU
	}
	ctx.Logger().V(2).Info("engine started", "workers", e.concurrency)

	sourcesWg, egCtx := errgroup.WithContext(ctx)
	sourcesWg.SetLimit(e.concurrency)
	e.sourcesWg = sourcesWg
	ctx.SetParent(egCtx)

	if len(e.decoders) == 0 {
		e.decoders = decoders.DefaultDecoders()
	}

	if len(e.detectors) == 0 {
		e.detectors = map[bool][]detectors.Detector{}
		e.detectors[true] = DefaultDetectors()
		e.detectors[false] = []detectors.Detector{}
	}

	// Build an Aho-Corasick prefilter over every detector's keywords for
	// efficient string matching.
	keywords := []string{}
	for _, d := range e.detectors[false] {
		keywords = append(keywords, d.Keywords()...)
	}
	for _, d := range e.detectors[true] {
		keywords = append(keywords, d.Keywords()...)
	}
	builder := ahocorasick.NewAhoCorasickBuilder(ahocorasick.Opts{
		AsciiCaseInsensitive: true,
		MatchOnlyWholeWords:  false,
		MatchKind:            ahocorasick.LeftMostLongestMatch,
		DFA:                  true,
	})
	e.prefilter = builder.Build(keywords)

	ctx.Logger().Info("loaded decoders", "count", len(e.decoders))
	ctx.Logger().Info("loaded detectors",
		"total", len(e.detectors[true])+len(e.detectors[false]),
		"verification_enabled", len(e.detectors[true]),
		"verification_disabled", len(e.detectors[false]),
	)

	// Sanity check detectors for duplicate configuration. Only log in case
	// a detector has been configured in a way that isn't represented by
	// the DetectorID (type and version).
	{
		dets := append(e.detectors[true], e.detectors[false]...)
		seenDetectors := make(map[config.DetectorID]struct{}, len(dets))
		for _, det := range dets {
			id := config.GetDetectorID(det)
			if _, ok := seenDetectors[id]; ok && id.ID != detectorspb.DetectorType_CustomRegex {
				ctx.Logger().Info("possible duplicate detector configured", "detector", id)
			}
			seenDetectors[id] = struct{}{}
		}
	}

	// Start the workers.
	for i := 0; i < e.concurrency; i++ {
		e.workersWg.Add(1)
		go func() {
			defer common.RecoverWithExit(ctx)
			defer e.workersWg.Done()
			e.detectorWorker(ctx)
		}()
	}

	return e
}
// Finish waits for running sources to complete and workers to finish scanning
// chunks before closing their respective channels. Once Finish is called, no
// more sources may be scanned by the engine.
func (e *Engine) Finish(ctx context.Context, logFunc func(error, string, ...any)) {
	defer common.RecoverWithExit(ctx)
	// Wait for the sources to finish putting chunks onto the chunks channel.
	sourceErr := e.sourcesWg.Wait()
	if sourceErr != nil {
		logFunc(sourceErr, "error occurred while collecting chunks")
	}

	close(e.chunks)
	// Wait for the workers to finish processing all of the chunks and
	// putting results onto the results channel.
	e.workersWg.Wait()

	// TODO: re-evaluate whether this is needed and investigate why if so
	//
	// not entirely sure why results don't get processed without this pause
	// since we've put all results on the channel at this point.
	time.Sleep(time.Second)
	close(e.results)
}
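// ChunksChan returns the channel on which sources deliver chunks to be
// scanned.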
func (e *Engine) ChunksChan() chan *sources.Chunk {
	return e.chunks
}
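// ResultsChan returns the channel on which detector findings are emitted.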
func (e *Engine) ResultsChan() chan detectors.ResultWithMetadata {
	return e.results
}
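// ChunksScanned returns the number of chunks scanned so far.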
func (e *Engine) ChunksScanned() uint64 {
	return e.chunksScanned
}
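// BytesScanned returns the number of bytes scanned so far.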
func (e *Engine) BytesScanned() uint64 {
	return e.bytesScanned
}
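// DetectorAvgTime returns the raw scan durations recorded for each detector,
// keyed by detector name; despite the name, averaging is left to the caller.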
func (e *Engine) DetectorAvgTime() map[string][]time.Duration {
	logger := context.Background().Logger()
	avgTime := map[string][]time.Duration{}
	e.detectorAvgTime.Range(func(k, v interface{}) bool {
		key, ok := k.(string)
		if !ok {
			logger.Info("expected DetectorAvgTime key to be a string")
			return true
		}

		value, ok := v.([]time.Duration)
		if !ok {
			logger.Info("expected DetectorAvgTime value to be []time.Duration")
			return true
		}
		avgTime[key] = value
		return true
	})
	return avgTime
}
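// detectorWorker consumes chunks from the chunks channel, decodes each one,
// prefilters detectors by keyword, runs the matching detectors, and sends
// findings to the results channel.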
func (e *Engine) detectorWorker(ctx context.Context) {
	for originalChunk := range e.chunks {
		for chunk := range sources.Chunker(originalChunk) {
			matchedKeywords := make(map[string]struct{})
			atomic.AddUint64(&e.bytesScanned, uint64(len(chunk.Data)))
			for _, decoder := range e.decoders {
				var decoderType detectorspb.DecoderType
				switch decoder.(type) {
				case *decoders.UTF8:
					decoderType = detectorspb.DecoderType_PLAIN
				case *decoders.Base64:
					decoderType = detectorspb.DecoderType_BASE64
				case *decoders.UTF16:
					decoderType = detectorspb.DecoderType_UTF16
				default:
					ctx.Logger().Info("unknown decoder type", "type", reflect.TypeOf(decoder).String())
					decoderType = detectorspb.DecoderType_UNKNOWN
				}
				decoded := decoder.FromChunk(chunk)
				if decoded == nil {
					continue
				}

				// Build a map of all keywords that were matched in the chunk.
				for _, m := range e.prefilter.FindAll(string(decoded.Data)) {
					matchedKeywords[strings.ToLower(string(decoded.Data[m.Start():m.End()]))] = struct{}{}
				}

				for verify, detectorsSet := range e.detectors {
					for _, detector := range detectorsSet {
						chunkContainsKeyword := false
						for _, kw := range detector.Keywords() {
							if _, ok := matchedKeywords[strings.ToLower(kw)]; ok {
								chunkContainsKeyword = true
								break
							}
						}

						if !chunkContainsKeyword {
							continue
						}

						start := time.Now()

						results, err := func() ([]detectors.Result, error) {
							ctx, cancel := context.WithTimeout(ctx, time.Second*10)
							defer cancel()
							defer common.Recover(ctx)
							return detector.FromData(ctx, verify, decoded.Data)
						}()
						if err != nil {
							ctx.Logger().Error(err, "could not scan chunk",
								"source_type", decoded.SourceType.String(),
								"metadata", decoded.SourceMetadata,
							)
							continue
						}

						if e.filterUnverified {
							results = detectors.CleanResults(results)
						}
						for _, result := range results {
							resultChunk := chunk
							ignoreLinePresent := false
							if SupportsLineNumbers(chunk.SourceType) {
								copyChunk := *chunk
								copyMetaDataClone := proto.Clone(chunk.SourceMetadata)
								if copyMetaData, ok := copyMetaDataClone.(*source_metadatapb.MetaData); ok {
									copyChunk.SourceMetadata = copyMetaData
								}
								fragStart, mdLine := FragmentFirstLine(&copyChunk)
								ignoreLinePresent = SetResultLineNumber(&copyChunk, &result, fragStart, mdLine)
								resultChunk = &copyChunk
							}
							if ignoreLinePresent {
								continue
							}
							result.DecoderType = decoderType
							e.results <- detectors.CopyMetadata(resultChunk, result)
						}
						if len(results) > 0 {
							elapsed := time.Since(start)
							detectorName := results[0].DetectorType.String()
							avgTimeI, ok := e.detectorAvgTime.Load(detectorName)
							var avgTime []time.Duration
							if ok {
								avgTime, ok = avgTimeI.([]time.Duration)
								if !ok {
									continue
								}
							}
							avgTime = append(avgTime, elapsed)
							e.detectorAvgTime.Store(detectorName, avgTime)
						}
					}
				}
			}
		}
		atomic.AddUint64(&e.chunksScanned, 1)
	}
}
// lineNumberSupportedSources is a list of sources that support line numbers.
// It is a function rather than a package-level value because Go does not
// support slice constants.
func lineNumberSupportedSources() []sourcespb.SourceType {
	return []sourcespb.SourceType{
		sourcespb.SourceType_SOURCE_TYPE_GIT,
		sourcespb.SourceType_SOURCE_TYPE_GITHUB,
		sourcespb.SourceType_SOURCE_TYPE_GITLAB,
		sourcespb.SourceType_SOURCE_TYPE_BITBUCKET,
		sourcespb.SourceType_SOURCE_TYPE_GERRIT,
		sourcespb.SourceType_SOURCE_TYPE_GITHUB_UNAUTHENTICATED_ORG,
		sourcespb.SourceType_SOURCE_TYPE_PUBLIC_GIT,
		sourcespb.SourceType_SOURCE_TYPE_FILESYSTEM,
	}
}
// SupportsLineNumbers determines if a line number can be found for a source type.
func SupportsLineNumbers(sourceType sourcespb.SourceType) bool {
	for _, i := range lineNumberSupportedSources() {
		if i == sourceType {
			return true
		}
	}
	return false
}
// FragmentLineOffset returns the zero-based line offset of the result within
// the provided source chunk, along with whether the matching line carries the
// ignore tag and the result should be skipped.
func FragmentLineOffset(chunk *sources.Chunk, result *detectors.Result) (int64, bool) {
	lines := bytes.Split(chunk.Data, []byte("\n"))
	for i, line := range lines {
		if bytes.Contains(line, result.Raw) {
			// If the line contains the ignore tag, we should ignore the result.
			if bytes.Contains(line, []byte(ignoreTag)) {
				return int64(i), true
			}
			return int64(i), false
		}
	}
	return 0, false
}
// FragmentFirstLine returns the first line number of a fragment along with a
// pointer to the value to update in the chunk metadata.
func FragmentFirstLine(chunk *sources.Chunk) (int64, *int64) {
	var fragmentStart *int64
	switch metadata := chunk.SourceMetadata.GetData().(type) {
	case *source_metadatapb.MetaData_Git:
		fragmentStart = &metadata.Git.Line
	case *source_metadatapb.MetaData_Github:
		fragmentStart = &metadata.Github.Line
	case *source_metadatapb.MetaData_Gitlab:
		fragmentStart = &metadata.Gitlab.Line
	case *source_metadatapb.MetaData_Bitbucket:
		fragmentStart = &metadata.Bitbucket.Line
	case *source_metadatapb.MetaData_Gerrit:
		fragmentStart = &metadata.Gerrit.Line
	case *source_metadatapb.MetaData_Filesystem:
		fragmentStart = &metadata.Filesystem.Line
	default:
		return 0, nil
	}
	return *fragmentStart, fragmentStart
}
// SetResultLineNumber sets the line number for the result in the provided
// chunk metadata and reports whether the matching line contains the ignore
// tag.
func SetResultLineNumber(chunk *sources.Chunk, result *detectors.Result, fragStart int64, mdLine *int64) bool {
	offset, skip := FragmentLineOffset(chunk, result)
	*mdLine = fragStart + offset
	return skip
}