trufflehog/pkg/handlers/metrics.go

package handlers

import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/trufflesecurity/trufflehog/v3/pkg/common"
)

type metrics struct {
handlerType handlerType
handleFileLatency *prometheus.HistogramVec
bytesProcessed *prometheus.CounterVec
filesProcessed *prometheus.CounterVec
errorsEncountered *prometheus.CounterVec
filesSkipped *prometheus.CounterVec
maxArchiveDepthCount *prometheus.CounterVec
fileSize *prometheus.HistogramVec
fileProcessingTimeouts *prometheus.CounterVec
}
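// The metric vectors below are package-level and are registered once with the default Prometheus
// registerer via promauto. Every metrics instance returned by newHandlerMetrics shares them;
// values for individual handlers are separated by the "handler_type" label.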
var (
handleFileLatency = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_file_latency_milliseconds",
Help: "Latency of the HandleFile method",
Buckets: prometheus.ExponentialBuckets(1, 5, 6),
},
[]string{"handler_type"},
)
bytesProcessed = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_bytes_processed_total",
Help: "Total number of bytes processed",
},
[]string{"handler_type"},
)
filesProcessed = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_files_processed_total",
Help: "Total number of files processed",
},
[]string{"handler_type"},
)
errorsEncountered = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_errors_encountered_total",
Help: "Total number of errors encountered",
},
[]string{"handler_type"},
)
filesSkipped = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_files_skipped_total",
Help: "Total number of files skipped",
},
[]string{"handler_type"},
)
maxArchiveDepthCount = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_max_archive_depth_reached_total",
Help: "Total number of times the maximum archive depth was reached",
},
[]string{"handler_type"},
)
fileSize = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_file_size_bytes",
Help: "Sizes of files handled by the handler",
Buckets: prometheus.ExponentialBuckets(1, 2, 4),
},
[]string{"handler_type"},
)
fileProcessingTimeouts = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_file_processing_timeouts_total",
Help: "Total number of file processing timeouts encountered",
},
[]string{"handler_type"},
)
)

// newHandlerMetrics creates a new metrics instance configured with Prometheus metrics specific to a file handler.
// The function takes a handlerType parameter, which represents the type of the handler (e.g., "default", "ar", "rpm").
// The handlerType is used as a label for each metric, allowing for differentiation and aggregation of metrics
// based on the handler type.
//
// The function returns a pointer to a metrics struct that references the following package-level Prometheus metrics:
//
// - handleFileLatency: a HistogramVec metric that measures the latency of the HandleFile method in milliseconds.
// It uses exponential buckets starting at 1 with a factor of 5, across 6 buckets.
// The metric is labeled with the handlerType.
//
// - bytesProcessed: a CounterVec metric that tracks the total number of bytes processed by the handler.
// It is labeled with the handlerType.
//
// - filesProcessed: a CounterVec metric that tracks the total number of files processed by the handler.
// It is labeled with the handlerType.
//
// - errorsEncountered: a CounterVec metric that tracks the total number of errors encountered by the handler.
// It is labeled with the handlerType.
//
// - filesSkipped: a CounterVec metric that tracks the total number of files skipped by the handler.
// It is labeled with the handlerType.
//
// - maxArchiveDepthCount: a CounterVec metric that tracks the total number of times the maximum archive depth was reached.
// It is labeled with the handlerType.
//
// - fileSize: a HistogramVec metric that measures the sizes, in bytes, of files handled by the handler.
// It uses exponential buckets starting at 1 with a factor of 2, across 4 buckets.
// It is labeled with the handlerType.
//
// - fileProcessingTimeouts: a CounterVec metric that tracks the total number of file processing timeouts
// encountered by the handler.
// It is labeled with the handlerType.
//
// The metrics are created with a common namespace and subsystem defined in the common package.
// This helps to organize and group related metrics together.
//
// By storing the handlerType and using it as the label value for every observation, the function enables accurate
// attribution and aggregation of metrics based on the specific handler type. This allows for fine-grained monitoring
// and analysis of file handler performance.
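//
// Illustrative usage (a minimal sketch: the time import and the surrounding handler code are assumed,
// and handlerType("default") stands in for whichever handlerType constant the caller already has):
//
//	m := newHandlerMetrics(handlerType("default"))
//	start := time.Now()
//	// ... handle the file ...
//	m.observeHandleFileLatency(time.Since(start).Milliseconds())
//	m.incFilesProcessed()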
func newHandlerMetrics(t handlerType) *metrics {
return &metrics{
handlerType: t,
handleFileLatency: handleFileLatency,
bytesProcessed: bytesProcessed,
filesProcessed: filesProcessed,
errorsEncountered: errorsEncountered,
filesSkipped: filesSkipped,
maxArchiveDepthCount: maxArchiveDepthCount,
fileSize: fileSize,
fileProcessingTimeouts: fileProcessingTimeouts,
}
}
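// observeHandleFileLatency records the duration of a HandleFile call, in milliseconds.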
func (m *metrics) observeHandleFileLatency(duration int64) {
m.handleFileLatency.WithLabelValues(string(m.handlerType)).Observe(float64(duration))
}
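// incBytesProcessed adds the given number of bytes to the handler's processed-bytes counter.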
func (m *metrics) incBytesProcessed(bytes int) {
m.bytesProcessed.WithLabelValues(string(m.handlerType)).Add(float64(bytes))
}
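// incFilesProcessed increments the count of files processed by the handler.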
func (m *metrics) incFilesProcessed() {
m.filesProcessed.WithLabelValues(string(m.handlerType)).Inc()
}
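// incErrors increments the count of errors encountered by the handler.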
func (m *metrics) incErrors() {
m.errorsEncountered.WithLabelValues(string(m.handlerType)).Inc()
}
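// incFilesSkipped increments the count of files the handler skipped.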
func (m *metrics) incFilesSkipped() {
m.filesSkipped.WithLabelValues(string(m.handlerType)).Inc()
}
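// incMaxArchiveDepthCount increments the count of times the maximum archive depth was reached.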
func (m *metrics) incMaxArchiveDepthCount() {
m.maxArchiveDepthCount.WithLabelValues(string(m.handlerType)).Inc()
}
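// observeFileSize records the size of a handled file, in bytes.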
func (m *metrics) observeFileSize(size int64) {
m.fileSize.WithLabelValues(string(m.handlerType)).Observe(float64(size))
}
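// incFileProcessingTimeouts increments the count of file processing timeouts encountered by the handler.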
func (m *metrics) incFileProcessingTimeouts() {
m.fileProcessingTimeouts.WithLabelValues(string(m.handlerType)).Inc()
}