[refactor] - Refactor Archive Handling Logic (#2703)

* Remove specialized handler and archive struct and restructure handlers pkg.

* Refactor RPM archive handlers to use a library instead of shelling out

* make rpm handling context aware
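
The two bullets above swap the old `rpm2cpio`/`cpio` shell-out for the go-rpmutils library (added to go.mod below) and thread a context through extraction. A minimal sketch of that shape, assuming go-rpmutils' `ReadRpm`/`PayloadReaderExtended` payload iterator; the exact method names and the `emit` callback are assumptions, not the project's real handler API:

```go
package handlers

import (
	"context"
	"errors"
	"fmt"
	"io"

	rpmutils "github.com/sassoftware/go-rpmutils"
)

// handleRPM is a sketch: extract RPM payload entries in-process and stop early
// when the context is cancelled, instead of exec'ing rpm2cpio/cpio.
func handleRPM(ctx context.Context, in io.Reader, emit func(name string, r io.Reader) error) error {
	rpm, err := rpmutils.ReadRpm(in)
	if err != nil {
		return fmt.Errorf("reading rpm: %w", err)
	}
	payload, err := rpm.PayloadReaderExtended() // assumed go-rpmutils API
	if err != nil {
		return fmt.Errorf("opening rpm payload: %w", err)
	}
	for {
		if err := ctx.Err(); err != nil { // honor cancellation between entries
			return err
		}
		entry, err := payload.Next()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		// The payload reader serves the current entry's bytes until Next is called again.
		if err := emit(entry.Name(), payload); err != nil {
			return err
		}
	}
}
```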

* update test

* Refactor AR/deb archive handler to use an existing library instead of shelling out
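
Likewise for ar/.deb archives: the pault.ag/go/debian dependency (also added below) can walk ar members in-process. A rough sketch under the assumption that `deb.LoadAr` yields entries with `Name`/`Data` fields; `emit` is again a hypothetical callback:

```go
package handlers

import (
	"context"
	"errors"
	"fmt"
	"io"

	"pault.ag/go/debian/deb"
)

// handleAR is a sketch: iterate the members of an ar archive (the outer format
// of .deb packages) without shelling out to `ar` or `dpkg-deb`.
func handleAR(ctx context.Context, in io.Reader, emit func(name string, r io.Reader) error) error {
	arReader, err := deb.LoadAr(in) // assumed pault.ag/go/debian API
	if err != nil {
		return fmt.Errorf("loading ar archive: %w", err)
	}
	for {
		if err := ctx.Err(); err != nil {
			return err
		}
		entry, err := arReader.Next()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		// Typical members: debian-binary, control.tar.gz, data.tar.xz.
		if err := emit(entry.Name, entry.Data); err != nil {
			return err
		}
	}
}
```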

* Update tests

* add max size check
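
A guard like the sketch below is the idea behind this bullet: cap how much of any (possibly decompressed) stream gets buffered before the handler gives up. The limit and names here are illustrative only:

```go
package handlers

import (
	"errors"
	"io"
)

// maxFileSize is an illustrative ceiling, not the project's actual value.
const maxFileSize = 250 << 20 // 250 MiB

var errFileTooLarge = errors.New("file exceeds maximum allowed size")

// copyWithLimit copies at most maxFileSize bytes and reports an error if the
// source has more, so oversized entries are skipped instead of fully buffered.
func copyWithLimit(dst io.Writer, src io.Reader) error {
	n, err := io.Copy(dst, io.LimitReader(src, maxFileSize+1))
	if err != nil {
		return err
	}
	if n > maxFileSize {
		return errFileTooLarge
	}
	return nil
}
```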

* add filename and size to context kvp

* move skip file check and is binary check before opening file

* fix test

* preserve existing functionality of not handling non-archive files in HandleFile

* Adjust check for rpm/deb archive type

* add additional deb mime type

* update comment

* go mod tidy

* update go mod

* go mod tidy

* add comment

* update max depth check to >

* go mod tidy

* rename

* [refactor] - Refactor Archive Handling Logic - Part 4: Non-Archive Data Handling and Cleanup (#2704)

* Handle non-archive data within the DefaultHandler
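
With this change the DefaultHandler takes both archive and non-archive inputs: it tries to identify an archive format and, when nothing matches, streams the raw bytes in chunks itself. A rough sketch, assuming mholt/archiver/v4's `Identify`/`ErrNoMatch` (already a dependency) and that the returned reader replays the peeked header bytes; the chunk size and channel shape are illustrative:

```go
package handlers

import (
	"context"
	"errors"
	"io"

	"github.com/mholt/archiver/v4"
)

const chunkSize = 512 * 1024 // illustrative chunk size

// handleDefault is a sketch: identify the stream; real archives go to the
// archive path (not shown), everything else is forwarded as raw chunks so
// sources no longer need their own non-archive handling.
func handleDefault(ctx context.Context, in io.Reader, out chan<- []byte) error {
	_, rdr, err := archiver.Identify("", in) // rdr is assumed to replay the peeked bytes
	if err == nil {
		return errors.New("archive detected: delegate to the archive handler (not shown)")
	}
	if !errors.Is(err, archiver.ErrNoMatch) {
		return err
	}
	buf := make([]byte, chunkSize)
	for {
		n, rerr := rdr.Read(buf)
		if n > 0 {
			chunk := make([]byte, n)
			copy(chunk, buf[:n])
			select {
			case out <- chunk:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		if errors.Is(rerr, io.EOF) {
			return nil
		}
		if rerr != nil {
			return rerr
		}
	}
}
```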

* make structs and methods private

* Remove non-archive data handling within sources

* Handle non-archive data within the DefaultHandler

* rebase

* Remove non-archive data handling within sources

* add gzip

* move DiskBufferReader setup into handler pkg

* remove DiskBufferReader creation logic within sources

* move rewind closer

* reduce log verbosity

* make defaultBufferSize a const

* use correct reader

* address comments

* update test

* [feat] - Add Prometheus Metrics for File Handlers (#2705)

* add metrics for file handling

* add metrics for errors

* add metrics for file handling

* add metrics for errors

* fix tests

* add metrics for max archive depth and skipped files
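
Together with the earlier "add metrics for file handling" and "add metrics for errors" bullets, this describes a small set of Prometheus collectors for the handlers package. A sketch of what such collectors might look like; the namespace, subsystem, and metric names are illustrative, not the project's actual ones:

```go
package handlers

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	// Duration of a single HandleFile call, in milliseconds.
	handleFileDuration = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "trufflehog",
		Subsystem: "file_handlers",
		Name:      "handle_duration_ms",
		Help:      "Time spent handling a file, in milliseconds.",
		Buckets:   prometheus.ExponentialBuckets(1, 2, 16),
	})
	filesHandled = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "trufflehog",
		Subsystem: "file_handlers",
		Name:      "files_handled_total",
		Help:      "Total number of files processed by the handlers.",
	})
	handlerErrors = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "trufflehog",
		Subsystem: "file_handlers",
		Name:      "errors_total",
		Help:      "Total number of errors encountered while handling files.",
	})
	maxDepthExceeded = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "trufflehog",
		Subsystem: "file_handlers",
		Name:      "max_archive_depth_total",
		Help:      "Times extraction stopped because the max archive depth was reached.",
	})
	filesSkipped = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "trufflehog",
		Subsystem: "file_handlers",
		Name:      "files_skipped_total",
		Help:      "Files skipped (symlinks, directories, binaries, oversized files).",
	})
)
```

The later "update metric to ms" bullet would then map to observing `float64(time.Since(start).Milliseconds())` on the duration histogram.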

* update error

* skip symlinks and dirs

* update err

* fix err assignment

* add metrics for file handling

* add metrics for errors

* fix tests

* rebase

* add metrics for errors

* add metrics for max archive depth and skipped files

* update error

* skip symlinks and dirs

* update err

* fix err assignment

* rebase

* remove

* update metric to ms

* update comments

* address comments

* reduce indentations

* add metrics for archive depth

* [bug] - Enhanced Archive Handling to Address Interface Constraints (#2710)

* add metrics for file handling

* add metrics for errors

* add metrics for file handling

* add metrics for errors

* fix tests

* add metrics for max archive depth and skipped files

* update error

* skip symlinks and dirs

* update err

* Address incompatible reader to openArchive

* remove nil check

* fix err assignment

* wrap compReader with DiskBufferReader

* add metrics for file handling

* add metrics for errors

* fix tests

* rebase

* add metrics for errors

* add metrics for max archive depth and skipped files

* update error

* skip symlinks and dirs

* update err

* fix err assignment

* rebase

* remove

* update metric to ms

* update comments

* address comments

* reduce indentations

* replace diskbufferreader with bufferedfilereader
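
The reader-wrapping bullets in this PR ("Address incompatible reader to openArchive", "wrap compReader with DiskBufferReader", and this replacement with bufferedfilereader) all deal with the same constraint: decompressed streams are read-once, but format identification needs to peek at the header and then hand a rewound reader onward. The project uses its own buffered file reader for this; the stand-in below only illustrates the idea with an in-memory buffer:

```go
package handlers

import (
	"bytes"
	"errors"
	"io"
)

// rewindableReader is a sketch of the buffering idea: remember whatever has
// been peeked so the stream can be replayed from the start for openArchive.
type rewindableReader struct {
	src io.Reader
	buf bytes.Buffer
}

// Peek reads up to n header bytes while keeping a copy for later replay.
func (r *rewindableReader) Peek(n int) ([]byte, error) {
	header := make([]byte, n)
	read, err := io.ReadFull(io.TeeReader(r.src, &r.buf), header)
	if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) && !errors.Is(err, io.EOF) {
		return nil, err
	}
	return header[:read], nil
}

// Rewound returns a reader that replays the peeked bytes and then continues
// with the rest of the underlying stream.
func (r *rewindableReader) Rewound() io.Reader {
	return io.MultiReader(bytes.NewReader(r.buf.Bytes()), r.src)
}
```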

* updates

* add metric back

* [bug] - Fix bug and simplify git cat-file command execution and output handling (#2719)

* add metrics for file handling

* add metrics for errors

* add metrics for file handling

* add metrics for errors

* fix tests

* add metrics for max archive depth and skipped files

* update error

* skip symlinks and dirs

* update err

* Address incompatible reader to openArchive

* remove nil check

* fix err assignment

* Allow git cat-file blob to complete before trying to handle the file
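
The fix described here is about ordering: let `git cat-file blob` run to completion (and check its exit status) before the resulting bytes are handed to the file handler, rather than handing the live stdout pipe downstream. A simplified sketch of that shape; the function name and arguments are illustrative, and the real code may stream rather than buffer the whole blob:

```go
package git

import (
	"bytes"
	"context"
	"fmt"
	"os/exec"
)

// catFileBlob runs `git cat-file blob <oid>` and returns its full output only
// after the command has exited successfully.
func catFileBlob(ctx context.Context, repoPath, oid string) ([]byte, error) {
	cmd := exec.CommandContext(ctx, "git", "-C", repoPath, "cat-file", "blob", oid)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	// Run blocks until the process has exited, so the blob is fully written
	// (or an error reported) before any handler sees it.
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("git cat-file blob %s: %w (%s)", oid, err, stderr.String())
	}
	return stdout.Bytes(), nil
}
```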

* wrap compReader with DiskBufferReader

* Allow git cat-file blob to complete before trying to handle the file

* updates

* revert stuff

* update test

* remove

* add metrics for file handling

* add metrics for errors

* fix tests

* rebase

* add metrics for errors

* add metrics for max archive depth and skipped files

* update error

* skip symlinks and dirs

* update err

* fix err assignment

* rebase

* remove

* update metric to ms

* update comments

* address comments

* reduce indentations

* inline
ahrav 2024-05-10 11:36:06 -07:00 committed by GitHub
parent 9d4eb9516f
commit 570cec7565
20 changed files with 1248 additions and 1503 deletions

go.mod (10 changes)

@@ -48,7 +48,6 @@ require (
github.com/google/go-github/v61 v61.0.0
github.com/google/uuid v1.6.0
github.com/googleapis/gax-go/v2 v2.12.4
github.com/h2non/filetype v1.1.3
github.com/hashicorp/go-retryablehttp v0.7.6
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/jlaffaye/ftp v0.2.0
@@ -69,6 +68,7 @@ require (
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.19.1
github.com/rabbitmq/amqp091-go v1.10.0
github.com/sassoftware/go-rpmutils v0.3.0
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3
github.com/shuheiktgw/go-travis v0.3.1
github.com/snowflakedb/gosnowflake v1.10.0
@@ -93,6 +93,7 @@ require (
google.golang.org/api v0.178.0
google.golang.org/protobuf v1.34.1
gopkg.in/h2non/gock.v1 v1.1.2
pault.ag/go/debian v0.16.0
pgregory.net/rapid v1.1.0
sigs.k8s.io/yaml v1.4.0
)
@@ -101,7 +102,6 @@ require (
cloud.google.com/go v0.112.2 // indirect
cloud.google.com/go/auth v0.3.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
cloud.google.com/go/compute v1.25.1 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
cloud.google.com/go/iam v1.1.7 // indirect
cloud.google.com/go/longrunning v0.5.6 // indirect
@@ -118,6 +118,7 @@ require (
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/DataDog/zstd v1.5.5 // indirect
github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.11.4 // indirect
@@ -137,7 +138,6 @@ require (
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect
github.com/containerd/containerd v1.7.12 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
@@ -198,6 +198,7 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/s3 v1.1.4 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/kjk/lzma v0.0.0-20161016003348-3fd93898850d // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
@@ -258,6 +259,7 @@ require (
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
github.com/yuin/goldmark v1.5.4 // indirect
github.com/yuin/goldmark-emoji v1.0.2 // indirect
@@ -278,7 +280,6 @@ require (
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.21.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240429193739-8cf5692501f6 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 // indirect
@@ -286,4 +287,5 @@ require (
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
pault.ag/go/topsort v0.1.1 // indirect
)

go.sum (225 changes)

@@ -7,43 +7,23 @@ cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTj
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM=
cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4=
cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw=
cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms=
cloud.google.com/go/auth v0.2.0 h1:y6oTcpMSbOcXbwYgUUrvI+mrQ2xbrcdpPgtVbCGTLTk=
cloud.google.com/go/auth v0.2.0/go.mod h1:+yb+oy3/P0geX6DLKlqiGHARGR6EX2GRtYCzWOCQSbU=
cloud.google.com/go/auth v0.2.2 h1:gmxNJs4YZYcw6YvKRtVBaF2fyUE6UrWPyzU8jHvYfmI=
cloud.google.com/go/auth v0.2.2/go.mod h1:2bDNJWtWziDT3Pu1URxHHbkHE/BbOCuyUiKIGcNvafo=
cloud.google.com/go/auth v0.3.0 h1:PRyzEpGfx/Z9e8+lHsbkoUVXD0gnu4MNmm7Gp8TQNIs=
cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9ogv5w=
cloud.google.com/go/auth/oauth2adapt v0.2.0 h1:FR8zevgQwu+8CqiOT5r6xCmJa3pJC/wdXEEPF1OkNhA=
cloud.google.com/go/auth/oauth2adapt v0.2.0/go.mod h1:AfqujpDAlTfLfeCIl/HJZZlIxD8+nJoZ5e0x1IxGq5k=
cloud.google.com/go/auth/oauth2adapt v0.2.1 h1:VSPmMmUlT8CkIZ2PzD9AlLN+R3+D1clXMWHHa6vG/Ag=
cloud.google.com/go/auth/oauth2adapt v0.2.1/go.mod h1:tOdK/k+D2e4GEwfBRA48dKNQiDsqIXxLh7VU319eV0g=
cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=
cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40=
cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU=
cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM=
cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA=
cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg=
cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s=
cloud.google.com/go/longrunning v0.5.6 h1:xAe8+0YaWoCKr9t1+aWe+OeQgN/iJK1fEgZSXmjuEaE=
cloud.google.com/go/longrunning v0.5.6/go.mod h1:vUaDrWYOMKRuhiv6JBnn49YxCPz2Ayn9GqyjaBT8/mA=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/secretmanager v1.12.0 h1:e5pIo/QEgiFiHPVJPxM5jbtUr4O/u5h2zLHYtkFQr24=
cloud.google.com/go/secretmanager v1.12.0/go.mod h1:Y1Gne3Ag+fZ2TDTiJc8ZJCMFbi7k1rYT4Rw30GXfvlk=
cloud.google.com/go/secretmanager v1.13.0 h1:nQ/Ca2Gzm/OEP8tr1hiFdHRi5wAnAmsm9qTjwkivyrQ=
cloud.google.com/go/secretmanager v1.13.0/go.mod h1:yWdfNmM2sLIiyv6RM6VqWKeBV7CdS0SO3ybxJJRhBEs=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
@@ -100,6 +80,8 @@ github.com/BobuSumisu/aho-corasick v1.0.3/go.mod h1:hm4jLcvZKI2vRF2WDU1N4p/jpWtp
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU=
github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
@@ -109,8 +91,6 @@ github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7
github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w=
github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/TheZeroSlave/zapsentry v1.22.1 h1:NB7JW4SDlWCdEZ+7qqbjfS3hkvuJuTRAvHh4RRKo4BY=
github.com/TheZeroSlave/zapsentry v1.22.1/go.mod h1:D1YMfSuu6xnkhwFXxrronesmsiyDhIqo+86I3Ok+r64=
github.com/TheZeroSlave/zapsentry v1.23.0 h1:TKyzfEL7LRlRr+7AvkukVLZ+jZPC++ebCUv7ZJHl1AU=
github.com/TheZeroSlave/zapsentry v1.23.0/go.mod h1:3DRFLu4gIpnCTD4V9HMCBSaqYP8gYU7mZickrs2/rIY=
github.com/adrg/strutil v0.3.1 h1:OLvSS7CSJO8lBii4YmBt8jiK9QOtB9CzCzwl4Ic/Fz4=
@@ -125,8 +105,7 @@ github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk
github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA=
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
@@ -138,46 +117,6 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aws/aws-sdk-go v1.51.14 h1:qedX6zZEO1a+5kra+D4ythOYR3TgaROC0hTPxhTFh8I=
github.com/aws/aws-sdk-go v1.51.14/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.16 h1:vnWKK8KjbftEkuPX8bRj3WHsLy1uhotn0eXptpvrxJI=
github.com/aws/aws-sdk-go v1.51.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.17 h1:Cfa40lCdjv9OxC3X1Ks3a6O1Tu3gOANSyKHOSw/zuWU=
github.com/aws/aws-sdk-go v1.51.17/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.18 h1:JKrk49ZlBTyKa4+droU7U/hk0QG84v91xaA58O0LPdo=
github.com/aws/aws-sdk-go v1.51.18/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.19 h1:jp/Vx/mUpXttthvvo/4/Nn/3+zumirIlAFkp1Irf1kM=
github.com/aws/aws-sdk-go v1.51.19/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.20 h1:ziM90ujYHKKkoTZL+Wg2LwjbQecL+l298GGJeG4ktZs=
github.com/aws/aws-sdk-go v1.51.20/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.21 h1:UrT6JC9R9PkYYXDZBV0qDKTualMr+bfK2eboTknMgbs=
github.com/aws/aws-sdk-go v1.51.21/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.22 h1:VL2p2JgC32myt7DMEcbe1devdtgGSgMNvZpkcdvlxq4=
github.com/aws/aws-sdk-go v1.51.22/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.23 h1:/3TEdsEE/aHmdKGw2NrOp7Sdea76zfffGkTTSXTsDxY=
github.com/aws/aws-sdk-go v1.51.23/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.24 h1:nwL5MaommPkwb7Ixk24eWkdx5HY4of1gD10kFFVAl6A=
github.com/aws/aws-sdk-go v1.51.24/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.25 h1:DjTT8mtmsachhV6yrXR8+yhnG6120dazr720nopRsls=
github.com/aws/aws-sdk-go v1.51.25/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.26 h1:fYud+95lh9B89fAlRtgYpY8CcJF4T7JrWkLMq4GGCOo=
github.com/aws/aws-sdk-go v1.51.26/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.27 h1:ZprksHovT4rFfNBHB+Bc/0p4PTntAnTlZP39DMA/Qp8=
github.com/aws/aws-sdk-go v1.51.27/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.28 h1:x3CV5xjnL4EbVLaPXulBOxqiq2dkc9o6+50xxT3tvXY=
github.com/aws/aws-sdk-go v1.51.28/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.29 h1:18I5kjEcbyJOH4l2EjQyJJG6v3uPjrzwG0uiSD3vJEM=
github.com/aws/aws-sdk-go v1.51.29/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.30 h1:RVFkjn9P0JMwnuZCVH0TlV5k9zepHzlbc4943eZMhGw=
github.com/aws/aws-sdk-go v1.51.30/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.31 h1:4TM+sNc+Dzs7wY1sJ0+J8i60c6rkgnKP1pvPx8ghsSY=
github.com/aws/aws-sdk-go v1.51.31/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.51.32 h1:A6mPui7QP4mwmovyzgtdedbRbNur1Iu0/El7hBWNHms=
github.com/aws/aws-sdk-go v1.51.32/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.52.2 h1:l4g9wBXRBlvCtScvv4iLZCzLCtR7BFJcXOnOGQ20orw=
github.com/aws/aws-sdk-go v1.52.2/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.52.4 h1:9VsBVJ2TKf8xPP3+yIPGSYcEBIEymXsJzQoFgQuyvA0=
github.com/aws/aws-sdk-go v1.52.4/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.52.6 h1:nw1AMg0wIj5tTnI89KaDe9G5aISqXm4KJEe1DfNbFvA=
github.com/aws/aws-sdk-go v1.52.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw=
@ -203,8 +142,6 @@ github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/bradleyfalzon/ghinstallation/v2 v2.10.0 h1:XWuWBRFEpqVrHepQob9yPS3Xg4K3Wr9QCx4fu8HbUNg=
github.com/bradleyfalzon/ghinstallation/v2 v2.10.0/go.mod h1:qoGA4DxWPaYTgVCrmEspVSjlTu4WYAiSxMIhorMRXXc=
github.com/brianvoe/gofakeit/v7 v7.0.2 h1:jzYT7Ge3RDHw7J1CM1kwu0OQywV9vbf2qSGxBS72TCY=
github.com/brianvoe/gofakeit/v7 v7.0.2/go.mod h1:QXuPeBw164PJCzCUZVmgpgHJ3Llj49jSLVkKPMtxtxA=
github.com/brianvoe/gofakeit/v7 v7.0.3 h1:tGCt+eYfhTMWE1ko5G2EO1f/yE44yNpIwUb4h32O0wo=
github.com/brianvoe/gofakeit/v7 v7.0.3/go.mod h1:QXuPeBw164PJCzCUZVmgpgHJ3Llj49jSLVkKPMtxtxA=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
@@ -215,10 +152,6 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charmbracelet/bubbles v0.18.0 h1:PYv1A036luoBGroX6VWjQIE9Syf2Wby2oOl/39KLfy0=
github.com/charmbracelet/bubbles v0.18.0/go.mod h1:08qhZhtIwzgrtBjAcJnij1t1H0ZRjwHyGsy6AL11PSw=
github.com/charmbracelet/bubbletea v0.25.0 h1:bAfwk7jRz7FKFl9RzlIULPkStffg5k6pNt5dywy4TcM=
github.com/charmbracelet/bubbletea v0.25.0/go.mod h1:EN3QDR1T5ZdWmdfDzYcqOCAps45+QIJbLOBxmVNWNNg=
github.com/charmbracelet/bubbletea v0.26.1 h1:xujcQeF73rh4jwu3+zhfQsvV18x+7zIjlw7/CYbzGJ0=
github.com/charmbracelet/bubbletea v0.26.1/go.mod h1:FzKr7sKoO8iFVcdIBM9J0sJOcQv5nDQaYwsee3kpbgo=
github.com/charmbracelet/bubbletea v0.26.2 h1:Eeb+n75Om9gQ+I6YpbCXQRKHt5Pn4vMwusQpwLiEgJQ=
github.com/charmbracelet/bubbletea v0.26.2/go.mod h1:6I0nZ3YHUrQj7YHIHlM8RySX4ZIthTliMY+W8X8b+Gs=
github.com/charmbracelet/glamour v0.7.0 h1:2BtKGZ4iVJCDfMF229EzbeR1QRKLWztO9dMtjmqZSng=
@@ -241,27 +174,20 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/coinbase/waas-client-library-go v1.0.8 h1:AdbTmBQpsSUx947GZd5/68BhNBw1CSwTfE2PcnVy3Ao=
github.com/coinbase/waas-client-library-go v1.0.8/go.mod h1:RVKozprfdfMiK92ATZUWHRs0EFGHQj4rbEJjzzZzI1I=
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 h1:q2hJAaP1k2wIvVRd/hEHD7lacgqrCPS+k8g1MndzfWY=
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0=
github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
github.com/couchbase/gocb/v2 v2.8.0 h1:KoG44zWrP4QgK724D7D2rXHgRlztwkAPFQVApJCJaB4=
github.com/couchbase/gocb/v2 v2.8.0/go.mod h1:GL6M8F4eB5ZuoTYh2RzwCUheVVi4EADdCQ3yc52kqUI=
github.com/couchbase/gocb/v2 v2.8.1 h1:syeJEVy36IvUy4wyzK/74M4wc4OJ2eWZ1d6yWG31Qno=
github.com/couchbase/gocb/v2 v2.8.1/go.mod h1:xI7kkiz4IhdrhBAAEcKC6R2oqVXxpMIV/ZkmxB+PWgM=
github.com/couchbase/gocbcore/v10 v10.4.0 h1:ItBAQdxl5I9CBkt/XqlRB/Ni4Ej2k2OK1ClB2HHipVE=
github.com/couchbase/gocbcore/v10 v10.4.0/go.mod h1:lYQIIk+tzoMcwtwU5GzPbDdqEkwkH3isI2rkSpfL0oM=
github.com/couchbase/gocbcore/v10 v10.4.1 h1:2vZjYRTbSCp1HEcL3iFQv+r4HwiI13VhdnbTku+E/+M=
github.com/couchbase/gocbcore/v10 v10.4.1/go.mod h1:rulbgUK70EuyRUiLQ0LhQAfSI/Rl+jWws8tTbHzvB6M=
github.com/couchbase/gocbcoreps v0.1.2 h1:wlGyyMnkWpCNOlTtfy8UG+8XZsFtqTJtPXz63+QKC58=
github.com/couchbase/gocbcoreps v0.1.2/go.mod h1:33hSdOKnrUVaBqw4+RiqW+2JoD8ylkbvqm89Wg81uXk=
github.com/couchbase/goprotostellar v1.0.2 h1:yoPbAL9sCtcyZ5e/DcU5PRMOEFaJrF9awXYu3VPfGls=
github.com/couchbase/goprotostellar v1.0.2/go.mod h1:5/yqVnZlW2/NSbAWu1hPJCFBEwjxgpe0PFFOlRixnp4=
github.com/couchbaselabs/gocaves/client v0.0.0-20230307083111-cc3960c624b1/go.mod h1:AVekAZwIY2stsJOMWLAS/0uA/+qdp7pjO8EHnl61QkY=
github.com/couchbaselabs/gocaves/client v0.0.0-20230404095311-05e3ba4f0259 h1:2TXy68EGEzIMHOx9UvczR5ApVecwCfQZ0LjkmwMI6g4=
github.com/couchbaselabs/gocaves/client v0.0.0-20230404095311-05e3ba4f0259/go.mod h1:AVekAZwIY2stsJOMWLAS/0uA/+qdp7pjO8EHnl61QkY=
github.com/couchbaselabs/gocbconnstr/v2 v2.0.0-20230515165046-68b522a21131 h1:2EAfFswAfgYn3a05DVcegiw6DgMgn1Mv5eGz6IHt1Cw=
@@ -348,10 +274,6 @@ github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXY
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-ldap/ldap/v3 v3.4.6 h1:ert95MdbiG7aWo/oPYp9btL3KJlMPKnP58r09rI8T+A=
github.com/go-ldap/ldap/v3 v3.4.6/go.mod h1:IGMQANNtxpsOzj7uUAMjpGBaOVTC4DYyIy8VsTdxmtc=
github.com/go-ldap/ldap/v3 v3.4.7 h1:3Hbd7mIB1qjd3Ra59fI3JYea/t5kykFu2CVHBca9koE=
github.com/go-ldap/ldap/v3 v3.4.7/go.mod h1:qS3Sjlu76eHfHGpUdWkAXQTw4beih+cHsco2jXlIXrk=
github.com/go-ldap/ldap/v3 v3.4.8 h1:loKJyspcRezt2Q3ZRMq2p/0v8iOurlmeXDPw6fikSvQ=
github.com/go-ldap/ldap/v3 v3.4.8/go.mod h1:qS3Sjlu76eHfHGpUdWkAXQTw4beih+cHsco2jXlIXrk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
@@ -419,8 +341,6 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
@@ -436,7 +356,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
@@ -463,16 +382,12 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA=
github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY=
@@ -488,8 +403,6 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rH
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU=
github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0=
github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -497,15 +410,14 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-retryablehttp v0.7.6 h1:TwRYfx2z2C4cLbXmT8I5PgP/xmuqASDyiVuGYfs9GZM=
github.com/hashicorp/go-retryablehttp v0.7.6/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -524,11 +436,17 @@ github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg=
github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8=
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
@@ -550,6 +468,8 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kjk/lzma v0.0.0-20161016003348-3fd93898850d h1:RnWZeH8N8KXfbwMTex/KKMYMj0FJRCF6tQubUuQ02GM=
github.com/kjk/lzma v0.0.0-20161016003348-3fd93898850d/go.mod h1:phT/jsRPBAEqjAibu1BurrabCBNTYiVI+zbmyCZJY6Q=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
@@ -582,8 +502,6 @@ github.com/launchdarkly/go-semver v1.0.2 h1:sYVRnuKyvxlmQCnCUyDkAhtmzSFRoX6rG2Xa
github.com/launchdarkly/go-semver v1.0.2/go.mod h1:xFmMwXba5Mb+3h72Z+VeSs9ahCvKo2QFUTHRNHVqR28=
github.com/launchdarkly/go-server-sdk-evaluation/v3 v3.0.0 h1:nQbR1xCpkdU9Z71FI28bWTi5LrmtSVURy0UFcBVD5ZU=
github.com/launchdarkly/go-server-sdk-evaluation/v3 v3.0.0/go.mod h1:cwk7/7SzNB2wZbCZS7w2K66klMLBe3NFM3/qd3xnsRc=
github.com/launchdarkly/go-server-sdk/v7 v7.3.0 h1:blc8npHPjhXGs2NU68YSKby6Xkxp16aDSObLt3W5Qww=
github.com/launchdarkly/go-server-sdk/v7 v7.3.0/go.mod h1:EY2ag+p9HnNXiG4pJ+y7QG2gqCYEoYD+NJgwkhmUUqk=
github.com/launchdarkly/go-server-sdk/v7 v7.4.0 h1:GaxFHjLuJzPTnadrQ2FcwSgW2Q1q2ZK/JtxpN5Q6vus=
github.com/launchdarkly/go-server-sdk/v7 v7.4.0/go.mod h1:EY2ag+p9HnNXiG4pJ+y7QG2gqCYEoYD+NJgwkhmUUqk=
github.com/launchdarkly/go-test-helpers/v2 v2.2.0 h1:L3kGILP/6ewikhzhdNkHy1b5y4zs50LueWenVF0sBbs=
@@ -622,8 +540,6 @@ github.com/mholt/archiver/v4 v4.0.0-alpha.8 h1:tRGQuDVPh66WCOelqe6LIGh0gwmfwxUrS
github.com/mholt/archiver/v4 v4.0.0-alpha.8/go.mod h1:5f7FUYGXdJWUjESffJaYR4R60VhnHxb2X3T1teMyv5A=
github.com/microcosm-cc/bluemonday v1.0.25 h1:4NEwSfiJ+Wva0VxN5B8OwMicaJvD8r9tlJWm9rtloEg=
github.com/microcosm-cc/bluemonday v1.0.25/go.mod h1:ZIOjCQp1OrzBBPIJmfX4qDYFuhU02nx4bn030ixfHLE=
github.com/microsoft/go-mssqldb v1.7.0 h1:sgMPW0HA6Ihd37Yx0MzHyKD726C2kY/8KJsQtXHNaAs=
github.com/microsoft/go-mssqldb v1.7.0/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA=
github.com/microsoft/go-mssqldb v1.7.1 h1:KU/g8aWeM3Hx7IMOFpiwYiUkU+9zeISb4+tx3ScVfsM=
github.com/microsoft/go-mssqldb v1.7.1/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -644,8 +560,6 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs=
github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns=
github.com/muesli/ansi v0.0.0-20211031195517-c9f0611b6c70 h1:kMlmsLSbjkikxQJ1IPwaM+7LJ9ltFu/fi8CRzvSnQmA=
github.com/muesli/ansi v0.0.0-20211031195517-c9f0611b6c70/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
@@ -700,8 +614,6 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -711,8 +623,6 @@ github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSz
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rabbitmq/amqp091-go v1.9.0 h1:qrQtyzB4H8BQgEuJwhmVQqVHB9O4+MNDJCCAcpc3Aoo=
github.com/rabbitmq/amqp091-go v1.9.0/go.mod h1:+jPrT9iY2eLjRaMSRHUhc3z14E/l85kv/f+6luSD3pc=
github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw=
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -726,6 +636,8 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
github.com/sahilm/fuzzy v0.1.1-0.20230530133925-c48e322e2a8f h1:MvTmaQdww/z0Q4wrYjDSCcZ78NoftLQyHBSLW/Cx79Y=
github.com/sahilm/fuzzy v0.1.1-0.20230530133925-c48e322e2a8f/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y=
github.com/sassoftware/go-rpmutils v0.3.0 h1:tE4TZ8KcOXay5iIP64P291s6Qxd9MQCYhI7DU+f3gFA=
github.com/sassoftware/go-rpmutils v0.3.0/go.mod h1:hM9wdxFsjUFR/tJ6SMsLrJuChcucCa0DsCzE9RMfwMo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
@@ -770,20 +682,12 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502 h1:34icjjmqJ2HPjrSuJYEkdZ+0ItmGQAQ75cRHIiftIyE=
github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8=
github.com/testcontainers/testcontainers-go v0.29.1 h1:z8kxdFlovA2y97RWx98v/TQ+tR+SXZm6p35M+xB92zk=
github.com/testcontainers/testcontainers-go v0.29.1/go.mod h1:SnKnKQav8UcgtKqjp/AD8bE1MqZm+3TDb/B8crE3XnI=
github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E=
github.com/testcontainers/testcontainers-go v0.30.0/go.mod h1:K+kHNGiM5zjklKjgTtcrEetF3uhWbMUyqAQoyoh8Pf0=
github.com/testcontainers/testcontainers-go/modules/mssql v0.29.1 h1:N5GqwKhyoytElzELjR9KCdiAvvy+6GpJ7ibtvdyY/o4=
github.com/testcontainers/testcontainers-go/modules/mssql v0.29.1/go.mod h1:Hc7lzst9KkM+JMMrv4OjQOE7GUEDzzjBXVfq04iBE6E=
github.com/testcontainers/testcontainers-go/modules/mssql v0.30.0 h1:jvDa5EImFyFNv6LLbd3qdcq1TXBY2LzOUZ0GbVbsI7I=
github.com/testcontainers/testcontainers-go/modules/mssql v0.30.0/go.mod h1:BYBQszX47xz69xeTg5hX62ndgwxdfAIs6w/94Pk1Z1M=
github.com/testcontainers/testcontainers-go/modules/mysql v0.29.1 h1:SnJtZNcskgxOMyVAT7M+MQjpveP59nwKzlBw2ItX+C8=
github.com/testcontainers/testcontainers-go/modules/mysql v0.29.1/go.mod h1:VhA5dV+O19sx3Y9u9bfO+fbJfP3E7RiMq0nDMEGjslw=
github.com/testcontainers/testcontainers-go/modules/mysql v0.30.0 h1:wrePvxfU/2HFALKyBqpNs6VoPPvThzHy9aN+PCxse9g=
github.com/testcontainers/testcontainers-go/modules/mysql v0.30.0/go.mod h1:Srnlf7wwA7s6K4sKKhjAoBHJcKorRINR/i5dCA4ZyGk=
github.com/testcontainers/testcontainers-go/modules/postgres v0.29.1 h1:hTn3MzhR9w4btwfzr/NborGCaeNZG0MPBpufeDj10KA=
github.com/testcontainers/testcontainers-go/modules/postgres v0.29.1/go.mod h1:YsWyy+pHDgvGdi0axGOx6CGXWsE6eqSaApyd1FYYSSc=
github.com/testcontainers/testcontainers-go/modules/postgres v0.30.0 h1:D3HFqpZS90iRGAN7M85DFiuhPfvYvFNnx8urQ6mPAvo=
github.com/testcontainers/testcontainers-go/modules/postgres v0.30.0/go.mod h1:e1sKxwUOkqzvaqdHl/oV9mUtFmkDPTfBGp0po2tnWQU=
github.com/tetratelabs/wazero v1.7.0 h1:jg5qPydno59wqjpGrHph81lbtHzTrWzwwtD4cD88+hQ=
@@ -806,20 +710,12 @@ github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0o
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
github.com/wasilibs/go-re2 v1.5.1 h1:a+Gb1mx6Q7MmU4d+3BCnnN28U2/cnADmY1oRRanQi10=
github.com/wasilibs/go-re2 v1.5.1/go.mod h1:UqqxQ1O99boQUm1r61H/IYGiGQOS/P88K7hU5nLNkEg=
github.com/wasilibs/go-re2 v1.5.2 h1:fDO2TJrRzRrv3jD0gzOvmZ2UM4Yt9YXOEdLrlNc/Ies=
github.com/wasilibs/go-re2 v1.5.2/go.mod h1:UqqxQ1O99boQUm1r61H/IYGiGQOS/P88K7hU5nLNkEg=
github.com/wasilibs/nottinygc v0.4.0 h1:h1TJMihMC4neN6Zq+WKpLxgd9xCFMw7O9ETLwY2exJQ=
github.com/wasilibs/nottinygc v0.4.0/go.mod h1:oDcIotskuYNMpqMF23l7Z8uzD4TC0WXHK8jetlB3HIo=
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ=
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM=
github.com/xanzy/go-gitlab v0.101.0 h1:qRgvX8DNE19zRugB6rnnZMZ5ubhITSKPLNWEyc6UIPg=
github.com/xanzy/go-gitlab v0.101.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI=
github.com/xanzy/go-gitlab v0.102.0 h1:ExHuJ1OTQ2yt25zBMMj0G96ChBirGYv8U7HyUiYkZ+4=
github.com/xanzy/go-gitlab v0.102.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI=
github.com/xanzy/go-gitlab v0.103.0 h1:J9pTQoq0GsEFqzd6srCM1QfdfKAxSNz6mT6ntrpNF2w=
github.com/xanzy/go-gitlab v0.103.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI=
github.com/xanzy/go-gitlab v0.104.1 h1:g/liXIPJH0jsTwVuzTAUMiKdTf6Qup3u2XZq5Rp90Wc=
github.com/xanzy/go-gitlab v0.104.1/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
@@ -832,6 +728,8 @@ github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -851,8 +749,6 @@ github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
go.einride.tech/aip v0.60.0 h1:h6bgabZ5BCfAptbGex8jbh3VvPBRLa6xq+pQ1CAjHYw=
go.einride.tech/aip v0.60.0/go.mod h1:SdLbSbgSU60Xkb4TMkmsZEQPHeEWx0ikBoq5QnqZvdg=
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc=
go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -873,15 +769,14 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMey
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
@@ -905,12 +800,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -921,14 +812,6 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw=
golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ=
golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0 h1:985EYyeCOxTpcgOTJpflJUwOeEz0CQOdPt73OzpE9F8=
golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI=
golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 h1:ESSUROHIBHg7USnszlcdmjBEwdMj9VUvU+OPk4yl2mc=
golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI=
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY=
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
@@ -952,8 +835,6 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -985,10 +866,7 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -996,10 +874,6 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo=
golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1011,8 +885,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1054,20 +926,15 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1076,12 +943,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1096,8 +959,6 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
@@ -1136,10 +997,6 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1158,20 +1015,6 @@ google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk=
google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis=
google.golang.org/api v0.173.0 h1:fz6B7GWYWLS/HfruiTsRYVKQQApJ6vasTYWAK6+Qo8g=
google.golang.org/api v0.173.0/go.mod h1:ins7pTzjeBPQ3SdC/plzki6d/dQWwAWy8qVZ4Vgkzl8=
google.golang.org/api v0.174.0 h1:zB1BWl7ocxfTea2aQ9mgdzXjnfPySllpPOskdnO+q34=
google.golang.org/api v0.174.0/go.mod h1:aC7tB6j0HR1Nl0ni5ghpx6iLasmAX78Zkh/wgxAAjLg=
google.golang.org/api v0.175.0 h1:9bMDh10V9cBuU8N45Wlc3cKkItfqMRV0Fi8UscLEtbY=
google.golang.org/api v0.175.0/go.mod h1:Rra+ltKu14pps/4xTycZfobMgLpbosoaaL7c+SEMrO8=
google.golang.org/api v0.176.0 h1:dHj1/yv5Dm/eQTXiP9hNCRT3xzJHWXeNdRq29XbMxoE=
google.golang.org/api v0.176.0/go.mod h1:Rra+ltKu14pps/4xTycZfobMgLpbosoaaL7c+SEMrO8=
google.golang.org/api v0.176.1 h1:DJSXnV6An+NhJ1J+GWtoF2nHEuqB1VNoTfnIbjNvwD4=
google.golang.org/api v0.176.1/go.mod h1:j2MaSDYcvYV1lkZ1+SMW4IeF90SrEyFA+tluDYWRrFg=
google.golang.org/api v0.177.0 h1:8a0p/BbPa65GlqGWtUKxot4p0TV8OGOfyTjtmkXNXmk=
google.golang.org/api v0.177.0/go.mod h1:srbhue4MLjkjbkux5p3dw/ocYOSZTaIEvf7bCOnFQDw=
google.golang.org/api v0.178.0 h1:yoW/QMI4bRVCHF+NWOTa4cL8MoWL3Jnuc7FlcFF91Ok=
google.golang.org/api v0.178.0/go.mod h1:84/k2v8DFpDRebpGcooklv/lais3MEfqpaBLA12gl2U=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
@ -1179,8 +1022,6 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@ -1196,22 +1037,10 @@ google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y=
google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw=
google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw=
google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc=
google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s=
google.golang.org/genproto/googleapis/api v0.0.0-20240429193739-8cf5692501f6 h1:DTJM0R8LECCgFeUwApvcEJHz85HLagW8uRENYxHh1ww=
google.golang.org/genproto/googleapis/api v0.0.0-20240429193739-8cf5692501f6/go.mod h1:10yRODfgim2/T8csjQsMPgZOMvtytXKTDRzH6HRGzRw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240325203815-454cdb8f5daa h1:RBgMaUMP+6soRkik4VoN8ojR2nex2TqZwjSSogic+eo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240325203815-454cdb8f5daa/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 h1:DujSIu+2tC9Ht0aPNA7jgj23Iq8Ewi5sgkQ++wdvonE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@ -1224,10 +1053,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8=
google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@ -1239,12 +1064,6 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4=
google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1281,6 +1100,10 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
pault.ag/go/debian v0.16.0 h1:fivXn/IO9rn2nzTGndflDhOkNU703Axs/StWihOeU2g=
pault.ag/go/debian v0.16.0/go.mod h1:JFl0XWRCv9hWBrB5MDDZjA5GSEs1X3zcFK/9kCNIUmE=
pault.ag/go/topsort v0.1.1 h1:L0QnhUly6LmTv0e3DEzbN2q6/FGgAcQvaEw65S53Bg4=
pault.ag/go/topsort v0.1.1/go.mod h1:r1kc/L0/FZ3HhjezBIPaNVhkqv8L0UJ9bxRuHRVZ0q4=
pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw=
pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=

pkg/handlers/ar.go (new file, +80)

@ -0,0 +1,80 @@
package handlers
import (
"errors"
"fmt"
"io"
"time"
"pault.ag/go/debian/deb"
logContext "github.com/trufflesecurity/trufflehog/v3/pkg/context"
)
// arHandler specializes defaultHandler to handle AR archive formats. By embedding defaultHandler,
// arHandler inherits and can further customize the common handling behavior such as skipping binaries.
type arHandler struct{ *defaultHandler }
// newARHandler creates an arHandler.
func newARHandler() *arHandler {
return &arHandler{defaultHandler: newDefaultHandler(arHandlerType)}
}
// HandleFile processes AR formatted files. It opens the archive, iterates over each entry,
// and forwards each entry's contents to the non-archive content handler for chunking.
func (h *arHandler) HandleFile(ctx logContext.Context, input readSeekCloser) (chan []byte, error) {
archiveChan := make(chan []byte, defaultBufferSize)
go func() {
ctx, cancel := logContext.WithTimeout(ctx, maxTimeout)
defer cancel()
defer close(archiveChan)
// Update the metrics for the file processing.
start := time.Now()
var err error
// Wrap in a closure so the deferred call observes the final value of err,
// not the nil value err holds when the defer statement executes.
defer func() { h.measureLatencyAndHandleErrors(start, err) }()
var arReader *deb.Ar
arReader, err = deb.LoadAr(input)
if err != nil {
ctx.Logger().Error(err, "error reading AR")
return
}
if err = h.processARFiles(ctx, arReader, archiveChan); err != nil {
ctx.Logger().Error(err, "error processing AR files")
}
}()
return archiveChan, nil
}
func (h *arHandler) processARFiles(ctx logContext.Context, reader *deb.Ar, archiveChan chan []byte) error {
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
arEntry, err := reader.Next()
if err != nil {
if errors.Is(err, io.EOF) {
ctx.Logger().V(3).Info("AR archive fully processed")
return nil
}
return fmt.Errorf("error reading AR payload: %w", err)
}
fileSize := arEntry.Size
fileCtx := logContext.WithValues(ctx, "filename", arEntry.Name, "size", fileSize)
if err := h.handleNonArchiveContent(fileCtx, arEntry.Data, archiveChan); err != nil {
fileCtx.Logger().Error(err, "error handling archive content in AR")
h.metrics.incErrors()
}
h.metrics.incFilesProcessed()
h.metrics.observeFileSize(fileSize)
}
}
}
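For orientation (not part of the diff), a minimal sketch of how a caller might drive the new arHandler; the scanDebFile name and the use of *os.File as the readSeekCloser input are illustrative assumptions:

func scanDebFile(ctx logContext.Context, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	// HandleFile returns immediately; extraction runs in a goroutine and
	// extracted chunks arrive on the returned channel until it is closed.
	dataChan, err := newARHandler().HandleFile(ctx, f)
	if err != nil {
		return err
	}
	for chunk := range dataChan {
		_ = chunk // hand each chunk to the secret-scanning pipeline
	}
	return nil
}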

pkg/handlers/ar_test.go (new file, +32)

@ -0,0 +1,32 @@
package handlers
import (
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/trufflesecurity/trufflehog/v3/pkg/context"
)
func TestHandleARFile(t *testing.T) {
file, err := os.Open("testdata/test.deb")
assert.Nil(t, err)
defer file.Close()
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
handler := newARHandler()
archiveChan, err := handler.HandleFile(context.AddLogger(ctx), file)
assert.NoError(t, err)
wantChunkCount := 102
count := 0
for range archiveChan {
count++
}
assert.Equal(t, wantChunkCount, count)
}


@ -1,554 +0,0 @@
package handlers
import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/gabriel-vasile/mimetype"
"github.com/h2non/filetype"
"github.com/mholt/archiver/v4"
"github.com/trufflesecurity/trufflehog/v3/pkg/common"
logContext "github.com/trufflesecurity/trufflehog/v3/pkg/context"
"github.com/trufflesecurity/trufflehog/v3/pkg/sources"
)
type ctxKey int
const (
depthKey ctxKey = iota
errMaxArchiveDepthReached = "max archive depth reached"
)
var (
maxDepth = 5
maxSize = 250 * 1024 * 1024 // 250MB
maxTimeout = time.Duration(30) * time.Second
defaultBufferSize = 512
)
// Ensure the Archive satisfies the interfaces at compile time.
var _ SpecializedHandler = (*Archive)(nil)
// Archive is a handler for extracting and decompressing archives.
type Archive struct {
size int
currentDepth int
skipBinaries bool
skipArchives bool
}
// New creates a new Archive handler with the provided options.
func (a *Archive) New(opts ...Option) {
for _, opt := range opts {
opt(a)
}
}
// SetArchiveMaxSize sets the maximum size of the archive.
func SetArchiveMaxSize(size int) {
maxSize = size
}
// SetArchiveMaxDepth sets the maximum depth of the archive.
func SetArchiveMaxDepth(depth int) {
maxDepth = depth
}
// SetArchiveMaxTimeout sets the maximum timeout for the archive handler.
func SetArchiveMaxTimeout(timeout time.Duration) {
maxTimeout = timeout
}
// FromFile extracts the files from an archive.
func (a *Archive) FromFile(originalCtx logContext.Context, data io.Reader) chan []byte {
if a.skipArchives {
return nil
}
archiveChan := make(chan []byte, defaultBufferSize)
go func() {
ctx, cancel := logContext.WithTimeout(originalCtx, maxTimeout)
logger := logContext.AddLogger(ctx).Logger()
defer cancel()
defer close(archiveChan)
err := a.openArchive(ctx, 0, data, archiveChan)
if err != nil {
if errors.Is(err, archiver.ErrNoMatch) {
return
}
logger.Error(err, "error unarchiving chunk.")
}
}()
return archiveChan
}
// openArchive takes a reader and extracts the contents up to the maximum depth.
func (a *Archive) openArchive(ctx logContext.Context, depth int, reader io.Reader, archiveChan chan []byte) error {
if common.IsDone(ctx) {
return ctx.Err()
}
if depth >= maxDepth {
return fmt.Errorf(errMaxArchiveDepthReached)
}
format, arReader, err := archiver.Identify("", reader)
if errors.Is(err, archiver.ErrNoMatch) && depth > 0 {
return a.handleNonArchiveContent(ctx, arReader, archiveChan)
}
if err != nil {
return err
}
switch archive := format.(type) {
case archiver.Decompressor:
// Decompress the archive and feed the decompressed data back into the archive handler to extract any nested archives.
compReader, err := archive.OpenReader(arReader)
if err != nil {
return err
}
defer compReader.Close()
return a.openArchive(ctx, depth+1, compReader, archiveChan)
case archiver.Extractor:
return archive.Extract(logContext.WithValue(ctx, depthKey, depth+1), arReader, nil, a.extractorHandler(archiveChan))
default:
return fmt.Errorf("unknown archive type: %s", format.Name())
}
}
const mimeTypeBufferSize = 512
func (a *Archive) handleNonArchiveContent(ctx logContext.Context, reader io.Reader, archiveChan chan []byte) error {
bufReader := bufio.NewReaderSize(reader, mimeTypeBufferSize)
// A buffer of 512 bytes is used since many file formats store their magic numbers within the first 512 bytes.
// If fewer bytes are read, MIME type detection may still succeed.
buffer, err := bufReader.Peek(mimeTypeBufferSize)
if err != nil && !errors.Is(err, io.EOF) {
return fmt.Errorf("unable to read file for MIME type detection: %w", err)
}
mime := mimetype.Detect(buffer)
mimeT := mimeType(mime.String())
if common.SkipFile(mime.Extension()) {
ctx.Logger().V(5).Info("skipping file", "ext", mimeT)
return nil
}
if a.skipBinaries {
if common.IsBinary(mime.Extension()) || mimeT == machOType || mimeT == octetStream {
ctx.Logger().V(5).Info("skipping binary file", "ext", mimeT)
return nil
}
}
chunkReader := sources.NewChunkReader()
chunkResChan := chunkReader(ctx, bufReader)
for data := range chunkResChan {
if err := data.Error(); err != nil {
ctx.Logger().Error(err, "error reading chunk")
continue
}
if err := common.CancellableWrite(ctx, archiveChan, data.Bytes()); err != nil {
return err
}
}
return nil
}
// IsFiletype returns true if the provided reader is an archive.
func (a *Archive) IsFiletype(_ logContext.Context, reader io.Reader) (io.Reader, bool) {
format, readerB, err := archiver.Identify("", reader)
if err != nil {
return readerB, false
}
switch format.(type) {
case archiver.Extractor:
return readerB, true
case archiver.Decompressor:
return readerB, true
default:
return readerB, false
}
}
// extractorHandler is applied to each file in an archiver.Extractor file.
func (a *Archive) extractorHandler(archiveChan chan []byte) func(context.Context, archiver.File) error {
return func(ctx context.Context, f archiver.File) error {
lCtx := logContext.AddLogger(ctx)
lCtx.Logger().V(5).Info("Handling extracted file.", "filename", f.Name())
if common.IsDone(ctx) {
return ctx.Err()
}
depth := 0
if ctxDepth, ok := ctx.Value(depthKey).(int); ok {
depth = ctxDepth
}
fReader, err := f.Open()
if err != nil {
return err
}
defer fReader.Close()
if common.SkipFile(f.Name()) {
lCtx.Logger().V(5).Info("skipping file", "filename", f.Name())
return nil
}
if a.skipBinaries && common.IsBinary(f.Name()) {
lCtx.Logger().V(5).Info("skipping binary file", "filename", f.Name())
return nil
}
return a.openArchive(lCtx, depth, fReader, archiveChan)
}
}
// ReadToMax reads up to the max size.
func (a *Archive) ReadToMax(ctx logContext.Context, reader io.Reader) (data []byte, err error) {
// Archiver v4 is in alpha and using an experimental version of
// rardecode. There is a bug somewhere with rar decoder format 29
// that can lead to a panic. An issue is open in rardecode repo
// https://github.com/nwaples/rardecode/issues/30.
defer func() {
if r := recover(); r != nil {
// Return an error from ReadToMax.
if e, ok := r.(error); ok {
err = e
} else {
err = fmt.Errorf("panic occurred: %v", r)
}
ctx.Logger().Error(err, "Panic occurred when reading archive")
}
}()
if common.IsDone(ctx) {
return nil, ctx.Err()
}
var fileContent bytes.Buffer
// Create a limited reader to ensure we don't read more than the max size.
lr := io.LimitReader(reader, int64(maxSize))
// Using io.CopyBuffer for performance advantages. Though buf is mandatory
// for the method, due to the internal implementation of io.CopyBuffer, when
// *bytes.Buffer implements io.WriterTo or io.ReaderFrom, the provided buf
// is simply ignored. Thus, we can pass nil for the buf parameter.
_, err = io.CopyBuffer(&fileContent, lr, nil)
if err != nil && !errors.Is(err, io.EOF) {
return nil, err
}
if fileContent.Len() == maxSize {
ctx.Logger().V(2).Info("Max archive size reached.")
}
return fileContent.Bytes(), nil
}
type mimeType string
const (
arMimeType mimeType = "application/x-unix-archive"
rpmMimeType mimeType = "application/x-rpm"
machOType mimeType = "application/x-mach-binary"
octetStream mimeType = "application/octet-stream"
)
// mimeTools maps MIME types to the necessary command-line tools to handle them.
// This centralizes the tool requirements for different file types.
var mimeTools = map[mimeType][]string{
arMimeType: {"ar"},
rpmMimeType: {"rpm2cpio", "cpio"},
}
// extractToolCache stores the availability of extraction tools, eliminating the need for repeated filesystem lookups.
var extractToolCache map[string]bool
func init() {
// Preload the extractToolCache with the availability status of each required tool.
extractToolCache = make(map[string]bool)
for _, tools := range mimeTools {
for _, tool := range tools {
_, err := exec.LookPath(tool)
extractToolCache[tool] = err == nil
}
}
}
func ensureToolsForMimeType(mimeType mimeType) error {
tools, exists := mimeTools[mimeType]
if !exists {
return fmt.Errorf("unsupported mime type: %s", mimeType)
}
for _, tool := range tools {
if installed := extractToolCache[tool]; !installed {
return fmt.Errorf("required tool %s is not installed", tool)
}
}
return nil
}
// HandleSpecialized takes a file path and an io.Reader representing the input file,
// and processes it based on its extension, such as handling Debian (.deb) and RPM (.rpm) packages.
// It returns an io.Reader that can be used to read the processed content of the file,
// and an error if any issues occurred during processing.
// If the file is specialized, the returned boolean is true with no error.
// The caller is responsible for closing the returned reader.
func (a *Archive) HandleSpecialized(ctx logContext.Context, reader io.Reader) (io.Reader, bool, error) {
mimeType, reader, err := determineMimeType(reader)
if err != nil {
return nil, false, err
}
switch mimeType {
case arMimeType: // includes .deb files
if err := ensureToolsForMimeType(mimeType); err != nil {
return nil, false, err
}
reader, err = a.extractDebContent(ctx, reader)
case rpmMimeType:
if err := ensureToolsForMimeType(mimeType); err != nil {
return nil, false, err
}
reader, err = a.extractRpmContent(ctx, reader)
default:
return reader, false, nil
}
if err != nil {
return nil, false, fmt.Errorf("unable to extract file with MIME type %s: %w", mimeType, err)
}
return reader, true, nil
}
// extractDebContent takes a .deb file as an io.Reader, extracts its contents
// into a temporary directory, and returns a Reader for the extracted data archive.
// It handles the extraction process by using the 'ar' command and manages temporary
// files and directories for the operation.
// The caller is responsible for closing the returned reader.
func (a *Archive) extractDebContent(ctx logContext.Context, file io.Reader) (io.ReadCloser, error) {
if a.currentDepth >= maxDepth {
return nil, fmt.Errorf(errMaxArchiveDepthReached)
}
tmpEnv, err := a.createTempEnv(ctx, file)
if err != nil {
return nil, err
}
defer os.Remove(tmpEnv.tempFileName)
defer os.RemoveAll(tmpEnv.extractPath)
cmd := exec.Command("ar", "x", tmpEnv.tempFile.Name())
cmd.Dir = tmpEnv.extractPath
if err := executeCommand(cmd); err != nil {
return nil, err
}
handler := func(ctx logContext.Context, env tempEnv, file string) (string, error) {
if strings.HasPrefix(file, "data.tar.") {
return file, nil
}
return a.handleNestedFileMIME(ctx, env, file)
}
dataArchiveName, err := a.handleExtractedFiles(ctx, tmpEnv, handler)
if err != nil {
return nil, err
}
return openDataArchive(tmpEnv.extractPath, dataArchiveName)
}
// extractRpmContent takes an .rpm file as an io.Reader, extracts its contents
// into a temporary directory, and returns a Reader for the extracted data archive.
// It handles the extraction process by using the 'rpm2cpio' and 'cpio' commands and manages temporary
// files and directories for the operation.
// The caller is responsible for closing the returned reader.
func (a *Archive) extractRpmContent(ctx logContext.Context, file io.Reader) (io.ReadCloser, error) {
if a.currentDepth >= maxDepth {
return nil, fmt.Errorf("max archive depth reached")
}
tmpEnv, err := a.createTempEnv(ctx, file)
if err != nil {
return nil, err
}
defer os.Remove(tmpEnv.tempFileName)
defer os.RemoveAll(tmpEnv.extractPath)
// Use rpm2cpio to convert the RPM file to a cpio archive and then extract it using cpio command.
cmd := exec.Command("sh", "-c", "rpm2cpio "+tmpEnv.tempFile.Name()+" | cpio -id")
cmd.Dir = tmpEnv.extractPath
if err := executeCommand(cmd); err != nil {
return nil, err
}
handler := func(ctx logContext.Context, env tempEnv, file string) (string, error) {
if strings.HasSuffix(file, ".tar.gz") {
return file, nil
}
return a.handleNestedFileMIME(ctx, env, file)
}
dataArchiveName, err := a.handleExtractedFiles(ctx, tmpEnv, handler)
if err != nil {
return nil, err
}
return openDataArchive(tmpEnv.extractPath, dataArchiveName)
}
func (a *Archive) handleNestedFileMIME(ctx logContext.Context, tempEnv tempEnv, fileName string) (string, error) {
nestedFile, err := os.Open(filepath.Join(tempEnv.extractPath, fileName))
if err != nil {
return "", err
}
defer nestedFile.Close()
mimeType, reader, err := determineMimeType(nestedFile)
if err != nil {
return "", fmt.Errorf("unable to determine MIME type of nested filename: %s, %w", nestedFile.Name(), err)
}
switch mimeType {
case arMimeType, rpmMimeType:
_, _, err = a.HandleSpecialized(ctx, reader)
default:
return "", nil
}
if err != nil {
return "", fmt.Errorf("unable to extract file with MIME type %s: %w", mimeType, err)
}
return fileName, nil
}
// determineMimeType reads from the provided reader to detect the MIME type.
// It returns the detected MIME type and a new reader that includes the read portion.
func determineMimeType(reader io.Reader) (mimeType, io.Reader, error) {
// A buffer of 512 bytes is used since many file formats store their magic numbers within the first 512 bytes.
// If fewer bytes are read, MIME type detection may still succeed.
buffer := make([]byte, 512)
n, err := reader.Read(buffer)
if err != nil && !errors.Is(err, io.EOF) {
return "", nil, fmt.Errorf("unable to read file for MIME type detection: %w", err)
}
// Create a new reader that starts with the buffer we just read
// and continues with the rest of the original reader.
reader = io.MultiReader(bytes.NewReader(buffer[:n]), reader)
kind, err := filetype.Match(buffer)
if err != nil {
return "", nil, fmt.Errorf("unable to determine file type: %w", err)
}
return mimeType(kind.MIME.Value), reader, nil
}
// handleExtractedFiles processes each file in the extracted directory using a provided handler function.
// The function iterates through the files, applying the handleFile function to each, and returns the name
// of the data archive it finds. This centralizes the logic for handling specialized files such as .deb and .rpm
// by using the appropriate handling function passed as an argument. This design allows for flexibility and reuse
// of this function across various extraction processes in the package.
func (a *Archive) handleExtractedFiles(ctx logContext.Context, env tempEnv, handleFile func(logContext.Context, tempEnv, string) (string, error)) (string, error) {
extractedFiles, err := os.ReadDir(env.extractPath)
if err != nil {
return "", fmt.Errorf("unable to read extracted directory: %w", err)
}
var dataArchiveName string
for _, file := range extractedFiles {
filename := file.Name()
filePath := filepath.Join(env.extractPath, filename)
fileInfo, err := os.Stat(filePath)
if err != nil {
return "", fmt.Errorf("unable to get file info for filename %s: %w", filename, err)
}
if fileInfo.IsDir() {
continue
}
name, err := handleFile(ctx, env, filename)
if err != nil {
return "", err
}
if name != "" {
dataArchiveName = name
break
}
}
return dataArchiveName, nil
}
type tempEnv struct {
tempFile *os.File
tempFileName string
extractPath string
}
// createTempEnv creates a temporary file and a temporary directory for extracting archives.
// The caller is responsible for removing these temporary resources
// (both the file and directory) when they are no longer needed.
func (a *Archive) createTempEnv(ctx logContext.Context, file io.Reader) (tempEnv, error) {
tempFile, err := os.CreateTemp("", "tmp")
if err != nil {
return tempEnv{}, fmt.Errorf("unable to create temporary file: %w", err)
}
extractPath, err := os.MkdirTemp("", "tmp_archive")
if err != nil {
return tempEnv{}, fmt.Errorf("unable to create temporary directory: %w", err)
}
b, err := a.ReadToMax(ctx, file)
if err != nil {
return tempEnv{}, err
}
if _, err = tempFile.Write(b); err != nil {
return tempEnv{}, fmt.Errorf("unable to write to temporary file: %w", err)
}
return tempEnv{tempFile: tempFile, tempFileName: tempFile.Name(), extractPath: extractPath}, nil
}
func executeCommand(cmd *exec.Cmd) error {
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("unable to execute command %v: %w; error: %s", cmd.String(), err, stderr.String())
}
return nil
}
func openDataArchive(extractPath string, dataArchiveName string) (io.ReadCloser, error) {
dataArchivePath := filepath.Join(extractPath, dataArchiveName)
dataFile, err := os.Open(dataArchivePath)
if err != nil {
return nil, fmt.Errorf("unable to open file: %w", err)
}
return dataFile, nil
}


@ -1,493 +0,0 @@
package handlers
import (
"archive/tar"
"bytes"
"context"
"encoding/binary"
"io"
"math/rand"
"net/http"
"os"
"regexp"
"strings"
"testing"
"time"
"github.com/h2non/filetype"
"github.com/stretchr/testify/assert"
diskbufferreader "github.com/trufflesecurity/disk-buffer-reader"
logContext "github.com/trufflesecurity/trufflehog/v3/pkg/context"
"github.com/trufflesecurity/trufflehog/v3/pkg/sources"
)
func TestArchiveHandler(t *testing.T) {
tests := map[string]struct {
archiveURL string
expectedChunks int
matchString string
}{
"gzip-single": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/one-zip.gz",
1,
"AKIAYVP4CIPPH5TNP3SW",
},
"gzip-nested": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/double-zip.gz",
1,
"AKIAYVP4CIPPH5TNP3SW",
},
"gzip-too-deep": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/six-zip.gz",
0,
"",
},
"tar-single": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/one.tar",
1,
"AKIAYVP4CIPPH5TNP3SW",
},
"tar-nested": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/two.tar",
1,
"AKIAYVP4CIPPH5TNP3SW",
},
"tar-too-deep": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/six.tar",
0,
"",
},
"targz-single": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/tar-archive.tar.gz",
1,
"AKIAYVP4CIPPH5TNP3SW",
},
"gzip-large": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/FifteenMB.gz",
1543,
"AKIAYVP4CIPPH5TNP3SW",
},
"zip-single": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/aws-canary-creds.zip",
1,
"AKIAYVP4CIPPH5TNP3SW",
},
}
for name, testCase := range tests {
t.Run(name, func(t *testing.T) {
resp, err := http.Get(testCase.archiveURL)
if err != nil || resp.StatusCode != http.StatusOK {
t.Error(err)
}
defer resp.Body.Close()
archive := Archive{}
archive.New()
newReader, err := diskbufferreader.New(resp.Body)
if err != nil {
t.Errorf("error creating reusable reader: %s", err)
}
archiveChan := archive.FromFile(logContext.Background(), newReader)
count := 0
re := regexp.MustCompile(testCase.matchString)
matched := false
for chunk := range archiveChan {
count++
if re.Match(chunk) {
matched = true
}
}
if !matched && len(testCase.matchString) > 0 {
t.Errorf("%s: Expected string not found in archive.", name)
}
if count != testCase.expectedChunks {
t.Errorf("%s: Unexpected number of chunks. Got %d, expected: %d", name, count, testCase.expectedChunks)
}
})
}
}
func TestHandleFile(t *testing.T) {
reporter := sources.ChanReporter{Ch: make(chan *sources.Chunk, 2)}
// Context cancels the operation.
canceledCtx, cancel := logContext.WithCancel(logContext.Background())
cancel()
reader, err := diskbufferreader.New(strings.NewReader("file"))
assert.NoError(t, err)
assert.False(t, HandleFile(canceledCtx, reader, &sources.Chunk{}, reporter))
// Only one chunk is sent on the channel.
// TODO: Embed a zip without making an HTTP request.
resp, err := http.Get("https://raw.githubusercontent.com/bill-rich/bad-secrets/master/aws-canary-creds.zip")
assert.NoError(t, err)
defer resp.Body.Close()
archive := Archive{}
archive.New()
reader, err = diskbufferreader.New(resp.Body)
assert.NoError(t, err)
assert.Equal(t, 0, len(reporter.Ch))
assert.True(t, HandleFile(logContext.Background(), reader, &sources.Chunk{}, reporter))
assert.Equal(t, 1, len(reporter.Ch))
}
func BenchmarkHandleFile(b *testing.B) {
file, err := os.Open("testdata/test.tgz")
assert.Nil(b, err)
defer file.Close()
archive := Archive{}
archive.New()
b.ResetTimer()
for i := 0; i < b.N; i++ {
sourceChan := make(chan *sources.Chunk, 1)
reader, err := diskbufferreader.New(file)
assert.NoError(b, err)
b.StartTimer()
go func() {
defer close(sourceChan)
HandleFile(logContext.Background(), reader, &sources.Chunk{}, sources.ChanReporter{Ch: sourceChan})
}()
for range sourceChan {
}
b.StopTimer()
}
}
func TestHandleFileSkipBinaries(t *testing.T) {
filename := createBinaryArchive(t)
defer os.Remove(filename)
file, err := os.Open(filename)
assert.NoError(t, err)
reader, err := diskbufferreader.New(file)
assert.NoError(t, err)
ctx, cancel := logContext.WithTimeout(logContext.Background(), 5*time.Second)
defer cancel()
sourceChan := make(chan *sources.Chunk, 1)
go func() {
defer close(sourceChan)
HandleFile(ctx, reader, &sources.Chunk{}, sources.ChanReporter{Ch: sourceChan}, WithSkipBinaries(true))
}()
count := 0
for range sourceChan {
count++
}
// The binary archive should not be scanned.
assert.Equal(t, 0, count)
}
func createBinaryArchive(t *testing.T) string {
t.Helper()
f, err := os.CreateTemp("", "testbinary")
assert.NoError(t, err)
defer os.Remove(f.Name())
r := rand.New(rand.NewSource(time.Now().UnixNano()))
randomBytes := make([]byte, 1024)
_, err = r.Read(randomBytes)
assert.NoError(t, err)
_, err = f.Write(randomBytes)
assert.NoError(t, err)
// Create and write some structured binary data (e.g., integers, floats)
for i := 0; i < 10; i++ {
err = binary.Write(f, binary.LittleEndian, int32(rand.Intn(1000)))
assert.NoError(t, err)
err = binary.Write(f, binary.LittleEndian, rand.Float64())
assert.NoError(t, err)
}
tarFile, err := os.Create("example.tar")
if err != nil {
t.Fatal(err)
}
defer tarFile.Close()
// Create a new tar archive.
tarWriter := tar.NewWriter(tarFile)
defer tarWriter.Close()
fileInfo, err := f.Stat()
assert.NoError(t, err)
header, err := tar.FileInfoHeader(fileInfo, "")
assert.NoError(t, err)
err = tarWriter.WriteHeader(header)
assert.NoError(t, err)
fileContent, err := os.ReadFile(f.Name())
assert.NoError(t, err)
_, err = tarWriter.Write(fileContent)
assert.NoError(t, err)
return tarFile.Name()
}
func TestReadToMax(t *testing.T) {
tests := []struct {
name string
input []byte
expected []byte
}{
{
name: "read full content within maxSize",
input: []byte("abcdefg"),
expected: []byte("abcdefg"),
},
{
name: "read content larger than maxSize",
input: make([]byte, maxSize+10), // this creates a byte slice 10 bytes larger than maxSize
expected: make([]byte, maxSize),
},
{
name: "empty input",
input: []byte(""),
expected: []byte(""),
},
}
a := &Archive{}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
reader := bytes.NewReader(tt.input)
output, err := a.ReadToMax(logContext.Background(), reader)
assert.Nil(t, err)
assert.Equal(t, tt.expected, output)
})
}
}
func BenchmarkReadToMax(b *testing.B) {
data := bytes.Repeat([]byte("a"), 1024*1000) // 1MB of data.
reader := bytes.NewReader(data)
a := &Archive{}
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StartTimer()
_, _ = a.ReadToMax(logContext.Background(), reader)
b.StopTimer()
_, _ = reader.Seek(0, 0) // Reset the reader position.
a.size = 0 // Reset archive size.
}
}
func TestExtractDebContent(t *testing.T) {
// Open the sample .deb file from the testdata folder.
file, err := os.Open("testdata/test.deb")
assert.Nil(t, err)
defer file.Close()
ctx := logContext.AddLogger(context.Background())
a := &Archive{}
reader, err := a.extractDebContent(ctx, file)
assert.Nil(t, err)
content, err := io.ReadAll(reader)
assert.Nil(t, err)
expectedLength := 1015582
assert.Equal(t, expectedLength, len(string(content)))
}
func TestSkipArchive(t *testing.T) {
file, err := os.Open("testdata/test.tgz")
assert.Nil(t, err)
defer file.Close()
reader, err := diskbufferreader.New(file)
assert.NoError(t, err)
ctx := logContext.Background()
chunkCh := make(chan *sources.Chunk)
go func() {
defer close(chunkCh)
ok := HandleFile(ctx, reader, &sources.Chunk{}, sources.ChanReporter{Ch: chunkCh}, WithSkipArchives(true))
assert.False(t, ok)
}()
wantCount := 0
count := 0
for range chunkCh {
count++
}
assert.Equal(t, wantCount, count)
}
func TestExtractTarContent(t *testing.T) {
file, err := os.Open("testdata/test.tgz")
assert.Nil(t, err)
defer file.Close()
reader, err := diskbufferreader.New(file)
assert.NoError(t, err)
ctx := logContext.Background()
chunkCh := make(chan *sources.Chunk)
go func() {
defer close(chunkCh)
ok := HandleFile(ctx, reader, &sources.Chunk{}, sources.ChanReporter{Ch: chunkCh})
assert.True(t, ok)
}()
wantCount := 4
count := 0
for range chunkCh {
count++
}
assert.Equal(t, wantCount, count)
}
func TestExtractRPMContent(t *testing.T) {
// Open the sample .rpm file from the testdata folder.
file, err := os.Open("testdata/test.rpm")
assert.Nil(t, err)
defer file.Close()
ctx := logContext.AddLogger(context.Background())
a := &Archive{}
reader, err := a.extractRpmContent(ctx, file)
assert.Nil(t, err)
content, err := io.ReadAll(reader)
assert.Nil(t, err)
expectedLength := 1822720
assert.Equal(t, expectedLength, len(string(content)))
}
func TestOpenInvalidArchive(t *testing.T) {
reader := strings.NewReader("invalid archive")
ctx := logContext.AddLogger(context.Background())
a := &Archive{}
archiveChan := make(chan []byte)
err := a.openArchive(ctx, 0, reader, archiveChan)
assert.Error(t, err)
}
func TestNestedDirArchive(t *testing.T) {
file, err := os.Open("testdata/dir-archive.zip")
assert.Nil(t, err)
defer file.Close()
reader, err := diskbufferreader.New(file)
assert.NoError(t, err)
ctx, cancel := logContext.WithTimeout(logContext.Background(), 5*time.Second)
defer cancel()
sourceChan := make(chan *sources.Chunk, 1)
go func() {
defer close(sourceChan)
HandleFile(ctx, reader, &sources.Chunk{}, sources.ChanReporter{Ch: sourceChan})
}()
count := 0
want := 4
for range sourceChan {
count++
}
assert.Equal(t, want, count)
}
func TestDetermineMimeType(t *testing.T) {
filetype.AddMatcher(filetype.NewType("txt", "text/plain"), func(buf []byte) bool {
return strings.HasPrefix(string(buf), "text:")
})
pngBytes := []byte("\x89PNG\r\n\x1a\n")
jpegBytes := []byte{0xFF, 0xD8, 0xFF}
textBytes := []byte("text: This is a plain text")
rpmBytes := []byte("\xed\xab\xee\xdb")
tests := []struct {
name string
input io.Reader
expected mimeType
shouldFail bool
}{
{
name: "PNG file",
input: bytes.NewReader(pngBytes),
expected: mimeType("image/png"),
shouldFail: false,
},
{
name: "JPEG file",
input: bytes.NewReader(jpegBytes),
expected: mimeType("image/jpeg"),
shouldFail: false,
},
{
name: "Text file",
input: bytes.NewReader(textBytes),
expected: mimeType("text/plain"),
shouldFail: false,
},
{
name: "RPM file",
input: bytes.NewReader(rpmBytes),
expected: rpmMimeType,
shouldFail: false,
},
{
name: "Truncated JPEG file",
input: io.LimitReader(bytes.NewReader(jpegBytes), 2),
expected: mimeType("unknown"),
shouldFail: true,
},
{
name: "Empty reader",
input: bytes.NewReader([]byte{}),
shouldFail: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
originalData, _ := io.ReadAll(io.TeeReader(tt.input, &bytes.Buffer{}))
tt.input = bytes.NewReader(originalData) // Reset the reader
mime, reader, err := determineMimeType(tt.input)
if err != nil && !tt.shouldFail {
t.Fatalf("unexpected error: %v", err)
}
if !tt.shouldFail {
assert.Equal(t, tt.expected, mime)
}
// Ensure the reader still contains all the original data.
data, _ := io.ReadAll(reader)
assert.Equal(t, originalData, data)
})
}
}

pkg/handlers/default.go (new file, +282)

@ -0,0 +1,282 @@
package handlers
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"time"
"github.com/gabriel-vasile/mimetype"
"github.com/mholt/archiver/v4"
"github.com/trufflesecurity/trufflehog/v3/pkg/common"
logContext "github.com/trufflesecurity/trufflehog/v3/pkg/context"
"github.com/trufflesecurity/trufflehog/v3/pkg/readers"
"github.com/trufflesecurity/trufflehog/v3/pkg/sources"
)
type ctxKey int
const (
depthKey ctxKey = iota
defaultBufferSize = 512
)
var (
maxDepth = 5
maxSize = 250 * 1024 * 1024 // 250 MB
maxTimeout = time.Duration(30) * time.Second
)
// SetArchiveMaxSize sets the maximum size of the archive.
func SetArchiveMaxSize(size int) { maxSize = size }
// SetArchiveMaxDepth sets the maximum depth of the archive.
func SetArchiveMaxDepth(depth int) { maxDepth = depth }
// SetArchiveMaxTimeout sets the maximum timeout for the archive handler.
func SetArchiveMaxTimeout(timeout time.Duration) { maxTimeout = timeout }
// defaultHandler provides a base implementation for file handlers, encapsulating common behaviors
// needed across different handlers. This handler is embedded in other specialized handlers to ensure
// consistent application of these common behaviors and to simplify the extension of handler functionalities.
type defaultHandler struct{ metrics *metrics }
// newDefaultHandler creates a defaultHandler with metrics configured based on the provided handlerType.
// The handlerType parameter is used to initialize the metrics instance with the appropriate handler type,
// ensuring that the metrics recorded within the defaultHandler methods are correctly attributed to the
// specific handler that invoked them. This allows for accurate metrics attribution when the defaultHandler
// is embedded in specialized handlers like arHandler or rpmHandler.
func newDefaultHandler(handlerType handlerType) *defaultHandler {
return &defaultHandler{metrics: newHandlerMetrics(handlerType)}
}
// HandleFile processes the input as either an archive or non-archive based on its content,
// utilizing a single output channel. It first tries to identify the input as an archive. If it is an archive,
// it processes it accordingly; otherwise, it handles the input as non-archive content.
// The function returns a channel that will receive the extracted data bytes and an error if the initial setup fails.
func (h *defaultHandler) HandleFile(ctx logContext.Context, input readSeekCloser) (chan []byte, error) {
// Shared channel for both archive and non-archive content.
dataChan := make(chan []byte, defaultBufferSize)
_, arReader, err := archiver.Identify("", input)
if err != nil {
if errors.Is(err, archiver.ErrNoMatch) {
// Not an archive, handle as non-archive content in a separate goroutine.
ctx.Logger().V(3).Info("File not recognized as an archive, handling as non-archive content.")
go func() {
defer close(dataChan)
// Update the metrics for the file processing.
start := time.Now()
var err error
defer func() {
h.measureLatencyAndHandleErrors(start, err)
h.metrics.incFilesProcessed()
}()
if err = h.handleNonArchiveContent(ctx, arReader, dataChan); err != nil {
ctx.Logger().Error(err, "error handling non-archive content.")
}
}()
return dataChan, nil
}
h.metrics.incErrors()
return nil, err
}
go func() {
ctx, cancel := logContext.WithTimeout(ctx, maxTimeout)
defer cancel()
defer close(dataChan)
// Update the metrics for the file processing.
start := time.Now()
var err error
// Use a closure so the deferred call reports the final value of err rather than
// the nil value captured when the defer statement executes.
defer func() { h.measureLatencyAndHandleErrors(start, err) }()
if err = h.openArchive(ctx, 0, arReader, dataChan); err != nil {
ctx.Logger().Error(err, "error unarchiving chunk.")
}
}()
return dataChan, nil
}
// measureLatencyAndHandleErrors measures the latency of the file processing and updates the metrics accordingly.
// It also records errors and timeouts in the metrics.
func (h *defaultHandler) measureLatencyAndHandleErrors(start time.Time, err error) {
if err == nil {
h.metrics.observeHandleFileLatency(time.Since(start).Milliseconds())
return
}
h.metrics.incErrors()
if errors.Is(err, context.DeadlineExceeded) {
h.metrics.incFileProcessingTimeouts()
}
}
var ErrMaxDepthReached = errors.New("max archive depth reached")
// openArchive recursively extracts content from an archive up to a maximum depth, handling nested archives if necessary.
// It takes a reader from which it attempts to identify and process the archive format. Depending on the archive type,
// it either decompresses or extracts the contents directly, sending data to the provided channel.
// Returns an error if the archive cannot be processed due to issues like exceeding maximum depth or unsupported formats.
func (h *defaultHandler) openArchive(ctx logContext.Context, depth int, reader io.Reader, archiveChan chan []byte) error {
if common.IsDone(ctx) {
return ctx.Err()
}
if depth > maxDepth {
h.metrics.incMaxArchiveDepthCount()
return ErrMaxDepthReached
}
format, arReader, err := archiver.Identify("", reader)
switch {
case err == nil:
// Continue with the rest of the code.
case errors.Is(err, archiver.ErrNoMatch):
if depth > 0 {
// If openArchive is called on an already extracted/decompressed file and the depth is greater than 0,
// it means we are at least 1 layer deep in the archive. In this case, we should handle the content
// as non-archive data by calling handleNonArchiveContent.
return h.handleNonArchiveContent(ctx, arReader, archiveChan)
}
// If openArchive is called on the root (depth == 0) and we can't identify the format,
// it means we can't handle the content at all. Return the archiver.ErrNoMatch error.
return err
default:
// Some other error occurred.
return fmt.Errorf("error identifying archive: %w", err)
}
switch archive := format.(type) {
case archiver.Decompressor:
// Decompress the archive and feed the decompressed data back into the archive handler to extract any nested archives.
compReader, err := archive.OpenReader(arReader)
if err != nil {
return fmt.Errorf("error opening decompressor with format %q: %w", format.Name(), err)
}
defer compReader.Close()
h.metrics.incFilesProcessed()
rdr, err := readers.NewBufferedFileReader(compReader)
if err != nil {
return fmt.Errorf("error creating random access reader: %w", err)
}
defer rdr.Close()
return h.openArchive(ctx, depth+1, rdr, archiveChan)
case archiver.Extractor:
err := archive.Extract(logContext.WithValue(ctx, depthKey, depth+1), arReader, nil, h.extractorHandler(archiveChan))
if err != nil {
return fmt.Errorf("error extracting archive with format: %s: %w", format.Name(), err)
}
return nil
default:
return fmt.Errorf("unknown archive type: %s", format.Name())
}
}
// extractorHandler creates a closure that handles individual files extracted by an archiver.
// It logs the extraction, checks for cancellation, and decides whether to skip the file based on its name or type,
// particularly for binary files if configured to skip. If the file is not skipped, it recursively calls openArchive
// to handle nested archives or to continue processing based on the file's content and depth in the archive structure.
func (h *defaultHandler) extractorHandler(archiveChan chan []byte) func(context.Context, archiver.File) error {
return func(ctx context.Context, file archiver.File) error {
lCtx := logContext.WithValues(
logContext.AddLogger(ctx),
"filename", file.Name(),
"size", file.Size(),
)
lCtx.Logger().V(5).Info("Handling extracted file.")
if file.IsDir() || file.LinkTarget != "" {
lCtx.Logger().V(5).Info("skipping directory or symlink")
return nil
}
if common.IsDone(ctx) {
return ctx.Err()
}
depth := 0
if ctxDepth, ok := ctx.Value(depthKey).(int); ok {
depth = ctxDepth
}
fileSize := file.Size()
if int(fileSize) > maxSize {
lCtx.Logger().V(3).Info("skipping file due to size")
return nil
}
if common.SkipFile(file.Name()) || common.IsBinary(file.Name()) {
lCtx.Logger().V(5).Info("skipping file")
h.metrics.incFilesSkipped()
return nil
}
fReader, err := file.Open()
if err != nil {
return fmt.Errorf("error opening file %q: %w", file.Name(), err)
}
defer fReader.Close()
rdr, err := readers.NewBufferedFileReader(fReader)
if err != nil {
return fmt.Errorf("error creating random access reader: %w", err)
}
defer rdr.Close()
h.metrics.incFilesProcessed()
h.metrics.observeFileSize(fileSize)
return h.openArchive(lCtx, depth, rdr, archiveChan)
}
}
// handleNonArchiveContent processes files that do not contain nested archives, serving as the final stage in the
// extraction/decompression process. It reads the content to detect its MIME type and decides whether to skip based
// on the type, particularly for binary files. It manages reading file chunks and writing them to the archive channel,
// effectively collecting the final bytes for further processing. This function is a key component in ensuring that all
// file content, regardless of being an archive or not, is handled appropriately.
func (h *defaultHandler) handleNonArchiveContent(ctx logContext.Context, reader io.Reader, archiveChan chan []byte) error {
bufReader := bufio.NewReaderSize(reader, defaultBufferSize)
// A buffer of 512 bytes is used since many file formats store their magic numbers within the first 512 bytes.
// If fewer bytes are read, MIME type detection may still succeed.
buffer, err := bufReader.Peek(defaultBufferSize)
if err != nil && !errors.Is(err, io.EOF) {
return fmt.Errorf("unable to read file for MIME type detection: %w", err)
}
mime := mimetype.Detect(buffer)
mimeT := mimeType(mime.String())
if common.SkipFile(mime.Extension()) || common.IsBinary(mime.Extension()) {
ctx.Logger().V(5).Info("skipping file", "ext", mimeT)
h.metrics.incFilesSkipped()
return nil
}
chunkReader := sources.NewChunkReader()
for data := range chunkReader(ctx, bufReader) {
if err := data.Error(); err != nil {
ctx.Logger().Error(err, "error reading chunk")
h.metrics.incErrors()
continue
}
if err := common.CancellableWrite(ctx, archiveChan, data.Bytes()); err != nil {
return err
}
h.metrics.incBytesProcessed(len(data.Bytes()))
}
return nil
}
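As a usage note (not part of the diff), the exported setters near the top of this file tune package-wide limits; a hedged sketch with illustrative values, assuming the caller imports this package as handlers:

// Illustrative only; the defaults above (depth 5, 250 MB, 30s) apply otherwise.
handlers.SetArchiveMaxDepth(3)                  // stop recursing after three nested archive layers
handlers.SetArchiveMaxSize(100 * 1024 * 1024)   // skip extracted files larger than 100 MB
handlers.SetArchiveMaxTimeout(60 * time.Second) // allow up to a minute per archive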


@ -0,0 +1,124 @@
package handlers
import (
"context"
"net/http"
"regexp"
"strings"
"testing"
"github.com/stretchr/testify/assert"
diskbufferreader "github.com/trufflesecurity/disk-buffer-reader"
logContext "github.com/trufflesecurity/trufflehog/v3/pkg/context"
)
func TestArchiveHandler(t *testing.T) {
tests := map[string]struct {
archiveURL string
expectedChunks int
matchString string
expectErr bool
}{
"gzip-single": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/one-zip.gz",
1,
"AKIAYVP4CIPPH5TNP3SW",
false,
},
"gzip-nested": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/double-zip.gz",
1,
"AKIAYVP4CIPPH5TNP3SW",
false,
},
"gzip-too-deep": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/six-zip.gz",
0,
"",
true,
},
"tar-single": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/one.tar",
1,
"AKIAYVP4CIPPH5TNP3SW",
false,
},
"tar-nested": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/two.tar",
1,
"AKIAYVP4CIPPH5TNP3SW",
false,
},
"tar-too-deep": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/six.tar",
0,
"",
true,
},
"targz-single": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/tar-archive.tar.gz",
1,
"AKIAYVP4CIPPH5TNP3SW",
false,
},
"gzip-large": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/FifteenMB.gz",
1543,
"AKIAYVP4CIPPH5TNP3SW",
false,
},
"zip-single": {
"https://raw.githubusercontent.com/bill-rich/bad-secrets/master/aws-canary-creds.zip",
1,
"AKIAYVP4CIPPH5TNP3SW",
false,
},
}
for name, testCase := range tests {
t.Run(name, func(t *testing.T) {
resp, err := http.Get(testCase.archiveURL)
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode)
defer resp.Body.Close()
handler := newDefaultHandler(defaultHandlerType)
newReader, err := diskbufferreader.New(resp.Body)
if err != nil {
t.Errorf("error creating reusable reader: %s", err)
}
archiveChan, err := handler.HandleFile(logContext.Background(), newReader)
if testCase.expectErr {
assert.NoError(t, err)
return
}
count := 0
re := regexp.MustCompile(testCase.matchString)
matched := false
for chunk := range archiveChan {
count++
if re.Match(chunk) {
matched = true
}
}
assert.True(t, matched)
assert.Equal(t, testCase.expectedChunks, count)
})
}
}
func TestOpenInvalidArchive(t *testing.T) {
reader := strings.NewReader("invalid archive")
ctx := logContext.AddLogger(context.Background())
handler := defaultHandler{}
archiveChan := make(chan []byte)
err := handler.openArchive(ctx, 0, reader, archiveChan)
assert.Error(t, err)
}


@ -1,115 +1,226 @@
package handlers
import (
"fmt"
"io"
diskbufferreader "github.com/trufflesecurity/disk-buffer-reader"
"github.com/gabriel-vasile/mimetype"
logContext "github.com/trufflesecurity/trufflehog/v3/pkg/context"
"github.com/trufflesecurity/trufflehog/v3/pkg/readers"
"github.com/trufflesecurity/trufflehog/v3/pkg/sources"
)
func DefaultHandlers() []Handler {
return []Handler{
&Archive{},
}
// readSeekCloser is an interface that combines the functionality of io.ReadSeekCloser and io.ReaderAt.
// It supports reading data, seeking within an open resource, and closing the resource once operations are complete.
// Additionally, it allows reading from a specific offset within the resource without altering its current position,
// enabling efficient and flexible data access patterns. This interface is particularly useful for handling files
// or other data streams where random access and sequential processing are required.
type readSeekCloser interface {
io.ReadSeekCloser
io.ReaderAt
}
// SpecializedHandler defines the interface for handlers that can process specialized archives.
// It includes a method to handle specialized archives and determine if the file is of a special type.
type SpecializedHandler interface {
// HandleSpecialized examines the provided file reader within the context and determines if it is a specialized archive.
// It returns a reader with any necessary modifications, a boolean indicating if the file was specialized,
// and an error if something went wrong during processing.
HandleSpecialized(logContext.Context, io.Reader) (io.Reader, bool, error)
// FileHandler represents a handler for files.
// It has a single method, HandleFile, which takes a context and a readSeekCloser as input,
// and returns a channel of byte slices and an error.
// The readSeekCloser extends io.ReadSeekCloser with io.ReaderAt capabilities,
// allowing handlers to perform random and direct access on the file content efficiently.
type FileHandler interface {
HandleFile(ctx logContext.Context, reader readSeekCloser) (chan []byte, error)
}
// Option is a function type that applies a configuration to a Handler.
type Option func(Handler)
// fileHandlingConfig encapsulates configuration settings that control the behavior of file processing.
type fileHandlingConfig struct{ skipArchives bool }
// WithSkipBinaries returns an Option that configures whether to skip binary files.
func WithSkipBinaries(skip bool) Option {
return func(h Handler) {
if a, ok := h.(*Archive); ok {
a.skipBinaries = skip
}
// newFileHandlingConfig creates a fileHandlingConfig with default settings.
// Optional functional parameters can customize the configuration.
func newFileHandlingConfig(options ...func(*fileHandlingConfig)) *fileHandlingConfig {
config := new(fileHandlingConfig)
for _, option := range options {
option(config)
}
return config
}
// WithSkipArchives returns an Option that configures whether to skip archive files.
func WithSkipArchives(skip bool) Option {
return func(h Handler) {
if a, ok := h.(*Archive); ok {
a.skipArchives = skip
}
}
// WithSkipArchives sets the skipArchives field of the fileHandlingConfig.
// If skip is true, the FileHandler will skip archive files.
func WithSkipArchives(skip bool) func(*fileHandlingConfig) {
return func(c *fileHandlingConfig) { c.skipArchives = skip }
}
type Handler interface {
FromFile(logContext.Context, io.Reader) chan []byte
IsFiletype(logContext.Context, io.Reader) (io.Reader, bool)
New(...Option)
type handlerType string
const (
defaultHandlerType handlerType = "default"
arHandlerType handlerType = "ar"
rpmHandlerType handlerType = "rpm"
)
type mimeType string
const (
sevenZMime mimeType = "application/x-7z-compressed"
bzip2Mime mimeType = "application/x-bzip2"
rarCompressedMime mimeType = "application/x-rar-compressed"
rarMime mimeType = "application/x-rar"
tarMime mimeType = "application/x-tar"
zipMime mimeType = "application/zip"
gxzipMime mimeType = "application/x-gzip"
gzipMime mimeType = "application/gzip"
gunzipMime mimeType = "application/x-gunzip"
gzippedMime mimeType = "application/gzipped"
gzipCompressedMime mimeType = "application/x-gzip-compressed"
gzipDocumentMime mimeType = "gzip/document"
xzMime mimeType = "application/x-xz"
msCabCompressedMime mimeType = "application/vnd.ms-cab-compressed"
rpmMime mimeType = "application/x-rpm"
fitsMime mimeType = "application/fits"
xarMime mimeType = "application/x-xar"
warcMime mimeType = "application/warc"
cpioMime mimeType = "application/cpio"
unixArMime mimeType = "application/x-unix-archive"
arMime mimeType = "application/x-archive"
debMime mimeType = "application/vnd.debian.binary-package"
lzipMime mimeType = "application/lzip"
lzipXMime mimeType = "application/x-lzip"
)
var knownArchiveMimeTypes = map[mimeType]struct{}{
sevenZMime: {},
bzip2Mime: {},
gzipMime: {},
gxzipMime: {},
rarCompressedMime: {},
rarMime: {},
tarMime: {},
zipMime: {},
gunzipMime: {},
gzippedMime: {},
gzipCompressedMime: {},
gzipDocumentMime: {},
xzMime: {},
msCabCompressedMime: {},
rpmMime: {},
fitsMime: {},
xarMime: {},
warcMime: {},
cpioMime: {},
unixArMime: {},
arMime: {},
debMime: {},
lzipMime: {},
lzipXMime: {},
}
// HandleFile processes a given file by selecting an appropriate handler from DefaultHandlers.
// It first checks if the handler implements SpecializedHandler for any special processing,
// then falls back to regular file type handling. If successful, it reads the file in chunks,
// packages them in the provided chunk skeleton, and reports them to the chunk reporter.
// The function returns true if processing was successful and false otherwise.
// Context is used for cancellation, and the caller is responsible for canceling it if needed.
func HandleFile(ctx logContext.Context, reReader *diskbufferreader.DiskBufferReader, chunkSkel *sources.Chunk, reporter sources.ChunkReporter, opts ...Option) bool {
	for _, h := range DefaultHandlers() {
		h.New(opts...)
		if handled := processHandler(ctx, h, reReader, chunkSkel, reporter); handled {
			return true
		}
	}
	return false
}

// getHandlerForType dynamically selects and configures a FileHandler based on the provided MIME type.
// This function uses specialized handlers for specific archive types and RPM packages:
//   - arHandler is used for 'arMime', 'unixArMime', and 'debMime', which cover Unix archives and Debian packages.
//   - rpmHandler is used for 'rpmMime' and 'cpioMime', handling RPM and CPIO archives.
//
// For all other MIME types, which typically include common archive formats like .zip, .tar, .gz, etc.,
// a defaultHandler is used, leveraging the archiver library to manage these formats.
// The chosen handler is then configured with the provided options, adapting it to specific operational needs.
// Returns the configured handler or an error if the handler type does not match the expected type.
func getHandlerForType(mimeT mimeType) (FileHandler, error) {
	var handler FileHandler
	switch mimeT {
	case arMime, unixArMime, debMime:
		handler = newARHandler()
	case rpmMime, cpioMime:
		handler = newRPMHandler()
	default:
		handler = newDefaultHandler(defaultHandlerType)
	}
	return handler, nil
}
func processHandler(ctx logContext.Context, h Handler, reReader *diskbufferreader.DiskBufferReader, chunkSkel *sources.Chunk, reporter sources.ChunkReporter) bool {
	if specialHandler, ok := h.(SpecializedHandler); ok {
		file, isSpecial, err := specialHandler.HandleSpecialized(ctx, reReader)
		if isSpecial {
			return handleChunks(ctx, h.FromFile(ctx, file), chunkSkel, reporter)
		}
		if err != nil {
			ctx.Logger().Error(err, "error handling file")
		}
	}

	if _, err := reReader.Seek(0, io.SeekStart); err != nil {
		ctx.Logger().Error(err, "error seeking to start of file")
		return false
	}

	if _, isType := h.IsFiletype(ctx, reReader); !isType {
		return false
	}

	return handleChunks(ctx, h.FromFile(ctx, reReader), chunkSkel, reporter)
}

// HandleFile orchestrates the complete file handling process for a given file.
// It determines the MIME type of the file, selects the appropriate handler based on this type, and processes the file.
// This function initializes the handling process and delegates to the specific handler to manage file
// extraction or processing. Errors at any stage (MIME type determination, handler retrieval,
// seeking, or file handling) result in an error return value.
// Successful handling passes the file content through a channel to be chunked and reported.
//
// The function takes an io.Reader as input and wraps it with a BufferedFileReader to support
// seeking and to provide an io.ReaderAt interface. This is necessary for certain file handlers that require
// random access to the file content.
//
// If the skipArchives option is set to true and the detected MIME type is a known archive type,
// the function will skip processing the file and return nil.
func HandleFile(
	ctx logContext.Context,
	reader io.Reader,
	chunkSkel *sources.Chunk,
	reporter sources.ChunkReporter,
	options ...func(*fileHandlingConfig),
) error {
	config := newFileHandlingConfig(options...)

	rdr, err := readers.NewBufferedFileReader(reader)
	if err != nil {
		return fmt.Errorf("error creating random access reader: %w", err)
	}
	defer rdr.Close()

	mimeT, err := mimetype.DetectReader(rdr)
	if err != nil {
		return fmt.Errorf("error detecting MIME type: %w", err)
	}

	mime := mimeType(mimeT.String())
	if _, ok := knownArchiveMimeTypes[mime]; ok && config.skipArchives {
		ctx.Logger().V(5).Info("skipping archive file", "mime", mimeT.String())
		return nil
	}

	// Reset the reader to the start of the file since the MIME type detection may have read some bytes.
	if _, err := rdr.Seek(0, io.SeekStart); err != nil {
		return fmt.Errorf("error seeking to start of file: %w", err)
	}

	handler, err := getHandlerForType(mime)
	if err != nil {
		return fmt.Errorf("error getting handler for type: %w", err)
	}

	archiveChan, err := handler.HandleFile(ctx, rdr) // Delegate to the specific handler to process the file.
	if err != nil {
		return fmt.Errorf("error handling file: %w", err)
	}

	return handleChunks(ctx, archiveChan, chunkSkel, reporter)
}
func handleChunks(ctx logContext.Context, handlerChan chan []byte, chunkSkel *sources.Chunk, reporter sources.ChunkReporter) bool {
	if handlerChan == nil {
		return false
	}
	for {
		select {
		case data, open := <-handlerChan:
			if !open {
				return true
			}
			chunk := *chunkSkel
			chunk.Data = data
			if err := reporter.ChunkOk(ctx, chunk); err != nil {
				return false
			}
		case <-ctx.Done():
			return false
		}
	}
}

// handleChunks reads data from the handlerChan and uses it to fill chunks according to a predefined skeleton (chunkSkel).
// Each filled chunk is reported using the provided reporter. This function manages the lifecycle of the channel,
// handling the termination condition when the channel closes and ensuring the cancellation of the operation if the context
// is done. It returns nil once all chunks are processed successfully, and an error on reporting failures or cancellation.
func handleChunks(
	ctx logContext.Context,
	handlerChan chan []byte,
	chunkSkel *sources.Chunk,
	reporter sources.ChunkReporter,
) error {
	if handlerChan == nil {
		return fmt.Errorf("handler channel is nil")
	}

	for {
		select {
		case data, open := <-handlerChan:
			if !open {
				ctx.Logger().V(5).Info("handler channel closed, all chunks processed")
				return nil
			}
			chunk := *chunkSkel
			chunk.Data = data
			if err := reporter.ChunkOk(ctx, chunk); err != nil {
				return fmt.Errorf("error reporting chunk: %w", err)
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

@@ -0,0 +1,214 @@
package handlers
import (
"io"
"net/http"
"os"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
diskbufferreader "github.com/trufflesecurity/disk-buffer-reader"
"github.com/trufflesecurity/trufflehog/v3/pkg/context"
"github.com/trufflesecurity/trufflehog/v3/pkg/sources"
)
func TestHandleFileCancelledContext(t *testing.T) {
reporter := sources.ChanReporter{Ch: make(chan *sources.Chunk, 2)}
canceledCtx, cancel := context.WithCancel(context.Background())
cancel()
reader, err := diskbufferreader.New(strings.NewReader("file"))
assert.NoError(t, err)
assert.Error(t, HandleFile(canceledCtx, reader, &sources.Chunk{}, reporter))
}
func TestHandleFile(t *testing.T) {
reporter := sources.ChanReporter{Ch: make(chan *sources.Chunk, 2)}
// Only one chunk is sent on the channel.
// TODO: Embed a zip without making an HTTP request.
resp, err := http.Get("https://raw.githubusercontent.com/bill-rich/bad-secrets/master/aws-canary-creds.zip")
assert.NoError(t, err)
defer resp.Body.Close()
assert.Equal(t, 0, len(reporter.Ch))
assert.NoError(t, HandleFile(context.Background(), resp.Body, &sources.Chunk{}, reporter))
assert.Equal(t, 1, len(reporter.Ch))
}
func BenchmarkHandleFile(b *testing.B) {
file, err := os.Open("testdata/test.tgz")
assert.Nil(b, err)
defer file.Close()
b.ResetTimer()
for i := 0; i < b.N; i++ {
sourceChan := make(chan *sources.Chunk, 1)
b.StartTimer()
go func() {
defer close(sourceChan)
err := HandleFile(context.Background(), file, &sources.Chunk{}, sources.ChanReporter{Ch: sourceChan})
assert.NoError(b, err)
}()
for range sourceChan {
}
b.StopTimer()
_, err = file.Seek(0, io.SeekStart)
assert.NoError(b, err)
}
}
func TestSkipArchive(t *testing.T) {
file, err := os.Open("testdata/test.tgz")
assert.Nil(t, err)
defer file.Close()
chunkCh := make(chan *sources.Chunk)
go func() {
defer close(chunkCh)
err := HandleFile(context.Background(), file, &sources.Chunk{}, sources.ChanReporter{Ch: chunkCh}, WithSkipArchives(true))
assert.NoError(t, err)
}()
wantCount := 0
count := 0
for range chunkCh {
count++
}
assert.Equal(t, wantCount, count)
}
func TestHandleNestedArchives(t *testing.T) {
file, err := os.Open("testdata/nested-dirs.zip")
assert.Nil(t, err)
defer file.Close()
chunkCh := make(chan *sources.Chunk)
go func() {
defer close(chunkCh)
err := HandleFile(context.Background(), file, &sources.Chunk{}, sources.ChanReporter{Ch: chunkCh})
assert.NoError(t, err)
}()
wantCount := 8
count := 0
for range chunkCh {
count++
}
assert.Equal(t, wantCount, count)
}
func TestHandleCompressedZip(t *testing.T) {
file, err := os.Open("testdata/example.zip.gz")
assert.Nil(t, err)
defer file.Close()
chunkCh := make(chan *sources.Chunk)
go func() {
defer close(chunkCh)
err := HandleFile(context.Background(), file, &sources.Chunk{}, sources.ChanReporter{Ch: chunkCh})
assert.NoError(t, err)
}()
wantCount := 2
count := 0
for range chunkCh {
count++
}
assert.Equal(t, wantCount, count)
}
func TestHandleNestedCompressedArchive(t *testing.T) {
file, err := os.Open("testdata/nested-compressed-archive.tar.gz")
assert.Nil(t, err)
defer file.Close()
chunkCh := make(chan *sources.Chunk)
go func() {
defer close(chunkCh)
err := HandleFile(context.Background(), file, &sources.Chunk{}, sources.ChanReporter{Ch: chunkCh})
assert.NoError(t, err)
}()
wantCount := 4
count := 0
for range chunkCh {
count++
}
assert.Equal(t, wantCount, count)
}
func TestExtractTarContent(t *testing.T) {
file, err := os.Open("testdata/test.tgz")
assert.Nil(t, err)
defer file.Close()
chunkCh := make(chan *sources.Chunk)
go func() {
defer close(chunkCh)
err := HandleFile(context.Background(), file, &sources.Chunk{}, sources.ChanReporter{Ch: chunkCh})
assert.NoError(t, err)
}()
wantCount := 4
count := 0
for range chunkCh {
count++
}
assert.Equal(t, wantCount, count)
}
func TestNestedDirArchive(t *testing.T) {
file, err := os.Open("testdata/dir-archive.zip")
assert.Nil(t, err)
defer file.Close()
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
sourceChan := make(chan *sources.Chunk, 1)
go func() {
defer close(sourceChan)
err := HandleFile(ctx, file, &sources.Chunk{}, sources.ChanReporter{Ch: sourceChan})
assert.NoError(t, err)
}()
count := 0
want := 4
for range sourceChan {
count++
}
assert.Equal(t, want, count)
}
func TestHandleFileRPM(t *testing.T) {
wantChunkCount := 179
reporter := sources.ChanReporter{Ch: make(chan *sources.Chunk, wantChunkCount)}
file, err := os.Open("testdata/test.rpm")
assert.Nil(t, err)
defer file.Close()
assert.Equal(t, 0, len(reporter.Ch))
assert.NoError(t, HandleFile(context.Background(), file, &sources.Chunk{}, reporter))
assert.Equal(t, wantChunkCount, len(reporter.Ch))
}
func TestHandleFileAR(t *testing.T) {
wantChunkCount := 102
reporter := sources.ChanReporter{Ch: make(chan *sources.Chunk, wantChunkCount)}
file, err := os.Open("testdata/test.deb")
assert.Nil(t, err)
defer file.Close()
assert.Equal(t, 0, len(reporter.Ch))
assert.NoError(t, HandleFile(context.Background(), file, &sources.Chunk{}, reporter))
assert.Equal(t, wantChunkCount, len(reporter.Ch))
}

pkg/handlers/metrics.go
@@ -0,0 +1,183 @@
package handlers
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/trufflesecurity/trufflehog/v3/pkg/common"
)
type metrics struct {
handlerType handlerType
handleFileLatency *prometheus.HistogramVec
bytesProcessed *prometheus.CounterVec
filesProcessed *prometheus.CounterVec
errorsEncountered *prometheus.CounterVec
filesSkipped *prometheus.CounterVec
maxArchiveDepthCount *prometheus.CounterVec
fileSize *prometheus.HistogramVec
fileProcessingTimeouts *prometheus.CounterVec
}
var (
handleFileLatency = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_file_latency_milliseconds",
Help: "Latency of the HandleFile method",
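			// ExponentialBuckets(1, 5, 6) yields bucket upper bounds of 1, 5, 25, 125, 625, and 3125 ms.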
Buckets: prometheus.ExponentialBuckets(1, 5, 6),
},
[]string{"handler_type"},
)
bytesProcessed = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_bytes_processed_total",
Help: "Total number of bytes processed",
},
[]string{"handler_type"},
)
filesProcessed = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_files_processed_total",
Help: "Total number of files processed",
},
[]string{"handler_type"},
)
errorsEncountered = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_errors_encountered_total",
Help: "Total number of errors encountered",
},
[]string{"handler_type"},
)
filesSkipped = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_files_skipped_total",
Help: "Total number of files skipped",
},
[]string{"handler_type"},
)
maxArchiveDepthCount = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_max_archive_depth_reached_total",
Help: "Total number of times the maximum archive depth was reached",
},
[]string{"handler_type"},
)
fileSize = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_file_size_bytes",
Help: "Sizes of files handled by the handler",
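			// ExponentialBuckets(1, 2, 4) yields bucket upper bounds of 1, 2, 4, and 8 bytes.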
Buckets: prometheus.ExponentialBuckets(1, 2, 4),
},
[]string{"handler_type"},
)
fileProcessingTimeouts = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: common.MetricsNamespace,
Subsystem: common.MetricsSubsystem,
Name: "handlers_file_processing_timeouts_total",
Help: "Total number of file processing timeouts encountered",
},
[]string{"handler_type"},
)
)
// newHandlerMetrics creates a new metrics instance configured with Prometheus metrics specific to a file handler.
// The function takes a handlerType parameter, which represents the type of the handler (e.g., "default", "ar", "rpm").
// The handlerType is used as a label for each metric, allowing for differentiation and aggregation of metrics
// based on the handler type.
//
// The function initializes and returns a pointer to a metrics struct that contains the following Prometheus metrics:
//
// - handleFileLatency: a HistogramVec metric that measures the latency of the HandleFile method.
// It uses exponential buckets with a base of 1 and a factor of 5, up to 6 buckets.
// The metric is labeled with the handlerType.
//
// - bytesProcessed: a CounterVec metric that tracks the total number of bytes processed by the handler.
// It is labeled with the handlerType.
//
// - filesProcessed: a CounterVec metric that tracks the total number of files processed by the handler.
// It is labeled with the handlerType.
//
// - errorsEncountered: a CounterVec metric that tracks the total number of errors encountered by the handler.
// It is labeled with the handlerType.
//
// - filesSkipped: a CounterVec metric that tracks the total number of files skipped by the handler.
// It is labeled with the handlerType.
//
// - maxArchiveDepthCount: a CounterVec metric that tracks the total number of times the maximum archive depth was reached.
// It is labeled with the handlerType.
//
// - fileSize: a HistogramVec metric that measures the sizes of files handled by the handler.
// It uses exponential buckets with a base of 1 and a factor of 2, up to 4 buckets.
// It is labeled with the handlerType.
//
// - fileProcessingTimeouts: a CounterVec metric that tracks the total number of file processing timeouts
// encountered by the handler.
// It is labeled with the handlerType.
//
// The metrics are created with a common namespace and subsystem defined in the common package.
// This helps to organize and group related metrics together.
//
// By initializing the metrics with the handlerType label, the function enables accurate attribution and aggregation
// of metrics based on the specific handler type. This allows for fine-grained monitoring and analysis of
// file handler performance.
func newHandlerMetrics(t handlerType) *metrics {
return &metrics{
handlerType: t,
handleFileLatency: handleFileLatency,
bytesProcessed: bytesProcessed,
filesProcessed: filesProcessed,
errorsEncountered: errorsEncountered,
filesSkipped: filesSkipped,
maxArchiveDepthCount: maxArchiveDepthCount,
fileSize: fileSize,
fileProcessingTimeouts: fileProcessingTimeouts,
}
}
func (m *metrics) observeHandleFileLatency(duration int64) {
m.handleFileLatency.WithLabelValues(string(m.handlerType)).Observe(float64(duration))
}
func (m *metrics) incBytesProcessed(bytes int) {
m.bytesProcessed.WithLabelValues(string(m.handlerType)).Add(float64(bytes))
}
func (m *metrics) incFilesProcessed() {
m.filesProcessed.WithLabelValues(string(m.handlerType)).Inc()
}
func (m *metrics) incErrors() {
m.errorsEncountered.WithLabelValues(string(m.handlerType)).Inc()
}
func (m *metrics) incFilesSkipped() {
m.filesSkipped.WithLabelValues(string(m.handlerType)).Inc()
}
func (m *metrics) incMaxArchiveDepthCount() {
m.maxArchiveDepthCount.WithLabelValues(string(m.handlerType)).Inc()
}
func (m *metrics) observeFileSize(size int64) {
m.fileSize.WithLabelValues(string(m.handlerType)).Observe(float64(size))
}
func (m *metrics) incFileProcessingTimeouts() {
m.fileProcessingTimeouts.WithLabelValues(string(m.handlerType)).Inc()
}
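// Note: the rpm handler below defers a call to measureLatencyAndHandleErrors, a helper defined on
// the shared defaultHandler that is not shown in this diff. A minimal sketch of what such a helper
// could look like, assuming the "time" package is imported and that latency is recorded only on
// success while failures increment the error counter (illustrative only, not the actual implementation):
func (h *defaultHandler) measureLatencyAndHandleErrors(start time.Time, err error) {
	if err == nil {
		// Success: record how long HandleFile ran, in milliseconds, under this handler's type label.
		h.metrics.observeHandleFileLatency(time.Since(start).Milliseconds())
		return
	}
	// Failure: count the error so per-handler error rates can be tracked.
	h.metrics.incErrors()
}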

pkg/handlers/rpm.go
@@ -0,0 +1,87 @@
package handlers
import (
"errors"
"fmt"
"io"
"time"
"github.com/sassoftware/go-rpmutils"
logContext "github.com/trufflesecurity/trufflehog/v3/pkg/context"
)
// rpmHandler specializes defaultHandler to manage RPM package files. It leverages shared behaviors
// from defaultHandler and introduces additional logic specific to RPM packages.
type rpmHandler struct{ *defaultHandler }
// newRPMHandler creates an rpmHandler.
func newRPMHandler() *rpmHandler {
return &rpmHandler{defaultHandler: newDefaultHandler(rpmHandlerType)}
}
// HandleFile processes RPM formatted files. It reads the RPM headers, opens the payload (CPIO)
// reader, and streams the contents of each file in the package to the returned channel.
func (h *rpmHandler) HandleFile(ctx logContext.Context, input readSeekCloser) (chan []byte, error) {
archiveChan := make(chan []byte, defaultBufferSize)
go func() {
ctx, cancel := logContext.WithTimeout(ctx, maxTimeout)
defer cancel()
defer close(archiveChan)
// Update the metrics for the file processing.
start := time.Now()
var err error
defer h.measureLatencyAndHandleErrors(start, err)
var rpm *rpmutils.Rpm
rpm, err = rpmutils.ReadRpm(input)
if err != nil {
ctx.Logger().Error(err, "error reading RPM")
return
}
var reader rpmutils.PayloadReader
reader, err = rpm.PayloadReaderExtended()
if err != nil {
ctx.Logger().Error(err, "error getting RPM payload reader")
return
}
if err = h.processRPMFiles(ctx, reader, archiveChan); err != nil {
ctx.Logger().Error(err, "error processing RPM files")
}
}()
return archiveChan, nil
}
func (h *rpmHandler) processRPMFiles(ctx logContext.Context, reader rpmutils.PayloadReader, archiveChan chan []byte) error {
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
fileInfo, err := reader.Next()
if err != nil {
if errors.Is(err, io.EOF) {
ctx.Logger().V(3).Info("RPM payload archive fully processed")
return nil
}
return fmt.Errorf("error reading RPM payload: %w", err)
}
fileSize := fileInfo.Size()
fileCtx := logContext.WithValues(ctx, "filename", fileInfo.Name(), "size", fileSize)
if err := h.handleNonArchiveContent(fileCtx, reader, archiveChan); err != nil {
fileCtx.Logger().Error(err, "error handling archive content in RPM")
h.metrics.incErrors()
}
h.metrics.incFilesProcessed()
h.metrics.observeFileSize(fileSize)
}
}
}

pkg/handlers/rpm_test.go
@@ -0,0 +1,32 @@
package handlers
import (
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/trufflesecurity/trufflehog/v3/pkg/context"
)
func TestHandleRPMFile(t *testing.T) {
file, err := os.Open("testdata/test.rpm")
assert.Nil(t, err)
defer file.Close()
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
handler := newRPMHandler()
archiveChan, err := handler.HandleFile(context.AddLogger(ctx), file)
assert.NoError(t, err)
wantChunkCount := 179
count := 0
for range archiveChan {
count++
}
assert.Equal(t, wantChunkCount, count)
}

pkg/handlers/testdata/example.zip.gz (binary file not shown)
(binary test fixture, filename not shown)
pkg/handlers/testdata/nested-dirs.zip (binary file not shown)

@@ -9,12 +9,10 @@ import (
"github.com/go-errors/errors"
"github.com/go-logr/logr"
diskbufferreader "github.com/trufflesecurity/disk-buffer-reader"
"golang.org/x/sync/errgroup"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"github.com/trufflesecurity/trufflehog/v3/pkg/cleantemp"
"github.com/trufflesecurity/trufflehog/v3/pkg/common"
"github.com/trufflesecurity/trufflehog/v3/pkg/context"
"github.com/trufflesecurity/trufflehog/v3/pkg/handlers"
@@ -165,17 +163,9 @@ func (s *Source) scanFile(ctx context.Context, path string, chunksChan chan *sou
return fmt.Errorf("unable to open file: %w", err)
}
bufferName := cleantemp.MkFilename()
defer inputFile.Close()
logger.V(3).Info("scanning file")
reReader, err := diskbufferreader.New(inputFile, diskbufferreader.WithBufferName(bufferName))
if err != nil {
return fmt.Errorf("could not create re-readable reader: %w", err)
}
defer reReader.Close()
chunkSkel := &sources.Chunk{
SourceType: s.Type(),
SourceName: s.name,
@@ -190,44 +180,8 @@ func (s *Source) scanFile(ctx context.Context, path string, chunksChan chan *sou
},
Verify: s.verify,
}
if handlers.HandleFile(ctx, reReader, chunkSkel, sources.ChanReporter{Ch: chunksChan}) {
return nil
}
if err := reReader.Reset(); err != nil {
return err
}
reReader.Stop()
chunkReader := sources.NewChunkReader()
chunkResChan := chunkReader(ctx, reReader)
for data := range chunkResChan {
if err := data.Error(); err != nil {
s.log.Error(err, "error reading chunk.")
continue
}
chunk := &sources.Chunk{
SourceType: s.Type(),
SourceName: s.name,
SourceID: s.SourceID(),
JobID: s.JobID(),
Data: data.Bytes(),
SourceMetadata: &source_metadatapb.MetaData{
Data: &source_metadatapb.MetaData_Filesystem{
Filesystem: &source_metadatapb.Filesystem{
File: sanitizer.UTF8(path),
},
},
},
Verify: s.verify,
}
if err := common.CancellableWrite(ctx, chunksChan, chunk); err != nil {
return err
}
}
return nil
return handlers.HandleFile(ctx, inputFile, chunkSkel, sources.ChanReporter{Ch: chunksChan})
}
// Enumerate implements SourceUnitEnumerator interface. This implementation simply

@@ -12,7 +12,6 @@ import (
"cloud.google.com/go/storage"
"github.com/go-errors/errors"
"github.com/go-logr/logr"
diskbufferreader "github.com/trufflesecurity/disk-buffer-reader"
"golang.org/x/oauth2"
"golang.org/x/oauth2/endpoints"
"google.golang.org/protobuf/proto"
@@ -20,7 +19,6 @@ import (
"github.com/trufflesecurity/trufflehog/v3/pkg/cache"
"github.com/trufflesecurity/trufflehog/v3/pkg/cache/memory"
"github.com/trufflesecurity/trufflehog/v3/pkg/cleantemp"
"github.com/trufflesecurity/trufflehog/v3/pkg/context"
"github.com/trufflesecurity/trufflehog/v3/pkg/handlers"
"github.com/trufflesecurity/trufflehog/v3/pkg/pb/credentialspb"
@@ -355,49 +353,5 @@ func (s *Source) processObject(ctx context.Context, o object) error {
},
}
data, err := s.readObjectData(ctx, o, chunkSkel)
if err != nil {
return fmt.Errorf("error reading object data: %w", err)
}
// If data is nil, it means that the file was handled by a handler.
if data == nil {
return nil
}
chunkSkel.Data = data
select {
case <-ctx.Done():
return ctx.Err()
case s.chunksCh <- chunkSkel:
}
return nil
}
func (s *Source) readObjectData(ctx context.Context, o object, chunk *sources.Chunk) ([]byte, error) {
bufferName := cleantemp.MkFilename()
reader, err := diskbufferreader.New(o, diskbufferreader.WithBufferName(bufferName))
if err != nil {
return nil, fmt.Errorf("error creating disk buffer reader: %w", err)
}
defer reader.Close()
if handlers.HandleFile(ctx, reader, chunk, sources.ChanReporter{Ch: s.chunksCh}) {
ctx.Logger().V(3).Info("File was handled", "name", s.name, "bucket", o.bucket, "object", o.name)
return nil, nil
}
if err := reader.Reset(); err != nil {
return nil, fmt.Errorf("error resetting reader: %w", err)
}
reader.Stop()
data, err := io.ReadAll(reader)
if err != nil {
return nil, fmt.Errorf("error reading object: %w", err)
}
return data, nil
return handlers.HandleFile(ctx, o, chunkSkel, sources.ChanReporter{Ch: s.chunksCh})
}

@@ -25,8 +25,6 @@ import (
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
diskbufferreader "github.com/trufflesecurity/disk-buffer-reader"
"github.com/trufflesecurity/trufflehog/v3/pkg/cleantemp"
"github.com/trufflesecurity/trufflehog/v3/pkg/common"
"github.com/trufflesecurity/trufflehog/v3/pkg/context"
@@ -1212,71 +1210,17 @@ func (s *Git) handleBinary(ctx context.Context, gitDir string, reporter sources.
return nil
}
var handlerOpts []handlers.Option
if s.skipArchives {
handlerOpts = append(handlerOpts, handlers.WithSkipArchives(true))
}
cmd := exec.Command("git", "-C", gitDir, "cat-file", "blob", commitHash.String()+":"+path)
var stderr bytes.Buffer
cmd.Stderr = &stderr
fileReader, err := cmd.StdoutPipe()
stdout, err := cmd.Output()
if err != nil {
return err
return fmt.Errorf("error running git cat-file: %w\n%s", err, stderr.Bytes())
}
if err := cmd.Start(); err != nil {
return err
}
defer func() {
if err := fileReader.Close(); err != nil {
ctx.Logger().Error(err, "error closing fileReader")
}
if err := cmd.Wait(); err != nil {
ctx.Logger().Error(
err, "error waiting for command",
"command", cmd.String(),
"stderr", stderr.String(),
"commit", commitHash,
)
}
}()
bufferName := cleantemp.MkFilename()
reader, err := diskbufferreader.New(fileReader, diskbufferreader.WithBufferName(bufferName))
if err != nil {
return err
}
defer reader.Close()
if handlers.HandleFile(fileCtx, reader, chunkSkel, reporter, handlerOpts...) {
return nil
}
fileCtx.Logger().V(1).Info("binary file not handled, chunking raw")
if err := reader.Reset(); err != nil {
return err
}
reader.Stop()
chunkReader := sources.NewChunkReader()
chunkResChan := chunkReader(fileCtx, reader)
for data := range chunkResChan {
chunk := *chunkSkel
chunk.Data = data.Bytes()
if err := data.Error(); err != nil {
return err
}
if err := reporter.ChunkOk(fileCtx, chunk); err != nil {
return err
}
}
return nil
return handlers.HandleFile(fileCtx, bytes.NewReader(stdout), chunkSkel, reporter, handlers.WithSkipArchives(s.skipArchives))
}
func (s *Source) Enumerate(ctx context.Context, reporter sources.UnitReporter) error {

@@ -16,12 +16,10 @@ import (
"github.com/aws/aws-sdk-go/service/sts"
"github.com/go-errors/errors"
"github.com/go-logr/logr"
diskbufferreader "github.com/trufflesecurity/disk-buffer-reader"
"golang.org/x/sync/errgroup"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"github.com/trufflesecurity/trufflehog/v3/pkg/cleantemp"
"github.com/trufflesecurity/trufflehog/v3/pkg/common"
"github.com/trufflesecurity/trufflehog/v3/pkg/context"
"github.com/trufflesecurity/trufflehog/v3/pkg/handlers"
@@ -357,16 +355,7 @@ func (s *Source) pageChunker(ctx context.Context, client *s3.S3, chunksChan chan
}
return nil
}
bufferName := cleantemp.MkFilename()
defer res.Body.Close()
reader, err := diskbufferreader.New(res.Body, diskbufferreader.WithBufferName(bufferName))
if err != nil {
s.log.Error(err, "Could not create reader.")
return nil
}
defer reader.Close()
email := "Unknown"
if obj.Owner != nil {
@@ -391,31 +380,12 @@ func (s *Source) pageChunker(ctx context.Context, client *s3.S3, chunksChan chan
},
Verify: s.verify,
}
if handlers.HandleFile(ctx, reader, chunkSkel, sources.ChanReporter{Ch: chunksChan}) {
atomic.AddUint64(objectCount, 1)
s.log.V(5).Info("S3 object scanned.", "object_count", objectCount, "page_number", pageNumber)
if err := handlers.HandleFile(ctx, res.Body, chunkSkel, sources.ChanReporter{Ch: chunksChan}); err != nil {
ctx.Logger().Error(err, "error handling file")
return nil
}
if err := reader.Reset(); err != nil {
s.log.Error(err, "Error resetting reader to start.")
}
reader.Stop()
chunkReader := sources.NewChunkReader()
chunkResChan := chunkReader(ctx, reader)
for data := range chunkResChan {
if err := data.Error(); err != nil {
s.log.Error(err, "error reading chunk.")
continue
}
chunk := *chunkSkel
chunk.Data = data.Bytes()
if err := common.CancellableWrite(ctx, chunksChan, &chunk); err != nil {
return err
}
}
atomic.AddUint64(objectCount, 1)
s.log.V(5).Info("S3 object scanned.", "object_count", objectCount, "page_number", pageNumber)
nErr, ok = errorCount.Load(prefix)