mirror of https://github.com/matrix-org/dendrite, synced 2024-12-14 15:22:50 +00:00
mediaapi/fileutils: Remove obsolete error variables
parent 4f2d9a3b69
commit 6fc6499848
2 changed files with 7 additions and 20 deletions
@@ -30,16 +30,6 @@ import (
 	"github.com/matrix-org/dendrite/mediaapi/types"
 )
 
-// FIXME: make into error types
-var (
-	// ErrFileIsTooLarge indicates that the uploaded file is larger than the configured maximum file size
-	ErrFileIsTooLarge = fmt.Errorf("file is too large")
-	errRead           = fmt.Errorf("failed to read response from remote server")
-	errResponse       = fmt.Errorf("failed to write file data to response body")
-	errHash           = fmt.Errorf("failed to hash file data")
-	errWrite          = fmt.Errorf("failed to write file to disk")
-)
-
 // GetPathFromBase64Hash evaluates the path to a media file from its Base64Hash
 // If the Base64Hash is long enough, we split it into pieces, creating up to 2 subdirectories
 // for more manageable browsing and use the remainder as the file name.
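The GetPathFromBase64Hash doc comment above describes a fan-out scheme for the media store. The function body is not part of this diff, so the split widths below are assumptions; this is only a minimal sketch of what the comment describes:

// A minimal sketch of the path-splitting scheme described above. The
// exact split widths are assumptions; only the doc comment appears in
// this diff, not the function body.
package main

import (
	"fmt"
	"path/filepath"
)

// pathFromBase64Hash derives an on-disk path from a URL-safe Base64
// hash: the first two characters become two nested subdirectories and
// the remainder becomes the file name.
func pathFromBase64Hash(base64Hash, absBasePath string) (string, error) {
	if len(base64Hash) < 3 {
		return "", fmt.Errorf("hash too short to split: %q", base64Hash)
	}
	return filepath.Join(
		absBasePath,
		base64Hash[0:1], // first subdirectory
		base64Hash[1:2], // second subdirectory
		base64Hash[2:],  // remainder used as the file name
	), nil
}

func main() {
	path, err := pathFromBase64Hash("wJaf6O31rDCglbtLLLgN3g", "/var/dendrite/media")
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // /var/dendrite/media/w/J/af6O31rDCglbtLLLgN3g
}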
@@ -108,16 +108,13 @@ func (r *uploadRequest) doUpload(reqReader io.Reader, cfg *config.MediaAPI, db *
 	// The file data is hashed and the hash is used as the MediaID. The hash is useful as a
 	// method of deduplicating files to save storage, as well as a way to conduct
 	// integrity checks on the file data in the repository.
-	hash, bytesWritten, tmpDir, copyError := fileutils.WriteTempFile(reqReader, cfg.MaxFileSizeBytes, cfg.AbsBasePath)
-	if copyError != nil {
-		logFields := log.Fields{
-			"Origin":  r.MediaMetadata.Origin,
-			"MediaID": r.MediaMetadata.MediaID,
-		}
-		if copyError == fileutils.ErrFileIsTooLarge {
-			logFields["MaxFileSizeBytes"] = cfg.MaxFileSizeBytes
-		}
-		r.Logger.WithError(copyError).WithFields(logFields).Warn("Error while transferring file")
+	hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(reqReader, cfg.MaxFileSizeBytes, cfg.AbsBasePath)
+	if err != nil {
+		r.Logger.WithError(err).WithFields(log.Fields{
+			"Origin":           r.MediaMetadata.Origin,
+			"MediaID":          r.MediaMetadata.MediaID,
+			"MaxFileSizeBytes": cfg.MaxFileSizeBytes,
+		}).Warn("Error while transferring file")
 		fileutils.RemoveDir(tmpDir, r.Logger)
 		return &util.JSONResponse{
 			Code: 400,
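The comment in this hunk explains that the file data is hashed as it is written and the hash doubles as the MediaID. WriteTempFile itself is not shown in the diff; the following is a minimal, self-contained sketch of that hash-while-writing pattern, where the choice of SHA-256, unpadded URL-safe Base64, and the temp-file layout are assumptions for illustration:

// A sketch of hashing file data while streaming it to disk, so the
// resulting digest can serve as a deduplicating MediaID. The hash
// algorithm, encoding, and file layout here are assumptions; they are
// not taken from this diff.
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

// writeTempFile copies reqReader to a temporary file while feeding the
// same bytes to a hasher, enforcing a maximum file size along the way.
func writeTempFile(reqReader io.Reader, maxFileSizeBytes int64, baseDir string) (hash string, bytesWritten int64, tmpDir string, err error) {
	tmpDir, err = os.MkdirTemp(baseDir, "tmp")
	if err != nil {
		return "", 0, "", err
	}
	f, err := os.Create(filepath.Join(tmpDir, "content"))
	if err != nil {
		return "", 0, tmpDir, err
	}
	defer f.Close()

	hasher := sha256.New()
	// Read at most one byte over the limit so oversize input is detectable.
	limited := io.LimitReader(reqReader, maxFileSizeBytes+1)
	// Every byte written to disk is also fed to the hasher.
	bytesWritten, err = io.Copy(io.MultiWriter(f, hasher), limited)
	if err != nil {
		return "", bytesWritten, tmpDir, err
	}
	if bytesWritten > maxFileSizeBytes {
		return "", bytesWritten, tmpDir, fmt.Errorf("file is too large")
	}
	return base64.RawURLEncoding.EncodeToString(hasher.Sum(nil)), bytesWritten, tmpDir, nil
}

func main() {
	hash, n, dir, err := writeTempFile(strings.NewReader("hello"), 1024, os.TempDir())
	fmt.Println(hash, n, dir, err)
}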