mirror of https://github.com/dani-garcia/vaultwarden (synced 2025-02-16 12:58:25 +00:00)

Merge branch 'main' into multiple-domains-support

Commit 158f834ba7: 41 changed files with 1436 additions and 956 deletions
.github/workflows/build.yml (vendored, 6 changes)
@@ -46,7 +46,7 @@ jobs:
     steps:
       # Checkout the repo
       - name: "Checkout"
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
+        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b #v4.1.4
       # End Checkout the repo

@@ -74,7 +74,7 @@ jobs:

       # Only install the clippy and rustfmt components on the default rust-toolchain
       - name: "Install rust-toolchain version"
-        uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
+        uses: dtolnay/rust-toolchain@bb45937a053e097f8591208d8e74c90db1873d07 # master @ Apr 14, 2024, 9:02 PM GMT+2
         if: ${{ matrix.channel == 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"

@@ -84,7 +84,7 @@ jobs:

       # Install the any other channel to be used for which we do not execute clippy and rustfmt
       - name: "Install MSRV version"
-        uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
+        uses: dtolnay/rust-toolchain@bb45937a053e097f8591208d8e74c90db1873d07 # master @ Apr 14, 2024, 9:02 PM GMT+2
         if: ${{ matrix.channel != 'rust-toolchain' }}
         with:
           toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"

.github/workflows/hadolint.yml (vendored, 2 changes)
@@ -13,7 +13,7 @@ jobs:
     steps:
       # Checkout the repo
       - name: Checkout
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
       # End Checkout the repo

       # Download hadolint - https://github.com/hadolint/hadolint/releases

.github/workflows/release.yml (vendored, 22 changes)
@@ -58,7 +58,7 @@ jobs:
     steps:
       # Checkout the repo
      - name: Checkout
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
        with:
          fetch-depth: 0

@@ -69,11 +69,11 @@ jobs:

      # Start Docker Buildx
      - name: Setup Docker Buildx
-        uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
+        uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
        # https://github.com/moby/buildkit/issues/3969
        # Also set max parallelism to 2, the default of 4 breaks GitHub Actions
        with:
-          config-inline: |
+          buildkitd-config-inline: |
            [worker.oci]
              max-parallelism = 2
          driver-opts: |

@@ -102,7 +102,7 @@ jobs:

      # Login to Docker Hub
      - name: Login to Docker Hub
-        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+        uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

@@ -116,7 +116,7 @@ jobs:

      # Login to GitHub Container Registry
      - name: Login to GitHub Container Registry
-        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+        uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}

@@ -137,7 +137,7 @@ jobs:

      # Login to Quay.io
      - name: Login to Quay.io
-        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+        uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_USERNAME }}

@@ -171,7 +171,7 @@ jobs:
          echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"

      - name: Bake ${{ matrix.base_image }} containers
-        uses: docker/bake-action@849707117b03d39aba7924c50a10376a69e88d7d # v4.1.0
+        uses: docker/bake-action@73b0efa7a0e8ac276e0a8d5c580698a942ff10b5 # v4.4.0
        env:
          BASE_TAGS: "${{ env.BASE_TAGS }}"
          SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"

@@ -229,28 +229,28 @@ jobs:

      # Upload artifacts to Github Actions
      - name: "Upload amd64 artifact"
-        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
        if: ${{ matrix.base_image == 'alpine' }}
        with:
          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
          path: vaultwarden-amd64

      - name: "Upload arm64 artifact"
-        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
        if: ${{ matrix.base_image == 'alpine' }}
        with:
          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
          path: vaultwarden-arm64

      - name: "Upload armv7 artifact"
-        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
        if: ${{ matrix.base_image == 'alpine' }}
        with:
          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
          path: vaultwarden-armv7

      - name: "Upload armv6 artifact"
-        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
        if: ${{ matrix.base_image == 'alpine' }}
        with:
          name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6

.github/workflows/trivy.yml (vendored, 6 changes)
@@ -25,10 +25,10 @@ jobs:
      actions: read
    steps:
      - name: Checkout code
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
+        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b #v4.1.4

      - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca # v0.16.1
+        uses: aquasecurity/trivy-action@d710430a6722f083d3b36b8339ff66b32f22ee55 # v0.19.0
        with:
          scan-type: repo
          ignore-unfixed: true

@@ -37,6 +37,6 @@ jobs:
          severity: CRITICAL,HIGH

      - name: Upload Trivy scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@b7bf0a3ed3ecfa44160715d7c442788f65f0f923 # v3.23.2
+        uses: github/codeql-action/upload-sarif@2bbafcdd7fbf96243689e764c2f15d9735164f33 # v3.25.3
        with:
          sarif_file: 'trivy-results.sarif'

Cargo.lock (generated, 992 changes): file diff suppressed because it is too large
Cargo.toml (55 changes)
@@ -3,7 +3,7 @@ name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.74.0"
+rust-version = "1.75.0"
 resolver = "2"

 repository = "https://github.com/dani-garcia/vaultwarden"

@@ -36,7 +36,7 @@ unstable = []

 [target."cfg(not(windows))".dependencies]
 # Logging
-syslog = "6.1.0"
+syslog = "6.1.1"

 [dependencies]
 # Logging

@@ -60,47 +60,47 @@ rocket = { version = "0.5.0", features = ["tls", "json"], default-features = fal
 rocket_ws = { version ="0.1.0" }

 # WebSockets libraries
-rmpv = "1.0.1" # MessagePack library
+rmpv = "1.0.2" # MessagePack library

 # Concurrent HashMap used for WebSocket messaging and favicons
 dashmap = "5.5.3"

 # Async futures
 futures = "0.3.30"
-tokio = { version = "1.36.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
+tokio = { version = "1.37.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }

 # A generic serialization/deserialization framework
-serde = { version = "1.0.197", features = ["derive"] }
-serde_json = "1.0.114"
+serde = { version = "1.0.198", features = ["derive"] }
+serde_json = "1.0.116"

 # A safe, extensible ORM and Query builder
-diesel = { version = "2.1.5", features = ["chrono", "r2d2", "numeric"] }
+diesel = { version = "2.1.6", features = ["chrono", "r2d2", "numeric"] }
 diesel_migrations = "2.1.0"
 diesel_logger = { version = "0.3.0", optional = true }

 # Bundled/Static SQLite
-libsqlite3-sys = { version = "0.27.0", features = ["bundled"], optional = true }
+libsqlite3-sys = { version = "0.28.0", features = ["bundled"], optional = true }

 # Crypto-related libraries
 rand = { version = "0.8.5", features = ["small_rng"] }
 ring = "0.17.8"

 # UUID generation
-uuid = { version = "1.7.0", features = ["v4"] }
+uuid = { version = "1.8.0", features = ["v4"] }

 # Date and time libraries
-chrono = { version = "0.4.34", features = ["clock", "serde"], default-features = false }
-chrono-tz = "0.8.6"
-time = "0.3.34"
+chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false }
+chrono-tz = "0.9.0"
+time = "0.3.36"

 # Job scheduler
-job_scheduler_ng = "2.0.4"
+job_scheduler_ng = "2.0.5"

 # Data encoding library Hex/Base32/Base64
 data-encoding = "2.5.0"

 # JWT library
-jsonwebtoken = "9.2.0"
+jsonwebtoken = "9.3.0"

 # TOTP library
 totp-lite = "2.0.1"

@@ -115,27 +115,28 @@ webauthn-rs = "0.3.2"
 url = "2.5.0"

 # Email libraries
-lettre = { version = "0.11.4", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
+lettre = { version = "0.11.7", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
 percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
 email_address = "0.2.4"

 # HTML Template library
-handlebars = { version = "5.1.0", features = ["dir_source"] }
+handlebars = { version = "5.1.2", features = ["dir_source"] }

 # HTTP client (Used for favicons, version check, DUO and HIBP API)
-reqwest = { version = "0.11.26", features = ["stream", "json", "gzip", "brotli", "socks", "cookies", "trust-dns", "native-tls-alpn"] }
+reqwest = { version = "0.12.4", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
+hickory-resolver = "0.24.1"

 # Favicon extraction libraries
 html5gum = "0.5.7"
-regex = { version = "1.10.3", features = ["std", "perf", "unicode-perl"], default-features = false }
+regex = { version = "1.10.4", features = ["std", "perf", "unicode-perl"], default-features = false }
 data-url = "0.3.1"
-bytes = "1.5.0"
+bytes = "1.6.0"

 # Cache function results (Used for version check and favicon fetching)
-cached = { version = "0.49.2", features = ["async"] }
+cached = { version = "0.50.0", features = ["async"] }

 # Used for custom short lived cookie jar during favicon extraction
-cookie = "0.18.0"
+cookie = "0.18.1"
 cookie_store = "0.21.0"

 # Used by U2F, JWT and PostgreSQL

@@ -153,8 +154,8 @@ semver = "1.0.22"

 # Allow overriding the default memory allocator
 # Mainly used for the musl builds, since the default musl malloc is very slow
-mimalloc = { version = "0.1.39", features = ["secure"], default-features = false, optional = true }
-which = "6.0.0"
+mimalloc = { version = "0.1.41", features = ["secure"], default-features = false, optional = true }
+which = "6.0.1"

 # Argon2 library with support for the PHC format
 argon2 = "0.5.3"

@@ -205,14 +206,14 @@ unsafe_code = "forbid"
 non_ascii_idents = "forbid"

 # Deny
-future_incompatible = "deny"
+future_incompatible = { level = "deny", priority = -1 }
 noop_method_call = "deny"
 pointer_structural_match = "deny"
-rust_2018_idioms = "deny"
-rust_2021_compatibility = "deny"
+rust_2018_idioms = { level = "deny", priority = -1 }
+rust_2021_compatibility = { level = "deny", priority = -1 }
 trivial_casts = "deny"
 trivial_numeric_casts = "deny"
-unused = "deny"
+unused = { level = "deny", priority = -1 }
 unused_import_braces = "deny"
 unused_lifetimes = "deny"
 deprecated_in_future = "deny"

@@ -1,10 +1,10 @@
 ---
-vault_version: "v2024.1.2b"
-vault_image_digest: "sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08"
-# Cross Compile Docker Helper Scripts v1.3.0
+vault_version: "v2024.3.1"
+vault_image_digest: "sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5"
+# Cross Compile Docker Helper Scripts v1.4.0
 # We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
-xx_image_digest: "sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc"
-rust_version: 1.76.0 # Rust version to be used
+xx_image_digest: "sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4"
+rust_version: 1.77.2 # Rust version to be used
 debian_version: bookworm # Debian release name to be used
 alpine_version: 3.19 # Alpine version to be used
 # For which platforms/architectures will we try to build images

@@ -18,23 +18,23 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull docker.io/vaultwarden/web-vault:v2024.1.2b
-#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.1.2b
-#     [docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08]
+#     $ docker pull docker.io/vaultwarden/web-vault:v2024.3.1
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.3.1
+#     [docker.io/vaultwarden/web-vault@sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08
-#     [docker.io/vaultwarden/web-vault:v2024.1.2b]
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5
+#     [docker.io/vaultwarden/web-vault:v2024.3.1]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08 as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5 as vault

 ########################## ALPINE BUILD IMAGES ##########################
 ## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.76.0 as build_amd64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.76.0 as build_arm64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.76.0 as build_armv7
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.76.0 as build_armv6
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.77.2 as build_amd64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.77.2 as build_arm64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.77.2 as build_armv7
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.77.2 as build_armv6

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006

@@ -65,13 +65,14 @@ RUN mkdir -pv "${CARGO_HOME}" \
 RUN USER=root cargo new --bin /app
 WORKDIR /app

-# Shared variables across Debian and Alpine
+# Environment variables for Cargo on Alpine based builds
 RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
     # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
     if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
     # Output the current contents of the file
     cat /env-cargo

+# Configure the DB ARG as late as possible to not invalidate the cached layers above
 # Enable MiMalloc to improve performance on Alpine builds
 ARG DB=sqlite,mysql,postgresql,enable_mimalloc

@@ -18,24 +18,24 @@
 # - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
 #   click the tag name to view the digest of the image it currently points to.
 # - From the command line:
-#     $ docker pull docker.io/vaultwarden/web-vault:v2024.1.2b
-#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.1.2b
-#     [docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08]
+#     $ docker pull docker.io/vaultwarden/web-vault:v2024.3.1
+#     $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.3.1
+#     [docker.io/vaultwarden/web-vault@sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5]
 #
 # - Conversely, to get the tag name from the digest:
-#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08
-#     [docker.io/vaultwarden/web-vault:v2024.1.2b]
+#     $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5
+#     [docker.io/vaultwarden/web-vault:v2024.3.1]
 #
-FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08 as vault
+FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5 as vault

 ########################## Cross Compile Docker Helper Scripts ##########################
 ## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
 ## And these bash scripts do not have any significant difference if at all
-FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc AS xx
+FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4 AS xx

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.76.0-slim-bookworm as build
+FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.77.2-slim-bookworm as build
 COPY --from=xx / /
 ARG TARGETARCH
 ARG TARGETVARIANT

@@ -88,9 +88,17 @@ RUN mkdir -pv "${CARGO_HOME}" \
 RUN USER=root cargo new --bin /app
 WORKDIR /app

-# Environment variables for cargo across Debian and Alpine
+# Environment variables for Cargo on Debian based builds
+ARG ARCH_OPENSSL_LIB_DIR \
+    ARCH_OPENSSL_INCLUDE_DIR
+
 RUN source /env-cargo && \
     if xx-info is-cross ; then \
+        # Some special variables if needed to override some build paths
+        if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
+        fi && \
         # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
         # Because of this we generate the needed environment variables here which we can load in the needed steps.
         echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \

@@ -108,9 +108,17 @@ RUN USER=root cargo new --bin /app
 WORKDIR /app

 {% if base == "debian" %}
-# Environment variables for cargo across Debian and Alpine
+# Environment variables for Cargo on Debian based builds
+ARG ARCH_OPENSSL_LIB_DIR \
+    ARCH_OPENSSL_INCLUDE_DIR
+
 RUN source /env-cargo && \
     if xx-info is-cross ; then \
+        # Some special variables if needed to override some build paths
+        if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
+        fi && \
         # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
         # Because of this we generate the needed environment variables here which we can load in the needed steps.
         echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \

@@ -126,13 +134,14 @@ RUN source /env-cargo && \
 # Configure the DB ARG as late as possible to not invalidate the cached layers above
 ARG DB=sqlite,mysql,postgresql
 {% elif base == "alpine" %}
-# Shared variables across Debian and Alpine
+# Environment variables for Cargo on Alpine based builds
 RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
     # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
     if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
     # Output the current contents of the file
     cat /env-cargo

+# Configure the DB ARG as late as possible to not invalidate the cached layers above
 # Enable MiMalloc to improve performance on Alpine builds
 ARG DB=sqlite,mysql,postgresql,enable_mimalloc
 {% endif %}

@@ -11,6 +11,11 @@ With just these two files we can build both Debian and Alpine images for the fol
 - armv7 (linux/arm/v7)
 - armv6 (linux/arm/v6)

+Some unsupported platforms for Debian based images. These are not built and tested by default and are only provided to make it easier for users to build for these architectures.
+- 386 (linux/386)
+- ppc64le (linux/ppc64le)
+- s390x (linux/s390x)
+
 To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different then your host.<br>
 This ensures the container build process can run binaries from other architectures.<br>

@@ -125,6 +125,40 @@ target "debian-armv6" {
   tags = generate_tags("", "-armv6")
 }

+// ==== Start of unsupported Debian architecture targets ===
+// These are provided just to help users build for these rare platforms
+// They will not be built by default
+target "debian-386" {
+  inherits = ["debian"]
+  platforms = ["linux/386"]
+  tags = generate_tags("", "-386")
+  args = {
+    ARCH_OPENSSL_LIB_DIR = "/usr/lib/i386-linux-gnu"
+    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/i386-linux-gnu"
+  }
+}
+
+target "debian-ppc64le" {
+  inherits = ["debian"]
+  platforms = ["linux/ppc64le"]
+  tags = generate_tags("", "-ppc64le")
+  args = {
+    ARCH_OPENSSL_LIB_DIR = "/usr/lib/powerpc64le-linux-gnu"
+    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/powerpc64le-linux-gnu"
+  }
+}
+
+target "debian-s390x" {
+  inherits = ["debian"]
+  platforms = ["linux/s390x"]
+  tags = generate_tags("", "-s390x")
+  args = {
+    ARCH_OPENSSL_LIB_DIR = "/usr/lib/s390x-linux-gnu"
+    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/s390x-linux-gnu"
+  }
+}
+// ==== End of unsupported Debian architecture targets ===
+
 // A Group to build all platforms individually for local testing
 group "debian-all" {
   targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]

@@ -1,4 +1,4 @@
 [toolchain]
-channel = "1.76.0"
+channel = "1.77.2"
 components = [ "rustfmt", "clippy" ]
 profile = "minimal"

@@ -707,10 +707,7 @@ async fn diagnostics(
     let (latest_release, latest_commit, latest_web_build) =
         get_release_info(has_http_access, running_within_container).await;

-    let ip_header_name = match &ip_header.0 {
-        Some(h) => h,
-        _ => "",
-    };
+    let ip_header_name = &ip_header.0.unwrap_or_default();

     let diagnostics_json = json!({
         "dns_resolved": dns_resolved,

@@ -723,8 +720,8 @@ async fn diagnostics(
         "running_within_container": running_within_container,
         "container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
         "has_http_access": has_http_access,
-        "ip_header_exists": &ip_header.0.is_some(),
-        "ip_header_match": ip_header_name == CONFIG.ip_header(),
+        "ip_header_exists": !ip_header_name.is_empty(),
+        "ip_header_match": ip_header_name.eq(&CONFIG.ip_header()),
         "ip_header_name": ip_header_name,
         "ip_header_config": &CONFIG.ip_header(),
         "uses_proxy": uses_proxy,

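The ip_header refactor above collapses a match over an Option<String> into unwrap_or_default(), letting the empty string double as the "no header" marker. A minimal standalone sketch of that pattern (illustrative names, not vaultwarden's types):

    fn header_name(ip_header: Option<String>) -> String {
        // Before: match ip_header { Some(h) => h, None => String::new() }
        // After: None becomes "", so `!name.is_empty()` replaces `is_some()`.
        ip_header.unwrap_or_default()
    }

    fn main() {
        assert_eq!(header_name(Some("X-Real-IP".into())), "X-Real-IP");
        assert!(header_name(None).is_empty());
    }
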
@@ -166,7 +166,8 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
         }
         user
     } else if CONFIG.is_signup_allowed(&email)
-        || EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some()
+        || (CONFIG.emergency_access_allowed()
+            && EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some())
     {
         user
     } else {

@@ -217,7 +218,6 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
             if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid).await {
                 error!("Error sending welcome email: {:#?}", e);
             }

             user.last_verifying_at = Some(user.created_at);
         } else if let Err(e) = mail::send_welcome(&user.email).await {
             error!("Error sending welcome email: {:#?}", e);

@@ -229,6 +229,14 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
     }

     user.save(&mut conn).await?;

+    // accept any open emergency access invitations
+    if !CONFIG.mail_enabled() && CONFIG.emergency_access_allowed() {
+        for mut emergency_invite in EmergencyAccess::find_all_invited_by_grantee_email(&user.email, &mut conn).await {
+            let _ = emergency_invite.accept_invite(&user.uuid, &user.email, &mut conn).await;
+        }
+    }
+
     Ok(Json(json!({
         "Object": "register",
         "CaptchaBypassToken": "",

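A hedged sketch of the auto-accept loop added above, with stand-in types instead of vaultwarden's models: when mail is disabled, each pending invite matching the new user's email is accepted, and `let _ =` discards per-invite failures so one bad invite cannot abort registration.

    struct Invite {
        grantee_email: String,
        accepted: bool,
    }

    fn try_accept(invite: &mut Invite) -> Result<(), ()> {
        invite.accepted = true;
        Ok(())
    }

    fn accept_open_invites(invites: &mut [Invite], email: &str) {
        for invite in invites.iter_mut().filter(|i| i.grantee_email == email) {
            let _ = try_accept(invite); // ignore individual failures, as the real code does
        }
    }

    fn main() {
        let mut invites = vec![Invite { grantee_email: "new@example.com".into(), accepted: false }];
        accept_open_invites(&mut invites, "new@example.com");
        assert!(invites[0].accepted);
    }
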
@@ -438,24 +446,46 @@ async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, mut conn: D
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
 struct UpdateFolderData {
-    Id: String,
+    // There is a bug in 2024.3.x which adds a `null` item.
+    // To bypass this we allow a Option here, but skip it during the updates
+    // See: https://github.com/bitwarden/clients/issues/8453
+    Id: Option<String>,
     Name: String,
 }

+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct UpdateEmergencyAccessData {
+    Id: String,
+    KeyEncrypted: String,
+}
+
+#[derive(Deserialize)]
+#[allow(non_snake_case)]
+struct UpdateResetPasswordData {
+    OrganizationId: String,
+    ResetPasswordKey: String,
+}
+
 use super::ciphers::CipherData;
+use super::sends::{update_send_from_data, SendData};

 #[derive(Deserialize)]
 #[allow(non_snake_case)]
 struct KeyData {
     Ciphers: Vec<CipherData>,
     Folders: Vec<UpdateFolderData>,
+    Sends: Vec<SendData>,
+    EmergencyAccessKeys: Vec<UpdateEmergencyAccessData>,
+    ResetPasswordKeys: Vec<UpdateResetPasswordData>,
     Key: String,
-    PrivateKey: String,
     MasterPasswordHash: String,
+    PrivateKey: String,
 }

 #[post("/accounts/key", data = "<data>")]
 async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+    // TODO: See if we can wrap everything within a SQL Transaction. If something fails it should revert everything.
     let data: KeyData = data.into_inner().data;

     if !headers.user.check_valid_password(&data.MasterPasswordHash) {

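Changing `Id: String` to `Id: Option<String>` is what makes the buggy `null` items deserialize at all: serde maps JSON `null` to `None` for an Option field, where a plain `String` field would fail the whole request. A small sketch with plain serde (the struct mirrors the diff; the JSON input is an invented example):

    use serde::Deserialize;

    #[derive(Deserialize, Debug)]
    #[allow(non_snake_case)]
    struct UpdateFolderData {
        Id: Option<String>, // JSON `null` becomes `None` instead of an error
        Name: String,
    }

    fn main() {
        let json = r#"[{"Id":"abc","Name":"Work"},{"Id":null,"Name":"from the buggy client"}]"#;
        let folders: Vec<UpdateFolderData> = serde_json::from_str(json).unwrap();
        // Entries without an id are then skipped during the update loop.
        assert_eq!(folders.iter().filter(|f| f.Id.is_some()).count(), 1);
    }
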
@@ -472,37 +502,83 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D

     // Update folder data
     for folder_data in data.Folders {
-        let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &mut conn).await {
-            Some(folder) => folder,
-            None => err!("Folder doesn't exist"),
-        };
+        // Skip `null` folder id entries.
+        // See: https://github.com/bitwarden/clients/issues/8453
+        if let Some(folder_id) = folder_data.Id {
+            let mut saved_folder = match Folder::find_by_uuid(&folder_id, &mut conn).await {
+                Some(folder) => folder,
+                None => err!("Folder doesn't exist"),
+            };
+
+            if &saved_folder.user_uuid != user_uuid {
+                err!("The folder is not owned by the user")
+            }
+
+            saved_folder.name = folder_data.Name;
+            saved_folder.save(&mut conn).await?
+        }
+    }
+
+    // Update emergency access data
+    for emergency_access_data in data.EmergencyAccessKeys {
+        let mut saved_emergency_access = match EmergencyAccess::find_by_uuid(&emergency_access_data.Id, &mut conn).await
+        {
+            Some(emergency_access) => emergency_access,
+            None => err!("Emergency access doesn't exist"),
+        };

-        if &saved_folder.user_uuid != user_uuid {
-            err!("The folder is not owned by the user")
+        if &saved_emergency_access.grantor_uuid != user_uuid {
+            err!("The emergency access is not owned by the user")
         }

-        saved_folder.name = folder_data.Name;
-        saved_folder.save(&mut conn).await?
+        saved_emergency_access.key_encrypted = Some(emergency_access_data.KeyEncrypted);
+        saved_emergency_access.save(&mut conn).await?
+    }
+
+    // Update reset password data
+    for reset_password_data in data.ResetPasswordKeys {
+        let mut user_org =
+            match UserOrganization::find_by_user_and_org(user_uuid, &reset_password_data.OrganizationId, &mut conn)
+                .await
+            {
+                Some(reset_password) => reset_password,
+                None => err!("Reset password doesn't exist"),
+            };
+
+        user_org.reset_password_key = Some(reset_password_data.ResetPasswordKey);
+        user_org.save(&mut conn).await?
+    }
+
+    // Update send data
+    for send_data in data.Sends {
+        let mut send = match Send::find_by_uuid(send_data.Id.as_ref().unwrap(), &mut conn).await {
+            Some(send) => send,
+            None => err!("Send doesn't exist"),
+        };
+
+        update_send_from_data(&mut send, send_data, &headers, &mut conn, &nt, UpdateType::None).await?;
     }

     // Update cipher data
     use super::ciphers::update_cipher_from_data;

     for cipher_data in data.Ciphers {
-        let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &mut conn).await {
-            Some(cipher) => cipher,
-            None => err!("Cipher doesn't exist"),
-        };
+        if cipher_data.OrganizationId.is_none() {
+            let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &mut conn).await {
+                Some(cipher) => cipher,
+                None => err!("Cipher doesn't exist"),
+            };

-        if saved_cipher.user_uuid.as_ref().unwrap() != user_uuid {
-            err!("The cipher is not owned by the user")
-        }
+            if saved_cipher.user_uuid.as_ref().unwrap() != user_uuid {
+                err!("The cipher is not owned by the user")
+            }

-        // Prevent triggering cipher updates via WebSockets by settings UpdateType::None
-        // The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
-        // We force the users to logout after the user has been saved to try and prevent these issues.
-        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None)
-            .await?
+            // Prevent triggering cipher updates via WebSockets by settings UpdateType::None
+            // The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
+            // We force the users to logout after the user has been saved to try and prevent these issues.
+            update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None)
+                .await?
+        }
     }

     // Update user data

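Every loop in the rotated-key handler above follows the same shape: look the record up, bail if it is missing, verify the caller owns it, then store the re-encrypted value. A generic sketch of that shape with illustrative types (the real code persists each record with `.save(&mut conn).await?`):

    struct Record {
        owner_uuid: String,
        key: String,
    }

    fn rotate(found: Option<Record>, user_uuid: &str, new_key: String) -> Result<Record, &'static str> {
        let mut record = match found {
            Some(r) => r,
            None => return Err("Record doesn't exist"),
        };
        if record.owner_uuid != user_uuid {
            return Err("The record is not owned by the user");
        }
        record.key = new_key;
        Ok(record)
    }

    fn main() {
        let found = Some(Record { owner_uuid: "u1".into(), key: "old".into() });
        assert!(rotate(found, "u2", "new".into()).is_err()); // ownership check fires
    }
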
@@ -773,7 +849,7 @@ async fn delete_account(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, m

 #[get("/accounts/revision-date")]
 fn revision_date(headers: Headers) -> JsonResult {
-    let revision_date = headers.user.updated_at.timestamp_millis();
+    let revision_date = headers.user.updated_at.and_utc().timestamp_millis();
     Ok(Json(json!(revision_date)))
 }

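The `and_utc()` change above tracks a chrono deprecation: from 0.4.35 on, `NaiveDateTime::timestamp_millis` is deprecated, so the naive value is first tagged as UTC and the timestamp is taken from the resulting `DateTime<Utc>`. A standalone sketch against chrono 0.4.38 (the date is invented):

    use chrono::NaiveDate;

    fn main() {
        let updated_at = NaiveDate::from_ymd_opt(2024, 4, 27)
            .unwrap()
            .and_hms_opt(12, 0, 0)
            .unwrap();
        // Deprecated: updated_at.timestamp_millis()
        let millis = updated_at.and_utc().timestamp_millis();
        println!("{millis}");
    }
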
@@ -10,6 +10,7 @@ use rocket::{
 };
 use serde_json::Value;

+use crate::util::NumberOrString;
 use crate::{
     api::{self, core::log_event, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordOrOtpData, UpdateType},
     auth::Headers,

@@ -205,7 +206,7 @@ pub struct CipherData {
     // Folder id is not included in import
     FolderId: Option<String>,
     // TODO: Some of these might appear all the time, no need for Option
-    OrganizationId: Option<String>,
+    pub OrganizationId: Option<String>,

     Key: Option<String>,

@@ -321,7 +322,7 @@ async fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, mut conn:
     data.LastKnownRevisionDate = None;

     let mut cipher = Cipher::new(data.Type, data.Name.clone());
-    update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;
+    update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;

     Ok(Json(cipher.to_json(&headers.base_url, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
 }

@@ -352,7 +353,7 @@ pub async fn update_cipher_from_data(
     cipher: &mut Cipher,
     data: CipherData,
     headers: &Headers,
-    shared_to_collection: bool,
+    shared_to_collections: Option<Vec<String>>,
     conn: &mut DbConn,
     nt: &Notify<'_>,
     ut: UpdateType,

@@ -391,7 +392,7 @@ pub async fn update_cipher_from_data(
         match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await {
             None => err!("You don't have permission to add item to organization"),
             Some(org_user) => {
-                if shared_to_collection
+                if shared_to_collections.is_some()
                     || org_user.has_full_access()
                     || cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await
                 {

@@ -518,8 +519,15 @@ pub async fn update_cipher_from_data(
             )
             .await;
         }
-        nt.send_cipher_update(ut, cipher, &cipher.update_users_revision(conn).await, &headers.device.uuid, None, conn)
-            .await;
+        nt.send_cipher_update(
+            ut,
+            cipher,
+            &cipher.update_users_revision(conn).await,
+            &headers.device.uuid,
+            shared_to_collections,
+            conn,
+        )
+        .await;
     }
     Ok(())
 }

@@ -580,7 +588,7 @@ async fn post_ciphers_import(
         cipher_data.FolderId = folder_uuid;

         let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
-        update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await?;
+        update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await?;
     }

     let mut user = headers.user;

@@ -648,7 +656,7 @@ async fn put_cipher(
         err!("Cipher is not write accessible")
     }

-    update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;
+    update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;

     Ok(Json(cipher.to_json(&headers.base_url, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
 }

@@ -898,7 +906,7 @@ async fn share_cipher_by_uuid(
         None => err!("Cipher doesn't exist"),
     };

-    let mut shared_to_collection = false;
+    let mut shared_to_collections = vec![];

     if let Some(organization_uuid) = &data.Cipher.OrganizationId {
         for uuid in &data.CollectionIds {

@@ -907,7 +915,7 @@ async fn share_cipher_by_uuid(
                 Some(collection) => {
                     if collection.is_writable_by_user(&headers.user.uuid, conn).await {
                         CollectionCipher::save(&cipher.uuid, &collection.uuid, conn).await?;
-                        shared_to_collection = true;
+                        shared_to_collections.push(collection.uuid);
                     } else {
                         err!("No rights to modify the collection")
                     }

@@ -923,7 +931,7 @@ async fn share_cipher_by_uuid(
         UpdateType::SyncCipherCreate
     };

-    update_cipher_from_data(&mut cipher, data.Cipher, headers, shared_to_collection, conn, nt, ut).await?;
+    update_cipher_from_data(&mut cipher, data.Cipher, headers, Some(shared_to_collections), conn, nt, ut).await?;

     Ok(Json(cipher.to_json(&headers.base_url, &headers.user.uuid, None, CipherSyncType::User, conn).await))
 }

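The `bool` to `Option<Vec<String>>` switch above is a data upgrade, not just a rename: callers now pass which collections a cipher was shared to, so the WebSocket update can target them, while the old yes/no answer is still recoverable via `is_some()`. A minimal sketch with illustrative names:

    fn notify(shared_to_collections: Option<Vec<String>>) {
        // The old `shared_to_collection: bool` falls out of the richer type:
        let was_shared = shared_to_collections.is_some();
        match shared_to_collections {
            Some(ids) => println!("shared={was_shared}, notify collections {ids:?}"),
            None => println!("shared={was_shared}, not a share operation"),
        }
    }

    fn main() {
        notify(Some(vec!["col-1".into(), "col-2".into()]));
        notify(None);
    }
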
@@ -957,7 +965,7 @@ async fn get_attachment(uuid: &str, attachment_id: &str, headers: Headers, mut c
 struct AttachmentRequestData {
     Key: String,
     FileName: String,
-    FileSize: i64,
+    FileSize: NumberOrString,
     AdminRequest: Option<bool>, // true when attaching from an org vault view
 }

@@ -987,12 +995,14 @@ async fn post_attachment_v2(
     }

     let data: AttachmentRequestData = data.into_inner().data;
-    if data.FileSize < 0 {
+    let file_size = data.FileSize.into_i64()?;
+
+    if file_size < 0 {
         err!("Attachment size can't be negative")
     }
     let attachment_id = crypto::generate_attachment_id();
     let attachment =
-        Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, data.FileSize, Some(data.Key));
+        Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, file_size, Some(data.Key));
     attachment.save(&mut conn).await.expect("Error saving attachment");

     let url = format!("/ciphers/{}/attachment/{}", cipher.uuid, attachment_id);

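`NumberOrString` comes from vaultwarden's `src/util.rs`; a minimal equivalent (an assumption for illustration, not the project's exact code) shows why the `FileSize` change accepts clients that send either `42` or `"42"`:

    use serde::Deserialize;

    #[derive(Deserialize)]
    #[serde(untagged)]
    enum NumberOrString {
        Number(i64),
        String(String),
    }

    impl NumberOrString {
        fn into_i64(self) -> Result<i64, std::num::ParseIntError> {
            match self {
                NumberOrString::Number(n) => Ok(n),
                NumberOrString::String(s) => s.parse(),
            }
        }
    }

    fn main() {
        let a: NumberOrString = serde_json::from_str("42").unwrap();
        let b: NumberOrString = serde_json::from_str(r#""42""#).unwrap();
        assert_eq!(a.into_i64().unwrap(), 42);
        assert_eq!(b.into_i64().unwrap(), 42);
    }
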
@@ -1,4 +1,4 @@
-use chrono::{Duration, Utc};
+use chrono::{TimeDelta, Utc};
 use rocket::{serde::json::Json, Route};
 use serde_json::Value;

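The import swap above follows chrono 0.4.33+, where `Duration` became an alias for `TimeDelta`; importing `TimeDelta` directly is the forward-compatible spelling with identical behavior. A small sketch:

    use chrono::{TimeDelta, Utc};

    fn main() {
        // Previously: Utc::now() + Duration::days(5)
        let expires = Utc::now() + TimeDelta::try_days(5).expect("5 days is in range");
        println!("{expires}");
    }
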
@@ -61,7 +61,9 @@ async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json<Value> {
    let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
    let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
    for ea in emergency_access_list {
-       emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
+       if let Some(grantee) = ea.to_json_grantee_details(&mut conn).await {
+           emergency_access_list_json.push(grantee)
+       }
    }

    Json(json!({
@@ -95,7 +97,9 @@ async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
    check_emergency_access_enabled()?;

    match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-       Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
+       Some(emergency_access) => Ok(Json(
+           emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"),
+       )),
        None => err!("Emergency access not valid."),
    }
 }
@@ -209,7 +213,7 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
        err!("You can not set yourself as an emergency contact.")
    }

-   let grantee_user = match User::find_by_mail(&email, &mut conn).await {
+   let (grantee_user, new_user) = match User::find_by_mail(&email, &mut conn).await {
        None => {
            if !CONFIG.invitations_allowed() {
                err!(format!("Grantee user does not exist: {}", &email))
@@ -226,9 +230,10 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade

            let mut user = User::new(email.clone());
            user.save(&mut conn).await?;
-           user
+           (user, true)
        }
-       Some(user) => user,
+       Some(user) if user.password_hash.is_empty() => (user, true),
+       Some(user) => (user, false),
    };

    if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
@@ -256,15 +261,9 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
            &grantor_user.email,
        )
        .await?;
-   } else {
-       // Automatically mark user as accepted if no email invites
-       match User::find_by_mail(&email, &mut conn).await {
-           Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await {
-               Ok(v) => v,
-               Err(e) => err!(e.to_string()),
-           },
-           None => err!("Grantee user not found."),
-       }
+   } else if !new_user {
+       // if mail is not enabled immediately accept the invitation for existing users
+       new_emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
    }

    Ok(())
@@ -308,17 +307,12 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp
            &grantor_user.email,
        )
        .await?;
-   } else {
-       if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
-           let invitation = Invitation::new(&email);
-           invitation.save(&mut conn).await?;
-       }
-
-       // Automatically mark user as accepted if no email invites
-       match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
-           Ok(v) => v,
-           Err(e) => err!(e.to_string()),
-       }
+   } else if !grantee_user.password_hash.is_empty() {
+       // accept the invitation for existing user
+       emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
+   } else if CONFIG.invitations_allowed() && Invitation::find_by_mail(&email, &mut conn).await.is_none() {
+       let invitation = Invitation::new(&email);
+       invitation.save(&mut conn).await?;
    }

    Ok(())
@@ -367,10 +361,7 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
        && grantor_user.name == claims.grantor_name
        && grantor_user.email == claims.grantor_email
    {
-       match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
-           Ok(v) => v,
-           Err(e) => err!(e.to_string()),
-       }
+       emergency_access.accept_invite(&grantee_user.uuid, &grantee_user.email, &mut conn).await?;

        if CONFIG.mail_enabled() {
            mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
@@ -382,26 +373,6 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
    }
 }

-async fn accept_invite_process(
-   grantee_uuid: &str,
-   emergency_access: &mut EmergencyAccess,
-   grantee_email: &str,
-   conn: &mut DbConn,
-) -> EmptyResult {
-   if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
-       err!("User email does not match invite.");
-   }
-
-   if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
-       err!("Emergency contact already accepted.");
-   }
-
-   emergency_access.status = EmergencyAccessStatus::Accepted as i32;
-   emergency_access.grantee_uuid = Some(String::from(grantee_uuid));
-   emergency_access.email = None;
-   emergency_access.save(conn).await
-}
-
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
 struct ConfirmData {
@@ -766,7 +737,7 @@ pub async fn emergency_request_timeout_job(pool: DbPool) {
    for mut emer in emergency_access_list {
        // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
        let recovery_allowed_at =
-           emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days));
+           emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days)).unwrap();
        if recovery_allowed_at.le(&now) {
            // Only update the access status
            // Updating the whole record could cause issues when the emergency_notification_reminder_job is also active
@@ -822,10 +793,10 @@ pub async fn emergency_notification_reminder_job(pool: DbPool) {
        // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None)
        // Calculate the day before the recovery will become active
        let final_recovery_reminder_at =
-           emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days - 1));
+           emer.recovery_initiated_at.unwrap() + TimeDelta::try_days(i64::from(emer.wait_time_days - 1)).unwrap();
        // Calculate if a day has passed since the previous notification, else no notification has been sent before
        let next_recovery_reminder_at = if let Some(last_notification_at) = emer.last_notification_at {
-           last_notification_at + Duration::days(1)
+           last_notification_at + TimeDelta::try_days(1).unwrap()
        } else {
            now
        };
@@ -192,14 +192,17 @@ fn version() -> Json<&'static str> {
 fn config() -> Json<Value> {
    // TODO: maybe this should be extracted from the current request params
    let domain = crate::CONFIG.main_domain();
-   let feature_states = parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
+   let mut feature_states =
+       parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
+   // Force the new key rotation feature
+   feature_states.insert("key-rotation-improvements".to_string(), true);
    Json(json!({
        // Note: The clients use this version to handle backwards compatibility concerns
        // This means they expect a version that closely matches the Bitwarden server version
        // We should make sure that we keep this updated when we support the new server features
        // Version history:
        // - Individual cipher key encryption: 2023.9.1
-       "version": "2023.9.1",
+       "version": "2024.2.0",
        "gitHash": option_env!("GIT_REV"),
        "server": {
            "name": "Vaultwarden",
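For context, feature_states is a plain map of flag name to enabled state, and the two added lines pin one flag on regardless of configuration. A rough sketch under that assumption; parse_experimental_client_feature_flags returns a HashMap<String, bool> in this codebase, and the parsing shown here is a simplified stand-in:

    use std::collections::HashMap;

    fn feature_states(configured: &str) -> HashMap<String, bool> {
        // Simplified stand-in for parse_experimental_client_feature_flags.
        let mut states: HashMap<String, bool> =
            configured.split(',').map(|f| (f.trim().to_string(), true)).collect();
        // Force the flag on, exactly as the diff does for key rotation.
        states.insert("key-rotation-improvements".to_string(), true);
        states
    }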
@@ -329,27 +329,19 @@ async fn get_org_collections_details(org_id: &str, headers: ManagerHeadersLoose,
        && GroupUser::has_full_access_by_member(org_id, &user_org.uuid, &mut conn).await);

    for col in Collection::find_by_organization(org_id, &mut conn).await {
-       // assigned indicates whether the current user has access to the given collection
-       let mut assigned = has_full_access_to_org;
+       // check whether the current user has access to the given collection
+       let assigned = has_full_access_to_org
+           || CollectionUser::has_access_to_collection_by_user(&col.uuid, &user_org.user_uuid, &mut conn).await
+           || (CONFIG.org_groups_enabled()
+               && GroupUser::has_access_to_collection_by_member(&col.uuid, &user_org.uuid, &mut conn).await);

        // get the users assigned directly to the given collection
        let users: Vec<Value> = coll_users
            .iter()
            .filter(|collection_user| collection_user.collection_uuid == col.uuid)
-           .map(|collection_user| {
-               // check if the current user is assigned to this collection directly
-               if collection_user.user_uuid == user_org.uuid {
-                   assigned = true;
-               }
-               SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json()
-           })
+           .map(|collection_user| SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json())
            .collect();

-       // check if the current user has access to the given collection via a group
-       if !assigned && CONFIG.org_groups_enabled() {
-           assigned = GroupUser::has_access_to_collection_by_member(&col.uuid, &user_org.uuid, &mut conn).await;
-       }
-
        // get the group details for the given collection
        let groups: Vec<Value> = if CONFIG.org_groups_enabled() {
            CollectionGroup::find_by_collection(&col.uuid, &mut conn)
@@ -672,24 +664,16 @@ async fn get_org_collection_detail(
            Vec::with_capacity(0)
        };

-       let mut assigned = false;
        let users: Vec<Value> =
            CollectionUser::find_by_collection_swap_user_uuid_with_org_user_uuid(&collection.uuid, &mut conn)
                .await
                .iter()
                .map(|collection_user| {
-                   // Remember `user_uuid` is swapped here with the `user_org.uuid` with a join during the `find_by_collection_swap_user_uuid_with_org_user_uuid` call.
-                   // We check here if the current user is assigned to this collection or not.
-                   if collection_user.user_uuid == user_org.uuid {
-                       assigned = true;
-                   }
                    SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json()
                })
                .collect();

-       if user_org.access_all {
-           assigned = true;
-       }
+       let assigned = Collection::can_access_collection(&user_org, &collection.uuid, &mut conn).await;

        let mut json_object = collection.to_json();
        json_object["Assigned"] = json!(assigned);
@@ -1618,7 +1602,7 @@ async fn post_org_import(
    let mut ciphers = Vec::new();
    for cipher_data in data.Ciphers {
        let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
-       update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await.ok();
+       update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await.ok();
        ciphers.push(cipher);
    }

@@ -2247,7 +2231,7 @@ impl GroupRequest {
    }

    pub fn update_group(&self, mut group: Group) -> Group {
-       group.name = self.Name.clone();
+       group.name.clone_from(&self.Name);
        group.access_all = self.AccessAll.unwrap_or(false);
        // Group Updates do not support changing the external_id
        // These input fields are in a disabled state, and can only be updated/added via ldap_import
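clone_from is behaviourally identical to assigning a clone, but it may reuse the destination String's existing buffer instead of always allocating; this is what clippy's assigning_clones lint suggests. An illustrative sketch:

    fn rename(group_name: &mut String, new_name: &String) {
        // `*group_name = new_name.clone()` always allocates a fresh String;
        // clone_from copies into group_name's buffer when capacity allows.
        group_name.clone_from(new_name);
    }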
@@ -209,7 +209,7 @@ impl<'r> FromRequest<'r> for PublicToken {
            Err(_) => err_handler!("Invalid claim"),
        };
        // Check if time is between claims.nbf and claims.exp
-       let time_now = Utc::now().naive_utc().timestamp();
+       let time_now = Utc::now().timestamp();
        if time_now < claims.nbf {
            err_handler!("Token issued in the future");
        }
@@ -1,6 +1,6 @@
 use std::path::Path;

-use chrono::{DateTime, Duration, Utc};
+use chrono::{DateTime, TimeDelta, Utc};
 use num_traits::ToPrimitive;
 use rocket::form::Form;
 use rocket::fs::NamedFile;
@@ -49,7 +49,7 @@ pub async fn purge_sends(pool: DbPool) {

 #[derive(Deserialize)]
 #[allow(non_snake_case)]
-struct SendData {
+pub struct SendData {
    Type: i32,
    Key: String,
    Password: Option<String>,
@@ -65,6 +65,9 @@ struct SendData {
    Text: Option<Value>,
    File: Option<Value>,
    FileLength: Option<NumberOrString>,
+
+   // Used for key rotations
+   pub Id: Option<String>,
 }

 /// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
@@ -119,7 +122,7 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
        err!("Send data not provided");
    };

-   if data.DeletionDate > Utc::now() + Duration::days(31) {
+   if data.DeletionDate > Utc::now() + TimeDelta::try_days(31).unwrap() {
        err!(
            "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
        );
@@ -549,6 +552,19 @@ async fn put_send(
        None => err!("Send not found"),
    };

+   update_send_from_data(&mut send, data, &headers, &mut conn, &nt, UpdateType::SyncSendUpdate).await?;
+
+   Ok(Json(send.to_json()))
+}
+
+pub async fn update_send_from_data(
+   send: &mut Send,
+   data: SendData,
+   headers: &Headers,
+   conn: &mut DbConn,
+   nt: &Notify<'_>,
+   ut: UpdateType,
+) -> EmptyResult {
    if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
        err!("Send is not owned by user")
    }
@@ -557,6 +573,12 @@ async fn put_send(
        err!("Sends can't change type")
    }

+   if data.DeletionDate > Utc::now() + TimeDelta::try_days(31).unwrap() {
+       err!(
+           "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
+       );
+   }
+
    // When updating a file Send, we receive nulls in the File field, as it's immutable,
    // so we only need to update the data field in the Text case
    if data.Type == SendType::Text as i32 {
@@ -569,11 +591,6 @@ async fn put_send(
        send.data = data_str;
    }

-   if data.DeletionDate > Utc::now() + Duration::days(31) {
-       err!(
-           "You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
-       );
-   }
    send.name = data.Name;
    send.akey = data.Key;
    send.deletion_date = data.DeletionDate.naive_utc();
|
@ -591,17 +608,11 @@ async fn put_send(
|
||||||
send.set_password(Some(&password));
|
send.set_password(Some(&password));
|
||||||
}
|
}
|
||||||
|
|
||||||
send.save(&mut conn).await?;
|
send.save(conn).await?;
|
||||||
nt.send_send_update(
|
if ut != UpdateType::None {
|
||||||
UpdateType::SyncSendUpdate,
|
nt.send_send_update(ut, send, &send.update_users_revision(conn).await, &headers.device.uuid, conn).await;
|
||||||
&send,
|
}
|
||||||
&send.update_users_revision(&mut conn).await,
|
Ok(())
|
||||||
&headers.device.uuid,
|
|
||||||
&mut conn,
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
Ok(Json(send.to_json()))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[delete("/sends/<id>")]
|
#[delete("/sends/<id>")]
|
||||||
|
|
|
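The refactor above splits put_send into a thin route handler plus a reusable update_send_from_data, with the UpdateType parameter deciding whether a websocket update is pushed at all. A hedged sketch of the intended second caller, inferred from the new pub visibility and the Id field added to SendData; the actual key-rotation endpoint is not part of this excerpt, so the function below is hypothetical:

    // Hypothetical key-rotation caller: update each Send without emitting one
    // notification per item, then let the rotation endpoint notify once at the end.
    async fn rotate_send(send: &mut Send, data: SendData, headers: &Headers,
                         conn: &mut DbConn, nt: &Notify<'_>) -> EmptyResult {
        update_send_from_data(send, data, headers, conn, nt, UpdateType::None).await
    }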
@@ -1,4 +1,4 @@
-use chrono::{Duration, NaiveDateTime, Utc};
+use chrono::{DateTime, TimeDelta, Utc};
 use rocket::serde::json::Json;
 use rocket::Route;

@@ -232,9 +232,9 @@ pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, c
    twofactor.data = email_data.to_json();
    twofactor.save(conn).await?;

-   let date = NaiveDateTime::from_timestamp_opt(email_data.token_sent, 0).expect("Email token timestamp invalid.");
+   let date = DateTime::from_timestamp(email_data.token_sent, 0).expect("Email token timestamp invalid.").naive_utc();
    let max_time = CONFIG.email_expiration_time() as i64;
-   if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
+   if date + TimeDelta::try_seconds(max_time).unwrap() < Utc::now().naive_utc() {
        err!(
            "Token has expired",
            ErrorEvent {
@@ -265,14 +265,14 @@ impl EmailTokenData {
        EmailTokenData {
            email,
            last_token: Some(token),
-           token_sent: Utc::now().naive_utc().timestamp(),
+           token_sent: Utc::now().timestamp(),
            attempts: 0,
        }
    }

    pub fn set_token(&mut self, token: String) {
        self.last_token = Some(token);
-       self.token_sent = Utc::now().naive_utc().timestamp();
+       self.token_sent = Utc::now().timestamp();
    }

    pub fn reset_token(&mut self) {
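Same chrono migration, second form: NaiveDateTime::from_timestamp_opt is deprecated, and the replacement constructs a DateTime<Utc> first and converts back. A minimal sketch (function name assumed):

    use chrono::{DateTime, NaiveDateTime};

    fn to_naive(secs: i64) -> Option<NaiveDateTime> {
        // DateTime::from_timestamp returns Option<DateTime<Utc>>; .naive_utc()
        // recovers the NaiveDateTime the old API produced.
        DateTime::from_timestamp(secs, 0).map(|dt| dt.naive_utc())
    }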
|
@ -1,4 +1,4 @@
|
||||||
use chrono::{Duration, Utc};
|
use chrono::{TimeDelta, Utc};
|
||||||
use data_encoding::BASE32;
|
use data_encoding::BASE32;
|
||||||
use rocket::serde::json::Json;
|
use rocket::serde::json::Json;
|
||||||
use rocket::Route;
|
use rocket::Route;
|
||||||
|
@@ -259,7 +259,7 @@ pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
    };

    let now = Utc::now().naive_utc();
-   let time_limit = Duration::minutes(CONFIG.incomplete_2fa_time_limit());
+   let time_limit = TimeDelta::try_minutes(CONFIG.incomplete_2fa_time_limit()).unwrap();
    let time_before = now - time_limit;
    let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &mut conn).await;
    for login in incomplete_logins {
|
@ -1,4 +1,4 @@
|
||||||
use chrono::{Duration, NaiveDateTime, Utc};
|
use chrono::{DateTime, TimeDelta, Utc};
|
||||||
use rocket::Route;
|
use rocket::Route;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
|
@@ -32,7 +32,7 @@ impl ProtectedActionData {
    pub fn new(token: String) -> Self {
        Self {
            token,
-           token_sent: Utc::now().naive_utc().timestamp(),
+           token_sent: Utc::now().timestamp(),
            attempts: 0,
        }
    }
@@ -122,9 +122,9 @@ pub async fn validate_protected_action_otp(

    // Check if the token has expired (Using the email 2fa expiration time)
    let date =
-       NaiveDateTime::from_timestamp_opt(pa_data.token_sent, 0).expect("Protected Action token timestamp invalid.");
+       DateTime::from_timestamp(pa_data.token_sent, 0).expect("Protected Action token timestamp invalid.").naive_utc();
    let max_time = CONFIG.email_expiration_time() as i64;
-   if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
+   if date + TimeDelta::try_seconds(max_time).unwrap() < Utc::now().naive_utc() {
        pa.delete(conn).await?;
        err!("Token has expired")
    }
|
@ -1,7 +1,7 @@
|
||||||
use rocket::serde::json::Json;
|
use rocket::serde::json::Json;
|
||||||
use rocket::Route;
|
use rocket::Route;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
use yubico::{config::Config, verify};
|
use yubico::{config::Config, verify_async};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
api::{
|
api::{
|
||||||
|
@ -74,13 +74,10 @@ async fn verify_yubikey_otp(otp: String) -> EmptyResult {
|
||||||
let config = Config::default().set_client_id(yubico_id).set_key(yubico_secret);
|
let config = Config::default().set_client_id(yubico_id).set_key(yubico_secret);
|
||||||
|
|
||||||
match CONFIG.yubico_server() {
|
match CONFIG.yubico_server() {
|
||||||
Some(server) => {
|
Some(server) => verify_async(otp, config.set_api_hosts(vec![server])).await,
|
||||||
tokio::task::spawn_blocking(move || verify(otp, config.set_api_hosts(vec![server]))).await.unwrap()
|
None => verify_async(otp, config).await,
|
||||||
}
|
|
||||||
None => tokio::task::spawn_blocking(move || verify(otp, config)).await.unwrap(),
|
|
||||||
}
|
}
|
||||||
.map_res("Failed to verify OTP")
|
.map_res("Failed to verify OTP")
|
||||||
.and(Ok(()))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[post("/two-factor/get-yubikey", data = "<data>")]
|
#[post("/two-factor/get-yubikey", data = "<data>")]
|
||||||
|
@ -194,10 +191,6 @@ pub async fn validate_yubikey_login(response: &str, twofactor_data: &str) -> Emp
|
||||||
err!("Given Yubikey is not registered");
|
err!("Given Yubikey is not registered");
|
||||||
}
|
}
|
||||||
|
|
||||||
let result = verify_yubikey_otp(response.to_owned()).await;
|
verify_yubikey_otp(response.to_owned()).await.map_res("Failed to verify Yubikey against OTP server")?;
|
||||||
|
Ok(())
|
||||||
match result {
|
|
||||||
Ok(_answer) => Ok(()),
|
|
||||||
Err(_e) => err!("Failed to verify Yubikey against OTP server"),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
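With the yubico crate's native async verifier there is no longer any need to push the blocking call onto a worker thread via spawn_blocking. A stripped-down sketch of the new call shape; error mapping is elided and the client id/key values are placeholders:

    use yubico::{config::Config, verify_async};

    async fn check_otp(otp: String, id: String, key: String) -> bool {
        let config = Config::default().set_client_id(id).set_key(key);
        // verify_async drives the HTTP validation request on the async runtime.
        verify_async(otp, config).await.is_ok()
    }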
320 src/api/icons.rs
@@ -1,6 +1,6 @@
 use std::{
    net::IpAddr,
-   sync::Arc,
+   sync::{Arc, Mutex},
    time::{Duration, SystemTime},
 };

@@ -16,14 +16,13 @@ use rocket::{http::ContentType, response::Redirect, Route};
 use tokio::{
    fs::{create_dir_all, remove_file, symlink_metadata, File},
    io::{AsyncReadExt, AsyncWriteExt},
-   net::lookup_host,
 };

 use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer};

 use crate::{
    error::Error,
-   util::{get_reqwest_client_builder, Cached},
+   util::{get_reqwest_client_builder, Cached, CustomDnsResolver, CustomResolverError},
    CONFIG,
 };

@@ -49,48 +48,32 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
    let icon_download_timeout = Duration::from_secs(CONFIG.icon_download_timeout());
    let pool_idle_timeout = Duration::from_secs(10);
    // Reuse the client between requests
-   let client = get_reqwest_client_builder()
+   get_reqwest_client_builder()
        .cookie_provider(Arc::clone(&cookie_store))
        .timeout(icon_download_timeout)
        .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
        .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
-       .trust_dns(true)
-       .default_headers(default_headers.clone());
-
-   match client.build() {
-       Ok(client) => client,
-       Err(e) => {
-           error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
-           get_reqwest_client_builder()
-               .cookie_provider(cookie_store)
-               .timeout(icon_download_timeout)
-               .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
-               .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
-               .trust_dns(false)
-               .default_headers(default_headers)
-               .build()
-               .expect("Failed to build client")
-       }
-   }
+       .dns_resolver(CustomDnsResolver::instance())
+       .default_headers(default_headers.clone())
+       .build()
+       .expect("Failed to build client")
 });

 // Build Regex only once since this takes a lot of time.
 static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());

-// Special HashMap which holds the user defined Regex to speedup matching the regex.
-static ICON_BLACKLIST_REGEX: Lazy<dashmap::DashMap<String, Regex>> = Lazy::new(dashmap::DashMap::new);
-
-async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
+#[get("/<domain>/icon.png")]
+fn icon_external(domain: &str) -> Option<Redirect> {
    if !is_valid_domain(domain) {
        warn!("Invalid domain: {}", domain);
        return None;
    }

-   if check_domain_blacklist_reason(domain).await.is_some() {
+   if is_domain_blacklisted(domain) {
        return None;
    }

-   let url = template.replace("{}", domain);
+   let url = CONFIG._icon_service_url().replace("{}", domain);
    match CONFIG.icon_redirect_code() {
        301 => Some(Redirect::moved(url)), // legacy permanent redirect
        302 => Some(Redirect::found(url)), // legacy temporary redirect
@@ -103,11 +86,6 @@ async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
    }
 }

-#[get("/<domain>/icon.png")]
-async fn icon_external(domain: &str) -> Option<Redirect> {
-   icon_redirect(domain, &CONFIG._icon_service_url()).await
-}
-
 #[get("/<domain>/icon.png")]
 async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
    const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");
@@ -166,153 +144,28 @@ fn is_valid_domain(domain: &str) -> bool {
    true
 }

-/// TODO: This is extracted from IpAddr::is_global, which is unstable:
-/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
-/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
-#[allow(clippy::nonminimal_bool)]
-#[cfg(not(feature = "unstable"))]
-fn is_global(ip: IpAddr) -> bool {
-   match ip {
-       IpAddr::V4(ip) => {
-           // check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
-           // globally routable addresses in the 192.0.0.0/24 range.
-           if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a {
-               return true;
-           }
-           !ip.is_private()
-               && !ip.is_loopback()
-               && !ip.is_link_local()
-               && !ip.is_broadcast()
-               && !ip.is_documentation()
-               && !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000))
-               && !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
-               && !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast())
-               && !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18)
-               // Make sure the address is not in 0.0.0.0/8
-               && ip.octets()[0] != 0
-       }
-       IpAddr::V6(ip) => {
-           if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 {
-               true
-           } else {
-               !ip.is_multicast()
-                   && !ip.is_loopback()
-                   && !((ip.segments()[0] & 0xffc0) == 0xfe80)
-                   && !((ip.segments()[0] & 0xfe00) == 0xfc00)
-                   && !ip.is_unspecified()
-                   && !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8))
-           }
-       }
-   }
-}
-
-#[cfg(feature = "unstable")]
-fn is_global(ip: IpAddr) -> bool {
-   ip.is_global()
-}
-
-/// These are some tests to check that the implementations match
-/// The IPv4 can be all checked in 5 mins or so and they are correct as of nightly 2020-07-11
-/// The IPV6 can't be checked in a reasonable time, so we check about ten billion random ones, so far correct
-/// Note that the is_global implementation is subject to change as new IP RFCs are created
-///
-/// To run while showing progress output:
-/// cargo test --features sqlite,unstable -- --nocapture --ignored
-#[cfg(test)]
-#[cfg(feature = "unstable")]
-mod tests {
-   use super::*;
-
-   #[test]
-   #[ignore]
-   fn test_ipv4_global() {
-       for a in 0..u8::MAX {
-           println!("Iter: {}/255", a);
-           for b in 0..u8::MAX {
-               for c in 0..u8::MAX {
-                   for d in 0..u8::MAX {
-                       let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
-                       assert_eq!(ip.is_global(), is_global(ip))
-                   }
-               }
-           }
-       }
-   }
-
-   #[test]
-   #[ignore]
-   fn test_ipv6_global() {
-       use ring::rand::{SecureRandom, SystemRandom};
-       let mut v = [0u8; 16];
-       let rand = SystemRandom::new();
-       for i in 0..1_000 {
-           println!("Iter: {}/1_000", i);
-           for _ in 0..10_000_000 {
-               rand.fill(&mut v).expect("Error generating random values");
-               let ip = IpAddr::V6(std::net::Ipv6Addr::new(
-                   (v[14] as u16) << 8 | v[15] as u16,
-                   (v[12] as u16) << 8 | v[13] as u16,
-                   (v[10] as u16) << 8 | v[11] as u16,
-                   (v[8] as u16) << 8 | v[9] as u16,
-                   (v[6] as u16) << 8 | v[7] as u16,
-                   (v[4] as u16) << 8 | v[5] as u16,
-                   (v[2] as u16) << 8 | v[3] as u16,
-                   (v[0] as u16) << 8 | v[1] as u16,
-               ));
-               assert_eq!(ip.is_global(), is_global(ip))
-           }
-       }
-   }
-}
-
-#[derive(Clone)]
-enum DomainBlacklistReason {
-   Regex,
-   IP,
-}
-
-use cached::proc_macro::cached;
-#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
-async fn check_domain_blacklist_reason(domain: &str) -> Option<DomainBlacklistReason> {
-   // First check the blacklist regex if there is a match.
-   // This prevents the blocked domain(s) from being leaked via a DNS lookup.
-   if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
-       // Use the pre-generate Regex stored in a Lazy HashMap if there's one, else generate it.
-       let is_match = if let Some(regex) = ICON_BLACKLIST_REGEX.get(&blacklist) {
-           regex.is_match(domain)
-       } else {
-           // Clear the current list if the previous key doesn't exists.
-           // To prevent growing of the HashMap after someone has changed it via the admin interface.
-           if ICON_BLACKLIST_REGEX.len() >= 1 {
-               ICON_BLACKLIST_REGEX.clear();
-           }
-
-           // Generate the regex to store in too the Lazy Static HashMap.
-           let blacklist_regex = Regex::new(&blacklist).unwrap();
-           let is_match = blacklist_regex.is_match(domain);
-           ICON_BLACKLIST_REGEX.insert(blacklist.clone(), blacklist_regex);
-
-           is_match
-       };
-
-       if is_match {
-           debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
-           return Some(DomainBlacklistReason::Regex);
-       }
-   }
-
-   if CONFIG.icon_blacklist_non_global_ips() {
-       if let Ok(s) = lookup_host((domain, 0)).await {
-           for addr in s {
-               if !is_global(addr.ip()) {
-                   debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain);
-                   return Some(DomainBlacklistReason::IP);
-               }
-           }
-       }
-   }
-
-   None
+pub fn is_domain_blacklisted(domain: &str) -> bool {
+   let Some(config_blacklist) = CONFIG.icon_blacklist_regex() else {
+       return false;
+   };
+
+   // Compiled domain blacklist
+   static COMPILED_BLACKLIST: Mutex<Option<(String, Regex)>> = Mutex::new(None);
+   let mut guard = COMPILED_BLACKLIST.lock().unwrap();
+
+   // If the stored regex is up to date, use it
+   if let Some((value, regex)) = &*guard {
+       if value == &config_blacklist {
+           return regex.is_match(domain);
+       }
+   }
+
+   // If we don't have a regex stored, or it's not up to date, recreate it
+   let regex = Regex::new(&config_blacklist).unwrap();
+   let is_match = regex.is_match(domain);
+   *guard = Some((config_blacklist, regex));
+
+   is_match
 }

 async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
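The replacement caches one compiled Regex keyed by the configuration string, so the pattern is only recompiled when the admin changes it. The same pattern in isolation (a const-initialized Mutex works on stable Rust since 1.63; names here are illustrative):

    use std::sync::Mutex;
    use regex::Regex;

    fn matches_cached(pattern: &str, input: &str) -> bool {
        static CACHE: Mutex<Option<(String, Regex)>> = Mutex::new(None);
        let mut guard = CACHE.lock().unwrap();
        // Reuse the stored regex if it was compiled from the same pattern.
        if let Some((src, re)) = &*guard {
            if src == pattern {
                return re.is_match(input);
            }
        }
        // First use, or the pattern changed: recompile and store.
        let re = Regex::new(pattern).expect("invalid pattern");
        let hit = re.is_match(input);
        *guard = Some((pattern.to_string(), re));
        hit
    }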
@@ -342,6 +195,13 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
            Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
        }
        Err(e) => {
+           // If this error comes from the custom resolver, this means this is a blacklisted domain
+           // or non global IP, don't save the miss file in this case to avoid leaking it
+           if let Some(error) = CustomResolverError::downcast_ref(&e) {
+               warn!("{error}");
+               return None;
+           }
+
            warn!("Unable to download icon: {:?}", e);
            let miss_indicator = path + ".miss";
            save_icon(&miss_indicator, &[]).await;
|
||||||
let ssldomain = format!("https://{domain}");
|
let ssldomain = format!("https://{domain}");
|
||||||
let httpdomain = format!("http://{domain}");
|
let httpdomain = format!("http://{domain}");
|
||||||
|
|
||||||
// First check the domain as given during the request for both HTTPS and HTTP.
|
// First check the domain as given during the request for HTTPS.
|
||||||
let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await {
|
let resp = match get_page(&ssldomain).await {
|
||||||
Ok(c) => Ok(c),
|
Err(e) if CustomResolverError::downcast_ref(&e).is_none() => {
|
||||||
Err(e) => {
|
// If we get an error that is not caused by the blacklist, we retry with HTTP
|
||||||
let mut sub_resp = Err(e);
|
match get_page(&httpdomain).await {
|
||||||
|
mut sub_resp @ Err(_) => {
|
||||||
|
// When the domain is not an IP, and has more then one dot, remove all subdomains.
|
||||||
|
let is_ip = domain.parse::<IpAddr>();
|
||||||
|
if is_ip.is_err() && domain.matches('.').count() > 1 {
|
||||||
|
let mut domain_parts = domain.split('.');
|
||||||
|
let base_domain = format!(
|
||||||
|
"{base}.{tld}",
|
||||||
|
tld = domain_parts.next_back().unwrap(),
|
||||||
|
base = domain_parts.next_back().unwrap()
|
||||||
|
);
|
||||||
|
if is_valid_domain(&base_domain) {
|
||||||
|
let sslbase = format!("https://{base_domain}");
|
||||||
|
let httpbase = format!("http://{base_domain}");
|
||||||
|
debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
|
||||||
|
|
||||||
// When the domain is not an IP, and has more then one dot, remove all subdomains.
|
sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
|
||||||
let is_ip = domain.parse::<IpAddr>();
|
}
|
||||||
if is_ip.is_err() && domain.matches('.').count() > 1 {
|
|
||||||
let mut domain_parts = domain.split('.');
|
|
||||||
let base_domain = format!(
|
|
||||||
"{base}.{tld}",
|
|
||||||
tld = domain_parts.next_back().unwrap(),
|
|
||||||
base = domain_parts.next_back().unwrap()
|
|
||||||
);
|
|
||||||
if is_valid_domain(&base_domain) {
|
|
||||||
let sslbase = format!("https://{base_domain}");
|
|
||||||
let httpbase = format!("http://{base_domain}");
|
|
||||||
debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
|
|
||||||
|
|
||||||
sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
|
// When the domain is not an IP, and has less then 2 dots, try to add www. infront of it.
|
||||||
}
|
} else if is_ip.is_err() && domain.matches('.').count() < 2 {
|
||||||
|
let www_domain = format!("www.{domain}");
|
||||||
// When the domain is not an IP, and has less then 2 dots, try to add www. infront of it.
|
if is_valid_domain(&www_domain) {
|
||||||
} else if is_ip.is_err() && domain.matches('.').count() < 2 {
|
let sslwww = format!("https://{www_domain}");
|
||||||
let www_domain = format!("www.{domain}");
|
let httpwww = format!("http://{www_domain}");
|
||||||
if is_valid_domain(&www_domain) {
|
debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
|
||||||
let sslwww = format!("https://{www_domain}");
|
|
||||||
let httpwww = format!("http://{www_domain}");
|
sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
|
||||||
debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
|
}
|
||||||
|
}
|
||||||
sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
|
sub_resp
|
||||||
}
|
}
|
||||||
|
res => res,
|
||||||
}
|
}
|
||||||
sub_resp
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If we get a result or a blacklist error, just continue
|
||||||
|
res => res,
|
||||||
};
|
};
|
||||||
|
|
||||||
// Create the iconlist
|
// Create the iconlist
|
||||||
|
@@ -573,21 +439,12 @@ async fn get_page(url: &str) -> Result<Response, Error> {
 }

 async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
-   match check_domain_blacklist_reason(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
-       Some(DomainBlacklistReason::Regex) => warn!("Favicon '{}' is from a blacklisted domain!", url),
-       Some(DomainBlacklistReason::IP) => warn!("Favicon '{}' is hosted on a non-global IP!", url),
-       None => (),
-   }
-
    let mut client = CLIENT.get(url);
    if !referer.is_empty() {
        client = client.header("Referer", referer)
    }

-   match client.send().await {
-       Ok(c) => c.error_for_status().map_err(Into::into),
-       Err(e) => err_silent!(format!("{e}")),
-   }
+   Ok(client.send().await?.error_for_status()?)
 }

 /// Returns a Integer with the priority of the type of the icon which to prefer.
@@ -670,12 +527,6 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {
 }

 async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
-   match check_domain_blacklist_reason(domain).await {
-       Some(DomainBlacklistReason::Regex) => err_silent!("Domain is blacklisted", domain),
-       Some(DomainBlacklistReason::IP) => err_silent!("Host resolves to a non-global IP", domain),
-       None => (),
-   }
-
    let icon_result = get_icon_url(domain).await?;

    let mut buffer = Bytes::new();
@@ -711,22 +562,19 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
            _ => debug!("Extracted icon from data:image uri is invalid"),
        };
    } else {
-       match get_page_with_referer(&icon.href, &icon_result.referer).await {
-           Ok(res) => {
-               buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
-
-               // Check if the icon type is allowed, else try an icon from the list.
-               icon_type = get_icon_type(&buffer);
-               if icon_type.is_none() {
-                   buffer.clear();
-                   debug!("Icon from {}, is not a valid image type", icon.href);
-                   continue;
-               }
-               info!("Downloaded icon from {}", icon.href);
-               break;
-           }
-           Err(e) => debug!("{:?}", e),
-       };
+       let res = get_page_with_referer(&icon.href, &icon_result.referer).await?;
+
+       buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
+
+       // Check if the icon type is allowed, else try an icon from the list.
+       icon_type = get_icon_type(&buffer);
+       if icon_type.is_none() {
+           buffer.clear();
+           debug!("Icon from {}, is not a valid image type", icon.href);
+           continue;
+       }
+       info!("Downloaded icon from {}", icon.href);
+       break;
    }
 }
|
@ -303,7 +303,12 @@ async fn _password_login(
|
||||||
"KdfIterations": user.client_kdf_iter,
|
"KdfIterations": user.client_kdf_iter,
|
||||||
"KdfMemory": user.client_kdf_memory,
|
"KdfMemory": user.client_kdf_memory,
|
||||||
"KdfParallelism": user.client_kdf_parallelism,
|
"KdfParallelism": user.client_kdf_parallelism,
|
||||||
"ResetMasterPassword": false,// TODO: Same as above
|
"ResetMasterPassword": false, // TODO: Same as above
|
||||||
|
"ForcePasswordReset": false,
|
||||||
|
"MasterPasswordPolicy": {
|
||||||
|
"object": "masterPasswordPolicy",
|
||||||
|
},
|
||||||
|
|
||||||
"scope": scope,
|
"scope": scope,
|
||||||
"unofficialServer": true,
|
"unofficialServer": true,
|
||||||
"UserDecryptionOptions": {
|
"UserDecryptionOptions": {
|
||||||
|
|
|
@ -20,7 +20,7 @@ pub use crate::api::{
|
||||||
core::two_factor::send_incomplete_2fa_notifications,
|
core::two_factor::send_incomplete_2fa_notifications,
|
||||||
core::{emergency_notification_reminder_job, emergency_request_timeout_job},
|
core::{emergency_notification_reminder_job, emergency_request_timeout_job},
|
||||||
core::{event_cleanup_job, events_routes as core_events_routes},
|
core::{event_cleanup_job, events_routes as core_events_routes},
|
||||||
icons::routes as icons_routes,
|
icons::{is_domain_blacklisted, routes as icons_routes},
|
||||||
identity::routes as identity_routes,
|
identity::routes as identity_routes,
|
||||||
notifications::routes as notifications_routes,
|
notifications::routes as notifications_routes,
|
||||||
notifications::{AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS},
|
notifications::{AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS},
|
||||||
|
|
|
@ -288,8 +288,8 @@ fn serialize(val: Value) -> Vec<u8> {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn serialize_date(date: NaiveDateTime) -> Value {
|
fn serialize_date(date: NaiveDateTime) -> Value {
|
||||||
let seconds: i64 = date.timestamp();
|
let seconds: i64 = date.and_utc().timestamp();
|
||||||
let nanos: i64 = date.timestamp_subsec_nanos().into();
|
let nanos: i64 = date.and_utc().timestamp_subsec_nanos().into();
|
||||||
let timestamp = nanos << 34 | seconds;
|
let timestamp = nanos << 34 | seconds;
|
||||||
|
|
||||||
let bs = timestamp.to_be_bytes();
|
let bs = timestamp.to_be_bytes();
|
||||||
|
|
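Another face of the chrono deprecations: the timestamp accessors moved off NaiveDateTime, so a naive value is first tagged as UTC. A minimal sketch (function name assumed):

    use chrono::NaiveDateTime;

    fn epoch_seconds(naive: NaiveDateTime) -> i64 {
        // and_utc() wraps the naive value as DateTime<Utc> without shifting it.
        naive.and_utc().timestamp()
    }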
|
@ -114,11 +114,11 @@ pub async fn register_push_device(device: &mut Device, conn: &mut crate::db::DbC
|
||||||
.await?
|
.await?
|
||||||
.error_for_status()
|
.error_for_status()
|
||||||
{
|
{
|
||||||
err!(format!("An error occured while proceeding registration of a device: {e}"));
|
err!(format!("An error occurred while proceeding registration of a device: {e}"));
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Err(e) = device.save(conn).await {
|
if let Err(e) = device.save(conn).await {
|
||||||
err!(format!("An error occured while trying to save the (registered) device push uuid: {e}"));
|
err!(format!("An error occurred while trying to save the (registered) device push uuid: {e}"));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
|
|
55 src/auth.rs
@@ -1,10 +1,10 @@
 // JWT Handling
 //
-use chrono::{Duration, Utc};
+use chrono::{TimeDelta, Utc};
 use num_traits::FromPrimitive;
 use once_cell::sync::{Lazy, OnceCell};

-use jsonwebtoken::{self, errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
+use jsonwebtoken::{errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
 use openssl::rsa::Rsa;
 use serde::de::DeserializeOwned;
 use serde::ser::Serialize;
@@ -14,7 +14,7 @@ use crate::{error::Error, CONFIG};

 const JWT_ALGORITHM: Algorithm = Algorithm::RS256;

-pub static DEFAULT_VALIDITY: Lazy<Duration> = Lazy::new(|| Duration::hours(2));
+pub static DEFAULT_VALIDITY: Lazy<TimeDelta> = Lazy::new(|| TimeDelta::try_hours(2).unwrap());
 static JWT_HEADER: Lazy<Header> = Lazy::new(|| Header::new(JWT_ALGORITHM));

 fn jwt_origin() -> String {
@@ -39,7 +39,8 @@ pub fn initialize_keys() -> Result<(), crate::error::Error> {
    let mut priv_key_buffer = Vec::with_capacity(2048);

    let priv_key = {
-       let mut priv_key_file = File::options().create(true).read(true).write(true).open(CONFIG.private_rsa_key())?;
+       let mut priv_key_file =
+           File::options().create(true).truncate(false).read(true).write(true).open(CONFIG.private_rsa_key())?;

        #[allow(clippy::verbose_file_reads)]
        let bytes_read = priv_key_file.read_to_end(&mut priv_key_buffer)?;
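The extra truncate(false) does not change behaviour (create plus write never truncated implicitly); it satisfies newer clippy's suspicious_open_options lint, which wants truncation spelled out whenever create(true) and write(true) are combined. Sketch:

    use std::fs::File;

    fn open_keep_contents(path: &str) -> std::io::Result<File> {
        // truncate(false) keeps any existing key material intact.
        File::options().create(true).truncate(false).read(true).write(true).open(path)
    }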
@@ -192,11 +193,11 @@ pub fn generate_invite_claims(
    user_org_id: Option<String>,
    invited_by_email: Option<String>,
 ) -> InviteJwtClaims {
-   let time_now = Utc::now().naive_utc();
+   let time_now = Utc::now();
    let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
    InviteJwtClaims {
        nbf: time_now.timestamp(),
-       exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+       exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
        iss: JWT_INVITE_ISSUER.to_string(),
        sub: uuid,
        email,
@@ -230,11 +231,11 @@ pub fn generate_emergency_access_invite_claims(
    grantor_name: String,
    grantor_email: String,
 ) -> EmergencyAccessInviteJwtClaims {
-   let time_now = Utc::now().naive_utc();
+   let time_now = Utc::now();
    let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
    EmergencyAccessInviteJwtClaims {
        nbf: time_now.timestamp(),
-       exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+       exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
        iss: JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string(),
        sub: uuid,
        email,
@@ -261,10 +262,10 @@ pub struct OrgApiKeyLoginJwtClaims {
 }

 pub fn generate_organization_api_key_login_claims(uuid: String, org_id: String) -> OrgApiKeyLoginJwtClaims {
-   let time_now = Utc::now().naive_utc();
+   let time_now = Utc::now();
    OrgApiKeyLoginJwtClaims {
        nbf: time_now.timestamp(),
-       exp: (time_now + Duration::hours(1)).timestamp(),
+       exp: (time_now + TimeDelta::try_hours(1).unwrap()).timestamp(),
        iss: JWT_ORG_API_KEY_ISSUER.to_string(),
        sub: uuid,
        client_id: format!("organization.{org_id}"),
@@ -288,10 +289,10 @@ pub struct FileDownloadClaims {
 }

 pub fn generate_file_download_claims(uuid: String, file_id: String) -> FileDownloadClaims {
-   let time_now = Utc::now().naive_utc();
+   let time_now = Utc::now();
    FileDownloadClaims {
        nbf: time_now.timestamp(),
-       exp: (time_now + Duration::minutes(5)).timestamp(),
+       exp: (time_now + TimeDelta::try_minutes(5).unwrap()).timestamp(),
        iss: JWT_FILE_DOWNLOAD_ISSUER.to_string(),
        sub: uuid,
        file_id,
@@ -311,42 +312,42 @@ pub struct BasicJwtClaims {
 }
 
 pub fn generate_delete_claims(uuid: String) -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
     BasicJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+        exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
         iss: JWT_DELETE_ISSUER.to_string(),
         sub: uuid,
     }
 }
 
 pub fn generate_verify_email_claims(uuid: String) -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     let expire_hours = i64::from(CONFIG.invitation_expiration_hours());
     BasicJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::hours(expire_hours)).timestamp(),
+        exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
         iss: JWT_VERIFYEMAIL_ISSUER.to_string(),
         sub: uuid,
     }
 }
 
 pub fn generate_admin_claims() -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     BasicJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::minutes(CONFIG.admin_session_lifetime())).timestamp(),
+        exp: (time_now + TimeDelta::try_minutes(CONFIG.admin_session_lifetime()).unwrap()).timestamp(),
         iss: JWT_ADMIN_ISSUER.to_string(),
         sub: "admin_panel".to_string(),
     }
 }
 
 pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims {
-    let time_now = Utc::now().naive_utc();
+    let time_now = Utc::now();
     BasicJwtClaims {
         nbf: time_now.timestamp(),
-        exp: (time_now + Duration::minutes(2)).timestamp(),
+        exp: (time_now + TimeDelta::try_minutes(2).unwrap()).timestamp(),
         iss: JWT_SEND_ISSUER.to_string(),
         sub: format!("{send_id}/{file_id}"),
     }
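Every claim generator above follows the same two-step migration: `Utc::now()` replaces `Utc::now().naive_utc()` because `DateTime<Utc>` adds the new offset type directly, and chrono's deprecated `Duration::hours`/`minutes` constructors give way to `TimeDelta::try_hours`/`try_minutes`, which return `Option` so the caller decides what an out-of-range value means. A minimal sketch of the pattern; `MyClaims` is a hypothetical stand-in for the claim structs in this file:

    use chrono::{TimeDelta, Utc};

    struct MyClaims {
        nbf: i64,
        exp: i64,
    }

    fn generate_claims(expire_hours: i64) -> MyClaims {
        // DateTime<Utc> implements Add<TimeDelta>, so naive_utc() is no longer needed.
        let time_now = Utc::now();
        MyClaims {
            nbf: time_now.timestamp(),
            // try_hours returns None on overflow; the small constants used in this
            // diff cannot overflow, which is why the unwrap() calls here are safe.
            exp: (time_now + TimeDelta::try_hours(expire_hours).unwrap()).timestamp(),
        }
    }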
@@ -430,10 +431,8 @@ impl<'r> FromRequest<'r> for HostInfo {
 
         let host = if let Some(host) = headers.get_one("X-Forwarded-Host") {
             host
-        } else if let Some(host) = headers.get_one("Host") {
-            host
         } else {
-            ""
+            headers.get_one("Host").unwrap_or_default()
         };
 
         let base_url_origin = format!("{protocol}://{host}");
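The collapsed branch relies on `Option::unwrap_or_default`: `get_one` yields an `Option<&str>`, and the `Default` value for `&str` is `""`, so two arms now cover what three did. A standalone sketch with hypothetical header values rather than Rocket's real types:

    fn pick_host<'a>(forwarded: Option<&'a str>, host: Option<&'a str>) -> &'a str {
        // Prefer the proxy-supplied header, fall back to Host, then to "".
        if let Some(h) = forwarded {
            h
        } else {
            host.unwrap_or_default()
        }
    }

    fn main() {
        assert_eq!(pick_host(Some("vault.example.com"), Some("internal")), "vault.example.com");
        assert_eq!(pick_host(None, Some("internal")), "internal");
        assert_eq!(pick_host(None, None), "");
    }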
@@ -543,7 +542,7 @@ impl<'r> FromRequest<'r> for Headers {
                 // Check if the stamp exception has expired first.
                 // Then, check if the current route matches any of the allowed routes.
                 // After that check the stamp in exception matches the one in the claims.
-                if Utc::now().naive_utc().timestamp() > stamp_exception.expire {
+                if Utc::now().timestamp() > stamp_exception.expire {
                     // If the stamp exception has been expired remove it from the database.
                     // This prevents checking this stamp exception for new requests.
                     let mut user = user;
@@ -735,7 +734,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
             _ => err_handler!("Error getting DB"),
         };
 
-        if !can_access_collection(&headers.org_user, &col_id, &mut conn).await {
+        if !Collection::can_access_collection(&headers.org_user, &col_id, &mut conn).await {
             err_handler!("The current user isn't a manager for this collection")
         }
     }
@@ -808,10 +807,6 @@ impl From<ManagerHeadersLoose> for Headers {
         }
     }
 }
-async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
-    org_user.has_full_access()
-        || Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn).await
-}
 
 impl ManagerHeaders {
     pub async fn from_loose(
@@ -823,7 +818,7 @@ impl ManagerHeaders {
             if uuid::Uuid::parse_str(col_id).is_err() {
                 err!("Collection Id is malformed!");
             }
-            if !can_access_collection(&h.org_user, col_id, conn).await {
+            if !Collection::can_access_collection(&h.org_user, col_id, conn).await {
                 err!("You don't have access to all collections!");
            }
        }
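Both call sites change for the same reason: the free function `can_access_collection` that used to sit beside these headers was removed (see the deletion above) in favor of an associated function on `Collection`, added further down in this diff, so the access rule now lives with the type it guards. A toy sketch of the move, with hypothetical types:

    struct Member {
        full_access: bool,
    }

    struct Collection;

    impl Collection {
        // Previously a free function next to the callers; as an associated
        // function, callers must name the owning type explicitly.
        fn can_access_collection(member: &Member) -> bool {
            member.full_access
        }
    }

    fn main() {
        let m = Member { full_access: true };
        assert!(Collection::can_access_collection(&m));
    }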
@@ -140,7 +140,7 @@ impl AuthRequest {
     }
 
     pub async fn purge_expired_auth_requests(conn: &mut DbConn) {
-        let expiry_time = Utc::now().naive_utc() - chrono::Duration::minutes(5); //after 5 minutes, clients reject the request
+        let expiry_time = Utc::now().naive_utc() - chrono::TimeDelta::try_minutes(5).unwrap(); //after 5 minutes, clients reject the request
         for auth_request in Self::find_created_before(&expiry_time, conn).await {
             auth_request.delete(conn).await.ok();
         }
@@ -1,5 +1,5 @@
 use crate::CONFIG;
-use chrono::{Duration, NaiveDateTime, Utc};
+use chrono::{NaiveDateTime, TimeDelta, Utc};
 use serde_json::Value;
 
 use super::{
@@ -361,7 +361,7 @@ impl Cipher {
     pub async fn purge_trash(conn: &mut DbConn) {
         if let Some(auto_delete_days) = CONFIG.trash_auto_delete_days() {
             let now = Utc::now().naive_utc();
-            let dt = now - Duration::days(auto_delete_days);
+            let dt = now - TimeDelta::try_days(auto_delete_days).unwrap();
             for cipher in Self::find_deleted_before(&dt, conn).await {
                 cipher.delete(conn).await.ok();
             }
@@ -431,7 +431,7 @@ impl Cipher {
         }
         if let Some(ref org_uuid) = self.organization_uuid {
             if let Some(cipher_sync_data) = cipher_sync_data {
-                return cipher_sync_data.user_group_full_access_for_organizations.get(org_uuid).is_some();
+                return cipher_sync_data.user_group_full_access_for_organizations.contains(org_uuid);
             } else {
                 return Group::is_in_full_access_group(user_uuid, org_uuid, conn).await;
             }
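`get(org_uuid).is_some()` and `contains(org_uuid)` are equivalent membership tests; the latter is what clippy suggests and skips materializing the returned reference. A minimal sketch, assuming the field is a `HashSet<String>` as the `contains` call implies:

    use std::collections::HashSet;

    fn main() {
        let full_access_orgs: HashSet<String> = HashSet::from(["org-a".to_string()]);
        let org_uuid = "org-a".to_string();

        // Before: look the entry up, then throw it away.
        assert!(full_access_orgs.get(&org_uuid).is_some());
        // After: ask the question directly.
        assert!(full_access_orgs.contains(&org_uuid));
    }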
@@ -1,6 +1,6 @@
 use serde_json::Value;
 
-use super::{CollectionGroup, User, UserOrgStatus, UserOrgType, UserOrganization};
+use super::{CollectionGroup, GroupUser, User, UserOrgStatus, UserOrgType, UserOrganization};
 use crate::CONFIG;
 
 db_object! {
@@ -102,6 +102,15 @@ impl Collection {
         json_object["HidePasswords"] = json!(hide_passwords);
         json_object
     }
 
+    pub async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
+        org_user.has_status(UserOrgStatus::Confirmed)
+            && (org_user.has_full_access()
+                || CollectionUser::has_access_to_collection_by_user(col_id, &org_user.user_uuid, conn).await
+                || (CONFIG.org_groups_enabled()
+                    && (GroupUser::has_full_access_by_member(&org_user.org_uuid, &org_user.uuid, conn).await
+                        || GroupUser::has_access_to_collection_by_member(col_id, &org_user.uuid, conn).await)))
+    }
 }
 
 use crate::db::DbConn;
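The new check short-circuits left to right: an unconfirmed member is rejected outright, full access or a direct per-collection grant is enough on its own, and the two group lookups only run when `CONFIG.org_groups_enabled()` is true, keeping the extra queries off the common path. A boolean-only sketch of the same decision table, with flags standing in for the async DB lookups:

    fn can_access(confirmed: bool, full: bool, direct: bool, groups_on: bool, via_group: bool) -> bool {
        confirmed && (full || direct || (groups_on && via_group))
    }

    fn main() {
        // An invited-but-unconfirmed member never gets in, even with full access.
        assert!(!can_access(false, true, true, true, true));
        // Group membership only counts when the org-groups feature is enabled.
        assert!(!can_access(true, false, false, false, true));
        assert!(can_access(true, false, false, true, true));
    }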
@@ -252,17 +261,6 @@ impl Collection {
         }
     }
 
-    // Check if a user has access to a specific collection
-    // FIXME: This needs to be reviewed. The query used by `find_by_user_uuid` could be adjusted to filter when needed.
-    // For now this is a good solution without making to much changes.
-    pub async fn has_access_by_collection_and_user_uuid(
-        collection_uuid: &str,
-        user_uuid: &str,
-        conn: &mut DbConn,
-    ) -> bool {
-        Self::find_by_user_uuid(user_uuid.to_owned(), conn).await.into_iter().any(|c| c.uuid == collection_uuid)
-    }
-
     pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
         Self::find_by_user_uuid(user_uuid.to_owned(), conn)
             .await
@@ -644,6 +642,10 @@ impl CollectionUser {
         Ok(())
         }}
     }
 
+    pub async fn has_access_to_collection_by_user(col_id: &str, user_uuid: &str, conn: &mut DbConn) -> bool {
+        Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some()
+    }
 }
 
 /// Database methods
@@ -67,8 +67,8 @@ impl Device {
         }
 
         // Update the expiration of the device and the last update date
-        let time_now = Utc::now().naive_utc();
-        self.updated_at = time_now;
+        let time_now = Utc::now();
+        self.updated_at = time_now.naive_utc();
 
         // ---
         // Disabled these keys to be added to the JWT since they could cause the JWT to get too large
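The `updated_at` column stays a `NaiveDateTime`, so the conversion moves to the assignment boundary: `Utc::now()` keeps the timezone-aware value for any arithmetic, and `naive_utc()` strips the zone only when the value is persisted. A minimal sketch:

    use chrono::{DateTime, NaiveDateTime, Utc};

    struct Device {
        updated_at: NaiveDateTime, // the DB column stays naive
    }

    fn touch(device: &mut Device) {
        let time_now: DateTime<Utc> = Utc::now(); // zone-aware for arithmetic
        device.updated_at = time_now.naive_utc(); // strip the zone only at storage
    }

    fn main() {
        let mut d = Device { updated_at: Utc::now().naive_utc() };
        touch(&mut d);
        println!("updated at {}", d.updated_at);
    }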
@@ -81,25 +81,32 @@ impl EmergencyAccess {
         })
     }
 
-    pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Value {
+    pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option<Value> {
         let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
-            Some(User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found."))
+            User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
         } else if let Some(email) = self.email.as_deref() {
-            Some(User::find_by_mail(email, conn).await.expect("Grantee user not found."))
+            match User::find_by_mail(email, conn).await {
+                Some(user) => user,
+                None => {
+                    // remove outstanding invitations which should not exist
+                    let _ = Self::delete_all_by_grantee_email(email, conn).await;
+                    return None;
+                }
+            }
         } else {
-            None
+            return None;
         };
 
-        json!({
+        Some(json!({
             "Id": self.uuid,
             "Status": self.status,
             "Type": self.atype,
             "WaitTimeDays": self.wait_time_days,
-            "GranteeId": grantee_user.as_ref().map_or("", |u| &u.uuid),
-            "Email": grantee_user.as_ref().map_or("", |u| &u.email),
-            "Name": grantee_user.as_ref().map_or("", |u| &u.name),
+            "GranteeId": grantee_user.uuid,
+            "Email": grantee_user.email,
+            "Name": grantee_user.name,
             "Object": "emergencyAccessGranteeDetails",
-        })
+        }))
     }
 }
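Returning `Option<Value>` moves the "grantee no longer exists" case to the caller instead of panicking in `expect`, and the new `None` arm doubles as cleanup by deleting the stale invitation. A hedged sketch of how a caller can consume it; the shapes here are illustrative, not the exact vaultwarden handler:

    use serde_json::{json, Value};

    // Stand-in for to_json_grantee_details: None means the referenced user is gone.
    fn grantee_details(grantee: Option<&str>) -> Option<Value> {
        let name = grantee?; // bail out with None instead of panicking
        Some(json!({ "Name": name, "Object": "emergencyAccessGranteeDetails" }))
    }

    fn main() {
        let entries = [Some("alice"), None, Some("bob")];
        // filter_map drops entries whose grantee vanished.
        let granted: Vec<Value> = entries.iter().filter_map(|e| grantee_details(*e)).collect();
        assert_eq!(granted.len(), 2);
    }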
@@ -174,7 +181,7 @@ impl EmergencyAccess {
         // Update the grantee so that it will refresh it's status.
         User::update_uuid_revision(self.grantee_uuid.as_ref().expect("Error getting grantee"), conn).await;
         self.status = status;
-        self.updated_at = date.to_owned();
+        date.clone_into(&mut self.updated_at);
 
         db_run! {conn: {
             crate::util::retry(|| {
@@ -192,7 +199,7 @@ impl EmergencyAccess {
         conn: &mut DbConn,
     ) -> EmptyResult {
         self.last_notification_at = Some(date.to_owned());
-        self.updated_at = date.to_owned();
+        date.clone_into(&mut self.updated_at);
 
         db_run! {conn: {
             crate::util::retry(|| {
@@ -214,6 +221,13 @@ impl EmergencyAccess {
         Ok(())
     }
 
+    pub async fn delete_all_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
+        for ea in Self::find_all_invited_by_grantee_email(grantee_email, conn).await {
+            ea.delete(conn).await?;
+        }
+        Ok(())
+    }
+
     pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
         User::update_uuid_revision(&self.grantor_uuid, conn).await;
 
@@ -285,6 +299,15 @@ impl EmergencyAccess {
         }}
     }
 
+    pub async fn find_all_invited_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            emergency_access::table
+                .filter(emergency_access::email.eq(grantee_email))
+                .filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32))
+                .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
+        }}
+    }
+
     pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             emergency_access::table
@@ -292,6 +315,21 @@ impl EmergencyAccess {
                 .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
         }}
     }
 
+    pub async fn accept_invite(&mut self, grantee_uuid: &str, grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
+        if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email {
+            err!("User email does not match invite.");
+        }
+
+        if self.status == EmergencyAccessStatus::Accepted as i32 {
+            err!("Emergency contact already accepted.");
+        }
+
+        self.status = EmergencyAccessStatus::Accepted as i32;
+        self.grantee_uuid = Some(String::from(grantee_uuid));
+        self.email = None;
+        self.save(conn).await
+    }
 }
 
 // endregion
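`accept_invite` gathers the whole state transition in one place: the invite is only valid for the e-mail it was sent to, accepting twice is an error, and on success the record flips from being keyed by e-mail to being keyed by user uuid (`email = None`, `grantee_uuid = Some(..)`). A condensed sketch of that transition with plain types:

    #[derive(PartialEq)]
    enum Status { Invited, Accepted }

    struct Invite {
        email: Option<String>,
        grantee_uuid: Option<String>,
        status: Status,
    }

    fn accept(invite: &mut Invite, uuid: &str, email: &str) -> Result<(), &'static str> {
        if invite.email.as_deref() != Some(email) {
            return Err("User email does not match invite.");
        }
        if invite.status == Status::Accepted {
            return Err("Emergency contact already accepted.");
        }
        // From here on the record is keyed by the user id, not the invite e-mail.
        invite.status = Status::Accepted;
        invite.grantee_uuid = Some(uuid.to_string());
        invite.email = None;
        Ok(())
    }

    fn main() {
        let mut i = Invite { email: Some("a@b.c".into()), grantee_uuid: None, status: Status::Invited };
        assert!(accept(&mut i, "uuid-1", "a@b.c").is_ok());
        assert!(accept(&mut i, "uuid-1", "a@b.c").is_err()); // a second accept fails
    }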
@@ -3,7 +3,7 @@ use serde_json::Value;
 
 use crate::{api::EmptyResult, error::MapResult, CONFIG};
 
-use chrono::{Duration, NaiveDateTime, Utc};
+use chrono::{NaiveDateTime, TimeDelta, Utc};
 
 // https://bitwarden.com/help/event-logs/
 
@@ -316,7 +316,7 @@ impl Event {
 
     pub async fn clean_events(conn: &mut DbConn) -> EmptyResult {
         if let Some(days_to_retain) = CONFIG.events_days_retain() {
-            let dt = Utc::now().naive_utc() - Duration::days(days_to_retain);
+            let dt = Utc::now().naive_utc() - TimeDelta::try_days(days_to_retain).unwrap();
             db_run! { conn: {
                 diesel::delete(event::table.filter(event::event_date.lt(dt)))
                     .execute(conn)
@@ -344,6 +344,25 @@ impl UserOrganization {
     pub async fn to_json(&self, conn: &mut DbConn) -> Value {
         let org = Organization::find_by_uuid(&self.org_uuid, conn).await.unwrap();
 
+        let permissions = json!({
+            // TODO: Add support for Custom User Roles
+            // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
+            "accessEventLogs": false,
+            "accessImportExport": false,
+            "accessReports": false,
+            "createNewCollections": false,
+            "editAnyCollection": false,
+            "deleteAnyCollection": false,
+            "editAssignedCollections": false,
+            "deleteAssignedCollections": false,
+            "manageGroups": false,
+            "managePolicies": false,
+            "manageSso": false, // Not supported
+            "manageUsers": false,
+            "manageResetPassword": false,
+            "manageScim": false // Not supported (Not AGPLv3 Licensed)
+        });
+
         // https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/ProfileOrganizationResponseModel.cs
         json!({
             "Id": self.org_uuid,
@@ -371,27 +390,7 @@ impl UserOrganization {
             // "KeyConnectorEnabled": false,
             // "KeyConnectorUrl": null,
 
-            // TODO: Add support for Custom User Roles
-            // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
-            // "Permissions": {
-            //     "AccessEventLogs": false,
-            //     "AccessImportExport": false,
-            //     "AccessReports": false,
-            //     "ManageAllCollections": false,
-            //     "CreateNewCollections": false,
-            //     "EditAnyCollection": false,
-            //     "DeleteAnyCollection": false,
-            //     "ManageAssignedCollections": false,
-            //     "editAssignedCollections": false,
-            //     "deleteAssignedCollections": false,
-            //     "ManageCiphers": false,
-            //     "ManageGroups": false,
-            //     "ManagePolicies": false,
-            //     "ManageResetPassword": false,
-            //     "ManageSso": false, // Not supported
-            //     "ManageUsers": false,
-            //     "ManageScim": false, // Not supported (Not AGPLv3 Licensed)
-            // },
-
+            "permissions": permissions,
 
             "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
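Taken together, the two hunks above turn the long-commented `Permissions` block into a real `permissions` object with camelCase keys, built once with `serde_json::json!` and spliced into the profile response. A minimal sketch of nesting one `json!` value inside another:

    use serde_json::json;

    fn main() {
        let permissions = json!({
            "accessEventLogs": false,
            "manageUsers": false,
        });

        // json! interpolates any serde_json::Value, so the object nests as-is.
        let profile = json!({
            "Id": "org-uuid",
            "permissions": permissions,
        });

        assert_eq!(profile["permissions"]["manageUsers"], false);
    }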
|
@ -1,4 +1,4 @@
|
||||||
use chrono::{Duration, NaiveDateTime, Utc};
|
use chrono::{NaiveDateTime, TimeDelta, Utc};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use crate::crypto;
|
use crate::crypto;
|
||||||
|
@@ -202,7 +202,7 @@ impl User {
         let stamp_exception = UserStampException {
             routes: route_exception,
             security_stamp: self.security_stamp.clone(),
-            expire: (Utc::now().naive_utc() + Duration::minutes(2)).timestamp(),
+            expire: (Utc::now() + TimeDelta::try_minutes(2).unwrap()).timestamp(),
         };
         self.stamp_exception = Some(serde_json::to_string(&stamp_exception).unwrap_or_default());
     }
@@ -246,6 +246,7 @@ impl User {
             "Email": self.email,
             "EmailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
             "Premium": true,
+            "PremiumFromOrganization": false,
             "MasterPasswordHint": self.password_hint,
             "Culture": "en-US",
             "TwoFactorEnabled": twofactor_enabled,
@@ -257,6 +258,7 @@ impl User {
             "ProviderOrganizations": [],
             "ForcePasswordReset": false,
             "AvatarColor": self.avatar_color,
+            "UsesKeyConnector": false,
             "Object": "profile",
         })
     }
@@ -311,6 +313,7 @@ impl User {
 
         Send::delete_all_by_user(&self.uuid, conn).await?;
         EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?;
+        EmergencyAccess::delete_all_by_grantee_email(&self.email, conn).await?;
         UserOrganization::delete_all_by_user(&self.uuid, conn).await?;
         Cipher::delete_all_by_user(&self.uuid, conn).await?;
         Favorite::delete_all_by_user(&self.uuid, conn).await?;
src/main.rs (14 changed lines)
@@ -3,7 +3,7 @@
 // The more key/value pairs there are the more recursion occurs.
 // We want to keep this as low as possible, but not higher then 128.
 // If you go above 128 it will cause rust-analyzer to fail,
-#![recursion_limit = "103"]
+#![recursion_limit = "90"]
 
 // When enabled use MiMalloc as malloc instead of the default malloc
 #[cfg(feature = "enable_mimalloc")]
@@ -211,9 +211,9 @@ fn launch_info() {
 }
 
 fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
-    // Depending on the main log level we either want to disable or enable logging for trust-dns.
-    // Else if there are timeouts it will clutter the logs since trust-dns uses warn for this.
-    let trust_dns_level = if level >= log::LevelFilter::Debug {
+    // Depending on the main log level we either want to disable or enable logging for hickory.
+    // Else if there are timeouts it will clutter the logs since hickory uses warn for this.
+    let hickory_level = if level >= log::LevelFilter::Debug {
         level
     } else {
         log::LevelFilter::Off
@@ -266,9 +266,9 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
         .level_for("handlebars::render", handlebars_level)
         // Prevent cookie_store logs
         .level_for("cookie_store", log::LevelFilter::Off)
-        // Variable level for trust-dns used by reqwest
-        .level_for("trust_dns_resolver::name_server::name_server", trust_dns_level)
-        .level_for("trust_dns_proto::xfer", trust_dns_level)
+        // Variable level for hickory used by reqwest
+        .level_for("hickory_resolver::name_server::name_server", hickory_level)
+        .level_for("hickory_proto::xfer", hickory_level)
         .level_for("diesel_logger", diesel_logger_level)
         .chain(std::io::stdout());
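The trust-dns project was renamed to hickory-dns, so only the module paths handed to fern's `level_for` and the local variable change; the filtering logic is untouched. A small standalone sketch of per-module log levels, assuming the fern and log crates as used in this file:

    use log::LevelFilter;

    fn init_logging(level: LevelFilter) -> Result<(), fern::InitError> {
        // Silence hickory's resolver chatter unless we are debugging.
        let hickory_level = if level >= LevelFilter::Debug { level } else { LevelFilter::Off };

        fern::Dispatch::new()
            .level(level)
            .level_for("hickory_resolver::name_server::name_server", hickory_level)
            .level_for("hickory_proto::xfer", hickory_level)
            .chain(std::io::stdout())
            .apply()?;
        Ok(())
    }

    fn main() {
        init_logging(LevelFilter::Info).expect("logger");
        log::info!("logging initialized");
    }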
src/util.rs (261 changed lines)
@@ -4,6 +4,7 @@
 use std::{collections::HashMap, io::Cursor, ops::Deref, path::Path};
 
 use num_traits::ToPrimitive;
+use once_cell::sync::Lazy;
 use rocket::{
     fairing::{Fairing, Info, Kind},
     http::{ContentType, Header, HeaderMap, Method, Status},
@@ -224,7 +225,7 @@ impl<'r, R: 'r + Responder<'r, 'static> + Send> Responder<'r, 'static> for Cache
         res.set_raw_header("Cache-Control", cache_control_header);
 
         let time_now = chrono::Local::now();
-        let expiry_time = time_now + chrono::Duration::seconds(self.ttl.try_into().unwrap());
+        let expiry_time = time_now + chrono::TimeDelta::try_seconds(self.ttl.try_into().unwrap()).unwrap();
         res.set_raw_header("Expires", format_datetime_http(&expiry_time));
         Ok(res)
     }
@@ -530,7 +531,7 @@ pub fn container_base_image() -> &'static str {
 use std::fmt;
 
 use serde::de::{self, DeserializeOwned, Deserializer, MapAccess, SeqAccess, Visitor};
-use serde_json::{self, Value};
+use serde_json::Value;
 
 pub type JsonMap = serde_json::Map<String, Value>;
 
@@ -711,14 +712,9 @@
 
 use reqwest::{header, Client, ClientBuilder};
 
-pub fn get_reqwest_client() -> Client {
-    match get_reqwest_client_builder().build() {
-        Ok(client) => client,
-        Err(e) => {
-            error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
-            get_reqwest_client_builder().trust_dns(false).build().expect("Failed to build client")
-        }
-    }
+pub fn get_reqwest_client() -> &'static Client {
+    static INSTANCE: Lazy<Client> = Lazy::new(|| get_reqwest_client_builder().build().expect("Failed to build client"));
+    &INSTANCE
 }
 
 pub fn get_reqwest_client_builder() -> ClientBuilder {
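The rebuilt `get_reqwest_client` swaps per-call construction (and the now-obsolete trust-dns fallback) for a process-wide singleton: `once_cell::sync::Lazy` runs the builder exactly once, and every caller gets the same `&'static Client`, which lets reqwest reuse its connection pool. A minimal sketch of the pattern, assuming a tokio runtime such as the one vaultwarden already runs on:

    use once_cell::sync::Lazy;
    use reqwest::Client;

    fn get_client() -> &'static Client {
        // Built on first use, shared forever after; reqwest's Client is
        // designed to be cloned or borrowed and reused like this.
        static INSTANCE: Lazy<Client> =
            Lazy::new(|| Client::builder().build().expect("Failed to build client"));
        &INSTANCE
    }

    #[tokio::main]
    async fn main() -> Result<(), reqwest::Error> {
        let body = get_client().get("https://example.com").send().await?.text().await?;
        println!("{} bytes", body.len());
        Ok(())
    }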
@@ -777,3 +773,248 @@ pub fn parse_experimental_client_feature_flags(experimental_client_feature_flags
 
     feature_states
 }
+
+mod dns_resolver {
+    use std::{
+        fmt,
+        net::{IpAddr, SocketAddr},
+        sync::Arc,
+    };
+
+    use hickory_resolver::{system_conf::read_system_conf, TokioAsyncResolver};
+    use once_cell::sync::Lazy;
+    use reqwest::dns::{Name, Resolve, Resolving};
+
+    use crate::{util::is_global, CONFIG};
+
+    #[derive(Debug, Clone)]
+    pub enum CustomResolverError {
+        Blacklist {
+            domain: String,
+        },
+        NonGlobalIp {
+            domain: String,
+            ip: IpAddr,
+        },
+    }
+
+    impl CustomResolverError {
+        pub fn downcast_ref(e: &dyn std::error::Error) -> Option<&Self> {
+            let mut source = e.source();
+
+            while let Some(err) = source {
+                source = err.source();
+                if let Some(err) = err.downcast_ref::<CustomResolverError>() {
+                    return Some(err);
+                }
+            }
+            None
+        }
+    }
+
+    impl fmt::Display for CustomResolverError {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            match self {
+                Self::Blacklist {
+                    domain,
+                } => write!(f, "Blacklisted domain: {domain} matched ICON_BLACKLIST_REGEX"),
+                Self::NonGlobalIp {
+                    domain,
+                    ip,
+                } => write!(f, "IP {ip} for domain '{domain}' is not a global IP!"),
+            }
+        }
+    }
+
+    impl std::error::Error for CustomResolverError {}
+
+    #[derive(Debug, Clone)]
+    pub enum CustomDnsResolver {
+        Default(),
+        Hickory(Arc<TokioAsyncResolver>),
+    }
+    type BoxError = Box<dyn std::error::Error + Send + Sync>;
+
+    impl CustomDnsResolver {
+        pub fn instance() -> Arc<Self> {
+            static INSTANCE: Lazy<Arc<CustomDnsResolver>> = Lazy::new(CustomDnsResolver::new);
+            Arc::clone(&*INSTANCE)
+        }
+
+        fn new() -> Arc<Self> {
+            match read_system_conf() {
+                Ok((config, opts)) => {
+                    let resolver = TokioAsyncResolver::tokio(config.clone(), opts.clone());
+                    Arc::new(Self::Hickory(Arc::new(resolver)))
+                }
+                Err(e) => {
+                    warn!("Error creating Hickory resolver, falling back to default: {e:?}");
+                    Arc::new(Self::Default())
+                }
+            }
+        }
+
+        // Note that we get an iterator of addresses, but we only grab the first one for convenience
+        async fn resolve_domain(&self, name: &str) -> Result<Option<SocketAddr>, BoxError> {
+            pre_resolve(name)?;
+
+            let result = match self {
+                Self::Default() => tokio::net::lookup_host(name).await?.next(),
+                Self::Hickory(r) => r.lookup_ip(name).await?.iter().next().map(|a| SocketAddr::new(a, 0)),
+            };
+
+            if let Some(addr) = &result {
+                post_resolve(name, addr.ip())?;
+            }
+
+            Ok(result)
+        }
+    }
+
+    fn pre_resolve(name: &str) -> Result<(), CustomResolverError> {
+        if crate::api::is_domain_blacklisted(name) {
+            return Err(CustomResolverError::Blacklist {
+                domain: name.to_string(),
+            });
+        }
+
+        Ok(())
+    }
+
+    fn post_resolve(name: &str, ip: IpAddr) -> Result<(), CustomResolverError> {
+        if CONFIG.icon_blacklist_non_global_ips() && !is_global(ip) {
+            Err(CustomResolverError::NonGlobalIp {
+                domain: name.to_string(),
+                ip,
+            })
+        } else {
+            Ok(())
+        }
+    }
+
+    impl Resolve for CustomDnsResolver {
+        fn resolve(&self, name: Name) -> Resolving {
+            let this = self.clone();
+            Box::pin(async move {
+                let name = name.as_str();
+                let result = this.resolve_domain(name).await?;
+                Ok::<reqwest::dns::Addrs, _>(Box::new(result.into_iter()))
+            })
+        }
+    }
+}
+
+pub use dns_resolver::{CustomDnsResolver, CustomResolverError};
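The module exports `CustomDnsResolver` so the HTTP client can be pointed at it; reqwest accepts any `Arc<impl Resolve>` through `ClientBuilder::dns_resolver`, which is presumably how the client builder above wires it in. A self-contained sketch of the same trait hookup with a toy resolver (the real one above additionally applies the blacklist and non-global-IP checks):

    use std::net::{SocketAddr, ToSocketAddrs};
    use std::sync::Arc;

    use reqwest::dns::{Addrs, Name, Resolve, Resolving};

    // Toy resolver delegating to the blocking stdlib lookup; illustrative only.
    struct StdResolver;

    impl Resolve for StdResolver {
        fn resolve(&self, name: Name) -> Resolving {
            Box::pin(async move {
                let addrs = (name.as_str(), 0).to_socket_addrs()?;
                Ok::<Addrs, _>(Box::new(addrs.collect::<Vec<SocketAddr>>().into_iter()))
            })
        }
    }

    fn main() -> Result<(), reqwest::Error> {
        // dns_resolver accepts any Arc<impl Resolve + 'static>.
        let _client = reqwest::ClientBuilder::new().dns_resolver(Arc::new(StdResolver)).build()?;
        Ok(())
    }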
+
+/// TODO: This is extracted from IpAddr::is_global, which is unstable:
+/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
+/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
+#[allow(clippy::nonminimal_bool)]
+#[cfg(any(not(feature = "unstable"), test))]
+pub fn is_global_hardcoded(ip: std::net::IpAddr) -> bool {
+    match ip {
+        std::net::IpAddr::V4(ip) => {
+            !(ip.octets()[0] == 0 // "This network"
+            || ip.is_private()
+            || (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) //ip.is_shared()
+            || ip.is_loopback()
+            || ip.is_link_local()
+            // addresses reserved for future protocols (`192.0.0.0/24`)
+            ||(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
+            || ip.is_documentation()
+            || (ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18) // ip.is_benchmarking()
+            || (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) //ip.is_reserved()
+            || ip.is_broadcast())
+        }
+        std::net::IpAddr::V6(ip) => {
+            !(ip.is_unspecified()
+            || ip.is_loopback()
+            // IPv4-mapped Address (`::ffff:0:0/96`)
+            || matches!(ip.segments(), [0, 0, 0, 0, 0, 0xffff, _, _])
+            // IPv4-IPv6 Translat. (`64:ff9b:1::/48`)
+            || matches!(ip.segments(), [0x64, 0xff9b, 1, _, _, _, _, _])
+            // Discard-Only Address Block (`100::/64`)
+            || matches!(ip.segments(), [0x100, 0, 0, 0, _, _, _, _])
+            // IETF Protocol Assignments (`2001::/23`)
+            || (matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200)
+                && !(
+                    // Port Control Protocol Anycast (`2001:1::1`)
+                    u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001
+                    // Traversal Using Relays around NAT Anycast (`2001:1::2`)
+                    || u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002
+                    // AMT (`2001:3::/32`)
+                    || matches!(ip.segments(), [0x2001, 3, _, _, _, _, _, _])
+                    // AS112-v6 (`2001:4:112::/48`)
+                    || matches!(ip.segments(), [0x2001, 4, 0x112, _, _, _, _, _])
+                    // ORCHIDv2 (`2001:20::/28`)
+                    || matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x2F).contains(&b))
+                ))
+            || ((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8)) // ip.is_documentation()
+            || ((ip.segments()[0] & 0xfe00) == 0xfc00) //ip.is_unique_local()
+            || ((ip.segments()[0] & 0xffc0) == 0xfe80)) //ip.is_unicast_link_local()
+        }
+    }
+}
+
+#[cfg(not(feature = "unstable"))]
+pub use is_global_hardcoded as is_global;
+
+#[cfg(feature = "unstable")]
+#[inline(always)]
+pub fn is_global(ip: std::net::IpAddr) -> bool {
+    ip.is_global()
+}
+
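The `cfg` pair selects an implementation at compile time: on stable toolchains the hardcoded table is compiled and re-exported as `is_global`, while the `unstable` feature switches to the nightly-only `IpAddr::is_global`; the extra `test` clause keeps the hardcoded copy available so the tests below can compare the two. A tiny sketch of the cfg-switched re-export pattern, under a hypothetical `unstable` feature:

    // Fallback that always compiles on stable.
    pub fn answer_hardcoded() -> u32 {
        42
    }

    // On stable, the fallback is re-exported under the public name...
    #[cfg(not(feature = "unstable"))]
    pub use answer_hardcoded as answer;

    // ...while the feature swaps in the preferred implementation.
    #[cfg(feature = "unstable")]
    pub fn answer() -> u32 {
        42
    }

    fn main() {
        assert_eq!(answer(), 42);
    }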
+/// These are some tests to check that the implementations match
+/// The IPv4 can be all checked in 30 seconds or so and they are correct as of nightly 2023-07-17
+/// The IPV6 can't be checked in a reasonable time, so we check over a hundred billion random ones, so far correct
+/// Note that the is_global implementation is subject to change as new IP RFCs are created
+///
+/// To run while showing progress output:
+/// cargo +nightly test --release --features sqlite,unstable -- --nocapture --ignored
+#[cfg(test)]
+#[cfg(feature = "unstable")]
+mod tests {
+    use super::*;
+    use std::net::IpAddr;
+
+    #[test]
+    #[ignore]
+    fn test_ipv4_global() {
+        for a in 0..u8::MAX {
+            println!("Iter: {}/255", a);
+            for b in 0..u8::MAX {
+                for c in 0..u8::MAX {
+                    for d in 0..u8::MAX {
+                        let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
+                        assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {}", ip)
+                    }
+                }
+            }
+        }
+    }
+
+    #[test]
+    #[ignore]
+    fn test_ipv6_global() {
+        use rand::Rng;
+
+        std::thread::scope(|s| {
+            for t in 0..16 {
+                let handle = s.spawn(move || {
+                    let mut v = [0u8; 16];
+                    let mut rng = rand::thread_rng();
+
+                    for i in 0..20 {
+                        println!("Thread {t} Iter: {i}/50");
+                        for _ in 0..500_000_000 {
+                            rng.fill(&mut v);
+                            let ip = IpAddr::V6(std::net::Ipv6Addr::from(v));
+                            assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {ip}");
+                        }
+                    }
+                });
+            }
+        });
+    }
+}