Compare commits

...

12 Commits

Author SHA1 Message Date
Daniel García 0f5cb4c032
Change API inputs/outputs and structs to camelCase 2024-04-27 23:34:36 +02:00
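This commit replaces the hand-written PascalCase fields (and the #[allow(non_snake_case)] workaround) with snake_case fields plus serde's rename_all attribute, as the ciphers.rs hunks further down show. A minimal, self-contained sketch of that pattern, reusing the PartialCipherData shape from this diff (the Serialize derive and main() are added here only for illustration):

use serde::{Deserialize, Serialize};

// JSON keys are camelCase ("folderId", "favorite") while the Rust fields stay snake_case.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
struct PartialCipherData {
    folder_id: Option<String>,
    favorite: bool,
}

fn main() {
    let json = r#"{"folderId":"abc","favorite":true}"#;
    let data: PartialCipherData = serde_json::from_str(json).unwrap();
    assert_eq!(data.folder_id.as_deref(), Some("abc"));
    // Serializing produces camelCase keys again.
    println!("{}", serde_json::to_string(&data).unwrap());
}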
Daniel García 0fe93edea6
Some fixes for the new mobile apps (#4526) 2024-04-27 23:24:04 +02:00
Stefan Melmuk e9aa5a545e
fix emergency access invites (#4337)
* fix emergency access invites with no mail

when mail is disabled, instead of accepting emergency access for all
invited users automatically, we only accept it if the user already exists

on registration of a new account, any open emergency access invitations
will be accepted if mail is disabled

also prevent invited emergency access contacts from registering if emergency
access is disabled (this is only relevant when mail is enabled; if
mail is disabled they should have an Invitation entry)

* delete emergency access invitations

if an invited user is deleted in the /admin panel, their emergency
access invitation will remain in the database, which causes
the to_json_grantee_details fn to panic

* improve missing emergency access grantees

instead of returning an empty emergency access contact, the entry should
not be added to the list. also, the error handling can be improved a bit.
2024-04-27 22:16:05 +02:00
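The panic fix shows up in the emergency_access.rs hunk near the end of this diff: to_json_grantee_details now returns an Option and get_contacts only pushes entries whose grantee still exists. A rough, self-contained sketch of that shape (the types and the find_user_name lookup below are simplified stand-ins, not the real models):

use serde_json::{json, Value};

struct EmergencyAccess {
    grantee_uuid: Option<String>,
    email: Option<String>,
}

// Hypothetical stand-in for the database lookup; returns None when the invited
// user was deleted in the /admin panel.
fn find_user_name(_uuid: &str) -> Option<String> {
    None
}

impl EmergencyAccess {
    // Returns None instead of panicking when the grantee no longer exists,
    // so the caller can simply skip the entry.
    fn to_json_grantee_details(&self) -> Option<Value> {
        let name = match (&self.grantee_uuid, &self.email) {
            (Some(uuid), _) => find_user_name(uuid)?,
            (None, Some(email)) => email.clone(),
            (None, None) => return None,
        };
        Some(json!({ "name": name, "object": "emergencyAccessGranteeDetails" }))
    }
}

fn main() {
    let orphaned = EmergencyAccess { grantee_uuid: Some("gone".into()), email: None };
    // The orphaned invitation is skipped rather than crashing the contacts list.
    assert!(orphaned.to_json_grantee_details().is_none());
}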
Stefan Melmuk 9dcc738f85
improve access to collections via groups (#4441)
* refactor get_org_collections_details

* improve access to collection check

* fix get_org_collection_detail too
2024-04-27 22:09:00 +02:00
Kristof Mattei 84a7c7da5d
Pass in collection ids to notifier when sharing cipher. (#4517) 2024-04-27 21:53:10 +02:00
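In the ciphers.rs hunks below, the shared_to_collection: bool parameter of update_cipher_from_data becomes shared_to_collections: Option<Vec<String>>, and that list is forwarded to nt.send_cipher_update so WebSocket clients learn which collections were affected. A toy, self-contained illustration of the idea (not the real function; the database and access checks are elided):

// The sharing path now collects the uuids of the collections the cipher was
// actually attached to and hands the whole list to the notifier, instead of
// collapsing the information into a single bool.
#[derive(Debug)]
struct CipherUpdateNotification {
    cipher_uuid: String,
    collection_uuids: Option<Vec<String>>, // was effectively a bool before this change
}

fn share_cipher(cipher_uuid: &str, writable_collections: &[&str]) -> CipherUpdateNotification {
    let shared_to_collections: Vec<String> =
        writable_collections.iter().map(|c| (*c).to_string()).collect();
    CipherUpdateNotification {
        cipher_uuid: cipher_uuid.to_string(),
        collection_uuids: Some(shared_to_collections),
    }
}

fn main() {
    let nt = share_cipher("cipher-1", &["col-a", "col-b"]);
    println!("{nt:?}"); // the notifier now knows exactly which collections changed
}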
Mathijs van Veluw ca9234ed86
Add extra (unsupported) container build arch's (#4524)
There was a PR (#4370) to add i686/i386 support for Vaultwarden.
That specific PR was not a viable way of adding this.

This PR adds extra architectures for Debian-based containers which we
will not support by default. Those images will not be built and pushed
to our container registries.

Added the following architectures:
 - linux/386
 - linux/ppc64le
 - linux/s390x

Again, there will be no major support for these architectures, but it
will allow people who use these architectures to build a Debian-based
binary more easily.
2024-04-27 21:51:14 +02:00
Daniel García 27dc67fadd
Implement custom DNS resolver (#3988) 2024-04-27 20:25:34 +02:00
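PR #3988 itself is not shown in full here; the Cargo.toml hunk below drops reqwest's built-in hickory-dns feature and adds a direct hickory-resolver dependency, which points at reqwest's pluggable Resolve trait. A hedged sketch of how such a custom resolver is typically wired up (API names are from reqwest 0.12 and hickory-resolver 0.24 as I recall them, not copied from this PR):

use std::{net::SocketAddr, sync::Arc};

use hickory_resolver::TokioAsyncResolver;
use reqwest::dns::{Addrs, Name, Resolve, Resolving};

// A resolver reqwest will call instead of the system's getaddrinfo().
struct CustomDnsResolver(TokioAsyncResolver);

impl Resolve for CustomDnsResolver {
    fn resolve(&self, name: Name) -> Resolving {
        let resolver = self.0.clone();
        Box::pin(async move {
            let lookup = resolver.lookup_ip(name.as_str()).await?;
            // Port 0 is a placeholder; reqwest substitutes the real port.
            let addrs: Addrs = Box::new(lookup.into_iter().map(|ip| SocketAddr::new(ip, 0)));
            Ok(addrs)
        })
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let resolver = TokioAsyncResolver::tokio_from_system_conf()?;
    let client = reqwest::Client::builder()
        .dns_resolver(Arc::new(CustomDnsResolver(resolver)))
        .build()?;
    println!("{}", client.get("https://example.com").send().await?.status());
    Ok(())
}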
Mathijs van Veluw 2ad33ec97f
Update Crate and Rust (#4522)
* Update Crate and Rust

- Updated all crates
- Updated Rust to the latest patch version

* Updated GitHub Actions
2024-04-27 00:53:42 +02:00
Mathijs van Veluw e1a8df96db
Update Key Rotation web-vault v2024.3.x (#4446)
Key rotation was changed since v2024.1.x.
Multiple other items, such as password-reset and emergency-access data, are now rotated as part of a single POST instead of multiple separate requests.

See: https://github.com/dani-garcia/bw_web_builds/pull/157
2024-04-06 14:42:53 +02:00
Mathijs van Veluw e42a37c6c1
Update crates and some Clippy fixes (#4475)
- Updated all crates including reqwest
- Fixed some clippy lints reported by nightly Rust
2024-04-06 13:55:10 +02:00
Stefan Melmuk 129b835ac7
update web-vault to v2024.3.1 (new vertical layout) (#4468)
* update web-vault to v2024.3.0

* update web-vault to v2024.3.1
2024-04-06 11:45:25 +02:00
Daniel García 2d98aa3045
Use async verify for Yubikey (#4448) 2024-03-23 16:03:17 +01:00
51 changed files with 3119 additions and 2728 deletions

View File

@ -46,7 +46,7 @@ jobs:
steps:
# Checkout the repo
- name: "Checkout"
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b #v4.1.4
# End Checkout the repo
@ -74,7 +74,7 @@ jobs:
# Only install the clippy and rustfmt components on the default rust-toolchain
- name: "Install rust-toolchain version"
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
uses: dtolnay/rust-toolchain@bb45937a053e097f8591208d8e74c90db1873d07 # master @ Apr 14, 2024, 9:02 PM GMT+2
if: ${{ matrix.channel == 'rust-toolchain' }}
with:
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@ -84,7 +84,7 @@ jobs:
# Install the any other channel to be used for which we do not execute clippy and rustfmt
- name: "Install MSRV version"
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
uses: dtolnay/rust-toolchain@bb45937a053e097f8591208d8e74c90db1873d07 # master @ Apr 14, 2024, 9:02 PM GMT+2
if: ${{ matrix.channel != 'rust-toolchain' }}
with:
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"

View File

@ -13,7 +13,7 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
# End Checkout the repo
# Download hadolint - https://github.com/hadolint/hadolint/releases

View File

@ -58,7 +58,7 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
with:
fetch-depth: 0
@ -69,11 +69,11 @@ jobs:
# Start Docker Buildx
- name: Setup Docker Buildx
uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
# https://github.com/moby/buildkit/issues/3969
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions
with:
config-inline: |
buildkitd-config-inline: |
[worker.oci]
max-parallelism = 2
driver-opts: |
@ -102,7 +102,7 @@ jobs:
# Login to Docker Hub
- name: Login to Docker Hub
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
@ -116,7 +116,7 @@ jobs:
# Login to GitHub Container Registry
- name: Login to GitHub Container Registry
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@ -137,7 +137,7 @@ jobs:
# Login to Quay.io
- name: Login to Quay.io
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
with:
registry: quay.io
username: ${{ secrets.QUAY_USERNAME }}
@ -171,7 +171,7 @@ jobs:
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
- name: Bake ${{ matrix.base_image }} containers
uses: docker/bake-action@849707117b03d39aba7924c50a10376a69e88d7d # v4.1.0
uses: docker/bake-action@73b0efa7a0e8ac276e0a8d5c580698a942ff10b5 # v4.4.0
env:
BASE_TAGS: "${{ env.BASE_TAGS }}"
SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
@ -229,28 +229,28 @@ jobs:
# Upload artifacts to Github Actions
- name: "Upload amd64 artifact"
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
path: vaultwarden-amd64
- name: "Upload arm64 artifact"
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
path: vaultwarden-arm64
- name: "Upload armv7 artifact"
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
path: vaultwarden-armv7
- name: "Upload armv6 artifact"
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6

View File

@ -25,10 +25,10 @@ jobs:
actions: read
steps:
- name: Checkout code
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b #v4.1.4
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca # v0.16.1
uses: aquasecurity/trivy-action@d710430a6722f083d3b36b8339ff66b32f22ee55 # v0.19.0
with:
scan-type: repo
ignore-unfixed: true
@ -37,6 +37,6 @@ jobs:
severity: CRITICAL,HIGH
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@b7bf0a3ed3ecfa44160715d7c442788f65f0f923 # v3.23.2
uses: github/codeql-action/upload-sarif@2bbafcdd7fbf96243689e764c2f15d9735164f33 # v3.25.3
with:
sarif_file: 'trivy-results.sarif'

Cargo.lock (generated, 841 lines changed)

File diff suppressed because it is too large.

View File

@ -36,7 +36,7 @@ unstable = []
[target."cfg(not(windows))".dependencies]
# Logging
syslog = "6.1.0"
syslog = "6.1.1"
[dependencies]
# Logging
@ -60,21 +60,21 @@ rocket = { version = "0.5.0", features = ["tls", "json"], default-features = fal
rocket_ws = { version ="0.1.0" }
# WebSockets libraries
rmpv = "1.0.1" # MessagePack library
rmpv = "1.0.2" # MessagePack library
# Concurrent HashMap used for WebSocket messaging and favicons
dashmap = "5.5.3"
# Async futures
futures = "0.3.30"
tokio = { version = "1.36.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
tokio = { version = "1.37.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
# A generic serialization/deserialization framework
serde = { version = "1.0.197", features = ["derive"] }
serde_json = "1.0.114"
serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
# A safe, extensible ORM and Query builder
diesel = { version = "2.1.5", features = ["chrono", "r2d2", "numeric"] }
diesel = { version = "2.1.6", features = ["chrono", "r2d2", "numeric"] }
diesel_migrations = "2.1.0"
diesel_logger = { version = "0.3.0", optional = true }
@ -89,12 +89,12 @@ ring = "0.17.8"
uuid = { version = "1.8.0", features = ["v4"] }
# Date and time libraries
chrono = { version = "0.4.35", features = ["clock", "serde"], default-features = false }
chrono-tz = "0.8.6"
time = "0.3.34"
chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false }
chrono-tz = "0.9.0"
time = "0.3.36"
# Job scheduler
job_scheduler_ng = "2.0.4"
job_scheduler_ng = "2.0.5"
# Data encoding library Hex/Base32/Base64
data-encoding = "2.5.0"
@ -115,27 +115,28 @@ webauthn-rs = "0.3.2"
url = "2.5.0"
# Email libraries
lettre = { version = "0.11.4", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
lettre = { version = "0.11.7", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
email_address = "0.2.4"
# HTML Template library
handlebars = { version = "5.1.0", features = ["dir_source"] }
handlebars = { version = "5.1.2", features = ["dir_source"] }
# HTTP client (Used for favicons, version check, DUO and HIBP API)
reqwest = { version = "0.11.27", features = ["default-tls", "native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies", "hickory-dns"], default-features = false}
reqwest = { version = "0.12.4", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
hickory-resolver = "0.24.1"
# Favicon extraction libraries
html5gum = "0.5.7"
regex = { version = "1.10.3", features = ["std", "perf", "unicode-perl"], default-features = false }
regex = { version = "1.10.4", features = ["std", "perf", "unicode-perl"], default-features = false }
data-url = "0.3.1"
bytes = "1.5.0"
bytes = "1.6.0"
# Cache function results (Used for version check and favicon fetching)
cached = { version = "0.49.2", features = ["async"] }
cached = { version = "0.50.0", features = ["async"] }
# Used for custom short lived cookie jar during favicon extraction
cookie = "0.18.0"
cookie = "0.18.1"
cookie_store = "0.21.0"
# Used by U2F, JWT and PostgreSQL
@ -153,8 +154,8 @@ semver = "1.0.22"
# Allow overriding the default memory allocator
# Mainly used for the musl builds, since the default musl malloc is very slow
mimalloc = { version = "0.1.39", features = ["secure"], default-features = false, optional = true }
which = "6.0.0"
mimalloc = { version = "0.1.41", features = ["secure"], default-features = false, optional = true }
which = "6.0.1"
# Argon2 library with support for the PHC format
argon2 = "0.5.3"
@ -205,14 +206,14 @@ unsafe_code = "forbid"
non_ascii_idents = "forbid"
# Deny
future_incompatible = "deny"
future_incompatible = { level = "deny", priority = -1 }
noop_method_call = "deny"
pointer_structural_match = "deny"
rust_2018_idioms = "deny"
rust_2021_compatibility = "deny"
rust_2018_idioms = { level = "deny", priority = -1 }
rust_2021_compatibility = { level = "deny", priority = -1 }
trivial_casts = "deny"
trivial_numeric_casts = "deny"
unused = "deny"
unused = { level = "deny", priority = -1 }
unused_import_braces = "deny"
unused_lifetimes = "deny"
deprecated_in_future = "deny"

View File

@ -1,10 +1,10 @@
---
vault_version: "v2024.1.2b"
vault_image_digest: "sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08"
# Cross Compile Docker Helper Scripts v1.3.0
vault_version: "v2024.3.1"
vault_image_digest: "sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5"
# Cross Compile Docker Helper Scripts v1.4.0
# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
xx_image_digest: "sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc"
rust_version: 1.77.0 # Rust version to be used
xx_image_digest: "sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4"
rust_version: 1.77.2 # Rust version to be used
debian_version: bookworm # Debian release name to be used
alpine_version: 3.19 # Alpine version to be used
# For which platforms/architectures will we try to build images

View File

@ -18,23 +18,23 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull docker.io/vaultwarden/web-vault:v2024.1.2b
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.1.2b
# [docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08]
# $ docker pull docker.io/vaultwarden/web-vault:v2024.3.1
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.3.1
# [docker.io/vaultwarden/web-vault@sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08
# [docker.io/vaultwarden/web-vault:v2024.1.2b]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5
# [docker.io/vaultwarden/web-vault:v2024.3.1]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08 as vault
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5 as vault
########################## ALPINE BUILD IMAGES ##########################
## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
## And for Alpine we define all build images here, they will only be loaded when actually used
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.77.0 as build_amd64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.77.0 as build_arm64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.77.0 as build_armv7
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.77.0 as build_armv6
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.77.2 as build_amd64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.77.2 as build_arm64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.77.2 as build_armv7
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.77.2 as build_armv6
########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
@ -65,13 +65,14 @@ RUN mkdir -pv "${CARGO_HOME}" \
RUN USER=root cargo new --bin /app
WORKDIR /app
# Shared variables across Debian and Alpine
# Environment variables for Cargo on Alpine based builds
RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
# Output the current contents of the file
cat /env-cargo
# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

View File

@ -18,24 +18,24 @@
# - From https://hub.docker.com/r/vaultwarden/web-vault/tags,
# click the tag name to view the digest of the image it currently points to.
# - From the command line:
# $ docker pull docker.io/vaultwarden/web-vault:v2024.1.2b
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.1.2b
# [docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08]
# $ docker pull docker.io/vaultwarden/web-vault:v2024.3.1
# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2024.3.1
# [docker.io/vaultwarden/web-vault@sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5]
#
# - Conversely, to get the tag name from the digest:
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08
# [docker.io/vaultwarden/web-vault:v2024.1.2b]
# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5
# [docker.io/vaultwarden/web-vault:v2024.3.1]
#
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:798c0c893b6d16728878ff280b49da08863334d1f8dd88895580dc3dba622f08 as vault
FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5 as vault
########################## Cross Compile Docker Helper Scripts ##########################
## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
## And these bash scripts do not have any significant difference if at all
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc AS xx
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4 AS xx
########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.77.0-slim-bookworm as build
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.77.2-slim-bookworm as build
COPY --from=xx / /
ARG TARGETARCH
ARG TARGETVARIANT
@ -88,9 +88,17 @@ RUN mkdir -pv "${CARGO_HOME}" \
RUN USER=root cargo new --bin /app
WORKDIR /app
# Environment variables for cargo across Debian and Alpine
# Environment variables for Cargo on Debian based builds
ARG ARCH_OPENSSL_LIB_DIR \
ARCH_OPENSSL_INCLUDE_DIR
RUN source /env-cargo && \
if xx-info is-cross ; then \
# Some special variables if needed to override some build paths
if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
fi && \
# We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
# Because of this we generate the needed environment variables here which we can load in the needed steps.
echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \

View File

@ -108,9 +108,17 @@ RUN USER=root cargo new --bin /app
WORKDIR /app
{% if base == "debian" %}
# Environment variables for cargo across Debian and Alpine
# Environment variables for Cargo on Debian based builds
ARG ARCH_OPENSSL_LIB_DIR \
ARCH_OPENSSL_INCLUDE_DIR
RUN source /env-cargo && \
if xx-info is-cross ; then \
# Some special variables if needed to override some build paths
if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
fi && \
# We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
# Because of this we generate the needed environment variables here which we can load in the needed steps.
echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
@ -126,13 +134,14 @@ RUN source /env-cargo && \
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
{% elif base == "alpine" %}
# Shared variables across Debian and Alpine
# Environment variables for Cargo on Alpine based builds
RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
# Output the current contents of the file
cat /env-cargo
# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
{% endif %}

View File

@ -11,6 +11,11 @@ With just these two files we can build both Debian and Alpine images for the fol
- armv7 (linux/arm/v7)
- armv6 (linux/arm/v6)
Some unsupported platforms for Debian based images. These are not built and tested by default and are only provided to make it easier for users to build for these architectures.
- 386 (linux/386)
- ppc64le (linux/ppc64le)
- s390x (linux/s390x)
To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different then your host.<br>
This ensures the container build process can run binaries from other architectures.<br>

View File

@ -125,6 +125,40 @@ target "debian-armv6" {
tags = generate_tags("", "-armv6")
}
// ==== Start of unsupported Debian architecture targets ===
// These are provided just to help users build for these rare platforms
// They will not be built by default
target "debian-386" {
inherits = ["debian"]
platforms = ["linux/386"]
tags = generate_tags("", "-386")
args = {
ARCH_OPENSSL_LIB_DIR = "/usr/lib/i386-linux-gnu"
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/i386-linux-gnu"
}
}
target "debian-ppc64le" {
inherits = ["debian"]
platforms = ["linux/ppc64le"]
tags = generate_tags("", "-ppc64le")
args = {
ARCH_OPENSSL_LIB_DIR = "/usr/lib/powerpc64le-linux-gnu"
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/powerpc64le-linux-gnu"
}
}
target "debian-s390x" {
inherits = ["debian"]
platforms = ["linux/s390x"]
tags = generate_tags("", "-s390x")
args = {
ARCH_OPENSSL_LIB_DIR = "/usr/lib/s390x-linux-gnu"
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/s390x-linux-gnu"
}
}
// ==== End of unsupported Debian architecture targets ===
// A Group to build all platforms individually for local testing
group "debian-all" {
targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]

View File

@ -1,4 +1,4 @@
[toolchain]
channel = "1.77.0"
channel = "1.77.2"
components = [ "rustfmt", "clippy" ]
profile = "minimal"

View File

@ -265,8 +265,8 @@ fn admin_page_login() -> ApiResult<Html<String>> {
render_admin_login(None, None)
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct InviteData {
email: String,
}
@ -475,7 +475,7 @@ async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) ->
}
}
#[derive(Deserialize, Debug)]
#[derive(Debug, Deserialize)]
struct UserOrgTypeData {
user_type: NumberOrString,
user_uuid: String,
@ -701,10 +701,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
let (latest_release, latest_commit, latest_web_build) =
get_release_info(has_http_access, running_within_container).await;
let ip_header_name = match &ip_header.0 {
Some(h) => h,
_ => "",
};
let ip_header_name = &ip_header.0.unwrap_or_default();
let diagnostics_json = json!({
"dns_resolved": dns_resolved,
@ -717,8 +714,8 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
"running_within_container": running_within_container,
"container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
"has_http_access": has_http_access,
"ip_header_exists": &ip_header.0.is_some(),
"ip_header_match": ip_header_name == CONFIG.ip_header(),
"ip_header_exists": !ip_header_name.is_empty(),
"ip_header_match": ip_header_name.eq(&CONFIG.ip_header()),
"ip_header_name": ip_header_name,
"ip_header_config": &CONFIG.ip_header(),
"uses_proxy": uses_proxy,

File diff suppressed because it is too large.

View File

@ -10,8 +10,9 @@ use rocket::{
};
use serde_json::Value;
use crate::util::NumberOrString;
use crate::{
api::{self, core::log_event, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordOrOtpData, UpdateType},
api::{self, core::log_event, EmptyResult, JsonResult, Notify, PasswordOrOtpData, UpdateType},
auth::Headers,
crypto,
db::{models::*, DbConn, DbPool},
@ -140,15 +141,15 @@ async fn sync(data: SyncData, headers: Headers, mut conn: DbConn) -> Json<Value>
};
Json(json!({
"Profile": user_json,
"Folders": folders_json,
"Collections": collections_json,
"Policies": policies_json,
"Ciphers": ciphers_json,
"Domains": domains_json,
"Sends": sends_json,
"profile": user_json,
"folders": folders_json,
"collections": collections_json,
"policies": policies_json,
"ciphers": ciphers_json,
"domains": domains_json,
"sends": sends_json,
"unofficialServer": true,
"Object": "sync"
"object": "sync"
}))
}
@ -166,9 +167,9 @@ async fn get_ciphers(headers: Headers, mut conn: DbConn) -> Json<Value> {
}
Json(json!({
"Data": ciphers_json,
"Object": "list",
"ContinuationToken": null
"data": ciphers_json,
"object": "list",
"continuationToken": null
}))
}
@ -197,17 +198,17 @@ async fn get_cipher_details(uuid: &str, headers: Headers, conn: DbConn) -> JsonR
get_cipher(uuid, headers, conn).await
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CipherData {
// Id is optional as it is included only in bulk share
pub Id: Option<String>,
pub id: Option<String>,
// Folder id is not included in import
FolderId: Option<String>,
folder_id: Option<String>,
// TODO: Some of these might appear all the time, no need for Option
OrganizationId: Option<String>,
pub organization_id: Option<String>,
Key: Option<String>,
key: Option<String>,
/*
Login = 1,
@ -215,27 +216,27 @@ pub struct CipherData {
Card = 3,
Identity = 4
*/
pub Type: i32,
pub Name: String,
pub Notes: Option<String>,
Fields: Option<Value>,
pub r#type: i32,
pub name: String,
pub notes: Option<String>,
fields: Option<Value>,
// Only one of these should exist, depending on type
Login: Option<Value>,
SecureNote: Option<Value>,
Card: Option<Value>,
Identity: Option<Value>,
login: Option<Value>,
secure_note: Option<Value>,
card: Option<Value>,
identity: Option<Value>,
Favorite: Option<bool>,
Reprompt: Option<i32>,
favorite: Option<bool>,
reprompt: Option<i32>,
PasswordHistory: Option<Value>,
password_history: Option<Value>,
// These are used during key rotation
// 'Attachments' is unused, contains map of {id: filename}
#[serde(rename = "Attachments")]
_Attachments: Option<Value>,
Attachments2: Option<HashMap<String, Attachments2Data>>,
#[allow(dead_code)]
attachments: Option<Value>,
attachments2: Option<HashMap<String, Attachments2Data>>,
// The revision datetime (in ISO 8601 format) of the client's local copy
// of the cipher. This is used to prevent a client from updating a cipher
@ -243,31 +244,26 @@ pub struct CipherData {
// loss. It's not an error when no value is provided; this can happen
// when using older client versions, or if the operation doesn't involve
// updating an existing cipher.
LastKnownRevisionDate: Option<String>,
last_known_revision_date: Option<String>,
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PartialCipherData {
FolderId: Option<String>,
Favorite: bool,
folder_id: Option<String>,
favorite: bool,
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Attachments2Data {
FileName: String,
Key: String,
file_name: String,
key: String,
}
/// Called when an org admin clones an org cipher.
#[post("/ciphers/admin", data = "<data>")]
async fn post_ciphers_admin(
data: JsonUpcase<ShareCipherData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
async fn post_ciphers_admin(data: Json<ShareCipherData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
post_ciphers_create(data, headers, conn, nt).await
}
@ -276,25 +272,25 @@ async fn post_ciphers_admin(
/// `organizationId` is null.
#[post("/ciphers/create", data = "<data>")]
async fn post_ciphers_create(
data: JsonUpcase<ShareCipherData>,
data: Json<ShareCipherData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let mut data: ShareCipherData = data.into_inner().data;
let mut data: ShareCipherData = data.into_inner();
// Check if there are one more more collections selected when this cipher is part of an organization.
// err if this is not the case before creating an empty cipher.
if data.Cipher.OrganizationId.is_some() && data.CollectionIds.is_empty() {
if data.cipher.organization_id.is_some() && data.collection_ids.is_empty() {
err!("You must select at least one collection.");
}
// This check is usually only needed in update_cipher_from_data(), but we
// need it here as well to avoid creating an empty cipher in the call to
// cipher.save() below.
enforce_personal_ownership_policy(Some(&data.Cipher), &headers, &mut conn).await?;
enforce_personal_ownership_policy(Some(&data.cipher), &headers, &mut conn).await?;
let mut cipher = Cipher::new(data.Cipher.Type, data.Cipher.Name.clone());
let mut cipher = Cipher::new(data.cipher.r#type, data.cipher.name.clone());
cipher.user_uuid = Some(headers.user.uuid.clone());
cipher.save(&mut conn).await?;
@ -304,24 +300,24 @@ async fn post_ciphers_create(
// the current time, so the stale data check will end up failing down the
// line. Since this function only creates new ciphers (whether by cloning
// or otherwise), we can just ignore this field entirely.
data.Cipher.LastKnownRevisionDate = None;
data.cipher.last_known_revision_date = None;
share_cipher_by_uuid(&cipher.uuid, data, &headers, &mut conn, &nt).await
}
/// Called when creating a new user-owned cipher.
#[post("/ciphers", data = "<data>")]
async fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
let mut data: CipherData = data.into_inner().data;
async fn post_ciphers(data: Json<CipherData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
let mut data: CipherData = data.into_inner();
// The web/browser clients set this field to null as expected, but the
// mobile clients seem to set the invalid value `0001-01-01T00:00:00`,
// which results in a warning message being logged. This field isn't
// needed when creating a new cipher, so just ignore it unconditionally.
data.LastKnownRevisionDate = None;
data.last_known_revision_date = None;
let mut cipher = Cipher::new(data.Type, data.Name.clone());
update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;
let mut cipher = Cipher::new(data.r#type, data.name.clone());
update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
}
@ -338,7 +334,7 @@ async fn enforce_personal_ownership_policy(
headers: &Headers,
conn: &mut DbConn,
) -> EmptyResult {
if data.is_none() || data.unwrap().OrganizationId.is_none() {
if data.is_none() || data.unwrap().organization_id.is_none() {
let user_uuid = &headers.user.uuid;
let policy_type = OrgPolicyType::PersonalOwnership;
if OrgPolicy::is_applicable_to_user(user_uuid, policy_type, None, conn).await {
@ -352,7 +348,7 @@ pub async fn update_cipher_from_data(
cipher: &mut Cipher,
data: CipherData,
headers: &Headers,
shared_to_collection: bool,
shared_to_collections: Option<Vec<String>>,
conn: &mut DbConn,
nt: &Notify<'_>,
ut: UpdateType,
@ -362,7 +358,7 @@ pub async fn update_cipher_from_data(
// Check that the client isn't updating an existing cipher with stale data.
// And only perform this check when not importing ciphers, else the date/time check will fail.
if ut != UpdateType::None {
if let Some(dt) = data.LastKnownRevisionDate {
if let Some(dt) = data.last_known_revision_date {
match NaiveDateTime::parse_from_str(&dt, "%+") {
// ISO 8601 format
Err(err) => warn!("Error parsing LastKnownRevisionDate '{}': {}", dt, err),
@ -374,24 +370,24 @@ pub async fn update_cipher_from_data(
}
}
if cipher.organization_uuid.is_some() && cipher.organization_uuid != data.OrganizationId {
if cipher.organization_uuid.is_some() && cipher.organization_uuid != data.organization_id {
err!("Organization mismatch. Please resync the client before updating the cipher")
}
if let Some(note) = &data.Notes {
if let Some(note) = &data.notes {
if note.len() > 10_000 {
err!("The field Notes exceeds the maximum encrypted value length of 10000 characters.")
}
}
// Check if this cipher is being transferred from a personal to an organization vault
let transfer_cipher = cipher.organization_uuid.is_none() && data.OrganizationId.is_some();
let transfer_cipher = cipher.organization_uuid.is_none() && data.organization_id.is_some();
if let Some(org_id) = data.OrganizationId {
if let Some(org_id) = data.organization_id {
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await {
None => err!("You don't have permission to add item to organization"),
Some(org_user) => {
if shared_to_collection
if shared_to_collections.is_some()
|| org_user.has_full_access()
|| cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await
{
@ -411,7 +407,7 @@ pub async fn update_cipher_from_data(
cipher.user_uuid = Some(headers.user.uuid.clone());
}
if let Some(ref folder_id) = data.FolderId {
if let Some(ref folder_id) = data.folder_id {
match Folder::find_by_uuid(folder_id, conn).await {
Some(folder) => {
if folder.user_uuid != headers.user.uuid {
@ -423,7 +419,7 @@ pub async fn update_cipher_from_data(
}
// Modify attachments name and keys when rotating
if let Some(attachments) = data.Attachments2 {
if let Some(attachments) = data.attachments2 {
for (id, attachment) in attachments {
let mut saved_att = match Attachment::find_by_id(&id, conn).await {
Some(att) => att,
@ -444,8 +440,8 @@ pub async fn update_cipher_from_data(
break;
}
saved_att.akey = Some(attachment.Key);
saved_att.file_name = attachment.FileName;
saved_att.akey = Some(attachment.key);
saved_att.file_name = attachment.file_name;
saved_att.save(conn).await?;
}
@ -459,44 +455,44 @@ pub async fn update_cipher_from_data(
fn _clean_cipher_data(mut json_data: Value) -> Value {
if json_data.is_array() {
json_data.as_array_mut().unwrap().iter_mut().for_each(|ref mut f| {
f.as_object_mut().unwrap().remove("Response");
f.as_object_mut().unwrap().remove("response");
});
};
json_data
}
let type_data_opt = match data.Type {
1 => data.Login,
2 => data.SecureNote,
3 => data.Card,
4 => data.Identity,
let type_data_opt = match data.r#type {
1 => data.login,
2 => data.secure_note,
3 => data.card,
4 => data.identity,
_ => err!("Invalid type"),
};
let type_data = match type_data_opt {
Some(mut data) => {
// Remove the 'Response' key from the base object.
data.as_object_mut().unwrap().remove("Response");
data.as_object_mut().unwrap().remove("response");
// Remove the 'Response' key from every Uri.
if data["Uris"].is_array() {
data["Uris"] = _clean_cipher_data(data["Uris"].clone());
if data["uris"].is_array() {
data["uris"] = _clean_cipher_data(data["uris"].clone());
}
data
}
None => err!("Data missing"),
};
cipher.key = data.Key;
cipher.name = data.Name;
cipher.notes = data.Notes;
cipher.fields = data.Fields.map(|f| _clean_cipher_data(f).to_string());
cipher.key = data.key;
cipher.name = data.name;
cipher.notes = data.notes;
cipher.fields = data.fields.map(|f| _clean_cipher_data(f).to_string());
cipher.data = type_data.to_string();
cipher.password_history = data.PasswordHistory.map(|f| f.to_string());
cipher.reprompt = data.Reprompt;
cipher.password_history = data.password_history.map(|f| f.to_string());
cipher.reprompt = data.reprompt;
cipher.save(conn).await?;
cipher.move_to_folder(data.FolderId, &headers.user.uuid, conn).await?;
cipher.set_favorite(data.Favorite, &headers.user.uuid, conn).await?;
cipher.move_to_folder(data.folder_id, &headers.user.uuid, conn).await?;
cipher.set_favorite(data.favorite, &headers.user.uuid, conn).await?;
if ut != UpdateType::None {
// Only log events for organizational ciphers
@ -518,50 +514,57 @@ pub async fn update_cipher_from_data(
)
.await;
}
nt.send_cipher_update(ut, cipher, &cipher.update_users_revision(conn).await, &headers.device.uuid, None, conn)
.await;
nt.send_cipher_update(
ut,
cipher,
&cipher.update_users_revision(conn).await,
&headers.device.uuid,
shared_to_collections,
conn,
)
.await;
}
Ok(())
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct ImportData {
Ciphers: Vec<CipherData>,
Folders: Vec<FolderData>,
FolderRelationships: Vec<RelationsData>,
ciphers: Vec<CipherData>,
folders: Vec<FolderData>,
folder_relationships: Vec<RelationsData>,
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct RelationsData {
// Cipher id
Key: usize,
key: usize,
// Folder id
Value: usize,
value: usize,
}
#[post("/ciphers/import", data = "<data>")]
async fn post_ciphers_import(
data: JsonUpcase<ImportData>,
data: Json<ImportData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
enforce_personal_ownership_policy(None, &headers, &mut conn).await?;
let data: ImportData = data.into_inner().data;
let data: ImportData = data.into_inner();
// Validate the import before continuing
// Bitwarden does not process the import if there is one item invalid.
// Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
Cipher::validate_notes(&data.Ciphers)?;
Cipher::validate_notes(&data.ciphers)?;
// Read and create the folders
let mut folders: Vec<_> = Vec::new();
for folder in data.Folders.into_iter() {
let mut new_folder = Folder::new(headers.user.uuid.clone(), folder.Name);
for folder in data.folders.into_iter() {
let mut new_folder = Folder::new(headers.user.uuid.clone(), folder.name);
new_folder.save(&mut conn).await?;
folders.push(new_folder);
@ -570,17 +573,17 @@ async fn post_ciphers_import(
// Read the relations between folders and ciphers
let mut relations_map = HashMap::new();
for relation in data.FolderRelationships {
relations_map.insert(relation.Key, relation.Value);
for relation in data.folder_relationships {
relations_map.insert(relation.key, relation.value);
}
// Read and create the ciphers
for (index, mut cipher_data) in data.Ciphers.into_iter().enumerate() {
for (index, mut cipher_data) in data.ciphers.into_iter().enumerate() {
let folder_uuid = relations_map.get(&index).map(|i| folders[*i].uuid.clone());
cipher_data.FolderId = folder_uuid;
cipher_data.folder_id = folder_uuid;
let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await?;
let mut cipher = Cipher::new(cipher_data.r#type, cipher_data.name.clone());
update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await?;
}
let mut user = headers.user;
@ -594,7 +597,7 @@ async fn post_ciphers_import(
#[put("/ciphers/<uuid>/admin", data = "<data>")]
async fn put_cipher_admin(
uuid: &str,
data: JsonUpcase<CipherData>,
data: Json<CipherData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
@ -605,7 +608,7 @@ async fn put_cipher_admin(
#[post("/ciphers/<uuid>/admin", data = "<data>")]
async fn post_cipher_admin(
uuid: &str,
data: JsonUpcase<CipherData>,
data: Json<CipherData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
@ -614,25 +617,19 @@ async fn post_cipher_admin(
}
#[post("/ciphers/<uuid>", data = "<data>")]
async fn post_cipher(
uuid: &str,
data: JsonUpcase<CipherData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
async fn post_cipher(uuid: &str, data: Json<CipherData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
put_cipher(uuid, data, headers, conn, nt).await
}
#[put("/ciphers/<uuid>", data = "<data>")]
async fn put_cipher(
uuid: &str,
data: JsonUpcase<CipherData>,
data: Json<CipherData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let data: CipherData = data.into_inner().data;
let data: CipherData = data.into_inner();
let mut cipher = match Cipher::find_by_uuid(uuid, &mut conn).await {
Some(cipher) => cipher,
@ -648,18 +645,13 @@ async fn put_cipher(
err!("Cipher is not write accessible")
}
update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;
update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
}
#[post("/ciphers/<uuid>/partial", data = "<data>")]
async fn post_cipher_partial(
uuid: &str,
data: JsonUpcase<PartialCipherData>,
headers: Headers,
conn: DbConn,
) -> JsonResult {
async fn post_cipher_partial(uuid: &str, data: Json<PartialCipherData>, headers: Headers, conn: DbConn) -> JsonResult {
put_cipher_partial(uuid, data, headers, conn).await
}
@ -667,18 +659,18 @@ async fn post_cipher_partial(
#[put("/ciphers/<uuid>/partial", data = "<data>")]
async fn put_cipher_partial(
uuid: &str,
data: JsonUpcase<PartialCipherData>,
data: Json<PartialCipherData>,
headers: Headers,
mut conn: DbConn,
) -> JsonResult {
let data: PartialCipherData = data.into_inner().data;
let data: PartialCipherData = data.into_inner();
let cipher = match Cipher::find_by_uuid(uuid, &mut conn).await {
Some(cipher) => cipher,
None => err!("Cipher doesn't exist"),
};
if let Some(ref folder_id) = data.FolderId {
if let Some(ref folder_id) = data.folder_id {
match Folder::find_by_uuid(folder_id, &mut conn).await {
Some(folder) => {
if folder.user_uuid != headers.user.uuid {
@ -690,23 +682,23 @@ async fn put_cipher_partial(
}
// Move cipher
cipher.move_to_folder(data.FolderId.clone(), &headers.user.uuid, &mut conn).await?;
cipher.move_to_folder(data.folder_id.clone(), &headers.user.uuid, &mut conn).await?;
// Update favorite
cipher.set_favorite(Some(data.Favorite), &headers.user.uuid, &mut conn).await?;
cipher.set_favorite(Some(data.favorite), &headers.user.uuid, &mut conn).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct CollectionsAdminData {
CollectionIds: Vec<String>,
collection_ids: Vec<String>,
}
#[put("/ciphers/<uuid>/collections", data = "<data>")]
async fn put_collections_update(
uuid: &str,
data: JsonUpcase<CollectionsAdminData>,
data: Json<CollectionsAdminData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
@ -717,7 +709,7 @@ async fn put_collections_update(
#[post("/ciphers/<uuid>/collections", data = "<data>")]
async fn post_collections_update(
uuid: &str,
data: JsonUpcase<CollectionsAdminData>,
data: Json<CollectionsAdminData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
@ -728,7 +720,7 @@ async fn post_collections_update(
#[put("/ciphers/<uuid>/collections-admin", data = "<data>")]
async fn put_collections_admin(
uuid: &str,
data: JsonUpcase<CollectionsAdminData>,
data: Json<CollectionsAdminData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
@ -739,12 +731,12 @@ async fn put_collections_admin(
#[post("/ciphers/<uuid>/collections-admin", data = "<data>")]
async fn post_collections_admin(
uuid: &str,
data: JsonUpcase<CollectionsAdminData>,
data: Json<CollectionsAdminData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
let data: CollectionsAdminData = data.into_inner().data;
let data: CollectionsAdminData = data.into_inner();
let cipher = match Cipher::find_by_uuid(uuid, &mut conn).await {
Some(cipher) => cipher,
@ -755,7 +747,7 @@ async fn post_collections_admin(
err!("Cipher is not write accessible")
}
let posted_collections: HashSet<String> = data.CollectionIds.iter().cloned().collect();
let posted_collections: HashSet<String> = data.collection_ids.iter().cloned().collect();
let current_collections: HashSet<String> =
cipher.get_collections(headers.user.uuid.clone(), &mut conn).await.iter().cloned().collect();
@ -803,21 +795,21 @@ async fn post_collections_admin(
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct ShareCipherData {
Cipher: CipherData,
CollectionIds: Vec<String>,
cipher: CipherData,
collection_ids: Vec<String>,
}
#[post("/ciphers/<uuid>/share", data = "<data>")]
async fn post_cipher_share(
uuid: &str,
data: JsonUpcase<ShareCipherData>,
data: Json<ShareCipherData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let data: ShareCipherData = data.into_inner().data;
let data: ShareCipherData = data.into_inner();
share_cipher_by_uuid(uuid, data, &headers, &mut conn, &nt).await
}
@ -825,53 +817,53 @@ async fn post_cipher_share(
#[put("/ciphers/<uuid>/share", data = "<data>")]
async fn put_cipher_share(
uuid: &str,
data: JsonUpcase<ShareCipherData>,
data: Json<ShareCipherData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let data: ShareCipherData = data.into_inner().data;
let data: ShareCipherData = data.into_inner();
share_cipher_by_uuid(uuid, data, &headers, &mut conn, &nt).await
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct ShareSelectedCipherData {
Ciphers: Vec<CipherData>,
CollectionIds: Vec<String>,
ciphers: Vec<CipherData>,
collection_ids: Vec<String>,
}
#[put("/ciphers/share", data = "<data>")]
async fn put_cipher_share_selected(
data: JsonUpcase<ShareSelectedCipherData>,
data: Json<ShareSelectedCipherData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
let mut data: ShareSelectedCipherData = data.into_inner().data;
let mut data: ShareSelectedCipherData = data.into_inner();
if data.Ciphers.is_empty() {
if data.ciphers.is_empty() {
err!("You must select at least one cipher.")
}
if data.CollectionIds.is_empty() {
if data.collection_ids.is_empty() {
err!("You must select at least one collection.")
}
for cipher in data.Ciphers.iter() {
if cipher.Id.is_none() {
for cipher in data.ciphers.iter() {
if cipher.id.is_none() {
err!("Request missing ids field")
}
}
while let Some(cipher) = data.Ciphers.pop() {
while let Some(cipher) = data.ciphers.pop() {
let mut shared_cipher_data = ShareCipherData {
Cipher: cipher,
CollectionIds: data.CollectionIds.clone(),
cipher,
collection_ids: data.collection_ids.clone(),
};
match shared_cipher_data.Cipher.Id.take() {
match shared_cipher_data.cipher.id.take() {
Some(id) => share_cipher_by_uuid(&id, shared_cipher_data, &headers, &mut conn, &nt).await?,
None => err!("Request missing ids field"),
};
@ -898,16 +890,16 @@ async fn share_cipher_by_uuid(
None => err!("Cipher doesn't exist"),
};
let mut shared_to_collection = false;
let mut shared_to_collections = vec![];
if let Some(organization_uuid) = &data.Cipher.OrganizationId {
for uuid in &data.CollectionIds {
if let Some(organization_uuid) = &data.cipher.organization_id {
for uuid in &data.collection_ids {
match Collection::find_by_uuid_and_org(uuid, organization_uuid, conn).await {
None => err!("Invalid collection ID provided"),
Some(collection) => {
if collection.is_writable_by_user(&headers.user.uuid, conn).await {
CollectionCipher::save(&cipher.uuid, &collection.uuid, conn).await?;
shared_to_collection = true;
shared_to_collections.push(collection.uuid);
} else {
err!("No rights to modify the collection")
}
@ -917,13 +909,13 @@ async fn share_cipher_by_uuid(
};
// When LastKnownRevisionDate is None, it is a new cipher, so send CipherCreate.
let ut = if data.Cipher.LastKnownRevisionDate.is_some() {
let ut = if data.cipher.last_known_revision_date.is_some() {
UpdateType::SyncCipherUpdate
} else {
UpdateType::SyncCipherCreate
};
update_cipher_from_data(&mut cipher, data.Cipher, headers, shared_to_collection, conn, nt, ut).await?;
update_cipher_from_data(&mut cipher, data.cipher, headers, Some(shared_to_collections), conn, nt, ut).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
}
@ -953,12 +945,12 @@ async fn get_attachment(uuid: &str, attachment_id: &str, headers: Headers, mut c
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct AttachmentRequestData {
Key: String,
FileName: String,
FileSize: i64,
AdminRequest: Option<bool>, // true when attaching from an org vault view
key: String,
file_name: String,
file_size: NumberOrString,
admin_request: Option<bool>, // true when attaching from an org vault view
}
enum FileUploadType {
@ -973,7 +965,7 @@ enum FileUploadType {
#[post("/ciphers/<uuid>/attachment/v2", data = "<data>")]
async fn post_attachment_v2(
uuid: &str,
data: JsonUpcase<AttachmentRequestData>,
data: Json<AttachmentRequestData>,
headers: Headers,
mut conn: DbConn,
) -> JsonResult {
@ -986,26 +978,28 @@ async fn post_attachment_v2(
err!("Cipher is not write accessible")
}
let data: AttachmentRequestData = data.into_inner().data;
if data.FileSize < 0 {
let data: AttachmentRequestData = data.into_inner();
let file_size = data.file_size.into_i64()?;
if file_size < 0 {
err!("Attachment size can't be negative")
}
let attachment_id = crypto::generate_attachment_id();
let attachment =
Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, data.FileSize, Some(data.Key));
Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.file_name, file_size, Some(data.key));
attachment.save(&mut conn).await.expect("Error saving attachment");
let url = format!("/ciphers/{}/attachment/{}", cipher.uuid, attachment_id);
let response_key = match data.AdminRequest {
Some(b) if b => "CipherMiniResponse",
_ => "CipherResponse",
let response_key = match data.admin_request {
Some(b) if b => "cipherMiniResponse",
_ => "cipherResponse",
};
Ok(Json(json!({ // AttachmentUploadDataResponseModel
"Object": "attachment-fileUpload",
"AttachmentId": attachment_id,
"Url": url,
"FileUploadType": FileUploadType::Direct as i32,
"object": "attachment-fileUpload",
"attachmentId": attachment_id,
"url": url,
"fileUploadType": FileUploadType::Direct as i32,
response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await,
})))
}
@ -1340,38 +1334,23 @@ async fn delete_cipher_admin(uuid: &str, headers: Headers, mut conn: DbConn, nt:
}
#[delete("/ciphers", data = "<data>")]
async fn delete_cipher_selected(
data: JsonUpcase<Value>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
async fn delete_cipher_selected(data: Json<Value>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
_delete_multiple_ciphers(data, headers, conn, false, nt).await // permanent delete
}
#[post("/ciphers/delete", data = "<data>")]
async fn delete_cipher_selected_post(
data: JsonUpcase<Value>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
async fn delete_cipher_selected_post(data: Json<Value>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
_delete_multiple_ciphers(data, headers, conn, false, nt).await // permanent delete
}
#[put("/ciphers/delete", data = "<data>")]
async fn delete_cipher_selected_put(
data: JsonUpcase<Value>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
async fn delete_cipher_selected_put(data: Json<Value>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
_delete_multiple_ciphers(data, headers, conn, true, nt).await // soft delete
}
#[delete("/ciphers/admin", data = "<data>")]
async fn delete_cipher_selected_admin(
data: JsonUpcase<Value>,
data: Json<Value>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
@ -1381,7 +1360,7 @@ async fn delete_cipher_selected_admin(
#[post("/ciphers/delete-admin", data = "<data>")]
async fn delete_cipher_selected_post_admin(
data: JsonUpcase<Value>,
data: Json<Value>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
@ -1391,7 +1370,7 @@ async fn delete_cipher_selected_post_admin(
#[put("/ciphers/delete-admin", data = "<data>")]
async fn delete_cipher_selected_put_admin(
data: JsonUpcase<Value>,
data: Json<Value>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
@ -1410,33 +1389,28 @@ async fn restore_cipher_put_admin(uuid: &str, headers: Headers, mut conn: DbConn
}
#[put("/ciphers/restore", data = "<data>")]
async fn restore_cipher_selected(
data: JsonUpcase<Value>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
async fn restore_cipher_selected(data: Json<Value>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
_restore_multiple_ciphers(data, &headers, &mut conn, &nt).await
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct MoveCipherData {
FolderId: Option<String>,
Ids: Vec<String>,
folder_id: Option<String>,
ids: Vec<String>,
}
#[post("/ciphers/move", data = "<data>")]
async fn move_cipher_selected(
data: JsonUpcase<MoveCipherData>,
data: Json<MoveCipherData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
let data = data.into_inner().data;
let data = data.into_inner();
let user_uuid = headers.user.uuid;
if let Some(ref folder_id) = data.FolderId {
if let Some(ref folder_id) = data.folder_id {
match Folder::find_by_uuid(folder_id, &mut conn).await {
Some(folder) => {
if folder.user_uuid != user_uuid {
@ -1447,7 +1421,7 @@ async fn move_cipher_selected(
}
}
for uuid in data.Ids {
for uuid in data.ids {
let cipher = match Cipher::find_by_uuid(&uuid, &mut conn).await {
Some(cipher) => cipher,
None => err!("Cipher doesn't exist"),
@ -1458,7 +1432,7 @@ async fn move_cipher_selected(
}
// Move cipher
cipher.move_to_folder(data.FolderId.clone(), &user_uuid, &mut conn).await?;
cipher.move_to_folder(data.folder_id.clone(), &user_uuid, &mut conn).await?;
nt.send_cipher_update(
UpdateType::SyncCipherUpdate,
@ -1476,7 +1450,7 @@ async fn move_cipher_selected(
#[put("/ciphers/move", data = "<data>")]
async fn move_cipher_selected_put(
data: JsonUpcase<MoveCipherData>,
data: Json<MoveCipherData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
@ -1493,12 +1467,12 @@ struct OrganizationId {
#[post("/ciphers/purge?<organization..>", data = "<data>")]
async fn delete_all(
organization: Option<OrganizationId>,
data: JsonUpcase<PasswordOrOtpData>,
data: Json<PasswordOrOtpData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
let data: PasswordOrOtpData = data.into_inner().data;
let data: PasswordOrOtpData = data.into_inner();
let mut user = headers.user;
data.validate(&user, true, &mut conn).await?;
@ -1606,13 +1580,13 @@ async fn _delete_cipher_by_uuid(
}
async fn _delete_multiple_ciphers(
data: JsonUpcase<Value>,
data: Json<Value>,
headers: Headers,
mut conn: DbConn,
soft_delete: bool,
nt: Notify<'_>,
) -> EmptyResult {
let data: Value = data.into_inner().data;
let data: Value = data.into_inner();
let uuids = match data.get("Ids") {
Some(ids) => match ids.as_array() {
@ -1671,12 +1645,12 @@ async fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &mut DbCon
}
async fn _restore_multiple_ciphers(
data: JsonUpcase<Value>,
data: Json<Value>,
headers: &Headers,
conn: &mut DbConn,
nt: &Notify<'_>,
) -> JsonResult {
let data: Value = data.into_inner().data;
let data: Value = data.into_inner();
let uuids = match data.get("Ids") {
Some(ids) => match ids.as_array() {
@ -1695,9 +1669,9 @@ async fn _restore_multiple_ciphers(
}
Ok(Json(json!({
"Data": ciphers,
"Object": "list",
"ContinuationToken": null
"data": ciphers,
"object": "list",
"continuationToken": null
})))
}

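The hunks above swap the old JsonUpcase<T> request guard for Rocket's plain Json<T> and put #[serde(rename_all = "camelCase")] on the request structs, so clients send camelCase keys that serde maps onto snake_case Rust fields. A minimal standalone sketch of that mapping (the main wrapper and the literal payload are illustrative, not part of the diff):

use serde::Deserialize;

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct MoveCipherData {
    folder_id: Option<String>,
    ids: Vec<String>,
}

fn main() -> serde_json::Result<()> {
    // Clients now send camelCase keys; serde maps them onto the snake_case fields.
    let data: MoveCipherData = serde_json::from_str(r#"{"folderId": null, "ids": ["abc"]}"#)?;
    assert!(data.folder_id.is_none());
    assert_eq!(data.ids, vec!["abc".to_string()]);
    Ok(())
}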
View File

@ -5,7 +5,7 @@ use serde_json::Value;
use crate::{
api::{
core::{CipherSyncData, CipherSyncType},
EmptyResult, JsonResult, JsonUpcase,
EmptyResult, JsonResult,
},
auth::{decode_emergency_access_invite, Headers},
db::{models::*, DbConn, DbPool},
@ -43,31 +43,33 @@ pub fn routes() -> Vec<Route> {
async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json<Value> {
if !CONFIG.emergency_access_allowed() {
return Json(json!({
"Data": [{
"Id": "",
"Status": 2,
"Type": 0,
"WaitTimeDays": 0,
"GranteeId": "",
"Email": "",
"Name": "NOTE: Emergency Access is disabled!",
"Object": "emergencyAccessGranteeDetails",
"data": [{
"id": "",
"status": 2,
"type": 0,
"waitTimeDays": 0,
"granteeId": "",
"email": "",
"name": "NOTE: Emergency Access is disabled!",
"object": "emergencyAccessGranteeDetails",
}],
"Object": "list",
"ContinuationToken": null
"object": "list",
"continuationToken": null
}));
}
let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
for ea in emergency_access_list {
emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
if let Some(grantee) = ea.to_json_grantee_details(&mut conn).await {
emergency_access_list_json.push(grantee)
}
}
Json(json!({
"Data": emergency_access_list_json,
"Object": "list",
"ContinuationToken": null
"data": emergency_access_list_json,
"object": "list",
"continuationToken": null
}))
}
@ -84,9 +86,9 @@ async fn get_grantees(headers: Headers, mut conn: DbConn) -> Json<Value> {
}
Json(json!({
"Data": emergency_access_list_json,
"Object": "list",
"ContinuationToken": null
"data": emergency_access_list_json,
"object": "list",
"continuationToken": null
}))
}
@ -95,7 +97,9 @@ async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?;
match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
Some(emergency_access) => Ok(Json(
emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"),
)),
None => err!("Emergency access not valid."),
}
}
@ -105,42 +109,38 @@ async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
// region put/post
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct EmergencyAccessUpdateData {
Type: NumberOrString,
WaitTimeDays: i32,
KeyEncrypted: Option<String>,
r#type: NumberOrString,
wait_time_days: i32,
key_encrypted: Option<String>,
}
#[put("/emergency-access/<emer_id>", data = "<data>")]
async fn put_emergency_access(emer_id: &str, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
async fn put_emergency_access(emer_id: &str, data: Json<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult {
post_emergency_access(emer_id, data, conn).await
}
#[post("/emergency-access/<emer_id>", data = "<data>")]
async fn post_emergency_access(
emer_id: &str,
data: JsonUpcase<EmergencyAccessUpdateData>,
mut conn: DbConn,
) -> JsonResult {
async fn post_emergency_access(emer_id: &str, data: Json<EmergencyAccessUpdateData>, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?;
let data: EmergencyAccessUpdateData = data.into_inner().data;
let data: EmergencyAccessUpdateData = data.into_inner();
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emergency_access) => emergency_access,
None => err!("Emergency access not valid."),
};
let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
Some(new_type) => new_type as i32,
None => err!("Invalid emergency access type."),
};
emergency_access.atype = new_type;
emergency_access.wait_time_days = data.WaitTimeDays;
if data.KeyEncrypted.is_some() {
emergency_access.key_encrypted = data.KeyEncrypted;
emergency_access.wait_time_days = data.wait_time_days;
if data.key_encrypted.is_some() {
emergency_access.key_encrypted = data.key_encrypted;
}
emergency_access.save(&mut conn).await?;
@ -180,24 +180,24 @@ async fn post_delete_emergency_access(emer_id: &str, headers: Headers, conn: DbC
// region invite
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct EmergencyAccessInviteData {
Email: String,
Type: NumberOrString,
WaitTimeDays: i32,
email: String,
r#type: NumberOrString,
wait_time_days: i32,
}
#[post("/emergency-access/invite", data = "<data>")]
async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
check_emergency_access_enabled()?;
let data: EmergencyAccessInviteData = data.into_inner().data;
let email = data.Email.to_lowercase();
let wait_time_days = data.WaitTimeDays;
let data: EmergencyAccessInviteData = data.into_inner();
let email = data.email.to_lowercase();
let wait_time_days = data.wait_time_days;
let emergency_access_status = EmergencyAccessStatus::Invited as i32;
let new_type = match EmergencyAccessType::from_str(&data.Type.into_string()) {
let new_type = match EmergencyAccessType::from_str(&data.r#type.into_string()) {
Some(new_type) => new_type as i32,
None => err!("Invalid emergency access type."),
};
@ -209,7 +209,7 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
err!("You can not set yourself as an emergency contact.")
}
let grantee_user = match User::find_by_mail(&email, &mut conn).await {
let (grantee_user, new_user) = match User::find_by_mail(&email, &mut conn).await {
None => {
if !CONFIG.invitations_allowed() {
err!(format!("Grantee user does not exist: {}", &email))
@ -226,9 +226,10 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
let mut user = User::new(email.clone());
user.save(&mut conn).await?;
user
(user, true)
}
Some(user) => user,
Some(user) if user.password_hash.is_empty() => (user, true),
Some(user) => (user, false),
};
if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
@ -256,15 +257,9 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
&grantor_user.email,
)
.await?;
} else {
// Automatically mark user as accepted if no email invites
match User::find_by_mail(&email, &mut conn).await {
Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
},
None => err!("Grantee user not found."),
}
} else if !new_user {
// if mail is not enabled immediately accept the invitation for existing users
new_emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
}
Ok(())
@ -308,34 +303,29 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp
&grantor_user.email,
)
.await?;
} else {
if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
let invitation = Invitation::new(&email);
invitation.save(&mut conn).await?;
}
// Automatically mark user as accepted if no email invites
match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
}
} else if !grantee_user.password_hash.is_empty() {
// accept the invitation for existing user
emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
} else if CONFIG.invitations_allowed() && Invitation::find_by_mail(&email, &mut conn).await.is_none() {
let invitation = Invitation::new(&email);
invitation.save(&mut conn).await?;
}
Ok(())
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct AcceptData {
Token: String,
token: String,
}
#[post("/emergency-access/<emer_id>/accept", data = "<data>")]
async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
async fn accept_invite(emer_id: &str, data: Json<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
check_emergency_access_enabled()?;
let data: AcceptData = data.into_inner().data;
let token = &data.Token;
let data: AcceptData = data.into_inner();
let token = &data.token;
let claims = decode_emergency_access_invite(token)?;
// This can happen if the user who received the invite used a different email to signup.
@ -367,10 +357,7 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
&& grantor_user.name == claims.grantor_name
&& grantor_user.email == claims.grantor_email
{
match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
}
emergency_access.accept_invite(&grantee_user.uuid, &grantee_user.email, &mut conn).await?;
if CONFIG.mail_enabled() {
mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
@ -382,44 +369,24 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
}
}
async fn accept_invite_process(
grantee_uuid: &str,
emergency_access: &mut EmergencyAccess,
grantee_email: &str,
conn: &mut DbConn,
) -> EmptyResult {
if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
err!("User email does not match invite.");
}
if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
err!("Emergency contact already accepted.");
}
emergency_access.status = EmergencyAccessStatus::Accepted as i32;
emergency_access.grantee_uuid = Some(String::from(grantee_uuid));
emergency_access.email = None;
emergency_access.save(conn).await
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct ConfirmData {
Key: String,
key: String,
}
#[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
async fn confirm_emergency_access(
emer_id: &str,
data: JsonUpcase<ConfirmData>,
data: Json<ConfirmData>,
headers: Headers,
mut conn: DbConn,
) -> JsonResult {
check_emergency_access_enabled()?;
let confirming_user = headers.user;
let data: ConfirmData = data.into_inner().data;
let key = data.Key;
let data: ConfirmData = data.into_inner();
let key = data.key;
let mut emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emer) => emer,
@ -614,9 +581,9 @@ async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn
}
Ok(Json(json!({
"Ciphers": ciphers_json,
"KeyEncrypted": &emergency_access.key_encrypted,
"Object": "emergencyAccessView",
"ciphers": ciphers_json,
"keyEncrypted": &emergency_access.key_encrypted,
"object": "emergencyAccessView",
})))
}
@ -640,35 +607,35 @@ async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
};
let result = json!({
"Kdf": grantor_user.client_kdf_type,
"KdfIterations": grantor_user.client_kdf_iter,
"KdfMemory": grantor_user.client_kdf_memory,
"KdfParallelism": grantor_user.client_kdf_parallelism,
"KeyEncrypted": &emergency_access.key_encrypted,
"Object": "emergencyAccessTakeover",
"kdf": grantor_user.client_kdf_type,
"kdfIterations": grantor_user.client_kdf_iter,
"kdfMemory": grantor_user.client_kdf_memory,
"kdfParallelism": grantor_user.client_kdf_parallelism,
"keyEncrypted": &emergency_access.key_encrypted,
"object": "emergencyAccessTakeover",
});
Ok(Json(result))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct EmergencyAccessPasswordData {
NewMasterPasswordHash: String,
Key: String,
new_master_password_hash: String,
key: String,
}
#[post("/emergency-access/<emer_id>/password", data = "<data>")]
async fn password_emergency_access(
emer_id: &str,
data: JsonUpcase<EmergencyAccessPasswordData>,
data: Json<EmergencyAccessPasswordData>,
headers: Headers,
mut conn: DbConn,
) -> EmptyResult {
check_emergency_access_enabled()?;
let data: EmergencyAccessPasswordData = data.into_inner().data;
let new_master_password_hash = &data.NewMasterPasswordHash;
let data: EmergencyAccessPasswordData = data.into_inner();
let new_master_password_hash = &data.new_master_password_hash;
//let key = &data.Key;
let requesting_user = headers.user;
@ -687,7 +654,7 @@ async fn password_emergency_access(
};
// change grantor_user password
grantor_user.set_password(new_master_password_hash, Some(data.Key), true, None);
grantor_user.set_password(new_master_password_hash, Some(data.key), true, None);
grantor_user.save(&mut conn).await?;
// Disable TwoFactor providers since they will otherwise block logins
@ -725,9 +692,9 @@ async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: Db
let policies_json: Vec<Value> = policies.await.iter().map(OrgPolicy::to_json).collect();
Ok(Json(json!({
"Data": policies_json,
"Object": "list",
"ContinuationToken": null
"data": policies_json,
"object": "list",
"continuationToken": null
})))
}

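In the emergency-access hunks above, the old accept_invite_process helper is replaced by the model's accept_invite method, to_json_grantee_details now returns an Option so entries without a grantee are skipped, and with mail disabled an invitation is only auto-accepted for a grantee that already exists and has a password set. A hypothetical one-function restatement of that last rule (the function and its parameters are illustrative, not code from the diff):

// Sketch only: mirrors the send_invite/resend_invite branches above.
fn auto_accept_invite(mail_enabled: bool, grantee_exists: bool, has_password: bool) -> bool {
    // With mail enabled the grantee accepts through the emailed invite link;
    // otherwise only an already-registered account is accepted on the spot.
    !mail_enabled && grantee_exists && has_password
}

fn main() {
    assert!(auto_accept_invite(false, true, true));
    assert!(!auto_accept_invite(false, true, false));
    assert!(!auto_accept_invite(true, true, true));
}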
View File

@ -5,7 +5,7 @@ use rocket::{form::FromForm, serde::json::Json, Route};
use serde_json::Value;
use crate::{
api::{EmptyResult, JsonResult, JsonUpcaseVec},
api::{EmptyResult, JsonResult},
auth::{AdminHeaders, Headers},
db::{
models::{Cipher, Event, UserOrganization},
@ -22,7 +22,6 @@ pub fn routes() -> Vec<Route> {
}
#[derive(FromForm)]
#[allow(non_snake_case)]
struct EventRange {
start: String,
end: String,
@ -53,9 +52,9 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders,
};
Ok(Json(json!({
"Data": events_json,
"Object": "list",
"ContinuationToken": get_continuation_token(&events_json),
"data": events_json,
"object": "list",
"continuationToken": get_continuation_token(&events_json),
})))
}
@ -85,9 +84,9 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers,
};
Ok(Json(json!({
"Data": events_json,
"Object": "list",
"ContinuationToken": get_continuation_token(&events_json),
"data": events_json,
"object": "list",
"continuationToken": get_continuation_token(&events_json),
})))
}
@ -119,9 +118,9 @@ async fn get_user_events(
};
Ok(Json(json!({
"Data": events_json,
"Object": "list",
"ContinuationToken": get_continuation_token(&events_json),
"data": events_json,
"object": "list",
"continuationToken": get_continuation_token(&events_json),
})))
}
@ -145,33 +144,33 @@ pub fn main_routes() -> Vec<Route> {
routes![post_events_collect,]
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct EventCollection {
// Mandatory
Type: i32,
Date: String,
r#type: i32,
date: String,
// Optional
CipherId: Option<String>,
OrganizationId: Option<String>,
cipher_id: Option<String>,
organization_id: Option<String>,
}
// Upstream:
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
#[post("/collect", format = "application/json", data = "<data>")]
async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Headers, mut conn: DbConn) -> EmptyResult {
async fn post_events_collect(data: Json<Vec<EventCollection>>, headers: Headers, mut conn: DbConn) -> EmptyResult {
if !CONFIG.org_events_enabled() {
return Ok(());
}
for event in data.iter().map(|d| &d.data) {
let event_date = parse_date(&event.Date);
match event.Type {
for event in data.iter() {
let event_date = parse_date(&event.date);
match event.r#type {
1000..=1099 => {
_log_user_event(
event.Type,
event.r#type,
&headers.user.uuid,
headers.device.atype,
Some(event_date),
@ -181,9 +180,9 @@ async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Head
.await;
}
1600..=1699 => {
if let Some(org_uuid) = &event.OrganizationId {
if let Some(org_uuid) = &event.organization_id {
_log_event(
event.Type,
event.r#type,
org_uuid,
org_uuid,
&headers.user.uuid,
@ -196,11 +195,11 @@ async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Head
}
}
_ => {
if let Some(cipher_uuid) = &event.CipherId {
if let Some(cipher_uuid) = &event.cipher_id {
if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
if let Some(org_uuid) = cipher.organization_uuid {
_log_event(
event.Type,
event.r#type,
cipher_uuid,
&org_uuid,
&headers.user.uuid,

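The events hunk above takes the batch as a plain Json<Vec<EventCollection>> instead of JsonUpcaseVec, and the reserved word type becomes the raw identifier r#type. A standalone sketch of deserializing such a camelCase batch (the payload values are made up):

use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct EventCollection {
    r#type: i32,
    date: String,
    cipher_id: Option<String>,
    organization_id: Option<String>,
}

fn main() -> serde_json::Result<()> {
    let body = r#"[{"type": 1000, "date": "2024-04-27T00:00:00Z", "cipherId": null, "organizationId": null}]"#;
    let events: Vec<EventCollection> = serde_json::from_str(body)?;
    // The r# prefix is only Rust syntax; serde still matches the JSON key "type".
    assert_eq!(events[0].r#type, 1000);
    assert!(events[0].organization_id.is_none());
    Ok(())
}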
View File

@ -2,7 +2,7 @@ use rocket::serde::json::Json;
use serde_json::Value;
use crate::{
api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
api::{EmptyResult, JsonResult, Notify, UpdateType},
auth::Headers,
db::{models::*, DbConn},
};
@ -17,9 +17,9 @@ async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
Json(json!({
"Data": folders_json,
"Object": "list",
"ContinuationToken": null,
"data": folders_json,
"object": "list",
"continuationToken": null,
}))
}
@ -38,16 +38,16 @@ async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResul
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
pub struct FolderData {
pub Name: String,
pub name: String,
}
#[post("/folders", data = "<data>")]
async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
let data: FolderData = data.into_inner().data;
async fn post_folders(data: Json<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
let data: FolderData = data.into_inner();
let mut folder = Folder::new(headers.user.uuid, data.Name);
let mut folder = Folder::new(headers.user.uuid, data.name);
folder.save(&mut conn).await?;
nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device.uuid, &mut conn).await;
@ -56,25 +56,19 @@ async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn:
}
#[post("/folders/<uuid>", data = "<data>")]
async fn post_folder(
uuid: &str,
data: JsonUpcase<FolderData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
async fn post_folder(uuid: &str, data: Json<FolderData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
put_folder(uuid, data, headers, conn, nt).await
}
#[put("/folders/<uuid>", data = "<data>")]
async fn put_folder(
uuid: &str,
data: JsonUpcase<FolderData>,
data: Json<FolderData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let data: FolderData = data.into_inner().data;
let data: FolderData = data.into_inner();
let mut folder = match Folder::find_by_uuid(uuid, &mut conn).await {
Some(folder) => folder,
@ -85,7 +79,7 @@ async fn put_folder(
err!("Folder belongs to another user")
}
folder.name = data.Name;
folder.name = data.name;
folder.save(&mut conn).await?;
nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device.uuid, &mut conn).await;

View File

@ -49,19 +49,19 @@ pub fn events_routes() -> Vec<Route> {
use rocket::{serde::json::Json, serde::json::Value, Catcher, Route};
use crate::{
api::{JsonResult, JsonUpcase, Notify, UpdateType},
api::{JsonResult, Notify, UpdateType},
auth::Headers,
db::DbConn,
error::Error,
util::{get_reqwest_client, parse_experimental_client_feature_flags},
};
#[derive(Serialize, Deserialize, Debug)]
#[allow(non_snake_case)]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GlobalDomain {
Type: i32,
Domains: Vec<String>,
Excluded: bool,
r#type: i32,
domains: Vec<String>,
excluded: bool,
}
const GLOBAL_DOMAINS: &str = include_str!("../../static/global_domains.json");
@ -81,38 +81,38 @@ fn _get_eq_domains(headers: Headers, no_excluded: bool) -> Json<Value> {
let mut globals: Vec<GlobalDomain> = from_str(GLOBAL_DOMAINS).unwrap();
for global in &mut globals {
global.Excluded = excluded_globals.contains(&global.Type);
global.excluded = excluded_globals.contains(&global.r#type);
}
if no_excluded {
globals.retain(|g| !g.Excluded);
globals.retain(|g| !g.excluded);
}
Json(json!({
"EquivalentDomains": equivalent_domains,
"GlobalEquivalentDomains": globals,
"Object": "domains",
"equivalentDomains": equivalent_domains,
"globalEquivalentDomains": globals,
"object": "domains",
}))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct EquivDomainData {
ExcludedGlobalEquivalentDomains: Option<Vec<i32>>,
EquivalentDomains: Option<Vec<Vec<String>>>,
excluded_global_equivalent_domains: Option<Vec<i32>>,
equivalent_domains: Option<Vec<Vec<String>>>,
}
#[post("/settings/domains", data = "<data>")]
async fn post_eq_domains(
data: JsonUpcase<EquivDomainData>,
data: Json<EquivDomainData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let data: EquivDomainData = data.into_inner().data;
let data: EquivDomainData = data.into_inner();
let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
let equivalent_domains = data.EquivalentDomains.unwrap_or_default();
let excluded_globals = data.excluded_global_equivalent_domains.unwrap_or_default();
let equivalent_domains = data.equivalent_domains.unwrap_or_default();
let mut user = headers.user;
use serde_json::to_string;
@ -128,12 +128,7 @@ async fn post_eq_domains(
}
#[put("/settings/domains", data = "<data>")]
async fn put_eq_domains(
data: JsonUpcase<EquivDomainData>,
headers: Headers,
conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
async fn put_eq_domains(data: Json<EquivDomainData>, headers: Headers, conn: DbConn, nt: Notify<'_>) -> JsonResult {
post_eq_domains(data, headers, conn, nt).await
}
@ -157,15 +152,15 @@ async fn hibp_breach(username: &str) -> JsonResult {
Ok(Json(value))
} else {
Ok(Json(json!([{
"Name": "HaveIBeenPwned",
"Title": "Manual HIBP Check",
"Domain": "haveibeenpwned.com",
"BreachDate": "2019-08-18T00:00:00Z",
"AddedDate": "2019-08-18T00:00:00Z",
"Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
"LogoPath": "vw_static/hibp.png",
"PwnCount": 0,
"DataClasses": [
"name": "HaveIBeenPwned",
"title": "Manual HIBP Check",
"domain": "haveibeenpwned.com",
"breachDate": "2019-08-18T00:00:00Z",
"addedDate": "2019-08-18T00:00:00Z",
"description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
"logoPath": "vw_static/hibp.png",
"pwnCount": 0,
"dataClasses": [
"Error - No API key set!"
]
}])))
@ -191,14 +186,17 @@ fn version() -> Json<&'static str> {
#[get("/config")]
fn config() -> Json<Value> {
let domain = crate::CONFIG.domain();
let feature_states = parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
let mut feature_states =
parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
// Force the new key rotation feature
feature_states.insert("key-rotation-improvements".to_string(), true);
Json(json!({
// Note: The clients use this version to handle backwards compatibility concerns
// This means they expect a version that closely matches the Bitwarden server version
// We should make sure that we keep this updated when we support the new server features
// Version history:
// - Individual cipher key encryption: 2023.9.1
"version": "2023.9.1",
"version": "2024.2.0",
"gitHash": option_env!("GIT_REV"),
"server": {
"name": "Vaultwarden",

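The domains hunk above keeps the same equivalent-domains logic under the renamed fields: each global entry is marked excluded when its type is in the user's excluded list, and optionally filtered out. A simplified standalone sketch of that marking step (the function name and signature are illustrative):

struct GlobalDomain {
    r#type: i32,
    excluded: bool,
}

fn mark_excluded(mut globals: Vec<GlobalDomain>, excluded_types: &[i32], no_excluded: bool) -> Vec<GlobalDomain> {
    for g in &mut globals {
        g.excluded = excluded_types.contains(&g.r#type);
    }
    if no_excluded {
        // Callers that only want usable entries drop the excluded ones entirely.
        globals.retain(|g| !g.excluded);
    }
    globals
}

fn main() {
    let globals = vec![GlobalDomain { r#type: 2, excluded: false }, GlobalDomain { r#type: 7, excluded: false }];
    let marked = mark_excluded(globals, &[7], true);
    assert_eq!(marked.len(), 1);
    assert_eq!(marked[0].r#type, 2);
}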
File diff suppressed because it is too large

View File

@ -1,13 +1,14 @@
use chrono::Utc;
use rocket::{
request::{self, FromRequest, Outcome},
serde::json::Json,
Request, Route,
};
use std::collections::HashSet;
use crate::{
api::{EmptyResult, JsonUpcase},
api::EmptyResult,
auth,
db::{models::*, DbConn},
mail, CONFIG,
@ -18,43 +19,43 @@ pub fn routes() -> Vec<Route> {
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct OrgImportGroupData {
Name: String,
ExternalId: String,
MemberExternalIds: Vec<String>,
name: String,
external_id: String,
member_external_ids: Vec<String>,
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct OrgImportUserData {
Email: String,
ExternalId: String,
Deleted: bool,
email: String,
external_id: String,
deleted: bool,
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct OrgImportData {
Groups: Vec<OrgImportGroupData>,
Members: Vec<OrgImportUserData>,
OverwriteExisting: bool,
// LargeImport: bool, // For now this will not be used, upstream uses this to prevent syncs of more than 2000 users or groups without the flag set.
groups: Vec<OrgImportGroupData>,
members: Vec<OrgImportUserData>,
overwrite_existing: bool,
// largeImport: bool, // For now this will not be used, upstream uses this to prevent syncs of more than 2000 users or groups without the flag set.
}
#[post("/public/organization/import", data = "<data>")]
async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
async fn ldap_import(data: Json<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult {
// Most of the logic for this function can be found here
// https://github.com/bitwarden/server/blob/fd892b2ff4547648a276734fb2b14a8abae2c6f5/src/Core/Services/Implementations/OrganizationService.cs#L1797
let org_id = token.0;
let data = data.into_inner().data;
let data = data.into_inner();
for user_data in &data.Members {
if user_data.Deleted {
for user_data in &data.members {
if user_data.deleted {
// If user is marked for deletion and it exists, revoke it
if let Some(mut user_org) =
UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await
{
// Only revoke a user if it is not the last confirmed owner
let revoked = if user_org.atype == UserOrgType::Owner
@ -72,27 +73,27 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
user_org.revoke()
};
let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone()));
if revoked || ext_modified {
user_org.save(&mut conn).await?;
}
}
// If user is part of the organization, restore it
} else if let Some(mut user_org) =
UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
UserOrganization::find_by_email_and_org(&user_data.email, &org_id, &mut conn).await
{
let restored = user_org.restore();
let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
let ext_modified = user_org.set_external_id(Some(user_data.external_id.clone()));
if restored || ext_modified {
user_org.save(&mut conn).await?;
}
} else {
// If user is not part of the organization
let user = match User::find_by_mail(&user_data.Email, &mut conn).await {
let user = match User::find_by_mail(&user_data.email, &mut conn).await {
Some(user) => user, // exists in vaultwarden
None => {
// User does not exist yet
let mut new_user = User::new(user_data.Email.clone());
let mut new_user = User::new(user_data.email.clone());
new_user.save(&mut conn).await?;
if !CONFIG.mail_enabled() {
@ -109,7 +110,7 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
};
let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
new_org_user.set_external_id(Some(user_data.ExternalId.clone()));
new_org_user.set_external_id(Some(user_data.external_id.clone()));
new_org_user.access_all = false;
new_org_user.atype = UserOrgType::User as i32;
new_org_user.status = user_org_status;
@ -123,7 +124,7 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
};
mail::send_invite(
&user_data.Email,
&user_data.email,
&user.uuid,
Some(org_id.clone()),
Some(new_org_user.uuid),
@ -136,12 +137,16 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
}
if CONFIG.org_groups_enabled() {
for group_data in &data.Groups {
let group_uuid = match Group::find_by_external_id(&group_data.ExternalId, &mut conn).await {
for group_data in &data.groups {
let group_uuid = match Group::find_by_external_id(&group_data.external_id, &mut conn).await {
Some(group) => group.uuid,
None => {
let mut group =
Group::new(org_id.clone(), group_data.Name.clone(), false, Some(group_data.ExternalId.clone()));
let mut group = Group::new(
org_id.clone(),
group_data.name.clone(),
false,
Some(group_data.external_id.clone()),
);
group.save(&mut conn).await?;
group.uuid
}
@ -149,7 +154,7 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?;
for ext_id in &group_data.MemberExternalIds {
for ext_id in &group_data.member_external_ids {
if let Some(user_org) = UserOrganization::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await
{
let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone());
@ -162,9 +167,9 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co
}
// If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
if data.OverwriteExisting {
if data.overwrite_existing {
// Generate a HashSet to quickly verify if a member is listed or not.
let sync_members: HashSet<String> = data.Members.into_iter().map(|m| m.ExternalId).collect();
let sync_members: HashSet<String> = data.members.into_iter().map(|m| m.external_id).collect();
for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
if let Some(ref user_external_id) = user_org.external_id {
if !sync_members.contains(user_external_id) {

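The ldap_import hunks above keep the sync logic with camelCase field names; when overwrite_existing is set, a HashSet of the synced external ids is used to drop members that are no longer in the directory. A hypothetical standalone sketch of that pruning step (types reduced to plain tuples):

use std::collections::HashSet;

// Given the synced external ids and the org's current members as
// (uuid, external_id) pairs, return the uuids that should be removed.
fn members_to_remove(synced_external_ids: Vec<String>, existing: &[(String, Option<String>)]) -> Vec<String> {
    let sync_members: HashSet<String> = synced_external_ids.into_iter().collect();
    existing
        .iter()
        .filter(|(_, ext)| matches!(ext, Some(e) if !sync_members.contains(e)))
        .map(|(uuid, _)| uuid.clone())
        .collect()
}

fn main() {
    let existing = vec![
        ("uuid-1".to_string(), Some("alice".to_string())),
        ("uuid-2".to_string(), Some("bob".to_string())),
        ("uuid-3".to_string(), None), // no external id, so never pruned by the sync
    ];
    let removed = members_to_remove(vec!["alice".to_string()], &existing);
    assert_eq!(removed, vec!["uuid-2".to_string()]);
}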
View File

@ -9,7 +9,7 @@ use rocket::serde::json::Json;
use serde_json::Value;
use crate::{
api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType},
auth::{ClientIp, Headers, Host},
db::{models::*, DbConn, DbPool},
util::{NumberOrString, SafeString},
@ -48,23 +48,26 @@ pub async fn purge_sends(pool: DbPool) {
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
struct SendData {
Type: i32,
Key: String,
Password: Option<String>,
MaxAccessCount: Option<NumberOrString>,
ExpirationDate: Option<DateTime<Utc>>,
DeletionDate: DateTime<Utc>,
Disabled: bool,
HideEmail: Option<bool>,
#[serde(rename_all = "camelCase")]
pub struct SendData {
r#type: i32,
key: String,
password: Option<String>,
max_access_count: Option<NumberOrString>,
expiration_date: Option<DateTime<Utc>>,
deletion_date: DateTime<Utc>,
disabled: bool,
hide_email: Option<bool>,
// Data field
Name: String,
Notes: Option<String>,
Text: Option<Value>,
File: Option<Value>,
FileLength: Option<NumberOrString>,
name: String,
notes: Option<String>,
text: Option<Value>,
file: Option<Value>,
file_length: Option<NumberOrString>,
// Used for key rotations
pub id: Option<String>,
}
/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
@ -93,7 +96,7 @@ async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> Em
/// Ref: https://bitwarden.com/help/article/policies/#send-options
async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &mut DbConn) -> EmptyResult {
let user_uuid = &headers.user.uuid;
let hide_email = data.HideEmail.unwrap_or(false);
let hide_email = data.hide_email.unwrap_or(false);
if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await {
err!(
"Due to an Enterprise Policy, you are not allowed to hide your email address \
@ -104,40 +107,40 @@ async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, c
}
fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> {
let data_val = if data.Type == SendType::Text as i32 {
data.Text
} else if data.Type == SendType::File as i32 {
data.File
let data_val = if data.r#type == SendType::Text as i32 {
data.text
} else if data.r#type == SendType::File as i32 {
data.file
} else {
err!("Invalid Send type")
};
let data_str = if let Some(mut d) = data_val {
d.as_object_mut().and_then(|o| o.remove("Response"));
d.as_object_mut().and_then(|o| o.remove("response"));
serde_json::to_string(&d)?
} else {
err!("Send data not provided");
};
if data.DeletionDate > Utc::now() + TimeDelta::try_days(31).unwrap() {
if data.deletion_date > Utc::now() + TimeDelta::try_days(31).unwrap() {
err!(
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
);
}
let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc());
let mut send = Send::new(data.r#type, data.name, data_str, data.key, data.deletion_date.naive_utc());
send.user_uuid = Some(user_uuid);
send.notes = data.Notes;
send.max_access_count = match data.MaxAccessCount {
send.notes = data.notes;
send.max_access_count = match data.max_access_count {
Some(m) => Some(m.into_i32()?),
_ => None,
};
send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
send.disabled = data.Disabled;
send.hide_email = data.HideEmail;
send.atype = data.Type;
send.expiration_date = data.expiration_date.map(|d| d.naive_utc());
send.disabled = data.disabled;
send.hide_email = data.hide_email;
send.atype = data.r#type;
send.set_password(data.Password.as_deref());
send.set_password(data.password.as_deref());
Ok(send)
}
@ -148,9 +151,9 @@ async fn get_sends(headers: Headers, mut conn: DbConn) -> Json<Value> {
let sends_json: Vec<Value> = sends.await.iter().map(|s| s.to_json()).collect();
Json(json!({
"Data": sends_json,
"Object": "list",
"ContinuationToken": null
"data": sends_json,
"object": "list",
"continuationToken": null
}))
}
@ -169,13 +172,13 @@ async fn get_send(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult
}
#[post("/sends", data = "<data>")]
async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
async fn post_send(data: Json<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
enforce_disable_send_policy(&headers, &mut conn).await?;
let data: SendData = data.into_inner().data;
let data: SendData = data.into_inner();
enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;
if data.Type == SendType::File as i32 {
if data.r#type == SendType::File as i32 {
err!("File sends should use /api/sends/file")
}
@ -195,7 +198,7 @@ async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbCon
#[derive(FromForm)]
struct UploadData<'f> {
model: Json<crate::util::UpCase<SendData>>,
model: Json<SendData>,
data: TempFile<'f>,
}
@ -215,7 +218,7 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
model,
mut data,
} = data.into_inner();
let model = model.into_inner().data;
let model = model.into_inner();
let Some(size) = data.len().to_i64() else {
err!("Invalid send size");
@ -263,9 +266,9 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
let mut data_value: Value = serde_json::from_str(&send.data)?;
if let Some(o) = data_value.as_object_mut() {
o.insert(String::from("Id"), Value::String(file_id));
o.insert(String::from("Size"), Value::Number(size.into()));
o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size)));
o.insert(String::from("id"), Value::String(file_id));
o.insert(String::from("size"), Value::Number(size.into()));
o.insert(String::from("sizeName"), Value::String(crate::util::get_display_size(size)));
}
send.data = serde_json::to_string(&data_value)?;
@ -285,18 +288,18 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
// Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L190
#[post("/sends/file/v2", data = "<data>")]
async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
async fn post_send_file_v2(data: Json<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult {
enforce_disable_send_policy(&headers, &mut conn).await?;
let data = data.into_inner().data;
let data = data.into_inner();
if data.Type != SendType::File as i32 {
if data.r#type != SendType::File as i32 {
err!("Send content is not a file");
}
enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;
let file_length = match &data.FileLength {
let file_length = match &data.file_length {
Some(m) => m.into_i64()?,
_ => err!("Invalid send length"),
};
@ -331,9 +334,9 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con
let mut data_value: Value = serde_json::from_str(&send.data)?;
if let Some(o) = data_value.as_object_mut() {
o.insert(String::from("Id"), Value::String(file_id.clone()));
o.insert(String::from("Size"), Value::Number(file_length.into()));
o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length)));
o.insert(String::from("id"), Value::String(file_id.clone()));
o.insert(String::from("size"), Value::Number(file_length.into()));
o.insert(String::from("sizeName"), Value::String(crate::util::get_display_size(file_length)));
}
send.data = serde_json::to_string(&data_value)?;
send.save(&mut conn).await?;
@ -392,15 +395,15 @@ async fn post_send_file_v2_data(
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
pub struct SendAccessData {
pub Password: Option<String>,
pub password: Option<String>,
}
#[post("/sends/access/<access_id>", data = "<data>")]
async fn post_access(
access_id: &str,
data: JsonUpcase<SendAccessData>,
data: Json<SendAccessData>,
mut conn: DbConn,
ip: ClientIp,
nt: Notify<'_>,
@ -431,7 +434,7 @@ async fn post_access(
}
if send.password_hash.is_some() {
match data.into_inner().data.Password {
match data.into_inner().password {
Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
Some(_) => err!("Invalid password", format!("IP: {}.", ip.ip)),
None => err_code!("Password not provided", format!("IP: {}.", ip.ip), 401),
@ -461,7 +464,7 @@ async fn post_access(
async fn post_access_file(
send_id: &str,
file_id: &str,
data: JsonUpcase<SendAccessData>,
data: Json<SendAccessData>,
host: Host,
mut conn: DbConn,
nt: Notify<'_>,
@ -492,7 +495,7 @@ async fn post_access_file(
}
if send.password_hash.is_some() {
match data.into_inner().data.Password {
match data.into_inner().password {
Some(ref p) if send.check_password(p) => { /* Nothing to do here */ }
Some(_) => err!("Invalid password."),
None => err_code!("Password not provided", 401),
@ -515,9 +518,9 @@ async fn post_access_file(
let token_claims = crate::auth::generate_send_claims(send_id, file_id);
let token = crate::auth::encode_jwt(&token_claims);
Ok(Json(json!({
"Object": "send-fileDownload",
"Id": file_id,
"Url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
"object": "send-fileDownload",
"id": file_id,
"url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
})))
}
@ -532,16 +535,10 @@ async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Opt
}
#[put("/sends/<id>", data = "<data>")]
async fn put_send(
id: &str,
data: JsonUpcase<SendData>,
headers: Headers,
mut conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
async fn put_send(id: &str, data: Json<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
enforce_disable_send_policy(&headers, &mut conn).await?;
let data: SendData = data.into_inner().data;
let data: SendData = data.into_inner();
enforce_disable_hide_email_policy(&data, &headers, &mut conn).await?;
let mut send = match Send::find_by_uuid(id, &mut conn).await {
@ -549,19 +546,38 @@ async fn put_send(
None => err!("Send not found"),
};
update_send_from_data(&mut send, data, &headers, &mut conn, &nt, UpdateType::SyncSendUpdate).await?;
Ok(Json(send.to_json()))
}
pub async fn update_send_from_data(
send: &mut Send,
data: SendData,
headers: &Headers,
conn: &mut DbConn,
nt: &Notify<'_>,
ut: UpdateType,
) -> EmptyResult {
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
err!("Send is not owned by user")
}
if send.atype != data.Type {
if send.atype != data.r#type {
err!("Sends can't change type")
}
if data.deletion_date > Utc::now() + TimeDelta::try_days(31).unwrap() {
err!(
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
);
}
// When updating a file Send, we receive nulls in the File field, as it's immutable,
// so we only need to update the data field in the Text case
if data.Type == SendType::Text as i32 {
let data_str = if let Some(mut d) = data.Text {
d.as_object_mut().and_then(|d| d.remove("Response"));
if data.r#type == SendType::Text as i32 {
let data_str = if let Some(mut d) = data.text {
d.as_object_mut().and_then(|d| d.remove("response"));
serde_json::to_string(&d)?
} else {
err!("Send data not provided");
@ -569,39 +585,28 @@ async fn put_send(
send.data = data_str;
}
if data.DeletionDate > Utc::now() + TimeDelta::try_days(31).unwrap() {
err!(
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
);
}
send.name = data.Name;
send.akey = data.Key;
send.deletion_date = data.DeletionDate.naive_utc();
send.notes = data.Notes;
send.max_access_count = match data.MaxAccessCount {
send.name = data.name;
send.akey = data.key;
send.deletion_date = data.deletion_date.naive_utc();
send.notes = data.notes;
send.max_access_count = match data.max_access_count {
Some(m) => Some(m.into_i32()?),
_ => None,
};
send.expiration_date = data.ExpirationDate.map(|d| d.naive_utc());
send.hide_email = data.HideEmail;
send.disabled = data.Disabled;
send.expiration_date = data.expiration_date.map(|d| d.naive_utc());
send.hide_email = data.hide_email;
send.disabled = data.disabled;
// Only change the value if it's present
if let Some(password) = data.Password {
if let Some(password) = data.password {
send.set_password(Some(&password));
}
send.save(&mut conn).await?;
nt.send_send_update(
UpdateType::SyncSendUpdate,
&send,
&send.update_users_revision(&mut conn).await,
&headers.device.uuid,
&mut conn,
)
.await;
Ok(Json(send.to_json()))
send.save(conn).await?;
if ut != UpdateType::None {
nt.send_send_update(ut, send, &send.update_users_revision(conn).await, &headers.device.uuid, conn).await;
}
Ok(())
}
#[delete("/sends/<id>")]

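In the sends hunks above, the update logic of put_send is split into a reusable, public update_send_from_data (the new public id field is noted as used for key rotations), the 31-day deletion-date guard now also runs on update, and the WebSocket notification is skipped when the update type is UpdateType::None. A minimal standalone sketch of the date guard:

use chrono::{DateTime, TimeDelta, Utc};

// Reject deletion dates more than 31 days in the future, as in create_send
// and update_send_from_data above.
fn deletion_date_ok(deletion_date: DateTime<Utc>) -> bool {
    deletion_date <= Utc::now() + TimeDelta::try_days(31).unwrap()
}

fn main() {
    assert!(deletion_date_ok(Utc::now() + TimeDelta::try_days(7).unwrap()));
    assert!(!deletion_date_ok(Utc::now() + TimeDelta::try_days(60).unwrap()));
}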
View File

@ -3,10 +3,7 @@ use rocket::serde::json::Json;
use rocket::Route;
use crate::{
api::{
core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase,
PasswordOrOtpData,
},
api::{core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, PasswordOrOtpData},
auth::{ClientIp, Headers},
crypto,
db::{
@ -23,8 +20,8 @@ pub fn routes() -> Vec<Route> {
}
#[post("/two-factor/get-authenticator", data = "<data>")]
async fn generate_authenticator(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
async fn generate_authenticator(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner();
let user = headers.user;
data.validate(&user, false, &mut conn).await?;
@ -38,36 +35,32 @@ async fn generate_authenticator(data: JsonUpcase<PasswordOrOtpData>, headers: He
};
Ok(Json(json!({
"Enabled": enabled,
"Key": key,
"Object": "twoFactorAuthenticator"
"enabled": enabled,
"key": key,
"object": "twoFactorAuthenticator"
})))
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct EnableAuthenticatorData {
Key: String,
Token: NumberOrString,
MasterPasswordHash: Option<String>,
Otp: Option<String>,
key: String,
token: NumberOrString,
master_password_hash: Option<String>,
otp: Option<String>,
}
#[post("/two-factor/authenticator", data = "<data>")]
async fn activate_authenticator(
data: JsonUpcase<EnableAuthenticatorData>,
headers: Headers,
mut conn: DbConn,
) -> JsonResult {
let data: EnableAuthenticatorData = data.into_inner().data;
let key = data.Key;
let token = data.Token.into_string();
async fn activate_authenticator(data: Json<EnableAuthenticatorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableAuthenticatorData = data.into_inner();
let key = data.key;
let token = data.token.into_string();
let mut user = headers.user;
PasswordOrOtpData {
MasterPasswordHash: data.MasterPasswordHash,
Otp: data.Otp,
master_password_hash: data.master_password_hash,
otp: data.otp,
}
.validate(&user, true, &mut conn)
.await?;
@ -90,18 +83,14 @@ async fn activate_authenticator(
log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
Ok(Json(json!({
"Enabled": true,
"Key": key,
"Object": "twoFactorAuthenticator"
"enabled": true,
"key": key,
"object": "twoFactorAuthenticator"
})))
}
#[put("/two-factor/authenticator", data = "<data>")]
async fn activate_authenticator_put(
data: JsonUpcase<EnableAuthenticatorData>,
headers: Headers,
conn: DbConn,
) -> JsonResult {
async fn activate_authenticator_put(data: Json<EnableAuthenticatorData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_authenticator(data, headers, conn).await
}

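The two-factor hunks here and below all route credential checks through the shared PasswordOrOtpData helper: the camelCase request carries an optional masterPasswordHash or otp, and the handler re-wraps them and calls validate before changing 2FA settings. The real validate is defined outside this diff; the stub below is only a stand-in to show the shape of that call:

struct PasswordOrOtpData {
    master_password_hash: Option<String>,
    otp: Option<String>,
}

impl PasswordOrOtpData {
    // Stand-in only: the real helper validates against the user and the
    // database; here we just require that one of the two credentials exists.
    fn validate(&self) -> Result<(), &'static str> {
        match (&self.master_password_hash, &self.otp) {
            (None, None) => Err("No password or OTP provided"),
            _ => Ok(()),
        }
    }
}

fn main() {
    let data = PasswordOrOtpData { master_password_hash: None, otp: Some("123456".into()) };
    assert!(data.validate().is_ok());
}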
View File

@ -5,7 +5,7 @@ use rocket::Route;
use crate::{
api::{
core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult,
PasswordOrOtpData,
},
auth::Headers,
@ -92,8 +92,8 @@ impl DuoStatus {
const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>";
#[post("/two-factor/get-duo", data = "<data>")]
async fn get_duo(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
async fn get_duo(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner();
let user = headers.user;
data.validate(&user, false, &mut conn).await?;
@ -109,16 +109,16 @@ async fn get_duo(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn
let json = if let Some(data) = data {
json!({
"Enabled": enabled,
"Host": data.host,
"SecretKey": data.sk,
"IntegrationKey": data.ik,
"Object": "twoFactorDuo"
"enabled": enabled,
"host": data.host,
"secretKey": data.sk,
"integrationKey": data.ik,
"object": "twoFactorDuo"
})
} else {
json!({
"Enabled": enabled,
"Object": "twoFactorDuo"
"enabled": enabled,
"object": "twoFactorDuo"
})
};
@ -126,21 +126,21 @@ async fn get_duo(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn
}
#[derive(Deserialize)]
#[allow(non_snake_case, dead_code)]
#[serde(rename_all = "camelCase")]
struct EnableDuoData {
Host: String,
SecretKey: String,
IntegrationKey: String,
MasterPasswordHash: Option<String>,
Otp: Option<String>,
host: String,
secret_key: String,
integration_key: String,
master_password_hash: Option<String>,
otp: Option<String>,
}
impl From<EnableDuoData> for DuoData {
fn from(d: EnableDuoData) -> Self {
Self {
host: d.Host,
ik: d.IntegrationKey,
sk: d.SecretKey,
host: d.host,
ik: d.integration_key,
sk: d.secret_key,
}
}
}
@ -151,17 +151,17 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
st.is_empty() || s == DISABLED_MESSAGE_DEFAULT
}
!empty_or_default(&data.Host) && !empty_or_default(&data.SecretKey) && !empty_or_default(&data.IntegrationKey)
!empty_or_default(&data.host) && !empty_or_default(&data.secret_key) && !empty_or_default(&data.integration_key)
}
#[post("/two-factor/duo", data = "<data>")]
async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableDuoData = data.into_inner().data;
async fn activate_duo(data: Json<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableDuoData = data.into_inner();
let mut user = headers.user;
PasswordOrOtpData {
MasterPasswordHash: data.MasterPasswordHash.clone(),
Otp: data.Otp.clone(),
master_password_hash: data.master_password_hash.clone(),
otp: data.otp.clone(),
}
.validate(&user, true, &mut conn)
.await?;
@ -184,16 +184,16 @@ async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut con
log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
Ok(Json(json!({
"Enabled": true,
"Host": data.host,
"SecretKey": data.sk,
"IntegrationKey": data.ik,
"Object": "twoFactorDuo"
"enabled": true,
"host": data.host,
"secretKey": data.sk,
"integrationKey": data.ik,
"object": "twoFactorDuo"
})))
}
#[put("/two-factor/duo", data = "<data>")]
async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn activate_duo_put(data: Json<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_duo(data, headers, conn).await
}

View File

@ -5,7 +5,7 @@ use rocket::Route;
use crate::{
api::{
core::{log_user_event, two_factor::_generate_recover_code},
EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
EmptyResult, JsonResult, PasswordOrOtpData,
},
auth::Headers,
crypto,
@ -22,28 +22,28 @@ pub fn routes() -> Vec<Route> {
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct SendEmailLoginData {
Email: String,
MasterPasswordHash: String,
email: String,
master_password_hash: String,
}
/// User is trying to login and wants to use email 2FA.
/// Does not require Bearer token
#[post("/two-factor/send-email-login", data = "<data>")] // JsonResult
async fn send_email_login(data: JsonUpcase<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult {
let data: SendEmailLoginData = data.into_inner().data;
async fn send_email_login(data: Json<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult {
let data: SendEmailLoginData = data.into_inner();
use crate::db::models::User;
// Get the user
let user = match User::find_by_mail(&data.Email, &mut conn).await {
let user = match User::find_by_mail(&data.email, &mut conn).await {
Some(user) => user,
None => err!("Username or password is incorrect. Try again."),
};
// Check password
if !user.check_valid_password(&data.MasterPasswordHash) {
if !user.check_valid_password(&data.master_password_hash) {
err!("Username or password is incorrect. Try again.")
}
@ -76,8 +76,8 @@ pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
/// When user clicks on Manage email 2FA show the user the related information
#[post("/two-factor/get-email", data = "<data>")]
async fn get_email(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
async fn get_email(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner();
let user = headers.user;
data.validate(&user, false, &mut conn).await?;
@ -92,30 +92,30 @@ async fn get_email(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut co
};
Ok(Json(json!({
"Email": mfa_email,
"Enabled": enabled,
"Object": "twoFactorEmail"
"email": mfa_email,
"enabled": enabled,
"object": "twoFactorEmail"
})))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct SendEmailData {
/// Email where 2FA codes will be sent to, can be different than user email account.
Email: String,
MasterPasswordHash: Option<String>,
Otp: Option<String>,
email: String,
master_password_hash: Option<String>,
otp: Option<String>,
}
/// Send a verification email to the specified email address to check whether it exists/belongs to user.
#[post("/two-factor/send-email", data = "<data>")]
async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
let data: SendEmailData = data.into_inner().data;
async fn send_email(data: Json<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
let data: SendEmailData = data.into_inner();
let user = headers.user;
PasswordOrOtpData {
MasterPasswordHash: data.MasterPasswordHash,
Otp: data.Otp,
master_password_hash: data.master_password_hash,
otp: data.otp,
}
.validate(&user, false, &mut conn)
.await?;
@ -131,7 +131,7 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
}
let generated_token = crypto::generate_email_token(CONFIG.email_token_size());
let twofactor_data = EmailTokenData::new(data.Email, generated_token);
let twofactor_data = EmailTokenData::new(data.email, generated_token);
// Uses EmailVerificationChallenge as type to show that it's not verified yet.
let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json());
@ -143,24 +143,24 @@ async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn:
}
#[derive(Deserialize, Serialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct EmailData {
Email: String,
Token: String,
MasterPasswordHash: Option<String>,
Otp: Option<String>,
email: String,
token: String,
master_password_hash: Option<String>,
otp: Option<String>,
}
/// Verify email belongs to user and can be used for 2FA email codes.
#[put("/two-factor/email", data = "<data>")]
async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EmailData = data.into_inner().data;
async fn email(data: Json<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EmailData = data.into_inner();
let mut user = headers.user;
// This is the last step in the verification process, delete the otp directly afterwards
PasswordOrOtpData {
MasterPasswordHash: data.MasterPasswordHash,
Otp: data.Otp,
master_password_hash: data.master_password_hash,
otp: data.otp,
}
.validate(&user, true, &mut conn)
.await?;
@ -176,7 +176,7 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn)
_ => err!("No token available"),
};
if !crypto::ct_eq(issued_token, data.Token) {
if !crypto::ct_eq(issued_token, data.token) {
err!("Token is invalid")
}
@ -190,9 +190,9 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn)
log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
Ok(Json(json!({
"Email": email_data.email,
"Enabled": "true",
"Object": "twoFactorEmail"
"email": email_data.email,
"enabled": "true",
"object": "twoFactorEmail"
})))
}

View File

@ -7,7 +7,7 @@ use serde_json::Value;
use crate::{
api::{
core::{log_event, log_user_event},
EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
EmptyResult, JsonResult, PasswordOrOtpData,
},
auth::{ClientHeaders, Headers},
crypto,
@ -50,52 +50,52 @@ async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json<Value> {
let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();
Json(json!({
"Data": twofactors_json,
"Object": "list",
"ContinuationToken": null,
"data": twofactors_json,
"object": "list",
"continuationToken": null,
}))
}
#[post("/two-factor/get-recover", data = "<data>")]
async fn get_recover(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
async fn get_recover(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner();
let user = headers.user;
data.validate(&user, true, &mut conn).await?;
Ok(Json(json!({
"Code": user.totp_recover,
"Object": "twoFactorRecover"
"code": user.totp_recover,
"object": "twoFactorRecover"
})))
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct RecoverTwoFactor {
MasterPasswordHash: String,
Email: String,
RecoveryCode: String,
master_password_hash: String,
email: String,
recovery_code: String,
}
#[post("/two-factor/recover", data = "<data>")]
async fn recover(data: JsonUpcase<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult {
let data: RecoverTwoFactor = data.into_inner().data;
async fn recover(data: Json<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult {
let data: RecoverTwoFactor = data.into_inner();
use crate::db::models::User;
// Get the user
let mut user = match User::find_by_mail(&data.Email, &mut conn).await {
let mut user = match User::find_by_mail(&data.email, &mut conn).await {
Some(user) => user,
None => err!("Username or password is incorrect. Try again."),
};
// Check password
if !user.check_valid_password(&data.MasterPasswordHash) {
if !user.check_valid_password(&data.master_password_hash) {
err!("Username or password is incorrect. Try again.")
}
// Check if recovery code is correct
if !user.check_valid_recovery_code(&data.RecoveryCode) {
if !user.check_valid_recovery_code(&data.recovery_code) {
err!("Recovery code is incorrect. Try again.")
}
@ -127,27 +127,27 @@ async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) {
}
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct DisableTwoFactorData {
MasterPasswordHash: Option<String>,
Otp: Option<String>,
Type: NumberOrString,
master_password_hash: Option<String>,
otp: Option<String>,
r#type: NumberOrString,
}
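Editorial aside (not part of the diff): the type field above is a NumberOrString, so clients may send either "type": 4 or "type": "4". The real type lives in util.rs and is not shown in this diff; the sketch below is only the usual serde pattern behind such a field, with illustrative names:
#[derive(serde::Deserialize)]
#[serde(untagged)]
enum NumOrStr {
    Num(i32),
    Str(String),
}
impl NumOrStr {
    fn into_i32(self) -> Result<i32, std::num::ParseIntError> {
        match self {
            NumOrStr::Num(n) => Ok(n),     // "type": 4
            NumOrStr::Str(s) => s.parse(), // "type": "4"
        }
    }
}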
#[post("/two-factor/disable", data = "<data>")]
async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: DisableTwoFactorData = data.into_inner().data;
async fn disable_twofactor(data: Json<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: DisableTwoFactorData = data.into_inner();
let user = headers.user;
// Delete directly after a valid token has been provided
PasswordOrOtpData {
MasterPasswordHash: data.MasterPasswordHash,
Otp: data.Otp,
master_password_hash: data.master_password_hash,
otp: data.otp,
}
.validate(&user, true, &mut conn)
.await?;
let type_ = data.Type.into_i32()?;
let type_ = data.r#type.into_i32()?;
if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
twofactor.delete(&mut conn).await?;
@ -160,14 +160,14 @@ async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Head
}
Ok(Json(json!({
"Enabled": false,
"Type": type_,
"Object": "twoFactorProvider"
"enabled": false,
"type": type_,
"object": "twoFactorProvider"
})))
}
#[put("/two-factor/disable", data = "<data>")]
async fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn disable_twofactor_put(data: Json<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
disable_twofactor(data, headers, conn).await
}

View File

@ -1,8 +1,8 @@
use chrono::{DateTime, TimeDelta, Utc};
use rocket::Route;
use rocket::{serde::json::Json, Route};
use crate::{
api::{EmptyResult, JsonUpcase},
api::EmptyResult,
auth::Headers,
crypto,
db::{
@ -18,7 +18,7 @@ pub fn routes() -> Vec<Route> {
}
/// Data stored in the TwoFactor table in the db
#[derive(Serialize, Deserialize, Debug)]
#[derive(Debug, Serialize, Deserialize)]
pub struct ProtectedActionData {
/// Token issued to validate the protected action
pub token: String,
@ -82,23 +82,24 @@ async fn request_otp(headers: Headers, mut conn: DbConn) -> EmptyResult {
}
#[derive(Deserialize, Serialize, Debug)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct ProtectedActionVerify {
OTP: String,
#[serde(rename = "OTP", alias = "otp")]
otp: String,
}
#[post("/accounts/verify-otp", data = "<data>")]
async fn verify_otp(data: JsonUpcase<ProtectedActionVerify>, headers: Headers, mut conn: DbConn) -> EmptyResult {
async fn verify_otp(data: Json<ProtectedActionVerify>, headers: Headers, mut conn: DbConn) -> EmptyResult {
if !CONFIG.mail_enabled() {
err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.");
}
let user = headers.user;
let data: ProtectedActionVerify = data.into_inner().data;
let data: ProtectedActionVerify = data.into_inner();
// Delete the token after one validation attempt
// This endpoint only gets called for the vault export, and doesn't need a second attempt
validate_protected_action_otp(&data.OTP, &user.uuid, true, &mut conn).await
validate_protected_action_otp(&data.otp, &user.uuid, true, &mut conn).await
}
pub async fn validate_protected_action_otp(

View File

@ -7,7 +7,7 @@ use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState,
use crate::{
api::{
core::{log_user_event, two_factor::_generate_recover_code},
EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
EmptyResult, JsonResult, PasswordOrOtpData,
},
auth::Headers,
db::{
@ -96,20 +96,20 @@ pub struct WebauthnRegistration {
impl WebauthnRegistration {
fn to_json(&self) -> Value {
json!({
"Id": self.id,
"Name": self.name,
"id": self.id,
"name": self.name,
"migrated": self.migrated,
})
}
}
#[post("/two-factor/get-webauthn", data = "<data>")]
async fn get_webauthn(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
async fn get_webauthn(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
if !CONFIG.domain_set() {
err!("`DOMAIN` environment variable is not set. Webauthn disabled")
}
let data: PasswordOrOtpData = data.into_inner().data;
let data: PasswordOrOtpData = data.into_inner();
let user = headers.user;
data.validate(&user, false, &mut conn).await?;
@ -118,19 +118,15 @@ async fn get_webauthn(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut
let registrations_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
Ok(Json(json!({
"Enabled": enabled,
"Keys": registrations_json,
"Object": "twoFactorWebAuthn"
"enabled": enabled,
"keys": registrations_json,
"object": "twoFactorWebAuthn"
})))
}
#[post("/two-factor/get-webauthn-challenge", data = "<data>")]
async fn generate_webauthn_challenge(
data: JsonUpcase<PasswordOrOtpData>,
headers: Headers,
mut conn: DbConn,
) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
async fn generate_webauthn_challenge(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner();
let user = headers.user;
data.validate(&user, false, &mut conn).await?;
@ -161,102 +157,94 @@ async fn generate_webauthn_challenge(
}
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct EnableWebauthnData {
Id: NumberOrString, // 1..5
Name: String,
DeviceResponse: RegisterPublicKeyCredentialCopy,
MasterPasswordHash: Option<String>,
Otp: Option<String>,
id: NumberOrString, // 1..5
name: String,
device_response: RegisterPublicKeyCredentialCopy,
master_password_hash: Option<String>,
otp: Option<String>,
}
// This is copied from RegisterPublicKeyCredential to change the Response objects casing
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct RegisterPublicKeyCredentialCopy {
pub Id: String,
pub RawId: Base64UrlSafeData,
pub Response: AuthenticatorAttestationResponseRawCopy,
pub Type: String,
pub id: String,
pub raw_id: Base64UrlSafeData,
pub response: AuthenticatorAttestationResponseRawCopy,
pub r#type: String,
}
// This is copied from AuthenticatorAttestationResponseRaw to change clientDataJSON to clientDataJson
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
pub struct AuthenticatorAttestationResponseRawCopy {
pub AttestationObject: Base64UrlSafeData,
pub ClientDataJson: Base64UrlSafeData,
#[serde(rename = "AttestationObject", alias = "attestationObject")]
pub attestation_object: Base64UrlSafeData,
#[serde(rename = "clientDataJson", alias = "clientDataJSON")]
pub client_data_json: Base64UrlSafeData,
}
impl From<RegisterPublicKeyCredentialCopy> for RegisterPublicKeyCredential {
fn from(r: RegisterPublicKeyCredentialCopy) -> Self {
Self {
id: r.Id,
raw_id: r.RawId,
id: r.id,
raw_id: r.raw_id,
response: AuthenticatorAttestationResponseRaw {
attestation_object: r.Response.AttestationObject,
client_data_json: r.Response.ClientDataJson,
attestation_object: r.response.attestation_object,
client_data_json: r.response.client_data_json,
},
type_: r.Type,
type_: r.r#type,
}
}
}
// This is copied from PublicKeyCredential to change the Response objects casing
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
pub struct PublicKeyCredentialCopy {
pub Id: String,
pub RawId: Base64UrlSafeData,
pub Response: AuthenticatorAssertionResponseRawCopy,
pub Extensions: Option<AuthenticationExtensionsClientOutputsCopy>,
pub Type: String,
pub id: String,
pub raw_id: Base64UrlSafeData,
pub response: AuthenticatorAssertionResponseRawCopy,
pub extensions: Option<AuthenticationExtensionsClientOutputs>,
pub r#type: String,
}
// This is copied from AuthenticatorAssertionResponseRaw to change clientDataJSON to clientDataJson
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
pub struct AuthenticatorAssertionResponseRawCopy {
pub AuthenticatorData: Base64UrlSafeData,
pub ClientDataJson: Base64UrlSafeData,
pub Signature: Base64UrlSafeData,
pub UserHandle: Option<Base64UrlSafeData>,
}
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
pub struct AuthenticationExtensionsClientOutputsCopy {
#[serde(default)]
pub Appid: bool,
pub authenticator_data: Base64UrlSafeData,
#[serde(rename = "clientDataJson", alias = "clientDataJSON")]
pub client_data_json: Base64UrlSafeData,
pub signature: Base64UrlSafeData,
pub user_handle: Option<Base64UrlSafeData>,
}
impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
fn from(r: PublicKeyCredentialCopy) -> Self {
Self {
id: r.Id,
raw_id: r.RawId,
id: r.id,
raw_id: r.raw_id,
response: AuthenticatorAssertionResponseRaw {
authenticator_data: r.Response.AuthenticatorData,
client_data_json: r.Response.ClientDataJson,
signature: r.Response.Signature,
user_handle: r.Response.UserHandle,
authenticator_data: r.response.authenticator_data,
client_data_json: r.response.client_data_json,
signature: r.response.signature,
user_handle: r.response.user_handle,
},
extensions: r.Extensions.map(|e| AuthenticationExtensionsClientOutputs {
appid: e.Appid,
}),
type_: r.Type,
extensions: r.extensions,
type_: r.r#type,
}
}
}
#[post("/two-factor/webauthn", data = "<data>")]
async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableWebauthnData = data.into_inner().data;
async fn activate_webauthn(data: Json<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableWebauthnData = data.into_inner();
let mut user = headers.user;
PasswordOrOtpData {
MasterPasswordHash: data.MasterPasswordHash,
Otp: data.Otp,
master_password_hash: data.master_password_hash,
otp: data.otp,
}
.validate(&user, true, &mut conn)
.await?;
@ -274,13 +262,13 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header
// Verify the credentials with the saved state
let (credential, _data) =
WebauthnConfig::load().register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?;
WebauthnConfig::load().register_credential(&data.device_response.into(), &state, |_| Ok(false))?;
let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &mut conn).await?.1;
// TODO: Check for repeated IDs
registrations.push(WebauthnRegistration {
id: data.Id.into_i32()?,
name: data.Name,
id: data.id.into_i32()?,
name: data.name,
migrated: false,
credential,
@ -296,28 +284,28 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header
let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
Ok(Json(json!({
"Enabled": true,
"Keys": keys_json,
"Object": "twoFactorU2f"
"enabled": true,
"keys": keys_json,
"object": "twoFactorU2f"
})))
}
#[put("/two-factor/webauthn", data = "<data>")]
async fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn activate_webauthn_put(data: Json<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_webauthn(data, headers, conn).await
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct DeleteU2FData {
Id: NumberOrString,
MasterPasswordHash: String,
id: NumberOrString,
master_password_hash: String,
}
#[delete("/two-factor/webauthn", data = "<data>")]
async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let id = data.data.Id.into_i32()?;
if !headers.user.check_valid_password(&data.data.MasterPasswordHash) {
async fn delete_webauthn(data: Json<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let id = data.id.into_i32()?;
if !headers.user.check_valid_password(&data.master_password_hash) {
err!("Invalid password");
}
@ -358,9 +346,9 @@ async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, mut
let keys_json: Vec<Value> = data.iter().map(WebauthnRegistration::to_json).collect();
Ok(Json(json!({
"Enabled": true,
"Keys": keys_json,
"Object": "twoFactorU2f"
"enabled": true,
"keys": keys_json,
"object": "twoFactorU2f"
})))
}
@ -413,8 +401,8 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut
),
};
let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?;
let rsp: PublicKeyCredential = rsp.data.into();
let rsp: PublicKeyCredentialCopy = serde_json::from_str(response)?;
let rsp: PublicKeyCredential = rsp.into();
let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1;

View File

@ -1,12 +1,12 @@
use rocket::serde::json::Json;
use rocket::Route;
use serde_json::Value;
use yubico::{config::Config, verify};
use yubico::{config::Config, verify_async};
use crate::{
api::{
core::{log_user_event, two_factor::_generate_recover_code},
EmptyResult, JsonResult, JsonUpcase, PasswordOrOtpData,
EmptyResult, JsonResult, PasswordOrOtpData,
},
auth::Headers,
db::{
@ -21,28 +21,30 @@ pub fn routes() -> Vec<Route> {
routes![generate_yubikey, activate_yubikey, activate_yubikey_put,]
}
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct EnableYubikeyData {
Key1: Option<String>,
Key2: Option<String>,
Key3: Option<String>,
Key4: Option<String>,
Key5: Option<String>,
Nfc: bool,
MasterPasswordHash: Option<String>,
Otp: Option<String>,
key1: Option<String>,
key2: Option<String>,
key3: Option<String>,
key4: Option<String>,
key5: Option<String>,
nfc: bool,
master_password_hash: Option<String>,
otp: Option<String>,
}
#[derive(Deserialize, Serialize, Debug)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
pub struct YubikeyMetadata {
Keys: Vec<String>,
pub Nfc: bool,
#[serde(rename = "keys", alias = "Keys")]
keys: Vec<String>,
#[serde(rename = "nfc", alias = "Nfc")]
pub nfc: bool,
}
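Editorial aside (not part of the diff): rename controls the key written when serializing, while alias keeps older rows readable, which matters here because YubikeyMetadata is persisted as JSON in the twofactor table. A self-contained sketch of that round-trip, with a made-up key id:
#[derive(serde::Serialize, serde::Deserialize)]
struct MetadataExample {
    #[serde(rename = "keys", alias = "Keys")]
    keys: Vec<String>,
    #[serde(rename = "nfc", alias = "Nfc")]
    nfc: bool,
}
fn roundtrip_example() {
    let legacy = r#"{"Keys":["cccccccccccc"],"Nfc":true}"#;  // row written before this change
    let current = r#"{"keys":["cccccccccccc"],"nfc":true}"#; // row written after this change
    let a: MetadataExample = serde_json::from_str(legacy).unwrap();
    let b: MetadataExample = serde_json::from_str(current).unwrap();
    assert_eq!(a.keys, b.keys);
    // Re-serializing always produces the new lowercase keys.
    assert_eq!(serde_json::to_string(&a).unwrap(), current);
}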
fn parse_yubikeys(data: &EnableYubikeyData) -> Vec<String> {
let data_keys = [&data.Key1, &data.Key2, &data.Key3, &data.Key4, &data.Key5];
let data_keys = [&data.key1, &data.key2, &data.key3, &data.key4, &data.key5];
data_keys.iter().filter_map(|e| e.as_ref().cloned()).collect()
}
@ -74,21 +76,18 @@ async fn verify_yubikey_otp(otp: String) -> EmptyResult {
let config = Config::default().set_client_id(yubico_id).set_key(yubico_secret);
match CONFIG.yubico_server() {
Some(server) => {
tokio::task::spawn_blocking(move || verify(otp, config.set_api_hosts(vec![server]))).await.unwrap()
}
None => tokio::task::spawn_blocking(move || verify(otp, config)).await.unwrap(),
Some(server) => verify_async(otp, config.set_api_hosts(vec![server])).await,
None => verify_async(otp, config).await,
}
.map_res("Failed to verify OTP")
.and(Ok(()))
}
#[post("/two-factor/get-yubikey", data = "<data>")]
async fn generate_yubikey(data: JsonUpcase<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
async fn generate_yubikey(data: Json<PasswordOrOtpData>, headers: Headers, mut conn: DbConn) -> JsonResult {
// Make sure the credentials are set
get_yubico_credentials()?;
let data: PasswordOrOtpData = data.into_inner().data;
let data: PasswordOrOtpData = data.into_inner();
let user = headers.user;
data.validate(&user, false, &mut conn).await?;
@ -101,29 +100,29 @@ async fn generate_yubikey(data: JsonUpcase<PasswordOrOtpData>, headers: Headers,
if let Some(r) = r {
let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?;
let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
let mut result = jsonify_yubikeys(yubikey_metadata.keys);
result["Enabled"] = Value::Bool(true);
result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
result["Object"] = Value::String("twoFactorU2f".to_owned());
result["enabled"] = Value::Bool(true);
result["nfc"] = Value::Bool(yubikey_metadata.nfc);
result["object"] = Value::String("twoFactorU2f".to_owned());
Ok(Json(result))
} else {
Ok(Json(json!({
"Enabled": false,
"Object": "twoFactorU2f",
"enabled": false,
"object": "twoFactorU2f",
})))
}
}
#[post("/two-factor/yubikey", data = "<data>")]
async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableYubikeyData = data.into_inner().data;
async fn activate_yubikey(data: Json<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
let data: EnableYubikeyData = data.into_inner();
let mut user = headers.user;
PasswordOrOtpData {
MasterPasswordHash: data.MasterPasswordHash.clone(),
Otp: data.Otp.clone(),
master_password_hash: data.master_password_hash.clone(),
otp: data.otp.clone(),
}
.validate(&user, true, &mut conn)
.await?;
@ -139,8 +138,8 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
if yubikeys.is_empty() {
return Ok(Json(json!({
"Enabled": false,
"Object": "twoFactorU2f",
"enabled": false,
"object": "twoFactorU2f",
})));
}
@ -157,8 +156,8 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (x[..12]).to_owned()).collect();
let yubikey_metadata = YubikeyMetadata {
Keys: yubikey_ids,
Nfc: data.Nfc,
keys: yubikey_ids,
nfc: data.nfc,
};
yubikey_data.data = serde_json::to_string(&yubikey_metadata).unwrap();
@ -168,17 +167,17 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await;
let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
let mut result = jsonify_yubikeys(yubikey_metadata.keys);
result["Enabled"] = Value::Bool(true);
result["Nfc"] = Value::Bool(yubikey_metadata.Nfc);
result["Object"] = Value::String("twoFactorU2f".to_owned());
result["enabled"] = Value::Bool(true);
result["nfc"] = Value::Bool(yubikey_metadata.nfc);
result["object"] = Value::String("twoFactorU2f".to_owned());
Ok(Json(result))
}
#[put("/two-factor/yubikey", data = "<data>")]
async fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
async fn activate_yubikey_put(data: Json<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
activate_yubikey(data, headers, conn).await
}
@ -190,14 +189,10 @@ pub async fn validate_yubikey_login(response: &str, twofactor_data: &str) -> Emp
let yubikey_metadata: YubikeyMetadata = serde_json::from_str(twofactor_data).expect("Can't parse Yubikey Metadata");
let response_id = &response[..12];
if !yubikey_metadata.Keys.contains(&response_id.to_owned()) {
if !yubikey_metadata.keys.contains(&response_id.to_owned()) {
err!("Given Yubikey is not registered");
}
let result = verify_yubikey_otp(response.to_owned()).await;
match result {
Ok(_answer) => Ok(()),
Err(_e) => err!("Failed to verify Yubikey against OTP server"),
}
verify_yubikey_otp(response.to_owned()).await.map_res("Failed to verify Yubikey against OTP server")?;
Ok(())
}

View File

@ -1,6 +1,6 @@
use std::{
net::IpAddr,
sync::Arc,
sync::{Arc, Mutex},
time::{Duration, SystemTime},
};
@ -16,14 +16,13 @@ use rocket::{http::ContentType, response::Redirect, Route};
use tokio::{
fs::{create_dir_all, remove_file, symlink_metadata, File},
io::{AsyncReadExt, AsyncWriteExt},
net::lookup_host,
};
use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer};
use crate::{
error::Error,
util::{get_reqwest_client_builder, Cached},
util::{get_reqwest_client_builder, Cached, CustomDnsResolver, CustomResolverError},
CONFIG,
};
@ -49,48 +48,32 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
let icon_download_timeout = Duration::from_secs(CONFIG.icon_download_timeout());
let pool_idle_timeout = Duration::from_secs(10);
// Reuse the client between requests
let client = get_reqwest_client_builder()
get_reqwest_client_builder()
.cookie_provider(Arc::clone(&cookie_store))
.timeout(icon_download_timeout)
.pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
.pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
.hickory_dns(true)
.default_headers(default_headers.clone());
match client.build() {
Ok(client) => client,
Err(e) => {
error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
get_reqwest_client_builder()
.cookie_provider(cookie_store)
.timeout(icon_download_timeout)
.pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
.pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
.hickory_dns(false)
.default_headers(default_headers)
.build()
.expect("Failed to build client")
}
}
.dns_resolver(CustomDnsResolver::instance())
.default_headers(default_headers.clone())
.build()
.expect("Failed to build client")
});
// Build Regex only once since this takes a lot of time.
static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());
// Special HashMap which holds the user defined Regex to speedup matching the regex.
static ICON_BLACKLIST_REGEX: Lazy<dashmap::DashMap<String, Regex>> = Lazy::new(dashmap::DashMap::new);
async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
#[get("/<domain>/icon.png")]
fn icon_external(domain: &str) -> Option<Redirect> {
if !is_valid_domain(domain) {
warn!("Invalid domain: {}", domain);
return None;
}
if check_domain_blacklist_reason(domain).await.is_some() {
if is_domain_blacklisted(domain) {
return None;
}
let url = template.replace("{}", domain);
let url = CONFIG._icon_service_url().replace("{}", domain);
match CONFIG.icon_redirect_code() {
301 => Some(Redirect::moved(url)), // legacy permanent redirect
302 => Some(Redirect::found(url)), // legacy temporary redirect
@ -103,11 +86,6 @@ async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
}
}
#[get("/<domain>/icon.png")]
async fn icon_external(domain: &str) -> Option<Redirect> {
icon_redirect(domain, &CONFIG._icon_service_url()).await
}
#[get("/<domain>/icon.png")]
async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");
@ -166,153 +144,28 @@ fn is_valid_domain(domain: &str) -> bool {
true
}
/// TODO: This is extracted from IpAddr::is_global, which is unstable:
/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
#[allow(clippy::nonminimal_bool)]
#[cfg(not(feature = "unstable"))]
fn is_global(ip: IpAddr) -> bool {
match ip {
IpAddr::V4(ip) => {
// check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
// globally routable addresses in the 192.0.0.0/24 range.
if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a {
return true;
}
!ip.is_private()
&& !ip.is_loopback()
&& !ip.is_link_local()
&& !ip.is_broadcast()
&& !ip.is_documentation()
&& !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000))
&& !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
&& !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast())
&& !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18)
// Make sure the address is not in 0.0.0.0/8
&& ip.octets()[0] != 0
}
IpAddr::V6(ip) => {
if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 {
true
} else {
!ip.is_multicast()
&& !ip.is_loopback()
&& !((ip.segments()[0] & 0xffc0) == 0xfe80)
&& !((ip.segments()[0] & 0xfe00) == 0xfc00)
&& !ip.is_unspecified()
&& !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8))
}
}
}
}
pub fn is_domain_blacklisted(domain: &str) -> bool {
let Some(config_blacklist) = CONFIG.icon_blacklist_regex() else {
return false;
};
#[cfg(feature = "unstable")]
fn is_global(ip: IpAddr) -> bool {
ip.is_global()
}
// Compiled domain blacklist
static COMPILED_BLACKLIST: Mutex<Option<(String, Regex)>> = Mutex::new(None);
let mut guard = COMPILED_BLACKLIST.lock().unwrap();
/// These are some tests to check that the implementations match
/// The IPv4 can be all checked in 5 mins or so and they are correct as of nightly 2020-07-11
/// The IPV6 can't be checked in a reasonable time, so we check about ten billion random ones, so far correct
/// Note that the is_global implementation is subject to change as new IP RFCs are created
///
/// To run while showing progress output:
/// cargo test --features sqlite,unstable -- --nocapture --ignored
#[cfg(test)]
#[cfg(feature = "unstable")]
mod tests {
use super::*;
#[test]
#[ignore]
fn test_ipv4_global() {
for a in 0..u8::MAX {
println!("Iter: {}/255", a);
for b in 0..u8::MAX {
for c in 0..u8::MAX {
for d in 0..u8::MAX {
let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
assert_eq!(ip.is_global(), is_global(ip))
}
}
}
// If the stored regex is up to date, use it
if let Some((value, regex)) = &*guard {
if value == &config_blacklist {
return regex.is_match(domain);
}
}
#[test]
#[ignore]
fn test_ipv6_global() {
use ring::rand::{SecureRandom, SystemRandom};
let mut v = [0u8; 16];
let rand = SystemRandom::new();
for i in 0..1_000 {
println!("Iter: {}/1_000", i);
for _ in 0..10_000_000 {
rand.fill(&mut v).expect("Error generating random values");
let ip = IpAddr::V6(std::net::Ipv6Addr::new(
(v[14] as u16) << 8 | v[15] as u16,
(v[12] as u16) << 8 | v[13] as u16,
(v[10] as u16) << 8 | v[11] as u16,
(v[8] as u16) << 8 | v[9] as u16,
(v[6] as u16) << 8 | v[7] as u16,
(v[4] as u16) << 8 | v[5] as u16,
(v[2] as u16) << 8 | v[3] as u16,
(v[0] as u16) << 8 | v[1] as u16,
));
assert_eq!(ip.is_global(), is_global(ip))
}
}
}
}
// If we don't have a regex stored, or it's not up to date, recreate it
let regex = Regex::new(&config_blacklist).unwrap();
let is_match = regex.is_match(domain);
*guard = Some((config_blacklist, regex));
#[derive(Clone)]
enum DomainBlacklistReason {
Regex,
IP,
}
use cached::proc_macro::cached;
#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
async fn check_domain_blacklist_reason(domain: &str) -> Option<DomainBlacklistReason> {
// First check the blacklist regex if there is a match.
// This prevents the blocked domain(s) from being leaked via a DNS lookup.
if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
// Use the pre-generate Regex stored in a Lazy HashMap if there's one, else generate it.
let is_match = if let Some(regex) = ICON_BLACKLIST_REGEX.get(&blacklist) {
regex.is_match(domain)
} else {
// Clear the current list if the previous key doesn't exists.
// To prevent growing of the HashMap after someone has changed it via the admin interface.
if ICON_BLACKLIST_REGEX.len() >= 1 {
ICON_BLACKLIST_REGEX.clear();
}
// Generate the regex to store in too the Lazy Static HashMap.
let blacklist_regex = Regex::new(&blacklist).unwrap();
let is_match = blacklist_regex.is_match(domain);
ICON_BLACKLIST_REGEX.insert(blacklist.clone(), blacklist_regex);
is_match
};
if is_match {
debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
return Some(DomainBlacklistReason::Regex);
}
}
if CONFIG.icon_blacklist_non_global_ips() {
if let Ok(s) = lookup_host((domain, 0)).await {
for addr in s {
if !is_global(addr.ip()) {
debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain);
return Some(DomainBlacklistReason::IP);
}
}
}
}
None
is_match
}
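Editorial note (not part of the diff): the compiled regex is cached together with the exact blacklist string it was built from, so changing ICON_BLACKLIST_REGEX via the admin panel simply causes the next call to recompile; a stale pattern is never reused and no restart is needed.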
async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
@ -342,6 +195,13 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
}
Err(e) => {
// If this error comes from the custom resolver, the domain is blacklisted or resolves to a
// non-global IP; don't save the miss file in that case to avoid leaking it
if let Some(error) = CustomResolverError::downcast_ref(&e) {
warn!("{error}");
return None;
}
warn!("Unable to download icon: {:?}", e);
let miss_indicator = path + ".miss";
save_icon(&miss_indicator, &[]).await;
@ -491,42 +351,48 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
let ssldomain = format!("https://{domain}");
let httpdomain = format!("http://{domain}");
// First check the domain as given during the request for both HTTPS and HTTP.
let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await {
Ok(c) => Ok(c),
Err(e) => {
let mut sub_resp = Err(e);
// First check the domain as given during the request for HTTPS.
let resp = match get_page(&ssldomain).await {
Err(e) if CustomResolverError::downcast_ref(&e).is_none() => {
// If we get an error that is not caused by the blacklist, we retry with HTTP
match get_page(&httpdomain).await {
mut sub_resp @ Err(_) => {
// When the domain is not an IP and has more than one dot, remove all subdomains.
let is_ip = domain.parse::<IpAddr>();
if is_ip.is_err() && domain.matches('.').count() > 1 {
let mut domain_parts = domain.split('.');
let base_domain = format!(
"{base}.{tld}",
tld = domain_parts.next_back().unwrap(),
base = domain_parts.next_back().unwrap()
);
if is_valid_domain(&base_domain) {
let sslbase = format!("https://{base_domain}");
let httpbase = format!("http://{base_domain}");
debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
// When the domain is not an IP and has more than one dot, remove all subdomains.
let is_ip = domain.parse::<IpAddr>();
if is_ip.is_err() && domain.matches('.').count() > 1 {
let mut domain_parts = domain.split('.');
let base_domain = format!(
"{base}.{tld}",
tld = domain_parts.next_back().unwrap(),
base = domain_parts.next_back().unwrap()
);
if is_valid_domain(&base_domain) {
let sslbase = format!("https://{base_domain}");
let httpbase = format!("http://{base_domain}");
debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
}
sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
}
// When the domain is not an IP and has fewer than 2 dots, try to add www. in front of it.
} else if is_ip.is_err() && domain.matches('.').count() < 2 {
let www_domain = format!("www.{domain}");
if is_valid_domain(&www_domain) {
let sslwww = format!("https://{www_domain}");
let httpwww = format!("http://{www_domain}");
debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
// When the domain is not an IP and has fewer than 2 dots, try to add www. in front of it.
} else if is_ip.is_err() && domain.matches('.').count() < 2 {
let www_domain = format!("www.{domain}");
if is_valid_domain(&www_domain) {
let sslwww = format!("https://{www_domain}");
let httpwww = format!("http://{www_domain}");
debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
}
}
sub_resp
}
res => res,
}
sub_resp
}
// If we get a result or a blacklist error, just continue
res => res,
};
// Create the iconlist
@ -573,21 +439,12 @@ async fn get_page(url: &str) -> Result<Response, Error> {
}
async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
match check_domain_blacklist_reason(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
Some(DomainBlacklistReason::Regex) => warn!("Favicon '{}' is from a blacklisted domain!", url),
Some(DomainBlacklistReason::IP) => warn!("Favicon '{}' is hosted on a non-global IP!", url),
None => (),
}
let mut client = CLIENT.get(url);
if !referer.is_empty() {
client = client.header("Referer", referer)
}
match client.send().await {
Ok(c) => c.error_for_status().map_err(Into::into),
Err(e) => err_silent!(format!("{e}")),
}
Ok(client.send().await?.error_for_status()?)
}
/// Returns a Integer with the priority of the type of the icon which to prefer.
@ -670,12 +527,6 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {
}
async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
match check_domain_blacklist_reason(domain).await {
Some(DomainBlacklistReason::Regex) => err_silent!("Domain is blacklisted", domain),
Some(DomainBlacklistReason::IP) => err_silent!("Host resolves to a non-global IP", domain),
None => (),
}
let icon_result = get_icon_url(domain).await?;
let mut buffer = Bytes::new();
@ -711,22 +562,19 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
_ => debug!("Extracted icon from data:image uri is invalid"),
};
} else {
match get_page_with_referer(&icon.href, &icon_result.referer).await {
Ok(res) => {
buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
let res = get_page_with_referer(&icon.href, &icon_result.referer).await?;
// Check if the icon type is allowed, else try an icon from the list.
icon_type = get_icon_type(&buffer);
if icon_type.is_none() {
buffer.clear();
debug!("Icon from {}, is not a valid image type", icon.href);
continue;
}
info!("Downloaded icon from {}", icon.href);
break;
}
Err(e) => debug!("{:?}", e),
};
buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)
// Check if the icon type is allowed, else try an icon from the list.
icon_type = get_icon_type(&buffer);
if icon_type.is_none() {
buffer.clear();
debug!("Icon from {}, is not a valid image type", icon.href);
continue;
}
info!("Downloaded icon from {}", icon.href);
break;
}
}

View File

@ -15,7 +15,7 @@ use crate::{
two_factor::{authenticator, duo, email, enforce_2fa_policy, webauthn, yubikey},
},
push::register_push_device,
ApiResult, EmptyResult, JsonResult, JsonUpcase,
ApiResult, EmptyResult, JsonResult,
},
auth::{generate_organization_api_key_login_claims, ClientHeaders, ClientIp},
db::{models::*, DbConn},
@ -295,7 +295,12 @@ async fn _password_login(
"KdfIterations": user.client_kdf_iter,
"KdfMemory": user.client_kdf_memory,
"KdfParallelism": user.client_kdf_parallelism,
"ResetMasterPassword": false,// TODO: Same as above
"ResetMasterPassword": false, // TODO: Same as above
"ForcePasswordReset": false,
"MasterPasswordPolicy": {
"object": "masterPasswordPolicy",
},
"scope": scope,
"unofficialServer": true,
"UserDecryptionOptions": {
@ -597,7 +602,7 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
let yubikey_metadata: yubikey::YubikeyMetadata = serde_json::from_str(&twofactor.data)?;
result["TwoFactorProviders2"][provider.to_string()] = json!({
"Nfc": yubikey_metadata.Nfc,
"Nfc": yubikey_metadata.nfc,
})
}
@ -626,19 +631,18 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
}
#[post("/accounts/prelogin", data = "<data>")]
async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
async fn prelogin(data: Json<PreloginData>, conn: DbConn) -> Json<Value> {
_prelogin(data, conn).await
}
#[post("/accounts/register", data = "<data>")]
async fn identity_register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
async fn identity_register(data: Json<RegisterData>, conn: DbConn) -> JsonResult {
_register(data, conn).await
}
// https://github.com/bitwarden/jslib/blob/master/common/src/models/request/tokenRequest.ts
// https://github.com/bitwarden/mobile/blob/master/src/Core/Models/Request/TokenRequest.cs
#[derive(Debug, Clone, Default, FromForm)]
#[allow(non_snake_case)]
struct ConnectData {
#[field(name = uncased("grant_type"))]
#[field(name = uncased("granttype"))]

View File

@ -20,7 +20,7 @@ pub use crate::api::{
core::two_factor::send_incomplete_2fa_notifications,
core::{emergency_notification_reminder_job, emergency_request_timeout_job},
core::{event_cleanup_job, events_routes as core_events_routes},
icons::routes as icons_routes,
icons::{is_domain_blacklisted, routes as icons_routes},
identity::routes as identity_routes,
notifications::routes as notifications_routes,
notifications::{AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS},
@ -33,23 +33,18 @@ pub use crate::api::{
web::static_files,
};
use crate::db::{models::User, DbConn};
use crate::util;
// Type aliases for API methods results
type ApiResult<T> = Result<T, crate::error::Error>;
pub type JsonResult = ApiResult<Json<Value>>;
pub type EmptyResult = ApiResult<()>;
type JsonUpcase<T> = Json<util::UpCase<T>>;
type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>;
type JsonVec<T> = Json<Vec<T>>;
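Editorial sketch (not part of the diff): with the JsonUpcase wrapper gone, handlers take Rocket's plain Json<T> and call into_inner() directly, while the payload structs carry #[serde(rename_all = "camelCase")]. A minimal shape of such a handler; route path, struct and function names are illustrative only:
use rocket::serde::json::Json;
#[derive(serde::Deserialize)]
#[serde(rename_all = "camelCase")]
struct ExamplePayload {
    master_password_hash: Option<String>,
    otp: Option<String>,
}
#[rocket::post("/example", data = "<data>")]
fn example_route(data: Json<ExamplePayload>) -> &'static str {
    let data: ExamplePayload = data.into_inner();
    if data.master_password_hash.is_some() || data.otp.is_some() {
        "ok"
    } else {
        "missing credentials"
    }
}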
// Common structs representing JSON data received
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
struct PasswordOrOtpData {
MasterPasswordHash: Option<String>,
Otp: Option<String>,
master_password_hash: Option<String>,
otp: Option<String>,
}
impl PasswordOrOtpData {
@ -59,7 +54,7 @@ impl PasswordOrOtpData {
pub async fn validate(&self, user: &User, delete_if_valid: bool, conn: &mut DbConn) -> EmptyResult {
use crate::api::core::two_factor::protected_actions::validate_protected_action_otp;
match (self.MasterPasswordHash.as_deref(), self.Otp.as_deref()) {
match (self.master_password_hash.as_deref(), self.otp.as_deref()) {
(Some(pw_hash), None) => {
if !user.check_valid_password(pw_hash) {
err!("Invalid password");

View File

@ -289,7 +289,7 @@ fn serialize(val: Value) -> Vec<u8> {
fn serialize_date(date: NaiveDateTime) -> Value {
let seconds: i64 = date.and_utc().timestamp();
let nanos: i64 = date.timestamp_subsec_nanos().into();
let nanos: i64 = date.and_utc().timestamp_subsec_nanos().into();
let timestamp = nanos << 34 | seconds;
let bs = timestamp.to_be_bytes();
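Editorial aside (not part of the diff): the packing above appears to follow the MessagePack timestamp-64 layout, 30 bits of nanoseconds above 34 bits of seconds, and the change simply takes the sub-second nanoseconds through and_utc() just like the seconds on the line above. A tiny sketch of the packing, assuming in-range, non-negative values:
fn pack_timestamp64(seconds: u64, nanos: u64) -> [u8; 8] {
    debug_assert!(nanos < 1 << 30);   // nanoseconds fill the upper 30 bits
    debug_assert!(seconds < 1 << 34); // seconds fill the lower 34 bits
    ((nanos << 34) | seconds).to_be_bytes()
}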

View File

@ -4,7 +4,7 @@ use chrono::{TimeDelta, Utc};
use num_traits::FromPrimitive;
use once_cell::sync::{Lazy, OnceCell};
use jsonwebtoken::{self, errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
use jsonwebtoken::{errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
use openssl::rsa::Rsa;
use serde::de::DeserializeOwned;
use serde::ser::Serialize;
@ -391,10 +391,8 @@ impl<'r> FromRequest<'r> for Host {
let host = if let Some(host) = headers.get_one("X-Forwarded-Host") {
host
} else if let Some(host) = headers.get_one("Host") {
host
} else {
""
headers.get_one("Host").unwrap_or_default()
};
format!("{protocol}://{host}")
@ -691,7 +689,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
_ => err_handler!("Error getting DB"),
};
if !can_access_collection(&headers.org_user, &col_id, &mut conn).await {
if !Collection::can_access_collection(&headers.org_user, &col_id, &mut conn).await {
err_handler!("The current user isn't a manager for this collection")
}
}
@ -764,10 +762,6 @@ impl From<ManagerHeadersLoose> for Headers {
}
}
}
async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
org_user.has_full_access()
|| Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn).await
}
impl ManagerHeaders {
pub async fn from_loose(
@ -779,7 +773,7 @@ impl ManagerHeaders {
if uuid::Uuid::parse_str(col_id).is_err() {
err!("Collection Id is malformed!");
}
if !can_access_collection(&h.org_user, col_id, conn).await {
if !Collection::can_access_collection(&h.org_user, col_id, conn).await {
err!("You don't have access to all collections!");
}
}

View File

@ -42,13 +42,13 @@ impl Attachment {
pub fn to_json(&self, host: &str) -> Value {
json!({
"Id": self.id,
"Url": self.get_url(host),
"FileName": self.file_name,
"Size": self.file_size.to_string(),
"SizeName": crate::util::get_display_size(self.file_size),
"Key": self.akey,
"Object": "attachment"
"id": self.id,
"url": self.get_url(host),
"fileName": self.file_name,
"size": self.file_size.to_string(),
"sizeName": crate::util::get_display_size(self.file_size),
"key": self.akey,
"object": "attachment"
})
}
}

View File

@ -1,3 +1,4 @@
use crate::util::LowerCase;
use crate::CONFIG;
use chrono::{NaiveDateTime, TimeDelta, Utc};
use serde_json::Value;
@ -81,7 +82,7 @@ impl Cipher {
pub fn validate_notes(cipher_data: &[CipherData]) -> EmptyResult {
let mut validation_errors = serde_json::Map::new();
for (index, cipher) in cipher_data.iter().enumerate() {
if let Some(note) = &cipher.Notes {
if let Some(note) = &cipher.notes {
if note.len() > 10_000 {
validation_errors.insert(
format!("Ciphers[{index}].Notes"),
@ -135,10 +136,6 @@ impl Cipher {
}
}
let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
let password_history_json =
self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null);
// We don't need these values at all for Organizational syncs
// Skip any other database calls if this is the case and just return false.
let (read_only, hide_passwords) = if sync_type == CipherSyncType::User {
@ -153,20 +150,34 @@ impl Cipher {
(false, false)
};
let fields_json = self
.fields
.as_ref()
.and_then(|s| serde_json::from_str::<LowerCase<Value>>(s).ok())
.unwrap_or_default()
.data;
let password_history_json = self
.password_history
.as_ref()
.and_then(|s| serde_json::from_str::<LowerCase<Value>>(s).ok())
.unwrap_or_default()
.data;
// Get the type_data or a default to an empty json object '{}'.
// If not passing an empty object, mobile clients will crash.
let mut type_data_json: Value =
serde_json::from_str(&self.data).unwrap_or_else(|_| Value::Object(serde_json::Map::new()));
let mut type_data_json = serde_json::from_str::<LowerCase<Value>>(&self.data)
.map(|d| d.data)
.unwrap_or_else(|_| Value::Object(serde_json::Map::new()));
// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
// Set the first element of the Uris array as Uri; this is needed by several (mobile) clients.
if self.atype == 1 {
if type_data_json["Uris"].is_array() {
let uri = type_data_json["Uris"][0]["Uri"].clone();
type_data_json["Uri"] = uri;
if type_data_json["uris"].is_array() {
let uri = type_data_json["uris"][0]["uri"].clone();
type_data_json["uri"] = uri;
} else {
// Upstream always has an Uri key/value
type_data_json["Uri"] = Value::Null;
type_data_json["uri"] = Value::Null;
}
}
@ -175,10 +186,10 @@ impl Cipher {
// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
// data_json should always contain the following keys with every atype
data_json["Fields"] = fields_json.clone();
data_json["Name"] = json!(self.name);
data_json["Notes"] = json!(self.notes);
data_json["PasswordHistory"] = password_history_json.clone();
data_json["fields"] = fields_json.clone();
data_json["name"] = json!(self.name);
data_json["notes"] = json!(self.notes);
data_json["passwordHistory"] = password_history_json.clone();
let collection_ids = if let Some(cipher_sync_data) = cipher_sync_data {
if let Some(cipher_collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
@ -198,48 +209,48 @@ impl Cipher {
//
// Ref: https://github.com/bitwarden/server/blob/master/src/Core/Models/Api/Response/CipherResponseModel.cs
let mut json_object = json!({
"Object": "cipherDetails",
"Id": self.uuid,
"Type": self.atype,
"CreationDate": format_date(&self.created_at),
"RevisionDate": format_date(&self.updated_at),
"DeletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
"Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
"OrganizationId": self.organization_uuid,
"Key": self.key,
"Attachments": attachments_json,
"object": "cipherDetails",
"id": self.uuid,
"type": self.atype,
"creationDate": format_date(&self.created_at),
"revisionDate": format_date(&self.updated_at),
"deletedDate": self.deleted_at.map_or(Value::Null, |d| Value::String(format_date(&d))),
"reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
"organizationId": self.organization_uuid,
"key": self.key,
"attachments": attachments_json,
// We have UseTotp set to true by default within the Organization model.
// This variable together with UsersGetPremium is used to show or hide the TOTP counter.
"OrganizationUseTotp": true,
"organizationUseTotp": true,
// This field is specific to the cipherDetails type.
"CollectionIds": collection_ids,
"collectionIds": collection_ids,
"Name": self.name,
"Notes": self.notes,
"Fields": fields_json,
"name": self.name,
"notes": self.notes,
"fields": fields_json,
"Data": data_json,
"data": data_json,
"PasswordHistory": password_history_json,
"passwordHistory": password_history_json,
// All Cipher types are included by default as null, but only the matching one will be populated
"Login": null,
"SecureNote": null,
"Card": null,
"Identity": null,
"login": null,
"secureNote": null,
"card": null,
"identity": null,
});
// These values are only needed for user/default syncs
// Not during an organizational sync like `get_org_details`
// Skip adding these fields in that case
if sync_type == CipherSyncType::User {
json_object["FolderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
json_object["folderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string())
} else {
self.get_folder_uuid(user_uuid, conn).await
});
json_object["Favorite"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
json_object["favorite"] = json!(if let Some(cipher_sync_data) = cipher_sync_data {
cipher_sync_data.cipher_favorites.contains(&self.uuid)
} else {
self.is_favorite(user_uuid, conn).await
@ -247,15 +258,15 @@ impl Cipher {
// These values are true by default, but can be false if the
// cipher belongs to a collection or group where the org owner has enabled
// the "Read Only" or "Hide Passwords" restrictions for the user.
json_object["Edit"] = json!(!read_only);
json_object["ViewPassword"] = json!(!hide_passwords);
json_object["edit"] = json!(!read_only);
json_object["viewPassword"] = json!(!hide_passwords);
}
let key = match self.atype {
1 => "Login",
2 => "SecureNote",
3 => "Card",
4 => "Identity",
1 => "login",
2 => "secureNote",
3 => "card",
4 => "identity",
_ => panic!("Wrong type"),
};
@ -431,7 +442,7 @@ impl Cipher {
}
if let Some(ref org_uuid) = self.organization_uuid {
if let Some(cipher_sync_data) = cipher_sync_data {
return cipher_sync_data.user_group_full_access_for_organizations.get(org_uuid).is_some();
return cipher_sync_data.user_group_full_access_for_organizations.contains(org_uuid);
} else {
return Group::is_in_full_access_group(user_uuid, org_uuid, conn).await;
}

View File

@ -1,6 +1,6 @@
use serde_json::Value;
use super::{CollectionGroup, User, UserOrgStatus, UserOrgType, UserOrganization};
use super::{CollectionGroup, GroupUser, User, UserOrgStatus, UserOrgType, UserOrganization};
use crate::CONFIG;
db_object! {
@ -49,11 +49,11 @@ impl Collection {
pub fn to_json(&self) -> Value {
json!({
"ExternalId": self.external_id,
"Id": self.uuid,
"OrganizationId": self.org_uuid,
"Name": self.name,
"Object": "collection",
"externalId": self.external_id,
"id": self.uuid,
"organizationId": self.org_uuid,
"name": self.name,
"object": "collection",
})
}
@ -97,11 +97,20 @@ impl Collection {
};
let mut json_object = self.to_json();
json_object["Object"] = json!("collectionDetails");
json_object["ReadOnly"] = json!(read_only);
json_object["HidePasswords"] = json!(hide_passwords);
json_object["object"] = json!("collectionDetails");
json_object["readOnly"] = json!(read_only);
json_object["hidePasswords"] = json!(hide_passwords);
json_object
}
pub async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
org_user.has_status(UserOrgStatus::Confirmed)
&& (org_user.has_full_access()
|| CollectionUser::has_access_to_collection_by_user(col_id, &org_user.user_uuid, conn).await
|| (CONFIG.org_groups_enabled()
&& (GroupUser::has_full_access_by_member(&org_user.org_uuid, &org_user.uuid, conn).await
|| GroupUser::has_access_to_collection_by_member(col_id, &org_user.uuid, conn).await)))
}
}
use crate::db::DbConn;
@ -252,17 +261,6 @@ impl Collection {
}
}
// Check if a user has access to a specific collection
// FIXME: This needs to be reviewed. The query used by `find_by_user_uuid` could be adjusted to filter when needed.
// For now this is a good solution without making to much changes.
pub async fn has_access_by_collection_and_user_uuid(
collection_uuid: &str,
user_uuid: &str,
conn: &mut DbConn,
) -> bool {
Self::find_by_user_uuid(user_uuid.to_owned(), conn).await.into_iter().any(|c| c.uuid == collection_uuid)
}
pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
Self::find_by_user_uuid(user_uuid.to_owned(), conn)
.await
@ -644,6 +642,10 @@ impl CollectionUser {
Ok(())
}}
}
pub async fn has_access_to_collection_by_user(col_id: &str, user_uuid: &str, conn: &mut DbConn) -> bool {
Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some()
}
}
/// Database methods

View File

@ -58,11 +58,11 @@ impl EmergencyAccess {
pub fn to_json(&self) -> Value {
json!({
"Id": self.uuid,
"Status": self.status,
"Type": self.atype,
"WaitTimeDays": self.wait_time_days,
"Object": "emergencyAccess",
"id": self.uuid,
"status": self.status,
"type": self.atype,
"waitTimeDays": self.wait_time_days,
"object": "emergencyAccess",
})
}
@ -70,36 +70,43 @@ impl EmergencyAccess {
let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn).await.expect("Grantor user not found.");
json!({
"Id": self.uuid,
"Status": self.status,
"Type": self.atype,
"WaitTimeDays": self.wait_time_days,
"GrantorId": grantor_user.uuid,
"Email": grantor_user.email,
"Name": grantor_user.name,
"Object": "emergencyAccessGrantorDetails",
"id": self.uuid,
"status": self.status,
"type": self.atype,
"waitTimeDays": self.wait_time_days,
"grantorId": grantor_user.uuid,
"email": grantor_user.email,
"name": grantor_user.name,
"object": "emergencyAccessGrantorDetails",
})
}
pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Value {
pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option<Value> {
let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
Some(User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found."))
User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
} else if let Some(email) = self.email.as_deref() {
Some(User::find_by_mail(email, conn).await.expect("Grantee user not found."))
match User::find_by_mail(email, conn).await {
Some(user) => user,
None => {
// remove outstanding invitations which should not exist
let _ = Self::delete_all_by_grantee_email(email, conn).await;
return None;
}
}
} else {
None
return None;
};
json!({
"Id": self.uuid,
"Status": self.status,
"Type": self.atype,
"WaitTimeDays": self.wait_time_days,
"GranteeId": grantee_user.as_ref().map_or("", |u| &u.uuid),
"Email": grantee_user.as_ref().map_or("", |u| &u.email),
"Name": grantee_user.as_ref().map_or("", |u| &u.name),
"Object": "emergencyAccessGranteeDetails",
})
Some(json!({
"id": self.uuid,
"status": self.status,
"type": self.atype,
"waitTimeDays": self.wait_time_days,
"granteeId": grantee_user.uuid,
"email": grantee_user.email,
"name": grantee_user.name,
"object": "emergencyAccessGranteeDetails",
}))
}
}
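Editorial sketch (not part of the diff): since to_json_grantee_details now returns Option<Value>, a caller building a grantee list can skip entries whose grantee no longer exists instead of panicking. The helper below is hypothetical; the real call sites live in the emergency_access API module:
async fn grantee_details_list(grantor_uuid: &str, conn: &mut DbConn) -> Vec<serde_json::Value> {
    let mut out = Vec::new();
    for ea in EmergencyAccess::find_all_by_grantor_uuid(grantor_uuid, conn).await {
        if let Some(details) = ea.to_json_grantee_details(conn).await {
            out.push(details); // None entries (stale invites) are simply skipped
        }
    }
    out
}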
@ -174,7 +181,7 @@ impl EmergencyAccess {
// Update the grantee so that it will refresh its status.
User::update_uuid_revision(self.grantee_uuid.as_ref().expect("Error getting grantee"), conn).await;
self.status = status;
self.updated_at = date.to_owned();
date.clone_into(&mut self.updated_at);
db_run! {conn: {
crate::util::retry(|| {
@ -192,7 +199,7 @@ impl EmergencyAccess {
conn: &mut DbConn,
) -> EmptyResult {
self.last_notification_at = Some(date.to_owned());
self.updated_at = date.to_owned();
date.clone_into(&mut self.updated_at);
db_run! {conn: {
crate::util::retry(|| {
@ -214,6 +221,13 @@ impl EmergencyAccess {
Ok(())
}
pub async fn delete_all_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
for ea in Self::find_all_invited_by_grantee_email(grantee_email, conn).await {
ea.delete(conn).await?;
}
Ok(())
}
pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
User::update_uuid_revision(&self.grantor_uuid, conn).await;
@ -285,6 +299,15 @@ impl EmergencyAccess {
}}
}
pub async fn find_all_invited_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::email.eq(grantee_email))
.filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32))
.load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
}}
}
pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
emergency_access::table
@ -292,6 +315,21 @@ impl EmergencyAccess {
.load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
}}
}
pub async fn accept_invite(&mut self, grantee_uuid: &str, grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email {
err!("User email does not match invite.");
}
if self.status == EmergencyAccessStatus::Accepted as i32 {
err!("Emergency contact already accepted.");
}
self.status = EmergencyAccessStatus::Accepted as i32;
self.grantee_uuid = Some(String::from(grantee_uuid));
self.email = None;
self.save(conn).await
}
}
// endregion

View File

@ -43,10 +43,10 @@ impl Folder {
use crate::util::format_date;
json!({
"Id": self.uuid,
"RevisionDate": format_date(&self.updated_at),
"Name": self.name,
"Object": "folder",
"id": self.uuid,
"revisionDate": format_date(&self.updated_at),
"name": self.name,
"object": "folder",
})
}
}

View File

@ -58,14 +58,14 @@ impl Group {
use crate::util::format_date;
json!({
"Id": self.uuid,
"OrganizationId": self.organizations_uuid,
"Name": self.name,
"AccessAll": self.access_all,
"ExternalId": self.external_id,
"CreationDate": format_date(&self.creation_date),
"RevisionDate": format_date(&self.revision_date),
"Object": "group"
"id": self.uuid,
"organizationId": self.organizations_uuid,
"name": self.name,
"accessAll": self.access_all,
"externalId": self.external_id,
"creationDate": format_date(&self.creation_date),
"revisionDate": format_date(&self.revision_date),
"object": "group"
})
}
@ -75,21 +75,21 @@ impl Group {
.iter()
.map(|entry| {
json!({
"Id": entry.collections_uuid,
"ReadOnly": entry.read_only,
"HidePasswords": entry.hide_passwords
"id": entry.collections_uuid,
"readOnly": entry.read_only,
"hidePasswords": entry.hide_passwords
})
})
.collect();
json!({
"Id": self.uuid,
"OrganizationId": self.organizations_uuid,
"Name": self.name,
"AccessAll": self.access_all,
"ExternalId": self.external_id,
"Collections": collections_groups,
"Object": "groupDetails"
"id": self.uuid,
"organizationId": self.organizations_uuid,
"name": self.name,
"accessAll": self.access_all,
"externalId": self.external_id,
"collections": collections_groups,
"object": "groupDetails"
})
}

View File

@ -4,7 +4,6 @@ use serde_json::Value;
use crate::api::EmptyResult;
use crate::db::DbConn;
use crate::error::MapResult;
use crate::util::UpCase;
use super::{TwoFactor, UserOrgStatus, UserOrgType, UserOrganization};
@ -39,16 +38,18 @@ pub enum OrgPolicyType {
// https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/SendOptionsPolicyData.cs
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
pub struct SendOptionsPolicyData {
pub DisableHideEmail: bool,
#[serde(rename = "disableHideEmail", alias = "DisableHideEmail")]
pub disable_hide_email: bool,
}
// https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/ResetPasswordDataModel.cs
#[derive(Deserialize)]
#[allow(non_snake_case)]
#[serde(rename_all = "camelCase")]
pub struct ResetPasswordDataModel {
pub AutoEnrollEnabled: bool,
#[serde(rename = "autoEnrollEnabled", alias = "AutoEnrollEnabled")]
pub auto_enroll_enabled: bool,
}
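A quick illustration of the rename/alias pair above: policy data stored by older versions with PascalCase keys keeps deserializing alongside new camelCase payloads. The test below is only a sketch and does not exist in the repository:

#[test]
fn reset_password_policy_accepts_both_casings() {
    let old: ResetPasswordDataModel =
        serde_json::from_str(r#"{"AutoEnrollEnabled": true}"#).unwrap();
    let new: ResetPasswordDataModel =
        serde_json::from_str(r#"{"autoEnrollEnabled": true}"#).unwrap();
    assert!(old.auto_enroll_enabled && new.auto_enroll_enabled);
}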
pub type OrgPolicyResult = Result<(), OrgPolicyErr>;
@ -78,12 +79,12 @@ impl OrgPolicy {
pub fn to_json(&self) -> Value {
let data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
json!({
"Id": self.uuid,
"OrganizationId": self.org_uuid,
"Type": self.atype,
"Data": data_json,
"Enabled": self.enabled,
"Object": "policy",
"id": self.uuid,
"organizationId": self.org_uuid,
"type": self.atype,
"data": data_json,
"enabled": self.enabled,
"object": "policy",
})
}
}
@ -307,9 +308,9 @@ impl OrgPolicy {
pub async fn org_is_reset_password_auto_enroll(org_uuid: &str, conn: &mut DbConn) -> bool {
match OrgPolicy::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await {
Some(policy) => match serde_json::from_str::<UpCase<ResetPasswordDataModel>>(&policy.data) {
Some(policy) => match serde_json::from_str::<ResetPasswordDataModel>(&policy.data) {
Ok(opts) => {
return policy.enabled && opts.data.AutoEnrollEnabled;
return policy.enabled && opts.auto_enroll_enabled;
}
_ => error!("Failed to deserialize ResetPasswordDataModel: {}", policy.data),
},
@ -327,9 +328,9 @@ impl OrgPolicy {
{
if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await {
if user.atype < UserOrgType::Admin {
match serde_json::from_str::<UpCase<SendOptionsPolicyData>>(&policy.data) {
match serde_json::from_str::<SendOptionsPolicyData>(&policy.data) {
Ok(opts) => {
if opts.data.DisableHideEmail {
if opts.disable_hide_email {
return true;
}
}

View File

@ -153,39 +153,39 @@ impl Organization {
// https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/Organizations/OrganizationResponseModel.cs
pub fn to_json(&self) -> Value {
json!({
"Id": self.uuid,
"Identifier": null, // not supported by us
"Name": self.name,
"Seats": 10, // The value doesn't matter, we don't check server-side
// "MaxAutoscaleSeats": null, // The value doesn't matter, we don't check server-side
"MaxCollections": 10, // The value doesn't matter, we don't check server-side
"MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
"Use2fa": true,
"UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
"UseEvents": CONFIG.org_events_enabled(),
"UseGroups": CONFIG.org_groups_enabled(),
"UseTotp": true,
"UsePolicies": true,
// "UseScim": false, // Not supported (Not AGPLv3 Licensed)
"UseSso": false, // Not supported
// "UseKeyConnector": false, // Not supported
"SelfHost": true,
"UseApi": true,
"HasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(),
"UseResetPassword": CONFIG.mail_enabled(),
"id": self.uuid,
"identifier": null, // not supported by us
"name": self.name,
"seats": 10, // The value doesn't matter, we don't check server-side
// "maxAutoscaleSeats": null, // The value doesn't matter, we don't check server-side
"maxCollections": 10, // The value doesn't matter, we don't check server-side
"maxStorageGb": 10, // The value doesn't matter, we don't check server-side
"use2fa": true,
"useDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
"useEvents": CONFIG.org_events_enabled(),
"useGroups": CONFIG.org_groups_enabled(),
"useTotp": true,
"usePolicies": true,
// "useScim": false, // Not supported (Not AGPLv3 Licensed)
"useSso": false, // Not supported
// "useKeyConnector": false, // Not supported
"selfHost": true,
"useApi": true,
"hasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(),
"useResetPassword": CONFIG.mail_enabled(),
"BusinessName": null,
"BusinessAddress1": null,
"BusinessAddress2": null,
"BusinessAddress3": null,
"BusinessCountry": null,
"BusinessTaxNumber": null,
"businessName": null,
"businessAddress1": null,
"businessAddress2": null,
"businessAddress3": null,
"businessCountry": null,
"businessTaxNumber": null,
"BillingEmail": self.billing_email,
"Plan": "TeamsAnnually",
"PlanType": 5, // TeamsAnnually plan
"UsersGetPremium": true,
"Object": "organization",
"billingEmail": self.billing_email,
"plan": "TeamsAnnually",
"planType": 5, // TeamsAnnually plan
"usersGetPremium": true,
"object": "organization",
})
}
}
@ -344,65 +344,64 @@ impl UserOrganization {
pub async fn to_json(&self, conn: &mut DbConn) -> Value {
let org = Organization::find_by_uuid(&self.org_uuid, conn).await.unwrap();
let permissions = json!({
// TODO: Add support for Custom User Roles
// See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
"accessEventLogs": false,
"accessImportExport": false,
"accessReports": false,
"createNewCollections": false,
"editAnyCollection": false,
"deleteAnyCollection": false,
"editAssignedCollections": false,
"deleteAssignedCollections": false,
"manageGroups": false,
"managePolicies": false,
"manageSso": false, // Not supported
"manageUsers": false,
"manageResetPassword": false,
"manageScim": false // Not supported (Not AGPLv3 Licensed)
});
// https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/ProfileOrganizationResponseModel.cs
json!({
"Id": self.org_uuid,
"Identifier": null, // Not supported
"Name": org.name,
"Seats": 10, // The value doesn't matter, we don't check server-side
"MaxCollections": 10, // The value doesn't matter, we don't check server-side
"UsersGetPremium": true,
"Use2fa": true,
"UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
"UseEvents": CONFIG.org_events_enabled(),
"UseGroups": CONFIG.org_groups_enabled(),
"UseTotp": true,
// "UseScim": false, // Not supported (Not AGPLv3 Licensed)
"UsePolicies": true,
"UseApi": true,
"SelfHost": true,
"HasPublicAndPrivateKeys": org.private_key.is_some() && org.public_key.is_some(),
"ResetPasswordEnrolled": self.reset_password_key.is_some(),
"UseResetPassword": CONFIG.mail_enabled(),
"SsoBound": false, // Not supported
"UseSso": false, // Not supported
"ProviderId": null,
"ProviderName": null,
// "KeyConnectorEnabled": false,
// "KeyConnectorUrl": null,
"id": self.org_uuid,
"identifier": null, // Not supported
"name": org.name,
"seats": 10, // The value doesn't matter, we don't check server-side
"maxCollections": 10, // The value doesn't matter, we don't check server-side
"usersGetPremium": true,
"use2fa": true,
"useDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
"useEvents": CONFIG.org_events_enabled(),
"useGroups": CONFIG.org_groups_enabled(),
"useTotp": true,
// "useScim": false, // Not supported (Not AGPLv3 Licensed)
"usePolicies": true,
"useApi": true,
"selfHost": true,
"hasPublicAndPrivateKeys": org.private_key.is_some() && org.public_key.is_some(),
"resetPasswordEnrolled": self.reset_password_key.is_some(),
"useResetPassword": CONFIG.mail_enabled(),
"ssoBound": false, // Not supported
"useSso": false, // Not supported
"providerId": null,
"providerName": null,
// "keyConnectorEnabled": false,
// "keyConnectorUrl": null,
// TODO: Add support for Custom User Roles
// See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
// "Permissions": {
// "AccessEventLogs": false,
// "AccessImportExport": false,
// "AccessReports": false,
// "ManageAllCollections": false,
// "CreateNewCollections": false,
// "EditAnyCollection": false,
// "DeleteAnyCollection": false,
// "ManageAssignedCollections": false,
// "editAssignedCollections": false,
// "deleteAssignedCollections": false,
// "ManageCiphers": false,
// "ManageGroups": false,
// "ManagePolicies": false,
// "ManageResetPassword": false,
// "ManageSso": false, // Not supported
// "ManageUsers": false,
// "ManageScim": false, // Not supported (Not AGPLv3 Licensed)
// },
"permissions": permissions,
"MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
"maxStorageGb": 10, // The value doesn't matter, we don't check server-side
// These are per user
"UserId": self.user_uuid,
"Key": self.akey,
"Status": self.status,
"Type": self.atype,
"Enabled": true,
"userId": self.user_uuid,
"key": self.akey,
"status": self.status,
"type": self.atype,
"enabled": true,
"Object": "profileOrganization",
"object": "profileOrganization",
})
}
@ -438,9 +437,9 @@ impl UserOrganization {
.iter()
.map(|cu| {
json!({
"Id": cu.collection_uuid,
"ReadOnly": cu.read_only,
"HidePasswords": cu.hide_passwords,
"id": cu.collection_uuid,
"readOnly": cu.read_only,
"hidePasswords": cu.hide_passwords,
})
})
.collect()
@ -449,29 +448,29 @@ impl UserOrganization {
};
json!({
"Id": self.uuid,
"UserId": self.user_uuid,
"Name": user.name,
"Email": user.email,
"ExternalId": self.external_id,
"Groups": groups,
"Collections": collections,
"id": self.uuid,
"userId": self.user_uuid,
"name": user.name,
"email": user.email,
"externalId": self.external_id,
"groups": groups,
"collections": collections,
"Status": status,
"Type": self.atype,
"AccessAll": self.access_all,
"TwoFactorEnabled": twofactor_enabled,
"ResetPasswordEnrolled": self.reset_password_key.is_some(),
"status": status,
"type": self.atype,
"accessAll": self.access_all,
"twoFactorEnabled": twofactor_enabled,
"resetPasswordEnrolled": self.reset_password_key.is_some(),
"Object": "organizationUserUserDetails",
"object": "organizationUserUserDetails",
})
}
pub fn to_json_user_access_restrictions(&self, col_user: &CollectionUser) -> Value {
json!({
"Id": self.uuid,
"ReadOnly": col_user.read_only,
"HidePasswords": col_user.hide_passwords,
"id": self.uuid,
"readOnly": col_user.read_only,
"hidePasswords": col_user.hide_passwords,
})
}
@ -485,9 +484,9 @@ impl UserOrganization {
.iter()
.map(|c| {
json!({
"Id": c.collection_uuid,
"ReadOnly": c.read_only,
"HidePasswords": c.hide_passwords,
"id": c.collection_uuid,
"readOnly": c.read_only,
"hidePasswords": c.hide_passwords,
})
})
.collect()
@ -502,15 +501,15 @@ impl UserOrganization {
};
json!({
"Id": self.uuid,
"UserId": self.user_uuid,
"id": self.uuid,
"userId": self.user_uuid,
"Status": status,
"Type": self.atype,
"AccessAll": self.access_all,
"Collections": coll_uuids,
"status": status,
"type": self.atype,
"accessAll": self.access_all,
"collections": coll_uuids,
"Object": "organizationUserDetails",
"object": "organizationUserDetails",
})
}
pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {

View File

@ -125,26 +125,26 @@ impl Send {
let data: Value = serde_json::from_str(&self.data).unwrap_or_default();
json!({
"Id": self.uuid,
"AccessId": BASE64URL_NOPAD.encode(Uuid::parse_str(&self.uuid).unwrap_or_default().as_bytes()),
"Type": self.atype,
"id": self.uuid,
"accessId": BASE64URL_NOPAD.encode(Uuid::parse_str(&self.uuid).unwrap_or_default().as_bytes()),
"type": self.atype,
"Name": self.name,
"Notes": self.notes,
"Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
"File": if self.atype == SendType::File as i32 { Some(&data) } else { None },
"name": self.name,
"notes": self.notes,
"text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
"file": if self.atype == SendType::File as i32 { Some(&data) } else { None },
"Key": self.akey,
"MaxAccessCount": self.max_access_count,
"AccessCount": self.access_count,
"Password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
"Disabled": self.disabled,
"HideEmail": self.hide_email,
"key": self.akey,
"maxAccessCount": self.max_access_count,
"accessCount": self.access_count,
"password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
"disabled": self.disabled,
"hideEmail": self.hide_email,
"RevisionDate": format_date(&self.revision_date),
"ExpirationDate": self.expiration_date.as_ref().map(format_date),
"DeletionDate": format_date(&self.deletion_date),
"Object": "send",
"revisionDate": format_date(&self.revision_date),
"expirationDate": self.expiration_date.as_ref().map(format_date),
"deletionDate": format_date(&self.deletion_date),
"object": "send",
})
}
@ -154,16 +154,16 @@ impl Send {
let data: Value = serde_json::from_str(&self.data).unwrap_or_default();
json!({
"Id": self.uuid,
"Type": self.atype,
"id": self.uuid,
"type": self.atype,
"Name": self.name,
"Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
"File": if self.atype == SendType::File as i32 { Some(&data) } else { None },
"name": self.name,
"text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
"file": if self.atype == SendType::File as i32 { Some(&data) } else { None },
"ExpirationDate": self.expiration_date.as_ref().map(format_date),
"CreatorIdentifier": self.creator_identifier(conn).await,
"Object": "send-access",
"expirationDate": self.expiration_date.as_ref().map(format_date),
"creatorIdentifier": self.creator_identifier(conn).await,
"object": "send-access",
})
}
}
@ -290,25 +290,18 @@ impl Send {
pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<i64> {
let sends = Self::find_by_user(user_uuid, conn).await;
#[allow(non_snake_case)]
#[derive(serde::Deserialize, Default)]
#[derive(serde::Deserialize)]
struct FileData {
Size: Option<NumberOrString>,
size: Option<NumberOrString>,
#[serde(rename = "size", alias = "Size")]
size: NumberOrString,
}
let mut total: i64 = 0;
for send in sends {
if send.atype == SendType::File as i32 {
let data: FileData = serde_json::from_str(&send.data).unwrap_or_default();
let size = match (data.size, data.Size) {
(Some(s), _) => s.into_i64(),
(_, Some(s)) => s.into_i64(),
(None, None) => continue,
};
if let Ok(size) = size {
if let Ok(size) =
serde_json::from_str::<FileData>(&send.data).map_err(Into::into).and_then(|d| d.size.into_i64())
{
total = total.checked_add(size)?;
};
}

View File

@ -54,17 +54,17 @@ impl TwoFactor {
pub fn to_json(&self) -> Value {
json!({
"Enabled": self.enabled,
"Key": "", // This key and value vary
"Object": "twoFactorAuthenticator" // This value varies
"enabled": self.enabled,
"key": "", // This key and value vary
"Oobject": "twoFactorAuthenticator" // This value varies
})
}
pub fn to_json_provider(&self) -> Value {
json!({
"Enabled": self.enabled,
"Type": self.atype,
"Object": "twoFactorProvider"
"enabled": self.enabled,
"type": self.atype,
"object": "twoFactorProvider"
})
}
}

View File

@ -241,23 +241,25 @@ impl User {
json!({
"_Status": status as i32,
"Id": self.uuid,
"Name": self.name,
"Email": self.email,
"EmailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
"Premium": true,
"MasterPasswordHint": self.password_hint,
"Culture": "en-US",
"TwoFactorEnabled": twofactor_enabled,
"Key": self.akey,
"PrivateKey": self.private_key,
"SecurityStamp": self.security_stamp,
"Organizations": orgs_json,
"Providers": [],
"ProviderOrganizations": [],
"ForcePasswordReset": false,
"AvatarColor": self.avatar_color,
"Object": "profile",
"id": self.uuid,
"name": self.name,
"email": self.email,
"emailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
"premium": true,
"premiumFromOrganization": false,
"masterPasswordHint": self.password_hint,
"culture": "en-US",
"twoFactorEnabled": twofactor_enabled,
"key": self.akey,
"privateKey": self.private_key,
"securityStamp": self.security_stamp,
"organizations": orgs_json,
"providers": [],
"providerOrganizations": [],
"forcePasswordReset": false,
"avatarColor": self.avatar_color,
"usesKeyConnector": false,
"object": "profile",
})
}
@ -311,6 +313,7 @@ impl User {
Send::delete_all_by_user(&self.uuid, conn).await?;
EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?;
EmergencyAccess::delete_all_by_grantee_email(&self.email, conn).await?;
UserOrganization::delete_all_by_user(&self.uuid, conn).await?;
Cipher::delete_all_by_user(&self.uuid, conn).await?;
Favorite::delete_all_by_user(&self.uuid, conn).await?;

View File

@ -179,18 +179,18 @@ fn _serialize(e: &impl serde::Serialize, _msg: &str) -> String {
fn _api_error(_: &impl std::any::Any, msg: &str) -> String {
let json = json!({
"Message": msg,
"message": msg,
"error": "",
"error_description": "",
"ValidationErrors": {"": [ msg ]},
"ErrorModel": {
"Message": msg,
"Object": "error"
"validationErrors": {"": [ msg ]},
"errorModel": {
"message": msg,
"object": "error"
},
"ExceptionMessage": null,
"ExceptionStackTrace": null,
"InnerExceptionMessage": null,
"Object": "error"
"exceptionMessage": null,
"exceptionStackTrace": null,
"innerExceptionMessage": null,
"object": "error"
});
_serialize(&json, "")
}

View File

@ -3,7 +3,7 @@
// The more key/value pairs there are the more recursion occurs.
// We want to keep this as low as possible, but not higher than 128.
// If you go above 128 it will cause rust-analyzer to fail.
#![recursion_limit = "87"]
#![recursion_limit = "90"]
// When enabled use MiMalloc as malloc instead of the default malloc
#[cfg(feature = "enable_mimalloc")]
@ -211,8 +211,8 @@ fn launch_info() {
}
fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
// Depending on the main log level we either want to disable or enable logging for trust-dns.
// Else if there are timeouts it will clutter the logs since trust-dns uses warn for this.
// Depending on the main log level we either want to disable or enable logging for hickory.
// Else if there are timeouts it will clutter the logs since hickory uses warn for this.
let hickory_level = if level >= log::LevelFilter::Debug {
level
} else {
@ -266,7 +266,7 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
.level_for("handlebars::render", handlebars_level)
// Prevent cookie_store logs
.level_for("cookie_store", log::LevelFilter::Off)
// Variable level for trust-dns used by reqwest
// Variable level for hickory used by reqwest
.level_for("hickory_resolver::name_server::name_server", hickory_level)
.level_for("hickory_proto::xfer", hickory_level)
.level_for("diesel_logger", diesel_logger_level)

File diff suppressed because it is too large

View File

@ -4,6 +4,7 @@
use std::{collections::HashMap, io::Cursor, ops::Deref, path::Path};
use num_traits::ToPrimitive;
use once_cell::sync::Lazy;
use rocket::{
fairing::{Fairing, Info, Kind},
http::{ContentType, Header, HeaderMap, Method, Status},
@ -520,30 +521,38 @@ pub fn container_base_image() -> &'static str {
use std::fmt;
use serde::de::{self, DeserializeOwned, Deserializer, MapAccess, SeqAccess, Visitor};
use serde_json::{self, Value};
use serde_json::Value;
pub type JsonMap = serde_json::Map<String, Value>;
#[derive(Serialize, Deserialize)]
pub struct UpCase<T: DeserializeOwned> {
#[serde(deserialize_with = "upcase_deserialize")]
pub struct LowerCase<T: DeserializeOwned> {
#[serde(deserialize_with = "lowercase_deserialize")]
#[serde(flatten)]
pub data: T,
}
impl Default for LowerCase<Value> {
fn default() -> Self {
Self {
data: Value::Null,
}
}
}
// https://github.com/serde-rs/serde/issues/586
pub fn upcase_deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
pub fn lowercase_deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
where
T: DeserializeOwned,
D: Deserializer<'de>,
{
let d = deserializer.deserialize_any(UpCaseVisitor)?;
let d = deserializer.deserialize_any(LowerCaseVisitor)?;
T::deserialize(d).map_err(de::Error::custom)
}
struct UpCaseVisitor;
struct LowerCaseVisitor;
impl<'de> Visitor<'de> for UpCaseVisitor {
impl<'de> Visitor<'de> for LowerCaseVisitor {
type Value = Value;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
@ -557,7 +566,7 @@ impl<'de> Visitor<'de> for UpCaseVisitor {
let mut result_map = JsonMap::new();
while let Some((key, value)) = map.next_entry()? {
result_map.insert(upcase_first(key), upcase_value(value));
result_map.insert(lcase_first(key), lowercase_value(value));
}
Ok(Value::Object(result_map))
@ -570,20 +579,20 @@ impl<'de> Visitor<'de> for UpCaseVisitor {
let mut result_seq = Vec::<Value>::new();
while let Some(value) = seq.next_element()? {
result_seq.push(upcase_value(value));
result_seq.push(lowercase_value(value));
}
Ok(Value::Array(result_seq))
}
}
fn upcase_value(value: Value) -> Value {
fn lowercase_value(value: Value) -> Value {
if let Value::Object(map) = value {
let mut new_value = Value::Object(serde_json::Map::new());
for (key, val) in map.into_iter() {
let processed_key = _process_key(&key);
new_value[processed_key] = upcase_value(val);
new_value[processed_key] = lowercase_value(val);
}
new_value
} else if let Value::Array(array) = value {
@ -591,7 +600,7 @@ fn upcase_value(value: Value) -> Value {
let mut new_value = Value::Array(vec![Value::Null; array.len()]);
for (index, val) in array.into_iter().enumerate() {
new_value[index] = upcase_value(val);
new_value[index] = lowercase_value(val);
}
new_value
} else {
@ -603,12 +612,12 @@ fn upcase_value(value: Value) -> Value {
// This key is part of the Identity Cipher (Social Security Number)
fn _process_key(key: &str) -> String {
match key.to_lowercase().as_ref() {
"ssn" => "SSN".into(),
_ => self::upcase_first(key),
"ssn" => "ssn".into(),
_ => self::lcase_first(key),
}
}
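A hypothetical usage sketch of the LowerCase wrapper: wrapping a request body folds legacy PascalCase keys to lowerCamelCase before the inner type is deserialized, so both old and new clients are accepted. The MoveData struct and parse_move helper below are illustrative only and are not part of this change:

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct MoveData {
    folder_id: Option<String>,
    ids: Vec<String>,
}

fn parse_move(body: &str) -> serde_json::Result<MoveData> {
    // Accepts {"FolderId": ..., "Ids": [...]} as well as {"folderId": ..., "ids": [...]}
    let wrapped: LowerCase<MoveData> = serde_json::from_str(body)?;
    Ok(wrapped.data)
}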
#[derive(Deserialize, Debug, Clone)]
#[derive(Clone, Debug, Deserialize)]
#[serde(untagged)]
pub enum NumberOrString {
Number(i64),
@ -701,14 +710,9 @@ where
use reqwest::{header, Client, ClientBuilder};
pub fn get_reqwest_client() -> Client {
match get_reqwest_client_builder().build() {
Ok(client) => client,
Err(e) => {
error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
get_reqwest_client_builder().hickory_dns(false).build().expect("Failed to build client")
}
}
pub fn get_reqwest_client() -> &'static Client {
static INSTANCE: Lazy<Client> = Lazy::new(|| get_reqwest_client_builder().build().expect("Failed to build client"));
&INSTANCE
}
pub fn get_reqwest_client_builder() -> ClientBuilder {
@ -767,3 +771,248 @@ pub fn parse_experimental_client_feature_flags(experimental_client_feature_flags
feature_states
}
mod dns_resolver {
use std::{
fmt,
net::{IpAddr, SocketAddr},
sync::Arc,
};
use hickory_resolver::{system_conf::read_system_conf, TokioAsyncResolver};
use once_cell::sync::Lazy;
use reqwest::dns::{Name, Resolve, Resolving};
use crate::{util::is_global, CONFIG};
#[derive(Debug, Clone)]
pub enum CustomResolverError {
Blacklist {
domain: String,
},
NonGlobalIp {
domain: String,
ip: IpAddr,
},
}
impl CustomResolverError {
pub fn downcast_ref(e: &dyn std::error::Error) -> Option<&Self> {
let mut source = e.source();
while let Some(err) = source {
source = err.source();
if let Some(err) = err.downcast_ref::<CustomResolverError>() {
return Some(err);
}
}
None
}
}
impl fmt::Display for CustomResolverError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Blacklist {
domain,
} => write!(f, "Blacklisted domain: {domain} matched ICON_BLACKLIST_REGEX"),
Self::NonGlobalIp {
domain,
ip,
} => write!(f, "IP {ip} for domain '{domain}' is not a global IP!"),
}
}
}
impl std::error::Error for CustomResolverError {}
#[derive(Debug, Clone)]
pub enum CustomDnsResolver {
Default(),
Hickory(Arc<TokioAsyncResolver>),
}
type BoxError = Box<dyn std::error::Error + Send + Sync>;
impl CustomDnsResolver {
pub fn instance() -> Arc<Self> {
static INSTANCE: Lazy<Arc<CustomDnsResolver>> = Lazy::new(CustomDnsResolver::new);
Arc::clone(&*INSTANCE)
}
fn new() -> Arc<Self> {
match read_system_conf() {
Ok((config, opts)) => {
let resolver = TokioAsyncResolver::tokio(config.clone(), opts.clone());
Arc::new(Self::Hickory(Arc::new(resolver)))
}
Err(e) => {
warn!("Error creating Hickory resolver, falling back to default: {e:?}");
Arc::new(Self::Default())
}
}
}
// Note that we get an iterator of addresses, but we only grab the first one for convenience
async fn resolve_domain(&self, name: &str) -> Result<Option<SocketAddr>, BoxError> {
pre_resolve(name)?;
let result = match self {
Self::Default() => tokio::net::lookup_host(name).await?.next(),
Self::Hickory(r) => r.lookup_ip(name).await?.iter().next().map(|a| SocketAddr::new(a, 0)),
};
if let Some(addr) = &result {
post_resolve(name, addr.ip())?;
}
Ok(result)
}
}
fn pre_resolve(name: &str) -> Result<(), CustomResolverError> {
if crate::api::is_domain_blacklisted(name) {
return Err(CustomResolverError::Blacklist {
domain: name.to_string(),
});
}
Ok(())
}
fn post_resolve(name: &str, ip: IpAddr) -> Result<(), CustomResolverError> {
if CONFIG.icon_blacklist_non_global_ips() && !is_global(ip) {
Err(CustomResolverError::NonGlobalIp {
domain: name.to_string(),
ip,
})
} else {
Ok(())
}
}
impl Resolve for CustomDnsResolver {
fn resolve(&self, name: Name) -> Resolving {
let this = self.clone();
Box::pin(async move {
let name = name.as_str();
let result = this.resolve_domain(name).await?;
Ok::<reqwest::dns::Addrs, _>(Box::new(result.into_iter()))
})
}
}
}
pub use dns_resolver::{CustomDnsResolver, CustomResolverError};
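A short sketch of how the resolver is intended to be plugged into reqwest; the actual wiring happens in the client builder and is not part of this hunk, so the function below is illustrative only:

fn build_checked_client() -> reqwest::Client {
    reqwest::ClientBuilder::new()
        // Every lookup now goes through pre_resolve/post_resolve, so blacklisted
        // domains and non-global IPs are rejected before a connection is made.
        .dns_resolver(CustomDnsResolver::instance())
        .build()
        .expect("Failed to build client")
}

Callers that need to tell these failures apart can walk the reqwest error chain with CustomResolverError::downcast_ref.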
/// TODO: This is extracted from IpAddr::is_global, which is unstable:
/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
#[allow(clippy::nonminimal_bool)]
#[cfg(any(not(feature = "unstable"), test))]
pub fn is_global_hardcoded(ip: std::net::IpAddr) -> bool {
match ip {
std::net::IpAddr::V4(ip) => {
!(ip.octets()[0] == 0 // "This network"
|| ip.is_private()
|| (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) //ip.is_shared()
|| ip.is_loopback()
|| ip.is_link_local()
// addresses reserved for future protocols (`192.0.0.0/24`)
||(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
|| ip.is_documentation()
|| (ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18) // ip.is_benchmarking()
|| (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) //ip.is_reserved()
|| ip.is_broadcast())
}
std::net::IpAddr::V6(ip) => {
!(ip.is_unspecified()
|| ip.is_loopback()
// IPv4-mapped Address (`::ffff:0:0/96`)
|| matches!(ip.segments(), [0, 0, 0, 0, 0, 0xffff, _, _])
// IPv4-IPv6 Translat. (`64:ff9b:1::/48`)
|| matches!(ip.segments(), [0x64, 0xff9b, 1, _, _, _, _, _])
// Discard-Only Address Block (`100::/64`)
|| matches!(ip.segments(), [0x100, 0, 0, 0, _, _, _, _])
// IETF Protocol Assignments (`2001::/23`)
|| (matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200)
&& !(
// Port Control Protocol Anycast (`2001:1::1`)
u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001
// Traversal Using Relays around NAT Anycast (`2001:1::2`)
|| u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002
// AMT (`2001:3::/32`)
|| matches!(ip.segments(), [0x2001, 3, _, _, _, _, _, _])
// AS112-v6 (`2001:4:112::/48`)
|| matches!(ip.segments(), [0x2001, 4, 0x112, _, _, _, _, _])
// ORCHIDv2 (`2001:20::/28`)
|| matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x2F).contains(&b))
))
|| ((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8)) // ip.is_documentation()
|| ((ip.segments()[0] & 0xfe00) == 0xfc00) //ip.is_unique_local()
|| ((ip.segments()[0] & 0xffc0) == 0xfe80)) //ip.is_unicast_link_local()
}
}
}
#[cfg(not(feature = "unstable"))]
pub use is_global_hardcoded as is_global;
#[cfg(feature = "unstable")]
#[inline(always)]
pub fn is_global(ip: std::net::IpAddr) -> bool {
ip.is_global()
}
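A couple of hand-picked sanity checks for the hardcoded implementation, illustrative only and separate from the exhaustive test suite below:

#[test]
fn is_global_hardcoded_spot_checks() {
    assert!(!is_global_hardcoded("10.1.2.3".parse().unwrap())); // RFC 1918 private
    assert!(!is_global_hardcoded("127.0.0.1".parse().unwrap())); // loopback
    assert!(is_global_hardcoded("8.8.8.8".parse().unwrap())); // public unicast
}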
/// These are some tests to check that the implementations match
/// The IPv4 addresses can all be checked in 30 seconds or so, and they are correct as of nightly 2023-07-17
/// The IPv6 addresses can't all be checked in a reasonable time, so we check over a hundred billion random ones, which have been correct so far
/// Note that the is_global implementation is subject to change as new IP RFCs are created
///
/// To run while showing progress output:
/// cargo +nightly test --release --features sqlite,unstable -- --nocapture --ignored
#[cfg(test)]
#[cfg(feature = "unstable")]
mod tests {
use super::*;
use std::net::IpAddr;
#[test]
#[ignore]
fn test_ipv4_global() {
for a in 0..u8::MAX {
println!("Iter: {}/255", a);
for b in 0..u8::MAX {
for c in 0..u8::MAX {
for d in 0..u8::MAX {
let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {}", ip)
}
}
}
}
}
#[test]
#[ignore]
fn test_ipv6_global() {
use rand::Rng;
std::thread::scope(|s| {
for t in 0..16 {
let handle = s.spawn(move || {
let mut v = [0u8; 16];
let mut rng = rand::thread_rng();
for i in 0..20 {
println!("Thread {t} Iter: {i}/50");
for _ in 0..500_000_000 {
rng.fill(&mut v);
let ip = IpAddr::V6(std::net::Ipv6Addr::from(v));
assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {ip}");
}
}
});
}
});
}
}

View File

@ -71,9 +71,9 @@ with urllib.request.urlopen(DOMAIN_LISTS_URL) as response:
global_domains = []
for name, domain_list in domain_lists.items():
entry = OrderedDict()
entry["Type"] = enums[name]
entry["Domains"] = domain_list
entry["Excluded"] = False
entry["type"] = enums[name]
entry["domains"] = domain_list
entry["excluded"] = False
global_domains.append(entry)
# Write out the global domains JSON file.