mirror of https://github.com/dani-garcia/vaultwarden synced 2024-06-16 10:58:20 +02:00

Compare commits


2 commits

Author         SHA1        Message                                             Date
Daniel García  e83e28bed8  Merge 909e0d7672 into 129b835ac7                    2024-04-06 12:05:56 +02:00
Daniel García  909e0d7672  Change API inputs/outputs and structs to camelCase  2024-03-23 15:58:59 +01:00
33 changed files with 829 additions and 1315 deletions
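The theme of commit 909e0d7672 is a serde-level rename of the JSON API from PascalCase to camelCase. A minimal sketch of the pattern applied throughout the diffs below (struct and field names here are illustrative, not taken from the diff):

```rust
use serde::{Deserialize, Serialize};

// `rename_all = "camelCase"` maps Rust's snake_case fields to camelCase JSON
// keys, so `master_password_hash` travels as `masterPasswordHash`.
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct ExampleData {
    master_password_hash: String,
    private_key: Option<String>,
}

fn main() {
    let data = ExampleData {
        master_password_hash: "hash".into(),
        private_key: None,
    };
    // Prints: {"masterPasswordHash":"hash","privateKey":null}
    println!("{}", serde_json::to_string(&data).unwrap());
}
```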

View file

@@ -46,7 +46,7 @@ jobs:
steps:
# Checkout the repo
- name: "Checkout"
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b #v4.1.4
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
# End Checkout the repo
@@ -74,7 +74,7 @@ jobs:
# Only install the clippy and rustfmt components on the default rust-toolchain
- name: "Install rust-toolchain version"
uses: dtolnay/rust-toolchain@bb45937a053e097f8591208d8e74c90db1873d07 # master @ Apr 14, 2024, 9:02 PM GMT+2
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
if: ${{ matrix.channel == 'rust-toolchain' }}
with:
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
@@ -84,7 +84,7 @@ jobs:
# Install the any other channel to be used for which we do not execute clippy and rustfmt
- name: "Install MSRV version"
uses: dtolnay/rust-toolchain@bb45937a053e097f8591208d8e74c90db1873d07 # master @ Apr 14, 2024, 9:02 PM GMT+2
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248 # master @ 2023-12-07 - 10:22 PM GMT+1
if: ${{ matrix.channel != 'rust-toolchain' }}
with:
toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"

View file

@@ -13,7 +13,7 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
# End Checkout the repo
# Download hadolint - https://github.com/hadolint/hadolint/releases

View file

@@ -58,7 +58,7 @@ jobs:
steps:
# Checkout the repo
- name: Checkout
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
fetch-depth: 0
@@ -69,11 +69,11 @@ jobs:
# Start Docker Buildx
- name: Setup Docker Buildx
uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
# https://github.com/moby/buildkit/issues/3969
# Also set max parallelism to 2, the default of 4 breaks GitHub Actions
with:
buildkitd-config-inline: |
config-inline: |
[worker.oci]
max-parallelism = 2
driver-opts: |
@@ -102,7 +102,7 @@ jobs:
# Login to Docker Hub
- name: Login to Docker Hub
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -116,7 +116,7 @@ jobs:
# Login to GitHub Container Registry
- name: Login to GitHub Container Registry
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@@ -137,7 +137,7 @@ jobs:
# Login to Quay.io
- name: Login to Quay.io
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAY_USERNAME }}
@@ -171,7 +171,7 @@ jobs:
echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}localhost:5000/vaultwarden/server" | tee -a "${GITHUB_ENV}"
- name: Bake ${{ matrix.base_image }} containers
uses: docker/bake-action@73b0efa7a0e8ac276e0a8d5c580698a942ff10b5 # v4.4.0
uses: docker/bake-action@849707117b03d39aba7924c50a10376a69e88d7d # v4.1.0
env:
BASE_TAGS: "${{ env.BASE_TAGS }}"
SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}"
@@ -229,28 +229,28 @@ jobs:
# Upload artifacts to Github Actions
- name: "Upload amd64 artifact"
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-amd64
path: vaultwarden-amd64
- name: "Upload arm64 artifact"
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-arm64
path: vaultwarden-arm64
- name: "Upload armv7 artifact"
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv7
path: vaultwarden-armv7
- name: "Upload armv6 artifact"
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
if: ${{ matrix.base_image == 'alpine' }}
with:
name: vaultwarden-${{ env.SOURCE_VERSION }}-linux-armv6

View file

@@ -25,10 +25,10 @@ jobs:
actions: read
steps:
- name: Checkout code
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b #v4.1.4
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@d710430a6722f083d3b36b8339ff66b32f22ee55 # v0.19.0
uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca # v0.16.1
with:
scan-type: repo
ignore-unfixed: true
@@ -37,6 +37,6 @@ jobs:
severity: CRITICAL,HIGH
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@2bbafcdd7fbf96243689e764c2f15d9735164f33 # v3.25.3
uses: github/codeql-action/upload-sarif@b7bf0a3ed3ecfa44160715d7c442788f65f0f923 # v3.23.2
with:
sarif_file: 'trivy-results.sarif'

Cargo.lock (generated): 835 changes

File diff suppressed because it is too large.

View file

@@ -36,7 +36,7 @@ unstable = []
[target."cfg(not(windows))".dependencies]
# Logging
syslog = "6.1.1"
syslog = "6.1.0"
[dependencies]
# Logging
@@ -60,21 +60,21 @@ rocket = { version = "0.5.0", features = ["tls", "json"], default-features = fal
rocket_ws = { version ="0.1.0" }
# WebSockets libraries
rmpv = "1.0.2" # MessagePack library
rmpv = "1.0.1" # MessagePack library
# Concurrent HashMap used for WebSocket messaging and favicons
dashmap = "5.5.3"
# Async futures
futures = "0.3.30"
tokio = { version = "1.37.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
tokio = { version = "1.36.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
# A generic serialization/deserialization framework
serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
serde = { version = "1.0.197", features = ["derive"] }
serde_json = "1.0.114"
# A safe, extensible ORM and Query builder
diesel = { version = "2.1.6", features = ["chrono", "r2d2", "numeric"] }
diesel = { version = "2.1.5", features = ["chrono", "r2d2", "numeric"] }
diesel_migrations = "2.1.0"
diesel_logger = { version = "0.3.0", optional = true }
@@ -89,12 +89,12 @@ ring = "0.17.8"
uuid = { version = "1.8.0", features = ["v4"] }
# Date and time libraries
chrono = { version = "0.4.38", features = ["clock", "serde"], default-features = false }
chrono-tz = "0.9.0"
time = "0.3.36"
chrono = { version = "0.4.35", features = ["clock", "serde"], default-features = false }
chrono-tz = "0.8.6"
time = "0.3.34"
# Job scheduler
job_scheduler_ng = "2.0.5"
job_scheduler_ng = "2.0.4"
# Data encoding library Hex/Base32/Base64
data-encoding = "2.5.0"
@@ -115,28 +115,27 @@ webauthn-rs = "0.3.2"
url = "2.5.0"
# Email libraries
lettre = { version = "0.11.7", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
lettre = { version = "0.11.4", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false }
percent-encoding = "2.3.1" # URL encoding library used for URL's in the emails
email_address = "0.2.4"
# HTML Template library
handlebars = { version = "5.1.2", features = ["dir_source"] }
handlebars = { version = "5.1.0", features = ["dir_source"] }
# HTTP client (Used for favicons, version check, DUO and HIBP API)
reqwest = { version = "0.12.4", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
hickory-resolver = "0.24.1"
reqwest = { version = "0.11.27", features = ["default-tls", "native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies", "hickory-dns"], default-features = false}
# Favicon extraction libraries
html5gum = "0.5.7"
regex = { version = "1.10.4", features = ["std", "perf", "unicode-perl"], default-features = false }
regex = { version = "1.10.3", features = ["std", "perf", "unicode-perl"], default-features = false }
data-url = "0.3.1"
bytes = "1.6.0"
bytes = "1.5.0"
# Cache function results (Used for version check and favicon fetching)
cached = { version = "0.50.0", features = ["async"] }
cached = { version = "0.49.2", features = ["async"] }
# Used for custom short lived cookie jar during favicon extraction
cookie = "0.18.1"
cookie = "0.18.0"
cookie_store = "0.21.0"
# Used by U2F, JWT and PostgreSQL
@@ -154,8 +153,8 @@ semver = "1.0.22"
# Allow overriding the default memory allocator
# Mainly used for the musl builds, since the default musl malloc is very slow
mimalloc = { version = "0.1.41", features = ["secure"], default-features = false, optional = true }
which = "6.0.1"
mimalloc = { version = "0.1.39", features = ["secure"], default-features = false, optional = true }
which = "6.0.0"
# Argon2 library with support for the PHC format
argon2 = "0.5.3"
@@ -206,14 +205,14 @@ unsafe_code = "forbid"
non_ascii_idents = "forbid"
# Deny
future_incompatible = { level = "deny", priority = -1 }
future_incompatible = "deny"
noop_method_call = "deny"
pointer_structural_match = "deny"
rust_2018_idioms = { level = "deny", priority = -1 }
rust_2021_compatibility = { level = "deny", priority = -1 }
rust_2018_idioms = "deny"
rust_2021_compatibility = "deny"
trivial_casts = "deny"
trivial_numeric_casts = "deny"
unused = { level = "deny", priority = -1 }
unused = "deny"
unused_import_braces = "deny"
unused_lifetimes = "deny"
deprecated_in_future = "deny"

View file

@@ -1,10 +1,10 @@
---
vault_version: "v2024.3.1"
vault_image_digest: "sha256:689b1e706f29e1858a5c7e0ec82e40fac793322e5e0ac9102ab09c2620207cd5"
# Cross Compile Docker Helper Scripts v1.4.0
# Cross Compile Docker Helper Scripts v1.3.0
# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
xx_image_digest: "sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4"
rust_version: 1.77.2 # Rust version to be used
xx_image_digest: "sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc"
rust_version: 1.77.0 # Rust version to be used
debian_version: bookworm # Debian release name to be used
alpine_version: 3.19 # Alpine version to be used
# For which platforms/architectures will we try to build images

View file

@@ -31,10 +31,10 @@ FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:689b1e706f29e
########################## ALPINE BUILD IMAGES ##########################
## NOTE: The Alpine Base Images do not support other platforms than linux/amd64
## And for Alpine we define all build images here, they will only be loaded when actually used
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.77.2 as build_amd64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.77.2 as build_arm64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.77.2 as build_armv7
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.77.2 as build_armv6
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.77.0 as build_amd64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.77.0 as build_arm64
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.77.0 as build_armv7
FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.77.0 as build_armv6
########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
@@ -65,14 +65,13 @@ RUN mkdir -pv "${CARGO_HOME}" \
RUN USER=root cargo new --bin /app
WORKDIR /app
# Environment variables for Cargo on Alpine based builds
# Shared variables across Debian and Alpine
RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
# Output the current contents of the file
cat /env-cargo
# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc

View file

@@ -31,11 +31,11 @@ FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:689b1e706f29e
########################## Cross Compile Docker Helper Scripts ##########################
## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts
## And these bash scripts do not have any significant difference if at all
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4 AS xx
FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc AS xx
########################## BUILD IMAGE ##########################
# hadolint ignore=DL3006
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.77.2-slim-bookworm as build
FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.77.0-slim-bookworm as build
COPY --from=xx / /
ARG TARGETARCH
ARG TARGETVARIANT
@@ -88,17 +88,9 @@ RUN mkdir -pv "${CARGO_HOME}" \
RUN USER=root cargo new --bin /app
WORKDIR /app
# Environment variables for Cargo on Debian based builds
ARG ARCH_OPENSSL_LIB_DIR \
ARCH_OPENSSL_INCLUDE_DIR
# Environment variables for cargo across Debian and Alpine
RUN source /env-cargo && \
if xx-info is-cross ; then \
# Some special variables if needed to override some build paths
if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
fi && \
# We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
# Because of this we generate the needed environment variables here which we can load in the needed steps.
echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \

View file

@@ -108,17 +108,9 @@ RUN USER=root cargo new --bin /app
WORKDIR /app
{% if base == "debian" %}
# Environment variables for Cargo on Debian based builds
ARG ARCH_OPENSSL_LIB_DIR \
ARCH_OPENSSL_INCLUDE_DIR
# Environment variables for cargo across Debian and Alpine
RUN source /env-cargo && \
if xx-info is-cross ; then \
# Some special variables if needed to override some build paths
if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
fi && \
# We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
# Because of this we generate the needed environment variables here which we can load in the needed steps.
echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
@@ -134,14 +126,13 @@ RUN source /env-cargo && \
# Configure the DB ARG as late as possible to not invalidate the cached layers above
ARG DB=sqlite,mysql,postgresql
{% elif base == "alpine" %}
# Environment variables for Cargo on Alpine based builds
# Shared variables across Debian and Alpine
RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
# To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
# Output the current contents of the file
cat /env-cargo
# Configure the DB ARG as late as possible to not invalidate the cached layers above
# Enable MiMalloc to improve performance on Alpine builds
ARG DB=sqlite,mysql,postgresql,enable_mimalloc
{% endif %}

View file

@@ -11,11 +11,6 @@ With just these two files we can build both Debian and Alpine images for the fol
- armv7 (linux/arm/v7)
- armv6 (linux/arm/v6)
Some unsupported platforms for Debian-based images. These are not built and tested by default and are only provided to make it easier for users to build for these architectures.
- 386 (linux/386)
- ppc64le (linux/ppc64le)
- s390x (linux/s390x)
To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different than your host.<br>
This ensures the container build process can run binaries from other architectures.<br>

View file

@@ -125,40 +125,6 @@ target "debian-armv6" {
tags = generate_tags("", "-armv6")
}
// ==== Start of unsupported Debian architecture targets ===
// These are provided just to help users build for these rare platforms
// They will not be built by default
target "debian-386" {
inherits = ["debian"]
platforms = ["linux/386"]
tags = generate_tags("", "-386")
args = {
ARCH_OPENSSL_LIB_DIR = "/usr/lib/i386-linux-gnu"
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/i386-linux-gnu"
}
}
target "debian-ppc64le" {
inherits = ["debian"]
platforms = ["linux/ppc64le"]
tags = generate_tags("", "-ppc64le")
args = {
ARCH_OPENSSL_LIB_DIR = "/usr/lib/powerpc64le-linux-gnu"
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/powerpc64le-linux-gnu"
}
}
target "debian-s390x" {
inherits = ["debian"]
platforms = ["linux/s390x"]
tags = generate_tags("", "-s390x")
args = {
ARCH_OPENSSL_LIB_DIR = "/usr/lib/s390x-linux-gnu"
ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/s390x-linux-gnu"
}
}
// ==== End of unsupported Debian architecture targets ===
// A Group to build all platforms individually for local testing
group "debian-all" {
targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]

View file

@@ -1,4 +1,4 @@
[toolchain]
channel = "1.77.2"
channel = "1.77.0"
components = [ "rustfmt", "clippy" ]
profile = "minimal"

View file

@@ -701,7 +701,10 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
let (latest_release, latest_commit, latest_web_build) =
get_release_info(has_http_access, running_within_container).await;
let ip_header_name = &ip_header.0.unwrap_or_default();
let ip_header_name = match &ip_header.0 {
Some(h) => h,
_ => "",
};
let diagnostics_json = json!({
"dns_resolved": dns_resolved,
@@ -714,8 +717,8 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
"running_within_container": running_within_container,
"container_base_image": if running_within_container { container_base_image() } else { "Not applicable" },
"has_http_access": has_http_access,
"ip_header_exists": !ip_header_name.is_empty(),
"ip_header_match": ip_header_name.eq(&CONFIG.ip_header()),
"ip_header_exists": &ip_header.0.is_some(),
"ip_header_match": ip_header_name == CONFIG.ip_header(),
"ip_header_name": ip_header_name,
"ip_header_config": &CONFIG.ip_header(),
"uses_proxy": uses_proxy,

View file

@@ -166,8 +166,7 @@ pub async fn _register(data: Json<RegisterData>, mut conn: DbConn) -> JsonResult
}
user
} else if CONFIG.is_signup_allowed(&email)
|| (CONFIG.emergency_access_allowed()
&& EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some())
|| EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some()
{
user
} else {
@@ -218,6 +217,7 @@ pub async fn _register(data: Json<RegisterData>, mut conn: DbConn) -> JsonResult
if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid).await {
error!("Error sending welcome email: {:#?}", e);
}
user.last_verifying_at = Some(user.created_at);
} else if let Err(e) = mail::send_welcome(&user.email).await {
error!("Error sending welcome email: {:#?}", e);
@@ -229,14 +229,6 @@ pub async fn _register(data: Json<RegisterData>, mut conn: DbConn) -> JsonResult
}
user.save(&mut conn).await?;
// accept any open emergency access invitations
if !CONFIG.mail_enabled() && CONFIG.emergency_access_allowed() {
for mut emergency_invite in EmergencyAccess::find_all_invited_by_grantee_email(&user.email, &mut conn).await {
let _ = emergency_invite.accept_invite(&user.uuid, &user.email, &mut conn).await;
}
}
Ok(Json(json!({
"object": "register",
"captchaBypassToken": "",
@@ -441,46 +433,24 @@ async fn post_kdf(data: Json<ChangeKdfData>, headers: Headers, mut conn: DbConn,
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct UpdateFolderData {
// There is a bug in 2024.3.x which adds a `null` item.
// To bypass this we allow a Option here, but skip it during the updates
// See: https://github.com/bitwarden/clients/issues/8453
id: Option<String>,
id: String,
name: String,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct UpdateEmergencyAccessData {
id: String,
key_encrypted: String,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct UpdateResetPasswordData {
organization_id: String,
reset_password_key: String,
}
use super::ciphers::CipherData;
use super::sends::{update_send_from_data, SendData};
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct KeyData {
ciphers: Vec<CipherData>,
folders: Vec<UpdateFolderData>,
sends: Vec<SendData>,
emergency_access_keys: Vec<UpdateEmergencyAccessData>,
reset_password_keys: Vec<UpdateResetPasswordData>,
key: String,
master_password_hash: String,
private_key: String,
master_password_hash: String,
}
#[post("/accounts/key", data = "<data>")]
async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
// TODO: See if we can wrap everything within a SQL Transaction. If something fails it should revert everything.
let data: KeyData = data.into_inner();
if !headers.user.check_valid_password(&data.master_password_hash) {
@@ -497,83 +467,37 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, mut conn: DbConn,
// Update folder data
for folder_data in data.folders {
// Skip `null` folder id entries.
// See: https://github.com/bitwarden/clients/issues/8453
if let Some(folder_id) = folder_data.id {
let mut saved_folder = match Folder::find_by_uuid(&folder_id, &mut conn).await {
Some(folder) => folder,
None => err!("Folder doesn't exist"),
};
if &saved_folder.user_uuid != user_uuid {
err!("The folder is not owned by the user")
}
saved_folder.name = folder_data.name;
saved_folder.save(&mut conn).await?
}
}
// Update emergency access data
for emergency_access_data in data.emergency_access_keys {
let mut saved_emergency_access = match EmergencyAccess::find_by_uuid(&emergency_access_data.id, &mut conn).await
{
Some(emergency_access) => emergency_access,
None => err!("Emergency access doesn't exist"),
let mut saved_folder = match Folder::find_by_uuid(&folder_data.id, &mut conn).await {
Some(folder) => folder,
None => err!("Folder doesn't exist"),
};
if &saved_emergency_access.grantor_uuid != user_uuid {
err!("The emergency access is not owned by the user")
if &saved_folder.user_uuid != user_uuid {
err!("The folder is not owned by the user")
}
saved_emergency_access.key_encrypted = Some(emergency_access_data.key_encrypted);
saved_emergency_access.save(&mut conn).await?
}
// Update reset password data
for reset_password_data in data.reset_password_keys {
let mut user_org =
match UserOrganization::find_by_user_and_org(user_uuid, &reset_password_data.organization_id, &mut conn)
.await
{
Some(reset_password) => reset_password,
None => err!("Reset password doesn't exist"),
};
user_org.reset_password_key = Some(reset_password_data.reset_password_key);
user_org.save(&mut conn).await?
}
// Update send data
for send_data in data.sends {
let mut send = match Send::find_by_uuid(send_data.id.as_ref().unwrap(), &mut conn).await {
Some(send) => send,
None => err!("Send doesn't exist"),
};
update_send_from_data(&mut send, send_data, &headers, &mut conn, &nt, UpdateType::None).await?;
saved_folder.name = folder_data.name;
saved_folder.save(&mut conn).await?
}
// Update cipher data
use super::ciphers::update_cipher_from_data;
for cipher_data in data.ciphers {
if cipher_data.organization_id.is_none() {
let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.id.as_ref().unwrap(), &mut conn).await {
Some(cipher) => cipher,
None => err!("Cipher doesn't exist"),
};
let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.id.as_ref().unwrap(), &mut conn).await {
Some(cipher) => cipher,
None => err!("Cipher doesn't exist"),
};
if saved_cipher.user_uuid.as_ref().unwrap() != user_uuid {
err!("The cipher is not owned by the user")
}
// Prevent triggering cipher updates via WebSockets by setting UpdateType::None
// The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
// We force the users to logout after the user has been saved to try and prevent these issues.
update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None)
.await?
if saved_cipher.user_uuid.as_ref().unwrap() != user_uuid {
err!("The cipher is not owned by the user")
}
// Prevent triggering cipher updates via WebSockets by setting UpdateType::None
// The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
// We force the users to logout after the user has been saved to try and prevent these issues.
update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None)
.await?
}
// Update user data
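The `UpdateFolderData` change above turns `id` into an `Option<String>` so the `null` entries sent by affected 2024.3.x clients (bitwarden/clients#8453) deserialize instead of failing the whole rotation. A quick sketch of why `Option` suffices (the payload is made up):

```rust
use serde::Deserialize;

// Same shape as the struct in the diff: Option<String> lets a JSON `null` id
// deserialize to None rather than rejecting the request.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct UpdateFolderData {
    id: Option<String>,
    name: String,
}

fn main() {
    let data: UpdateFolderData =
        serde_json::from_str(r#"{"id": null, "name": "folder"}"#).unwrap();
    // The rotation loop then skips entries whose id is None.
    assert!(data.id.is_none());
    assert_eq!(data.name, "folder");
}
```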

View file

@@ -10,7 +10,6 @@ use rocket::{
};
use serde_json::Value;
use crate::util::NumberOrString;
use crate::{
api::{self, core::log_event, EmptyResult, JsonResult, Notify, PasswordOrOtpData, UpdateType},
auth::Headers,
@@ -206,7 +205,7 @@ pub struct CipherData {
// Folder id is not included in import
folder_id: Option<String>,
// TODO: Some of these might appear all the time, no need for Option
pub organization_id: Option<String>,
organization_id: Option<String>,
key: Option<String>,
@@ -317,7 +316,7 @@ async fn post_ciphers(data: Json<CipherData>, headers: Headers, mut conn: DbConn
data.last_known_revision_date = None;
let mut cipher = Cipher::new(data.r#type, data.name.clone());
update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;
update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
}
@@ -348,7 +347,7 @@ pub async fn update_cipher_from_data(
cipher: &mut Cipher,
data: CipherData,
headers: &Headers,
shared_to_collections: Option<Vec<String>>,
shared_to_collection: bool,
conn: &mut DbConn,
nt: &Notify<'_>,
ut: UpdateType,
@@ -387,7 +386,7 @@ pub async fn update_cipher_from_data(
match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await {
None => err!("You don't have permission to add item to organization"),
Some(org_user) => {
if shared_to_collections.is_some()
if shared_to_collection
|| org_user.has_full_access()
|| cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await
{
@@ -514,15 +513,8 @@ pub async fn update_cipher_from_data(
)
.await;
}
nt.send_cipher_update(
ut,
cipher,
&cipher.update_users_revision(conn).await,
&headers.device.uuid,
shared_to_collections,
conn,
)
.await;
nt.send_cipher_update(ut, cipher, &cipher.update_users_revision(conn).await, &headers.device.uuid, None, conn)
.await;
}
Ok(())
}
@@ -583,7 +575,7 @@ async fn post_ciphers_import(
cipher_data.folder_id = folder_uuid;
let mut cipher = Cipher::new(cipher_data.r#type, cipher_data.name.clone());
update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await?;
update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await?;
}
let mut user = headers.user;
@@ -645,7 +637,7 @@ async fn put_cipher(
err!("Cipher is not write accessible")
}
update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;
update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
}
@@ -890,7 +882,7 @@ async fn share_cipher_by_uuid(
None => err!("Cipher doesn't exist"),
};
let mut shared_to_collections = vec![];
let mut shared_to_collection = false;
if let Some(organization_uuid) = &data.cipher.organization_id {
for uuid in &data.collection_ids {
@@ -899,7 +891,7 @@
Some(collection) => {
if collection.is_writable_by_user(&headers.user.uuid, conn).await {
CollectionCipher::save(&cipher.uuid, &collection.uuid, conn).await?;
shared_to_collections.push(collection.uuid);
shared_to_collection = true;
} else {
err!("No rights to modify the collection")
}
@@ -915,7 +907,7 @@
UpdateType::SyncCipherCreate
};
update_cipher_from_data(&mut cipher, data.cipher, headers, Some(shared_to_collections), conn, nt, ut).await?;
update_cipher_from_data(&mut cipher, data.cipher, headers, shared_to_collection, conn, nt, ut).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
}
@@ -949,7 +941,7 @@ async fn get_attachment(uuid: &str, attachment_id: &str, headers: Headers, mut c
struct AttachmentRequestData {
key: String,
file_name: String,
file_size: NumberOrString,
file_size: i64,
admin_request: Option<bool>, // true when attaching from an org vault view
}
@@ -979,20 +971,18 @@ async fn post_attachment_v2(
}
let data: AttachmentRequestData = data.into_inner();
let file_size = data.file_size.into_i64()?;
if file_size < 0 {
if data.file_size < 0 {
err!("Attachment size can't be negative")
}
let attachment_id = crypto::generate_attachment_id();
let attachment =
Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.file_name, file_size, Some(data.key));
Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.file_name, data.file_size, Some(data.key));
attachment.save(&mut conn).await.expect("Error saving attachment");
let url = format!("/ciphers/{}/attachment/{}", cipher.uuid, attachment_id);
let response_key = match data.admin_request {
Some(b) if b => "cipherMiniResponse",
_ => "cipherResponse",
Some(b) if b => "CipherMiniResponse",
_ => "CipherResponse",
};
Ok(Json(json!({ // AttachmentUploadDataResponseModel
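This hunk swaps a plain `i64` for the `NumberOrString` helper imported from `crate::util`, which accepts the attachment size as either a JSON number or a numeric string. A minimal reimplementation of that pattern for illustration (the real helper may differ in detail):

```rust
use serde::Deserialize;

// Untagged enums try each variant in order, so both `42` and `"42"` parse.
#[derive(Deserialize)]
#[serde(untagged)]
enum NumberOrString {
    Number(i64),
    String(String),
}

impl NumberOrString {
    fn into_i64(self) -> Result<i64, String> {
        match self {
            NumberOrString::Number(n) => Ok(n),
            NumberOrString::String(s) => s.parse().map_err(|e| format!("Invalid number: {e}")),
        }
    }
}

fn main() {
    let a: NumberOrString = serde_json::from_str("42").unwrap();
    let b: NumberOrString = serde_json::from_str("\"42\"").unwrap();
    assert_eq!(a.into_i64().unwrap(), 42);
    assert_eq!(b.into_i64().unwrap(), 42);
}
```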

View file

@@ -61,9 +61,7 @@ async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json<Value> {
let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
for ea in emergency_access_list {
if let Some(grantee) = ea.to_json_grantee_details(&mut conn).await {
emergency_access_list_json.push(grantee)
}
emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
}
Json(json!({
@@ -97,9 +95,7 @@ async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
check_emergency_access_enabled()?;
match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
Some(emergency_access) => Ok(Json(
emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"),
)),
Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
None => err!("Emergency access not valid."),
}
}
@@ -209,7 +205,7 @@ async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mu
err!("You can not set yourself as an emergency contact.")
}
let (grantee_user, new_user) = match User::find_by_mail(&email, &mut conn).await {
let grantee_user = match User::find_by_mail(&email, &mut conn).await {
None => {
if !CONFIG.invitations_allowed() {
err!(format!("Grantee user does not exist: {}", &email))
@@ -226,10 +222,9 @@ async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mu
let mut user = User::new(email.clone());
user.save(&mut conn).await?;
(user, true)
user
}
Some(user) if user.password_hash.is_empty() => (user, true),
Some(user) => (user, false),
Some(user) => user,
};
if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
@@ -257,9 +252,15 @@ async fn send_invite(data: Json<EmergencyAccessInviteData>, headers: Headers, mu
&grantor_user.email,
)
.await?;
} else if !new_user {
// if mail is not enabled immediately accept the invitation for existing users
new_emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
} else {
// Automatically mark user as accepted if no email invites
match User::find_by_mail(&email, &mut conn).await {
Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
},
None => err!("Grantee user not found."),
}
}
Ok(())
@@ -303,12 +304,17 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp
&grantor_user.email,
)
.await?;
} else if !grantee_user.password_hash.is_empty() {
// accept the invitation for existing user
emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
} else if CONFIG.invitations_allowed() && Invitation::find_by_mail(&email, &mut conn).await.is_none() {
let invitation = Invitation::new(&email);
invitation.save(&mut conn).await?;
} else {
if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
let invitation = Invitation::new(&email);
invitation.save(&mut conn).await?;
}
// Automatically mark user as accepted if no email invites
match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
}
}
Ok(())
@@ -357,7 +363,10 @@ async fn accept_invite(emer_id: &str, data: Json<AcceptData>, headers: Headers,
&& grantor_user.name == claims.grantor_name
&& grantor_user.email == claims.grantor_email
{
emergency_access.accept_invite(&grantee_user.uuid, &grantee_user.email, &mut conn).await?;
match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
Ok(v) => v,
Err(e) => err!(e.to_string()),
}
if CONFIG.mail_enabled() {
mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
@@ -369,6 +378,26 @@
}
}
async fn accept_invite_process(
grantee_uuid: &str,
emergency_access: &mut EmergencyAccess,
grantee_email: &str,
conn: &mut DbConn,
) -> EmptyResult {
if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
err!("User email does not match invite.");
}
if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
err!("Emergency contact already accepted.");
}
emergency_access.status = EmergencyAccessStatus::Accepted as i32;
emergency_access.grantee_uuid = Some(String::from(grantee_uuid));
emergency_access.email = None;
emergency_access.save(conn).await
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct ConfirmData {

View file

@@ -186,17 +186,14 @@ fn version() -> Json<&'static str> {
#[get("/config")]
fn config() -> Json<Value> {
let domain = crate::CONFIG.domain();
let mut feature_states =
parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
// Force the new key rotation feature
feature_states.insert("key-rotation-improvements".to_string(), true);
let feature_states = parse_experimental_client_feature_flags(&crate::CONFIG.experimental_client_feature_flags());
Json(json!({
// Note: The clients use this version to handle backwards compatibility concerns
// This means they expect a version that closely matches the Bitwarden server version
// We should make sure that we keep this updated when we support the new server features
// Version history:
// - Individual cipher key encryption: 2023.9.1
"version": "2024.2.0",
"version": "2023.9.1",
"gitHash": option_env!("GIT_REV"),
"server": {
"name": "Vaultwarden",

View file

@@ -329,19 +329,27 @@ async fn get_org_collections_details(org_id: &str, headers: ManagerHeadersLoose,
&& GroupUser::has_full_access_by_member(org_id, &user_org.uuid, &mut conn).await);
for col in Collection::find_by_organization(org_id, &mut conn).await {
// check whether the current user has access to the given collection
let assigned = has_full_access_to_org
|| CollectionUser::has_access_to_collection_by_user(&col.uuid, &user_org.user_uuid, &mut conn).await
|| (CONFIG.org_groups_enabled()
&& GroupUser::has_access_to_collection_by_member(&col.uuid, &user_org.uuid, &mut conn).await);
// assigned indicates whether the current user has access to the given collection
let mut assigned = has_full_access_to_org;
// get the users assigned directly to the given collection
let users: Vec<Value> = coll_users
.iter()
.filter(|collection_user| collection_user.collection_uuid == col.uuid)
.map(|collection_user| SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json())
.map(|collection_user| {
// check if the current user is assigned to this collection directly
if collection_user.user_uuid == user_org.uuid {
assigned = true;
}
SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json()
})
.collect();
// check if the current user has access to the given collection via a group
if !assigned && CONFIG.org_groups_enabled() {
assigned = GroupUser::has_access_to_collection_by_member(&col.uuid, &user_org.uuid, &mut conn).await;
}
// get the group details for the given collection
let groups: Vec<Value> = if CONFIG.org_groups_enabled() {
CollectionGroup::find_by_collection(&col.uuid, &mut conn)
@@ -666,16 +674,24 @@ async fn get_org_collection_detail(
Vec::with_capacity(0)
};
let mut assigned = false;
let users: Vec<Value> =
CollectionUser::find_by_collection_swap_user_uuid_with_org_user_uuid(&collection.uuid, &mut conn)
.await
.iter()
.map(|collection_user| {
// Remember `user_uuid` is swapped here with the `user_org.uuid` with a join during the `find_by_collection_swap_user_uuid_with_org_user_uuid` call.
// We check here if the current user is assigned to this collection or not.
if collection_user.user_uuid == user_org.uuid {
assigned = true;
}
SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json()
})
.collect();
let assigned = Collection::can_access_collection(&user_org, &collection.uuid, &mut conn).await;
if user_org.access_all {
assigned = true;
}
let mut json_object = collection.to_json();
json_object["assigned"] = json!(assigned);
@@ -1595,7 +1611,7 @@ async fn post_org_import(
let mut ciphers = Vec::new();
for cipher_data in data.ciphers {
let mut cipher = Cipher::new(cipher_data.r#type, cipher_data.name.clone());
update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await.ok();
update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await.ok();
ciphers.push(cipher);
}
@@ -2227,7 +2243,7 @@ impl GroupRequest {
}
pub fn update_group(&self, mut group: Group) -> Group {
group.name.clone_from(&self.name);
group.name = self.name.clone();
group.access_all = self.access_all.unwrap_or(false);
// Group Updates do not support changing the external_id
// These input fields are in a disabled state, and can only be updated/added via ldap_import
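One side of the `group.name` hunk above uses `clone_from`, the other a plain `clone()` assignment. `clone_from` can reuse the destination's existing allocation where assignment always builds a fresh clone, which is likely what clippy's `assigning_clones` lint nudges toward. A tiny sketch:

```rust
fn main() {
    let src = String::from("renamed group");
    let mut dst = String::with_capacity(64);

    // clone_from may reuse dst's buffer instead of allocating a new String...
    dst.clone_from(&src);
    assert_eq!(dst, "renamed group");

    // ...whereas plain assignment drops dst and allocates a fresh clone.
    dst = src.clone();
    assert_eq!(dst, "renamed group");
}
```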

View file

@@ -49,7 +49,7 @@ pub async fn purge_sends(pool: DbPool) {
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SendData {
struct SendData {
r#type: i32,
key: String,
password: Option<String>,
@@ -65,9 +65,6 @@ pub struct SendData {
text: Option<Value>,
file: Option<Value>,
file_length: Option<NumberOrString>,
// Used for key rotations
pub id: Option<String>,
}
/// Enforces the `Disable Send` policy. A non-owner/admin user belonging to
@@ -546,19 +543,6 @@ async fn put_send(id: &str, data: Json<SendData>, headers: Headers, mut conn: Db
None => err!("Send not found"),
};
update_send_from_data(&mut send, data, &headers, &mut conn, &nt, UpdateType::SyncSendUpdate).await?;
Ok(Json(send.to_json()))
}
pub async fn update_send_from_data(
send: &mut Send,
data: SendData,
headers: &Headers,
conn: &mut DbConn,
nt: &Notify<'_>,
ut: UpdateType,
) -> EmptyResult {
if send.user_uuid.as_ref() != Some(&headers.user.uuid) {
err!("Send is not owned by user")
}
@@ -567,12 +551,6 @@ pub async fn update_send_from_data,
err!("Sends can't change type")
}
if data.deletion_date > Utc::now() + TimeDelta::try_days(31).unwrap() {
err!(
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
);
}
// When updating a file Send, we receive nulls in the File field, as it's immutable,
// so we only need to update the data field in the Text case
if data.r#type == SendType::Text as i32 {
@@ -585,6 +563,11 @@
send.data = data_str;
}
if data.deletion_date > Utc::now() + TimeDelta::try_days(31).unwrap() {
err!(
"You cannot have a Send with a deletion date that far into the future. Adjust the Deletion Date to a value less than 31 days from now and try again."
);
}
send.name = data.name;
send.akey = data.key;
send.deletion_date = data.deletion_date.naive_utc();
@@ -602,11 +585,17 @@ pub async fn update_send_from_data(
send.set_password(Some(&password));
}
send.save(conn).await?;
if ut != UpdateType::None {
nt.send_send_update(ut, send, &send.update_users_revision(conn).await, &headers.device.uuid, conn).await;
}
Ok(())
send.save(&mut conn).await?;
nt.send_send_update(
UpdateType::SyncSendUpdate,
&send,
&send.update_users_revision(&mut conn).await,
&headers.device.uuid,
&mut conn,
)
.await;
Ok(Json(send.to_json()))
}
#[delete("/sends/<id>")]
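Both sides of the hunks above bound the Send deletion date with chrono's `TimeDelta::try_days`, the checked constructor that returns `None` on overflow instead of panicking. A small sketch of the 31-day guard (dates are hypothetical):

```rust
use chrono::{TimeDelta, Utc};

fn main() {
    let now = Utc::now();
    // try_days returns Option<TimeDelta>; 31 days is well within range.
    let limit = now + TimeDelta::try_days(31).unwrap();

    // Mirrors the guard in the diff: a deletion date past the limit is rejected.
    let requested = now + TimeDelta::try_days(45).unwrap();
    assert!(requested > limit);
}
```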

View file

@@ -1,6 +1,6 @@
use std::{
net::IpAddr,
sync::{Arc, Mutex},
sync::Arc,
time::{Duration, SystemTime},
};
@@ -16,13 +16,14 @@ use rocket::{http::ContentType, response::Redirect, Route};
use tokio::{
fs::{create_dir_all, remove_file, symlink_metadata, File},
io::{AsyncReadExt, AsyncWriteExt},
net::lookup_host,
};
use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer};
use crate::{
error::Error,
util::{get_reqwest_client_builder, Cached, CustomDnsResolver, CustomResolverError},
util::{get_reqwest_client_builder, Cached},
CONFIG,
};
@@ -48,32 +49,48 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
let icon_download_timeout = Duration::from_secs(CONFIG.icon_download_timeout());
let pool_idle_timeout = Duration::from_secs(10);
// Reuse the client between requests
get_reqwest_client_builder()
let client = get_reqwest_client_builder()
.cookie_provider(Arc::clone(&cookie_store))
.timeout(icon_download_timeout)
.pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
.pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
.dns_resolver(CustomDnsResolver::instance())
.default_headers(default_headers.clone())
.build()
.expect("Failed to build client")
.hickory_dns(true)
.default_headers(default_headers.clone());
match client.build() {
Ok(client) => client,
Err(e) => {
error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
get_reqwest_client_builder()
.cookie_provider(cookie_store)
.timeout(icon_download_timeout)
.pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
.pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
.hickory_dns(false)
.default_headers(default_headers)
.build()
.expect("Failed to build client")
}
}
});
// Build Regex only once since this takes a lot of time.
static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());
#[get("/<domain>/icon.png")]
fn icon_external(domain: &str) -> Option<Redirect> {
// Special HashMap which holds the user-defined Regex to speed up matching.
static ICON_BLACKLIST_REGEX: Lazy<dashmap::DashMap<String, Regex>> = Lazy::new(dashmap::DashMap::new);
async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
if !is_valid_domain(domain) {
warn!("Invalid domain: {}", domain);
return None;
}
if is_domain_blacklisted(domain) {
if check_domain_blacklist_reason(domain).await.is_some() {
return None;
}
let url = CONFIG._icon_service_url().replace("{}", domain);
let url = template.replace("{}", domain);
match CONFIG.icon_redirect_code() {
301 => Some(Redirect::moved(url)), // legacy permanent redirect
302 => Some(Redirect::found(url)), // legacy temporary redirect
@@ -86,6 +103,11 @@ fn icon_external(domain: &str) -> Option<Redirect> {
}
}
#[get("/<domain>/icon.png")]
async fn icon_external(domain: &str) -> Option<Redirect> {
icon_redirect(domain, &CONFIG._icon_service_url()).await
}
#[get("/<domain>/icon.png")]
async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");
@@ -144,28 +166,153 @@ fn is_valid_domain(domain: &str) -> bool {
true
}
pub fn is_domain_blacklisted(domain: &str) -> bool {
let Some(config_blacklist) = CONFIG.icon_blacklist_regex() else {
return false;
};
/// TODO: This is extracted from IpAddr::is_global, which is unstable:
/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
#[allow(clippy::nonminimal_bool)]
#[cfg(not(feature = "unstable"))]
fn is_global(ip: IpAddr) -> bool {
match ip {
IpAddr::V4(ip) => {
// check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
// globally routable addresses in the 192.0.0.0/24 range.
if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a {
return true;
}
!ip.is_private()
&& !ip.is_loopback()
&& !ip.is_link_local()
&& !ip.is_broadcast()
&& !ip.is_documentation()
&& !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000))
&& !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
&& !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast())
&& !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18)
// Make sure the address is not in 0.0.0.0/8
&& ip.octets()[0] != 0
}
IpAddr::V6(ip) => {
if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 {
true
} else {
!ip.is_multicast()
&& !ip.is_loopback()
&& !((ip.segments()[0] & 0xffc0) == 0xfe80)
&& !((ip.segments()[0] & 0xfe00) == 0xfc00)
&& !ip.is_unspecified()
&& !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8))
}
}
}
}
// Compiled domain blacklist
static COMPILED_BLACKLIST: Mutex<Option<(String, Regex)>> = Mutex::new(None);
let mut guard = COMPILED_BLACKLIST.lock().unwrap();
#[cfg(feature = "unstable")]
fn is_global(ip: IpAddr) -> bool {
ip.is_global()
}
// If the stored regex is up to date, use it
if let Some((value, regex)) = &*guard {
if value == &config_blacklist {
return regex.is_match(domain);
/// These are some tests to check that the implementations match
/// The IPv4 can be all checked in 5 mins or so and they are correct as of nightly 2020-07-11
/// The IPV6 can't be checked in a reasonable time, so we check about ten billion random ones, so far correct
/// Note that the is_global implementation is subject to change as new IP RFCs are created
///
/// To run while showing progress output:
/// cargo test --features sqlite,unstable -- --nocapture --ignored
#[cfg(test)]
#[cfg(feature = "unstable")]
mod tests {
use super::*;
#[test]
#[ignore]
fn test_ipv4_global() {
for a in 0..u8::MAX {
println!("Iter: {}/255", a);
for b in 0..u8::MAX {
for c in 0..u8::MAX {
for d in 0..u8::MAX {
let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
assert_eq!(ip.is_global(), is_global(ip))
}
}
}
}
}
// If we don't have a regex stored, or it's not up to date, recreate it
let regex = Regex::new(&config_blacklist).unwrap();
let is_match = regex.is_match(domain);
*guard = Some((config_blacklist, regex));
#[test]
#[ignore]
fn test_ipv6_global() {
use ring::rand::{SecureRandom, SystemRandom};
let mut v = [0u8; 16];
let rand = SystemRandom::new();
for i in 0..1_000 {
println!("Iter: {}/1_000", i);
for _ in 0..10_000_000 {
rand.fill(&mut v).expect("Error generating random values");
let ip = IpAddr::V6(std::net::Ipv6Addr::new(
(v[14] as u16) << 8 | v[15] as u16,
(v[12] as u16) << 8 | v[13] as u16,
(v[10] as u16) << 8 | v[11] as u16,
(v[8] as u16) << 8 | v[9] as u16,
(v[6] as u16) << 8 | v[7] as u16,
(v[4] as u16) << 8 | v[5] as u16,
(v[2] as u16) << 8 | v[3] as u16,
(v[0] as u16) << 8 | v[1] as u16,
));
assert_eq!(ip.is_global(), is_global(ip))
}
}
}
}
is_match
#[derive(Clone)]
enum DomainBlacklistReason {
Regex,
IP,
}
use cached::proc_macro::cached;
#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
async fn check_domain_blacklist_reason(domain: &str) -> Option<DomainBlacklistReason> {
// First check the blacklist regex if there is a match.
// This prevents the blocked domain(s) from being leaked via a DNS lookup.
if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
// Use the pre-generated Regex stored in a Lazy HashMap if there's one, else generate it.
let is_match = if let Some(regex) = ICON_BLACKLIST_REGEX.get(&blacklist) {
regex.is_match(domain)
} else {
// Clear the current list if the previous key doesn't exist.
// To prevent growing of the HashMap after someone has changed it via the admin interface.
if ICON_BLACKLIST_REGEX.len() >= 1 {
ICON_BLACKLIST_REGEX.clear();
}
// Generate the regex to store into the Lazy Static HashMap.
let blacklist_regex = Regex::new(&blacklist).unwrap();
let is_match = blacklist_regex.is_match(domain);
ICON_BLACKLIST_REGEX.insert(blacklist.clone(), blacklist_regex);
is_match
};
if is_match {
debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
return Some(DomainBlacklistReason::Regex);
}
}
if CONFIG.icon_blacklist_non_global_ips() {
if let Ok(s) = lookup_host((domain, 0)).await {
for addr in s {
if !is_global(addr.ip()) {
debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain);
return Some(DomainBlacklistReason::IP);
}
}
}
}
None
}
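The `#[cached]` attribute above memoizes `check_domain_blacklist_reason`, keyed on the domain, bounded to 16 entries, and expiring after 60 seconds. A minimal sketch of the same macro on a simplified synchronous function (the name and predicate are stand-ins):

```rust
use cached::proc_macro::cached;

// Results are keyed by the domain string and kept in a timed, size-bounded
// cache, so repeated icon requests skip the expensive regex/DNS work.
#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
fn expensive_check(domain: &str) -> bool {
    // Stand-in for the real blacklist logic.
    domain.ends_with(".example")
}

fn main() {
    assert!(expensive_check("foo.example")); // computed
    assert!(expensive_check("foo.example")); // served from the cache
}
```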
async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
@@ -195,13 +342,6 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
}
Err(e) => {
// If this error comes from the custom resolver, this means this is a blacklisted domain
// or non global IP, don't save the miss file in this case to avoid leaking it
if let Some(error) = CustomResolverError::downcast_ref(&e) {
warn!("{error}");
return None;
}
warn!("Unable to download icon: {:?}", e);
let miss_indicator = path + ".miss";
save_icon(&miss_indicator, &[]).await;
@@ -351,48 +491,42 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
let ssldomain = format!("https://{domain}");
let httpdomain = format!("http://{domain}");
// First check the domain as given during the request for HTTPS.
let resp = match get_page(&ssldomain).await {
Err(e) if CustomResolverError::downcast_ref(&e).is_none() => {
// If we get an error that is not caused by the blacklist, we retry with HTTP
match get_page(&httpdomain).await {
mut sub_resp @ Err(_) => {
// When the domain is not an IP, and has more than one dot, remove all subdomains.
let is_ip = domain.parse::<IpAddr>();
if is_ip.is_err() && domain.matches('.').count() > 1 {
let mut domain_parts = domain.split('.');
let base_domain = format!(
"{base}.{tld}",
tld = domain_parts.next_back().unwrap(),
base = domain_parts.next_back().unwrap()
);
if is_valid_domain(&base_domain) {
let sslbase = format!("https://{base_domain}");
let httpbase = format!("http://{base_domain}");
debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
// First check the domain as given during the request for both HTTPS and HTTP.
let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await {
Ok(c) => Ok(c),
Err(e) => {
let mut sub_resp = Err(e);
sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
}
// When the domain is not an IP, and has more than one dot, remove all subdomains.
let is_ip = domain.parse::<IpAddr>();
if is_ip.is_err() && domain.matches('.').count() > 1 {
let mut domain_parts = domain.split('.');
let base_domain = format!(
"{base}.{tld}",
tld = domain_parts.next_back().unwrap(),
base = domain_parts.next_back().unwrap()
);
if is_valid_domain(&base_domain) {
let sslbase = format!("https://{base_domain}");
let httpbase = format!("http://{base_domain}");
debug!("[get_icon_url]: Trying without subdomains '{base_domain}'");
// When the domain is not an IP, and has less than 2 dots, try to add www. in front of it.
} else if is_ip.is_err() && domain.matches('.').count() < 2 {
let www_domain = format!("www.{domain}");
if is_valid_domain(&www_domain) {
let sslwww = format!("https://{www_domain}");
let httpwww = format!("http://{www_domain}");
debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
}
}
sub_resp
sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await;
}
res => res,
}
}
// If we get a result or a blacklist error, just continue
res => res,
// When the domain is not an IP, and has less than 2 dots, try to add www. in front of it.
} else if is_ip.is_err() && domain.matches('.').count() < 2 {
let www_domain = format!("www.{domain}");
if is_valid_domain(&www_domain) {
let sslwww = format!("https://{www_domain}");
let httpwww = format!("http://{www_domain}");
debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'");
sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await;
}
}
sub_resp
}
};
// Create the iconlist
@@ -439,12 +573,21 @@ async fn get_page(url: &str) -> Result<Response, Error> {
}
async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
match check_domain_blacklist_reason(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
Some(DomainBlacklistReason::Regex) => warn!("Favicon '{}' is from a blacklisted domain!", url),
Some(DomainBlacklistReason::IP) => warn!("Favicon '{}' is hosted on a non-global IP!", url),
None => (),
}
let mut client = CLIENT.get(url);
if !referer.is_empty() {
client = client.header("Referer", referer)
}
Ok(client.send().await?.error_for_status()?)
match client.send().await {
Ok(c) => c.error_for_status().map_err(Into::into),
Err(e) => err_silent!(format!("{e}")),
}
}
/// Returns a Integer with the priority of the type of the icon which to prefer.
@@ -527,6 +670,12 @@ fn parse_sizes(sizes: &str) -> (u16, u16) {
}
async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
match check_domain_blacklist_reason(domain).await {
Some(DomainBlacklistReason::Regex) => err_silent!("Domain is blacklisted", domain),
Some(DomainBlacklistReason::IP) => err_silent!("Host resolves to a non-global IP", domain),
None => (),
}
let icon_result = get_icon_url(domain).await?;
let mut buffer = Bytes::new();
@@ -562,19 +711,22 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
_ => debug!("Extracted icon from data:image uri is invalid"),
};
} else {
    let res = get_page_with_referer(&icon.href, &icon_result.referer).await?;

    buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)

    // Check if the icon type is allowed, else try an icon from the list.
    icon_type = get_icon_type(&buffer);
    if icon_type.is_none() {
        buffer.clear();
        debug!("Icon from {}, is not a valid image type", icon.href);
        continue;
    }
    info!("Downloaded icon from {}", icon.href);
    break;
    match get_page_with_referer(&icon.href, &icon_result.referer).await {
        Ok(res) => {
            buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)

            // Check if the icon type is allowed, else try an icon from the list.
            icon_type = get_icon_type(&buffer);
            if icon_type.is_none() {
                buffer.clear();
                debug!("Icon from {}, is not a valid image type", icon.href);
                continue;
            }
            info!("Downloaded icon from {}", icon.href);
            break;
        }
        Err(e) => debug!("{:?}", e),
    };
}
}
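The 5120 * 1024 cap is enforced by stream_to_bytes_limit in src/util.rs; a rough sketch of that size-capping pattern (assuming reqwest's "stream" feature, not the exact upstream implementation):

use bytes::{Bytes, BytesMut};
use futures::StreamExt;

// Collect body chunks until `max_size` bytes are buffered, then stop reading.
async fn stream_to_bytes_limit(res: reqwest::Response, max_size: usize) -> Result<Bytes, reqwest::Error> {
    let mut stream = res.bytes_stream();
    let mut buf = BytesMut::new();
    while let Some(chunk) = stream.next().await {
        let chunk = chunk?;
        let remaining = max_size - buf.len();
        if chunk.len() >= remaining {
            buf.extend_from_slice(&chunk[..remaining]); // cap at max_size
            break;
        }
        buf.extend_from_slice(&chunk);
    }
    Ok(buf.freeze())
}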

View file

@ -120,14 +120,14 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
"expires_in": expires_in,
"token_type": "Bearer",
"refresh_token": device.refresh_token,
"Key": user.akey,
"PrivateKey": user.private_key,
"key": user.akey,
"privateKey": user.private_key,
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter,
"KdfMemory": user.client_kdf_memory,
"KdfParallelism": user.client_kdf_parallelism,
"ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
"kdf": user.client_kdf_type,
"kdfIterations": user.client_kdf_iter,
"kdfMemory": user.client_kdf_memory,
"kdfParallelism": user.client_kdf_parallelism,
"resetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
"scope": scope,
"unofficialServer": true,
});
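Since the commit only renames keys, a client can absorb the change mechanically; a hypothetical serde model for the camelCase fields above (illustrative names; OAuth-standard members like access_token stay snake_case and are omitted):

use serde::Deserialize;

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct KdfInfo {
    key: String,
    private_key: Option<String>,  // maps to "privateKey"
    kdf: i32,
    kdf_iterations: i32,          // maps to "kdfIterations"
    kdf_memory: Option<i32>,      // maps to "kdfMemory"
    kdf_parallelism: Option<i32>, // maps to "kdfParallelism"
    reset_master_password: bool,  // maps to "resetMasterPassword"
}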
@ -287,30 +287,25 @@ async fn _password_login(
"expires_in": expires_in,
"token_type": "Bearer",
"refresh_token": device.refresh_token,
"Key": user.akey,
"PrivateKey": user.private_key,
//"TwoFactorToken": "11122233333444555666777888999"
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter,
"KdfMemory": user.client_kdf_memory,
"KdfParallelism": user.client_kdf_parallelism,
"ResetMasterPassword": false, // TODO: Same as above
"ForcePasswordReset": false,
"MasterPasswordPolicy": {
"object": "masterPasswordPolicy",
},
"key": user.akey,
"privateKey": user.private_key,
//"twoFactorToken": "11122233333444555666777888999"
"kdf": user.client_kdf_type,
"kdfIterations": user.client_kdf_iter,
"kdfMemory": user.client_kdf_memory,
"kdfParallelism": user.client_kdf_parallelism,
"resetMasterPassword": false,// TODO: Same as above
"scope": scope,
"unofficialServer": true,
"UserDecryptionOptions": {
"HasMasterPassword": !user.password_hash.is_empty(),
"Object": "userDecryptionOptions"
"userDecryptionOptions": {
"hasMasterPassword": !user.password_hash.is_empty(),
"object": "userDecryptionOptions"
},
});
if let Some(token) = twofactor_token {
result["TwoFactorToken"] = Value::String(token);
result["twoFactorToken"] = Value::String(token);
}
info!("User {} logged in successfully. IP: {}", username, ip.ip);
@ -414,14 +409,14 @@ async fn _user_api_key_login(
"access_token": access_token,
"expires_in": expires_in,
"token_type": "Bearer",
"Key": user.akey,
"PrivateKey": user.private_key,
"key": user.akey,
"privateKey": user.private_key,
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter,
"KdfMemory": user.client_kdf_memory,
"KdfParallelism": user.client_kdf_parallelism,
"ResetMasterPassword": false, // TODO: Same as above
"kdf": user.client_kdf_type,
"kdfIterations": user.client_kdf_iter,
"kdfMemory": user.client_kdf_memory,
"kdfParallelism": user.client_kdf_parallelism,
"resetMasterPassword": false, // TODO: Same as above
"scope": "api",
"unofficialServer": true,
});
@ -564,6 +559,7 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
let mut result = json!({
"error" : "invalid_grant",
"error_description" : "Two factor required.",
// These two are intentionally left with PascalCase, as of 2024-02-27 the clients still use them like this
"TwoFactorProviders" : providers,
"TwoFactorProviders2" : {} // { "0" : null }
});
@ -588,8 +584,8 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
let (signature, host) = duo::generate_duo_signature(&email, conn).await?;
result["TwoFactorProviders2"][provider.to_string()] = json!({
"Host": host,
"Signature": signature,
"host": host,
"signature": signature,
});
}
@ -602,7 +598,7 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
let yubikey_metadata: yubikey::YubikeyMetadata = serde_json::from_str(&twofactor.data)?;
result["TwoFactorProviders2"][provider.to_string()] = json!({
"Nfc": yubikey_metadata.nfc,
"nfc": yubikey_metadata.nfc,
})
}
@ -619,7 +615,7 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo
let email_data = email::EmailTokenData::from_json(&twofactor.data)?;
result["TwoFactorProviders2"][provider.to_string()] = json!({
"Email": email::obscure_email(&email_data.email),
"email": email::obscure_email(&email_data.email),
})
}

View file

@ -20,7 +20,7 @@ pub use crate::api::{
core::two_factor::send_incomplete_2fa_notifications,
core::{emergency_notification_reminder_job, emergency_request_timeout_job},
core::{event_cleanup_job, events_routes as core_events_routes},
icons::{is_domain_blacklisted, routes as icons_routes},
icons::routes as icons_routes,
identity::routes as identity_routes,
notifications::routes as notifications_routes,
notifications::{AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS},

View file

@ -289,7 +289,7 @@ fn serialize(val: Value) -> Vec<u8> {
fn serialize_date(date: NaiveDateTime) -> Value {
let seconds: i64 = date.and_utc().timestamp();
let nanos: i64 = date.and_utc().timestamp_subsec_nanos().into();
let nanos: i64 = date.timestamp_subsec_nanos().into();
let timestamp = nanos << 34 | seconds;
let bs = timestamp.to_be_bytes();
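The shift-and-or above packs the value in the MessagePack 64-bit timestamp layout: 30 bits of nanoseconds above 34 bits of seconds. A small worked check, assuming chrono 0.4's and_utc() API (the accessor the master side uses; the branch reverts to the deprecated direct methods):

use chrono::{NaiveDate, NaiveDateTime};

fn pack(date: NaiveDateTime) -> [u8; 8] {
    let seconds: i64 = date.and_utc().timestamp();
    let nanos: i64 = date.and_utc().timestamp_subsec_nanos().into();
    ((nanos << 34) | seconds).to_be_bytes()
}

fn main() {
    // 2024-01-01T00:00:00Z is 1_704_067_200 s after the epoch with zero
    // nanoseconds, so the packed value is just the seconds in big-endian.
    let date = NaiveDate::from_ymd_opt(2024, 1, 1).unwrap().and_hms_opt(0, 0, 0).unwrap();
    assert_eq!(u64::from_be_bytes(pack(date)), 1_704_067_200);
}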

View file

@ -4,7 +4,7 @@ use chrono::{TimeDelta, Utc};
use num_traits::FromPrimitive;
use once_cell::sync::{Lazy, OnceCell};
use jsonwebtoken::{errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
use jsonwebtoken::{self, errors::ErrorKind, Algorithm, DecodingKey, EncodingKey, Header};
use openssl::rsa::Rsa;
use serde::de::DeserializeOwned;
use serde::ser::Serialize;
@ -391,8 +391,10 @@ impl<'r> FromRequest<'r> for Host {
let host = if let Some(host) = headers.get_one("X-Forwarded-Host") {
host
} else if let Some(host) = headers.get_one("Host") {
host
} else {
headers.get_one("Host").unwrap_or_default()
""
};
format!("{protocol}://{host}")
@ -689,7 +691,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
_ => err_handler!("Error getting DB"),
};
if !Collection::can_access_collection(&headers.org_user, &col_id, &mut conn).await {
if !can_access_collection(&headers.org_user, &col_id, &mut conn).await {
err_handler!("The current user isn't a manager for this collection")
}
}
@ -762,6 +764,10 @@ impl From<ManagerHeadersLoose> for Headers {
}
}
}
async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
org_user.has_full_access()
|| Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn).await
}
impl ManagerHeaders {
pub async fn from_loose(
@ -773,7 +779,7 @@ impl ManagerHeaders {
if uuid::Uuid::parse_str(col_id).is_err() {
err!("Collection Id is malformed!");
}
if !Collection::can_access_collection(&h.org_user, col_id, conn).await {
if !can_access_collection(&h.org_user, col_id, conn).await {
err!("You don't have access to all collections!");
}
}

View file

@ -442,7 +442,7 @@ impl Cipher {
}
if let Some(ref org_uuid) = self.organization_uuid {
if let Some(cipher_sync_data) = cipher_sync_data {
return cipher_sync_data.user_group_full_access_for_organizations.contains(org_uuid);
return cipher_sync_data.user_group_full_access_for_organizations.get(org_uuid).is_some();
} else {
return Group::is_in_full_access_group(user_uuid, org_uuid, conn).await;
}

View file

@ -1,6 +1,6 @@
use serde_json::Value;
use super::{CollectionGroup, GroupUser, User, UserOrgStatus, UserOrgType, UserOrganization};
use super::{CollectionGroup, User, UserOrgStatus, UserOrgType, UserOrganization};
use crate::CONFIG;
db_object! {
@ -102,15 +102,6 @@ impl Collection {
json_object["hidePasswords"] = json!(hide_passwords);
json_object
}
pub async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
org_user.has_status(UserOrgStatus::Confirmed)
&& (org_user.has_full_access()
|| CollectionUser::has_access_to_collection_by_user(col_id, &org_user.user_uuid, conn).await
|| (CONFIG.org_groups_enabled()
&& (GroupUser::has_full_access_by_member(&org_user.org_uuid, &org_user.uuid, conn).await
|| GroupUser::has_access_to_collection_by_member(col_id, &org_user.uuid, conn).await)))
}
}
use crate::db::DbConn;
@ -261,6 +252,17 @@ impl Collection {
}
}
// Check if a user has access to a specific collection
// FIXME: This needs to be reviewed. The query used by `find_by_user_uuid` could be adjusted to filter when needed.
// For now this is a good solution without making to much changes.
pub async fn has_access_by_collection_and_user_uuid(
collection_uuid: &str,
user_uuid: &str,
conn: &mut DbConn,
) -> bool {
Self::find_by_user_uuid(user_uuid.to_owned(), conn).await.into_iter().any(|c| c.uuid == collection_uuid)
}
pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
Self::find_by_user_uuid(user_uuid.to_owned(), conn)
.await
@ -642,10 +644,6 @@ impl CollectionUser {
Ok(())
}}
}
pub async fn has_access_to_collection_by_user(col_id: &str, user_uuid: &str, conn: &mut DbConn) -> bool {
Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some()
}
}
/// Database methods

View file

@ -81,32 +81,25 @@ impl EmergencyAccess {
})
}
pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option<Value> {
pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Value {
let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
Some(User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found."))
} else if let Some(email) = self.email.as_deref() {
match User::find_by_mail(email, conn).await {
Some(user) => user,
None => {
// remove outstanding invitations which should not exist
let _ = Self::delete_all_by_grantee_email(email, conn).await;
return None;
}
}
Some(User::find_by_mail(email, conn).await.expect("Grantee user not found."))
} else {
return None;
None
};
Some(json!({
json!({
"id": self.uuid,
"status": self.status,
"type": self.atype,
"waitTimeDays": self.wait_time_days,
"granteeId": grantee_user.uuid,
"email": grantee_user.email,
"name": grantee_user.name,
"granteeId": grantee_user.as_ref().map_or("", |u| &u.uuid),
"email": grantee_user.as_ref().map_or("", |u| &u.email),
"name": grantee_user.as_ref().map_or("", |u| &u.name),
"object": "emergencyAccessGranteeDetails",
}))
})
}
}
@ -181,7 +174,7 @@ impl EmergencyAccess {
// Update the grantee so that it will refresh it's status.
User::update_uuid_revision(self.grantee_uuid.as_ref().expect("Error getting grantee"), conn).await;
self.status = status;
date.clone_into(&mut self.updated_at);
self.updated_at = date.to_owned();
db_run! {conn: {
crate::util::retry(|| {
@ -199,7 +192,7 @@ impl EmergencyAccess {
conn: &mut DbConn,
) -> EmptyResult {
self.last_notification_at = Some(date.to_owned());
date.clone_into(&mut self.updated_at);
self.updated_at = date.to_owned();
db_run! {conn: {
crate::util::retry(|| {
@ -221,13 +214,6 @@ impl EmergencyAccess {
Ok(())
}
pub async fn delete_all_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
for ea in Self::find_all_invited_by_grantee_email(grantee_email, conn).await {
ea.delete(conn).await?;
}
Ok(())
}
pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
User::update_uuid_revision(&self.grantor_uuid, conn).await;
@ -299,15 +285,6 @@ impl EmergencyAccess {
}}
}
pub async fn find_all_invited_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
emergency_access::table
.filter(emergency_access::email.eq(grantee_email))
.filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32))
.load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
}}
}
pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
db_run! { conn: {
emergency_access::table
@ -315,21 +292,6 @@ impl EmergencyAccess {
.load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
}}
}
pub async fn accept_invite(&mut self, grantee_uuid: &str, grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email {
err!("User email does not match invite.");
}
if self.status == EmergencyAccessStatus::Accepted as i32 {
err!("Emergency contact already accepted.");
}
self.status = EmergencyAccessStatus::Accepted as i32;
self.grantee_uuid = Some(String::from(grantee_uuid));
self.email = None;
self.save(conn).await
}
}
// endregion

View file

@ -344,25 +344,6 @@ impl UserOrganization {
pub async fn to_json(&self, conn: &mut DbConn) -> Value {
let org = Organization::find_by_uuid(&self.org_uuid, conn).await.unwrap();
let permissions = json!({
// TODO: Add support for Custom User Roles
// See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
"accessEventLogs": false,
"accessImportExport": false,
"accessReports": false,
"createNewCollections": false,
"editAnyCollection": false,
"deleteAnyCollection": false,
"editAssignedCollections": false,
"deleteAssignedCollections": false,
"manageGroups": false,
"managePolicies": false,
"manageSso": false, // Not supported
"manageUsers": false,
"manageResetPassword": false,
"manageScim": false // Not supported (Not AGPLv3 Licensed)
});
// https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/ProfileOrganizationResponseModel.cs
json!({
"id": self.org_uuid,
@ -390,7 +371,27 @@ impl UserOrganization {
// "keyConnectorEnabled": false,
// "keyConnectorUrl": null,
"permissions": permissions,
// TODO: Add support for Custom User Roles
// See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
// "permissions": {
// "accessEventLogs": false,
// "accessImportExport": false,
// "accessReports": false,
// "manageAllCollections": false,
// "createNewCollections": false,
// "editAnyCollection": false,
// "deleteAnyCollection": false,
// "manageAssignedCollections": false,
// "editAssignedCollections": false,
// "deleteAssignedCollections": false,
// "manageCiphers": false,
// "manageGroups": false,
// "managePolicies": false,
// "manageResetPassword": false,
// "manageSso": false, // Not supported
// "manageUsers": false,
// "manageScim": false, // Not supported (Not AGPLv3 Licensed)
// },
"maxStorageGb": 10, // The value doesn't matter, we don't check server-side

View file

@ -240,13 +240,12 @@ impl User {
};
json!({
"_Status": status as i32,
"_status": status as i32,
"id": self.uuid,
"name": self.name,
"mame": self.name,
"email": self.email,
"emailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
"premium": true,
"premiumFromOrganization": false,
"masterPasswordHint": self.password_hint,
"culture": "en-US",
"twoFactorEnabled": twofactor_enabled,
@ -258,7 +257,6 @@ impl User {
"providerOrganizations": [],
"forcePasswordReset": false,
"avatarColor": self.avatar_color,
"usesKeyConnector": false,
"object": "profile",
})
}
@ -313,7 +311,6 @@ impl User {
Send::delete_all_by_user(&self.uuid, conn).await?;
EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?;
EmergencyAccess::delete_all_by_grantee_email(&self.email, conn).await?;
UserOrganization::delete_all_by_user(&self.uuid, conn).await?;
Cipher::delete_all_by_user(&self.uuid, conn).await?;
Favorite::delete_all_by_user(&self.uuid, conn).await?;

View file

@ -3,7 +3,7 @@
// The more key/value pairs there are the more recursion occurs.
// We want to keep this as low as possible, but not higher then 128.
// If you go above 128 it will cause rust-analyzer to fail,
#![recursion_limit = "90"]
#![recursion_limit = "87"]
// When enabled use MiMalloc as malloc instead of the default malloc
#[cfg(feature = "enable_mimalloc")]
@ -211,8 +211,8 @@ fn launch_info() {
}
fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
// Depending on the main log level we either want to disable or enable logging for hickory.
// Else if there are timeouts it will clutter the logs since hickory uses warn for this.
// Depending on the main log level we either want to disable or enable logging for trust-dns.
// Else if there are timeouts it will clutter the logs since trust-dns uses warn for this.
let hickory_level = if level >= log::LevelFilter::Debug {
level
} else {
@ -266,7 +266,7 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
.level_for("handlebars::render", handlebars_level)
// Prevent cookie_store logs
.level_for("cookie_store", log::LevelFilter::Off)
// Variable level for hickory used by reqwest
// Variable level for trust-dns used by reqwest
.level_for("hickory_resolver::name_server::name_server", hickory_level)
.level_for("hickory_proto::xfer", hickory_level)
.level_for("diesel_logger", diesel_logger_level)

View file

@ -29,7 +29,7 @@
{{#if TwoFactorEnabled}}
<span class="badge bg-success me-2" title="2FA is enabled">2FA</span>
{{/if}}
{{#case _Status 1}}
{{#case _status 1}}
<span class="badge bg-warning text-dark me-2" title="User is invited">Invited</span>
{{/case}}
{{#if EmailVerified}}
@ -72,7 +72,7 @@
{{else}}
<button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-enable-user>Enable User</button><br>
{{/if}}
{{#case _Status 1}}
{{#case _status 1}}
<button type="button" class="btn btn-sm btn-link p-0 border-0 float-right" vw-resend-user-invite>Resend invite</button><br>
{{/case}}
</span>

View file

@ -4,7 +4,6 @@
use std::{collections::HashMap, io::Cursor, ops::Deref, path::Path};
use num_traits::ToPrimitive;
use once_cell::sync::Lazy;
use rocket::{
fairing::{Fairing, Info, Kind},
http::{ContentType, Header, HeaderMap, Method, Status},
@ -521,7 +520,7 @@ pub fn container_base_image() -> &'static str {
use std::fmt;
use serde::de::{self, DeserializeOwned, Deserializer, MapAccess, SeqAccess, Visitor};
use serde_json::Value;
use serde_json::{self, Value};
pub type JsonMap = serde_json::Map<String, Value>;
@ -710,9 +709,14 @@ where
use reqwest::{header, Client, ClientBuilder};
pub fn get_reqwest_client() -> &'static Client {
static INSTANCE: Lazy<Client> = Lazy::new(|| get_reqwest_client_builder().build().expect("Failed to build client"));
&INSTANCE
pub fn get_reqwest_client() -> Client {
match get_reqwest_client_builder().build() {
Ok(client) => client,
Err(e) => {
error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
get_reqwest_client_builder().hickory_dns(false).build().expect("Failed to build client")
}
}
}
pub fn get_reqwest_client_builder() -> ClientBuilder {
@ -771,248 +775,3 @@ pub fn parse_experimental_client_feature_flags(experimental_client_feature_flags
feature_states
}
mod dns_resolver {
use std::{
fmt,
net::{IpAddr, SocketAddr},
sync::Arc,
};
use hickory_resolver::{system_conf::read_system_conf, TokioAsyncResolver};
use once_cell::sync::Lazy;
use reqwest::dns::{Name, Resolve, Resolving};
use crate::{util::is_global, CONFIG};
#[derive(Debug, Clone)]
pub enum CustomResolverError {
Blacklist {
domain: String,
},
NonGlobalIp {
domain: String,
ip: IpAddr,
},
}
impl CustomResolverError {
pub fn downcast_ref(e: &dyn std::error::Error) -> Option<&Self> {
let mut source = e.source();
while let Some(err) = source {
source = err.source();
if let Some(err) = err.downcast_ref::<CustomResolverError>() {
return Some(err);
}
}
None
}
}
impl fmt::Display for CustomResolverError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Blacklist {
domain,
} => write!(f, "Blacklisted domain: {domain} matched ICON_BLACKLIST_REGEX"),
Self::NonGlobalIp {
domain,
ip,
} => write!(f, "IP {ip} for domain '{domain}' is not a global IP!"),
}
}
}
impl std::error::Error for CustomResolverError {}
#[derive(Debug, Clone)]
pub enum CustomDnsResolver {
Default(),
Hickory(Arc<TokioAsyncResolver>),
}
type BoxError = Box<dyn std::error::Error + Send + Sync>;
impl CustomDnsResolver {
pub fn instance() -> Arc<Self> {
static INSTANCE: Lazy<Arc<CustomDnsResolver>> = Lazy::new(CustomDnsResolver::new);
Arc::clone(&*INSTANCE)
}
fn new() -> Arc<Self> {
match read_system_conf() {
Ok((config, opts)) => {
let resolver = TokioAsyncResolver::tokio(config.clone(), opts.clone());
Arc::new(Self::Hickory(Arc::new(resolver)))
}
Err(e) => {
warn!("Error creating Hickory resolver, falling back to default: {e:?}");
Arc::new(Self::Default())
}
}
}
// Note that we get an iterator of addresses, but we only grab the first one for convenience
async fn resolve_domain(&self, name: &str) -> Result<Option<SocketAddr>, BoxError> {
pre_resolve(name)?;
let result = match self {
Self::Default() => tokio::net::lookup_host(name).await?.next(),
Self::Hickory(r) => r.lookup_ip(name).await?.iter().next().map(|a| SocketAddr::new(a, 0)),
};
if let Some(addr) = &result {
post_resolve(name, addr.ip())?;
}
Ok(result)
}
}
fn pre_resolve(name: &str) -> Result<(), CustomResolverError> {
if crate::api::is_domain_blacklisted(name) {
return Err(CustomResolverError::Blacklist {
domain: name.to_string(),
});
}
Ok(())
}
fn post_resolve(name: &str, ip: IpAddr) -> Result<(), CustomResolverError> {
if CONFIG.icon_blacklist_non_global_ips() && !is_global(ip) {
Err(CustomResolverError::NonGlobalIp {
domain: name.to_string(),
ip,
})
} else {
Ok(())
}
}
impl Resolve for CustomDnsResolver {
fn resolve(&self, name: Name) -> Resolving {
let this = self.clone();
Box::pin(async move {
let name = name.as_str();
let result = this.resolve_domain(name).await?;
Ok::<reqwest::dns::Addrs, _>(Box::new(result.into_iter()))
})
}
}
}
pub use dns_resolver::{CustomDnsResolver, CustomResolverError};
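On the master side this resolver is handed to reqwest so every lookup runs the blacklist and non-global-IP checks; a minimal sketch of that wiring (the exact call site and options in get_reqwest_client_builder are assumed here):

use std::time::Duration;
use reqwest::ClientBuilder;

fn builder() -> ClientBuilder {
    ClientBuilder::new()
        .timeout(Duration::from_secs(10))
        .dns_resolver(CustomDnsResolver::instance())
}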
/// TODO: This is extracted from IpAddr::is_global, which is unstable:
/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
#[allow(clippy::nonminimal_bool)]
#[cfg(any(not(feature = "unstable"), test))]
pub fn is_global_hardcoded(ip: std::net::IpAddr) -> bool {
match ip {
std::net::IpAddr::V4(ip) => {
!(ip.octets()[0] == 0 // "This network"
|| ip.is_private()
|| (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) //ip.is_shared()
|| ip.is_loopback()
|| ip.is_link_local()
// addresses reserved for future protocols (`192.0.0.0/24`)
||(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
|| ip.is_documentation()
|| (ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18) // ip.is_benchmarking()
|| (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) //ip.is_reserved()
|| ip.is_broadcast())
}
std::net::IpAddr::V6(ip) => {
!(ip.is_unspecified()
|| ip.is_loopback()
// IPv4-mapped Address (`::ffff:0:0/96`)
|| matches!(ip.segments(), [0, 0, 0, 0, 0, 0xffff, _, _])
// IPv4-IPv6 Translat. (`64:ff9b:1::/48`)
|| matches!(ip.segments(), [0x64, 0xff9b, 1, _, _, _, _, _])
// Discard-Only Address Block (`100::/64`)
|| matches!(ip.segments(), [0x100, 0, 0, 0, _, _, _, _])
// IETF Protocol Assignments (`2001::/23`)
|| (matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200)
&& !(
// Port Control Protocol Anycast (`2001:1::1`)
u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001
// Traversal Using Relays around NAT Anycast (`2001:1::2`)
|| u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002
// AMT (`2001:3::/32`)
|| matches!(ip.segments(), [0x2001, 3, _, _, _, _, _, _])
// AS112-v6 (`2001:4:112::/48`)
|| matches!(ip.segments(), [0x2001, 4, 0x112, _, _, _, _, _])
// ORCHIDv2 (`2001:20::/28`)
|| matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x2F).contains(&b))
))
|| ((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8)) // ip.is_documentation()
|| ((ip.segments()[0] & 0xfe00) == 0xfc00) //ip.is_unique_local()
|| ((ip.segments()[0] & 0xffc0) == 0xfe80)) //ip.is_unicast_link_local()
}
}
}
#[cfg(not(feature = "unstable"))]
pub use is_global_hardcoded as is_global;
#[cfg(feature = "unstable")]
#[inline(always)]
pub fn is_global(ip: std::net::IpAddr) -> bool {
ip.is_global()
}
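A quick sanity check of the classification, using values that follow directly from the rules above:

use std::net::IpAddr;

fn main() {
    let private: IpAddr = "10.0.0.1".parse().unwrap(); // RFC 1918, not global
    let public: IpAddr = "1.1.1.1".parse().unwrap();   // ordinary public address
    assert!(!is_global_hardcoded(private));
    assert!(is_global_hardcoded(public));
}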
/// These are some tests to check that the implementations match
/// The IPv4 can be all checked in 30 seconds or so and they are correct as of nightly 2023-07-17
/// The IPV6 can't be checked in a reasonable time, so we check over a hundred billion random ones, so far correct
/// Note that the is_global implementation is subject to change as new IP RFCs are created
///
/// To run while showing progress output:
/// cargo +nightly test --release --features sqlite,unstable -- --nocapture --ignored
#[cfg(test)]
#[cfg(feature = "unstable")]
mod tests {
use super::*;
use std::net::IpAddr;
#[test]
#[ignore]
fn test_ipv4_global() {
for a in 0..u8::MAX {
println!("Iter: {}/255", a);
for b in 0..u8::MAX {
for c in 0..u8::MAX {
for d in 0..u8::MAX {
let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {}", ip)
}
}
}
}
}
#[test]
#[ignore]
fn test_ipv6_global() {
use rand::Rng;
std::thread::scope(|s| {
for t in 0..16 {
let handle = s.spawn(move || {
let mut v = [0u8; 16];
let mut rng = rand::thread_rng();
for i in 0..20 {
println!("Thread {t} Iter: {i}/50");
for _ in 0..500_000_000 {
rng.fill(&mut v);
let ip = IpAddr::V6(std::net::Ipv6Addr::from(v));
assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {ip}");
}
}
});
}
});
}
}