mirror of https://github.com/dani-garcia/vaultwarden synced 2024-06-15 18:38:21 +02:00

Compare commits


8 commits

Author SHA1 Message Date
FDHoho007 5a1927ee7a
Merge e7314dd96a into 0fe93edea6 2024-04-28 00:08:22 +02:00
FDHoho007 e7314dd96a Fix public api for domains with path prefix 2024-04-28 00:08:20 +02:00
Daniel García 0fe93edea6
Some fixes for the new mobile apps (#4526) 2024-04-27 23:24:04 +02:00
Stefan Melmuk e9aa5a545e
fix emergency access invites (#4337)
* fix emergency access invites with no mail

when mail is disabled, instead of automatically accepting emergency access
for all invited users, we only accept if the user already exists

on registration of a new account, any open emergency access invitations
will be accepted if mail is disabled

also prevent invited emergency access contacts from registering if emergency
access is disabled (this is only relevant when mail is enabled; if
mail is disabled they should have an Invitation entry)

* delete emergency access invitations

if an invited user is deleted in the /admin panel, their emergency
access invitation will remain in the database, which causes
the to_json_grantee_details fn to panic

* improve missing emergency access grantees

instead of returning an empty emergency access contact, the entry should
not be added to the list. Also, the error handling can be improved a bit.
2024-04-27 22:16:05 +02:00
Stefan Melmuk 9dcc738f85
improve access to collections via groups (#4441)
* refactor get_org_collections_details

* improve access to collection check

* fix get_org_collection_detail too
2024-04-27 22:09:00 +02:00
Kristof Mattei 84a7c7da5d
Pass in collection ids to notifier when sharing cipher. (#4517) 2024-04-27 21:53:10 +02:00
Mathijs van Veluw ca9234ed86
Add extra (unsupported) container build arch's (#4524)
There was a PR (#4370) to add i686/i386 support for Vaultwarden.
That specific PR was not a viable way of adding this.

This PR adds extra architectures for Debian based containers which we
will not support by default. Those images will not be built and pushed
to our container registries.

Added the following architectures:
 - linux/386
 - linux/ppc64le
 - linux/s390x

Again, there will be no major support for these architectures, but this
will allow people who use these architectures to build a Debian based
binary more easily.
2024-04-27 21:51:14 +02:00
Daniel García 27dc67fadd
Implement custom DNS resolver (#3988) 2024-04-27 20:25:34 +02:00
22 changed files with 563 additions and 404 deletions

Cargo.lock (generated)
View file

@@ -2819,7 +2819,6 @@ dependencies = [
  "futures-core",
  "futures-util",
  "h2 0.4.4",
- "hickory-resolver",
  "http 1.1.0",
  "http-body 1.0.0",
  "http-body-util",
@@ -3187,9 +3186,9 @@ checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca"

 [[package]]
 name = "serde"
-version = "1.0.198"
+version = "1.0.199"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc"
+checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a"
 dependencies = [
  "serde_derive",
 ]
@@ -3206,9 +3205,9 @@ dependencies = [

 [[package]]
 name = "serde_derive"
-version = "1.0.198"
+version = "1.0.199"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9"
+checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -3970,6 +3969,7 @@ dependencies = [
  "futures",
  "governor",
  "handlebars",
+ "hickory-resolver",
  "html5gum",
  "job_scheduler_ng",
  "jsonwebtoken",

View file

@@ -67,7 +67,7 @@ dashmap = "5.5.3"
 # Async futures
 futures = "0.3.30"
-tokio = { version = "1.37.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] }
+tokio = { version = "1.37.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }

 # A generic serialization/deserialization framework
 serde = { version = "1.0.198", features = ["derive"] }
@@ -123,7 +123,8 @@ email_address = "0.2.4"
 handlebars = { version = "5.1.2", features = ["dir_source"] }

 # HTTP client (Used for favicons, version check, DUO and HIBP API)
-reqwest = { version = "0.12.4", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies", "hickory-dns"] }
+reqwest = { version = "0.12.4", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
+hickory-resolver = "0.24.1"

 # Favicon extraction libraries
 html5gum = "0.5.7"

View file

@@ -65,13 +65,14 @@ RUN mkdir -pv "${CARGO_HOME}" \
 RUN USER=root cargo new --bin /app
 WORKDIR /app

-# Shared variables across Debian and Alpine
+# Environment variables for Cargo on Alpine based builds
 RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
     # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
     if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
     # Output the current contents of the file
     cat /env-cargo

+# Configure the DB ARG as late as possible to not invalidate the cached layers above
 # Enable MiMalloc to improve performance on Alpine builds
 ARG DB=sqlite,mysql,postgresql,enable_mimalloc

View file

@@ -88,9 +88,17 @@ RUN mkdir -pv "${CARGO_HOME}" \
 RUN USER=root cargo new --bin /app
 WORKDIR /app

-# Environment variables for cargo across Debian and Alpine
+# Environment variables for Cargo on Debian based builds
+ARG ARCH_OPENSSL_LIB_DIR \
+    ARCH_OPENSSL_INCLUDE_DIR
 RUN source /env-cargo && \
     if xx-info is-cross ; then \
+        # Some special variables if needed to override some build paths
+        if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
+        fi && \
         # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
         # Because of this we generate the needed environment variables here which we can load in the needed steps.
         echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \

View file

@@ -108,9 +108,17 @@ RUN USER=root cargo new --bin /app
 WORKDIR /app

 {% if base == "debian" %}
-# Environment variables for cargo across Debian and Alpine
+# Environment variables for Cargo on Debian based builds
+ARG ARCH_OPENSSL_LIB_DIR \
+    ARCH_OPENSSL_INCLUDE_DIR
 RUN source /env-cargo && \
     if xx-info is-cross ; then \
+        # Some special variables if needed to override some build paths
+        if [[ -n "${ARCH_OPENSSL_LIB_DIR}" && -n "${ARCH_OPENSSL_INCLUDE_DIR}" ]]; then \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_LIB_DIR=${ARCH_OPENSSL_LIB_DIR}" >> /env-cargo && \
+            echo "export $(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_OPENSSL_INCLUDE_DIR=${ARCH_OPENSSL_INCLUDE_DIR}" >> /env-cargo ; \
+        fi && \
         # We can't use xx-cargo since that uses clang, which doesn't work for our libraries.
         # Because of this we generate the needed environment variables here which we can load in the needed steps.
         echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \
@@ -126,13 +134,14 @@ RUN source /env-cargo && \
 # Configure the DB ARG as late as possible to not invalidate the cached layers above
 ARG DB=sqlite,mysql,postgresql

 {% elif base == "alpine" %}
-# Shared variables across Debian and Alpine
+# Environment variables for Cargo on Alpine based builds
 RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \
     # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic
     if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \
     # Output the current contents of the file
     cat /env-cargo

+# Configure the DB ARG as late as possible to not invalidate the cached layers above
 # Enable MiMalloc to improve performance on Alpine builds
 ARG DB=sqlite,mysql,postgresql,enable_mimalloc
 {% endif %}

View file

@@ -11,6 +11,11 @@ With just these two files we can build both Debian and Alpine images for the fol
 - armv7 (linux/arm/v7)
 - armv6 (linux/arm/v6)

+Some unsupported platforms for Debian based images. These are not built and tested by default and are only provided to make it easier for users to build for these architectures.
+
+- 386 (linux/386)
+- ppc64le (linux/ppc64le)
+- s390x (linux/s390x)
+
 To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different then your host.<br>
 This ensures the container build process can run binaries from other architectures.<br>

View file

@@ -125,6 +125,40 @@ target "debian-armv6" {
   tags = generate_tags("", "-armv6")
 }

+// ==== Start of unsupported Debian architecture targets ===
+// These are provided just to help users build for these rare platforms
+// They will not be built by default
+target "debian-386" {
+  inherits = ["debian"]
+  platforms = ["linux/386"]
+  tags = generate_tags("", "-386")
+  args = {
+    ARCH_OPENSSL_LIB_DIR = "/usr/lib/i386-linux-gnu"
+    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/i386-linux-gnu"
+  }
+}
+
+target "debian-ppc64le" {
+  inherits = ["debian"]
+  platforms = ["linux/ppc64le"]
+  tags = generate_tags("", "-ppc64le")
+  args = {
+    ARCH_OPENSSL_LIB_DIR = "/usr/lib/powerpc64le-linux-gnu"
+    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/powerpc64le-linux-gnu"
+  }
+}
+
+target "debian-s390x" {
+  inherits = ["debian"]
+  platforms = ["linux/s390x"]
+  tags = generate_tags("", "-s390x")
+  args = {
+    ARCH_OPENSSL_LIB_DIR = "/usr/lib/s390x-linux-gnu"
+    ARCH_OPENSSL_INCLUDE_DIR = "/usr/include/s390x-linux-gnu"
+  }
+}
+// ==== End of unsupported Debian architecture targets ===
+
 // A Group to build all platforms individually for local testing
 group "debian-all" {
   targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]

View file

@@ -166,7 +166,8 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
         }
         user
     } else if CONFIG.is_signup_allowed(&email)
-        || EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some()
+        || (CONFIG.emergency_access_allowed()
+            && EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some())
     {
         user
     } else {
@@ -217,7 +218,6 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
             if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid).await {
                 error!("Error sending welcome email: {:#?}", e);
             }
-
             user.last_verifying_at = Some(user.created_at);
         } else if let Err(e) = mail::send_welcome(&user.email).await {
             error!("Error sending welcome email: {:#?}", e);
@@ -229,6 +229,14 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
     }

     user.save(&mut conn).await?;

+    // accept any open emergency access invitations
+    if !CONFIG.mail_enabled() && CONFIG.emergency_access_allowed() {
+        for mut emergency_invite in EmergencyAccess::find_all_invited_by_grantee_email(&user.email, &mut conn).await {
+            let _ = emergency_invite.accept_invite(&user.uuid, &user.email, &mut conn).await;
+        }
+    }
+
     Ok(Json(json!({
       "Object": "register",
       "CaptchaBypassToken": "",
@@ -568,7 +576,7 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D
             // Prevent triggering cipher updates via WebSockets by settings UpdateType::None
             // The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
             // We force the users to logout after the user has been saved to try and prevent these issues.
-            update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None)
+            update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None)
                 .await?
         }
     }

View file

@@ -10,6 +10,7 @@ use rocket::{
 };
 use serde_json::Value;

+use crate::util::NumberOrString;
 use crate::{
     api::{self, core::log_event, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordOrOtpData, UpdateType},
     auth::Headers,
@@ -321,7 +322,7 @@ async fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, mut conn:
     data.LastKnownRevisionDate = None;

     let mut cipher = Cipher::new(data.Type, data.Name.clone());
-    update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;
+    update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;

     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
 }
@@ -352,7 +353,7 @@ pub async fn update_cipher_from_data(
     cipher: &mut Cipher,
     data: CipherData,
     headers: &Headers,
-    shared_to_collection: bool,
+    shared_to_collections: Option<Vec<String>>,
     conn: &mut DbConn,
     nt: &Notify<'_>,
     ut: UpdateType,
@@ -391,7 +392,7 @@ pub async fn update_cipher_from_data(
         match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await {
             None => err!("You don't have permission to add item to organization"),
             Some(org_user) => {
-                if shared_to_collection
+                if shared_to_collections.is_some()
                     || org_user.has_full_access()
                     || cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await
                 {
@@ -518,7 +519,14 @@ pub async fn update_cipher_from_data(
             )
             .await;
         }
-        nt.send_cipher_update(ut, cipher, &cipher.update_users_revision(conn).await, &headers.device.uuid, None, conn)
+        nt.send_cipher_update(
+            ut,
+            cipher,
+            &cipher.update_users_revision(conn).await,
+            &headers.device.uuid,
+            shared_to_collections,
+            conn,
+        )
         .await;
     }

     Ok(())
@@ -580,7 +588,7 @@ async fn post_ciphers_import(
         cipher_data.FolderId = folder_uuid;
         let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
-        update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await?;
+        update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await?;
     }

     let mut user = headers.user;
@@ -648,7 +656,7 @@ async fn put_cipher(
         err!("Cipher is not write accessible")
     }

-    update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;
+    update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;

     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
 }
@@ -898,7 +906,7 @@ async fn share_cipher_by_uuid(
         None => err!("Cipher doesn't exist"),
     };

-    let mut shared_to_collection = false;
+    let mut shared_to_collections = vec![];

     if let Some(organization_uuid) = &data.Cipher.OrganizationId {
         for uuid in &data.CollectionIds {
@@ -907,7 +915,7 @@ async fn share_cipher_by_uuid(
                 Some(collection) => {
                     if collection.is_writable_by_user(&headers.user.uuid, conn).await {
                         CollectionCipher::save(&cipher.uuid, &collection.uuid, conn).await?;
-                        shared_to_collection = true;
+                        shared_to_collections.push(collection.uuid);
                     } else {
                         err!("No rights to modify the collection")
                     }
@@ -923,7 +931,7 @@ async fn share_cipher_by_uuid(
         UpdateType::SyncCipherCreate
     };

-    update_cipher_from_data(&mut cipher, data.Cipher, headers, shared_to_collection, conn, nt, ut).await?;
+    update_cipher_from_data(&mut cipher, data.Cipher, headers, Some(shared_to_collections), conn, nt, ut).await?;

     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
 }
@@ -957,7 +965,7 @@ async fn get_attachment(uuid: &str, attachment_id: &str, headers: Headers, mut c
 struct AttachmentRequestData {
     Key: String,
     FileName: String,
-    FileSize: i64,
+    FileSize: NumberOrString,
     AdminRequest: Option<bool>, // true when attaching from an org vault view
 }
@@ -987,12 +995,14 @@ async fn post_attachment_v2(
     }

     let data: AttachmentRequestData = data.into_inner().data;

-    if data.FileSize < 0 {
+    let file_size = data.FileSize.into_i64()?;
+
+    if file_size < 0 {
         err!("Attachment size can't be negative")
     }
+
     let attachment_id = crypto::generate_attachment_id();
     let attachment =
-        Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, data.FileSize, Some(data.Key));
+        Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, file_size, Some(data.Key));
     attachment.save(&mut conn).await.expect("Error saving attachment");

     let url = format!("/ciphers/{}/attachment/{}", cipher.uuid, attachment_id);
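Switching FileSize from i64 to crate::util::NumberOrString lets clients send the attachment size either as a JSON number or as a quoted string; the helper itself lives outside the files shown here. A minimal sketch of such an untagged deserializer (assumed shape, simplified error type):

use serde::Deserialize;

// Accepts either `123` or `"123"` in incoming JSON.
#[derive(Deserialize)]
#[serde(untagged)]
pub enum NumberOrString {
    Number(i64),
    String(String),
}

impl NumberOrString {
    pub fn into_i64(self) -> Result<i64, std::num::ParseIntError> {
        match self {
            NumberOrString::Number(n) => Ok(n),
            NumberOrString::String(s) => s.parse(),
        }
    }
}

With serde's untagged representation, each variant is tried in order until one deserializes, so both spellings of the field arrive as the same type.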

View file

@@ -61,7 +61,9 @@ async fn get_contacts(headers: Headers, mut conn: DbConn) -> Json<Value> {
     let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await;
     let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len());
     for ea in emergency_access_list {
-        emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await);
+        if let Some(grantee) = ea.to_json_grantee_details(&mut conn).await {
+            emergency_access_list_json.push(grantee)
+        }
     }

     Json(json!({
@@ -95,7 +97,9 @@ async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult {
     check_emergency_access_enabled()?;

     match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
-        Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)),
+        Some(emergency_access) => Ok(Json(
+            emergency_access.to_json_grantee_details(&mut conn).await.expect("Grantee user should exist but does not!"),
+        )),
         None => err!("Emergency access not valid."),
     }
 }
@@ -209,7 +213,7 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
         err!("You can not set yourself as an emergency contact.")
     }

-    let grantee_user = match User::find_by_mail(&email, &mut conn).await {
+    let (grantee_user, new_user) = match User::find_by_mail(&email, &mut conn).await {
         None => {
             if !CONFIG.invitations_allowed() {
                 err!(format!("Grantee user does not exist: {}", &email))
@@ -226,9 +230,10 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
             let mut user = User::new(email.clone());
             user.save(&mut conn).await?;
-            user
+            (user, true)
         }
-        Some(user) => user,
+        Some(user) if user.password_hash.is_empty() => (user, true),
+        Some(user) => (user, false),
     };

     if EmergencyAccess::find_by_grantor_uuid_and_grantee_uuid_or_email(
@@ -256,15 +261,9 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade
             &grantor_user.email,
         )
         .await?;
-    } else {
-        // Automatically mark user as accepted if no email invites
-        match User::find_by_mail(&email, &mut conn).await {
-            Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await {
-                Ok(v) => v,
-                Err(e) => err!(e.to_string()),
-            },
-            None => err!("Grantee user not found."),
-        }
+    } else if !new_user {
+        // if mail is not enabled immediately accept the invitation for existing users
+        new_emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
     }

     Ok(())
@@ -308,19 +307,14 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp
             &grantor_user.email,
         )
         .await?;
-    } else {
-        if Invitation::find_by_mail(&email, &mut conn).await.is_none() {
+    } else if !grantee_user.password_hash.is_empty() {
+        // accept the invitation for existing user
+        emergency_access.accept_invite(&grantee_user.uuid, &email, &mut conn).await?;
+    } else if CONFIG.invitations_allowed() && Invitation::find_by_mail(&email, &mut conn).await.is_none() {
         let invitation = Invitation::new(&email);
         invitation.save(&mut conn).await?;
     }
-
-        // Automatically mark user as accepted if no email invites
-        match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await {
-            Ok(v) => v,
-            Err(e) => err!(e.to_string()),
-        }
-    }

     Ok(())
 }
@@ -367,10 +361,7 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
         && grantor_user.name == claims.grantor_name
         && grantor_user.email == claims.grantor_email
     {
-        match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await {
-            Ok(v) => v,
-            Err(e) => err!(e.to_string()),
-        }
+        emergency_access.accept_invite(&grantee_user.uuid, &grantee_user.email, &mut conn).await?;

         if CONFIG.mail_enabled() {
             mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?;
@@ -382,26 +373,6 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea
     }
 }

-async fn accept_invite_process(
-    grantee_uuid: &str,
-    emergency_access: &mut EmergencyAccess,
-    grantee_email: &str,
-    conn: &mut DbConn,
-) -> EmptyResult {
-    if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email {
-        err!("User email does not match invite.");
-    }
-
-    if emergency_access.status == EmergencyAccessStatus::Accepted as i32 {
-        err!("Emergency contact already accepted.");
-    }
-
-    emergency_access.status = EmergencyAccessStatus::Accepted as i32;
-    emergency_access.grantee_uuid = Some(String::from(grantee_uuid));
-    emergency_access.email = None;
-    emergency_access.save(conn).await
-}
-
 #[derive(Deserialize)]
 #[allow(non_snake_case)]
 struct ConfirmData {
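Several hunks above compare self.status against EmergencyAccessStatus variants cast to i32. The enum is defined in the models layer and not shown in this diff; its assumed shape, mirroring Bitwarden's emergency access states, is roughly:

// Assumed discriminants; stored as i32 in the emergency_access table.
#[derive(Clone, Copy, PartialEq)]
pub enum EmergencyAccessStatus {
    Invited = 0,
    Accepted = 1,
    Confirmed = 2,
    RecoveryInitiated = 3,
    RecoveryApproved = 4,
}

The accept_invite method introduced in this change moves a record from Invited to Accepted and clears the stored email, so from then on the record is keyed by grantee_uuid.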

View file

@@ -329,27 +329,19 @@ async fn get_org_collections_details(org_id: &str, headers: ManagerHeadersLoose,
         && GroupUser::has_full_access_by_member(org_id, &user_org.uuid, &mut conn).await);

     for col in Collection::find_by_organization(org_id, &mut conn).await {
-        // assigned indicates whether the current user has access to the given collection
-        let mut assigned = has_full_access_to_org;
+        // check whether the current user has access to the given collection
+        let assigned = has_full_access_to_org
+            || CollectionUser::has_access_to_collection_by_user(&col.uuid, &user_org.user_uuid, &mut conn).await
+            || (CONFIG.org_groups_enabled()
+                && GroupUser::has_access_to_collection_by_member(&col.uuid, &user_org.uuid, &mut conn).await);

         // get the users assigned directly to the given collection
         let users: Vec<Value> = coll_users
             .iter()
             .filter(|collection_user| collection_user.collection_uuid == col.uuid)
-            .map(|collection_user| {
-                // check if the current user is assigned to this collection directly
-                if collection_user.user_uuid == user_org.uuid {
-                    assigned = true;
-                }
-                SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json()
-            })
+            .map(|collection_user| SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json())
             .collect();

-        // check if the current user has access to the given collection via a group
-        if !assigned && CONFIG.org_groups_enabled() {
-            assigned = GroupUser::has_access_to_collection_by_member(&col.uuid, &user_org.uuid, &mut conn).await;
-        }
-
         // get the group details for the given collection
         let groups: Vec<Value> = if CONFIG.org_groups_enabled() {
             CollectionGroup::find_by_collection(&col.uuid, &mut conn)
@@ -672,24 +664,16 @@ async fn get_org_collection_detail(
         Vec::with_capacity(0)
     };

-    let mut assigned = false;
     let users: Vec<Value> =
         CollectionUser::find_by_collection_swap_user_uuid_with_org_user_uuid(&collection.uuid, &mut conn)
             .await
             .iter()
             .map(|collection_user| {
-                // Remember `user_uuid` is swapped here with the `user_org.uuid` with a join during the `find_by_collection_swap_user_uuid_with_org_user_uuid` call.
-                // We check here if the current user is assigned to this collection or not.
-                if collection_user.user_uuid == user_org.uuid {
-                    assigned = true;
-                }
                 SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json()
             })
             .collect();

-    if user_org.access_all {
-        assigned = true;
-    }
+    let assigned = Collection::can_access_collection(&user_org, &collection.uuid, &mut conn).await;

     let mut json_object = collection.to_json();
     json_object["Assigned"] = json!(assigned);
@@ -1618,7 +1602,7 @@ async fn post_org_import(
     let mut ciphers = Vec::new();
     for cipher_data in data.Ciphers {
         let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
-        update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await.ok();
+        update_cipher_from_data(&mut cipher, cipher_data, &headers, None, &mut conn, &nt, UpdateType::None).await.ok();
         ciphers.push(cipher);
     }

View file

@@ -216,12 +216,8 @@ impl<'r> FromRequest<'r> for PublicToken {
         if time_now > claims.exp {
             err_handler!("Token expired");
         }
-        // Check if claims.iss is host|claims.scope[0]
-        let host = match auth::Host::from_request(request).await {
-            Outcome::Success(host) => host,
-            _ => err_handler!("Error getting Host"),
-        };
-        let complete_host = format!("{}|{}", host.host, claims.scope[0]);
+        // Check if claims.iss is domain|claims.scope[0]
+        let complete_host = format!("{}|{}", CONFIG.domain_origin(), claims.scope[0]);
         if complete_host != claims.iss {
             err_handler!("Token not issued by this server");
         }

View file

@@ -1,6 +1,6 @@
 use std::{
     net::IpAddr,
-    sync::Arc,
+    sync::{Arc, Mutex},
     time::{Duration, SystemTime},
 };
@@ -16,14 +16,13 @@ use rocket::{http::ContentType, response::Redirect, Route};
 use tokio::{
     fs::{create_dir_all, remove_file, symlink_metadata, File},
     io::{AsyncReadExt, AsyncWriteExt},
-    net::lookup_host,
 };

 use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer};

 use crate::{
     error::Error,
-    util::{get_reqwest_client_builder, Cached},
+    util::{get_reqwest_client_builder, Cached, CustomDnsResolver, CustomResolverError},
     CONFIG,
 };
@@ -49,48 +48,32 @@ static CLIENT: Lazy<Client> = Lazy::new(|| {
     let icon_download_timeout = Duration::from_secs(CONFIG.icon_download_timeout());
     let pool_idle_timeout = Duration::from_secs(10);
     // Reuse the client between requests
-    let client = get_reqwest_client_builder()
+    get_reqwest_client_builder()
         .cookie_provider(Arc::clone(&cookie_store))
         .timeout(icon_download_timeout)
         .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
         .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
-        .hickory_dns(true)
-        .default_headers(default_headers.clone());
-
-    match client.build() {
-        Ok(client) => client,
-        Err(e) => {
-            error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
-            get_reqwest_client_builder()
-                .cookie_provider(cookie_store)
-                .timeout(icon_download_timeout)
-                .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections
-                .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds
-                .hickory_dns(false)
-                .default_headers(default_headers)
-                .build()
-                .expect("Failed to build client")
-        }
-    }
+        .dns_resolver(CustomDnsResolver::instance())
+        .default_headers(default_headers.clone())
+        .build()
+        .expect("Failed to build client")
 });

 // Build Regex only once since this takes a lot of time.
 static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap());

-// Special HashMap which holds the user defined Regex to speedup matching the regex.
-static ICON_BLACKLIST_REGEX: Lazy<dashmap::DashMap<String, Regex>> = Lazy::new(dashmap::DashMap::new);
-
-async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> {
+#[get("/<domain>/icon.png")]
+fn icon_external(domain: &str) -> Option<Redirect> {
     if !is_valid_domain(domain) {
         warn!("Invalid domain: {}", domain);
         return None;
     }

-    if check_domain_blacklist_reason(domain).await.is_some() {
+    if is_domain_blacklisted(domain) {
         return None;
     }

-    let url = template.replace("{}", domain);
+    let url = CONFIG._icon_service_url().replace("{}", domain);
     match CONFIG.icon_redirect_code() {
         301 => Some(Redirect::moved(url)),   // legacy permanent redirect
         302 => Some(Redirect::found(url)),   // legacy temporary redirect
@@ -103,11 +86,6 @@ fn icon_external(domain: &str) -> Option<Redirect> {
     }
 }

-#[get("/<domain>/icon.png")]
-async fn icon_external(domain: &str) -> Option<Redirect> {
-    icon_redirect(domain, &CONFIG._icon_service_url()).await
-}
-
 #[get("/<domain>/icon.png")]
 async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> {
     const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png");
@@ -166,153 +144,28 @@ fn is_valid_domain(domain: &str) -> bool {
     true
 }

-/// TODO: This is extracted from IpAddr::is_global, which is unstable:
-/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
-/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
-#[allow(clippy::nonminimal_bool)]
-#[cfg(not(feature = "unstable"))]
-fn is_global(ip: IpAddr) -> bool {
-    match ip {
-        IpAddr::V4(ip) => {
-            // check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two
-            // globally routable addresses in the 192.0.0.0/24 range.
-            if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a {
-                return true;
-            }
-            !ip.is_private()
-                && !ip.is_loopback()
-                && !ip.is_link_local()
-                && !ip.is_broadcast()
-                && !ip.is_documentation()
-                && !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000))
-                && !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
-                && !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast())
-                && !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18)
-                // Make sure the address is not in 0.0.0.0/8
-                && ip.octets()[0] != 0
-        }
-        IpAddr::V6(ip) => {
-            if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 {
-                true
-            } else {
-                !ip.is_multicast()
-                    && !ip.is_loopback()
-                    && !((ip.segments()[0] & 0xffc0) == 0xfe80)
-                    && !((ip.segments()[0] & 0xfe00) == 0xfc00)
-                    && !ip.is_unspecified()
-                    && !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8))
-            }
-        }
-    }
-}
-
-#[cfg(feature = "unstable")]
-fn is_global(ip: IpAddr) -> bool {
-    ip.is_global()
-}
-
-/// These are some tests to check that the implementations match
-/// The IPv4 can be all checked in 5 mins or so and they are correct as of nightly 2020-07-11
-/// The IPV6 can't be checked in a reasonable time, so we check about ten billion random ones, so far correct
-/// Note that the is_global implementation is subject to change as new IP RFCs are created
-///
-/// To run while showing progress output:
-/// cargo test --features sqlite,unstable -- --nocapture --ignored
-#[cfg(test)]
-#[cfg(feature = "unstable")]
-mod tests {
-    use super::*;
-
-    #[test]
-    #[ignore]
-    fn test_ipv4_global() {
-        for a in 0..u8::MAX {
-            println!("Iter: {}/255", a);
-            for b in 0..u8::MAX {
-                for c in 0..u8::MAX {
-                    for d in 0..u8::MAX {
-                        let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
-                        assert_eq!(ip.is_global(), is_global(ip))
-                    }
-                }
-            }
-        }
-    }
-
-    #[test]
-    #[ignore]
-    fn test_ipv6_global() {
-        use ring::rand::{SecureRandom, SystemRandom};
-        let mut v = [0u8; 16];
-        let rand = SystemRandom::new();
-        for i in 0..1_000 {
-            println!("Iter: {}/1_000", i);
-            for _ in 0..10_000_000 {
-                rand.fill(&mut v).expect("Error generating random values");
-                let ip = IpAddr::V6(std::net::Ipv6Addr::new(
-                    (v[14] as u16) << 8 | v[15] as u16,
-                    (v[12] as u16) << 8 | v[13] as u16,
-                    (v[10] as u16) << 8 | v[11] as u16,
-                    (v[8] as u16) << 8 | v[9] as u16,
-                    (v[6] as u16) << 8 | v[7] as u16,
-                    (v[4] as u16) << 8 | v[5] as u16,
-                    (v[2] as u16) << 8 | v[3] as u16,
-                    (v[0] as u16) << 8 | v[1] as u16,
-                ));
-                assert_eq!(ip.is_global(), is_global(ip))
-            }
-        }
-    }
-}
-
-#[derive(Clone)]
-enum DomainBlacklistReason {
-    Regex,
-    IP,
-}
-
-use cached::proc_macro::cached;
-#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)]
-async fn check_domain_blacklist_reason(domain: &str) -> Option<DomainBlacklistReason> {
-    // First check the blacklist regex if there is a match.
-    // This prevents the blocked domain(s) from being leaked via a DNS lookup.
-    if let Some(blacklist) = CONFIG.icon_blacklist_regex() {
-        // Use the pre-generate Regex stored in a Lazy HashMap if there's one, else generate it.
-        let is_match = if let Some(regex) = ICON_BLACKLIST_REGEX.get(&blacklist) {
-            regex.is_match(domain)
-        } else {
-            // Clear the current list if the previous key doesn't exists.
-            // To prevent growing of the HashMap after someone has changed it via the admin interface.
-            if ICON_BLACKLIST_REGEX.len() >= 1 {
-                ICON_BLACKLIST_REGEX.clear();
-            }
-
-            // Generate the regex to store in too the Lazy Static HashMap.
-            let blacklist_regex = Regex::new(&blacklist).unwrap();
-            let is_match = blacklist_regex.is_match(domain);
-            ICON_BLACKLIST_REGEX.insert(blacklist.clone(), blacklist_regex);
-
-            is_match
-        };
-
-        if is_match {
-            debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain);
-            return Some(DomainBlacklistReason::Regex);
-        }
-    }
-
-    if CONFIG.icon_blacklist_non_global_ips() {
-        if let Ok(s) = lookup_host((domain, 0)).await {
-            for addr in s {
-                if !is_global(addr.ip()) {
-                    debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain);
-                    return Some(DomainBlacklistReason::IP);
-                }
-            }
-        }
-    }
-
-    None
+pub fn is_domain_blacklisted(domain: &str) -> bool {
+    let Some(config_blacklist) = CONFIG.icon_blacklist_regex() else {
+        return false;
+    };
+
+    // Compiled domain blacklist
+    static COMPILED_BLACKLIST: Mutex<Option<(String, Regex)>> = Mutex::new(None);
+    let mut guard = COMPILED_BLACKLIST.lock().unwrap();
+
+    // If the stored regex is up to date, use it
+    if let Some((value, regex)) = &*guard {
+        if value == &config_blacklist {
+            return regex.is_match(domain);
+        }
+    }
+
+    // If we don't have a regex stored, or it's not up to date, recreate it
+    let regex = Regex::new(&config_blacklist).unwrap();
+    let is_match = regex.is_match(domain);
+    *guard = Some((config_blacklist, regex));
+
+    is_match
 }

 async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
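Compared to the removed DashMap keyed by the blacklist string, the Mutex<Option<(String, Regex)>> above holds exactly one compiled regex together with the config value it was built from, so editing the regex in the admin interface can never grow a map of stale entries. The same pattern in miniature, as a standalone hedged sketch with a usage example (not part of the diff):

use std::sync::Mutex;

use regex::Regex;

// Recompile only when the pattern changes; otherwise reuse the cached Regex.
fn cached_is_match(pattern: &str, haystack: &str) -> bool {
    static CACHE: Mutex<Option<(String, Regex)>> = Mutex::new(None);
    let mut guard = CACHE.lock().unwrap();
    if let Some((cached, regex)) = &*guard {
        if cached.as_str() == pattern {
            return regex.is_match(haystack);
        }
    }
    let regex = Regex::new(pattern).unwrap();
    let hit = regex.is_match(haystack);
    *guard = Some((pattern.to_string(), regex));
    hit
}

fn main() {
    assert!(cached_is_match(r"example\.com$", "icons.example.com")); // compiles the regex
    assert!(cached_is_match(r"example\.com$", "www.example.com")); // cache hit
}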
@@ -342,6 +195,13 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
             Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
         }
         Err(e) => {
+            // If this error comes from the custom resolver, this means this is a blacklisted domain
+            // or non global IP, don't save the miss file in this case to avoid leaking it
+            if let Some(error) = CustomResolverError::downcast_ref(&e) {
+                warn!("{error}");
+                return None;
+            }
+
             warn!("Unable to download icon: {:?}", e);
             let miss_indicator = path + ".miss";
             save_icon(&miss_indicator, &[]).await;
@@ -491,12 +351,12 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
     let ssldomain = format!("https://{domain}");
     let httpdomain = format!("http://{domain}");

-    // First check the domain as given during the request for both HTTPS and HTTP.
-    let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await {
-        Ok(c) => Ok(c),
-        Err(e) => {
-            let mut sub_resp = Err(e);
-
+    // First check the domain as given during the request for HTTPS.
+    let resp = match get_page(&ssldomain).await {
+        Err(e) if CustomResolverError::downcast_ref(&e).is_none() => {
+            // If we get an error that is not caused by the blacklist, we retry with HTTP
+            match get_page(&httpdomain).await {
+                mut sub_resp @ Err(_) => {
                     // When the domain is not an IP, and has more then one dot, remove all subdomains.
                     let is_ip = domain.parse::<IpAddr>();
                     if is_ip.is_err() && domain.matches('.').count() > 1 {
@@ -527,6 +387,12 @@ async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> {
                     }

                     sub_resp
                 }
+                res => res,
+            }
+        }
+
+        // If we get a result or a blacklist error, just continue
+        res => res,
     };
// Create the iconlist // Create the iconlist
@@ -573,21 +439,12 @@
 }

 async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> {
-    match check_domain_blacklist_reason(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await {
-        Some(DomainBlacklistReason::Regex) => warn!("Favicon '{}' is from a blacklisted domain!", url),
-        Some(DomainBlacklistReason::IP) => warn!("Favicon '{}' is hosted on a non-global IP!", url),
-        None => (),
-    }
-
     let mut client = CLIENT.get(url);
     if !referer.is_empty() {
         client = client.header("Referer", referer)
     }

-    match client.send().await {
-        Ok(c) => c.error_for_status().map_err(Into::into),
-        Err(e) => err_silent!(format!("{e}")),
-    }
+    Ok(client.send().await?.error_for_status()?)
 }

 /// Returns a Integer with the priority of the type of the icon which to prefer.
@@ -670,12 +527,6 @@
 }

 async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
-    match check_domain_blacklist_reason(domain).await {
-        Some(DomainBlacklistReason::Regex) => err_silent!("Domain is blacklisted", domain),
-        Some(DomainBlacklistReason::IP) => err_silent!("Host resolves to a non-global IP", domain),
-        None => (),
-    }
-
     let icon_result = get_icon_url(domain).await?;

     let mut buffer = Bytes::new();
@@ -711,8 +562,8 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
                 _ => debug!("Extracted icon from data:image uri is invalid"),
             };
         } else {
-            match get_page_with_referer(&icon.href, &icon_result.referer).await {
-                Ok(res) => {
+            let res = get_page_with_referer(&icon.href, &icon_result.referer).await?;
+
             buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net)

             // Check if the icon type is allowed, else try an icon from the list.
@@ -725,9 +576,6 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
             info!("Downloaded icon from {}", icon.href);
             break;
-                }
-                Err(e) => debug!("{:?}", e),
-            };
-        }
         }
     }

     if buffer.is_empty() {
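CustomResolverError::downcast_ref(&e), used earlier in this file, reports whether a failure originated in the custom resolver; the type itself is imported from crate::util and is not part of the hunks shown. A plausible sketch of such a helper — the variant names here are assumptions — walks the error's source() chain:

use std::{error::Error as StdError, fmt, net::IpAddr};

#[derive(Debug)]
pub enum CustomResolverError {
    Blacklist { domain: String },
    NonGlobalIp { domain: String, ip: IpAddr },
}

impl fmt::Display for CustomResolverError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Blacklist { domain } => write!(f, "Blacklisted domain: {domain}"),
            Self::NonGlobalIp { domain, ip } => {
                write!(f, "IP {ip} for domain '{domain}' is not a global IP!")
            }
        }
    }
}

impl StdError for CustomResolverError {}

impl CustomResolverError {
    // Walk the source() chain (reqwest wraps hyper, which wraps the resolver
    // error) and return our error if it is what caused the failure.
    pub fn downcast_ref(e: &(dyn StdError + 'static)) -> Option<&Self> {
        let mut source = Some(e);
        while let Some(err) = source {
            if let Some(hit) = err.downcast_ref::<Self>() {
                return Some(hit);
            }
            source = err.source();
        }
        None
    }
}

Because get_icon checks for this error before writing a .miss indicator, a blacklisted domain never leaves a cache file behind, which is exactly the leak the comment in that hunk refers to.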

View file

@@ -295,7 +295,12 @@ async fn _password_login(
     "KdfIterations": user.client_kdf_iter,
     "KdfMemory": user.client_kdf_memory,
     "KdfParallelism": user.client_kdf_parallelism,
-    "ResetMasterPassword": false,// TODO: Same as above
+    "ResetMasterPassword": false, // TODO: Same as above
+    "ForcePasswordReset": false,
+    "MasterPasswordPolicy": {
+        "object": "masterPasswordPolicy",
+    },
+
     "scope": scope,
     "unofficialServer": true,
     "UserDecryptionOptions": {

View file

@@ -20,7 +20,7 @@ pub use crate::api::{
     core::two_factor::send_incomplete_2fa_notifications,
     core::{emergency_notification_reminder_job, emergency_request_timeout_job},
     core::{event_cleanup_job, events_routes as core_events_routes},
-    icons::routes as icons_routes,
+    icons::{is_domain_blacklisted, routes as icons_routes},
     identity::routes as identity_routes,
     notifications::routes as notifications_routes,
     notifications::{AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS},

View file

@@ -689,7 +689,7 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
                 _ => err_handler!("Error getting DB"),
             };

-            if !can_access_collection(&headers.org_user, &col_id, &mut conn).await {
+            if !Collection::can_access_collection(&headers.org_user, &col_id, &mut conn).await {
                 err_handler!("The current user isn't a manager for this collection")
             }
         }
@@ -762,10 +762,6 @@ impl From<ManagerHeadersLoose> for Headers {
         }
     }
 }
-
-async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
-    org_user.has_full_access()
-        || Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn).await
-}

 impl ManagerHeaders {
     pub async fn from_loose(
@@ -777,7 +773,7 @@ impl ManagerHeaders {
             if uuid::Uuid::parse_str(col_id).is_err() {
                 err!("Collection Id is malformed!");
             }
-            if !can_access_collection(&h.org_user, col_id, conn).await {
+            if !Collection::can_access_collection(&h.org_user, col_id, conn).await {
                 err!("You don't have access to all collections!");
             }
         }

View file

@@ -1,6 +1,6 @@
 use serde_json::Value;

-use super::{CollectionGroup, User, UserOrgStatus, UserOrgType, UserOrganization};
+use super::{CollectionGroup, GroupUser, User, UserOrgStatus, UserOrgType, UserOrganization};
 use crate::CONFIG;

 db_object! {
@@ -102,6 +102,15 @@ impl Collection {
         json_object["HidePasswords"] = json!(hide_passwords);
         json_object
     }
+
+    pub async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool {
+        org_user.has_status(UserOrgStatus::Confirmed)
+            && (org_user.has_full_access()
+                || CollectionUser::has_access_to_collection_by_user(col_id, &org_user.user_uuid, conn).await
+                || (CONFIG.org_groups_enabled()
+                    && (GroupUser::has_full_access_by_member(&org_user.org_uuid, &org_user.uuid, conn).await
+                        || GroupUser::has_access_to_collection_by_member(col_id, &org_user.uuid, conn).await)))
+    }
 }

 use crate::db::DbConn;
@@ -252,17 +261,6 @@ impl Collection {
         }
     }

-    // Check if a user has access to a specific collection
-    // FIXME: This needs to be reviewed. The query used by `find_by_user_uuid` could be adjusted to filter when needed.
-    // For now this is a good solution without making to much changes.
-    pub async fn has_access_by_collection_and_user_uuid(
-        collection_uuid: &str,
-        user_uuid: &str,
-        conn: &mut DbConn,
-    ) -> bool {
-        Self::find_by_user_uuid(user_uuid.to_owned(), conn).await.into_iter().any(|c| c.uuid == collection_uuid)
-    }
-
     pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
         Self::find_by_user_uuid(user_uuid.to_owned(), conn)
             .await
@@ -644,6 +642,10 @@ impl CollectionUser {
             Ok(())
         }}
     }
+
+    pub async fn has_access_to_collection_by_user(col_id: &str, user_uuid: &str, conn: &mut DbConn) -> bool {
+        Self::find_by_collection_and_user(col_id, user_uuid, conn).await.is_some()
+    }
 }

 /// Database methods

View file

@@ -81,25 +81,32 @@ impl EmergencyAccess {
         })
     }

-    pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Value {
+    pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Option<Value> {
         let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
-            Some(User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found."))
+            User::find_by_uuid(grantee_uuid, conn).await.expect("Grantee user not found.")
         } else if let Some(email) = self.email.as_deref() {
-            Some(User::find_by_mail(email, conn).await.expect("Grantee user not found."))
+            match User::find_by_mail(email, conn).await {
+                Some(user) => user,
+                None => {
+                    // remove outstanding invitations which should not exist
+                    let _ = Self::delete_all_by_grantee_email(email, conn).await;
+                    return None;
+                }
+            }
         } else {
-            None
+            return None;
         };

-        json!({
+        Some(json!({
             "Id": self.uuid,
             "Status": self.status,
             "Type": self.atype,
             "WaitTimeDays": self.wait_time_days,
-            "GranteeId": grantee_user.as_ref().map_or("", |u| &u.uuid),
-            "Email": grantee_user.as_ref().map_or("", |u| &u.email),
-            "Name": grantee_user.as_ref().map_or("", |u| &u.name),
+            "GranteeId": grantee_user.uuid,
+            "Email": grantee_user.email,
+            "Name": grantee_user.name,
             "Object": "emergencyAccessGranteeDetails",
-        })
+        }))
     }
 }
@@ -214,6 +221,13 @@ impl EmergencyAccess {
         Ok(())
     }

+    pub async fn delete_all_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
+        for ea in Self::find_all_invited_by_grantee_email(grantee_email, conn).await {
+            ea.delete(conn).await?;
+        }
+        Ok(())
+    }
+
     pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
         User::update_uuid_revision(&self.grantor_uuid, conn).await;
@@ -285,6 +299,15 @@ impl EmergencyAccess {
         }}
     }

+    pub async fn find_all_invited_by_grantee_email(grantee_email: &str, conn: &mut DbConn) -> Vec<Self> {
+        db_run! { conn: {
+            emergency_access::table
+                .filter(emergency_access::email.eq(grantee_email))
+                .filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32))
+                .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
+        }}
+    }
+
     pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             emergency_access::table
@@ -292,6 +315,21 @@ impl EmergencyAccess {
                 .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
         }}
     }
+
+    pub async fn accept_invite(&mut self, grantee_uuid: &str, grantee_email: &str, conn: &mut DbConn) -> EmptyResult {
+        if self.email.is_none() || self.email.as_ref().unwrap() != grantee_email {
+            err!("User email does not match invite.");
+        }
+
+        if self.status == EmergencyAccessStatus::Accepted as i32 {
+            err!("Emergency contact already accepted.");
+        }
+
+        self.status = EmergencyAccessStatus::Accepted as i32;
+        self.grantee_uuid = Some(String::from(grantee_uuid));
+        self.email = None;
+        self.save(conn).await
+    }
 }

 // endregion

View file

@@ -344,6 +344,25 @@ impl UserOrganization {
     pub async fn to_json(&self, conn: &mut DbConn) -> Value {
         let org = Organization::find_by_uuid(&self.org_uuid, conn).await.unwrap();
 
+        let permissions = json!({
+            // TODO: Add support for Custom User Roles
+            // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
+            "accessEventLogs": false,
+            "accessImportExport": false,
+            "accessReports": false,
+            "createNewCollections": false,
+            "editAnyCollection": false,
+            "deleteAnyCollection": false,
+            "editAssignedCollections": false,
+            "deleteAssignedCollections": false,
+            "manageGroups": false,
+            "managePolicies": false,
+            "manageSso": false, // Not supported
+            "manageUsers": false,
+            "manageResetPassword": false,
+            "manageScim": false // Not supported (Not AGPLv3 Licensed)
+        });
+
         // https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/ProfileOrganizationResponseModel.cs
         json!({
             "Id": self.org_uuid,
@@ -371,27 +390,7 @@ impl UserOrganization {
             // "KeyConnectorEnabled": false,
             // "KeyConnectorUrl": null,
 
-            // TODO: Add support for Custom User Roles
-            // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
-            // "Permissions": {
-            //     "AccessEventLogs": false,
-            //     "AccessImportExport": false,
-            //     "AccessReports": false,
-            //     "ManageAllCollections": false,
-            //     "CreateNewCollections": false,
-            //     "EditAnyCollection": false,
-            //     "DeleteAnyCollection": false,
-            //     "ManageAssignedCollections": false,
-            //     "editAssignedCollections": false,
-            //     "deleteAssignedCollections": false,
-            //     "ManageCiphers": false,
-            //     "ManageGroups": false,
-            //     "ManagePolicies": false,
-            //     "ManageResetPassword": false,
-            //     "ManageSso": false, // Not supported
-            //     "ManageUsers": false,
-            //     "ManageScim": false, // Not supported (Not AGPLv3 Licensed)
-            // },
+            "permissions": permissions,
 
             "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side


@@ -246,6 +246,7 @@ impl User {
             "Email": self.email,
             "EmailVerified": !CONFIG.mail_enabled() || self.verified_at.is_some(),
             "Premium": true,
+            "PremiumFromOrganization": false,
             "MasterPasswordHint": self.password_hint,
             "Culture": "en-US",
             "TwoFactorEnabled": twofactor_enabled,
@@ -257,6 +258,7 @@ impl User {
             "ProviderOrganizations": [],
             "ForcePasswordReset": false,
             "AvatarColor": self.avatar_color,
+            "UsesKeyConnector": false,
             "Object": "profile",
         })
     }
@@ -311,6 +313,7 @@ impl User {
         Send::delete_all_by_user(&self.uuid, conn).await?;
         EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?;
+        EmergencyAccess::delete_all_by_grantee_email(&self.email, conn).await?;
         UserOrganization::delete_all_by_user(&self.uuid, conn).await?;
         Cipher::delete_all_by_user(&self.uuid, conn).await?;
         Favorite::delete_all_by_user(&self.uuid, conn).await?;


@@ -3,7 +3,7 @@
 // The more key/value pairs there are the more recursion occurs.
 // We want to keep this as low as possible, but not higher then 128.
 // If you go above 128 it will cause rust-analyzer to fail,
-#![recursion_limit = "87"]
+#![recursion_limit = "90"]
 
 // When enabled use MiMalloc as malloc instead of the default malloc
 #[cfg(feature = "enable_mimalloc")]
@@ -211,8 +211,8 @@ fn launch_info() {
 }
 
 fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
-    // Depending on the main log level we either want to disable or enable logging for trust-dns.
-    // Else if there are timeouts it will clutter the logs since trust-dns uses warn for this.
+    // Depending on the main log level we either want to disable or enable logging for hickory.
+    // Else if there are timeouts it will clutter the logs since hickory uses warn for this.
     let hickory_level = if level >= log::LevelFilter::Debug {
         level
     } else {
@@ -266,7 +266,7 @@ fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> {
         .level_for("handlebars::render", handlebars_level)
         // Prevent cookie_store logs
         .level_for("cookie_store", log::LevelFilter::Off)
-        // Variable level for trust-dns used by reqwest
+        // Variable level for hickory used by reqwest
         .level_for("hickory_resolver::name_server::name_server", hickory_level)
         .level_for("hickory_proto::xfer", hickory_level)
         .level_for("diesel_logger", diesel_logger_level)


@@ -4,6 +4,7 @@
 use std::{collections::HashMap, io::Cursor, ops::Deref, path::Path};
 
 use num_traits::ToPrimitive;
+use once_cell::sync::Lazy;
 use rocket::{
     fairing::{Fairing, Info, Kind},
     http::{ContentType, Header, HeaderMap, Method, Status},
@@ -701,14 +702,9 @@ where
 use reqwest::{header, Client, ClientBuilder};
 
-pub fn get_reqwest_client() -> Client {
-    match get_reqwest_client_builder().build() {
-        Ok(client) => client,
-        Err(e) => {
-            error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'");
-            get_reqwest_client_builder().hickory_dns(false).build().expect("Failed to build client")
-        }
-    }
+pub fn get_reqwest_client() -> &'static Client {
+    static INSTANCE: Lazy<Client> = Lazy::new(|| get_reqwest_client_builder().build().expect("Failed to build client"));
+    &INSTANCE
 }
 
 pub fn get_reqwest_client_builder() -> ClientBuilder {
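Call sites now borrow a single process-wide client instead of building one per request, which also keeps its connection pool shared. A minimal usage sketch (the URL and surrounding async context are illustrative):

    let client = get_reqwest_client();
    let body = client.get("https://example.com").send().await?.text().await?;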
@@ -767,3 +763,248 @@ pub fn parse_experimental_client_feature_flags(experimental_client_feature_flags
     feature_states
 }
mod dns_resolver {
use std::{
fmt,
net::{IpAddr, SocketAddr},
sync::Arc,
};
use hickory_resolver::{system_conf::read_system_conf, TokioAsyncResolver};
use once_cell::sync::Lazy;
use reqwest::dns::{Name, Resolve, Resolving};
use crate::{util::is_global, CONFIG};
#[derive(Debug, Clone)]
pub enum CustomResolverError {
Blacklist {
domain: String,
},
NonGlobalIp {
domain: String,
ip: IpAddr,
},
}
impl CustomResolverError {
pub fn downcast_ref(e: &dyn std::error::Error) -> Option<&Self> {
let mut source = e.source();
while let Some(err) = source {
source = err.source();
if let Some(err) = err.downcast_ref::<CustomResolverError>() {
return Some(err);
}
}
None
}
}
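    // Hypothetical caller-side sketch (not part of this diff): reqwest wraps resolver
    // failures, so a caller such as the icon fetcher would recover the custom error by
    // walking the error's source chain via downcast_ref, e.g.:
    //
    //     if let Some(CustomResolverError::Blacklist { domain }) = CustomResolverError::downcast_ref(&reqwest_err) {
    //         warn!("Icon fetch blocked, {domain} is blacklisted");
    //     }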
impl fmt::Display for CustomResolverError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Blacklist {
domain,
} => write!(f, "Blacklisted domain: {domain} matched ICON_BLACKLIST_REGEX"),
Self::NonGlobalIp {
domain,
ip,
} => write!(f, "IP {ip} for domain '{domain}' is not a global IP!"),
}
}
}
impl std::error::Error for CustomResolverError {}
#[derive(Debug, Clone)]
pub enum CustomDnsResolver {
Default(),
Hickory(Arc<TokioAsyncResolver>),
}
type BoxError = Box<dyn std::error::Error + Send + Sync>;
impl CustomDnsResolver {
pub fn instance() -> Arc<Self> {
static INSTANCE: Lazy<Arc<CustomDnsResolver>> = Lazy::new(CustomDnsResolver::new);
Arc::clone(&*INSTANCE)
}
fn new() -> Arc<Self> {
match read_system_conf() {
Ok((config, opts)) => {
let resolver = TokioAsyncResolver::tokio(config.clone(), opts.clone());
Arc::new(Self::Hickory(Arc::new(resolver)))
}
Err(e) => {
warn!("Error creating Hickory resolver, falling back to default: {e:?}");
Arc::new(Self::Default())
}
}
}
// Note that we get an iterator of addresses, but we only grab the first one for convenience
async fn resolve_domain(&self, name: &str) -> Result<Option<SocketAddr>, BoxError> {
pre_resolve(name)?;
let result = match self {
Self::Default() => tokio::net::lookup_host(name).await?.next(),
Self::Hickory(r) => r.lookup_ip(name).await?.iter().next().map(|a| SocketAddr::new(a, 0)),
};
if let Some(addr) = &result {
post_resolve(name, addr.ip())?;
}
Ok(result)
}
}
fn pre_resolve(name: &str) -> Result<(), CustomResolverError> {
if crate::api::is_domain_blacklisted(name) {
return Err(CustomResolverError::Blacklist {
domain: name.to_string(),
});
}
Ok(())
}
fn post_resolve(name: &str, ip: IpAddr) -> Result<(), CustomResolverError> {
if CONFIG.icon_blacklist_non_global_ips() && !is_global(ip) {
Err(CustomResolverError::NonGlobalIp {
domain: name.to_string(),
ip,
})
} else {
Ok(())
}
}
impl Resolve for CustomDnsResolver {
fn resolve(&self, name: Name) -> Resolving {
let this = self.clone();
Box::pin(async move {
let name = name.as_str();
let result = this.resolve_domain(name).await?;
Ok::<reqwest::dns::Addrs, _>(Box::new(result.into_iter()))
})
}
}
}
pub use dns_resolver::{CustomDnsResolver, CustomResolverError};
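This hunk does not show the wiring itself, but the resolver is presumably attached to the shared client through reqwest's `ClientBuilder::dns_resolver` hook, which accepts an `Arc<impl Resolve>`. A minimal sketch under that assumption (the user-agent and timeout values are illustrative, not taken from the diff):

    pub fn get_reqwest_client_builder() -> ClientBuilder {
        Client::builder()
            .user_agent("Vaultwarden") // illustrative
            .timeout(std::time::Duration::from_secs(10)) // illustrative
            .dns_resolver(CustomDnsResolver::instance())
    }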
/// TODO: This is extracted from IpAddr::is_global, which is unstable:
/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
#[allow(clippy::nonminimal_bool)]
#[cfg(any(not(feature = "unstable"), test))]
pub fn is_global_hardcoded(ip: std::net::IpAddr) -> bool {
match ip {
std::net::IpAddr::V4(ip) => {
!(ip.octets()[0] == 0 // "This network"
|| ip.is_private()
|| (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) //ip.is_shared()
|| ip.is_loopback()
|| ip.is_link_local()
// addresses reserved for future protocols (`192.0.0.0/24`)
||(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
|| ip.is_documentation()
|| (ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18) // ip.is_benchmarking()
|| (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) //ip.is_reserved()
|| ip.is_broadcast())
}
std::net::IpAddr::V6(ip) => {
!(ip.is_unspecified()
|| ip.is_loopback()
// IPv4-mapped Address (`::ffff:0:0/96`)
|| matches!(ip.segments(), [0, 0, 0, 0, 0, 0xffff, _, _])
// IPv4-IPv6 Translat. (`64:ff9b:1::/48`)
|| matches!(ip.segments(), [0x64, 0xff9b, 1, _, _, _, _, _])
// Discard-Only Address Block (`100::/64`)
|| matches!(ip.segments(), [0x100, 0, 0, 0, _, _, _, _])
// IETF Protocol Assignments (`2001::/23`)
|| (matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200)
&& !(
// Port Control Protocol Anycast (`2001:1::1`)
u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001
// Traversal Using Relays around NAT Anycast (`2001:1::2`)
|| u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002
// AMT (`2001:3::/32`)
|| matches!(ip.segments(), [0x2001, 3, _, _, _, _, _, _])
// AS112-v6 (`2001:4:112::/48`)
|| matches!(ip.segments(), [0x2001, 4, 0x112, _, _, _, _, _])
// ORCHIDv2 (`2001:20::/28`)
|| matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x2F).contains(&b))
))
|| ((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8)) // ip.is_documentation()
|| ((ip.segments()[0] & 0xfe00) == 0xfc00) //ip.is_unique_local()
|| ((ip.segments()[0] & 0xffc0) == 0xfe80)) //ip.is_unicast_link_local()
}
}
}
#[cfg(not(feature = "unstable"))]
pub use is_global_hardcoded as is_global;
#[cfg(feature = "unstable")]
#[inline(always)]
pub fn is_global(ip: std::net::IpAddr) -> bool {
ip.is_global()
}
/// These are some tests to check that the implementations match
/// The IPv4 can be all checked in 30 seconds or so and they are correct as of nightly 2023-07-17
/// The IPV6 can't be checked in a reasonable time, so we check over a hundred billion random ones, so far correct
/// Note that the is_global implementation is subject to change as new IP RFCs are created
///
/// To run while showing progress output:
/// cargo +nightly test --release --features sqlite,unstable -- --nocapture --ignored
#[cfg(test)]
#[cfg(feature = "unstable")]
mod tests {
use super::*;
use std::net::IpAddr;
#[test]
#[ignore]
fn test_ipv4_global() {
for a in 0..u8::MAX {
println!("Iter: {}/255", a);
for b in 0..u8::MAX {
for c in 0..u8::MAX {
for d in 0..u8::MAX {
let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {}", ip)
}
}
}
}
}
#[test]
#[ignore]
fn test_ipv6_global() {
use rand::Rng;
std::thread::scope(|s| {
for t in 0..16 {
let handle = s.spawn(move || {
let mut v = [0u8; 16];
let mut rng = rand::thread_rng();
for i in 0..20 {
println!("Thread {t} Iter: {i}/50");
for _ in 0..500_000_000 {
rng.fill(&mut v);
let ip = IpAddr::V6(std::net::Ipv6Addr::from(v));
assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {ip}");
}
}
});
}
});
}
}