2 Commits

Author SHA1 Message Date
Valentin Tolmer
8c052c091e fix hipb typo 2023-04-13 09:32:53 +02:00
Valentin Tolmer
278fb1630d server: implement haveibeenpwned endpoint
See #39.
2023-04-04 20:34:37 +02:00
118 changed files with 1825 additions and 6744 deletions

View File

@@ -1,9 +1,7 @@
FROM rust:1.72
FROM rust:1.66
ARG USERNAME=lldapdev
# We need to keep the user as 1001 to match the GitHub runner's UID.
# See https://github.com/actions/checkout/issues/956.
ARG USER_UID=1001
ARG USER_UID=1000
ARG USER_GID=$USER_UID
# Create the user
@@ -23,4 +21,4 @@ RUN RUSTFLAGS=-Ctarget-feature=-crt-static cargo install wasm-pack \
USER $USERNAME
ENV CARGO_HOME=/home/$USERNAME/.cargo
ENV SHELL=/bin/bash
ENV SHELL=/bin/bash

View File

@@ -34,20 +34,12 @@ package.json
.vscode
.devcontainer
# Created databases
*.db
*.db-shm
*.db-wal
# These are backup files generated by rustfmt
**/*.rs.bk
# Various config files that shouldn't be tracked
.env
lldap_config.toml
server_key
users.db*
screenshot.png
recipe.json
lldap_config.toml
cert.pem
key.pem

2
.gitattributes vendored
View File

@@ -1,4 +1,4 @@
example_configs/** linguist-documentation
example-configs/** linguist-documentation
docs/** linguist-documentation
*.md linguist-documentation
lldap_config.docker_template.toml linguist-documentation

5
.github/FUNDING.yml vendored
View File

@@ -1,5 +0,0 @@
# These are supported funding model platforms
github: [lldap]
custom: ['https://bmc.link/nitnelave']

2
.github/codecov.yml vendored
View File

@@ -10,5 +10,3 @@ ignore:
- "docs"
- "example_configs"
- "migration-tool"
- "scripts"
- "set-password"

View File

@@ -11,10 +11,10 @@ RUN mkdir -p /lldap/app
RUN if [ "${TARGETPLATFORM}" = "linux/amd64" ]; then \
mv bin/x86_64-unknown-linux-musl-lldap-bin/lldap target/lldap && \
mv bin/x86_64-unknown-linux-musl-lldap_migration_tool-bin/lldap_migration_tool target/lldap_migration_tool && \
mv bin/x86_64-unknown-linux-musl-migration-tool-bin/migration-tool target/migration-tool && \
mv bin/x86_64-unknown-linux-musl-lldap_set_password-bin/lldap_set_password target/lldap_set_password && \
chmod +x target/lldap && \
chmod +x target/lldap_migration_tool && \
chmod +x target/migration-tool && \
chmod +x target/lldap_set_password && \
ls -la target/ . && \
pwd \
@@ -22,21 +22,21 @@ RUN if [ "${TARGETPLATFORM}" = "linux/amd64" ]; then \
RUN if [ "${TARGETPLATFORM}" = "linux/arm64" ]; then \
mv bin/aarch64-unknown-linux-musl-lldap-bin/lldap target/lldap && \
mv bin/aarch64-unknown-linux-musl-lldap_migration_tool-bin/lldap_migration_tool target/lldap_migration_tool && \
mv bin/aarch64-unknown-linux-musl-migration-tool-bin/migration-tool target/migration-tool && \
mv bin/aarch64-unknown-linux-musl-lldap_set_password-bin/lldap_set_password target/lldap_set_password && \
chmod +x target/lldap && \
chmod +x target/lldap_migration_tool && \
chmod +x target/migration-tool && \
chmod +x target/lldap_set_password && \
ls -la target/ . && \
pwd \
; fi
RUN if [ "${TARGETPLATFORM}" = "linux/arm/v7" ]; then \
mv bin/armv7-unknown-linux-musleabihf-lldap-bin/lldap target/lldap && \
mv bin/armv7-unknown-linux-musleabihf-lldap_migration_tool-bin/lldap_migration_tool target/lldap_migration_tool && \
mv bin/armv7-unknown-linux-musleabihf-lldap_set_password-bin/lldap_set_password target/lldap_set_password && \
mv bin/armv7-unknown-linux-gnueabihf-lldap-bin/lldap target/lldap && \
mv bin/armv7-unknown-linux-gnueabihf-migration-tool-bin/migration-tool target/migration-tool && \
mv bin/armv7-unknown-linux-gnueabihf-lldap_set_password-bin/lldap_set_password target/lldap_set_password && \
chmod +x target/lldap && \
chmod +x target/lldap_migration_tool && \
chmod +x target/migration-tool && \
chmod +x target/lldap_set_password && \
ls -la target/ . && \
pwd \
@@ -47,7 +47,7 @@ COPY docker-entrypoint.sh /docker-entrypoint.sh
COPY lldap_config.docker_template.toml /lldap/
COPY web/index_local.html web/index.html
RUN cp target/lldap /lldap/ && \
cp target/lldap_migration_tool /lldap/ && \
cp target/migration-tool /lldap/ && \
cp target/lldap_set_password /lldap/ && \
cp -R web/index.html \
web/pkg \

View File

@@ -11,10 +11,10 @@ RUN mkdir -p /lldap/app
RUN if [ "${TARGETPLATFORM}" = "linux/amd64" ]; then \
mv bin/x86_64-unknown-linux-musl-lldap-bin/lldap target/lldap && \
mv bin/x86_64-unknown-linux-musl-lldap_migration_tool-bin/lldap_migration_tool target/lldap_migration_tool && \
mv bin/x86_64-unknown-linux-musl-migration-tool-bin/migration-tool target/migration-tool && \
mv bin/x86_64-unknown-linux-musl-lldap_set_password-bin/lldap_set_password target/lldap_set_password && \
chmod +x target/lldap && \
chmod +x target/lldap_migration_tool && \
chmod +x target/migration-tool && \
chmod +x target/lldap_set_password && \
ls -la target/ . && \
pwd \
@@ -22,21 +22,21 @@ RUN if [ "${TARGETPLATFORM}" = "linux/amd64" ]; then \
RUN if [ "${TARGETPLATFORM}" = "linux/arm64" ]; then \
mv bin/aarch64-unknown-linux-musl-lldap-bin/lldap target/lldap && \
mv bin/aarch64-unknown-linux-musl-lldap_migration_tool-bin/lldap_migration_tool target/lldap_migration_tool && \
mv bin/aarch64-unknown-linux-musl-migration-tool-bin/migration-tool target/migration-tool && \
mv bin/aarch64-unknown-linux-musl-lldap_set_password-bin/lldap_set_password target/lldap_set_password && \
chmod +x target/lldap && \
chmod +x target/lldap_migration_tool && \
chmod +x target/migration-tool && \
chmod +x target/lldap_set_password && \
ls -la target/ . && \
pwd \
; fi
RUN if [ "${TARGETPLATFORM}" = "linux/arm/v7" ]; then \
mv bin/armv7-unknown-linux-musleabihf-lldap-bin/lldap target/lldap && \
mv bin/armv7-unknown-linux-musleabihf-lldap_migration_tool-bin/lldap_migration_tool target/lldap_migration_tool && \
mv bin/armv7-unknown-linux-musleabihf-lldap_set_password-bin/lldap_set_password target/lldap_set_password && \
mv bin/armv7-unknown-linux-gnueabihf-lldap-bin/lldap target/lldap && \
mv bin/armv7-unknown-linux-gnueabihf-migration-tool-bin/migration-tool target/migration-tool && \
mv bin/armv7-unknown-linux-gnueabihf-lldap_set_password-bin/lldap_set_password target/lldap_set_password && \
chmod +x target/lldap && \
chmod +x target/lldap_migration_tool && \
chmod +x target/migration-tool && \
chmod +x target/lldap_set_password && \
ls -la target/ . && \
pwd \
@@ -47,7 +47,7 @@ COPY docker-entrypoint.sh /docker-entrypoint.sh
COPY lldap_config.docker_template.toml /lldap/
COPY web/index_local.html web/index.html
RUN cp target/lldap /lldap/ && \
cp target/lldap_migration_tool /lldap/ && \
cp target/migration-tool /lldap/ && \
cp target/lldap_set_password /lldap/ && \
cp -R web/index.html \
web/pkg \

View File

@@ -1,40 +1,45 @@
# Keep tracking base image
FROM rust:1.71-slim-bookworm
FROM rust:1.66-slim-bullseye
# Set needed env path
ENV PATH="/opt/armv7l-linux-musleabihf-cross/:/opt/armv7l-linux-musleabihf-cross/bin/:/opt/aarch64-linux-musl-cross/:/opt/aarch64-linux-musl-cross/bin/:/opt/x86_64-linux-musl-cross/:/opt/x86_64-linux-musl-cross/bin/:$PATH"
ENV PATH="/opt/aarch64-linux-musl-cross/:/opt/aarch64-linux-musl-cross/bin/:/opt/x86_64-linux-musl-cross/:/opt/x86_64-linux-musl-cross/bin/:$PATH"
# Set building env
ENV CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
CARGO_NET_GIT_FETCH_WITH_CLI=true \
CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABIHF_LINKER=armv7l-linux-musleabihf-gcc \
CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER=aarch64-linux-musl-gcc \
CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER=x86_64-linux-musl-gcc \
CC_armv7_unknown_linux_musleabihf=armv7l-linux-musleabihf-gcc \
CC_x86_64_unknown_linux_musl=x86_64-linux-musl-gcc \
CC_aarch64_unknown_linux_musl=aarch64-linux-musl-gcc
### Install Additional Build Tools
### Install build deps x86_64
RUN apt update && \
apt install -y --no-install-recommends curl git wget make perl pkg-config tar jq gzip && \
apt install -y --no-install-recommends curl git wget build-essential make perl pkg-config curl tar jq musl-tools gzip && \
curl -fsSL https://deb.nodesource.com/setup_lts.x | bash - && \
apt update && \
apt install -y --no-install-recommends nodejs && \
apt clean && \
rm -rf /var/lib/apt/lists/*
### Add musl-gcc aarch64, x86_64 and armv7l
### Install build deps aarch64 build
RUN dpkg --add-architecture arm64 && \
apt update && \
apt install -y gcc-aarch64-linux-gnu g++-aarch64-linux-gnu libc6-arm64-cross libc6-dev-arm64-cross gzip && \
apt clean && \
rm -rf /var/lib/apt/lists/* && \
rustup target add aarch64-unknown-linux-gnu
### armhf deps
RUN dpkg --add-architecture armhf && \
apt update && \
apt install -y gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf libc6-armhf-cross libc6-dev-armhf-cross gzip && \
apt clean && \
rm -rf /var/lib/apt/lists/* && \
rustup target add armv7-unknown-linux-gnueabihf
### Add musl-gcc aarch64 and x86_64
RUN wget -c https://musl.cc/x86_64-linux-musl-cross.tgz && \
tar zxf ./x86_64-linux-musl-cross.tgz -C /opt && \
wget -c https://musl.cc/aarch64-linux-musl-cross.tgz && \
tar zxf ./aarch64-linux-musl-cross.tgz -C /opt && \
wget -c http://musl.cc/armv7l-linux-musleabihf-cross.tgz && \
tar zxf ./armv7l-linux-musleabihf-cross.tgz -C /opt && \
rm ./x86_64-linux-musl-cross.tgz && \
rm ./aarch64-linux-musl-cross.tgz && \
rm ./armv7l-linux-musleabihf-cross.tgz
rm ./aarch64-linux-musl-cross.tgz
### Add musl target
RUN rustup target add x86_64-unknown-linux-musl && \
rustup target add aarch64-unknown-linux-musl && \
rustup target add armv7-unknown-linux-musleabihf
rustup target add aarch64-unknown-linux-musl
CMD ["bash"]

View File

@@ -30,6 +30,7 @@ env:
# build-ui , create/compile the web
### install wasm
### install rollup
### run app/build.sh
### upload artifacts
@@ -39,10 +40,10 @@ env:
# GitHub actions randomly timeout when downloading musl-gcc, using custom dev image #
# Look into .github/workflows/Dockerfile.dev for development image details #
# Using lldap dev image based on https://hub.docker.com/_/rust and musl-gcc bundled #
# lldap/rust-dev:latest #
#######################################################################################
# Cargo build
### armv7, aarch64 and amd64 is musl based
### Cargo build
### aarch64 and amd64 is musl based
### armv7 is glibc based, musl had issue with time_t when cross compile https://github.com/rust-lang/libc/issues/1848
# build-ui,builds-armhf, build-aarch64, build-amd64 will upload artifacts will be used next job
@@ -50,11 +51,12 @@ env:
### will run lldap with postgres, mariadb and sqlite backend, do selfcheck command.
# Build docker image
### Triplet docker image arch with debian and alpine base
### Triplet docker image arch with debian base
### amd64 & aarch64 with alpine base
# build-docker-image job will fetch artifacts and run Dockerfile.ci then push the image.
### Look into .github/workflows/Dockerfile.ci.debian or .github/workflowds/Dockerfile.ci.alpine
# Create release artifacts
# create release artifacts
### Fetch artifacts
### Clean up web artifact
### Setup folder structure
@@ -82,12 +84,12 @@ jobs:
build-ui:
runs-on: ubuntu-latest
needs: pre_job
if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.event_name == 'release' }}
if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
container:
image: lldap/rust-dev:latest
image: nitnelave/rust-dev:latest
steps:
- name: Checkout repository
uses: actions/checkout@v4.0.0
uses: actions/checkout@v3.5.0
- uses: actions/cache@v3
with:
path: |
@@ -99,6 +101,8 @@ jobs:
key: lldap-ui-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
lldap-ui-
- name: Install rollup (nodejs)
run: npm install -g rollup
- name: Add wasm target (rust)
run: rustup target add wasm32-unknown-unknown
- name: Install wasm-pack with cargo
@@ -119,20 +123,22 @@ jobs:
build-bin:
runs-on: ubuntu-latest
needs: pre_job
if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.event_name == 'release' }}
if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
strategy:
fail-fast: false
matrix:
target: [armv7-unknown-linux-musleabihf, aarch64-unknown-linux-musl, x86_64-unknown-linux-musl]
target: [armv7-unknown-linux-gnueabihf, aarch64-unknown-linux-musl, x86_64-unknown-linux-musl]
container:
image: lldap/rust-dev:latest
image: nitnelave/rust-dev:latest
env:
CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER: arm-linux-gnueabihf-gcc
CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER: aarch64-linux-musl-gcc
CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER: x86_64-linux-musl-gcc
CARGO_TERM_COLOR: always
RUSTFLAGS: -Ctarget-feature=+crt-static
CARGO_HOME: ${GITHUB_WORKSPACE}/.cargo
steps:
- name: Checkout repository
uses: actions/checkout@v4.0.0
uses: actions/checkout@v3.5.0
- uses: actions/cache@v3
with:
path: |
@@ -145,7 +151,7 @@ jobs:
restore-keys: |
lldap-bin-${{ matrix.target }}-
- name: Compile ${{ matrix.target }} lldap and tools
run: cargo build --target=${{ matrix.target }} --release -p lldap -p lldap_migration_tool -p lldap_set_password
run: cargo build --target=${{ matrix.target }} --release -p lldap -p migration-tool -p lldap_set_password
- name: Check path
run: ls -al target/release
- name: Upload ${{ matrix.target}} lldap artifacts
@@ -156,8 +162,8 @@ jobs:
- name: Upload ${{ matrix.target }} migration tool artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.target }}-lldap_migration_tool-bin
path: target/${{ matrix.target }}/release/lldap_migration_tool
name: ${{ matrix.target }}-migration-tool-bin
path: target/${{ matrix.target }}/release/migration-tool
- name: Upload ${{ matrix.target }} password tool artifacts
uses: actions/upload-artifact@v3
with:
@@ -174,13 +180,11 @@ jobs:
ports:
- 3306:3306
env:
MARIADB_USER: lldapuser
MARIADB_PASSWORD: lldappass
MARIADB_DATABASE: lldap
MARIADB_ALLOW_EMPTY_ROOT_PASSWORD: 1
options: >-
--name mariadb
--health-cmd="mariadb-admin ping" --health-interval=5s --health-timeout=2s --health-retries=3
MYSQL_USER: lldapuser
MYSQL_PASSWORD: lldappass
MYSQL_DATABASE: lldap
MYSQL_ROOT_PASSWORD: rootpass
options: --name mariadb
postgresql:
image: postgres:latest
@@ -190,12 +194,7 @@ jobs:
POSTGRES_USER: lldapuser
POSTGRES_PASSWORD: lldappass
POSTGRES_DB: lldap
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
--name postgresql
options: --name postgresql
steps:
- name: Download artifacts
@@ -257,27 +256,17 @@ jobs:
POSTGRES_USER: lldapuser
POSTGRES_PASSWORD: lldappass
POSTGRES_DB: lldap
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
--name postgresql
options: --name postgresql
mariadb:
image: mariadb:latest
ports:
- 3306:3306
env:
MARIADB_USER: lldapuser
MARIADB_PASSWORD: lldappass
MARIADB_DATABASE: lldap
MARIADB_ALLOW_EMPTY_ROOT_PASSWORD: 1
options: >-
--name mariadb
--health-cmd="mariadb-admin ping" --health-interval=5s --health-timeout=2s --health-retries=3
MYSQL_USER: lldapuser
MYSQL_PASSWORD: lldappass
MYSQL_DATABASE: lldap
MYSQL_ROOT_PASSWORD: rootpass
options: --name mariadb
mysql:
image: mysql:latest
ports:
@@ -286,18 +275,11 @@ jobs:
MYSQL_USER: lldapuser
MYSQL_PASSWORD: lldappass
MYSQL_DATABASE: lldap
MYSQL_ALLOW_EMPTY_PASSWORD: 1
options: >-
--name mysql
--health-cmd="mysqladmin ping" --health-interval=5s --health-timeout=2s --health-retries=3
MYSQL_ROOT_PASSWORD: rootpass
options: --name mysql
steps:
- name: Checkout scripts
uses: actions/checkout@v4.0.0
with:
sparse-checkout: 'scripts'
- name: Download LLDAP artifacts
uses: actions/download-artifact@v3
with:
@@ -332,7 +314,7 @@ jobs:
- name: Create dummy user
run: |
TOKEN=$(curl -X POST -H "Content-Type: application/json" -d '{"username": "admin", "password": "ldappass"}' http://localhost:17170/auth/simple/login | jq -r .token)
TOKEN=$(curl -X POST -H "Content-Type: application/json" -d '{"username": "admin", "password": "ldappass"}' http://localhost:17170/auth/simple/login | cut -c 11-277)
echo "$TOKEN"
curl 'http://localhost:17170/api/graphql' -H 'Content-Type: application/json' -H "Authorization: Bearer ${TOKEN//[$'\t\r\n ']}" --data-binary '{"query":"mutation{\n createUser(user:\n {\n id: \"dummyuser\",\n email: \"dummyuser@example.com\"\n }\n )\n {\n id\n email\n }\n}\n\n\n"}' --compressed
bin/lldap_set_password --base-url http://localhost:17170 --admin-username admin --admin-password ldappass --token $TOKEN --username dummyuser --password dummypassword
@@ -346,8 +328,10 @@ jobs:
- name: Export and Converting to Postgress
run: |
bash ./scripts/sqlite_dump_commands.sh | sqlite3 ./users.db > ./dump.sql
sed -i -r -e "s/X'([[:xdigit:]]+'[^'])/'\\\x\\1/g" -e ":a; s/(INSERT INTO user_attribute_schema\(.*\) VALUES\(.*),1([^']*\);)$/\1,true\2/; s/(INSERT INTO user_attribute_schema\(.*\) VALUES\(.*),0([^']*\);)$/\1,false\2/; ta" -e '1s/^/BEGIN;\n/' -e '$aCOMMIT;' ./dump.sql
curl -L https://raw.githubusercontent.com/nitnelave/lldap/main/scripts/sqlite_dump_commands.sh -o helper.sh
chmod +x ./helper.sh
./helper.sh | sqlite3 ./users.db > ./dump.sql
sed -i -r -e "s/X'([[:xdigit:]]+'[^'])/'\\\x\\1/g" -e '1s/^/BEGIN;\n/' -e '$aCOMMIT;' ./dump.sql
- name: Create schema on postgres
run: |
@@ -355,14 +339,16 @@ jobs:
- name: Copy converted db to postgress and import
run: |
docker ps -a
docker cp ./dump.sql postgresql:/tmp/dump.sql
docker exec postgresql bash -c "psql -U lldapuser -d lldap < /tmp/dump.sql" | tee import.log
docker exec postgresql bash -c "psql -U lldapuser -d lldap < /tmp/dump.sql"
rm ./dump.sql
! grep ERROR import.log > /dev/null
- name: Export and Converting to mariadb
run: |
bash ./scripts/sqlite_dump_commands.sh | sqlite3 ./users.db > ./dump.sql
curl -L https://raw.githubusercontent.com/nitnelave/lldap/main/scripts/sqlite_dump_commands.sh -o helper.sh
chmod +x ./helper.sh
./helper.sh | sqlite3 ./users.db > ./dump.sql
cp ./dump.sql ./dump-no-sed.sql
sed -i -r -e "s/([^']'[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{9})\+00:00'([^'])/\1'\2/g" \-e 's/^INSERT INTO "?([a-zA-Z0-9_]+)"?/INSERT INTO `\1`/' -e '1s/^/START TRANSACTION;\n/' -e '$aCOMMIT;' ./dump.sql
sed -i '1 i\SET FOREIGN_KEY_CHECKS = 0;' ./dump.sql
@@ -372,14 +358,16 @@ jobs:
- name: Copy converted db to mariadb and import
run: |
docker ps -a
docker cp ./dump.sql mariadb:/tmp/dump.sql
docker exec mariadb bash -c "mariadb -ulldapuser -plldappass -f lldap < /tmp/dump.sql" | tee import.log
docker exec mariadb bash -c "mariadb -ulldapuser -plldappass -f lldap < /tmp/dump.sql"
rm ./dump.sql
! grep ERROR import.log > /dev/null
- name: Export and Converting to mysql
run: |
bash ./scripts/sqlite_dump_commands.sh | sqlite3 ./users.db > ./dump.sql
curl -L https://raw.githubusercontent.com/nitnelave/lldap/main/scripts/sqlite_dump_commands.sh -o helper.sh
chmod +x ./helper.sh
./helper.sh | sqlite3 ./users.db > ./dump.sql
sed -i -r -e 's/^INSERT INTO "?([a-zA-Z0-9_]+)"?/INSERT INTO `\1`/' -e '1s/^/START TRANSACTION;\n/' -e '$aCOMMIT;' ./dump.sql
sed -i '1 i\SET FOREIGN_KEY_CHECKS = 0;' ./dump.sql
@@ -388,10 +376,10 @@ jobs:
- name: Copy converted db to mysql and import
run: |
docker ps -a
docker cp ./dump.sql mysql:/tmp/dump.sql
docker exec mysql bash -c "mysql -ulldapuser -plldappass -f lldap < /tmp/dump.sql" | tee import.log
docker exec mysql bash -c "mysql -ulldapuser -plldappass -f lldap < /tmp/dump.sql"
rm ./dump.sql
! grep ERROR import.log > /dev/null
- name: Run lldap with postgres DB and healthcheck again
run: |
@@ -427,51 +415,22 @@ jobs:
LLDAP_http_port: 17173
LLDAP_JWT_SECRET: somejwtsecret
- name: Test Dummy User Postgres
run: ldapsearch -H ldap://localhost:3891 -LLL -D "uid=dummyuser,ou=people,dc=example,dc=com" -w 'dummypassword' -s "One" -b "ou=people,dc=example,dc=com"
- name: Test Dummy User MariaDB
run: ldapsearch -H ldap://localhost:3892 -LLL -D "uid=dummyuser,ou=people,dc=example,dc=com" -w 'dummypassword' -s "One" -b "ou=people,dc=example,dc=com"
- name: Test Dummy User MySQL
run: ldapsearch -H ldap://localhost:3893 -LLL -D "uid=dummyuser,ou=people,dc=example,dc=com" -w 'dummypassword' -s "One" -b "ou=people,dc=example,dc=com"
- name: Test Dummy User
run: |
ldapsearch -H ldap://localhost:3891 -LLL -D "uid=dummyuser,ou=people,dc=example,dc=com" -w 'dummypassword' -s "One" -b "ou=people,dc=example,dc=com"
ldapsearch -H ldap://localhost:3892 -LLL -D "uid=dummyuser,ou=people,dc=example,dc=com" -w 'dummypassword' -s "One" -b "ou=people,dc=example,dc=com"
ldapsearch -H ldap://localhost:3893 -LLL -D "uid=dummyuser,ou=people,dc=example,dc=com" -w 'dummypassword' -s "One" -b "ou=people,dc=example,dc=com"
build-docker-image:
needs: [build-ui, build-bin]
name: Build Docker image
runs-on: ubuntu-latest
strategy:
matrix:
container: ["debian","alpine"]
include:
- container: alpine
platforms: linux/amd64,linux/arm64
tags: |
type=ref,event=pr
type=semver,pattern=v{{version}}
type=semver,pattern=v{{major}}
type=semver,pattern=v{{major}}.{{minor}}
type=semver,pattern=v{{version}},suffix=
type=semver,pattern=v{{major}},suffix=
type=semver,pattern=v{{major}}.{{minor}},suffix=
type=raw,value=latest,enable={{ is_default_branch }}
type=raw,value=stable,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
type=raw,value=stable,enable=${{ startsWith(github.ref, 'refs/tags/v') }},suffix=
type=raw,value=latest,enable={{ is_default_branch }},suffix=
- container: debian
platforms: linux/amd64,linux/arm64,linux/arm/v7
tags: |
type=ref,event=pr
type=semver,pattern=v{{version}}
type=semver,pattern=v{{major}}
type=semver,pattern=v{{major}}.{{minor}}
type=raw,value=latest,enable={{ is_default_branch }}
type=raw,value=stable,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
permissions:
contents: read
packages: write
steps:
- name: Checkout repository
uses: actions/checkout@v4.0.0
uses: actions/checkout@v3.5.0
- name: Download all artifacts
uses: actions/download-artifact@v3
with:
@@ -487,66 +446,86 @@ jobs:
uses: docker/setup-qemu-action@v2
- uses: docker/setup-buildx-action@v2
- name: Docker ${{ matrix.container }} meta
- name: Docker meta
id: meta
uses: docker/metadata-action@v4
with:
# list of Docker images to use as base name for tags
images: |
nitnelave/lldap
lldap/lldap
ghcr.io/lldap/lldap
# Wanted Docker tags
# vX-alpine
# vX.Y-alpine
# vX.Y.Z-alpine
# latest
# latest-alpine
# stable
# stable-alpine
#################
# vX-debian
# vX.Y-debian
# vX.Y.Z-debian
# latest-debian
# stable-debian
#################
# Check matrix for tag list definition
flavor: |
latest=false
suffix=-${{ matrix.container }}
tags: ${{ matrix.tags }}
# generate Docker tags based on the following events/attributes
tags: |
type=ref,event=branch
type=ref,event=pr
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=sha
# Docker login to nitnelave/lldap and lldap/lldap
- name: Login to Nitnelave/LLDAP Docker Hub
- name: parse tag
uses: gacts/github-slug@v1
id: slug
- name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GitHub Container Registry
if: github.event_name != 'pull_request'
uses: docker/login-action@v2
with:
registry: ghcr.io
username: nitnelave
password: ${{ secrets.GITHUB_TOKEN }}
########################################
#### docker image build ####
#### docker image :latest tag build ####
########################################
- name: Build ${{ matrix.container }} Docker Image
- name: Build and push latest alpine
if: github.event_name != 'release'
uses: docker/build-push-action@v4
with:
context: .
push: ${{ github.event_name != 'pull_request' }}
platforms: ${{ matrix.platforms }}
file: ./.github/workflows/Dockerfile.ci.${{ matrix.container }}
tags: |
${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/arm64
file: ./.github/workflows/Dockerfile.ci.alpine
tags: nitnelave/lldap:latest, nitnelave/lldap:latest-alpine
cache-from: type=gha,mode=max
cache-to: type=gha,mode=max
- name: Build and push latest debian
if: github.event_name != 'release'
uses: docker/build-push-action@v4
with:
context: .
push: ${{ github.event_name != 'pull_request' }}
platforms: linux/amd64,linux/arm64,linux/arm/v7
file: ./.github/workflows/Dockerfile.ci.debian
tags: nitnelave/lldap:latest-debian
cache-from: type=gha,mode=max
cache-to: type=gha,mode=max
########################################
#### docker image :semver tag build ####
########################################
- name: Build and push release alpine
if: github.event_name == 'release'
uses: docker/build-push-action@v4
with:
context: .
platforms: linux/amd64,linux/arm64
push: true
# Tag as latest, stable, semver, major, major.minor and major.minor.patch.
file: ./.github/workflows/Dockerfile.ci.alpine
tags: nitnelave/lldap:stable, nitnelave/lldap:stable-alpine, nitnelave/lldap:v${{ steps.slug.outputs.version-semantic }}, nitnelave/lldap:v${{ steps.slug.outputs.version-major }}, nitnelave/lldap:v${{ steps.slug.outputs.version-major }}.${{ steps.slug.outputs.version-minor }}, nitnelave/lldap:v${{ steps.slug.outputs.version-major }}.${{ steps.slug.outputs.version-minor }}.${{ steps.slug.outputs.version-patch }}, nitnelave/lldap:v${{ steps.slug.outputs.version-semantic }}-alpine, nitnelave/lldap:v${{ steps.slug.outputs.version-major }}-alpine, nitnelave/lldap:v${{ steps.slug.outputs.version-major }}-alpine.${{ steps.slug.outputs.version-minor }}-alpine, nitnelave/lldap:v${{ steps.slug.outputs.version-major }}.${{ steps.slug.outputs.version-minor }}.${{ steps.slug.outputs.version-patch }}-alpine
cache-from: type=gha,mode=max
cache-to: type=gha,mode=max
- name: Build and push release debian
if: github.event_name == 'release'
uses: docker/build-push-action@v4
with:
context: .
platforms: linux/amd64,linux/arm64,linux/arm/v7
push: true
# Tag as latest, stable, semver, major, major.minor and major.minor.patch.
file: ./.github/workflows/Dockerfile.ci.debian
tags: nitnelave/lldap:stable-debian, nitnelave/lldap:v${{ steps.slug.outputs.version-semantic }}-debian, nitnelave/lldap:v${{ steps.slug.outputs.version-major }}-debian, nitnelave/lldap:v${{ steps.slug.outputs.version-major }}.${{ steps.slug.outputs.version-minor }}-debian, nitnelave/lldap:v${{ steps.slug.outputs.version-major }}.${{ steps.slug.outputs.version-minor }}.${{ steps.slug.outputs.version-patch }}-debian
cache-from: type=gha,mode=max
cache-to: type=gha,mode=max
@@ -558,14 +537,6 @@ jobs:
password: ${{ secrets.DOCKERHUB_PASSWORD }}
repository: nitnelave/lldap
- name: Update lldap repo description
if: github.event_name != 'pull_request'
uses: peter-evans/dockerhub-description@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_PASSWORD }}
repository: lldap/lldap
###############################################################
### Download artifacts, clean up ui, upload to release page ###
###############################################################
@@ -587,15 +558,15 @@ jobs:
run: |
mv bin/aarch64-unknown-linux-musl-lldap-bin/lldap bin/aarch64-lldap
mv bin/x86_64-unknown-linux-musl-lldap-bin/lldap bin/amd64-lldap
mv bin/armv7-unknown-linux-musleabihf-lldap-bin/lldap bin/armhf-lldap
mv bin/aarch64-unknown-linux-musl-lldap_migration_tool-bin/lldap_migration_tool bin/aarch64-lldap_migration_tool
mv bin/x86_64-unknown-linux-musl-lldap_migration_tool-bin/lldap_migration_tool bin/amd64-lldap_migration_tool
mv bin/armv7-unknown-linux-musleabihf-lldap_migration_tool-bin/lldap_migration_tool bin/armhf-lldap_migration_tool
mv bin/armv7-unknown-linux-gnueabihf-lldap-bin/lldap bin/armhf-lldap
mv bin/aarch64-unknown-linux-musl-migration-tool-bin/migration-tool bin/aarch64-migration-tool
mv bin/x86_64-unknown-linux-musl-migration-tool-bin/migration-tool bin/amd64-migration-tool
mv bin/armv7-unknown-linux-gnueabihf-migration-tool-bin/migration-tool bin/armhf-migration-tool
mv bin/aarch64-unknown-linux-musl-lldap_set_password-bin/lldap_set_password bin/aarch64-lldap_set_password
mv bin/x86_64-unknown-linux-musl-lldap_set_password-bin/lldap_set_password bin/amd64-lldap_set_password
mv bin/armv7-unknown-linux-musleabihf-lldap_set_password-bin/lldap_set_password bin/armhf-lldap_set_password
mv bin/armv7-unknown-linux-gnueabihf-lldap_set_password-bin/lldap_set_password bin/armhf-lldap_set_password
chmod +x bin/*-lldap
chmod +x bin/*-lldap_migration_tool
chmod +x bin/*-migration-tool
chmod +x bin/*-lldap_set_password
- name: Download llap ui artifacts
@@ -621,9 +592,9 @@ jobs:
mv bin/aarch64-lldap aarch64-lldap/lldap
mv bin/amd64-lldap amd64-lldap/lldap
mv bin/armhf-lldap armhf-lldap/lldap
mv bin/aarch64-lldap_migration_tool aarch64-lldap/lldap_migration_tool
mv bin/amd64-lldap_migration_tool amd64-lldap/lldap_migration_tool
mv bin/armhf-lldap_migration_tool armhf-lldap/lldap_migration_tool
mv bin/aarch64-migration-tool aarch64-lldap/migration-tool
mv bin/amd64-migration-tool amd64-lldap/migration-tool
mv bin/armhf-migration-tool armhf-lldap/migration-tool
mv bin/aarch64-lldap_set_password aarch64-lldap/lldap_set_password
mv bin/amd64-lldap_set_password amd64-lldap/lldap_set_password
mv bin/armhf-lldap_set_password armhf-lldap/lldap_set_password

View File

@@ -1,20 +0,0 @@
name: Release Bot
on:
release:
types: [published]
jobs:
comment:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: nflaig/release-comment-on-pr@master
with:
token: ${{ secrets.RELEASE_BOT_TOKEN }}
message: |
Thank you everyone for the contribution!
This feature is now available in the latest release, [${releaseTag}](${releaseUrl}).
You can support LLDAP by starring our repo, contributing some configuration examples and becoming a sponsor.

View File

@@ -33,7 +33,7 @@ jobs:
steps:
- name: Checkout sources
uses: actions/checkout@v4.0.0
uses: actions/checkout@v3.5.0
- uses: Swatinem/rust-cache@v2
- name: Build
run: cargo build --verbose --workspace
@@ -52,7 +52,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4.0.0
uses: actions/checkout@v3.5.0
- uses: Swatinem/rust-cache@v2
@@ -69,7 +69,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4.0.0
uses: actions/checkout@v3.5.0
- uses: Swatinem/rust-cache@v2
@@ -81,14 +81,12 @@ jobs:
coverage:
name: Code coverage
needs:
- pre_job
- test
needs: pre_job
if: ${{ needs.pre_job.outputs.should_skip != 'true' || (github.event_name == 'push' && github.ref == 'refs/heads/main') }}
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4.0.0
uses: actions/checkout@v3.5.0
- name: Install Rust
run: rustup toolchain install nightly --component llvm-tools-preview && rustup component add llvm-tools-preview --toolchain stable-x86_64-unknown-linux-gnu
@@ -103,13 +101,6 @@ jobs:
run: cargo llvm-cov --no-run --lcov --output-path lcov.info
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
if: github.ref != 'refs/heads/main' || github.event_name != 'push'
with:
files: lcov.info
fail_ci_if_error: true
- name: Upload coverage to Codecov (main)
uses: codecov/codecov-action@v3
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
with:
files: lcov.info
fail_ci_if_error: true

View File

@@ -5,98 +5,6 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.5.0] 2023-09-14
### Breaking
- Emails and UUIDs are now enforced to be unique.
- If you have several users with the same email, you'll have to disambiguate
them. You can do that by either issuing SQL commands directly
(`UPDATE users SET email = 'x@x' WHERE user_id = 'bob';`), or by reverting
to a 0.4.x version of LLDAP and editing the user through the web UI.
An error will prevent LLDAP 0.5+ from starting otherwise.
- This was done to prevent account takeover for systems that allow to
login via email.
### Added
- The server private key can be set as a seed from an env variable (#504).
- This is especially useful when you have multiple containers, they don't
need to share a writeable folder.
- Added support for changing the password through a plain LDAP Modify
operation (as opposed to an extended operation), to allow Jellyfin
to change password (#620).
- Allow creating a user with multiple objectClass (#612).
- Emails now have a message ID (#608).
- Added a warning for browsers that have WASM/JS disabled (#639).
- Added support for querying OUs in LDAP (#669).
- Added a button to clear the avatar in the UI (#358).
### Changed
- Groups are now sorted by name in the web UI (#623).
- ARM build now uses musl (#584).
- Improved logging.
- Default admin user is only created if there are no admins (#563).
- That allows you to remove the default admin, making it harder to
bruteforce.
### Fixed
- Fixed URL parsing with a trailing slash in the password setting utility
(#597).
In addition to all that, there was significant progress towards #67,
user-defined attributes. That complex feature will unblock integration with many
systems, including PAM authentication.
### New services
- Ejabberd
- Ergo
- LibreNMS
- Mealie
- MinIO
- OpnSense
- PfSense
- PowerDnsAdmin
- Proxmox
- Squid
- Tandoor recipes
- TheLounge
- Zabbix-web
- Zulip
## [0.4.3] 2023-04-11
The repository has changed from `nitnelave/lldap` to `lldap/lldap`, both on GitHub
and on DockerHub (although we will keep publishing the images to
`nitnelave/lldap` for the foreseeable future). All data on GitHub has been
migrated, and the new docker images are available both on DockerHub and on the
GHCR under `lldap/lldap`.
### Added
- EC private keys are now supported for LDAPS.
### Changed
- SMTP user no longer has a default value (and instead defaults to unauthenticated).
### Fixed
- WASM payload is now delivered uncompressed to Safari due to a Safari bug.
- Password reset no longer redirects to login page.
- NextCloud config should add the "mail" attribute.
- GraphQL parameters are now urldecoded, to support special characters in usernames.
- Healthcheck correctly checks the server certificate.
### New services
- Home Assistant
- Shaarli
## [0.4.2] - 2023-03-27
### Added

1169
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -9,8 +9,6 @@ members = [
default-members = ["server"]
resolver = "2"
[profile.release]
lto = true

View File

@@ -31,12 +31,12 @@ FROM chef AS builder
COPY --from=planner /tmp/recipe.json recipe.json
RUN cargo chef cook --release -p lldap_app --target wasm32-unknown-unknown \
&& cargo chef cook --release -p lldap \
&& cargo chef cook --release -p lldap_migration_tool \
&& cargo chef cook --release -p migration-tool \
&& cargo chef cook --release -p lldap_set_password
# Copy the source and build the app and server.
COPY --chown=app:app . .
RUN cargo build --release -p lldap -p lldap_migration_tool -p lldap_set_password \
RUN cargo build --release -p lldap -p migration-tool -p lldap_set_password \
# Build the frontend.
&& ./app/build.sh
@@ -78,7 +78,7 @@ WORKDIR /app
COPY --from=builder /app/app/index_local.html app/index.html
COPY --from=builder /app/app/static app/static
COPY --from=builder /app/app/pkg app/pkg
COPY --from=builder /app/target/release/lldap /app/target/release/lldap_migration_tool /app/target/release/lldap_set_password ./
COPY --from=builder /app/target/release/lldap /app/target/release/migration-tool /app/target/release/lldap_set_password ./
COPY docker-entrypoint.sh lldap_config.docker_template.toml ./
RUN set -x \

View File

@@ -13,7 +13,6 @@
<a href="https://discord.gg/h5PEdRMNyP">
<img alt="Discord" src="https://img.shields.io/discord/898492935446876200?label=discord&logo=discord" />
</a>
<a href="https://twitter.com/nitnelave1?ref_src=twsrc%5Etfw">
<img
src="https://img.shields.io/twitter/follow/nitnelave1?style=social"
@@ -24,32 +23,23 @@
src="https://img.shields.io/badge/unsafe-forbidden-success.svg"
alt="Unsafe forbidden"/>
</a>
<a href="https://app.codecov.io/gh/lldap/lldap">
<img alt="Codecov" src="https://img.shields.io/codecov/c/github/lldap/lldap" />
</a>
<br/>
<a href="https://www.buymeacoffee.com/nitnelave" target="_blank">
<img src="https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png" alt="Buy Me A Coffee" style="height: 41px !important;width: 174px !important;box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;-webkit-box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;" >
<a href="https://app.codecov.io/gh/nitnelave/lldap">
<img alt="Codecov" src="https://img.shields.io/codecov/c/github/nitnelave/lldap" />
</a>
</p>
- [About](#about)
- [Installation](#installation)
- [With Docker](#with-docker)
- [With Kubernetes](#with-kubernetes)
- [From source](#from-source)
- [Backend](#backend)
- [Frontend](#frontend)
- [Cross-compilation](#cross-compilation)
- [Client configuration](#client-configuration)
- [Compatible services](#compatible-services)
- [General configuration guide](#general-configuration-guide)
- [Sample client configurations](#sample-client-configurations)
- [Migrating from SQLite](#migrating-from-sqlite)
- [Comparisons with other services](#comparisons-with-other-services)
- [vs OpenLDAP](#vs-openldap)
- [vs FreeIPA](#vs-freeipa)
- [vs Kanidm](#vs-kanidm)
- [I can't log in!](#i-cant-log-in)
- [Contributions](#contributions)
@@ -131,8 +121,6 @@ services:
ports:
# For LDAP
- "3890:3890"
# For LDAPS (LDAP Over SSL), enable port if LLDAP_LDAPS_OPTIONS__ENABLED set true, look env below
#- "6360:6360"
# For the web front-end
- "17170:17170"
volumes:
@@ -146,10 +134,6 @@ services:
- LLDAP_JWT_SECRET=REPLACE_WITH_RANDOM
- LLDAP_LDAP_USER_PASS=REPLACE_WITH_PASSWORD
- LLDAP_LDAP_BASE_DN=dc=example,dc=com
# If using LDAPS, set enabled true and configure cert and key path
# - LLDAP_LDAPS_OPTIONS__ENABLED=true
# - LLDAP_LDAPS_OPTIONS__CERT_FILE=/path/to/certfile.crt
# - LLDAP_LDAPS_OPTIONS__KEY_FILE=/path/to/keyfile.key
# You can also set a different database:
# - LLDAP_DATABASE_URL=mysql://mysql-user:password@mysql-server/my-database
# - LLDAP_DATABASE_URL=postgres://postgres-user:password@postgres-server/my-database
@@ -174,7 +158,7 @@ To compile the project, you'll need:
Then you can compile the server (and the migration tool if you want):
```shell
cargo build --release -p lldap -p lldap_migration_tool
cargo build --release -p lldap -p migration-tool
```
The resulting binaries will be in `./target/release/`. Alternatively, you can
@@ -273,38 +257,28 @@ folder for help with:
- [Dex](example_configs/dex_config.yml)
- [Dokuwiki](example_configs/dokuwiki.md)
- [Dolibarr](example_configs/dolibarr.md)
- [Ejabberd](example_configs/ejabberd.md)
- [Emby](example_configs/emby.md)
- [Ergo IRCd](example_configs/ergo.md)
- [Gitea](example_configs/gitea.md)
- [Grafana](example_configs/grafana_ldap_config.toml)
- [Hedgedoc](example_configs/hedgedoc.md)
- [Jellyfin](example_configs/jellyfin.md)
- [Jitsi Meet](example_configs/jitsi_meet.conf)
- [KeyCloak](example_configs/keycloak.md)
- [LibreNMS](example_configs/librenms.md)
- [Matrix](example_configs/matrix_synapse.yml)
- [Mealie](example_configs/mealie.md)
- [MinIO](example_configs/minio.md)
- [Nextcloud](example_configs/nextcloud.md)
- [Nexus](example_configs/nexus.md)
- [Organizr](example_configs/Organizr.md)
- [Portainer](example_configs/portainer.md)
- [PowerDNS Admin](example_configs/powerdns_admin.md)
- [Proxmox VE](example_configs/proxmox.md)
- [Rancher](example_configs/rancher.md)
- [Seafile](example_configs/seafile.md)
- [Shaarli](example_configs/shaarli.md)
- [Squid](example_configs/squid.md)
- [Syncthing](example_configs/syncthing.md)
- [TheLounge](example_configs/thelounge.md)
- [Vaultwarden](example_configs/vaultwarden.md)
- [WeKan](example_configs/wekan.md)
- [WG Portal](example_configs/wg_portal.env.example)
- [WikiJS](example_configs/wikijs.md)
- [XBackBone](example_configs/xbackbone_config.php)
- [Zendto](example_configs/zendto.md)
- [Zulip](example_configs/zulip.md)
## Migrating from SQLite

View File

@@ -1,12 +1,8 @@
[package]
authors = ["Valentin Tolmer <valentin@tolmer.fr>"]
description = "Frontend for LLDAP"
edition = "2021"
homepage = "https://github.com/lldap/lldap"
license = "GPL-3.0-only"
name = "lldap_app"
repository = "https://github.com/lldap/lldap"
version = "0.5.0"
version = "0.4.3-alpha"
authors = ["Valentin Tolmer <valentin@tolmer.fr>"]
edition = "2021"
include = ["src/**/*", "queries/**/*", "Cargo.toml", "../schema.graphql"]
[dependencies]
@@ -23,6 +19,7 @@ serde = "1"
serde_json = "1"
url-escape = "0.1.1"
validator = "=0.14"
sha1 = "*"
validator_derive = "*"
wasm-bindgen = "0.2"
wasm-bindgen-futures = "*"
@@ -31,6 +28,7 @@ yew-router = "0.16"
# Needed because of https://github.com/tkaitchuck/aHash/issues/95
indexmap = "=1.6.2"
gloo-timers = "0.2.6"
[dependencies.web-sys]
version = "0.3"

View File

@@ -15,8 +15,8 @@
src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.1/dist/js/bootstrap.bundle.min.js"
integrity="sha384-/bQdsTh/da6pkI1MST/rWKFNjaCP5gBSY4sEBT38Q/9RBh9AH40zEOg7Hlq2THRZ"
crossorigin="anonymous"></script>
<script
src="https://cdn.jsdelivr.net/npm/bootstrap-dark-5@1.1.3/dist/js/darkmode.min.js"
<script
src="https://cdn.jsdelivr.net/npm/bootstrap-dark-5@1.1.3/dist/js/darkmode.min.js"
integrity="sha384-A4SLs39X/aUfwRclRaXvNeXNBTLZdnZdHhhteqbYFS2jZTRD79tKeFeBn7SGXNpi"
crossorigin="anonymous"></script>
<link
@@ -43,23 +43,6 @@
</head>
<body>
<noscript>
<!-- This will be displayed if the user doesn't have JavaScript enabled. -->
LLDAP requires JavaScript, please switch to a compatible browser or
enable it.
</noscript>
<script>
/* Detect if the user has WASM support. */
if (typeof WebAssembly === 'undefined') {
const pWASMMsg = document.createElement("p")
pWASMMsg.innerHTML = `
LLDAP requires WASM and JIT for JavaScript, please switch to a
compatible browser or enable it.
`
document.body.appendChild(pWASMMsg)
}
</script>
</body>
</html>

View File

@@ -13,8 +13,8 @@
<script
src="/static/bootstrap.bundle.min.js"
integrity="sha384-/bQdsTh/da6pkI1MST/rWKFNjaCP5gBSY4sEBT38Q/9RBh9AH40zEOg7Hlq2THRZ"></script>
<script
src="/static/darkmode.min.js"
<script
src="/static/darkmode.min.js"
integrity="sha384-A4SLs39X/aUfwRclRaXvNeXNBTLZdnZdHhhteqbYFS2jZTRD79tKeFeBn7SGXNpi"></script>
<link
rel="stylesheet"
@@ -40,23 +40,6 @@
</head>
<body>
<noscript>
<!-- This will be displayed if the user doesn't have JavaScript enabled. -->
LLDAP requires JavaScript, please switch to a compatible browser or
enable it.
</noscript>
<script>
/* Detect if the user has WASM support. */
if (typeof WebAssembly === 'undefined') {
const pWASMMsg = document.createElement("p")
pWASMMsg.innerHTML = `
LLDAP requires WASM and JIT for JavaScript, please switch to a
compatible browser or enable it.
`
document.body.appendChild(pWASMMsg)
}
</script>
</body>
</html>

View File

@@ -1,4 +1,5 @@
use crate::{
components::password_field::PasswordField,
components::router::{AppRoute, Link},
infra::{
api::HostService,
@@ -128,11 +129,9 @@ impl CommonComponent<ChangePasswordForm> for ChangePasswordForm {
Msg::SubmitNewPassword => {
let mut rng = rand::rngs::OsRng;
let new_password = self.form.model().password;
let registration_start_request = opaque::client::registration::start_registration(
new_password.as_bytes(),
&mut rng,
)
.context("Could not initiate password change")?;
let registration_start_request =
opaque::client::registration::start_registration(&new_password, &mut rng)
.context("Could not initiate password change")?;
let req = registration::ClientRegistrationStartRequest {
username: ctx.props().username.clone(),
registration_start_request: registration_start_request.message,
@@ -256,14 +255,12 @@ impl Component for ChangePasswordForm {
{":"}
</label>
<div class="col-sm-10">
<Field
<PasswordField<FormModel>
form={&self.form}
field_name="password"
input_type="password"
class="form-control"
class_invalid="is-invalid has-error"
class_valid="has-success"
autocomplete="new-password"
oninput={link.callback(|_| Msg::FormUpdate)} />
<div class="invalid-feedback">
{&self.form.field_message("password")}

View File

@@ -117,10 +117,7 @@ impl CommonComponent<CreateUserForm> for CreateUserForm {
let opaque::client::registration::ClientRegistrationStartResult {
state,
message,
} = opaque::client::registration::start_registration(
password.as_bytes(),
&mut rng,
)?;
} = opaque::client::registration::start_registration(&password, &mut rng)?;
let req = registration::ClientRegistrationStartRequest {
username: user_id,
registration_start_request: message,
@@ -237,7 +234,7 @@ impl Component for CreateUserForm {
</div>
</div>
<div class="form-group row mb-3">
<label for="display_name"
<label for="display-name"
class="form-label col-4 col-form-label">
{"Display name:"}
</label>
@@ -256,7 +253,7 @@ impl Component for CreateUserForm {
</div>
</div>
<div class="form-group row mb-3">
<label for="first_name"
<label for="first-name"
class="form-label col-4 col-form-label">
{"First name:"}
</label>
@@ -275,7 +272,7 @@ impl Component for CreateUserForm {
</div>
</div>
<div class="form-group row mb-3">
<label for="last_name"
<label for="last-name"
class="form-label col-4 col-form-label">
{"Last name:"}
</label>

View File

@@ -149,9 +149,9 @@ impl Component for LoginForm {
let link = &ctx.link();
if self.refreshing {
html! {
<div>
<img src={"spinner.gif"} alt={"Loading"} />
</div>
<div class="spinner-border" role="status">
<span class="sr-only">{"Loading..."}</span>
</div>
}
} else {
html! {

View File

@@ -10,6 +10,7 @@ pub mod group_details;
pub mod group_table;
pub mod login;
pub mod logout;
pub mod password_field;
pub mod remove_user_from_group;
pub mod reset_password_step1;
pub mod reset_password_step2;

View File

@@ -0,0 +1,152 @@
use crate::infra::{
api::{hash_password, HostService, PasswordHash, PasswordWasLeaked},
common_component::{CommonComponent, CommonComponentParts},
};
use anyhow::Result;
use gloo_timers::callback::Timeout;
use web_sys::{HtmlInputElement, InputEvent};
use yew::{html, Callback, Classes, Component, Context, Properties};
use yew_form::{Field, Form, Model};
/// Messages handled by the `PasswordField` component.
pub enum PasswordFieldMsg {
    /// The user typed in the field; carries the current field contents.
    OnInput(String),
    /// Fired by the debounce timer once the user has stopped typing.
    OnInputIdle,
    /// Response from the haveibeenpwned check: whether the password was
    /// leaked (`None` if the server doesn't support the check), plus the
    /// hash the check was performed for.
    PasswordCheckResult(Result<(Option<PasswordWasLeaked>, PasswordHash)>),
}
/// State of the password leak check, driving the inline indicator shown
/// next to the field.
#[derive(PartialEq)]
pub enum PasswordState {
    /// The check completed; the payload says whether the password was found
    /// in a leak.
    Checked(PasswordWasLeaked),
    /// Server doesn't support checking passwords (TODO: move to config).
    NotSupported,
    /// Requested a check, no response yet from the server.
    Loading,
    /// User is still actively typing.
    Typing,
}
/// A password input field that debounces keystrokes and checks the typed
/// password against the haveibeenpwned database, showing the result inline.
pub struct PasswordField<FormModel: Model> {
    common: CommonComponentParts<Self>,
    // Debounce timer; `Some` while waiting for the user to stop typing.
    timeout_task: Option<Timeout>,
    // Latest value typed in the field.
    password: String,
    // Current state of the leak check.
    password_check_state: PasswordState,
    // Ties the component to the form model type without storing one.
    _marker: std::marker::PhantomData<FormModel>,
}
impl<FormModel: Model> CommonComponent<PasswordField<FormModel>> for PasswordField<FormModel> {
    /// Dispatches a component message. Returns `Ok(true)` to trigger a
    /// re-render; errors bubble up to the common component machinery.
    fn handle_msg(
        &mut self,
        ctx: &Context<Self>,
        msg: <Self as Component>::Message,
    ) -> anyhow::Result<bool> {
        match msg {
            PasswordFieldMsg::OnInput(password) => {
                self.password = password;
                if self.password_check_state != PasswordState::NotSupported {
                    self.password_check_state = PasswordState::Typing;
                    // Only bother checking passwords of a plausible length
                    // (8+ chars); (re)arm a 500 ms debounce timer.
                    if self.password.len() >= 8 {
                        let link = ctx.link().clone();
                        self.timeout_task = Some(Timeout::new(500, move || {
                            link.send_message(PasswordFieldMsg::OnInputIdle)
                        }));
                    }
                }
            }
            PasswordFieldMsg::PasswordCheckResult(result) => {
                self.timeout_task = None;
                // If there's an error from the backend, don't retry.
                self.password_check_state = PasswordState::NotSupported;
                if let (Some(check), hash) = result? {
                    // Only apply the result if it matches the password
                    // currently in the field (the user may have kept typing
                    // while the request was in flight).
                    if hash == hash_password(&self.password) {
                        self.password_check_state = PasswordState::Checked(check)
                    }
                }
            }
            PasswordFieldMsg::OnInputIdle => {
                self.timeout_task = None;
                if self.password_check_state != PasswordState::NotSupported {
                    self.password_check_state = PasswordState::Loading;
                    // Fire the (hashed) password off to the server-side
                    // haveibeenpwned check.
                    self.common.call_backend(
                        ctx,
                        HostService::check_password_haveibeenpwned(hash_password(&self.password)),
                        PasswordFieldMsg::PasswordCheckResult,
                    );
                }
            }
        }
        Ok(true)
    }

    fn mut_common(&mut self) -> &mut CommonComponentParts<PasswordField<FormModel>> {
        &mut self.common
    }
}
/// Properties for `PasswordField`, mostly forwarded to the inner
/// `yew_form` `Field`.
#[derive(Properties, PartialEq, Clone)]
pub struct PasswordFieldProperties<FormModel: Model> {
    /// Name of the form-model field this input is bound to.
    pub field_name: String,
    pub form: Form<FormModel>,
    #[prop_or_else(|| { "form-control".into() })]
    pub class: Classes,
    #[prop_or_else(|| { "is-invalid".into() })]
    pub class_invalid: Classes,
    #[prop_or_else(|| { "is-valid".into() })]
    pub class_valid: Classes,
    /// Called with the raw input value on every keystroke.
    #[prop_or_else(Callback::noop)]
    pub oninput: Callback<String>,
}
impl<FormModel: Model> Component for PasswordField<FormModel> {
    type Message = PasswordFieldMsg;
    type Properties = PasswordFieldProperties<FormModel>;

    fn create(_: &Context<Self>) -> Self {
        Self {
            common: CommonComponentParts::<Self>::create(),
            timeout_task: None,
            password: String::new(),
            password_check_state: PasswordState::Typing,
            _marker: std::marker::PhantomData,
        }
    }

    fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
        CommonComponentParts::<Self>::update(self, ctx, msg)
    }

    /// Renders the password `Field` plus an indicator for the leak check:
    /// a cross/check icon once checked, a small spinner while the request
    /// is in flight, and nothing while typing or when unsupported.
    fn view(&self, ctx: &Context<Self>) -> yew::Html {
        let link = &ctx.link();
        html! {
          <div>
            <Field<FormModel>
                autocomplete={"new-password"}
                input_type={"password"}
                field_name={ctx.props().field_name.clone()}
                form={ctx.props().form.clone()}
                class={ctx.props().class.clone()}
                class_invalid={ctx.props().class_invalid.clone()}
                class_valid={ctx.props().class_valid.clone()}
                oninput={link.callback(|e: InputEvent| {
                    use wasm_bindgen::JsCast;
                    // Pull the current value out of the raw DOM input event.
                    let target = e.target().unwrap();
                    let input = target.dyn_into::<HtmlInputElement>().unwrap();
                    PasswordFieldMsg::OnInput(input.value())
                })} />
            {
              match self.password_check_state {
                PasswordState::Checked(PasswordWasLeaked(true)) => html! { <i class="bi bi-x"></i> },
                PasswordState::Checked(PasswordWasLeaked(false)) => html! { <i class="bi bi-check"></i> },
                PasswordState::NotSupported | PasswordState::Typing => html!{},
                PasswordState::Loading =>
                  html! {
                    <div class="spinner-border spinner-border-sm" role="status">
                      <span class="sr-only">{"Loading..."}</span>
                    </div>
                  },
              }
            }
          </div>
        }
    }
}

View File

@@ -1,5 +1,8 @@
use crate::{
components::router::{AppRoute, Link},
components::{
password_field::PasswordField,
router::{AppRoute, Link},
},
infra::{
api::HostService,
common_component::{CommonComponent, CommonComponentParts},
@@ -65,7 +68,7 @@ impl CommonComponent<ResetPasswordStep2Form> for ResetPasswordStep2Form {
let mut rng = rand::rngs::OsRng;
let new_password = self.form.model().password;
let registration_start_request =
opaque_registration::start_registration(new_password.as_bytes(), &mut rng)
opaque_registration::start_registration(&new_password, &mut rng)
.context("Could not initiate password change")?;
let req = registration::ClientRegistrationStartRequest {
username: self.username.clone().unwrap(),
@@ -176,14 +179,12 @@ impl Component for ResetPasswordStep2Form {
{"New password*:"}
</label>
<div class="col-sm-10">
<Field
<PasswordField<FormModel>
form={&self.form}
field_name="password"
class="form-control"
class_invalid="is-invalid has-error"
class_valid="has-success"
autocomplete="new-password"
input_type="password"
oninput={link.callback(|_| Msg::FormUpdate)} />
<div class="invalid-feedback">
{&self.form.field_message("password")}

View File

@@ -67,8 +67,7 @@ pub struct UpdateUser;
pub struct UserDetailsForm {
common: CommonComponentParts<Self>,
form: yew_form::Form<UserModel>,
// None means that the avatar hasn't changed.
avatar: Option<JsFile>,
avatar: JsFile,
reader: Option<FileReader>,
/// True if we just successfully updated the user, to display a success message.
just_updated: bool,
@@ -82,8 +81,6 @@ pub enum Msg {
FileSelected(File),
/// The "Submit" button was clicked.
SubmitClicked,
/// The "Clear" button for the avatar was clicked.
ClearAvatarClicked,
/// A picked file finished loading.
FileLoaded(String, Result<Vec<u8>>),
/// We got the response from the server about our update message.
@@ -105,12 +102,7 @@ impl CommonComponent<UserDetailsForm> for UserDetailsForm {
match msg {
Msg::Update => Ok(true),
Msg::FileSelected(new_avatar) => {
if self
.avatar
.as_ref()
.and_then(|f| f.file.as_ref().map(|f| f.name()))
!= Some(new_avatar.name())
{
if self.avatar.file.as_ref().map(|f| f.name()) != Some(new_avatar.name()) {
let file_name = new_avatar.name();
let link = ctx.link().clone();
self.reader = Some(read_as_bytes(&new_avatar, move |res| {
@@ -119,32 +111,26 @@ impl CommonComponent<UserDetailsForm> for UserDetailsForm {
res.map_err(|e| anyhow::anyhow!("{:#}", e)),
))
}));
self.avatar = Some(JsFile {
self.avatar = JsFile {
file: Some(new_avatar),
contents: None,
});
};
}
Ok(true)
}
Msg::SubmitClicked => self.submit_user_update_form(ctx),
Msg::ClearAvatarClicked => {
self.avatar = Some(JsFile::default());
Ok(true)
}
Msg::UserUpdated(response) => self.user_update_finished(response),
Msg::FileLoaded(file_name, data) => {
if let Some(avatar) = &mut self.avatar {
if let Some(file) = &avatar.file {
if file.name() == file_name {
let data = data?;
if !is_valid_jpeg(data.as_slice()) {
// Clear the selection.
self.avatar = None;
bail!("Chosen image is not a valid JPEG");
} else {
avatar.contents = Some(data);
return Ok(true);
}
if let Some(file) = &self.avatar.file {
if file.name() == file_name {
let data = data?;
if !is_valid_jpeg(data.as_slice()) {
// Clear the selection.
self.avatar = JsFile::default();
bail!("Chosen image is not a valid JPEG");
} else {
self.avatar.contents = Some(data);
return Ok(true);
}
}
}
@@ -173,7 +159,7 @@ impl Component for UserDetailsForm {
Self {
common: CommonComponentParts::<Self>::create(),
form: yew_form::Form::new(model),
avatar: None,
avatar: JsFile::default(),
just_updated: false,
reader: None,
user: ctx.props().user.clone(),
@@ -189,13 +175,11 @@ impl Component for UserDetailsForm {
type Field = yew_form::Field<UserModel>;
let link = &ctx.link();
let avatar_string = match &self.avatar {
Some(avatar) => {
let avatar_base64 = to_base64(avatar);
avatar_base64.as_deref().unwrap_or("").to_owned()
}
None => self.user.avatar.as_deref().unwrap_or("").to_owned(),
};
let avatar_base64 = maybe_to_base64(&self.avatar).unwrap_or_default();
let avatar_string = avatar_base64
.as_deref()
.or(self.user.avatar.as_deref())
.unwrap_or("");
html! {
<div class="py-3">
<form class="form">
@@ -307,7 +291,7 @@ impl Component for UserDetailsForm {
</label>
<div class="col-8">
<div class="row align-items-center">
<div class="col-5">
<div class="col-8">
<input
class="form-control"
id="avatarInput"
@@ -318,27 +302,12 @@ impl Component for UserDetailsForm {
Self::upload_files(input.files())
})} />
</div>
<div class="col-3">
<button
class="btn btn-secondary col-auto"
id="avatarClear"
disabled={self.common.is_task_running()}
onclick={link.callback(|e: MouseEvent| {e.prevent_default(); Msg::ClearAvatarClicked})}>
{"Clear"}
</button>
</div>
<div class="col-4">
{
if !avatar_string.is_empty() {
html!{
<img
id="avatarDisplay"
src={format!("data:image/jpeg;base64, {}", avatar_string)}
style="max-height:128px;max-width:128px;height:auto;width:auto;"
alt="Avatar" />
}
} else { html! {} }
}
<img
id="avatarDisplay"
src={format!("data:image/jpeg;base64, {}", avatar_string)}
style="max-height:128px;max-width:128px;height:auto;width:auto;"
alt="Avatar" />
</div>
</div>
</div>
@@ -376,10 +345,10 @@ impl UserDetailsForm {
if !self.form.validate() {
bail!("Invalid inputs");
}
if let Some(JsFile {
if let JsFile {
file: Some(_),
contents: None,
}) = &self.avatar
} = &self.avatar
{
bail!("Image file hasn't finished loading, try again");
}
@@ -407,9 +376,7 @@ impl UserDetailsForm {
if base_user.last_name != model.last_name {
user_input.lastName = Some(model.last_name);
}
if let Some(avatar) = &self.avatar {
user_input.avatar = Some(to_base64(avatar)?);
}
user_input.avatar = maybe_to_base64(&self.avatar)?;
// Nothing changed.
if user_input == default_user_input {
return Ok(false);
@@ -431,8 +398,8 @@ impl UserDetailsForm {
self.user.display_name = model.display_name;
self.user.first_name = model.first_name;
self.user.last_name = model.last_name;
if let Some(avatar) = &self.avatar {
self.user.avatar = Some(to_base64(avatar)?);
if let Some(avatar) = maybe_to_base64(&self.avatar)? {
self.user.avatar = Some(avatar);
}
self.just_updated = true;
Ok(true)
@@ -457,12 +424,12 @@ fn is_valid_jpeg(bytes: &[u8]) -> bool {
.is_ok()
}
fn to_base64(file: &JsFile) -> Result<String> {
fn maybe_to_base64(file: &JsFile) -> Result<Option<String>> {
match file {
JsFile {
file: None,
contents: _,
} => Ok(String::new()),
} => Ok(None),
JsFile {
file: Some(_),
contents: None,
@@ -474,7 +441,7 @@ fn to_base64(file: &JsFile) -> Result<String> {
if !is_valid_jpeg(data.as_slice()) {
bail!("Chosen image is not a valid JPEG");
}
Ok(base64::encode(data))
Ok(Some(base64::encode(data)))
}
}
}

View File

@@ -1,4 +1,4 @@
use super::cookies::set_cookie;
use crate::infra::cookies::set_cookie;
use anyhow::{anyhow, Context, Result};
use gloo_net::http::{Method, Request};
use graphql_client::GraphQLQuery;
@@ -74,6 +74,19 @@ fn set_cookies_from_jwt(response: login::ServerLoginResponse) -> Result<(String,
.context("Error setting cookie")
}
/// Uppercase hex SHA-1 digest of a password — the format used by the
/// haveibeenpwned range API.
#[derive(PartialEq)]
pub struct PasswordHash(String);

/// Whether a password was found in a known leak.
#[derive(PartialEq)]
pub struct PasswordWasLeaked(pub bool);
/// Hashes a password with SHA-1 and renders it as uppercase hex, the
/// representation expected by the haveibeenpwned range API.
pub fn hash_password(password: &str) -> PasswordHash {
    use sha1::{Digest, Sha1};
    // One-shot digest of the raw password bytes, formatted as upper hex.
    let digest = Sha1::digest(password.as_bytes());
    PasswordHash(format!("{:X}", digest))
}
impl HostService {
pub async fn graphql_query<QueryType>(
variables: QueryType::Variables,
@@ -194,4 +207,35 @@ impl HostService {
!= http::StatusCode::NOT_FOUND,
)
}
pub async fn check_password_haveibeenpwned(
password_hash: PasswordHash,
) -> Result<(Option<PasswordWasLeaked>, PasswordHash)> {
use lldap_auth::password_reset::*;
let hash_prefix = &password_hash.0[0..5];
match call_server_json_with_error_message::<PasswordHashList, _>(
&format!("/auth/password/check/{}", hash_prefix),
NO_BODY,
"Could not validate token",
)
.await
{
Ok(r) => {
for PasswordHashCount { hash, count } in r.hashes {
if password_hash.0[5..] == hash && count != 0 {
return Ok((Some(PasswordWasLeaked(true)), password_hash));
}
}
Ok((Some(PasswordWasLeaked(false)), password_hash))
}
Err(e) => {
if e.to_string().contains("[501]:") {
// Unimplemented, no API key.
Ok((None, password_hash))
} else {
Err(e)
}
}
}
}
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 44 KiB

View File

@@ -1,12 +1,8 @@
[package]
authors = ["Valentin Tolmer <valentin@tolmer.fr>"]
description = "Authentication protocol for LLDAP"
edition = "2021"
homepage = "https://github.com/lldap/lldap"
license = "GPL-3.0-only"
name = "lldap_auth"
repository = "https://github.com/lldap/lldap"
version = "0.4.0"
version = "0.3.0"
authors = ["Valentin Tolmer <valentin@tolmer.fr>"]
edition = "2021"
[features]
default = ["opaque_server", "opaque_client"]
@@ -34,7 +30,7 @@ features = [ "serde" ]
# For WASM targets, use the JS getrandom.
[target.'cfg(not(target_arch = "wasm32"))'.dependencies.getrandom]
version = "0.2"
features = ["js"]
[target.'cfg(target_arch = "wasm32")'.dependencies.getrandom]
version = "0.2"
features = ["js"]

View File

@@ -102,6 +102,17 @@ pub mod password_reset {
pub user_id: String,
pub token: String,
}
    /// One entry of the haveibeenpwned range response: a hash suffix and
    /// the number of times it appeared in known leaks.
    #[derive(Serialize, Deserialize, Clone)]
    pub struct PasswordHashCount {
        // Hex SHA-1 hash suffix (the part after the 5-char prefix sent in
        // the range query).
        pub hash: String,
        pub count: u64,
    }

    /// List of candidate hashes returned for a hash-prefix range query.
    #[derive(Serialize, Deserialize, Clone)]
    pub struct PasswordHashList {
        pub hashes: Vec<PasswordHashCount>,
    }
}
#[derive(Clone, Serialize, Deserialize)]

View File

@@ -77,10 +77,10 @@ pub mod client {
pub use opaque_ke::ClientRegistrationFinishParameters;
/// Initiate the registration negotiation.
pub fn start_registration<R: RngCore + CryptoRng>(
password: &[u8],
password: &str,
rng: &mut R,
) -> AuthenticationResult<ClientRegistrationStartResult> {
Ok(ClientRegistration::start(rng, password)?)
Ok(ClientRegistration::start(rng, password.as_bytes())?)
}
/// Finalize the registration negotiation.

View File

@@ -20,7 +20,7 @@ LLDAP has a command that will connect to a target database and initialize the
schema. If running with docker, run the following command to use your active
instance (this has the benefit of ensuring your container has access):
```sh
```
docker exec -it <LLDAP container name> /app/lldap create_schema -d <Target database url>
```
@@ -34,7 +34,7 @@ databases (SQLite in this example) will give an error if LLDAP is in the middle
statements. There are various ways to do this, but a simple enough way is filtering a
whole database dump. This repo contains [a script](/scripts/sqlite_dump_commands.sh) to generate SQLite commands for creating an appropriate dump:
```sh
```
./sqlite_dump_commands.sh | sqlite3 /path/to/lldap/config/users.db > /path/to/dump.sql
```
@@ -49,9 +49,8 @@ a transaction in case one of the statements fail.
PostgreSQL uses a different hex string format. The command below should switch SQLite
format to PostgreSQL format, and wrap it all in a transaction:
```sh
```
sed -i -r -e "s/X'([[:xdigit:]]+'[^'])/'\\\x\\1/g" \
-e ":a; s/(INSERT INTO user_attribute_schema\(.*\) VALUES\(.*),1([^']*\);)$/\1,true\2/; s/(INSERT INTO user_attribute_schema\(.*\) VALUES\(.*),0([^']*\);)$/\1,false\2/; ta" \
-e '1s/^/BEGIN;\n/' \
-e '$aCOMMIT;' /path/to/dump.sql
```
@@ -59,11 +58,11 @@ sed -i -r -e "s/X'([[:xdigit:]]+'[^'])/'\\\x\\1/g" \
### To MySQL
MySQL mostly cooperates, but it gets some errors if you don't escape the `groups` table. It also uses
backticks to escape table name instead of quotes. Run the
backticks to escape table name instead of quotes. Run the
following command to wrap all table names in backticks for good measure, and wrap the inserts in
a transaction:
```sh
```
sed -i -r -e 's/^INSERT INTO "?([a-zA-Z0-9_]+)"?/INSERT INTO `\1`/' \
-e '1s/^/START TRANSACTION;\n/' \
-e '$aCOMMIT;' \
@@ -75,7 +74,7 @@ sed -i -r -e 's/^INSERT INTO "?([a-zA-Z0-9_]+)"?/INSERT INTO `\1`/' \
While MariaDB is supposed to be identical to MySQL, it doesn't support timezone offsets on DATETIME
strings. Use the following command to remove those and perform the additional MySQL sanitization:
```sh
```
sed -i -r -e "s/([^']'[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{9})\+00:00'([^'])/\1'\2/g" \
-e 's/^INSERT INTO "?([a-zA-Z0-9_]+)"?/INSERT INTO `\1`/' \
-e '1s/^/START TRANSACTION;\n/' \

View File

@@ -1,58 +0,0 @@
# Migration from 0.4 to 0.5
Welcome! If you're here, it's probably that the migration from 0.4.x to 0.5
didn't go smoothly for you. Don't worry, we can fix that.
## Multiple users with the same email
This is the most common case. You can see in the LLDAP logs that there are
several users with the same email, and they are listed.
This is not allowed anymore in v0.5, to prevent a user from setting their email
to someone else's email and gaining access to systems that identify by email.
The problem is that you currently have several users with the same email, so the
constraint cannot be enforced.
### Step 1: Take a note of the users with duplicate emails
In the LLDAP logs when you tried to start v0.5+, you'll see some warnings with
the list of users with the same emails. Take note of them.
### Step 2: Downgrade to v0.4.3
If using docker, switch to the `lldap/lldap:v0.4.3` image. Alternatively, grab
the binaries at https://github.com/lldap/lldap/releases/tag/v0.4.3.
This downgrade is safe and supported.
### Step 3: Remove duplicate emails
Restart LLDAP with the v0.4.3 version, and using your notes from step 1, change
the email of users with duplicate emails to make sure that each email is unique.
### Step 4: Upgrade again
You can now revert to the initial version.
## Multiple users/groups with the same UUID
This should be extremely rare. In this case, you'll need to find which users
have the same UUID, revert to v0.4.3 to be able to apply the changes, and delete
one of the duplicates.
## FAQ
### What if I want several users to be controlled by the same email?
You can use plus codes to set "the same" email to several users, while ensuring
that they can't identify as each other. For instance:
- Admin: `admin@example.com`
- Read-only admin: `admin+readonly@example.com`
- Jellyfin admin: `admin+jellyfin@example.com`
### I'm upgrading to a higher version than v0.5.
This guide is still relevant: you can use whatever later version in place of
v0.5. You'll still need to revert to v0.4.3 to apply the changes.

View File

@@ -10,7 +10,7 @@ $conf['plugin']['authldap']['server'] = 'ldap://lldap_server:3890'; #IP of
$conf['plugin']['authldap']['usertree'] = 'ou=people,dc=example,dc=com';
$conf['plugin']['authldap']['grouptree'] = 'ou=groups, dc=example, dc=com';
$conf['plugin']['authldap']['userfilter'] = '(&(uid=%{user})(objectClass=person))';
$conf['plugin']['authldap']['groupfilter'] = '(&(member=%{dn})(objectClass=groupOfUniqueNames))';
$conf['plugin']['authldap']['groupfilter'] = '(objectClass=group)';
$conf['plugin']['authldap']['attributes'] = array('cn', 'displayname', 'mail', 'givenname', 'objectclass', 'sn', 'uid', 'memberof');
$conf['plugin']['authldap']['version'] = 3;
$conf['plugin']['authldap']['binddn'] = 'cn=admin,ou=people,dc=example,dc=com';

View File

@@ -1,30 +0,0 @@
# Basic LDAP auth for a Ejabberd XMPP server
[Main documentation here.](https://docs.ejabberd.im/admin/configuration/ldap/)
For simple user auth add this to main ejabberd.yml:
```
host_config:
xmpp.example.org:
auth_method: [ldap]
ldap_servers:
- 127.0.0.1 #IP or hostname of LLDAP server
ldap_port: 3890
ldap_uids:
- uid
ldap_rootdn: "uid=lldap_readonly,ou=people,dc=example,dc=org"
ldap_password: "secret"
ldap_base: "ou=people,dc=example,dc=org"
```
## vCard from LDAP
Theoretically possible, [see the documentation.](https://docs.ejabberd.im/admin/configuration/ldap/#vcard-in-ldap)
TODO
## Shared roster groups from LDAP
Theoretically possible, [see the documentation.](https://docs.ejabberd.im/admin/configuration/ldap/#shared-roster-in-ldap)
TODO

View File

@@ -1,22 +0,0 @@
# Basic LDAP auth for an Ergo IRC server
[Main documentation here.](https://github.com/ergochat/ergo-ldap)
For simple user auth prepare a ldap-config.yaml with the following settings
```
host: "127.0.0.1"
port: 3890
timeout: 30s
# uncomment for TLS / LDAPS:
# use-ssl: true
bind-dn: "uid=%s,ou=people,dc=example,dc=org"
```
Then add the compiled ergo-ldap program to your Ergo folder and make sure it can be executed by the same user your Ergo IRCd runs as.
Follow the instructions in the main Ergo config file's accounts section on how to execute an external auth program.
Make sure SASL auth is enabled and then restart Ergo to enable LDAP linked SASL auth.

View File

@@ -37,13 +37,13 @@ search_base_dns = ["dc=example,dc=org"]
[servers.attributes]
member_of = "memberOf"
email = "mail"
name = "displayName"
name = "givenName"
surname = "sn"
username = "uid"
# If you want to map your ldap groups to grafana's groups, see: https://grafana.com/docs/grafana/latest/auth/ldap/#group-mappings
# As a quick example, here is how you would map lldap's admin group to grafana's admin
# [[servers.group_mappings]]
# group_dn = "cn=lldap_admin,ou=groups,dc=example,dc=org"
# group_dn = "uid=lldap_admin,ou=groups,dc=example,dc=org"
# org_role = "Admin"
# grafana_admin = true

View File

@@ -1,24 +0,0 @@
# Home Assistant Configuration
Home Assistant configures ldap auth via the [Command Line Auth Provider](https://www.home-assistant.io/docs/authentication/providers/#command-line). The wiki mentions a script that can be used for LDAP authentication, but it doesn't work in the container version (it is lacking both `ldapsearch` and `curl` ldap protocol support). Thankfully LLDAP has a graphql API to save the day!
## Graphql-based Auth Script
The [auth script](lldap-ha-auth.sh) attempts to authenticate a user against an LLDAP server, using credentials provided via `username` and `password` environment variables. The first argument must be the URL of your LLDAP server, accessible from Home Assistant. You can provide an additional optional argument to confine allowed logins to a single group. The script will output the user's display name as the `name` variable, if not empty.
1. Copy the [auth script](lldap-ha-auth.sh) to your home assistant instance. In this example, we use `/config/lldap-ha-auth.sh`.
- Set the script as executable by running `chmod +x /config/lldap-ha-auth.sh`
2. Add the following to your configuration.yaml in Home assistant:
```yaml
homeassistant:
auth_providers:
# Ensure you have the homeassistant provider enabled if you want to continue using your existing accounts
- type: homeassistant
- type: command_line
command: /config/lldap-ha-auth.sh
# Only allow users in the 'homeassistant_user' group to login.
# Change to ["https://lldap.example.com"] to allow all users
args: ["https://lldap.example.com", "homeassistant_user"]
meta: true
```
3. Reload your config or restart Home Assistant

View File

@@ -37,9 +37,9 @@ Otherwise, just use:
```
### Admin Base DN
The DN to search for your admins.
The DN of your admin group. If you have `media_admin` as your group you would use:
```
ou=people,dc=example,dc=com
cn=media_admin,ou=groups,dc=example,dc=com
```
### Admin Filter
@@ -49,15 +49,8 @@ that), use:
```
(memberof=cn=media_admin,ou=groups,dc=example,dc=com)
```
Bear in mind that admins must also be a member of the users group if you use one.
Otherwise, you can use LLDAP's admin group:
```
(memberof=cn=lldap_admin,ou=groups,dc=example,dc=com)
```
## Password change
To allow changing Passwords via Jellyfin the following things are required
- The bind user needs to have the group lldap_password_manager (changing passwords of members of the group lldap_admin does not work to prevent privilege escalation)
- Check `Allow Password Change`
- `LDAP Password Attribute` Needs to be set to `userPassword`

View File

@@ -62,11 +62,3 @@ Once the groups are synchronized, go to "Manage > Groups" on the left. Click on
Assign the role "admin" to the group. Now you can log in as the LLDAP admin to
the KeyCloak admin console.
## Fixing duplicate names or missing First Names for users
Since Keycloak and LLDAP use different attributes for different parts of a user's name, you may see duplicated or missing names for users in Keycloak. To fix this, update the attribute mappings:
Go back to "User Federation", edit your LDAP integration and click on the "Mappers" tab.
Find or create the "first name" mapper (it should have type `user-attribute-ldap-mapper`) and ensure the "LDAP Attribute" setting is set to `givenname`. Keycloak may have defaulted to `cn` which LLDAP uses for the "Display Name" of a user.

View File

@@ -1,193 +0,0 @@
# Configuration for LibreNMS
You can either configure LibreNMS from the webui or from the command line. This is a list of the variables that you should set.
## Essential
## auth_ldap_uid_attribute
```
uid
```
This sets 'uid' as the unique ldap attribute for users.
## auth_ldap_groupmemberattr
```
member
```
## auth_ldap_groups
```'
{"nms_admin": {"level": 10}}'
```
or
```
auth_ldap_groups.nms_admin.level: 10
```
These are both the same.
This example sets the group nms_admin as Admin (level 10).
Set others to match more groups at different levels.
## auth_ldap_starttls
```
false
```
## auth_ldap_server
```
[lldap server ip]
```
## auth_ldap_port
```
3890
```
## auth_ldap_suffix
```
,ou=people,dc=example,dc=com
```
Not sure if the case of people actually matters.
Make sure you keep the initial comma.
## auth_ldap_groupbase
```
ou=groups,dc=example,dc=com
```
## auth_mechanism
```
ldap
```
Be careful with this as you will lock yourself out if ldap does not work correctly. Set back to 'mysql' to turn ldap off.
### auth_ldap_require_groupmembership
```
false
```
## Testing
Use the test script to make sure it works.
```
./script/auth_test.php -u <user>
```
Make sure the level is correctly populated. Should look like this:
```
librenms:/opt/librenms# ./scripts/auth_test.php -uadmin
Authentication Method: ldap
Password:
Authenticate user admin:
AUTH SUCCESS
User (admin):
username => admin
realname => Administrator
user_id => admin
email => admin@example.com
level => 10
Groups: cn=nms_admin,ou=groups,dc=example,dc=com
```
## Setting variables
### Web UI
You can set all the variables in the web UI in: Settings -> Authentication -> LDAP Settings
### Command line
You can use the lnms command to *get* config options like this:
```
lnms config:get auth_ldap_uid_attribute
```
You can use the lnms command to *set* config options like this:
```
lnms config:set auth_ldap_uid_attribute uid
```
Read more [here](https://docs.librenms.org/Support/Configuration/)
### Pre load configuration for Docker
You can create a file named: /data/config/ldap.yaml and place your variables in there.
```
librenms:/opt/librenms# cat /data/config/auth.yaml
auth_mechanism: ldap
auth_ldap_server: 172.17.0.1
auth_ldap_port: 3890
auth_ldap_version: 3
auth_ldap_suffix: ,ou=people,dc=example,dc=com
auth_ldap_groupbase: ou=groups,dc=example,dc=com
auth_ldap_prefix: uid=
auth_ldap_starttls: False
auth_ldap_attr: {"uid": "uid"}
auth_ldap_uid_attribute: uid
auth_ldap_groups: {"nms_admin": {"level": 10}}
auth_ldap_groupmemberattr: member
auth_ldap_require_groupmembership: False
auth_ldap_debug: False
auth_ldap_group: cn=groupname,ou=groups,dc=example,dc=com
auth_ldap_groupmembertype: username
auth_ldap_timeout: 5
auth_ldap_emailattr: mail
auth_ldap_userdn: True
auth_ldap_userlist_filter:
auth_ldap_wildcard_ou: False
```
Read more [here](https://github.com/librenms/docker#configuration-management)
## Issue with current LibreNMS
The current version (23.7.0 at the time of writing) does not support lldap. A fix has been accepted to LibreNMS so the next version should just work.
[Link to the commit](https://github.com/librenms/librenms/commit/a71ca98fac1a75753b102be8b3644c4c3ee1a624)
If you want to apply the fix manually, run git apply with this patch.
```
diff --git a/LibreNMS/Authentication/LdapAuthorizer.php b/LibreNMS/Authentication/LdapAuthorizer.php
index 5459759ab..037a7382b 100644
--- a/LibreNMS/Authentication/LdapAuthorizer.php
+++ b/LibreNMS/Authentication/LdapAuthorizer.php
@@ -233,7 +233,7 @@ class LdapAuthorizer extends AuthorizerBase
$entries = ldap_get_entries($connection, $search);
foreach ($entries as $entry) {
$user = $this->ldapToUser($entry);
- if ((int) $user['user_id'] !== (int) $user_id) {
+ if ($user['user_id'] != $user_id) {
continue;
}
@@ -360,7 +360,7 @@ class LdapAuthorizer extends AuthorizerBase
return [
'username' => $entry['uid'][0],
'realname' => $entry['cn'][0],
- 'user_id' => (int) $entry[$uid_attr][0],
+ 'user_id' => $entry[$uid_attr][0],
'email' => $entry[Config::get('auth_ldap_emailattr', 'mail')][0],
'level' => $this->getUserlevel($entry['uid'][0]),
];
```

View File

@@ -1,70 +0,0 @@
#!/bin/bash
#
# LLDAP authentication helper for Home Assistant's command_line auth provider.
# Reads "username" and "password" from the environment (set by Home Assistant),
# authenticates against the LLDAP server given as $1, and optionally requires
# membership in the group given as $2. On success, prints "name = <display name>".
# Exit codes: 0 = authenticated, 1 = authentication/lookup failure, 2 = bad usage.

# Usernames should be validated using a regular expression to be of
# a known format. Special characters will be escaped anyway, but it is
# generally not recommended to allow more than necessary.
# This pattern is set by default. In your config file, you can either
# overwrite it with a different one or use "unset USERNAME_PATTERN" to
# disable validation completely.
# Note: characters inside a bracket expression are listed directly; the '|'
# separators in the previous pattern ('^[a-z|A-Z|0-9|_|-|.]+$') were literal
# and accidentally allowed '|' in usernames.
USERNAME_PATTERN='^[a-zA-Z0-9_.-]+$'

# When the timeout (in seconds) is exceeded (e.g. due to slow networking),
# authentication fails.
TIMEOUT=3

# Log messages to stderr so stdout only carries the "name = ..." metadata.
log() {
    echo "$1" >&2
}

# Get server address
if [ -z "$1" ]; then
    log "Usage: lldap-auth.sh <LLDAP server address> <Optional group to filter>"
    exit 2
fi
# Strip a single trailing slash so the URL concatenations below are well-formed.
SERVER_URL="${1%/}"

# Check username and password are present and not malformed.
if [ -z "$username" ] || [ -z "$password" ]; then
    log "Need username and password environment variables."
    exit 2
elif [ -n "$USERNAME_PATTERN" ]; then
    # The pattern matches the whole username, so a valid name collapses to "x".
    username_match=$(echo "$username" | sed -r "s/$USERNAME_PATTERN/x/")
    if [ "$username_match" != "x" ]; then
        log "Username '$username' has an invalid format."
        exit 2
    fi
fi

# Authenticate against the simple login endpoint and obtain a JWT.
RESPONSE=$(curl -f -s -X POST -m "$TIMEOUT" -H "Content-type: application/json" -d '{"username":"'"$username"'","password":"'"$password"'"}' "$SERVER_URL/auth/simple/login")
if [[ $? -ne 0 ]]; then
    log "Auth failed"
    exit 1
fi
TOKEN=$(jq -e -r .token <<< "$RESPONSE")
if [[ $? -ne 0 ]]; then
    log "Failed to parse token"
    exit 1
fi

# Fetch the user's display name and group list through the GraphQL API.
RESPONSE=$(curl -f -s -m "$TIMEOUT" -H "Content-type: application/json" -H "Authorization: Bearer ${TOKEN}" -d '{"variables":{"id":"'"$username"'"},"query":"query($id:String!){user(userId:$id){displayName groups{displayName}}}"}' "$SERVER_URL/api/graphql")
if [[ $? -ne 0 ]]; then
    log "Failed to get user"
    exit 1
fi
USER_JSON=$(jq -e .data.user <<< "$RESPONSE")
if [[ $? -ne 0 ]]; then
    log "Failed to parse user json"
    exit 1
fi

# Optional group gate: when $2 is given, the user must be a member of it.
if [[ -n "$2" ]] && ! jq -e '.groups|map(.displayName)|index("'"$2"'")' <<< "$USER_JSON" > /dev/null 2>&1; then
    log "User is not in group '$2'"
    exit 1
fi

# Expose the display name to Home Assistant (requires `meta: true`).
DISPLAY_NAME=$(jq -r .displayName <<< "$USER_JSON")
[[ -n "$DISPLAY_NAME" ]] && echo "name = $DISPLAY_NAME"
# Exit 0 explicitly: if DISPLAY_NAME is empty, the failed `&&` above would
# otherwise make the script's exit status 1 and auth would be rejected.
exit 0

View File

@@ -1,28 +0,0 @@
# Mealie
Configuration is done solely with environmental variables in the mealie-api docker-compose config:
## Note
[LDAP integration in Mealie currently only works with the nightly branch](https://github.com/hay-kot/mealie/issues/2402#issuecomment-1560176528), so `hkotel/mealie:api-nightly` and `hkotel/mealie:frontend-nightly` rather than the current "stable" release of `v1.0.0beta-5`
## Configuration
The following config should let you login with either members of the `mealie` group as a user, or as an admin user with members of the `mealie-admin` group.
Mealie first checks credentials in the `mealie` group to authenticate, then checks for the presence of the user in the `mealie-admin` group and elevates that account to admin status if present; therefore, for any account to be an admin account, it must belong to both the `mealie` group and the `mealie-admin` group.
It is recommended to create a `readonly_user` and add them to the `lldap_strict_readonly` group to bind with.
```yaml
- LDAP_AUTH_ENABLED=true
- LDAP_SERVER_URL=ldap://lldap:3890
- LDAP_TLS_INSECURE=true ## Only required for LDAPS with a self-signed certificate
- LDAP_BASE_DN=ou=people,dc=example,dc=com
- LDAP_USER_FILTER=(memberof=cn=mealie,ou=groups,dc=example,dc=com)
- LDAP_ADMIN_FILTER=(memberof=cn=mealie-admin,ou=groups,dc=example,dc=com)
- LDAP_QUERY_BIND=cn=readonly_user,ou=people,dc=example,dc=com
- LDAP_QUERY_PASSWORD=READONLY_USER_PASSWORD
- LDAP_ID_ATTRIBUTE=uid
- LDAP_NAME_ATTRIBUTE=displayName
- LDAP_MAIL_ATTRIBUTE=mail
```

View File

@@ -1,37 +0,0 @@
# MinIO Configuration
MinIO is a High-Performance Object Storage released under GNU Affero General Public License v3. 0. It is API compatible with the Amazon S3 cloud storage service. This example assists with basic LDAP configuration and policy attachment.
## LDAP Config
### Navigation
- Login to the WebUI as a consoleAdmin user
- Navigate to `Administrator > Identity > LDAP`
- Click `Edit Configuration`
### Configuration Options
- Server Insecure: Enabled
- Server Address: Hostname or IP for your LLDAP host
- Lookup Bind DN: `uid=admin,ou=people,dc=example,dc=com`
- It is recommended that you create a separate user account (e.g, `bind_user`) instead of `admin` for sharing Bind credentials with other services. The `bind_user` should be a member of the `lldap_strict_readonly` group to limit access to your LDAP configuration in LLDAP.
- Lookup Bind Password: The password for the user referenced above
- User DN Search Base: `ou=people,dc=example,dc=com`
- User DN Search Filter: `(&(uid=%s)(memberOf=cn=minio_admin,ou=groups,dc=example,dc=com))`
- This search filter will only allow users that are members of the `minio_admin` group to authenticate. To allow all lldap users, this filter can be used instead `(uid=%s)`
- Group Search Base DN: `ou=groups,dc=example,dc=com`
- Group Search Filter: `(member=%d)`
### Enable LDAP
> Note there appears to be a bug in some versions of MinIO where LDAP is enabled and working, however the configuration UI reports that it is not enabled.
Now, you can enable LDAP authentication by clicking the `Enable LDAP` button, a restart of the service or container is needed. With this configuration, LLDAP users will be able to log in to MinIO now. However they will not be able to do anything, as we need to attach policies giving permissions to users.
## Policy Attachment
Creating MinIO policies is outside of the scope for this document, but it is well documented by MinIO [here](https://min.io/docs/minio/linux/administration/identity-access-management/policy-based-access-control.html). Policies are written in JSON, are extremely flexible, and can be configured to be very granular. In this example we will be using one of the built-in Policies, `consoleAdmin`. We will be applying these policies with the `mc` command line utility.
- Alias your MinIO instance: `mc alias set myMinIO http://<your-minio-address>:<your-minio-api-port> admin <your-admin-password>`
- Attach a policy to your LDAP group: `mc admin policy attach myMinIO consoleAdmin --group='cn=minio_admin,ou=groups,dc=example,dc=com'`

View File

@@ -70,7 +70,7 @@ _The first two can be any string you'd like to identify the connection with. The
* *_Do not_* use commas in the Nextcloud Social Login app scope! This caused many issues for me.
* Be sure you update your Authelia `configuration.yml`. Specifically, the line: `redirect_uris`. The new URL should be
`https://nextcloud.example.com/apps/sociallogin/custom_oidc/Authelia`; in some cases the URL also contains the index.php file and has to look like this: `https://nextcloud.example.com/index.php/apps/sociallogin/custom_oidc/Authelia`. Check whether your Nextcloud URL contains index.php: if it does, the variant without index.php won't work, and if it doesn't, the variant with index.php won't work.
`https://auth.example.com/index.php/apps/sociallogin/custom_oidc/Authelia`.
* The final field in the URL (Authelia) needs to be the same value you used in the Social Login "Internal Name" field.
* If you've setup LLDAP correctly in nextcloud, the last dropdown for _Default Group_ should show you the `nextcloud_users` group you setup in LLDAP.

View File

@@ -1,113 +0,0 @@
# Configuration for OPNsense
## Create a LDAP Server
- Login to OPNsense
- Navigate to: `System > Access > Servers`
- Create a new server by clicking on the `+` icon
## Server Config
- Descriptive Name: `A Descriptive Name`
- Type: `LDAP`
- Hostname or IP address: `Hostname or IP for your LLDAP host`
- Port value: `Your LLDAP port`
- Default: `3890`
- Transport: `TCP - Standard`
- Protocol version: `3`
Make sure the host running LLDAP is accessible to OPNsense and that you mapped the LLDAP port to the LLDAP host.
## LDAP Config
### Bind credentials
#### User DN
```
uid=admin,ou=people,dc=example,dc=com
```
It is recommended that you create a separate user account (e.g, `bind_user`) instead of `admin` for sharing Bind credentials with other services. The `bind_user` should be a member of the `lldap_strict_readonly` group to limit access to your LDAP configuration in LLDAP.
#### Password
```
xxx
```
Enter the password that you set for the user specified in the User DN field.
### Search Scope
```
One Level
```
### Base DN
```
dc=example,dc=com
```
This is the same LDAP Base DN that you set via the *LLDAP_LDAP_BASE_DN* environment variable or in `lldap_config.toml`.
### Authentication containers
```
ou=people,dc=example,dc=com
```
Note: The `Select` box may not work for selecting containers. You can just enter the `Authentication containers` directly into the text field.
### Extended Query
```
&(objectClass=person)(memberof=cn=lldap_admin,ou=groups,dc=example,dc=com)
```
It is recommended that you create a unique LDAP group (e.g., `lldap_opnsense`) in LLDAP and use that group in this query instead of `lldap_admin`. This will limit OPNsense access to users in the `lldap_opnsense` group and make it easier to synchronize LLDAP groups with OPNsense groups for managing OPNsense access.
### Initial Template
```
OpenLDAP
```
### User naming attribute
```
uid
```
## Optional Configuration
The above configuration will connect OPNsense to LLDAP. This optional configuration will synchronize groups between LLDAP and OPNsense and automate user creation when an authorized LLDAP user logs into OPNsense.
### Remaining Server Configuration
Enable the following options on the OPNsense configuration page for your LLDAP server (the same page where you entered the prior configuration):
- Read Properties: `Checked`
- Synchronize groups: `Checked`
- Automatic user creation: `Checked`
### Create OPNsense Group
Go to `System > Access > Groups` and create a new group with the **same** name as the LLDAP group used to authenticate users for OPNsense.
By default, you would name your OPNsense group `lldap_admin` unless you followed the recommended advice in this guide and created a separate `lldap_opnsense` group for managing OPNsense users.
If you want your LLDAP users to have full administrator access in OPNsense, then you need to edit the `Assigned Privileges` for the group and add the `GUI - All pages` system privilege.
### Enable LLDAP as an Authentication Option
Go to `System > Settings > Administration` page and scroll down to the `Authentication` section. Add your LLDAP server configuration to the `Server` field.
## Testing LLDAP
OPNsense includes a built-in feature for testing user authentication at `System > Access > Tester`. Select your LLDAP server configuration in the `Authentication Server` to test logins for your LLDAP users.
## More Information
Please read the [OPNsense docs](https://docs.opnsense.org/manual/how-tos/user-ldap.html) for more information on LDAP configuration and managing access to OPNsense.

View File

@@ -1,117 +0,0 @@
# Configuration for pfSense
## Create a LDAP Server
- Login to pfSense
- Navigate to: `System > User Manager > Authentication Servers`
- Create a new server by clicking on the `+ Add` button
## LDAP Server Settings
- Descriptive Name: `A Descriptive Name`
- Type: `LDAP`
- Hostname or IP address: `Hostname or IP for your LLDAP host`
- Port value: `Your LLDAP port`
- Transport: `TCP - Standard`
- Protocol version: `3`
- Server Timeout: `25`
(Make sure the host running LLDAP is accessible to pfSense and that you mapped the LLDAP port to the LLDAP host)
### Search Scope
```
Entire Subtree
```
### Base DN
```
dc=example,dc=com
```
This is the same LDAP Base DN that you set via the *LLDAP_LDAP_BASE_DN* environment variable or in `lldap_config.toml`.
### Authentication containers
```
ou=people
```
Note: The `Select a container` box may not work for selecting containers. You can just enter the `Authentication containers` directly into the text field.
### Extended Query
Enable extended query: `Checked`
### Query:
```
&(objectClass=person)(|(memberof=cn=pfsense_admin,ou=groups,dc=example,dc=com)(memberof=cn=pfsense_guest,ou=groups,dc=example,dc=com))
```
This example gives you two groups in LLDAP, one for pfSense admin access (`pfsense_admin`) and one for guest access (`pfsense_guest`). You **must** create these exact same groups in both LLDAP and pfSense, then give them the correct permissions in pfSense.
### Bind Anonymous
`Unchecked`
### Bind credentials
#### User DN
```
uid=yourbinduser,ou=people,dc=example,dc=com
```
It is recommended that you create a separate read-only user account (e.g, `readonly`) instead of `admin` for sharing Bind credentials with other services. The `readonly` should be a member of the `lldap_strict_readonly` group to limit access to your LDAP configuration in LLDAP.
#### Password
```
LLDAPPasswordForBindUser
```
### User naming attribute
```
uid
```
### Group naming attribute
```
cn
```
### Group member attribute
```
memberof
```
### RFC 2307 Groups
`Unchecked`
### Group Object Class
`groupOfUniqueNames`
### Shell Authentication Group DN
`cn=pfsense_admin,ou=groups,dc=example,dc=com`
(This is only if you want to give a group shell access through LDAP. Leave blank and only the pfSense admin user will have shell access.)
### Remaining Server Configuration
Enable the following options on the pfSense configuration page for your LLDAP server (the same page where you entered the prior configuration):
- UTF8 Encodes: `Checked`
- Username Alterations: `Unchecked`
- Allow unauthenticated bind: `Unchecked`
### Create pfSense Groups
Go to `System > User Manager > Groups` and create a new group(s) with the **same exact** name as the LLDAP group(s) used to authenticate users for pfSense (`pfsense_admin` and `pfsense_guest` in this example).
If you want your LLDAP users to have full administrator access in pfSense, then you need to edit the `Assigned Privileges` for the group and add the `WebCfg - All pages` system privilege. If you do not give any permissions to a group, you will be able to log in but only see an empty webUI.
### Enable LLDAP as an Authentication Option
Go to `System > User Manager > Settings` page. Add your LLDAP server configuration to the `Authentication Server` field. **The "Save & Test" Button will fail the test results at step 3. No clue why.**
## Testing LLDAP
pfSense includes a built-in feature for testing user authentication at `Diagnostics > Authentication`. Select your LLDAP server configuration in the `Authentication Server` to test logins for your LLDAP users. The groups (only the ones you added to pfSense) should show up when tested.
## More Information
Please read the [pfSense docs](https://docs.netgate.com/pfsense/en/latest/usermanager/ldap.html) for more information on LDAP configuration and managing access to pfSense.

View File

@@ -1,39 +0,0 @@
# Configuration for PowerDNS Admin
## Navigate
- Login to PowerDNS Admin
- Navigate to: `Administration > Settings > Authentication`
- Select the `LDAP` tab of the `Authentication Settings`
## LDAP Config
- Enable LDAP Authentication: Checked
- Type: OpenLDAP
### Administrator Info
- LDAP URI: `ldap://<your-lldap-ip-or-hostname>:3890`
- LDAP Base DN: `ou=people,dc=example,dc=com`
- LDAP admin username: `uid=admin,ou=people,dc=example,dc=com`
- It is recommended that you create a separate user account (e.g, `bind_user`) instead of `admin` for sharing Bind credentials with other services. The `bind_user` should be a member of the `lldap_strict_readonly` group to limit access to your LDAP configuration in LLDAP.
- LDAP admin password: password of the user specified above
### Filters
- Basic filter: `(objectClass=person)`
- Username field: `uid`
- Group filter: `(objectClass=groupOfUniqueNames)`
- Group name field: `member`
### Group Security (Optional)
> If Group Security is disabled, all users authenticated via LDAP will be given the "User" role.
Group Security is an optional configuration for LLDAP users. It provides a simple 1:1 mapping between LDAP groups, and PowerDNS roles.
- Status: On
- Admin group: `cn=dns_admin,ou=groups,dc=example,dc=com`
- Operator group: `cn=dns_operator,ou=groups,dc=example,dc=com`
- User group: `cn=dns_user,ou=groups,dc=example,dc=com`

View File

@@ -1,83 +0,0 @@
# Proxmox VE Example
Proxmox Virtual Environment is a hyper-converged infrastructure open-source software. It is a hosted hypervisor that can run operating systems including Linux and Windows on x64 hardware. In this example we will set up user and group synchronization, with two example groups `proxmox_user` and `proxmox_admin`. This example was made using Proxmox VE 8.0.3.
## Navigation
- From the `Server View` open the `Datacenter` page
- Then in this page, open the `Permissions > Realms` menu
- In this menu, select `Add > LDAP Server`
## General Options
- Realm: The internal proxmox name for this authentication method
- Base Domain Name: `dc=example,dc=com`
- User Attribute Name: `uid`
- Server: Your LLDAP hostname or IP
- Port: `3890`
- SSL: Leave unchecked unless you're using LDAPS
- Comment: This field will be exposed as the "name" in the login page
## Sync Options
- Bind User: `uid=admin,ou=people,dc=example,dc=com`
- It is recommended that you create a separate user account (e.g, `bind_user`) instead of `admin` for sharing Bind credentials with other services. The `bind_user` should be a member of the `lldap_strict_readonly` group to limit access to your LDAP configuration in LLDAP.
- Bind Password: password of the user specified above
- E-Mail Attribute: `mail`
- Groupname attr: `cn`
- User Filter: `(&(objectClass=person)(|(memberof=cn=proxmox_user,ou=groups,dc=example,dc=com)(memberof=cn=proxmox_admin,ou=groups,dc=example,dc=com)))`
- This filter will only copy users that are members of the `proxmox_user` or `proxmox_admin` groups. If you want to enable all users in lldap, this filter can be used: `(objectClass=person)`
- Group Filter: `(&(objectClass=groupofuniquenames)(|(cn=proxmox_user)(cn=proxmox_admin)))`
- This filter will only copy the `proxmox_user` or `proxmox_admin` groups explicitly. If you want to sync all groups, this filter can be used: `(objectClass=groupofnames)`
- Default Sync Options:
- Scope: `Users and Groups`
- Remove Vanished Options
- Entry: Checked
- Properties: Checked
## Synchronizing
Proxmox implements LDAP authentication by synchronizing your LLDAP server to a local database. This sync can be triggered manually or on a scheduled basis. Proxmox also offers a preview feature, which will report any changes to the local DB from a sync without applying the changes. It is highly recommended to run a preview on your first synchronization after making any filter changes, to ensure synchronization is happening as expected.
### First Sync
- With the options saved, and from the `Permissions > Realms` page, select the LDAP realm you just created and click `Sync`
- At the sync dialog, click the Preview button, and carefully check the output to ensure all the users and groups you expect are seen, and that nothing is being removed unexpectedly.
- Once the preview output matches what we expect, we can click the Sync button on the `Realm Sync` dialog for the LDAP realm we created.
### Scheduled Sync (Optional)
- Once we are confident that LDAP synchronization is working as expected, this can be scheduled as a job from the `Permissions > Realms` page.
- On the second half of the page, click `Add` under `Realm Sync Jobs`
- Set a schedule for this job and click `Create`
## ACLs
Once you have users and groups synchronized from LLDAP, it is necessary to grant some permissions to these users or groups so that they are able to use Proxmox. Proxmox handles this with a filesystem-like tree structure, and "roles", which are collections of permissions. In our basic example, we will grant the built-in `Administrator` role to our `proxmox_admin` group on the entire system. Then we will also grant the `proxmox_user` group several roles with different paths so they can clone and create VMs within a specific resource pool (`UserVMs`), but are otherwise restricted from editing or deleting other resources.
> Note that Proxmox appends the realm name to groups when syncing, so if you named your realm `lldap`, the groups as synced will be `proxmox_user-lldap` and `proxmox_admin-lldap`
### Administrator
- From the Datacenter pane, select the `Permissions` menu page.
- Click `Add > Group Permission`
- Path: Type or select `/`
- Group: Type or select the admin group that has syncronized (`proxmox_admin-lldap` in our example)
- Role: `Administrator`
- Finish by clicking the `Add` button and this access should now be granted
### User Role
> This example assumes we have created Resource Pools named `UserVMs` and `Templates`
- From the Datacenter pane, select the `Permissions` menu page.
- We will be adding six rules in total, for each one clicking `Add > Group Permission`
- Path: `/pool/UserVMs`, Group: `proxmox_user-lldap`, Role: PVEVMAdmin
- Path: `/pool/UserVMs`, Group: `proxmox_user-lldap`, Role: PVEPoolAdmin
- Path: `/pool/Templates`, Group: `proxmox_user-lldap`, Role: PVEPoolUser
- Path: `/pool/Templates`, Group: `proxmox_user-lldap`, Role: PVETemplateUser
- The following two rules are based on a default setup of Proxmox, and may need to be updated based on your networking and storage configuration
- Path: `/sdn/zones/localnetwork`, Group: `proxmox_user-lldap`, Role: PVESDNUser
- Path: `/storage/local-lvm`, Group: `proxmox_user-lldap`, Role: PVEDatastoreUser
That completes our basic example. The ACL rules in Proxmox are very flexible though, and custom roles can be created as well. The Proxmox documentation on [User Management](https://pve.proxmox.com/wiki/User_Management) goes into more depth if you wish to write a policy that better fits your use case.

View File

@@ -1,57 +0,0 @@
# Squid
[Squid](http://www.squid-cache.org/) is a caching HTTP/HTTPS proxy.
This guide will show you how to configure it to allow any user of the group `proxy` to use the proxy server.
The configuration file `/etc/squid/squid.conf`
```
auth_param basic program /usr/lib/squid/basic_ldap_auth -b "dc=example,dc=com" -D "uid=admin,ou=people,dc=example,dc=com" -W /etc/squid/ldap_password -f "(&(memberOf=uid=proxy,ou=groups,dc=example,dc=com)(uid=%s))" -H ldap://IP_OF_LLDAP_SERVER:3890
acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN)
acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN)
acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN)
acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines
acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN)
acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN)
acl localnet src fc00::/7 # RFC 4193 local private network range
acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
acl SSL_ports port 443
acl Safe_ports port 80 # http
acl Safe_ports port 21 # ftp
acl Safe_ports port 443 # https
acl Safe_ports port 70 # gopher
acl Safe_ports port 210 # wais
acl Safe_ports port 1025-65535 # unregistered ports
acl Safe_ports port 280 # http-mgmt
acl Safe_ports port 488 # gss-http
acl Safe_ports port 591 # filemaker
acl Safe_ports port 777 # multiling http
http_access deny !Safe_ports
http_access deny CONNECT !SSL_ports
http_access allow localhost manager
http_access deny manager
include /etc/squid/conf.d/*.conf
http_access allow localhost
acl ldap-auth proxy_auth REQUIRED
http_access allow ldap-auth
# http_access deny all
http_port 3128
coredump_dir /var/spool/squid
refresh_pattern ^ftp: 1440 20% 10080
refresh_pattern ^gopher: 1440 0% 1440
refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims
refresh_pattern \/InRelease$ 0 0% 0 refresh-ims
refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
refresh_pattern . 0 20% 4320
```
The password for the binduser is stored in `/etc/squid/ldap_password` e.g.
```
PASSWORD_FOR_BINDUSER
```
After you restart squid with `systemctl restart squid` check it is working with
```
curl -O -L "https://www.redhat.com/index.html" -x "user_name:password@proxy.example.com:3128"
```

View File

@@ -1,37 +0,0 @@
# Tandoor Recipes LDAP configuration
## LDAP settings are defined by environmental variables as defined in [Tandoor's documentation](https://docs.tandoor.dev/features/authentication/#ldap)
### #Required#
It is recommended to have a read-only account to bind to
```
LDAP_AUTH=1
AUTH_LDAP_SERVER_URI=ldap://lldap:3890/
AUTH_LDAP_BIND_DN=uid=ro_admin,ou=people,DC=example,DC=com
AUTH_LDAP_BIND_PASSWORD=CHANGEME
AUTH_LDAP_USER_SEARCH_BASE_DN=ou=people,DC=example,DC=com
```
### #Optional#
By default it authenticates everybody identified by the search base DN, this allows you to pull certain users from the ```tandoor_users``` group
```
AUTH_LDAP_USER_SEARCH_FILTER_STR=(&(&(objectclass=person)(memberOf=cn=tandoor_users,ou=groups,dc=example,dc=com))(uid=%(user)s))
```
Map Tandoor user fields with their LLDAP counterparts
```
AUTH_LDAP_USER_ATTR_MAP={'first_name': 'givenName', 'last_name': 'sn', 'email': 'mail'}
```
Set whether or not to always update user fields at login and how many seconds for a timeout
```
AUTH_LDAP_ALWAYS_UPDATE_USER=1
AUTH_LDAP_CACHE_TIMEOUT=3600
```
If you use secure LDAP
```
AUTH_LDAP_START_TLS=1
AUTH_LDAP_TLS_CACERTFILE=/etc/ssl/certs/own-ca.pem
```

View File

@@ -1,43 +0,0 @@
# Basic LDAP auth for a The Lounge IRC web-client
[Main documentation here.](https://thelounge.chat/docs/configuration#ldap-support)
## Simple Config:
In this config, The Lounge will use the credentials provided in web ui to authenticate with lldap. It'll allow access if authentication was successful.
```
ldap: {
enable: true,
url: "ldap://localhost:389",
tlsOptions: {},
primaryKey: "uid",
baseDN: "ou=people,dc=example,dc=com",
},
```
## Advanced Config:
`rootDN` is similar to bind DN in other applications. It is used in combination with `rootPassword` to query lldap. `ldap-viewer` user in `lldap` is a member of the group `lldap_strict_readonly` group. This gives `ldap-viewer` user permission to query `lldap`.
With the `filter`, You can limit The Lounge access to users who are a member of the group `thelounge`.
```
ldap: {
enable: true,
url: "ldap://localhost:389",
tlsOptions: {},
primaryKey: "uid",
searchDN: {
rootDN: "uid=ldap-viewer,ou=people,dc=example,dc=com",
    rootPassword: "",
filter: "(memberOf=cn=thelounge,ou=groups,dc=example,dc=com)",
base: "dc=example,dc=com",
scope: "sub",
},
},
```

View File

@@ -1,56 +0,0 @@
# Zabbix Web Configuration
This example is for the Zabbix Web interface version 6.0, which is the supported LTS version as of August 2023. Later versions have additional options.
For the associated 6.0 documentation see [here](https://www.zabbix.com/documentation/6.0/en/manual/web_interface/frontend_sections/administration/authentication) and for the current manual see [here](https://www.zabbix.com/documentation/current/en/manual).
***Note that an LDAP user must exist in Zabbix Web as well, however its Zabbix password will not be used.*** When creating the user in Zabbix, the user should also be added to your desired Zabbix roles/groups.
## Configure LDAP Settings
- Log in to the web interface as an admin
- Navigate to `Administration > Authentication > LDAP Settings`
### Enable LDAP authentication
Checked
### LDAP host
URI of your LLDAP host. Example: `ldap://ldap.example.com:3890` or `ldaps://ldap.example.com:6360` for TLS.
### Port
Not used when using a full LDAP URI as above, but feel free to put `3890` or `6360` for TLS.
### Base DN
Your LLDAP_LDAP_BASE. Example: `dc=example,dc=com`
### Search attribute
`uid`
### Case-sensitive login
Checked
### Bind DN
`uid=admin,ou=people,dc=example,dc=com`
Alternately, it is recommended that you create a separate user account (e.g, `bind_user`) instead of `admin` for sharing Bind credentials with other services. The `bind_user` should be a member of the `lldap_strict_readonly` group to limit access to your LDAP configuration in LLDAP.
### Bind password
Password for the above bind DN user.
### Test authentication
The test authentication `Login` and `User password` must be used to check the connection and whether an LDAP user can be successfully authenticated. Zabbix will not activate LDAP authentication if it is unable to authenticate the test user.
## Enable LDAP in Zabbix Web
- Navigate to `Administration > Authentication > Authentication` (the first tab)
- Set "Default authentication" to "LDAP"
- Click "Update"

View File

@@ -13,6 +13,6 @@ You setup https://zend.to/ for using LDAP by editing `/opt/zendto/config/prefere
'authLDAPUsernameAttr' => 'uid',
'authLDAPEmailAttr' => 'mail',
'authLDAPMemberKey' => 'memberOf',
'authLDAPMemberRole' => 'cn=zendto,ou=groups,dc=example,dc=com',
'authLDAPMemberRole' => 'uid=zendto,ou=groups,dc=example,dc=com',
```
Every user of the group `zendto` is allowed to login.

View File

@@ -1,143 +0,0 @@
# Configuration for Zulip
Zulip combines the immediacy of real-time chat with an email threading model.
Their ldap-documentation is here: [zulip.readthedocs.io](https://zulip.readthedocs.io/en/stable/production/authentication-methods.html#ldap-including-active-directory)
Zulip has two installation methods, either by running the recommended installer or by docker/podman compose.
The way how the service is configured differs depending on the installation method, so keep in mind you will only need one of the following examples.
> Important info
> The available/configured userdata will be automatically imported at the first login.
> If you want to import it before the user logs in for the first time or
> if you want to keep the data in sync with LLDAP you need to trigger the import by hand (or via cronjob).
> `/home/zulip/deployments/current/manage.py sync_ldap_user_data`
## Container based configuration
The following configuration takes place in the environment section of your compose-file.
1) Enable the LDAP authentication backend
Find the line`ZULIP_AUTH_BACKENDS: "EmailAuthBackend"` and change it to `ZULIP_AUTH_BACKENDS: "ZulipLDAPAuthBackend,EmailAuthBackend"`.
2) Configure how to connect with LLDAP
The user specified in `SETTING_AUTH_LDAP_BIND_DN` is used to query data from LLDAP.
Zulip is only able to authenticate users and read data via LDAP; it is not able to write data or change the users' passwords.
Because of this limitation we will use the group `lldap_strict_readonly` for this user.
Add the following lines to your configuration and change the values according to your setup.
```
SETTING_AUTH_LDAP_SERVER_URI: "ldap://lldap:3890"
SETTING_AUTH_LDAP_BIND_DN: "uid=zulip,ou=people,dc=example,dc=com"
SECRETS_auth_ldap_bind_password: "superSECURE_Pa55word"
```
3) Configure how to search for existing users
Add the following lines to your configuration and change the values according to your setup.
```
SETTING_AUTH_LDAP_USER_SEARCH: >
LDAPSearch("ou=people,dc=example,dc=com", ldap.SCOPE_SUBTREE, "(uid=%(user)s)")
SETTING_LDAP_EMAIL_ATTR: mail
SETTING_AUTH_LDAP_REVERSE_EMAIL_SEARCH: >
LDAPSearch("ou=people,dc=example,dc=com", ldap.SCOPE_SUBTREE, "(mail=%(email)s)")
SETTING_AUTH_LDAP_USERNAME_ATTR: "uid"
```
4) Configure the user-data mapping
This step is optional, the sample below shows the maximum of available options, you can use all of them or none.
Add the following lines to your configuration and remove the fields you don't want to be synced.
The field `"full_name": "cn"` is mandatory.
```
SETTING_AUTH_LDAP_USER_ATTR_MAP: >
{"full_name": "cn","first_name": "givenName","last_name": "sn","avatar": "jpegPhoto"}
```
5) Configure which groups are allowed to authenticate
This step is optional, if you do not specify anything here all users from your LLDAP server will be able to login.
This example will grant access to all users who are a member of `zulip_users`.
Add the following lines to your configuration and change the values according to your setup.
```
ZULIP_CUSTOM_SETTINGS: "import django_auth_ldap"
SETTING_AUTH_LDAP_GROUP_TYPE: "django_auth_ldap.config.GroupOfUniqueNamesType(name_attr='cn')"
SETTING_AUTH_LDAP_REQUIRE_GROUP: "cn=zulip_users,ou=groups,dc=example,dc=com"
SETTING_AUTH_LDAP_GROUP_SEARCH: >
LDAPSearch("ou=groups,dc=example,dc=com", ldap.SCOPE_SUBTREE, "(objectClass=GroupOfUniqueNames)")
```
6) Disallow local changes after importing userdata
This step is optional; you may want to disallow users from changing their name and avatar if you import this data via LDAP.
Add the following lines to your configuration and change the values according to your setup.
```
SETTING_NAME_CHANGES_DISABLED: True
SETTING_AVATAR_CHANGES_DISABLED: True
```
> Important Info
> Zulip will not write the user profile back to your ldap server.
> If the user changes their profile in Zulip, those changes will be overwritten when the next synchronization with LLDAP is triggered.
> Allow changes to the user profile only if you do not plan to synchronize it with LLDAP regularly.
## Installer based configuration
The following configuration takes place in the configuration-file `/etc/zulip/settings.py`.
1) Enable the LDAP authentication backend
Find the line `AUTHENTICATION_BACKENDS` and uncomment `"zproject.backends.ZulipLDAPAuthBackend"`.
2) Configure how to connect with LLDAP
The user specified in `AUTH_LDAP_BIND_DN` is used to query data from LLDAP.
Zulip is only able to authenticate users and read data via LDAP; it is not able to write data or change the users' passwords.
Because of this limitation we will use the group `lldap_strict_readonly` for this user.
Uncomment the following lines in your configuration and change the values according to your setup.
```
AUTH_LDAP_SERVER_URI = "ldap://lldap:3890"
AUTH_LDAP_BIND_DN = "uid=zulip,ou=people,dc=example,dc=com"
```
The password corresponding to AUTH_LDAP_BIND_DN goes in `/etc/zulip/zulip-secrets.conf`.
Add a single new line to that file like below.
```
auth_ldap_bind_password = superSECURE_Pa55word
```
3) Configure how to search for existing users
Uncomment the following lines in your configuration and change the values according to your setup.
```
AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=people,dc=example,dc=com", ldap.SCOPE_SUBTREE, "(uid=%(user)s)")
LDAP_EMAIL_ATTR = "mail"
AUTH_LDAP_REVERSE_EMAIL_SEARCH = LDAPSearch("ou=people,dc=example,dc=com", ldap.SCOPE_SUBTREE, "(mail=%(email)s)")
AUTH_LDAP_USERNAME_ATTR = "uid"
```
4) Configure the user-data mapping
This step is optional, the sample below shows the maximum of available options, you can use all of them or none.
Find the line `AUTH_LDAP_USER_ATTR_MAP`, then uncomment the values you want to map and change the values according to your setup.
```
AUTH_LDAP_USER_ATTR_MAP = {
"full_name": "cn",
"first_name": "givenName",
"last_name": "sn",
"avatar": "jpegPhoto",
}
```
5) Configure which groups are allowed to authenticate
This step is optional, if you do not specify anything here all users from your LLDAP server will be able to login.
This example will grant access to all users who are a member of `zulip_users`.
Add the following lines to your configuration and change the values according to your setup.
```
import django_auth_ldap
AUTH_LDAP_GROUP_TYPE = "django_auth_ldap.config.GroupOfUniqueNamesType(name_attr='cn')"
AUTH_LDAP_REQUIRE_GROUP = "cn=zulip_users,ou=groups,dc=example,dc=com"
AUTH_LDAP_GROUP_SEARCH = LDAPSearch("ou=groups,dc=example,dc=com", ldap.SCOPE_SUBTREE, "(objectClass=GroupOfUniqueNames)")
```
6) Disallow local changes after importing userdata
This step is optional; you may want to disallow users from changing their name and avatar if you import this data via LDAP.
Uncomment the following lines in your configuration and change the values according to your setup.
```
NAME_CHANGES_DISABLED: True
AVATAR_CHANGES_DISABLED: True
```
> Important Info
> Zulip will not write the user profile back to your ldap server.
> If the user changes their profile in Zulip, those changes will be overwritten when the next synchronization with LLDAP is triggered.
> Allow changes to the user profile only if you do not plan to synchronize it with LLDAP regularly.

View File

@@ -93,17 +93,8 @@ database_url = "sqlite:///data/users.db?mode=rwc"
## would still have to perform an (expensive) brute force attack to find
## each password.
## Randomly generated on first run if it doesn't exist.
## Alternatively, you can use key_seed to override this instead of relying on
## a file.
## Env variable: LLDAP_KEY_FILE
key_file = "/data/private_key"
## Seed to generate the server private key, see key_file above.
## This can be any random string, the recommendation is that it's at least 12
## characters long.
## Env variable: LLDAP_KEY_SEED
#key_seed = "RanD0m STR1ng"
## Ignored attributes.
## Some services will request attributes that are not present in LLDAP. When it
## is the case, LLDAP will warn about the attribute being unknown. If you want
@@ -115,7 +106,7 @@ key_file = "/data/private_key"
## Options to configure SMTP parameters, to send password reset emails.
## To set these options from environment variables, use the following format
## (example with "password"): LLDAP_SMTP_OPTIONS__PASSWORD
[smtp_options]
#[smtp_options]
## Whether to enabled password reset via email, from LLDAP.
#enable_password_reset=true
## The SMTP server.
@@ -137,7 +128,7 @@ key_file = "/data/private_key"
## Options to configure LDAPS.
## To set these options from environment variables, use the following format
## (example with "port"): LLDAP_LDAPS_OPTIONS__PORT
[ldaps_options]
#[ldaps_options]
## Whether to enable LDAPS.
#enabled=true
## Port on which to listen.

View File

@@ -1,12 +1,8 @@
[package]
authors = ["Valentin Tolmer <valentin@tolmer.fr>"]
description = "CLI migration tool to go from OpenLDAP to LLDAP"
edition = "2021"
homepage = "https://github.com/lldap/lldap"
license = "GPL-3.0-only"
name = "lldap_migration_tool"
repository = "https://github.com/lldap/lldap"
name = "migration-tool"
version = "0.4.2"
edition = "2021"
authors = ["Valentin Tolmer <valentin@tolmer.fr>"]
[dependencies]
anyhow = "*"

View File

@@ -271,7 +271,7 @@ pub fn get_users(connection: &mut LdapClient) -> Result<Vec<User>, anyhow::Error
.default(maybe_user_ou.unwrap_or_default())
.auto_complete(|s, _| {
let mut answers = autocomplete_domain_suffix(s, domain);
answers.extend(all_ous.clone());
answers.extend(all_ous.clone().into_iter());
answers
})
.build();
@@ -383,7 +383,7 @@ pub fn get_groups(connection: &mut LdapClient) -> Result<Vec<LdapGroup>> {
.default(maybe_group_ou.unwrap_or_default())
.auto_complete(|s, _| {
let mut answers = autocomplete_domain_suffix(s, domain);
answers.extend(all_ous.clone());
answers.extend(all_ous.clone().into_iter());
answers
})
.build();

28
schema.graphql generated
View File

@@ -1,8 +1,3 @@
type AttributeValue {
name: String!
value: [String!]!
}
input EqualityConstraint {
field: String!
value: String!
@@ -24,8 +19,6 @@ type Group {
displayName: String!
creationDate: DateTimeUtc!
uuid: String!
"User-defined attributes."
attributes: [AttributeValue!]!
"The groups to which this user belongs."
users: [User!]!
}
@@ -46,11 +39,6 @@ input RequestFilter {
"DateTime"
scalar DateTimeUtc
type Schema {
userSchema: AttributeList!
groupSchema: AttributeList!
}
"The fields that can be updated for a group."
input UpdateGroupInput {
id: Int!
@@ -63,7 +51,6 @@ type Query {
users(filters: RequestFilter): [User!]!
groups: [Group!]!
group(groupId: Int!): Group!
schema: Schema!
}
"The details required to create a user."
@@ -85,25 +72,10 @@ type User {
avatar: String
creationDate: DateTimeUtc!
uuid: String!
"User-defined attributes."
attributes: [AttributeValue!]!
"The groups to which this user belongs."
groups: [Group!]!
}
type AttributeList {
attributes: [AttributeSchema!]!
}
type AttributeSchema {
name: String!
attributeType: String!
isList: Boolean!
isVisible: Boolean!
isEditable: Boolean!
isHardcoded: Boolean!
}
type Success {
ok: Boolean!
}

View File

@@ -1,15 +1,9 @@
#! /bin/bash
tables=("users" "groups" "memberships" "jwt_refresh_storage" "jwt_storage" "password_reset_tokens" "group_attribute_schema" "group_attributes")
tables=("users" "groups" "memberships" "jwt_refresh_storage" "jwt_storage" "password_reset_tokens")
echo ".header on"
for table in ${tables[@]}; do
echo ".mode insert $table"
echo "select * from $table;"
done
echo ".mode insert user_attribute_schema"
echo "select * from user_attribute_schema where user_attribute_schema_name not in ('first_name', 'last_name', 'avatar');"
echo ".mode insert user_attributes"
echo "select * from user_attributes;"
done

View File

@@ -1,14 +1,8 @@
[package]
authors = ["Valentin Tolmer <valentin@tolmer.fr>"]
categories = ["authentication", "command-line-utilities"]
description = "Super-simple and lightweight LDAP server"
edition = "2021"
homepage = "https://github.com/lldap/lldap"
keywords = ["cli", "ldap", "graphql", "server", "authentication"]
license = "GPL-3.0-only"
name = "lldap"
repository = "https://github.com/lldap/lldap"
version = "0.5.0"
version = "0.4.3-alpha"
[dependencies]
actix = "0.13"
@@ -34,13 +28,11 @@ itertools = "0.10"
juniper = "0.15"
jwt = "0.16"
lber = "0.4.1"
ldap3_proto = "^0.4"
ldap3_proto = ">=0.3.1"
log = "*"
orion = "0.17"
rand_chacha = "0.3"
rustls-pemfile = "1"
rustls = "0.20"
serde = "*"
serde_bytes = "0.11"
serde_json = "1"
sha2 = "0.10"
thiserror = "*"
@@ -52,7 +44,8 @@ tracing = "*"
tracing-actix-web = "0.7"
tracing-attributes = "^0.1.21"
tracing-log = "*"
urlencoding = "2"
rustls-pemfile = "1"
serde_bytes = "0.11"
webpki-roots = "*"
[dependencies.chrono]
@@ -66,7 +59,6 @@ version = "4"
[dependencies.figment]
features = ["env", "toml"]
version = "*"
[dependencies.tracing-subscriber]
version = "0.3"
features = ["env-filter", "tracing-log"]
@@ -90,21 +82,17 @@ version = "0.8"
features = ["serde"]
version = "*"
[dependencies.strum]
features = ["derive"]
version = "0.25"
[dependencies.tokio]
features = ["full"]
version = "1.25"
[dependencies.uuid]
features = ["v1", "v3"]
version = "1"
features = ["v3"]
version = "*"
[dependencies.tracing-forest]
features = ["smallvec", "chrono", "tokio"]
version = "^0.1.6"
version = "^0.1.4"
[dependencies.actix-tls]
features = ["default", "rustls"]
@@ -116,7 +104,7 @@ default-features = false
version = "0.24"
[dependencies.sea-orm]
version= "0.12"
version= "0.11"
default-features = false
features = ["macros", "with-chrono", "with-uuid", "sqlx-all", "runtime-actix-rustls"]
@@ -125,40 +113,5 @@ version = "0.11"
default-features = false
features = ["rustls-tls-webpki-roots"]
[dependencies.rustls]
version = "0.20"
features = ["dangerous_configuration"]
[dependencies.url]
version = "2"
features = ["serde"]
[dev-dependencies]
assert_cmd = "2.0"
mockall = "0.11.4"
nix = "0.26.2"
pretty_assertions = "1"
[dev-dependencies.graphql_client]
features = ["graphql_query_derive", "reqwest-rustls"]
default-features = false
version = "0.11"
[dev-dependencies.ldap3]
version = "*"
default-features = false
features = ["sync", "tls-rustls"]
[dev-dependencies.reqwest]
version = "*"
default-features = false
features = ["json", "blocking", "rustls-tls"]
[dev-dependencies.serial_test]
version = "2.0.0"
default-features = false
features = ["file_locks"]
[dev-dependencies.uuid]
version = "1"
features = ["v4"]
mockall = "0.11"

View File

@@ -7,8 +7,6 @@ pub enum DomainError {
AuthenticationError(String),
#[error("Database error: `{0}`")]
DatabaseError(#[from] sea_orm::DbErr),
#[error("Database transaction error: `{0}`")]
DatabaseTransactionError(#[from] sea_orm::TransactionError<sea_orm::DbErr>),
#[error("Authentication protocol error for `{0}`")]
AuthenticationProtocolError(#[from] lldap_auth::opaque::AuthenticationError),
#[error("Unknown crypto error: `{0}`")]
@@ -23,13 +21,4 @@ pub enum DomainError {
InternalError(String),
}
impl From<sea_orm::TransactionError<DomainError>> for DomainError {
fn from(value: sea_orm::TransactionError<DomainError>) -> Self {
match value {
sea_orm::TransactionError::Connection(e) => e.into(),
sea_orm::TransactionError::Transaction(e) => e,
}
}
}
pub type Result<T> = std::result::Result<T, DomainError>;

View File

@@ -1,8 +1,7 @@
use crate::domain::{
use super::{
error::Result,
types::{
AttributeType, Group, GroupDetails, GroupId, JpegPhoto, User, UserAndGroups, UserColumn,
UserId, Uuid,
Group, GroupDetails, GroupId, JpegPhoto, User, UserAndGroups, UserColumn, UserId, Uuid,
},
};
use async_trait::async_trait;
@@ -54,7 +53,6 @@ pub enum UserRequestFilter {
UserId(UserId),
UserIdSubString(SubStringFilter),
Equality(UserColumn, String),
AttributeEquality(String, String),
SubString(UserColumn, SubStringFilter),
// Check if a user belongs to a group identified by name.
MemberOf(String),
@@ -123,49 +121,18 @@ pub struct UpdateGroupRequest {
pub display_name: Option<String>,
}
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)]
pub struct AttributeSchema {
pub name: String,
//TODO: pub aliases: Vec<String>,
pub attribute_type: AttributeType,
pub is_list: bool,
pub is_visible: bool,
pub is_editable: bool,
pub is_hardcoded: bool,
}
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)]
pub struct AttributeList {
pub attributes: Vec<AttributeSchema>,
}
impl AttributeList {
pub fn get_attribute_type(&self, name: &str) -> Option<(AttributeType, bool)> {
self.attributes
.iter()
.find(|a| a.name == name)
.map(|a| (a.attribute_type, a.is_list))
}
}
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)]
pub struct Schema {
pub user_attributes: AttributeList,
pub group_attributes: AttributeList,
}
#[async_trait]
pub trait LoginHandler: Send + Sync {
async fn bind(&self, request: BindRequest) -> Result<()>;
}
#[async_trait]
pub trait GroupListerBackendHandler: SchemaBackendHandler {
pub trait GroupListerBackendHandler {
async fn list_groups(&self, filters: Option<GroupRequestFilter>) -> Result<Vec<Group>>;
}
#[async_trait]
pub trait GroupBackendHandler: SchemaBackendHandler {
pub trait GroupBackendHandler {
async fn get_group_details(&self, group_id: GroupId) -> Result<GroupDetails>;
async fn update_group(&self, request: UpdateGroupRequest) -> Result<()>;
async fn create_group(&self, group_name: &str) -> Result<GroupId>;
@@ -173,7 +140,7 @@ pub trait GroupBackendHandler: SchemaBackendHandler {
}
#[async_trait]
pub trait UserListerBackendHandler: SchemaBackendHandler {
pub trait UserListerBackendHandler {
async fn list_users(
&self,
filters: Option<UserRequestFilter>,
@@ -182,7 +149,7 @@ pub trait UserListerBackendHandler: SchemaBackendHandler {
}
#[async_trait]
pub trait UserBackendHandler: SchemaBackendHandler {
pub trait UserBackendHandler {
async fn get_user_details(&self, user_id: &UserId) -> Result<User>;
async fn create_user(&self, request: CreateUserRequest) -> Result<()>;
async fn update_user(&self, request: UpdateUserRequest) -> Result<()>;
@@ -192,11 +159,6 @@ pub trait UserBackendHandler: SchemaBackendHandler {
async fn get_user_groups(&self, user_id: &UserId) -> Result<HashSet<GroupDetails>>;
}
#[async_trait]
pub trait SchemaBackendHandler {
async fn get_schema(&self) -> Result<Schema>;
}
#[async_trait]
pub trait BackendHandler:
Send
@@ -205,16 +167,53 @@ pub trait BackendHandler:
+ UserBackendHandler
+ UserListerBackendHandler
+ GroupListerBackendHandler
+ SchemaBackendHandler
{
}
#[cfg(test)]
mod tests {
use super::*;
use base64::Engine;
use pretty_assertions::assert_ne;
mockall::mock! {
pub TestBackendHandler{}
impl Clone for TestBackendHandler {
fn clone(&self) -> Self;
}
#[async_trait]
impl GroupListerBackendHandler for TestBackendHandler {
async fn list_groups(&self, filters: Option<GroupRequestFilter>) -> Result<Vec<Group>>;
}
#[async_trait]
impl GroupBackendHandler for TestBackendHandler {
async fn get_group_details(&self, group_id: GroupId) -> Result<GroupDetails>;
async fn update_group(&self, request: UpdateGroupRequest) -> Result<()>;
async fn create_group(&self, group_name: &str) -> Result<GroupId>;
async fn delete_group(&self, group_id: GroupId) -> Result<()>;
}
#[async_trait]
impl UserListerBackendHandler for TestBackendHandler {
async fn list_users(&self, filters: Option<UserRequestFilter>, get_groups: bool) -> Result<Vec<UserAndGroups>>;
}
#[async_trait]
impl UserBackendHandler for TestBackendHandler {
async fn get_user_details(&self, user_id: &UserId) -> Result<User>;
async fn create_user(&self, request: CreateUserRequest) -> Result<()>;
async fn update_user(&self, request: UpdateUserRequest) -> Result<()>;
async fn delete_user(&self, user_id: &UserId) -> Result<()>;
async fn get_user_groups(&self, user_id: &UserId) -> Result<HashSet<GroupDetails>>;
async fn add_user_to_group(&self, user_id: &UserId, group_id: GroupId) -> Result<()>;
async fn remove_user_from_group(&self, user_id: &UserId, group_id: GroupId) -> Result<()>;
}
#[async_trait]
impl BackendHandler for TestBackendHandler {}
#[async_trait]
impl LoginHandler for TestBackendHandler {
async fn bind(&self, request: BindRequest) -> Result<()>;
}
}
#[cfg(test)]
mod tests {
use base64::Engine;
use super::*;
#[test]
fn test_uuid_time() {
use chrono::prelude::*;

View File

@@ -6,7 +6,7 @@ use tracing::{debug, instrument, warn};
use crate::domain::{
handler::{GroupListerBackendHandler, GroupRequestFilter},
ldap::error::LdapError,
types::{Group, UserId, Uuid},
types::{Group, GroupColumn, UserId, Uuid},
};
use super::{
@@ -140,8 +140,10 @@ fn convert_group_filter(
GroupRequestFilter::from(false)
})),
_ => match map_group_field(field) {
Some("display_name") => Ok(GroupRequestFilter::DisplayName(value.to_string())),
Some("uuid") => Ok(GroupRequestFilter::Uuid(
Some(GroupColumn::DisplayName) => {
Ok(GroupRequestFilter::DisplayName(value.to_string()))
}
Some(GroupColumn::Uuid) => Ok(GroupRequestFilter::Uuid(
Uuid::try_from(value.as_str()).map_err(|e| LdapError {
code: LdapResultCode::InappropriateMatching,
message: format!("Invalid UUID: {:#}", e),
@@ -179,7 +181,7 @@ fn convert_group_filter(
LdapFilter::Substring(field, substring_filter) => {
let field = &field.to_ascii_lowercase();
match map_group_field(field.as_str()) {
Some("display_name") => Ok(GroupRequestFilter::DisplayNameSubString(
Some(GroupColumn::DisplayName) => Ok(GroupRequestFilter::DisplayNameSubString(
substring_filter.clone().into(),
)),
_ => Err(LdapError {
@@ -198,13 +200,14 @@ fn convert_group_filter(
}
}
#[instrument(skip_all, level = "debug", fields(ldap_filter))]
#[instrument(skip_all, level = "debug")]
pub async fn get_groups_list<Backend: GroupListerBackendHandler>(
ldap_info: &LdapInfo,
ldap_filter: &LdapFilter,
base: &str,
backend: &Backend,
) -> LdapResult<Vec<Group>> {
debug!(?ldap_filter);
let filters = convert_group_filter(ldap_info, ldap_filter)?;
debug!(?filters);
backend

View File

@@ -5,24 +5,25 @@ use ldap3_proto::{
use tracing::{debug, instrument, warn};
use crate::domain::{
handler::{Schema, UserListerBackendHandler, UserRequestFilter},
handler::{UserListerBackendHandler, UserRequestFilter},
ldap::{
error::{LdapError, LdapResult},
utils::{
expand_attribute_wildcards, get_custom_attribute, get_group_id_from_distinguished_name,
get_user_id_from_distinguished_name, map_user_field, LdapInfo, UserFieldType,
},
error::LdapError,
utils::{expand_attribute_wildcards, get_user_id_from_distinguished_name},
},
types::{GroupDetails, User, UserAndGroups, UserColumn, UserId},
};
use super::{
error::LdapResult,
utils::{get_group_id_from_distinguished_name, map_user_field, LdapInfo},
};
pub fn get_user_attribute(
user: &User,
attribute: &str,
base_dn_str: &str,
groups: Option<&[GroupDetails]>,
ignored_user_attributes: &[String],
schema: &Schema,
) -> Option<Vec<Vec<u8>>> {
let attribute = attribute.to_ascii_lowercase();
let attribute_values = match attribute.as_str() {
@@ -37,13 +38,9 @@ pub fn get_user_attribute(
"uid" | "user_id" | "id" => vec![user.user_id.to_string().into_bytes()],
"entryuuid" | "uuid" => vec![user.uuid.to_string().into_bytes()],
"mail" | "email" => vec![user.email.clone().into_bytes()],
"givenname" | "first_name" | "firstname" => {
get_custom_attribute(&user.attributes, "first_name", schema)?
}
"sn" | "last_name" | "lastname" => {
get_custom_attribute(&user.attributes, "last_name", schema)?
}
"jpegphoto" | "avatar" => get_custom_attribute(&user.attributes, "avatar", schema)?,
"givenname" | "first_name" | "firstname" => vec![user.first_name.clone()?.into_bytes()],
"sn" | "last_name" | "lastname" => vec![user.last_name.clone()?.into_bytes()],
"jpegphoto" | "avatar" => vec![user.avatar.clone()?.into_bytes()],
"memberof" => groups
.into_iter()
.flatten()
@@ -103,7 +100,6 @@ fn make_ldap_search_user_result_entry(
attributes: &[String],
groups: Option<&[GroupDetails]>,
ignored_user_attributes: &[String],
schema: &Schema,
) -> LdapSearchResultEntry {
let expanded_attributes = expand_user_attribute_wildcards(attributes);
let dn = format!("uid={},ou=people,{}", user.user_id.as_str(), base_dn_str);
@@ -112,14 +108,8 @@ fn make_ldap_search_user_result_entry(
attributes: expanded_attributes
.iter()
.filter_map(|a| {
let values = get_user_attribute(
&user,
a,
base_dn_str,
groups,
ignored_user_attributes,
schema,
)?;
let values =
get_user_attribute(&user, a, base_dn_str, groups, ignored_user_attributes)?;
Some(LdapPartialAttribute {
atype: a.to_string(),
vals: values,
@@ -164,17 +154,9 @@ fn convert_user_filter(ldap_info: &LdapInfo, filter: &LdapFilter) -> LdapResult<
UserRequestFilter::from(false)
})),
_ => match map_user_field(field) {
UserFieldType::PrimaryField(UserColumn::UserId) => {
Ok(UserRequestFilter::UserId(UserId::new(value)))
}
UserFieldType::PrimaryField(field) => {
Ok(UserRequestFilter::Equality(field, value.clone()))
}
UserFieldType::Attribute(field) => Ok(UserRequestFilter::AttributeEquality(
field.to_owned(),
value.clone(),
)),
UserFieldType::NoMatch => {
Some(UserColumn::UserId) => Ok(UserRequestFilter::UserId(UserId::new(value))),
Some(field) => Ok(UserRequestFilter::Equality(field, value.clone())),
None => {
if !ldap_info.ignored_user_attributes.contains(field) {
warn!(
r#"Ignoring unknown user attribute "{}" in filter.\n\
@@ -194,26 +176,26 @@ fn convert_user_filter(ldap_info: &LdapInfo, filter: &LdapFilter) -> LdapResult<
field == "objectclass"
|| field == "dn"
|| field == "distinguishedname"
|| !matches!(map_user_field(field), UserFieldType::NoMatch),
|| map_user_field(field).is_some(),
))
}
LdapFilter::Substring(field, substring_filter) => {
let field = &field.to_ascii_lowercase();
match map_user_field(field.as_str()) {
UserFieldType::PrimaryField(UserColumn::UserId) => Ok(
UserRequestFilter::UserIdSubString(substring_filter.clone().into()),
),
UserFieldType::NoMatch
| UserFieldType::Attribute(_)
| UserFieldType::PrimaryField(UserColumn::CreationDate)
| UserFieldType::PrimaryField(UserColumn::Uuid) => Err(LdapError {
Some(UserColumn::UserId) => Ok(UserRequestFilter::UserIdSubString(
substring_filter.clone().into(),
)),
None
| Some(UserColumn::CreationDate)
| Some(UserColumn::Avatar)
| Some(UserColumn::Uuid) => Err(LdapError {
code: LdapResultCode::UnwillingToPerform,
message: format!(
"Unsupported user attribute for substring filter: {:?}",
field
),
}),
UserFieldType::PrimaryField(field) => Ok(UserRequestFilter::SubString(
Some(field) => Ok(UserRequestFilter::SubString(
field,
substring_filter.clone().into(),
)),
@@ -230,7 +212,7 @@ fn expand_user_attribute_wildcards(attributes: &[String]) -> Vec<&str> {
expand_attribute_wildcards(attributes, ALL_USER_ATTRIBUTE_KEYS)
}
#[instrument(skip_all, level = "debug", fields(ldap_filter, request_groups))]
#[instrument(skip_all, level = "debug")]
pub async fn get_user_list<Backend: UserListerBackendHandler>(
ldap_info: &LdapInfo,
ldap_filter: &LdapFilter,
@@ -238,6 +220,7 @@ pub async fn get_user_list<Backend: UserListerBackendHandler>(
base: &str,
backend: &Backend,
) -> LdapResult<Vec<UserAndGroups>> {
debug!(?ldap_filter);
let filters = convert_user_filter(ldap_info, ldap_filter)?;
debug!(?filters);
backend
@@ -253,7 +236,6 @@ pub fn convert_users_to_ldap_op<'a>(
users: Vec<UserAndGroups>,
attributes: &'a [String],
ldap_info: &'a LdapInfo,
schema: &'a Schema,
) -> impl Iterator<Item = LdapOp> + 'a {
users.into_iter().map(move |u| {
LdapOp::SearchResultEntry(make_ldap_search_user_result_entry(
@@ -262,7 +244,6 @@ pub fn convert_users_to_ldap_op<'a>(
attributes,
u.groups.as_deref(),
&ldap_info.ignored_user_attributes,
schema,
))
})
}

View File

@@ -1,12 +1,11 @@
use chrono::{NaiveDateTime, TimeZone};
use itertools::Itertools;
use ldap3_proto::{proto::LdapSubstringFilter, LdapResultCode};
use tracing::{debug, instrument, warn};
use crate::domain::{
handler::{Schema, SubStringFilter},
handler::SubStringFilter,
ldap::error::{LdapError, LdapResult},
types::{AttributeType, AttributeValue, JpegPhoto, UserColumn, UserId},
types::{GroupColumn, UserColumn, UserId},
};
impl From<LdapSubstringFilter> for SubStringFilter {
@@ -106,7 +105,7 @@ pub fn get_group_id_from_distinguished_name(
get_id_from_distinguished_name(dn, base_tree, base_dn_str, true)
}
#[instrument(skip(all_attribute_keys), level = "debug")]
#[instrument(skip_all, level = "debug")]
pub fn expand_attribute_wildcards<'a>(
ldap_attributes: &'a [String],
all_attribute_keys: &'a [&'static str],
@@ -128,7 +127,7 @@ pub fn expand_attribute_wildcards<'a>(
.into_iter()
.unique_by(|a| a.to_ascii_lowercase())
.collect_vec();
debug!(?resolved_attributes);
debug!(?ldap_attributes, ?resolved_attributes);
resolved_attributes
}
@@ -153,37 +152,31 @@ pub fn is_subtree(subtree: &[(String, String)], base_tree: &[(String, String)])
true
}
/// Classification of an LDAP attribute name, as resolved by [`map_user_field`].
pub enum UserFieldType {
    /// The name does not correspond to any known user field.
    NoMatch,
    /// The name maps to a built-in column of the users table.
    PrimaryField(UserColumn),
    /// The name maps to a custom attribute, identified by its canonical name.
    Attribute(&'static str),
}
/// Resolves an LDAP attribute name (including its common aliases, e.g. `sn`
/// for last name) to the corresponding user field.
///
/// The caller must pass the name already lower-cased; unknown names yield
/// [`UserFieldType::NoMatch`].
pub fn map_user_field(field: &str) -> UserFieldType {
    // Matching is case-insensitive; callers are expected to lower-case first.
    assert!(field == field.to_ascii_lowercase());
    match field {
        "uid" | "user_id" | "id" => UserFieldType::PrimaryField(UserColumn::UserId),
        "mail" | "email" => UserFieldType::PrimaryField(UserColumn::Email),
        "cn" | "displayname" | "display_name" => {
            UserFieldType::PrimaryField(UserColumn::DisplayName)
        }
        // These are stored as custom attributes rather than user columns.
        "givenname" | "first_name" | "firstname" => UserFieldType::Attribute("first_name"),
        "sn" | "last_name" | "lastname" => UserFieldType::Attribute("last_name"),
        "avatar" | "jpegphoto" => UserFieldType::Attribute("avatar"),
        "creationdate" | "createtimestamp" | "modifytimestamp" | "creation_date" => {
            UserFieldType::PrimaryField(UserColumn::CreationDate)
        }
        "entryuuid" | "uuid" => UserFieldType::PrimaryField(UserColumn::Uuid),
        _ => UserFieldType::NoMatch,
    }
}
pub fn map_group_field(field: &str) -> Option<&'static str> {
pub fn map_user_field(field: &str) -> Option<UserColumn> {
assert!(field == field.to_ascii_lowercase());
Some(match field {
"cn" | "displayname" | "uid" | "display_name" => "display_name",
"creationdate" | "createtimestamp" | "modifytimestamp" | "creation_date" => "creation_date",
"entryuuid" | "uuid" => "uuid",
"uid" | "user_id" | "id" => UserColumn::UserId,
"mail" | "email" => UserColumn::Email,
"cn" | "displayname" | "display_name" => UserColumn::DisplayName,
"givenname" | "first_name" | "firstname" => UserColumn::FirstName,
"sn" | "last_name" | "lastname" => UserColumn::LastName,
"avatar" | "jpegphoto" => UserColumn::Avatar,
"creationdate" | "createtimestamp" | "modifytimestamp" | "creation_date" => {
UserColumn::CreationDate
}
"entryuuid" | "uuid" => UserColumn::Uuid,
_ => return None,
})
}
/// Resolves an LDAP attribute name (including its common aliases) to the
/// corresponding group column, or `None` for unknown names.
///
/// The caller must pass the name already lower-cased.
pub fn map_group_field(field: &str) -> Option<GroupColumn> {
    // Matching is case-insensitive; callers are expected to lower-case first.
    assert!(field == field.to_ascii_lowercase());
    match field {
        "cn" | "displayname" | "uid" | "display_name" => Some(GroupColumn::DisplayName),
        "creationdate" | "createtimestamp" | "modifytimestamp" | "creation_date" => {
            Some(GroupColumn::CreationDate)
        }
        "entryuuid" | "uuid" => Some(GroupColumn::Uuid),
        _ => None,
    }
}
@@ -194,64 +187,3 @@ pub struct LdapInfo {
pub ignored_user_attributes: Vec<String>,
pub ignored_group_attributes: Vec<String>,
}
/// Looks up `attribute_name` in the given attribute list and serializes its
/// value(s) into the raw byte representation used in LDAP responses.
///
/// Returns `None` when either the schema does not declare the attribute, or
/// the user/group does not carry a value for it. LDAP encoding rules applied:
/// integers become decimal strings, datetimes become RFC 3339 strings (UTC),
/// strings and photos are emitted as their raw bytes. List-typed attributes
/// produce one entry per element.
pub fn get_custom_attribute(
    attributes: &[AttributeValue],
    attribute_name: &str,
    schema: &Schema,
) -> Option<Vec<Vec<u8>>> {
    // Bail out early when the attribute isn't declared in the schema.
    let attribute_type = schema.user_attributes.get_attribute_type(attribute_name)?;
    // Bail out when this entity has no value for the attribute.
    let attribute = attributes.iter().find(|a| a.name == attribute_name)?;
    let convert_date = |date| {
        chrono::Utc
            .from_utc_datetime(&date)
            .to_rfc3339()
            .into_bytes()
    };
    // The second tuple element is the "is_list" flag.
    Some(match attribute_type {
        (AttributeType::String, false) => {
            vec![attribute.value.unwrap::<String>().into_bytes()]
        }
        (AttributeType::Integer, false) => {
            // LDAP integers are encoded as strings.
            vec![attribute.value.unwrap::<i64>().to_string().into_bytes()]
        }
        (AttributeType::JpegPhoto, false) => {
            vec![attribute.value.unwrap::<JpegPhoto>().into_bytes()]
        }
        (AttributeType::DateTime, false) => {
            vec![convert_date(attribute.value.unwrap::<NaiveDateTime>())]
        }
        (AttributeType::String, true) => attribute
            .value
            .unwrap::<Vec<String>>()
            .into_iter()
            .map(String::into_bytes)
            .collect(),
        (AttributeType::Integer, true) => attribute
            .value
            .unwrap::<Vec<i64>>()
            .into_iter()
            .map(|i| i.to_string().into_bytes())
            .collect(),
        (AttributeType::JpegPhoto, true) => attribute
            .value
            .unwrap::<Vec<JpegPhoto>>()
            .into_iter()
            .map(JpegPhoto::into_bytes)
            .collect(),
        (AttributeType::DateTime, true) => attribute
            .value
            .unwrap::<Vec<NaiveDateTime>>()
            .into_iter()
            .map(convert_date)
            .collect(),
    })
}

View File

@@ -7,7 +7,6 @@ pub mod sql_backend_handler;
pub mod sql_group_backend_handler;
pub mod sql_migrations;
pub mod sql_opaque_handler;
pub mod sql_schema_backend_handler;
pub mod sql_tables;
pub mod sql_user_backend_handler;
pub mod types;

View File

@@ -1,52 +0,0 @@
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
use crate::domain::{handler::AttributeSchema, types::AttributeType};
/// Sea-ORM entity for the `group_attribute_schema` table.
///
/// Each row declares one custom group attribute: its value type, whether it
/// holds a list of values, its visibility/editability flags, and whether it
/// is one of the hardcoded built-in attributes.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
#[sea_orm(table_name = "group_attribute_schema")]
pub struct Model {
    // The attribute name is the primary key (string, hence no auto-increment).
    #[sea_orm(
        primary_key,
        auto_increment = false,
        column_name = "group_attribute_schema_name"
    )]
    pub attribute_name: String,
    // Type of the values (string, integer, JPEG photo, datetime, ...).
    #[sea_orm(column_name = "group_attribute_schema_type")]
    pub attribute_type: AttributeType,
    // Whether the attribute stores a list of values instead of a single one.
    #[sea_orm(column_name = "group_attribute_schema_is_list")]
    pub is_list: bool,
    // Mapped to `AttributeSchema::is_visible` in the domain layer.
    #[sea_orm(column_name = "group_attribute_schema_is_group_visible")]
    pub is_group_visible: bool,
    // Mapped to `AttributeSchema::is_editable` in the domain layer.
    #[sea_orm(column_name = "group_attribute_schema_is_group_editable")]
    pub is_group_editable: bool,
    #[sea_orm(column_name = "group_attribute_schema_is_hardcoded")]
    pub is_hardcoded: bool,
}
/// One schema row is referenced by many `group_attributes` value rows.
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(has_many = "super::group_attributes::Entity")]
    GroupAttributes,
}
impl Related<super::GroupAttributes> for Entity {
    fn to() -> RelationDef {
        Relation::GroupAttributes.def()
    }
}
impl ActiveModelBehavior for ActiveModel {}
/// Converts the DB row into the domain-level [`AttributeSchema`].
impl From<Model> for AttributeSchema {
    fn from(value: Model) -> Self {
        Self {
            name: value.attribute_name,
            attribute_type: value.attribute_type,
            is_list: value.is_list,
            is_visible: value.is_group_visible,
            is_editable: value.is_group_editable,
            is_hardcoded: value.is_hardcoded,
        }
    }
}

View File

@@ -1,72 +0,0 @@
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
use crate::domain::types::{AttributeValue, GroupId, Serialized};
/// Sea-ORM entity for the `group_attributes` table.
///
/// Each row stores one custom attribute value for one group; the value itself
/// is kept in serialized form. The primary key is the (group, attribute name)
/// pair, so a group has at most one row per attribute.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
#[sea_orm(table_name = "group_attributes")]
pub struct Model {
    #[sea_orm(
        primary_key,
        auto_increment = false,
        column_name = "group_attribute_group_id"
    )]
    pub group_id: GroupId,
    #[sea_orm(
        primary_key,
        auto_increment = false,
        column_name = "group_attribute_name"
    )]
    pub attribute_name: String,
    // Serialized attribute value; decoded according to the schema's type.
    #[sea_orm(column_name = "group_attribute_value")]
    pub value: Serialized,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    // Values are deleted/updated in cascade with their owning group.
    #[sea_orm(
        belongs_to = "super::groups::Entity",
        from = "Column::GroupId",
        to = "super::groups::Column::GroupId",
        on_update = "Cascade",
        on_delete = "Cascade"
    )]
    Groups,
    // Values are deleted/updated in cascade with their schema declaration.
    #[sea_orm(
        belongs_to = "super::group_attribute_schema::Entity",
        from = "Column::AttributeName",
        to = "super::group_attribute_schema::Column::AttributeName",
        on_update = "Cascade",
        on_delete = "Cascade"
    )]
    GroupAttributeSchema,
}
impl Related<super::Group> for Entity {
    fn to() -> RelationDef {
        Relation::Groups.def()
    }
}
impl Related<super::GroupAttributeSchema> for Entity {
    fn to() -> RelationDef {
        Relation::GroupAttributeSchema.def()
    }
}
impl ActiveModelBehavior for ActiveModel {}
/// Converts the DB row into the domain-level [`AttributeValue`], dropping the
/// owning group id (the caller already knows which group it belongs to).
impl From<Model> for AttributeValue {
    fn from(
        Model {
            group_id: _,
            attribute_name,
            value,
        }: Model,
    ) -> Self {
        Self {
            name: attribute_name,
            value,
        }
    }
}

View File

@@ -37,7 +37,6 @@ impl From<Model> for crate::domain::types::Group {
creation_date: group.creation_date,
uuid: group.uuid,
users: vec![],
attributes: Vec::new(),
}
}
}
@@ -49,7 +48,6 @@ impl From<Model> for crate::domain::types::GroupDetails {
display_name: group.display_name,
creation_date: group.creation_date,
uuid: group.uuid,
attributes: Vec::new(),
}
}
}

View File

@@ -9,10 +9,4 @@ pub mod memberships;
pub mod password_reset_tokens;
pub mod users;
pub mod user_attribute_schema;
pub mod user_attributes;
pub mod group_attribute_schema;
pub mod group_attributes;
pub use prelude::*;

View File

@@ -1,9 +1,5 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.10.3
pub use super::group_attribute_schema::Column as GroupAttributeSchemaColumn;
pub use super::group_attribute_schema::Entity as GroupAttributeSchema;
pub use super::group_attributes::Column as GroupAttributesColumn;
pub use super::group_attributes::Entity as GroupAttributes;
pub use super::groups::Column as GroupColumn;
pub use super::groups::Entity as Group;
pub use super::jwt_refresh_storage::Column as JwtRefreshStorageColumn;
@@ -14,9 +10,5 @@ pub use super::memberships::Column as MembershipColumn;
pub use super::memberships::Entity as Membership;
pub use super::password_reset_tokens::Column as PasswordResetTokensColumn;
pub use super::password_reset_tokens::Entity as PasswordResetTokens;
pub use super::user_attribute_schema::Column as UserAttributeSchemaColumn;
pub use super::user_attribute_schema::Entity as UserAttributeSchema;
pub use super::user_attributes::Column as UserAttributesColumn;
pub use super::user_attributes::Entity as UserAttributes;
pub use super::users::Column as UserColumn;
pub use super::users::Entity as User;

View File

@@ -1,52 +0,0 @@
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
use crate::domain::{handler::AttributeSchema, types::AttributeType};
/// Sea-ORM entity for the `user_attribute_schema` table.
///
/// Each row declares one custom user attribute: its value type, whether it
/// holds a list of values, its visibility/editability flags, and whether it
/// is one of the hardcoded built-in attributes.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
#[sea_orm(table_name = "user_attribute_schema")]
pub struct Model {
    // The attribute name is the primary key (string, hence no auto-increment).
    #[sea_orm(
        primary_key,
        auto_increment = false,
        column_name = "user_attribute_schema_name"
    )]
    pub attribute_name: String,
    // Type of the values (string, integer, JPEG photo, datetime, ...).
    #[sea_orm(column_name = "user_attribute_schema_type")]
    pub attribute_type: AttributeType,
    // Whether the attribute stores a list of values instead of a single one.
    #[sea_orm(column_name = "user_attribute_schema_is_list")]
    pub is_list: bool,
    // Mapped to `AttributeSchema::is_visible` in the domain layer.
    #[sea_orm(column_name = "user_attribute_schema_is_user_visible")]
    pub is_user_visible: bool,
    // Mapped to `AttributeSchema::is_editable` in the domain layer.
    #[sea_orm(column_name = "user_attribute_schema_is_user_editable")]
    pub is_user_editable: bool,
    #[sea_orm(column_name = "user_attribute_schema_is_hardcoded")]
    pub is_hardcoded: bool,
}
/// One schema row is referenced by many `user_attributes` value rows.
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(has_many = "super::user_attributes::Entity")]
    UserAttributes,
}
impl Related<super::UserAttributes> for Entity {
    fn to() -> RelationDef {
        Relation::UserAttributes.def()
    }
}
impl ActiveModelBehavior for ActiveModel {}
/// Converts the DB row into the domain-level [`AttributeSchema`].
impl From<Model> for AttributeSchema {
    fn from(value: Model) -> Self {
        Self {
            name: value.attribute_name,
            attribute_type: value.attribute_type,
            is_list: value.is_list,
            is_visible: value.is_user_visible,
            is_editable: value.is_user_editable,
            is_hardcoded: value.is_hardcoded,
        }
    }
}

View File

@@ -1,72 +0,0 @@
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
use crate::domain::types::{AttributeValue, Serialized, UserId};
/// Sea-ORM entity for the `user_attributes` table.
///
/// Each row stores one custom attribute value for one user; the value itself
/// is kept in serialized form. The primary key is the (user, attribute name)
/// pair, so a user has at most one row per attribute.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
#[sea_orm(table_name = "user_attributes")]
pub struct Model {
    #[sea_orm(
        primary_key,
        auto_increment = false,
        column_name = "user_attribute_user_id"
    )]
    pub user_id: UserId,
    #[sea_orm(
        primary_key,
        auto_increment = false,
        column_name = "user_attribute_name"
    )]
    pub attribute_name: String,
    // Serialized attribute value; decoded according to the schema's type.
    #[sea_orm(column_name = "user_attribute_value")]
    pub value: Serialized,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    // Values are deleted/updated in cascade with their owning user.
    #[sea_orm(
        belongs_to = "super::users::Entity",
        from = "Column::UserId",
        to = "super::users::Column::UserId",
        on_update = "Cascade",
        on_delete = "Cascade"
    )]
    Users,
    // Values are deleted/updated in cascade with their schema declaration.
    #[sea_orm(
        belongs_to = "super::user_attribute_schema::Entity",
        from = "Column::AttributeName",
        to = "super::user_attribute_schema::Column::AttributeName",
        on_update = "Cascade",
        on_delete = "Cascade"
    )]
    UserAttributeSchema,
}
impl Related<super::User> for Entity {
    fn to() -> RelationDef {
        Relation::Users.def()
    }
}
impl Related<super::UserAttributeSchema> for Entity {
    fn to() -> RelationDef {
        Relation::UserAttributeSchema.def()
    }
}
impl ActiveModelBehavior for ActiveModel {}
/// Converts the DB row into the domain-level [`AttributeValue`], dropping the
/// owning user id (the caller already knows which user it belongs to).
impl From<Model> for AttributeValue {
    fn from(
        Model {
            user_id: _,
            attribute_name,
            value,
        }: Model,
    ) -> Self {
        Self {
            name: attribute_name,
            value,
        }
    }
}

View File

@@ -3,7 +3,7 @@
use sea_orm::{entity::prelude::*, sea_query::BlobSize};
use serde::{Deserialize, Serialize};
use crate::domain::types::{UserId, Uuid};
use crate::domain::types::{JpegPhoto, UserId, Uuid};
#[derive(Copy, Clone, Default, Debug, DeriveEntity)]
pub struct Entity;
@@ -15,6 +15,9 @@ pub struct Model {
pub user_id: UserId,
pub email: String,
pub display_name: Option<String>,
pub first_name: Option<String>,
pub last_name: Option<String>,
pub avatar: Option<JpegPhoto>,
pub creation_date: chrono::NaiveDateTime,
pub password_hash: Option<Vec<u8>>,
pub totp_secret: Option<String>,
@@ -33,6 +36,9 @@ pub enum Column {
UserId,
Email,
DisplayName,
FirstName,
LastName,
Avatar,
CreationDate,
PasswordHash,
TotpSecret,
@@ -48,6 +54,9 @@ impl ColumnTrait for Column {
Column::UserId => ColumnType::String(Some(255)),
Column::Email => ColumnType::String(Some(255)),
Column::DisplayName => ColumnType::String(Some(255)),
Column::FirstName => ColumnType::String(Some(255)),
Column::LastName => ColumnType::String(Some(255)),
Column::Avatar => ColumnType::Binary(BlobSize::Long),
Column::CreationDate => ColumnType::DateTime,
Column::PasswordHash => ColumnType::Binary(BlobSize::Medium),
Column::TotpSecret => ColumnType::String(Some(64)),
@@ -115,9 +124,11 @@ impl From<Model> for crate::domain::types::User {
user_id: user.user_id,
email: user.email,
display_name: user.display_name,
first_name: user.first_name,
last_name: user.last_name,
creation_date: user.creation_date,
uuid: user.uuid,
attributes: Vec::new(),
avatar: user.avatar,
}
}
}

View File

@@ -32,7 +32,6 @@ pub mod tests {
infra::configuration::ConfigurationBuilder,
};
use lldap_auth::{opaque, registration};
use pretty_assertions::assert_eq;
use sea_orm::Database;
pub fn get_default_config() -> Configuration {
@@ -60,7 +59,7 @@ pub mod tests {
insert_user_no_password(handler, name).await;
let mut rng = rand::rngs::OsRng;
let client_registration_start =
opaque::client::registration::start_registration(pass.as_bytes(), &mut rng).unwrap();
opaque::client::registration::start_registration(pass, &mut rng).unwrap();
let response = handler
.registration_start(registration::ClientRegistrationStartRequest {
username: name.to_string(),
@@ -87,7 +86,7 @@ pub mod tests {
handler
.create_user(CreateUserRequest {
user_id: UserId::new(name),
email: format!("{}@bob.bob", name),
email: "bob@bob.bob".to_string(),
display_name: Some("display ".to_string() + name),
first_name: Some("first ".to_string() + name),
last_name: Some("last ".to_string() + name),

View File

@@ -5,7 +5,7 @@ use crate::domain::{
},
model::{self, GroupColumn, MembershipColumn},
sql_backend_handler::SqlBackendHandler,
types::{AttributeValue, Group, GroupDetails, GroupId, Uuid},
types::{Group, GroupDetails, GroupId, Uuid},
};
use async_trait::async_trait;
use sea_orm::{
@@ -13,7 +13,7 @@ use sea_orm::{
ActiveModelTrait, ActiveValue, ColumnTrait, EntityTrait, QueryFilter, QueryOrder, QuerySelect,
QueryTrait,
};
use tracing::instrument;
use tracing::{debug, instrument};
fn get_group_filter_expr(filter: GroupRequestFilter) -> Cond {
use GroupRequestFilter::*;
@@ -60,10 +60,12 @@ fn get_group_filter_expr(filter: GroupRequestFilter) -> Cond {
#[async_trait]
impl GroupListerBackendHandler for SqlBackendHandler {
#[instrument(skip(self), level = "debug", ret, err)]
#[instrument(skip_all, level = "debug", ret, err)]
async fn list_groups(&self, filters: Option<GroupRequestFilter>) -> Result<Vec<Group>> {
debug!(?filters);
let results = model::Group::find()
.order_by_asc(GroupColumn::GroupId)
// The order_by must be before find_with_related otherwise the primary order is by group_id.
.order_by_asc(GroupColumn::DisplayName)
.find_with_related(model::Membership)
.filter(
filters
@@ -83,7 +85,7 @@ impl GroupListerBackendHandler for SqlBackendHandler {
)
.all(&self.sql_pool)
.await?;
let mut groups: Vec<_> = results
Ok(results
.into_iter()
.map(|(group, users)| {
let users: Vec<_> = users.into_iter().map(|u| u.user_id).collect();
@@ -92,53 +94,25 @@ impl GroupListerBackendHandler for SqlBackendHandler {
..group.into()
}
})
.collect();
let group_ids = groups.iter().map(|u| &u.id);
let attributes = model::GroupAttributes::find()
.filter(model::GroupAttributesColumn::GroupId.is_in(group_ids))
.order_by_asc(model::GroupAttributesColumn::GroupId)
.order_by_asc(model::GroupAttributesColumn::AttributeName)
.all(&self.sql_pool)
.await?;
let mut attributes_iter = attributes.into_iter().peekable();
use itertools::Itertools; // For take_while_ref
for group in groups.iter_mut() {
assert!(attributes_iter
.peek()
.map(|u| u.group_id >= group.id)
.unwrap_or(true),
"Attributes are not sorted, groups are not sorted, or previous group didn't consume all the attributes");
group.attributes = attributes_iter
.take_while_ref(|u| u.group_id == group.id)
.map(AttributeValue::from)
.collect();
}
groups.sort_by(|g1, g2| g1.display_name.cmp(&g2.display_name));
Ok(groups)
.collect())
}
}
#[async_trait]
impl GroupBackendHandler for SqlBackendHandler {
#[instrument(skip(self), level = "debug", ret, err)]
#[instrument(skip_all, level = "debug", ret, err)]
async fn get_group_details(&self, group_id: GroupId) -> Result<GroupDetails> {
let mut group_details = model::Group::find_by_id(group_id)
debug!(?group_id);
model::Group::find_by_id(group_id)
.into_model::<GroupDetails>()
.one(&self.sql_pool)
.await?
.map(Into::<GroupDetails>::into)
.ok_or_else(|| DomainError::EntityNotFound(format!("{:?}", group_id)))?;
let attributes = model::GroupAttributes::find()
.filter(model::GroupAttributesColumn::GroupId.eq(group_details.group_id))
.order_by_asc(model::GroupAttributesColumn::AttributeName)
.all(&self.sql_pool)
.await?;
group_details.attributes = attributes.into_iter().map(AttributeValue::from).collect();
Ok(group_details)
.ok_or_else(|| DomainError::EntityNotFound(format!("{:?}", group_id)))
}
#[instrument(skip(self), level = "debug", err, fields(group_id = ?request.group_id))]
#[instrument(skip_all, level = "debug", err)]
async fn update_group(&self, request: UpdateGroupRequest) -> Result<()> {
debug!(?request.group_id);
let update_group = model::groups::ActiveModel {
group_id: ActiveValue::Set(request.group_id),
display_name: request
@@ -151,8 +125,9 @@ impl GroupBackendHandler for SqlBackendHandler {
Ok(())
}
#[instrument(skip(self), level = "debug", ret, err)]
#[instrument(skip_all, level = "debug", ret, err)]
async fn create_group(&self, group_name: &str) -> Result<GroupId> {
debug!(?group_name);
let now = chrono::Utc::now().naive_utc();
let uuid = Uuid::from_name_and_date(group_name, &now);
let new_group = model::groups::ActiveModel {
@@ -164,8 +139,9 @@ impl GroupBackendHandler for SqlBackendHandler {
Ok(new_group.insert(&self.sql_pool).await?.group_id)
}
#[instrument(skip(self), level = "debug", err)]
#[instrument(skip_all, level = "debug", err)]
async fn delete_group(&self, group_id: GroupId) -> Result<()> {
debug!(?group_id);
let res = model::Group::delete_by_id(group_id)
.exec(&self.sql_pool)
.await?;
@@ -183,7 +159,6 @@ impl GroupBackendHandler for SqlBackendHandler {
mod tests {
use super::*;
use crate::domain::{handler::SubStringFilter, sql_backend_handler::tests::*, types::UserId};
use pretty_assertions::assert_eq;
async fn get_group_ids(
handler: &SqlBackendHandler,

View File

@@ -1,19 +1,17 @@
use crate::domain::{
sql_tables::{DbConnection, SchemaVersion, LAST_SCHEMA_VERSION},
types::{AttributeType, GroupId, JpegPhoto, Serialized, UserId, Uuid},
sql_tables::{DbConnection, SchemaVersion},
types::{GroupId, UserId, Uuid},
};
use itertools::Itertools;
use sea_orm::{
sea_query::{
self, all, ColumnDef, Expr, ForeignKey, ForeignKeyAction, Func, Index, Query, Table, Value,
},
ConnectionTrait, DatabaseTransaction, DbErr, DeriveIden, FromQueryResult, Iden, Order,
Statement, TransactionTrait,
sea_query::{self, ColumnDef, Expr, ForeignKey, ForeignKeyAction, Query, Table, Value},
ConnectionTrait, FromQueryResult, Iden, Statement, TransactionTrait,
};
use serde::{Deserialize, Serialize};
use tracing::{error, info, instrument, warn};
use tracing::{info, instrument, warn};
#[derive(DeriveIden, PartialEq, Eq, Debug, Serialize, Deserialize, Clone, Copy)]
use super::sql_tables::LAST_SCHEMA_VERSION;
#[derive(Iden, PartialEq, Eq, Debug, Serialize, Deserialize, Clone, Copy)]
pub enum Users {
Table,
UserId,
@@ -29,7 +27,7 @@ pub enum Users {
Uuid,
}
#[derive(DeriveIden, PartialEq, Eq, Debug, Serialize, Deserialize, Clone, Copy)]
#[derive(Iden, PartialEq, Eq, Debug, Serialize, Deserialize, Clone, Copy)]
pub enum Groups {
Table,
GroupId,
@@ -38,53 +36,15 @@ pub enum Groups {
Uuid,
}
#[derive(DeriveIden, Clone, Copy)]
#[derive(Iden, Clone, Copy)]
pub enum Memberships {
Table,
UserId,
GroupId,
}
#[derive(DeriveIden, PartialEq, Eq, Debug, Serialize, Deserialize, Clone, Copy)]
pub enum UserAttributeSchema {
Table,
UserAttributeSchemaName,
UserAttributeSchemaType,
UserAttributeSchemaIsList,
UserAttributeSchemaIsUserVisible,
UserAttributeSchemaIsUserEditable,
UserAttributeSchemaIsHardcoded,
}
#[derive(DeriveIden, PartialEq, Eq, Debug, Serialize, Deserialize, Clone, Copy)]
pub enum UserAttributes {
Table,
UserAttributeUserId,
UserAttributeName,
UserAttributeValue,
}
#[derive(DeriveIden, PartialEq, Eq, Debug, Serialize, Deserialize, Clone, Copy)]
pub enum GroupAttributeSchema {
Table,
GroupAttributeSchemaName,
GroupAttributeSchemaType,
GroupAttributeSchemaIsList,
GroupAttributeSchemaIsGroupVisible,
GroupAttributeSchemaIsGroupEditable,
GroupAttributeSchemaIsHardcoded,
}
#[derive(DeriveIden, PartialEq, Eq, Debug, Serialize, Deserialize, Clone, Copy)]
pub enum GroupAttributes {
Table,
GroupAttributeGroupId,
GroupAttributeName,
GroupAttributeValue,
}
// Metadata about the SQL DB.
#[derive(DeriveIden)]
#[derive(Iden)]
pub enum Metadata {
Table,
// Which version of the schema we're at.
@@ -375,64 +335,72 @@ pub async fn upgrade_to_v1(pool: &DbConnection) -> std::result::Result<(), sea_o
}
async fn replace_column<I: Iden + Copy + 'static, const N: usize>(
transaction: DatabaseTransaction,
pool: &DbConnection,
table_name: I,
column_name: I,
mut new_column: ColumnDef,
update_values: [Statement; N],
) -> Result<DatabaseTransaction, DbErr> {
) -> anyhow::Result<()> {
// Update the definition of a column (in a compatible way). Due to Sqlite, this is more complicated:
// - rename the column to a temporary name
// - create the column with the new definition
// - copy the data from the temp column to the new one
// - update the new one if there are changes needed
// - drop the old one
let builder = transaction.get_database_backend();
#[derive(DeriveIden)]
enum TempTable {
TempName,
}
transaction
.execute(
builder.build(
Table::alter()
.table(table_name)
.rename_column(column_name, TempTable::TempName),
),
)
.await?;
transaction
.execute(builder.build(Table::alter().table(table_name).add_column(&mut new_column)))
.await?;
transaction
.execute(
builder.build(
Query::update()
.table(table_name)
.value(column_name, Expr::col((table_name, TempTable::TempName))),
),
)
.await?;
for statement in update_values {
transaction.execute(statement).await?;
}
transaction
.execute(
builder.build(
Table::alter()
.table(table_name)
.drop_column(TempTable::TempName),
),
)
.await?;
Ok(transaction)
let builder = pool.get_database_backend();
pool.transaction::<_, (), sea_orm::DbErr>(move |transaction| {
Box::pin(async move {
#[derive(Iden)]
enum TempTable {
TempName,
}
transaction
.execute(
builder.build(
Table::alter()
.table(table_name)
.rename_column(column_name, TempTable::TempName),
),
)
.await?;
transaction
.execute(
builder.build(Table::alter().table(table_name).add_column(&mut new_column)),
)
.await?;
transaction
.execute(
builder.build(
Query::update()
.table(table_name)
.value(column_name, Expr::col((table_name, TempTable::TempName))),
),
)
.await?;
for statement in update_values {
transaction.execute(statement).await?;
}
transaction
.execute(
builder.build(
Table::alter()
.table(table_name)
.drop_column(TempTable::TempName),
),
)
.await?;
Ok(())
})
})
.await?;
Ok(())
}
async fn migrate_to_v2(transaction: DatabaseTransaction) -> Result<DatabaseTransaction, DbErr> {
let builder = transaction.get_database_backend();
async fn migrate_to_v2(pool: &DbConnection) -> anyhow::Result<()> {
let builder = pool.get_database_backend();
// Allow nulls in DisplayName, and change empty string to null.
let transaction = replace_column(
transaction,
replace_column(
pool,
Users::Table,
Users::DisplayName,
ColumnDef::new(Users::DisplayName)
@@ -446,14 +414,14 @@ async fn migrate_to_v2(transaction: DatabaseTransaction) -> Result<DatabaseTrans
)],
)
.await?;
Ok(transaction)
Ok(())
}
async fn migrate_to_v3(transaction: DatabaseTransaction) -> Result<DatabaseTransaction, DbErr> {
let builder = transaction.get_database_backend();
async fn migrate_to_v3(pool: &DbConnection) -> anyhow::Result<()> {
let builder = pool.get_database_backend();
// Allow nulls in First and LastName. Users who created their DB in 0.4.1 have the not null constraint.
let transaction = replace_column(
transaction,
replace_column(
pool,
Users::Table,
Users::FirstName,
ColumnDef::new(Users::FirstName).string_len(255).to_owned(),
@@ -465,8 +433,8 @@ async fn migrate_to_v3(transaction: DatabaseTransaction) -> Result<DatabaseTrans
)],
)
.await?;
let transaction = replace_column(
transaction,
replace_column(
pool,
Users::Table,
Users::LastName,
ColumnDef::new(Users::LastName).string_len(255).to_owned(),
@@ -479,8 +447,8 @@ async fn migrate_to_v3(transaction: DatabaseTransaction) -> Result<DatabaseTrans
)
.await?;
// Change Avatar from binary to blob(long), because for MySQL this is 64kb.
let transaction = replace_column(
transaction,
replace_column(
pool,
Users::Table,
Users::Avatar,
ColumnDef::new(Users::Avatar)
@@ -489,436 +457,35 @@ async fn migrate_to_v3(transaction: DatabaseTransaction) -> Result<DatabaseTrans
[],
)
.await?;
Ok(transaction)
}
/// Schema migration to v4: adds unique indices on user emails, user UUIDs and
/// group UUIDs.
///
/// Runs inside the migration transaction and passes it back on success so the
/// caller can chain further migration steps. If the unique email index cannot
/// be created (i.e. duplicate emails already exist in the DB), the conflicting
/// (email, user) pairs are logged so the admin can fix them, and the original
/// error is returned, aborting the migration.
async fn migrate_to_v4(transaction: DatabaseTransaction) -> Result<DatabaseTransaction, DbErr> {
    let builder = transaction.get_database_backend();
    // Make emails and UUIDs unique.
    if let Err(e) = transaction
        .execute(
            builder.build(
                Index::create()
                    .if_not_exists()
                    .name("unique-user-email")
                    .table(Users::Table)
                    .col(Users::Email)
                    .unique(),
            ),
        )
        .await
    {
        // Index creation failed: find and report every duplicated email so the
        // admin knows which accounts to clean up before retrying.
        error!(
            r#"Found several users with the same email.
See https://github.com/lldap/lldap/blob/main/docs/migration_guides/v0.5.md for details.
Conflicting emails:
"#,
        );
        // Select (email, user_id) for all users whose email appears more than
        // once, sorted so that itertools::group_by sees equal emails adjacent.
        for (email, users) in &transaction
            .query_all(
                builder.build(
                    Query::select()
                        .from(Users::Table)
                        .columns([Users::Email, Users::UserId])
                        .order_by_columns([(Users::Email, Order::Asc), (Users::UserId, Order::Asc)])
                        .and_where(
                            Expr::col(Users::Email).in_subquery(
                                Query::select()
                                    .from(Users::Table)
                                    .column(Users::Email)
                                    .group_by_col(Users::Email)
                                    .cond_having(all![Expr::gt(
                                        Expr::expr(Func::count(Expr::col(Users::Email))),
                                        1
                                    )])
                                    .take(),
                            ),
                        ),
                ),
            )
            .await
            .expect("Could not check duplicate users")
            .into_iter()
            .map(|row| {
                (
                    row.try_get::<UserId>("", &Users::UserId.to_string())
                        .unwrap(),
                    row.try_get::<String>("", &Users::Email.to_string())
                        .unwrap(),
                )
            })
            .group_by(|(_user, email)| email.to_owned())
        {
            warn!("Email: {email}");
            for (user, _email) in users {
                warn!(" User: {}", user.as_str());
            }
        }
        // Propagate the index-creation failure to abort the migration.
        return Err(e);
    }
    transaction
        .execute(
            builder.build(
                Index::create()
                    .if_not_exists()
                    .name("unique-user-uuid")
                    .table(Users::Table)
                    .col(Users::Uuid)
                    .unique(),
            ),
        )
        .await?;
    transaction
        .execute(
            builder.build(
                Index::create()
                    .if_not_exists()
                    .name("unique-group-uuid")
                    .table(Groups::Table)
                    .col(Groups::Uuid)
                    .unique(),
            ),
        )
        .await?;
    Ok(transaction)
}
async fn migrate_to_v5(transaction: DatabaseTransaction) -> Result<DatabaseTransaction, DbErr> {
let builder = transaction.get_database_backend();
transaction
.execute(
builder.build(
Table::create()
.table(UserAttributeSchema::Table)
.col(
ColumnDef::new(UserAttributeSchema::UserAttributeSchemaName)
.string_len(64)
.not_null()
.primary_key(),
)
.col(
ColumnDef::new(UserAttributeSchema::UserAttributeSchemaType)
.string_len(64)
.not_null(),
)
.col(
ColumnDef::new(UserAttributeSchema::UserAttributeSchemaIsList)
.boolean()
.not_null(),
)
.col(
ColumnDef::new(UserAttributeSchema::UserAttributeSchemaIsUserVisible)
.boolean()
.not_null(),
)
.col(
ColumnDef::new(UserAttributeSchema::UserAttributeSchemaIsUserEditable)
.boolean()
.not_null(),
)
.col(
ColumnDef::new(UserAttributeSchema::UserAttributeSchemaIsHardcoded)
.boolean()
.not_null(),
),
),
)
.await?;
transaction
.execute(
builder.build(
Table::create()
.table(GroupAttributeSchema::Table)
.col(
ColumnDef::new(GroupAttributeSchema::GroupAttributeSchemaName)
.string_len(64)
.not_null()
.primary_key(),
)
.col(
ColumnDef::new(GroupAttributeSchema::GroupAttributeSchemaType)
.string_len(64)
.not_null(),
)
.col(
ColumnDef::new(GroupAttributeSchema::GroupAttributeSchemaIsList)
.boolean()
.not_null(),
)
.col(
ColumnDef::new(GroupAttributeSchema::GroupAttributeSchemaIsGroupVisible)
.boolean()
.not_null(),
)
.col(
ColumnDef::new(GroupAttributeSchema::GroupAttributeSchemaIsGroupEditable)
.boolean()
.not_null(),
)
.col(
ColumnDef::new(GroupAttributeSchema::GroupAttributeSchemaIsHardcoded)
.boolean()
.not_null(),
),
),
)
.await?;
transaction
.execute(
builder.build(
Table::create()
.table(UserAttributes::Table)
.col(
ColumnDef::new(UserAttributes::UserAttributeUserId)
.string_len(255)
.not_null(),
)
.col(
ColumnDef::new(UserAttributes::UserAttributeName)
.string_len(64)
.not_null(),
)
.col(
ColumnDef::new(UserAttributes::UserAttributeValue)
.blob(sea_query::BlobSize::Long)
.not_null(),
)
.foreign_key(
ForeignKey::create()
.name("UserAttributeUserIdForeignKey")
.from(UserAttributes::Table, UserAttributes::UserAttributeUserId)
.to(Users::Table, Users::UserId)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.foreign_key(
ForeignKey::create()
.name("UserAttributeNameForeignKey")
.from(UserAttributes::Table, UserAttributes::UserAttributeName)
.to(
UserAttributeSchema::Table,
UserAttributeSchema::UserAttributeSchemaName,
)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.primary_key(
Index::create()
.col(UserAttributes::UserAttributeUserId)
.col(UserAttributes::UserAttributeName),
),
),
)
.await?;
transaction
.execute(
builder.build(
Table::create()
.table(GroupAttributes::Table)
.col(
ColumnDef::new(GroupAttributes::GroupAttributeGroupId)
.integer()
.not_null(),
)
.col(
ColumnDef::new(GroupAttributes::GroupAttributeName)
.string_len(64)
.not_null(),
)
.col(
ColumnDef::new(GroupAttributes::GroupAttributeValue)
.blob(sea_query::BlobSize::Long)
.not_null(),
)
.foreign_key(
ForeignKey::create()
.name("GroupAttributeGroupIdForeignKey")
.from(
GroupAttributes::Table,
GroupAttributes::GroupAttributeGroupId,
)
.to(Groups::Table, Groups::GroupId)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.foreign_key(
ForeignKey::create()
.name("GroupAttributeNameForeignKey")
.from(GroupAttributes::Table, GroupAttributes::GroupAttributeName)
.to(
GroupAttributeSchema::Table,
GroupAttributeSchema::GroupAttributeSchemaName,
)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade),
)
.primary_key(
Index::create()
.col(GroupAttributes::GroupAttributeGroupId)
.col(GroupAttributes::GroupAttributeName),
),
),
)
.await?;
transaction
.execute(
builder.build(
Query::insert()
.into_table(UserAttributeSchema::Table)
.columns([
UserAttributeSchema::UserAttributeSchemaName,
UserAttributeSchema::UserAttributeSchemaType,
UserAttributeSchema::UserAttributeSchemaIsList,
UserAttributeSchema::UserAttributeSchemaIsUserVisible,
UserAttributeSchema::UserAttributeSchemaIsUserEditable,
UserAttributeSchema::UserAttributeSchemaIsHardcoded,
])
.values_panic([
"first_name".into(),
AttributeType::String.into(),
false.into(),
true.into(),
true.into(),
true.into(),
])
.values_panic([
"last_name".into(),
AttributeType::String.into(),
false.into(),
true.into(),
true.into(),
true.into(),
])
.values_panic([
"avatar".into(),
AttributeType::JpegPhoto.into(),
false.into(),
true.into(),
true.into(),
true.into(),
]),
),
)
.await?;
{
let mut user_statement = Query::insert()
.into_table(UserAttributes::Table)
.columns([
UserAttributes::UserAttributeUserId,
UserAttributes::UserAttributeName,
UserAttributes::UserAttributeValue,
])
.to_owned();
#[derive(FromQueryResult)]
struct FullUserDetails {
user_id: UserId,
first_name: Option<String>,
last_name: Option<String>,
avatar: Option<JpegPhoto>,
}
let mut any_user = false;
for user in FullUserDetails::find_by_statement(builder.build(
Query::select().from(Users::Table).columns([
Users::UserId,
Users::FirstName,
Users::LastName,
Users::Avatar,
]),
))
.all(&transaction)
.await?
{
if let Some(name) = &user.first_name {
any_user = true;
user_statement.values_panic([
user.user_id.clone().into(),
"first_name".into(),
Serialized::from(name).into(),
]);
}
if let Some(name) = &user.last_name {
any_user = true;
user_statement.values_panic([
user.user_id.clone().into(),
"last_name".into(),
Serialized::from(name).into(),
]);
}
if let Some(avatar) = &user.avatar {
any_user = true;
user_statement.values_panic([
user.user_id.clone().into(),
"avatar".into(),
Serialized::from(avatar).into(),
]);
}
}
if any_user {
transaction.execute(builder.build(&user_statement)).await?;
}
}
for column in [Users::FirstName, Users::LastName, Users::Avatar] {
transaction
.execute(builder.build(Table::alter().table(Users::Table).drop_column(column)))
.await?;
}
Ok(transaction)
}
// This is needed to make an array of async functions.
macro_rules! to_sync {
($l:ident) => {
move |transaction| -> std::pin::Pin<
Box<dyn std::future::Future<Output = Result<DatabaseTransaction, DbErr>>>,
> { Box::pin($l(transaction)) }
};
Ok(())
}
pub async fn migrate_from_version(
pool: &DbConnection,
version: SchemaVersion,
last_version: SchemaVersion,
) -> anyhow::Result<()> {
match version.cmp(&last_version) {
std::cmp::Ordering::Less => (),
match version.cmp(&LAST_SCHEMA_VERSION) {
std::cmp::Ordering::Less => info!(
"Upgrading DB schema from {} to {}",
version.0, LAST_SCHEMA_VERSION.0
),
std::cmp::Ordering::Equal => return Ok(()),
std::cmp::Ordering::Greater => anyhow::bail!("DB version downgrading is not supported"),
}
info!("Upgrading DB schema from version {}", version.0);
let migrations = [
to_sync!(migrate_to_v2),
to_sync!(migrate_to_v3),
to_sync!(migrate_to_v4),
to_sync!(migrate_to_v5),
];
assert_eq!(migrations.len(), (LAST_SCHEMA_VERSION.0 - 1) as usize);
for migration in 2..=last_version.0 {
if version < SchemaVersion(migration) && SchemaVersion(migration) <= last_version {
info!("Upgrading DB schema to version {}", migration);
let transaction = pool.begin().await?;
let transaction = migrations[(migration - 2) as usize](transaction).await?;
let builder = transaction.get_database_backend();
transaction
.execute(
builder.build(
Query::update()
.table(Metadata::Table)
.value(Metadata::Version, Value::from(migration)),
),
)
.await?;
transaction.commit().await?;
}
if version < SchemaVersion(2) {
migrate_to_v2(pool).await?;
}
if version < SchemaVersion(3) {
migrate_to_v3(pool).await?;
}
let builder = pool.get_database_backend();
pool.execute(
builder.build(
Query::update()
.table(Metadata::Table)
.value(Metadata::Version, Value::from(LAST_SCHEMA_VERSION)),
),
)
.await?;
Ok(())
}

View File

@@ -15,7 +15,7 @@ use tracing::{debug, instrument};
type SqlOpaqueHandler = SqlBackendHandler;
#[instrument(skip_all, level = "debug", err, fields(username = %username.as_str()))]
#[instrument(skip_all, level = "debug", err)]
fn passwords_match(
password_file_bytes: &[u8],
clear_password: &str,
@@ -49,7 +49,7 @@ impl SqlBackendHandler {
)?)
}
#[instrument(skip(self), level = "debug", err)]
#[instrument(skip_all, level = "debug", err)]
async fn get_password_file_for_user(&self, user_id: UserId) -> Result<Option<Vec<u8>>> {
// Fetch the previously registered password file from the DB.
Ok(model::User::find_by_id(user_id)
@@ -201,7 +201,7 @@ impl OpaqueHandler for SqlOpaqueHandler {
}
/// Convenience function to set a user's password.
#[instrument(skip_all, level = "debug", err, fields(username = %username.as_str()))]
#[instrument(skip_all, level = "debug", err)]
pub(crate) async fn register_password(
opaque_handler: &SqlOpaqueHandler,
username: &UserId,
@@ -210,7 +210,7 @@ pub(crate) async fn register_password(
let mut rng = rand::rngs::OsRng;
use registration::*;
let registration_start =
opaque::client::registration::start_registration(password.unsecure().as_bytes(), &mut rng)?;
opaque::client::registration::start_registration(password.unsecure(), &mut rng)?;
let start_response = opaque_handler
.registration_start(ClientRegistrationStartRequest {
username: username.to_string(),

View File

@@ -1,96 +0,0 @@
use crate::domain::{
error::Result,
handler::{AttributeSchema, Schema, SchemaBackendHandler},
model,
sql_backend_handler::SqlBackendHandler,
};
use async_trait::async_trait;
use sea_orm::{EntityTrait, QueryOrder};
use super::handler::AttributeList;
#[async_trait]
impl SchemaBackendHandler for SqlBackendHandler {
async fn get_schema(&self) -> Result<Schema> {
Ok(Schema {
user_attributes: AttributeList {
attributes: self.get_user_attributes().await?,
},
group_attributes: AttributeList {
attributes: self.get_group_attributes().await?,
},
})
}
}
impl SqlBackendHandler {
async fn get_user_attributes(&self) -> Result<Vec<AttributeSchema>> {
Ok(model::UserAttributeSchema::find()
.order_by_asc(model::UserAttributeSchemaColumn::AttributeName)
.all(&self.sql_pool)
.await?
.into_iter()
.map(|m| m.into())
.collect())
}
async fn get_group_attributes(&self) -> Result<Vec<AttributeSchema>> {
Ok(model::GroupAttributeSchema::find()
.order_by_asc(model::GroupAttributeSchemaColumn::AttributeName)
.all(&self.sql_pool)
.await?
.into_iter()
.map(|m| m.into())
.collect())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::domain::{
handler::AttributeList, sql_backend_handler::tests::*, types::AttributeType,
};
use pretty_assertions::assert_eq;
#[tokio::test]
async fn test_default_schema() {
let fixture = TestFixture::new().await;
assert_eq!(
fixture.handler.get_schema().await.unwrap(),
Schema {
user_attributes: AttributeList {
attributes: vec![
AttributeSchema {
name: "avatar".to_owned(),
attribute_type: AttributeType::JpegPhoto,
is_list: false,
is_visible: true,
is_editable: true,
is_hardcoded: true,
},
AttributeSchema {
name: "first_name".to_owned(),
attribute_type: AttributeType::String,
is_list: false,
is_visible: true,
is_editable: true,
is_hardcoded: true,
},
AttributeSchema {
name: "last_name".to_owned(),
attribute_type: AttributeType::String,
is_list: false,
is_visible: true,
is_editable: true,
is_hardcoded: true,
}
]
},
group_attributes: AttributeList {
attributes: Vec::new()
}
}
);
}
}

View File

@@ -1,12 +1,27 @@
use super::sql_migrations::{get_schema_version, migrate_from_version, upgrade_to_v1};
use sea_orm::{DeriveValueType, QueryResult, Value};
use sea_orm::Value;
pub type DbConnection = sea_orm::DatabaseConnection;
#[derive(Copy, PartialEq, Eq, Debug, Clone, PartialOrd, Ord, DeriveValueType)]
#[derive(Copy, PartialEq, Eq, Debug, Clone, PartialOrd, Ord)]
pub struct SchemaVersion(pub i16);
pub const LAST_SCHEMA_VERSION: SchemaVersion = SchemaVersion(5);
impl sea_orm::TryGetable for SchemaVersion {
fn try_get_by<I: sea_orm::ColIdx>(
res: &sea_orm::QueryResult,
index: I,
) -> Result<Self, sea_orm::TryGetError> {
Ok(SchemaVersion(i16::try_get_by(res, index)?))
}
}
impl From<SchemaVersion> for Value {
fn from(version: SchemaVersion) -> Self {
version.0.into()
}
}
pub const LAST_SCHEMA_VERSION: SchemaVersion = SchemaVersion(3);
pub async fn init_table(pool: &DbConnection) -> anyhow::Result<()> {
let version = {
@@ -17,7 +32,7 @@ pub async fn init_table(pool: &DbConnection) -> anyhow::Result<()> {
SchemaVersion(1)
}
};
migrate_from_version(pool, version, LAST_SCHEMA_VERSION).await?;
migrate_from_version(pool, version).await?;
Ok(())
}
@@ -25,14 +40,12 @@ pub async fn init_table(pool: &DbConnection) -> anyhow::Result<()> {
mod tests {
use crate::domain::{
sql_migrations,
types::{GroupId, JpegPhoto, Serialized, Uuid},
types::{GroupId, Uuid},
};
use pretty_assertions::assert_eq;
use super::*;
use chrono::prelude::*;
use sea_orm::{ConnectionTrait, Database, DbBackend, FromQueryResult};
use tracing::error;
async fn get_in_memory_db() -> DbConnection {
let mut sql_opt = sea_orm::ConnectOptions::new("sqlite::memory:".to_owned());
@@ -48,22 +61,10 @@ mod tests {
async fn test_init_table() {
let sql_pool = get_in_memory_db().await;
init_table(&sql_pool).await.unwrap();
sql_pool
.execute(raw_statement(
r#"INSERT INTO users
(user_id, email, display_name, creation_date, password_hash, uuid)
VALUES ("bôb", "böb@bob.bob", "Bob Bobbersön", "1970-01-01 00:00:00", "bob00", "abc")"#,
))
.await
.unwrap();
sql_pool
.execute(raw_statement(
r#"INSERT INTO user_attributes
(user_attribute_user_id, user_attribute_name, user_attribute_value)
VALUES ("bôb", "first_name", "Bob")"#,
))
.await
.unwrap();
sql_pool.execute(raw_statement(
r#"INSERT INTO users
(user_id, email, display_name, first_name, last_name, creation_date, password_hash, uuid)
VALUES ("bôb", "böb@bob.bob", "Bob Bobbersön", "Bob", "Bobberson", "1970-01-01 00:00:00", "bob00", "abc")"#)).await.unwrap();
#[derive(FromQueryResult, PartialEq, Eq, Debug)]
struct ShortUserDetails {
display_name: String,
@@ -95,26 +96,25 @@ mod tests {
#[tokio::test]
async fn test_migrate_tables() {
crate::infra::logging::init_for_tests();
// Test that we add the column creation_date to groups and uuid to users and groups.
let sql_pool = get_in_memory_db().await;
sql_pool
.execute(raw_statement(
r#"CREATE TABLE users ( user_id TEXT PRIMARY KEY, display_name TEXT, first_name TEXT NOT NULL, last_name TEXT, avatar BLOB, creation_date TEXT, email TEXT);"#,
r#"CREATE TABLE users ( user_id TEXT, display_name TEXT, first_name TEXT NOT NULL, last_name TEXT, avatar BLOB, creation_date TEXT);"#,
))
.await
.unwrap();
sql_pool
.execute(raw_statement(
r#"INSERT INTO users (user_id, display_name, first_name, creation_date, email)
VALUES ("bôb", "", "", "1970-01-01 00:00:00", "bob@bob.com")"#,
r#"INSERT INTO users (user_id, display_name, first_name, creation_date)
VALUES ("bôb", "", "", "1970-01-01 00:00:00")"#,
))
.await
.unwrap();
sql_pool
.execute(raw_statement(
r#"INSERT INTO users (user_id, display_name, first_name, creation_date, email)
VALUES ("john", "John Doe", "John", "1971-01-01 00:00:00", "bob2@bob.com")"#,
r#"INSERT INTO users (user_id, display_name, first_name, creation_date)
VALUES ("john", "John Doe", "John", "1971-01-01 00:00:00")"#,
))
.await
.unwrap();
@@ -142,11 +142,12 @@ mod tests {
#[derive(FromQueryResult, PartialEq, Eq, Debug)]
struct SimpleUser {
display_name: Option<String>,
first_name: Option<String>,
uuid: Uuid,
}
assert_eq!(
SimpleUser::find_by_statement(raw_statement(
r#"SELECT display_name, uuid FROM users ORDER BY display_name"#
r#"SELECT display_name, first_name, uuid FROM users ORDER BY display_name"#
))
.all(&sql_pool)
.await
@@ -154,36 +155,17 @@ mod tests {
vec![
SimpleUser {
display_name: None,
first_name: None,
uuid: crate::uuid!("a02eaf13-48a7-30f6-a3d4-040ff7c52b04")
},
SimpleUser {
display_name: Some("John Doe".to_owned()),
first_name: Some("John".to_owned()),
uuid: crate::uuid!("986765a5-3f03-389e-b47b-536b2d6e1bec")
}
]
);
#[derive(FromQueryResult, PartialEq, Eq, Debug)]
struct UserAttribute {
user_attribute_user_id: String,
user_attribute_name: String,
user_attribute_value: Serialized,
}
assert_eq!(
UserAttribute::find_by_statement(raw_statement(
r#"SELECT user_attribute_user_id, user_attribute_name, user_attribute_value FROM user_attributes ORDER BY user_attribute_user_id, user_attribute_value"#
))
.all(&sql_pool)
.await
.unwrap(),
vec![
UserAttribute {
user_attribute_user_id: "john".to_owned(),
user_attribute_name: "first_name".to_owned(),
user_attribute_value: Serialized::from("John"),
}
]
);
#[derive(FromQueryResult, PartialEq, Eq, Debug)]
struct ShortGroupDetails {
group_id: GroupId,
display_name: String,
@@ -224,155 +206,6 @@ mod tests {
);
}
#[tokio::test]
async fn test_migration_to_v4() {
crate::infra::logging::init_for_tests();
let sql_pool = get_in_memory_db().await;
upgrade_to_v1(&sql_pool).await.unwrap();
migrate_from_version(&sql_pool, SchemaVersion(1), SchemaVersion(3))
.await
.unwrap();
sql_pool
.execute(raw_statement(
r#"INSERT INTO users (user_id, email, display_name, first_name, creation_date, uuid)
VALUES ("bob", "bob@bob.com", "", "", "1970-01-01 00:00:00", "a02eaf13-48a7-30f6-a3d4-040ff7c52b04")"#,
))
.await
.unwrap();
sql_pool
.execute(raw_statement(
r#"INSERT INTO users (user_id, email, display_name, first_name, creation_date, uuid)
VALUES ("bob2", "bob@bob.com", "", "", "1970-01-01 00:00:00", "986765a5-3f03-389e-b47b-536b2d6e1bec")"#,
))
.await
.unwrap();
error!(
"{}",
migrate_from_version(&sql_pool, SchemaVersion(3), SchemaVersion(4))
.await
.expect_err("migration should fail")
);
assert_eq!(
sql_migrations::JustSchemaVersion::find_by_statement(raw_statement(
r#"SELECT version FROM metadata"#
))
.one(&sql_pool)
.await
.unwrap()
.unwrap(),
sql_migrations::JustSchemaVersion {
version: SchemaVersion(3)
}
);
sql_pool
.execute(raw_statement(
r#"UPDATE users SET email = "new@bob.com" WHERE user_id = "bob2""#,
))
.await
.unwrap();
migrate_from_version(&sql_pool, SchemaVersion(3), SchemaVersion(4))
.await
.unwrap();
assert_eq!(
sql_migrations::JustSchemaVersion::find_by_statement(raw_statement(
r#"SELECT version FROM metadata"#
))
.one(&sql_pool)
.await
.unwrap()
.unwrap(),
sql_migrations::JustSchemaVersion {
version: SchemaVersion(4)
}
);
}
#[tokio::test]
async fn test_migration_to_v5() {
crate::infra::logging::init_for_tests();
let sql_pool = get_in_memory_db().await;
upgrade_to_v1(&sql_pool).await.unwrap();
migrate_from_version(&sql_pool, SchemaVersion(1), SchemaVersion(4))
.await
.unwrap();
sql_pool
.execute(raw_statement(
r#"INSERT INTO users (user_id, email, creation_date, uuid)
VALUES ("bob", "bob@bob.com", "1970-01-01 00:00:00", "a02eaf13-48a7-30f6-a3d4-040ff7c52b04")"#,
))
.await
.unwrap();
sql_pool
.execute(sea_orm::Statement::from_sql_and_values(DbBackend::Sqlite,
r#"INSERT INTO users (user_id, email, display_name, first_name, last_name, avatar, creation_date, uuid)
VALUES ("bob2", "bob2@bob.com", "display bob", "first bob", "last bob", $1, "1970-01-01 00:00:00", "986765a5-3f03-389e-b47b-536b2d6e1bec")"#, [JpegPhoto::for_tests().into()]),
)
.await
.unwrap();
migrate_from_version(&sql_pool, SchemaVersion(4), SchemaVersion(5))
.await
.unwrap();
assert_eq!(
sql_migrations::JustSchemaVersion::find_by_statement(raw_statement(
r#"SELECT version FROM metadata"#
))
.one(&sql_pool)
.await
.unwrap()
.unwrap(),
sql_migrations::JustSchemaVersion {
version: SchemaVersion(5)
}
);
#[derive(FromQueryResult, PartialEq, Eq, Debug)]
pub struct UserV5 {
user_id: String,
email: String,
display_name: Option<String>,
}
assert_eq!(
UserV5::find_by_statement(raw_statement(
r#"SELECT user_id, email, display_name FROM users ORDER BY user_id ASC"#
))
.all(&sql_pool)
.await
.unwrap(),
vec![
UserV5 {
user_id: "bob".to_owned(),
email: "bob@bob.com".to_owned(),
display_name: None
},
UserV5 {
user_id: "bob2".to_owned(),
email: "bob2@bob.com".to_owned(),
display_name: Some("display bob".to_owned())
},
]
);
sql_pool
.execute(raw_statement(r#"SELECT first_name FROM users"#))
.await
.unwrap_err();
#[derive(FromQueryResult, PartialEq, Eq, Debug)]
pub struct UserAttribute {
user_attribute_user_id: String,
user_attribute_name: String,
user_attribute_value: Serialized,
}
assert_eq!(
UserAttribute::find_by_statement(raw_statement(r#"SELECT * FROM user_attributes ORDER BY user_attribute_user_id, user_attribute_name ASC"#))
.all(&sql_pool)
.await
.unwrap(),
vec![
UserAttribute { user_attribute_user_id: "bob2".to_string(), user_attribute_name: "avatar".to_owned(), user_attribute_value: Serialized::from(&JpegPhoto::for_tests()) },
UserAttribute { user_attribute_user_id: "bob2".to_string(), user_attribute_name: "first_name".to_owned(), user_attribute_value: Serialized::from("first bob") },
UserAttribute { user_attribute_user_id: "bob2".to_string(), user_attribute_name: "last_name".to_owned(), user_attribute_value: Serialized::from("last bob") },
]
);
}
#[tokio::test]
async fn test_too_high_version() {
let sql_pool = get_in_memory_db().await;

View File

@@ -6,31 +6,17 @@ use crate::domain::{
},
model::{self, GroupColumn, UserColumn},
sql_backend_handler::SqlBackendHandler,
types::{AttributeValue, GroupDetails, GroupId, Serialized, User, UserAndGroups, UserId, Uuid},
types::{GroupDetails, GroupId, User, UserAndGroups, UserId, Uuid},
};
use async_trait::async_trait;
use sea_orm::{
sea_query::{
query::OnConflict, Alias, Cond, Expr, Func, IntoColumnRef, IntoCondition, SimpleExpr,
},
ActiveModelTrait, ActiveValue, ColumnTrait, EntityTrait, IntoActiveValue, ModelTrait,
QueryFilter, QueryOrder, QuerySelect, QueryTrait, Set, TransactionTrait,
entity::IntoActiveValue,
sea_query::{Alias, Cond, Expr, Func, IntoColumnRef, IntoCondition, SimpleExpr},
ActiveModelTrait, ActiveValue, ColumnTrait, EntityTrait, ModelTrait, QueryFilter, QueryOrder,
QuerySelect, QueryTrait, Set,
};
use std::collections::HashSet;
use tracing::instrument;
fn attribute_condition(name: String, value: String) -> Cond {
Expr::in_subquery(
Expr::col(UserColumn::UserId.as_column_ref()),
model::UserAttributes::find()
.select_only()
.column(model::UserAttributesColumn::UserId)
.filter(model::UserAttributesColumn::AttributeName.eq(name))
.filter(model::UserAttributesColumn::Value.eq(Serialized::from(&value)))
.into_query(),
)
.into_condition()
}
use tracing::{debug, instrument};
fn get_user_filter_expr(filter: UserRequestFilter) -> Cond {
use UserRequestFilter::*;
@@ -60,7 +46,6 @@ fn get_user_filter_expr(filter: UserRequestFilter) -> Cond {
ColumnTrait::eq(&s1, s2).into_condition()
}
}
AttributeEquality(s1, s2) => attribute_condition(s1, s2),
MemberOf(group) => Expr::col((group_table, GroupColumn::DisplayName))
.eq(group)
.into_condition(),
@@ -68,13 +53,11 @@ fn get_user_filter_expr(filter: UserRequestFilter) -> Cond {
.eq(group_id)
.into_condition(),
UserIdSubString(filter) => UserColumn::UserId
.like(&filter.to_sql_filter())
.into_condition(),
SubString(col, filter) => SimpleExpr::FunctionCall(Func::lower(Expr::col(col)))
.like(filter.to_sql_filter())
.into_condition(),
SubString(col, filter) => {
SimpleExpr::FunctionCall(Func::lower(Expr::col(col.as_column_ref())))
.like(filter.to_sql_filter())
.into_condition()
}
}
}
@@ -91,14 +74,14 @@ fn to_value(opt_name: &Option<String>) -> ActiveValue<Option<String>> {
#[async_trait]
impl UserListerBackendHandler for SqlBackendHandler {
#[instrument(skip(self), level = "debug", ret, err)]
#[instrument(skip_all, level = "debug", ret, err)]
async fn list_users(
&self,
filters: Option<UserRequestFilter>,
// To simplify the query, we always fetch groups. TODO: cleanup.
_get_groups: bool,
get_groups: bool,
) -> Result<Vec<UserAndGroups>> {
let mut users: Vec<_> = model::User::find()
debug!(?filters);
let query = model::User::find()
.filter(
filters
.map(|f| {
@@ -115,203 +98,114 @@ impl UserListerBackendHandler for SqlBackendHandler {
})
.unwrap_or_else(|| SimpleExpr::Value(true.into()).into_condition()),
)
.order_by_asc(UserColumn::UserId)
.find_with_linked(model::memberships::UserToGroup)
.order_by_asc(SimpleExpr::Column(
(Alias::new("r1"), GroupColumn::DisplayName).into_column_ref(),
))
.all(&self.sql_pool)
.await?
.into_iter()
.map(|(user, groups)| UserAndGroups {
user: user.into(),
groups: Some(groups.into_iter().map(Into::<GroupDetails>::into).collect()),
})
.collect();
// At this point, the users don't have attributes, we need to populate it with another query.
let user_ids = users.iter().map(|u| &u.user.user_id);
let attributes = model::UserAttributes::find()
.filter(model::UserAttributesColumn::UserId.is_in(user_ids))
.order_by_asc(model::UserAttributesColumn::UserId)
.order_by_asc(model::UserAttributesColumn::AttributeName)
.all(&self.sql_pool)
.await?;
let mut attributes_iter = attributes.into_iter().peekable();
use itertools::Itertools; // For take_while_ref
for user in users.iter_mut() {
assert!(attributes_iter
.peek()
.map(|u| u.user_id >= user.user.user_id)
.unwrap_or(true),
"Attributes are not sorted, users are not sorted, or previous user didn't consume all the attributes");
user.user.attributes = attributes_iter
.take_while_ref(|u| u.user_id == user.user.user_id)
.map(AttributeValue::from)
.collect();
.order_by_asc(UserColumn::UserId);
if !get_groups {
Ok(query
.into_model::<User>()
.all(&self.sql_pool)
.await?
.into_iter()
.map(|u| UserAndGroups {
user: u,
groups: None,
})
.collect())
} else {
let results = query
//find_with_linked?
.find_also_linked(model::memberships::UserToGroup)
.order_by_asc(SimpleExpr::Column(
(Alias::new("r1"), GroupColumn::GroupId).into_column_ref(),
))
.all(&self.sql_pool)
.await?;
use itertools::Itertools;
Ok(results
.iter()
.group_by(|(u, _)| u)
.into_iter()
.map(|(user, groups)| {
let groups: Vec<_> = groups
.into_iter()
.flat_map(|(_, g)| g)
.map(|g| GroupDetails::from(g.clone()))
.collect();
UserAndGroups {
user: user.clone().into(),
groups: Some(groups),
}
})
.collect())
}
Ok(users)
}
}
#[async_trait]
impl UserBackendHandler for SqlBackendHandler {
#[instrument(skip_all, level = "debug", ret, fields(user_id = ?user_id.as_str()))]
#[instrument(skip_all, level = "debug", ret)]
async fn get_user_details(&self, user_id: &UserId) -> Result<User> {
let mut user = User::from(
model::User::find_by_id(user_id.to_owned())
.one(&self.sql_pool)
.await?
.ok_or_else(|| DomainError::EntityNotFound(user_id.to_string()))?,
);
let attributes = model::UserAttributes::find()
.filter(model::UserAttributesColumn::UserId.eq(user_id))
.order_by_asc(model::UserAttributesColumn::AttributeName)
.all(&self.sql_pool)
.await?;
user.attributes = attributes.into_iter().map(AttributeValue::from).collect();
Ok(user)
debug!(?user_id);
model::User::find_by_id(user_id.to_owned())
.into_model::<User>()
.one(&self.sql_pool)
.await?
.ok_or_else(|| DomainError::EntityNotFound(user_id.to_string()))
}
#[instrument(skip_all, level = "debug", ret, err, fields(user_id = ?user_id.as_str()))]
#[instrument(skip_all, level = "debug", ret, err)]
async fn get_user_groups(&self, user_id: &UserId) -> Result<HashSet<GroupDetails>> {
debug!(?user_id);
let user = model::User::find_by_id(user_id.to_owned())
.one(&self.sql_pool)
.await?
.ok_or_else(|| DomainError::EntityNotFound(user_id.to_string()))?;
Ok(HashSet::from_iter(
user.find_linked(model::memberships::UserToGroup)
.into_model::<GroupDetails>()
.all(&self.sql_pool)
.await?
.into_iter()
.map(Into::<GroupDetails>::into),
.await?,
))
}
#[instrument(skip(self), level = "debug", err, fields(user_id = ?request.user_id.as_str()))]
#[instrument(skip_all, level = "debug", err)]
async fn create_user(&self, request: CreateUserRequest) -> Result<()> {
debug!(user_id = ?request.user_id);
let now = chrono::Utc::now().naive_utc();
let uuid = Uuid::from_name_and_date(request.user_id.as_str(), &now);
let new_user = model::users::ActiveModel {
user_id: Set(request.user_id.clone()),
user_id: Set(request.user_id),
email: Set(request.email),
display_name: to_value(&request.display_name),
first_name: to_value(&request.first_name),
last_name: to_value(&request.last_name),
avatar: request.avatar.into_active_value(),
creation_date: ActiveValue::Set(now),
uuid: ActiveValue::Set(uuid),
..Default::default()
};
let mut new_user_attributes = Vec::new();
if let Some(first_name) = request.first_name {
new_user_attributes.push(model::user_attributes::ActiveModel {
user_id: Set(request.user_id.clone()),
attribute_name: Set("first_name".to_owned()),
value: Set(Serialized::from(&first_name)),
});
}
if let Some(last_name) = request.last_name {
new_user_attributes.push(model::user_attributes::ActiveModel {
user_id: Set(request.user_id.clone()),
attribute_name: Set("last_name".to_owned()),
value: Set(Serialized::from(&last_name)),
});
}
if let Some(avatar) = request.avatar {
new_user_attributes.push(model::user_attributes::ActiveModel {
user_id: Set(request.user_id),
attribute_name: Set("avatar".to_owned()),
value: Set(Serialized::from(&avatar)),
});
}
self.sql_pool
.transaction::<_, (), DomainError>(|transaction| {
Box::pin(async move {
new_user.insert(transaction).await?;
if !new_user_attributes.is_empty() {
model::UserAttributes::insert_many(new_user_attributes)
.exec(transaction)
.await?;
}
Ok(())
})
})
.await?;
new_user.insert(&self.sql_pool).await?;
Ok(())
}
#[instrument(skip(self), level = "debug", err, fields(user_id = ?request.user_id.as_str()))]
#[instrument(skip_all, level = "debug", err)]
async fn update_user(&self, request: UpdateUserRequest) -> Result<()> {
debug!(user_id = ?request.user_id);
let update_user = model::users::ActiveModel {
user_id: ActiveValue::Set(request.user_id.clone()),
user_id: ActiveValue::Set(request.user_id),
email: request.email.map(ActiveValue::Set).unwrap_or_default(),
display_name: to_value(&request.display_name),
first_name: to_value(&request.first_name),
last_name: to_value(&request.last_name),
avatar: request.avatar.into_active_value(),
..Default::default()
};
let mut update_user_attributes = Vec::new();
let mut remove_user_attributes = Vec::new();
let to_serialized_value = |s: &Option<String>| match s.as_ref().map(|s| s.as_str()) {
None => None,
Some("") => Some(ActiveValue::NotSet),
Some(s) => Some(ActiveValue::Set(Serialized::from(s))),
};
let mut process_serialized =
|value: ActiveValue<Serialized>, attribute_name: &str| match &value {
ActiveValue::NotSet => {
remove_user_attributes.push(attribute_name.to_owned());
}
ActiveValue::Set(_) => {
update_user_attributes.push(model::user_attributes::ActiveModel {
user_id: Set(request.user_id.clone()),
attribute_name: Set(attribute_name.to_owned()),
value,
})
}
_ => unreachable!(),
};
if let Some(value) = to_serialized_value(&request.first_name) {
process_serialized(value, "first_name");
}
if let Some(value) = to_serialized_value(&request.last_name) {
process_serialized(value, "last_name");
}
if let Some(avatar) = request.avatar {
process_serialized(avatar.into_active_value(), "avatar");
}
self.sql_pool
.transaction::<_, (), DomainError>(|transaction| {
Box::pin(async move {
update_user.update(transaction).await?;
if !update_user_attributes.is_empty() {
model::UserAttributes::insert_many(update_user_attributes)
.on_conflict(
OnConflict::columns([
model::UserAttributesColumn::UserId,
model::UserAttributesColumn::AttributeName,
])
.update_column(model::UserAttributesColumn::Value)
.to_owned(),
)
.exec(transaction)
.await?;
}
if !remove_user_attributes.is_empty() {
model::UserAttributes::delete_many()
.filter(model::UserAttributesColumn::UserId.eq(&request.user_id))
.filter(
model::UserAttributesColumn::AttributeName
.is_in(remove_user_attributes),
)
.exec(transaction)
.await?;
}
Ok(())
})
})
.await?;
update_user.update(&self.sql_pool).await?;
Ok(())
}
#[instrument(skip_all, level = "debug", err, fields(user_id = ?user_id.as_str()))]
#[instrument(skip_all, level = "debug", err)]
async fn delete_user(&self, user_id: &UserId) -> Result<()> {
debug!(?user_id);
let res = model::User::delete_by_id(user_id.clone())
.exec(&self.sql_pool)
.await?;
@@ -324,8 +218,9 @@ impl UserBackendHandler for SqlBackendHandler {
Ok(())
}
#[instrument(skip_all, level = "debug", err, fields(user_id = ?user_id.as_str(), group_id))]
#[instrument(skip_all, level = "debug", err)]
async fn add_user_to_group(&self, user_id: &UserId, group_id: GroupId) -> Result<()> {
debug!(?user_id, ?group_id);
let new_membership = model::memberships::ActiveModel {
user_id: ActiveValue::Set(user_id.clone()),
group_id: ActiveValue::Set(group_id),
@@ -334,8 +229,9 @@ impl UserBackendHandler for SqlBackendHandler {
Ok(())
}
#[instrument(skip_all, level = "debug", err, fields(user_id = ?user_id.as_str(), group_id))]
#[instrument(skip_all, level = "debug", err)]
async fn remove_user_from_group(&self, user_id: &UserId, group_id: GroupId) -> Result<()> {
debug!(?user_id, ?group_id);
let res = model::Membership::delete_by_id((user_id.clone(), group_id))
.exec(&self.sql_pool)
.await?;
@@ -357,7 +253,6 @@ mod tests {
sql_backend_handler::tests::*,
types::{JpegPhoto, UserColumn},
};
use pretty_assertions::{assert_eq, assert_ne};
#[tokio::test]
async fn test_list_users_no_filter() {
@@ -396,8 +291,8 @@ mod tests {
let fixture = TestFixture::new().await;
let users = get_user_names(
&fixture.handler,
Some(UserRequestFilter::AttributeEquality(
"first_name".to_string(),
Some(UserRequestFilter::Equality(
UserColumn::FirstName,
"first bob".to_string(),
)),
)
@@ -417,10 +312,10 @@ mod tests {
final_: Some("K".to_owned()),
}),
UserRequestFilter::SubString(
UserColumn::DisplayName,
UserColumn::FirstName,
SubStringFilter {
initial: None,
any: vec!["t".to_owned(), "r".to_owned()],
any: vec!["r".to_owned(), "t".to_owned()],
final_: None,
},
),
@@ -725,23 +620,9 @@ mod tests {
.unwrap();
assert_eq!(user.email, "email");
assert_eq!(user.display_name.unwrap(), "display_name");
assert_eq!(
user.attributes,
vec![
AttributeValue {
name: "avatar".to_owned(),
value: Serialized::from(&JpegPhoto::for_tests())
},
AttributeValue {
name: "first_name".to_owned(),
value: Serialized::from("first_name")
},
AttributeValue {
name: "last_name".to_owned(),
value: Serialized::from("last_name")
}
]
);
assert_eq!(user.first_name.unwrap(), "first_name");
assert_eq!(user.last_name.unwrap(), "last_name");
assert_eq!(user.avatar, Some(JpegPhoto::for_tests()));
}
#[tokio::test]
@@ -752,9 +633,8 @@ mod tests {
.handler
.update_user(UpdateUserRequest {
user_id: UserId::new("bob"),
first_name: None,
first_name: Some("first_name".to_string()),
last_name: Some(String::new()),
avatar: Some(JpegPhoto::for_tests()),
..Default::default()
})
.await
@@ -766,104 +646,9 @@ mod tests {
.await
.unwrap();
assert_eq!(user.display_name.unwrap(), "display bob");
assert_eq!(
user.attributes,
vec![
AttributeValue {
name: "avatar".to_owned(),
value: Serialized::from(&JpegPhoto::for_tests())
},
AttributeValue {
name: "first_name".to_owned(),
value: Serialized::from("first bob")
}
]
);
}
#[tokio::test]
async fn test_update_user_delete_avatar() {
    let fixture = TestFixture::new().await;
    // First, set an avatar on the user.
    fixture
        .handler
        .update_user(UpdateUserRequest {
            user_id: UserId::new("bob"),
            avatar: Some(JpegPhoto::for_tests()),
            ..Default::default()
        })
        .await
        .unwrap();
    let user = fixture
        .handler
        .get_user_details(&UserId::new("bob"))
        .await
        .unwrap();
    let avatar = AttributeValue {
        name: "avatar".to_owned(),
        value: Serialized::from(&JpegPhoto::for_tests()),
    };
    // The avatar attribute must now be present on the user.
    assert!(user.attributes.contains(&avatar));
    // Updating with a null (empty) photo should remove the attribute.
    fixture
        .handler
        .update_user(UpdateUserRequest {
            user_id: UserId::new("bob"),
            avatar: Some(JpegPhoto::null()),
            ..Default::default()
        })
        .await
        .unwrap();
    let user = fixture
        .handler
        .get_user_details(&UserId::new("bob"))
        .await
        .unwrap();
    assert!(!user.attributes.contains(&avatar));
}
#[tokio::test]
async fn test_create_user_all_values() {
let fixture = TestFixture::new().await;
fixture
.handler
.create_user(CreateUserRequest {
user_id: UserId::new("james"),
email: "email".to_string(),
display_name: Some("display_name".to_string()),
first_name: Some("first_name".to_string()),
last_name: Some("last_name".to_string()),
avatar: Some(JpegPhoto::for_tests()),
})
.await
.unwrap();
let user = fixture
.handler
.get_user_details(&UserId::new("james"))
.await
.unwrap();
assert_eq!(user.email, "email");
assert_eq!(user.display_name.unwrap(), "display_name");
assert_eq!(
user.attributes,
vec![
AttributeValue {
name: "avatar".to_owned(),
value: Serialized::from(&JpegPhoto::for_tests())
},
AttributeValue {
name: "first_name".to_owned(),
value: Serialized::from("first_name")
},
AttributeValue {
name: "last_name".to_owned(),
value: Serialized::from("last_name")
}
]
);
assert_eq!(user.first_name.unwrap(), "first_name");
assert_eq!(user.last_name, None);
assert_eq!(user.avatar, None);
}
#[tokio::test]
@@ -885,32 +670,4 @@ mod tests {
vec!["patrick"]
);
}
#[tokio::test]
async fn test_delete_user_not_found() {
    // Deleting a non-existent user must report an error rather than
    // silently succeeding.
    let fixture = TestFixture::new().await;
    fixture
        .handler
        .delete_user(&UserId::new("not found"))
        .await
        .expect_err("Should have failed");
}

#[tokio::test]
async fn test_remove_user_from_group_not_found() {
    let fixture = TestFixture::new().await;
    // Unknown user, existing group.
    fixture
        .handler
        .remove_user_from_group(&UserId::new("not found"), fixture.groups[0])
        .await
        .expect_err("Should have failed");
    // Unknown user, unknown group.
    fixture
        .handler
        .remove_user_from_group(&UserId::new("not found"), GroupId(16242))
        .await
        .expect_err("Should have failed");
}
}

View File

@@ -2,17 +2,15 @@ use base64::Engine;
use chrono::{NaiveDateTime, TimeZone};
use sea_orm::{
entity::IntoActiveValue,
sea_query::{value::ValueType, ArrayType, BlobSize, ColumnType, Nullable, ValueTypeErr},
DbErr, DeriveValueType, QueryResult, TryFromU64, TryGetError, TryGetable, Value,
sea_query::{value::ValueType, ArrayType, ColumnType, Nullable, ValueTypeErr},
DbErr, FromQueryResult, QueryResult, TryFromU64, TryGetError, TryGetable, Value,
};
use serde::{Deserialize, Serialize};
use strum::{EnumString, IntoStaticStr};
pub use super::model::{GroupColumn, UserColumn};
#[derive(PartialEq, Hash, Eq, Clone, Debug, Default, Serialize, Deserialize, DeriveValueType)]
#[derive(PartialEq, Hash, Eq, Clone, Debug, Default, Serialize, Deserialize)]
#[serde(try_from = "&str")]
#[sea_orm(column_type = "String(Some(36))")]
pub struct Uuid(String);
impl Uuid {
@@ -55,6 +53,48 @@ impl std::string::ToString for Uuid {
}
}
impl TryGetable for Uuid {
    /// Reads a `Uuid` from a query-result column stored as a plain string.
    fn try_get_by<I: sea_orm::ColIdx>(
        res: &QueryResult,
        index: I,
    ) -> std::result::Result<Self, TryGetError> {
        // NOTE(review): no re-validation of the UUID format here — assumes
        // the column is only ever written through this `Uuid` type.
        Ok(Uuid(String::try_get_by(res, index)?))
    }
}
impl ValueType for Uuid {
    /// Converts a database `Value` (a string) back into a `Uuid`, going
    /// through the validating `TryFrom<&str>` implementation.
    fn try_from(v: Value) -> Result<Self, ValueTypeErr> {
        <Self as std::convert::TryFrom<_>>::try_from(
            <std::string::String as sea_orm::sea_query::ValueType>::try_from(v)?.as_str(),
        )
        .map_err(|_| ValueTypeErr {})
    }

    fn type_name() -> String {
        "Uuid".to_owned()
    }

    fn array_type() -> ArrayType {
        ArrayType::String
    }

    // 36 characters fits the canonical hyphenated "8-4-4-4-12" UUID form.
    fn column_type() -> ColumnType {
        ColumnType::String(Some(36))
    }
}
// Conversions into a database `Value`, by value and by reference.
impl From<Uuid> for Value {
    fn from(uuid: Uuid) -> Self {
        uuid.as_str().into()
    }
}

impl From<&Uuid> for Value {
    fn from(uuid: &Uuid) -> Self {
        uuid.as_str().into()
    }
}
#[cfg(test)]
#[macro_export]
macro_rules! uuid {
@@ -63,66 +103,7 @@ macro_rules! uuid {
};
}
#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize, DeriveValueType)]
#[sea_orm(column_type = "Binary(BlobSize::Long)", array_type = "Bytes")]
pub struct Serialized(Vec<u8>);
const SERIALIZED_I64_LEN: usize = 8;
impl std::fmt::Debug for Serialized {
    /// Best-effort human-readable rendering of the opaque serialized bytes:
    /// try to decode as a UTF-8 string, then (if the length matches) as an
    /// i64, and otherwise fall back to printing a hash of the raw bytes.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("Serialized")
            .field(
                &self
                    .convert_to()
                    .and_then(|s| {
                        // Only accept valid UTF-8 decodings.
                        String::from_utf8(s)
                            .map_err(|_| Box::new(bincode::ErrorKind::InvalidCharEncoding))
                    })
                    .or_else(|e| {
                        // An i64 serializes to exactly SERIALIZED_I64_LEN
                        // bytes, so only attempt that decoding when it fits.
                        if self.0.len() == SERIALIZED_I64_LEN {
                            self.convert_to::<i64>()
                                .map(|i| i.to_string())
                                .map_err(|_| Box::new(bincode::ErrorKind::InvalidCharEncoding))
                        } else {
                            Err(e)
                        }
                    })
                    .unwrap_or_else(|_| {
                        // Unknown payload: show a stable hash instead of raw bytes.
                        format!("hash: {:#016X}", {
                            let mut hasher = std::collections::hash_map::DefaultHasher::new();
                            std::hash::Hash::hash(&self.0, &mut hasher);
                            std::hash::Hasher::finish(&hasher)
                        })
                    }),
            )
            .finish()
    }
}
impl<'a, T: Serialize + ?Sized> From<&'a T> for Serialized {
    fn from(t: &'a T) -> Self {
        // bincode serialization of an in-memory value is infallible in
        // practice, hence the unwrap.
        Self(bincode::serialize(&t).unwrap())
    }
}

impl Serialized {
    /// Attempts to deserialize the stored bytes into a `T`.
    fn convert_to<'a, T: Deserialize<'a>>(&'a self) -> bincode::Result<T> {
        bincode::deserialize(&self.0)
    }

    /// Deserializes into `T`, panicking if the bytes don't match the type.
    pub fn unwrap<'a, T: Deserialize<'a>>(&'a self) -> T {
        self.convert_to().unwrap()
    }

    /// Deserializes into `T`, panicking with `message` on failure.
    pub fn expect<'a, T: Deserialize<'a>>(&'a self, message: &str) -> T {
        self.convert_to().expect(message)
    }
}
#[derive(
PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Default, Serialize, Deserialize, DeriveValueType,
)]
#[derive(PartialEq, Eq, Clone, Debug, Default, Serialize, Deserialize)]
#[serde(from = "String")]
pub struct UserId(String);
@@ -152,12 +133,24 @@ impl From<String> for UserId {
}
}
// Database conversions for `UserId`: stored as its string form.
impl From<UserId> for Value {
    fn from(user_id: UserId) -> Self {
        user_id.into_string().into()
    }
}

impl From<&UserId> for Value {
    fn from(user_id: &UserId) -> Self {
        user_id.as_str().into()
    }
}

impl TryGetable for UserId {
    fn try_get_by<I: sea_orm::ColIdx>(res: &QueryResult, index: I) -> Result<Self, TryGetError> {
        // Goes through `UserId::new` so any normalization it performs is
        // also applied to values read back from the database.
        Ok(UserId::new(&String::try_get_by(res, index)?))
    }
}
impl TryFromU64 for UserId {
fn try_from_u64(_n: u64) -> Result<Self, DbErr> {
Err(DbErr::ConvertFromU64(
@@ -166,10 +159,33 @@ impl TryFromU64 for UserId {
}
}
#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, DeriveValueType)]
#[sea_orm(column_type = "Binary(BlobSize::Long)", array_type = "Bytes")]
impl ValueType for UserId {
    /// Converts a database `Value` (a string) back into a `UserId`.
    fn try_from(v: Value) -> Result<Self, ValueTypeErr> {
        Ok(UserId::new(<String as ValueType>::try_from(v)?.as_str()))
    }

    fn type_name() -> String {
        "UserId".to_owned()
    }

    fn array_type() -> ArrayType {
        ArrayType::String
    }

    // User ids live in a VARCHAR(255) column.
    fn column_type() -> ColumnType {
        ColumnType::String(Some(255))
    }
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct JpegPhoto(#[serde(with = "serde_bytes")] Vec<u8>);
impl From<JpegPhoto> for Value {
fn from(photo: JpegPhoto) -> Self {
photo.0.into()
}
}
impl From<&JpegPhoto> for Value {
fn from(photo: &JpegPhoto) -> Self {
photo.0.as_slice().into()
@@ -221,24 +237,7 @@ impl From<&JpegPhoto> for String {
}
}
impl std::fmt::Debug for JpegPhoto {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut encoded = base64::engine::general_purpose::STANDARD.encode(&self.0);
if encoded.len() > 100 {
encoded.truncate(100);
encoded.push_str(" ...");
};
f.debug_tuple("JpegPhoto")
.field(&format!("b64[{}]", encoded))
.finish()
}
}
impl JpegPhoto {
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
pub fn null() -> Self {
Self(vec![])
}
@@ -267,36 +266,62 @@ impl JpegPhoto {
}
}
impl TryGetable for JpegPhoto {
    /// Reads a photo from a binary column, validating the bytes through
    /// `TryFrom<Vec<u8>>` and surfacing failures as a `DbErr`.
    fn try_get_by<I: sea_orm::ColIdx>(res: &QueryResult, index: I) -> Result<Self, TryGetError> {
        <Self as std::convert::TryFrom<Vec<_>>>::try_from(Vec::<u8>::try_get_by(res, index)?)
            .map_err(|e| {
                TryGetError::DbErr(DbErr::TryIntoErr {
                    from: "[u8]",
                    into: "JpegPhoto",
                    source: e.into(),
                })
            })
    }
}

impl ValueType for JpegPhoto {
    /// Converts a database `Value` (raw bytes) back into a `JpegPhoto`,
    /// going through the validating `TryFrom<&[u8]>` implementation.
    fn try_from(v: Value) -> Result<Self, ValueTypeErr> {
        <Self as std::convert::TryFrom<_>>::try_from(
            <Vec<u8> as sea_orm::sea_query::ValueType>::try_from(v)?.as_slice(),
        )
        .map_err(|_| ValueTypeErr {})
    }

    fn type_name() -> String {
        "JpegPhoto".to_owned()
    }

    fn array_type() -> ArrayType {
        ArrayType::Bytes
    }

    // Photos are stored as a long BLOB.
    fn column_type() -> ColumnType {
        ColumnType::Binary(sea_orm::sea_query::BlobSize::Long)
    }
}

impl Nullable for JpegPhoto {
    /// The NULL database representation is an empty photo.
    fn null() -> Value {
        JpegPhoto::null().into()
    }
}
impl IntoActiveValue<Serialized> for JpegPhoto {
fn into_active_value(self) -> sea_orm::ActiveValue<Serialized> {
if self.is_empty() {
sea_orm::ActiveValue::NotSet
} else {
sea_orm::ActiveValue::Set(Serialized::from(&self))
}
impl IntoActiveValue<JpegPhoto> for JpegPhoto {
fn into_active_value(self) -> sea_orm::ActiveValue<JpegPhoto> {
sea_orm::ActiveValue::Set(self)
}
}
#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize, Hash)]
pub struct AttributeValue {
pub name: String,
pub value: Serialized,
}
#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)]
#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize, FromQueryResult)]
pub struct User {
pub user_id: UserId,
pub email: String,
pub display_name: Option<String>,
pub first_name: Option<String>,
pub last_name: Option<String>,
pub avatar: Option<JpegPhoto>,
pub creation_date: NaiveDateTime,
pub uuid: Uuid,
pub attributes: Vec<AttributeValue>,
}
#[cfg(test)]
@@ -307,85 +332,54 @@ impl Default for User {
user_id: UserId::default(),
email: String::new(),
display_name: None,
first_name: None,
last_name: None,
avatar: None,
creation_date: epoch,
uuid: Uuid::from_name_and_date("", &epoch),
attributes: Vec::new(),
}
}
}
#[derive(
Debug,
Copy,
Clone,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Serialize,
Deserialize,
DeriveValueType,
)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct GroupId(pub i32);
// Database conversions for `GroupId`: stored as a plain i32 column.
impl From<GroupId> for Value {
    fn from(group_id: GroupId) -> Self {
        group_id.0.into()
    }
}

impl TryGetable for GroupId {
    fn try_get_by<I: sea_orm::ColIdx>(res: &QueryResult, index: I) -> Result<Self, TryGetError> {
        Ok(GroupId(i32::try_get_by(res, index)?))
    }
}

impl ValueType for GroupId {
    fn try_from(v: Value) -> Result<Self, ValueTypeErr> {
        Ok(GroupId(<i32 as ValueType>::try_from(v)?))
    }

    fn type_name() -> String {
        "GroupId".to_owned()
    }

    fn array_type() -> ArrayType {
        ArrayType::Int
    }

    fn column_type() -> ColumnType {
        ColumnType::Integer
    }
}

impl TryFromU64 for GroupId {
    // Needed by sea-orm to convert auto-increment keys (e.g. last insert id).
    fn try_from_u64(n: u64) -> Result<Self, DbErr> {
        Ok(GroupId(i32::try_from_u64(n)?))
    }
}

impl From<&GroupId> for Value {
    fn from(id: &GroupId) -> Self {
        // GroupId is Copy, so delegate to the by-value conversion.
        (*id).into()
    }
}
/// The type of a custom user/group attribute value, stored in the database
/// by its variant name (via `EnumString`/`IntoStaticStr`).
#[derive(
    Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, EnumString, IntoStaticStr,
)]
pub enum AttributeType {
    String,
    Integer,
    JpegPhoto,
    DateTime,
}

impl From<AttributeType> for Value {
    fn from(attribute_type: AttributeType) -> Self {
        // Stored as the variant's name, e.g. "String" or "JpegPhoto".
        Into::<&'static str>::into(attribute_type).into()
    }
}
impl TryGetable for AttributeType {
    fn try_get_by<I: sea_orm::ColIdx>(res: &QueryResult, index: I) -> Result<Self, TryGetError> {
        use std::str::FromStr;
        // NOTE(review): panics if the column contains a string that is not a
        // known variant name — assumes the column is only ever written
        // through this enum. Consider returning a `TryGetError` instead.
        Ok(AttributeType::from_str(&String::try_get_by(res, index)?).expect("Invalid enum value"))
    }
}
impl ValueType for AttributeType {
fn try_from(v: Value) -> Result<Self, ValueTypeErr> {
use std::str::FromStr;
Ok(
AttributeType::from_str(&<String as ValueType>::try_from(v)?)
.expect("Invalid enum value"),
)
}
fn type_name() -> String {
"AttributeType".to_owned()
}
fn array_type() -> ArrayType {
ArrayType::String
}
fn column_type() -> ColumnType {
ColumnType::String(Some(64))
}
}
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
pub struct Group {
pub id: GroupId,
@@ -393,16 +387,14 @@ pub struct Group {
pub creation_date: NaiveDateTime,
pub uuid: Uuid,
pub users: Vec<UserId>,
pub attributes: Vec<AttributeValue>,
}
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, FromQueryResult)]
pub struct GroupDetails {
pub group_id: GroupId,
pub display_name: String,
pub creation_date: NaiveDateTime,
pub uuid: Uuid,
pub attributes: Vec<AttributeValue>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
@@ -410,39 +402,3 @@ pub struct UserAndGroups {
pub user: User,
pub groups: Option<Vec<GroupDetails>>,
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
#[test]
fn test_serialized_debug_string() {
assert_eq!(
&format!("{:?}", Serialized::from("abcd")),
"Serialized(\"abcd\")"
);
assert_eq!(
&format!("{:?}", Serialized::from(&1234i64)),
"Serialized(\"1234\")"
);
assert_eq!(
&format!("{:?}", Serialized::from(&JpegPhoto::for_tests())),
"Serialized(\"hash: 0xB947C77A16F3C3BD\")"
);
}
#[test]
fn test_serialized_i64_len() {
assert_eq!(SERIALIZED_I64_LEN, Serialized::from(&0i64).0.len());
assert_eq!(
SERIALIZED_I64_LEN,
Serialized::from(&i64::max_value()).0.len()
);
assert_eq!(
SERIALIZED_I64_LEN,
Serialized::from(&i64::min_value()).0.len()
);
assert_eq!(SERIALIZED_I64_LEN, Serialized::from(&-1000i64).0.len());
}
}

View File

@@ -6,10 +6,8 @@ use tracing::info;
use crate::domain::{
error::Result,
handler::{
AttributeSchema, BackendHandler, CreateUserRequest, GroupBackendHandler,
GroupListerBackendHandler, GroupRequestFilter, Schema, SchemaBackendHandler,
UpdateGroupRequest, UpdateUserRequest, UserBackendHandler, UserListerBackendHandler,
UserRequestFilter,
BackendHandler, CreateUserRequest, GroupListerBackendHandler, GroupRequestFilter,
UpdateGroupRequest, UpdateUserRequest, UserListerBackendHandler, UserRequestFilter,
},
types::{Group, GroupDetails, GroupId, User, UserAndGroups, UserId},
};
@@ -108,10 +106,10 @@ pub trait AdminBackendHandler:
#[async_trait]
impl<Handler: BackendHandler> UserReadableBackendHandler for Handler {
async fn get_user_details(&self, user_id: &UserId) -> Result<User> {
<Handler as UserBackendHandler>::get_user_details(self, user_id).await
self.get_user_details(user_id).await
}
async fn get_user_groups(&self, user_id: &UserId) -> Result<HashSet<GroupDetails>> {
<Handler as UserBackendHandler>::get_user_groups(self, user_id).await
self.get_user_groups(user_id).await
}
}
@@ -122,44 +120,44 @@ impl<Handler: BackendHandler> ReadonlyBackendHandler for Handler {
filters: Option<UserRequestFilter>,
get_groups: bool,
) -> Result<Vec<UserAndGroups>> {
<Handler as UserListerBackendHandler>::list_users(self, filters, get_groups).await
self.list_users(filters, get_groups).await
}
async fn list_groups(&self, filters: Option<GroupRequestFilter>) -> Result<Vec<Group>> {
<Handler as GroupListerBackendHandler>::list_groups(self, filters).await
self.list_groups(filters).await
}
async fn get_group_details(&self, group_id: GroupId) -> Result<GroupDetails> {
<Handler as GroupBackendHandler>::get_group_details(self, group_id).await
self.get_group_details(group_id).await
}
}
#[async_trait]
impl<Handler: BackendHandler> UserWriteableBackendHandler for Handler {
async fn update_user(&self, request: UpdateUserRequest) -> Result<()> {
<Handler as UserBackendHandler>::update_user(self, request).await
self.update_user(request).await
}
}
#[async_trait]
impl<Handler: BackendHandler> AdminBackendHandler for Handler {
async fn create_user(&self, request: CreateUserRequest) -> Result<()> {
<Handler as UserBackendHandler>::create_user(self, request).await
self.create_user(request).await
}
async fn delete_user(&self, user_id: &UserId) -> Result<()> {
<Handler as UserBackendHandler>::delete_user(self, user_id).await
self.delete_user(user_id).await
}
async fn add_user_to_group(&self, user_id: &UserId, group_id: GroupId) -> Result<()> {
<Handler as UserBackendHandler>::add_user_to_group(self, user_id, group_id).await
self.add_user_to_group(user_id, group_id).await
}
async fn remove_user_from_group(&self, user_id: &UserId, group_id: GroupId) -> Result<()> {
<Handler as UserBackendHandler>::remove_user_from_group(self, user_id, group_id).await
self.remove_user_from_group(user_id, group_id).await
}
async fn update_group(&self, request: UpdateGroupRequest) -> Result<()> {
<Handler as GroupBackendHandler>::update_group(self, request).await
self.update_group(request).await
}
async fn create_group(&self, group_name: &str) -> Result<GroupId> {
<Handler as GroupBackendHandler>::create_group(self, group_name).await
self.create_group(group_name).await
}
async fn delete_group(&self, group_id: GroupId) -> Result<()> {
<Handler as GroupBackendHandler>::delete_group(self, group_id).await
self.delete_group(group_id).await
}
}
@@ -264,23 +262,6 @@ pub struct UserRestrictedListerBackendHandler<'a, Handler> {
pub user_filter: Option<UserId>,
}
#[async_trait]
impl<'a, Handler: SchemaBackendHandler + Sync> SchemaBackendHandler
for UserRestrictedListerBackendHandler<'a, Handler>
{
async fn get_schema(&self) -> Result<Schema> {
let mut schema = self.handler.get_schema().await?;
if self.user_filter.is_some() {
let filter_attributes = |attributes: &mut Vec<AttributeSchema>| {
attributes.retain(|a| a.is_visible);
};
filter_attributes(&mut schema.user_attributes.attributes);
filter_attributes(&mut schema.group_attributes.attributes);
}
Ok(schema)
}
}
#[async_trait]
impl<'a, Handler: UserListerBackendHandler + Sync> UserListerBackendHandler
for UserRestrictedListerBackendHandler<'a, Handler>

View File

@@ -1,21 +1,22 @@
use std::collections::{hash_map::DefaultHasher, HashSet};
use std::hash::{Hash, Hasher};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::task::Poll;
use actix_web::{
cookie::{Cookie, SameSite},
dev::{Service, ServiceRequest, ServiceResponse, Transform},
error::{ErrorBadRequest, ErrorUnauthorized},
web, HttpRequest, HttpResponse,
web, FromRequest, HttpRequest, HttpResponse,
};
use actix_web_httpauth::extractors::bearer::BearerAuth;
use anyhow::Result;
use anyhow::{bail, Context, Result};
use chrono::prelude::*;
use futures::future::{ok, Ready};
use futures_util::FutureExt;
use hmac::Hmac;
use jwt::{SignWithKey, VerifyWithKey};
use secstr::SecUtf8;
use sha2::Sha512;
use time::ext::NumericalDuration;
use tracing::{debug, info, instrument, warn};
@@ -205,6 +206,24 @@ where
.unwrap_or_else(error_to_http_response)
}
/// Resolves an optional password-reset token to the user it belongs to.
///
/// Returns `Ok(None)` when no token was supplied, `Ok(Some((token, user)))`
/// for a valid token, and an unauthorized error for an invalid/expired one.
async fn check_password_reset_token<'a, Backend>(
    backend_handler: &Backend,
    token: &Option<&'a str>,
) -> TcpResult<Option<(&'a str, UserId)>>
where
    Backend: TcpBackendHandler + 'static,
{
    // No token provided: nothing to validate.
    let token = match token {
        Some(t) => t,
        None => return Ok(None),
    };
    // Any backend failure is reported as an authorization problem; the
    // underlying cause is deliberately not leaked to the client.
    match backend_handler
        .get_user_id_for_password_reset_token(token)
        .await
    {
        Ok(user_id) => Ok(Some((token, user_id))),
        Err(_) => Err(TcpError::UnauthorizedError(
            "Invalid or expired token".to_string(),
        )),
    }
}
#[instrument(skip_all, level = "debug")]
async fn get_password_reset_step2<Backend>(
data: web::Data<AppState<Backend>>,
@@ -213,22 +232,12 @@ async fn get_password_reset_step2<Backend>(
where
Backend: TcpBackendHandler + BackendHandler + 'static,
{
let token = request
.match_info()
.get("token")
.ok_or_else(|| TcpError::BadRequest("Missing reset token".to_owned()))?;
let user_id = data
.get_tcp_handler()
.get_user_id_for_password_reset_token(token)
.await
.map_err(|e| {
debug!("Reset token error: {e:#}");
TcpError::NotFoundError("Wrong or expired reset token".to_owned())
})?;
let _ = data
.get_tcp_handler()
.delete_password_reset_token(token)
.await;
let tcp_handler = data.get_tcp_handler();
let (token, user_id) =
check_password_reset_token(tcp_handler, &request.match_info().get("token"))
.await?
.ok_or_else(|| TcpError::BadRequest("Missing token".to_string()))?;
let _ = tcp_handler.delete_password_reset_token(token).await;
let groups = HashSet::new();
let token = create_jwt(&data.jwt_key, user_id.to_string(), groups);
Ok(HttpResponse::Ok()
@@ -403,6 +412,7 @@ where
Backend: TcpBackendHandler + BackendHandler + OpaqueHandler + LoginHandler + 'static,
{
let user_id = UserId::new(&request.username);
debug!(?user_id);
let bind_request = BindRequest {
name: user_id.clone(),
password: request.password.clone(),
@@ -423,7 +433,7 @@ where
.unwrap_or_else(error_to_http_response)
}
#[instrument(skip_all, level = "debug", fields(name = %request.name))]
#[instrument(skip_all, level = "debug")]
async fn post_authorize<Backend>(
data: web::Data<AppState<Backend>>,
request: web::Json<BindRequest>,
@@ -432,6 +442,7 @@ where
Backend: TcpBackendHandler + BackendHandler + LoginHandler + 'static,
{
let name = request.name.clone();
debug!(%name);
data.get_login_handler().bind(request.into_inner()).await?;
get_login_successful_response(&data, &name).await
}
@@ -448,6 +459,115 @@ where
.unwrap_or_else(error_to_http_response)
}
// Parse the response from the HaveIBeenPwned API. Sample response:
//
// 0018A45C4D1DEF81644B54AB7F969B88D65:1
// 00D4F6E8FA6EECAD2A3AA415EEC418D38EC:2
// 011053FD0102E94D6AE2F8B83D76FAF94F6:13
//
// Each line is "<35-char SHA-1 hash suffix>:<breach count>".
fn parse_hash_list(response: &str) -> Result<password_reset::PasswordHashList> {
    use password_reset::*;
    // The 5-char prefix was sent in the request, so each line carries the
    // remaining 35 hex characters of the 40-char SHA-1 hash.
    const HASH_SUFFIX_LEN: usize = 35;
    let parse_line = |line: &str| -> Result<PasswordHashCount> {
        let split = line.trim().split(':').collect::<Vec<_>>();
        if let [hash, count] = &split[..] {
            if hash.len() == HASH_SUFFIX_LEN {
                if let Ok(count) = str::parse::<u64>(count) {
                    return Ok(PasswordHashCount {
                        hash: hash.to_string(),
                        count,
                    });
                }
            }
        }
        bail!("Invalid password hash from API: {}", line)
    };
    Ok(PasswordHashList {
        // `lines()` (rather than `split('\n')`) tolerates a trailing newline
        // and CRLF line endings without producing a spurious empty entry
        // that would otherwise fail `parse_line`.
        hashes: response
            .lines()
            .map(parse_line)
            .collect::<Result<Vec<_>>>()?,
    })
}
// TODO: Refactor this so the HTTP request can be mocked in tests.
/// Fetches the list of leaked password hashes matching the given 5-character
/// SHA-1 prefix from the HaveIBeenPwned range API (k-anonymity model: only
/// the prefix is sent, never the full hash).
async fn get_password_hash_list(
    hash: &str,
    api_key: &SecUtf8,
) -> Result<password_reset::PasswordHashList> {
    use reqwest::*;
    let client = Client::new();
    let resp = client
        .get(format!("https://api.pwnedpasswords.com/range/{}", hash))
        // HIBP rejects requests without a User-Agent header.
        .header(header::USER_AGENT, "LLDAP")
        .header("hibp-api-key", api_key.unsecure())
        .send()
        .await
        .context("Could not get response from HIBP")?
        .text()
        .await?;
    parse_hash_list(&resp).context("Invalid HIBP response")
}
/// Checks a 5-character SHA-1 password-hash prefix against the
/// HaveIBeenPwned database. The caller must be authenticated either with a
/// valid password-reset token (in the "reset-token" header) or a bearer JWT.
async fn check_password_pwned<Backend>(
    data: web::Data<AppState<Backend>>,
    request: HttpRequest,
    payload: web::Payload,
) -> TcpResult<HttpResponse>
where
    Backend: TcpBackendHandler + BackendHandler + OpaqueHandler + 'static,
{
    let has_reset_token = check_password_reset_token(
        data.get_tcp_handler(),
        // A header value that isn't valid UTF-8 is treated as absent rather
        // than panicking on user-controlled input (was `.to_str().unwrap()`).
        &request
            .headers()
            .get("reset-token")
            .and_then(|v| v.to_str().ok()),
    )
    .await?
    .is_some();
    let inner_payload = &mut payload.into_inner();
    // Without a reset token, fall back to normal JWT bearer authentication.
    if !has_reset_token
        && BearerAuth::from_request(&request, inner_payload)
            .await
            .ok()
            .and_then(|bearer| check_if_token_is_valid(&data, bearer.token()).ok())
            .is_none()
    {
        return Err(TcpError::UnauthorizedError(
            "No token or invalid token".to_string(),
        ));
    }
    if data.hibp_api_key.unsecure().is_empty() {
        return Err(TcpError::NotImplemented("No HIBP API key".to_string()));
    }
    let hash = request
        .match_info()
        .get("hash")
        .ok_or_else(|| TcpError::BadRequest("Missing hash".to_string()))?;
    // The HIBP range API takes exactly the first 5 hex digits of the SHA-1.
    if hash.len() != 5 || !hash.chars().all(|c| c.is_ascii_hexdigit()) {
        return Err(TcpError::BadRequest(format!(
            "Bad request: invalid hash format \"{}\"",
            hash
        )));
    }
    get_password_hash_list(hash, &data.hibp_api_key)
        .await
        .map(|hashes| HttpResponse::Ok().json(hashes))
        .map_err(|e| TcpError::InternalServerError(e.to_string()))
}
/// HTTP entry point for the pwned-password check: delegates to
/// `check_password_pwned` and converts any error into an HTTP error response.
async fn check_password_pwned_handler<Backend>(
    data: web::Data<AppState<Backend>>,
    request: HttpRequest,
    payload: web::Payload,
) -> HttpResponse
where
    Backend: TcpBackendHandler + BackendHandler + OpaqueHandler + 'static,
{
    check_password_pwned(data, request, payload)
        .await
        .unwrap_or_else(error_to_http_response)
}
#[instrument(skip_all, level = "debug")]
async fn opaque_register_start<Backend>(
request: actix_web::HttpRequest,
@@ -564,7 +684,7 @@ where
#[allow(clippy::type_complexity)]
type Future = Pin<Box<dyn core::future::Future<Output = Result<Self::Response, Self::Error>>>>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
fn poll_ready(&self, cx: &mut std::task::Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(cx)
}
@@ -635,6 +755,11 @@ where
web::resource("/simple/login").route(web::post().to(simple_login_handler::<Backend>)),
)
.service(web::resource("/refresh").route(web::get().to(get_refresh_handler::<Backend>)))
.service(
web::resource("/password/check/{hash}")
.wrap(CookieToHeaderTranslatorFactory)
.route(web::get().to(check_password_pwned_handler::<Backend>)),
)
.service(web::resource("/logout").route(web::get().to(get_logout_handler::<Backend>)))
.service(
web::scope("/opaque/register")

View File

@@ -1,7 +1,6 @@
use clap::{builder::EnumValueParser, Parser};
use lettre::message::Mailbox;
use serde::{Deserialize, Serialize};
use url::Url;
/// lldap is a lightweight LDAP server
#[derive(Debug, Parser, Clone)]
@@ -55,16 +54,9 @@ pub struct RunOpts {
/// Path to the file that contains the private server key.
/// It will be created if it doesn't exist.
/// Alternatively, you can set `server_key_seed`. If `server_key_seed` is given,
/// `server_key_file` will be ignored.
#[clap(long, env = "LLDAP_SERVER_KEY_FILE")]
pub server_key_file: Option<String>,
/// Seed used to generate the private server key.
/// Takes precedence over `server_key_file`.
#[clap(long, env = "LLDAP_SERVER_KEY_SEED")]
pub server_key_seed: Option<String>,
/// Change ldap host. Default: "0.0.0.0"
#[clap(long, env = "LLDAP_LDAP_HOST")]
pub ldap_host: Option<String>,
@@ -83,12 +75,16 @@ pub struct RunOpts {
/// URL of the server, for password reset links.
#[clap(long, env = "LLDAP_HTTP_URL")]
pub http_url: Option<Url>,
pub http_url: Option<String>,
/// Database connection URL
#[clap(short, long, env = "LLDAP_DATABASE_URL")]
pub database_url: Option<String>,
/// HaveIBeenPwned API key, to check passwords against leaks.
#[clap(long, env = "LLDAP_HIBP_API_KEY")]
pub hibp_api_key: Option<String>,
#[clap(flatten)]
pub smtp_opts: SmtpOpts,
@@ -131,7 +127,6 @@ pub struct LdapsOpts {
#[derive(Clone, Debug, Deserialize, Serialize, clap::ValueEnum)]
#[serde(rename_all = "UPPERCASE")]
#[clap(rename_all = "UPPERCASE")]
pub enum SmtpEncryption {
None,
Tls,
@@ -173,7 +168,7 @@ pub struct SmtpOpts {
#[clap(long, env = "LLDAP_SMTP_OPTIONS__TLS_REQUIRED", hide = true)]
pub smtp_tls_required: Option<bool>,
#[clap(long, env = "LLDAP_SMTP_OPTIONS__SMTP_ENCRYPTION", value_parser = EnumValueParser::<SmtpEncryption>::new(), ignore_case = true)]
#[clap(long, env = "LLDAP_SMTP_OPTIONS__ENCRYPTION", value_parser = EnumValueParser::<SmtpEncryption>::new(), ignore_case = true)]
pub smtp_encryption: Option<SmtpEncryption>,
}

View File

@@ -11,14 +11,13 @@ use lettre::message::Mailbox;
use lldap_auth::opaque::{server::ServerSetup, KeyPair};
use secstr::SecUtf8;
use serde::{Deserialize, Serialize};
use url::Url;
#[derive(Clone, Debug, Deserialize, Serialize, derive_builder::Builder)]
#[builder(pattern = "owned")]
pub struct MailOptions {
#[builder(default = "false")]
pub enable_password_reset: bool,
#[builder(default)]
#[builder(default = "None")]
pub from: Option<Mailbox>,
#[builder(default = "None")]
pub reply_to: Option<Mailbox>,
@@ -26,7 +25,7 @@ pub struct MailOptions {
pub server: String,
#[builder(default = "587")]
pub port: u16,
#[builder(default)]
#[builder(default = r#"String::default()"#)]
pub user: String,
#[builder(default = r#"SecUtf8::from("")"#)]
pub password: SecUtf8,
@@ -79,7 +78,7 @@ pub struct Configuration {
pub ldap_base_dn: String,
#[builder(default = r#"UserId::new("admin")"#)]
pub ldap_user_dn: UserId,
#[builder(default)]
#[builder(default = r#"String::default()"#)]
pub ldap_user_email: String,
#[builder(default = r#"SecUtf8::from("password")"#)]
pub ldap_user_pass: SecUtf8,
@@ -93,16 +92,14 @@ pub struct Configuration {
pub verbose: bool,
#[builder(default = r#"String::from("server_key")"#)]
pub key_file: String,
// We want an Option to see whether there is a value or not, since the value is printed as
// "***SECRET***".
#[builder(default)]
pub key_seed: Option<SecUtf8>,
#[builder(default)]
pub smtp_options: MailOptions,
#[builder(default)]
pub ldaps_options: LdapsOptions,
#[builder(default = r#"Url::parse("http://localhost").unwrap()"#)]
pub http_url: Url,
#[builder(default = r#"String::from("http://localhost")"#)]
pub http_url: String,
#[builder(default = r#"SecUtf8::from("")"#)]
pub hibp_api_key: SecUtf8,
#[serde(skip)]
#[builder(field(private), default = "None")]
server_setup: Option<ServerSetup>,
@@ -116,14 +113,7 @@ impl std::default::Default for Configuration {
impl ConfigurationBuilder {
pub fn build(self) -> Result<Configuration> {
let server_setup = get_server_setup(
self.key_file.as_deref().unwrap_or("server_key"),
self.key_seed
.as_ref()
.and_then(|o| o.as_ref())
.map(SecUtf8::unsecure)
.unwrap_or_default(),
)?;
let server_setup = get_server_setup(self.key_file.as_deref().unwrap_or("server_key"))?;
Ok(self.server_setup(Some(server_setup)).private_build()?)
}
@@ -166,25 +156,10 @@ fn write_to_readonly_file(path: &std::path::Path, buffer: &[u8]) -> Result<()> {
Ok(file.write_all(buffer)?)
}
fn get_server_setup(file_path: &str, key_seed: &str) -> Result<ServerSetup> {
fn get_server_setup(file_path: &str) -> Result<ServerSetup> {
use std::fs::read;
let path = std::path::Path::new(file_path);
if !key_seed.is_empty() {
if file_path != "server_key" || path.exists() {
eprintln!("WARNING: A key_seed was given, we will ignore the server_key and generate one from the seed!");
} else {
println!("Got a key_seed, ignoring key_file");
}
let hash = |val: &[u8]| -> [u8; 32] {
use sha2::{Digest, Sha256};
let mut seed_hasher = Sha256::new();
seed_hasher.update(val);
seed_hasher.finalize().into()
};
use rand::SeedableRng;
let mut rng = rand_chacha::ChaCha20Rng::from_seed(hash(key_seed.as_bytes()));
Ok(ServerSetup::new(&mut rng))
} else if path.exists() {
if path.exists() {
let bytes = read(file_path).context(format!("Could not read key file `{}`", file_path))?;
Ok(ServerSetup::deserialize(&bytes)?)
} else {
@@ -225,10 +200,6 @@ impl ConfigOverrider for RunOpts {
config.key_file = path.to_string();
}
if let Some(seed) = self.server_key_seed.as_ref() {
config.key_seed = Some(SecUtf8::from(seed));
}
if let Some(port) = self.ldap_port {
config.ldap_port = port;
}
@@ -238,12 +209,16 @@ impl ConfigOverrider for RunOpts {
}
if let Some(url) = self.http_url.as_ref() {
config.http_url = url.clone();
config.http_url = url.to_string();
}
if let Some(database_url) = self.database_url.as_ref() {
config.database_url = database_url.to_string();
}
if let Some(api_key) = self.hibp_api_key.as_ref() {
config.hibp_api_key = SecUtf8::from(api_key.clone());
}
self.smtp_opts.override_config(config);
self.ldaps_opts.override_config(config);
}
@@ -337,14 +312,7 @@ where
if config.verbose {
println!("Configuration: {:#?}", &config);
}
config.server_setup = Some(get_server_setup(
&config.key_file,
config
.key_seed
.as_ref()
.map(SecUtf8::unsecure)
.unwrap_or_default(),
)?);
config.server_setup = Some(get_server_setup(&config.key_file)?);
if config.jwt_secret == SecUtf8::from("secretjwtsecret") {
println!("WARNING: Default JWT secret used! This is highly unsafe and can allow attackers to log in as admin.");
}
@@ -356,30 +324,3 @@ where
}
Ok(config)
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
#[test]
fn check_generated_server_key() {
assert_eq!(
bincode::serialize(&get_server_setup("/doesnt/exist", "key seed").unwrap()).unwrap(),
[
255, 206, 202, 50, 247, 13, 59, 191, 69, 244, 148, 187, 150, 227, 12, 250, 20, 207,
211, 151, 147, 33, 107, 132, 2, 252, 121, 94, 97, 6, 97, 232, 163, 168, 86, 246,
249, 186, 31, 204, 59, 75, 65, 134, 108, 159, 15, 70, 246, 250, 150, 195, 54, 197,
195, 176, 150, 200, 157, 119, 13, 173, 119, 8, 32, 0, 0, 0, 0, 0, 0, 0, 248, 123,
35, 91, 194, 51, 52, 57, 191, 210, 68, 227, 107, 166, 232, 37, 195, 244, 100, 84,
88, 212, 190, 12, 195, 57, 83, 72, 127, 189, 179, 16, 32, 0, 0, 0, 0, 0, 0, 0, 128,
112, 60, 207, 205, 69, 67, 73, 24, 175, 187, 62, 16, 45, 59, 136, 78, 40, 187, 54,
159, 94, 116, 33, 133, 119, 231, 43, 199, 164, 141, 7, 32, 0, 0, 0, 0, 0, 0, 0,
212, 134, 53, 203, 131, 24, 138, 211, 162, 28, 23, 233, 251, 82, 34, 66, 98, 12,
249, 205, 35, 208, 241, 50, 128, 131, 46, 189, 211, 51, 56, 109, 32, 0, 0, 0, 0, 0,
0, 0, 84, 20, 147, 25, 50, 5, 243, 203, 216, 180, 175, 121, 159, 96, 123, 183, 146,
251, 22, 44, 98, 168, 67, 224, 255, 139, 159, 25, 24, 254, 88, 3
]
);
}
}

View File

@@ -48,6 +48,7 @@ impl Scheduler {
#[instrument(skip_all)]
async fn cleanup_db(sql_pool: DbConnection) {
info!("Cleaning DB");
if let Err(e) = model::JwtRefreshStorage::delete_many()
.filter(JwtRefreshStorageColumn::ExpiryDate.lt(chrono::Utc::now().naive_utc()))
.exec(&sql_pool)
@@ -69,6 +70,7 @@ impl Scheduler {
{
error!("DB error while cleaning up password reset tokens: {}", e);
};
info!("DB cleaned!");
}
fn duration_until_next(&self) -> Duration {

View File

@@ -1,17 +1,16 @@
use crate::{
domain::{
handler::{BackendHandler, SchemaBackendHandler},
ldap::utils::{map_user_field, UserFieldType},
types::{AttributeType, GroupDetails, GroupId, JpegPhoto, UserColumn, UserId},
handler::BackendHandler,
ldap::utils::map_user_field,
types::{GroupDetails, GroupId, UserColumn, UserId},
},
infra::{
access_control::{ReadonlyBackendHandler, UserReadableBackendHandler},
graphql::api::{field_error_callback, Context},
schema::PublicSchema,
graphql::api::field_error_callback,
},
};
use chrono::{NaiveDateTime, TimeZone};
use juniper::{graphql_object, FieldError, FieldResult, GraphQLInputObject};
use chrono::TimeZone;
use juniper::{graphql_object, FieldResult, GraphQLInputObject};
use serde::{Deserialize, Serialize};
use tracing::{debug, debug_span, Instrument};
@@ -19,10 +18,7 @@ type DomainRequestFilter = crate::domain::handler::UserRequestFilter;
type DomainUser = crate::domain::types::User;
type DomainGroup = crate::domain::types::Group;
type DomainUserAndGroups = crate::domain::types::UserAndGroups;
type DomainSchema = crate::infra::schema::PublicSchema;
type DomainAttributeList = crate::domain::handler::AttributeList;
type DomainAttributeSchema = crate::domain::handler::AttributeSchema;
type DomainAttributeValue = crate::domain::types::AttributeValue;
use super::api::Context;
#[derive(PartialEq, Eq, Debug, GraphQLInputObject)]
/// A filter for requests, specifying a boolean expression based on field constraints. Only one of
@@ -65,19 +61,14 @@ impl TryInto<DomainRequestFilter> for RequestFilter {
return Err("Multiple fields specified in request filter".to_string());
}
if let Some(e) = self.eq {
return match map_user_field(&e.field.to_ascii_lowercase()) {
UserFieldType::NoMatch => Err(format!("Unknown request filter: {}", &e.field)),
UserFieldType::PrimaryField(UserColumn::UserId) => {
Ok(DomainRequestFilter::UserId(UserId::new(&e.value)))
if let Some(column) = map_user_field(&e.field) {
if column == UserColumn::UserId {
return Ok(DomainRequestFilter::UserId(UserId::new(&e.value)));
}
UserFieldType::PrimaryField(column) => {
Ok(DomainRequestFilter::Equality(column, e.value))
}
UserFieldType::Attribute(column) => Ok(DomainRequestFilter::AttributeEquality(
column.to_owned(),
e.value,
)),
};
return Ok(DomainRequestFilter::Equality(column, e.value));
} else {
return Err(format!("Unknown request filter: {}", &e.field));
}
}
if let Some(c) = self.any {
return Ok(DomainRequestFilter::Or(
@@ -133,12 +124,10 @@ impl<Handler: BackendHandler> Query<Handler> {
}
pub async fn user(context: &Context<Handler>, user_id: String) -> FieldResult<User<Handler>> {
use anyhow::Context;
let span = debug_span!("[GraphQL query] user");
span.in_scope(|| {
debug!(?user_id);
});
let user_id = urlencoding::decode(&user_id).context("Invalid user parameter")?;
let user_id = UserId::new(&user_id);
let handler = context
.get_readable_handler(&user_id)
@@ -206,19 +195,6 @@ impl<Handler: BackendHandler> Query<Handler> {
.await
.map(Into::into)?)
}
async fn schema(context: &Context<Handler>) -> FieldResult<Schema<Handler>> {
let span = debug_span!("[GraphQL query] get_schema");
let handler = context
.handler
.get_user_restricted_lister_handler(&context.validation_result);
Ok(handler
.get_schema()
.instrument(span)
.await
.map(Into::<PublicSchema>::into)
.map(Into::into)?)
}
}
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
@@ -253,29 +229,15 @@ impl<Handler: BackendHandler> User<Handler> {
}
fn first_name(&self) -> &str {
self.user
.attributes
.iter()
.find(|a| a.name == "first_name")
.map(|a| a.value.unwrap())
.unwrap_or("")
self.user.first_name.as_deref().unwrap_or("")
}
fn last_name(&self) -> &str {
self.user
.attributes
.iter()
.find(|a| a.name == "last_name")
.map(|a| a.value.unwrap())
.unwrap_or("")
self.user.last_name.as_deref().unwrap_or("")
}
fn avatar(&self) -> Option<String> {
self.user
.attributes
.iter()
.find(|a| a.name == "avatar")
.map(|a| String::from(&a.value.unwrap::<JpegPhoto>()))
self.user.avatar.as_ref().map(String::from)
}
fn creation_date(&self) -> chrono::DateTime<chrono::Utc> {
@@ -286,16 +248,6 @@ impl<Handler: BackendHandler> User<Handler> {
self.user.uuid.as_str()
}
/// User-defined attributes.
fn attributes(&self) -> Vec<AttributeValue<Handler, SchemaUserAttributeExtractor>> {
self.user
.attributes
.clone()
.into_iter()
.map(Into::into)
.collect()
}
/// The groups to which this user belongs.
async fn groups(&self, context: &Context<Handler>) -> FieldResult<Vec<Group<Handler>>> {
let span = debug_span!("[GraphQL query] user::groups");
@@ -309,14 +261,7 @@ impl<Handler: BackendHandler> User<Handler> {
.get_user_groups(&self.user.user_id)
.instrument(span)
.await
.map(|set| {
let mut groups = set
.into_iter()
.map(Into::into)
.collect::<Vec<Group<Handler>>>();
groups.sort_by(|g1, g2| g1.display_name.cmp(&g2.display_name));
groups
})?)
.map(|set| set.into_iter().map(Into::into).collect())?)
}
}
@@ -345,7 +290,6 @@ pub struct Group<Handler: BackendHandler> {
display_name: String,
creation_date: chrono::NaiveDateTime,
uuid: String,
attributes: Vec<DomainAttributeValue>,
members: Option<Vec<String>>,
_phantom: std::marker::PhantomData<Box<Handler>>,
}
@@ -364,16 +308,6 @@ impl<Handler: BackendHandler> Group<Handler> {
fn uuid(&self) -> String {
self.uuid.clone()
}
/// User-defined attributes.
fn attributes(&self) -> Vec<AttributeValue<Handler, SchemaGroupAttributeExtractor>> {
self.attributes
.clone()
.into_iter()
.map(Into::into)
.collect()
}
/// The groups to which this user belongs.
async fn users(&self, context: &Context<Handler>) -> FieldResult<Vec<User<Handler>>> {
let span = debug_span!("[GraphQL query] group::users");
@@ -404,7 +338,6 @@ impl<Handler: BackendHandler> From<GroupDetails> for Group<Handler> {
display_name: group_details.display_name,
creation_date: group_details.creation_date,
uuid: group_details.uuid.into_string(),
attributes: group_details.attributes,
members: None,
_phantom: std::marker::PhantomData,
}
@@ -418,222 +351,17 @@ impl<Handler: BackendHandler> From<DomainGroup> for Group<Handler> {
display_name: group.display_name,
creation_date: group.creation_date,
uuid: group.uuid.into_string(),
attributes: group.attributes,
members: Some(group.users.into_iter().map(UserId::into_string).collect()),
_phantom: std::marker::PhantomData,
}
}
}
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
pub struct AttributeSchema<Handler: BackendHandler> {
schema: DomainAttributeSchema,
_phantom: std::marker::PhantomData<Box<Handler>>,
}
#[graphql_object(context = Context<Handler>)]
impl<Handler: BackendHandler> AttributeSchema<Handler> {
fn name(&self) -> String {
self.schema.name.clone()
}
fn attribute_type(&self) -> String {
let name: &'static str = self.schema.attribute_type.into();
name.to_owned()
}
fn is_list(&self) -> bool {
self.schema.is_list
}
fn is_visible(&self) -> bool {
self.schema.is_visible
}
fn is_editable(&self) -> bool {
self.schema.is_editable
}
fn is_hardcoded(&self) -> bool {
self.schema.is_hardcoded
}
}
impl<Handler: BackendHandler> From<DomainAttributeSchema> for AttributeSchema<Handler> {
fn from(value: DomainAttributeSchema) -> Self {
Self {
schema: value,
_phantom: std::marker::PhantomData,
}
}
}
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
pub struct AttributeList<Handler: BackendHandler> {
schema: DomainAttributeList,
_phantom: std::marker::PhantomData<Box<Handler>>,
}
#[graphql_object(context = Context<Handler>)]
impl<Handler: BackendHandler> AttributeList<Handler> {
fn attributes(&self) -> Vec<AttributeSchema<Handler>> {
self.schema
.attributes
.clone()
.into_iter()
.map(Into::into)
.collect()
}
}
impl<Handler: BackendHandler> From<DomainAttributeList> for AttributeList<Handler> {
fn from(value: DomainAttributeList) -> Self {
Self {
schema: value,
_phantom: std::marker::PhantomData,
}
}
}
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
pub struct Schema<Handler: BackendHandler> {
schema: DomainSchema,
_phantom: std::marker::PhantomData<Box<Handler>>,
}
#[graphql_object(context = Context<Handler>)]
impl<Handler: BackendHandler> Schema<Handler> {
fn user_schema(&self) -> AttributeList<Handler> {
self.schema.get_schema().user_attributes.clone().into()
}
fn group_schema(&self) -> AttributeList<Handler> {
self.schema.get_schema().group_attributes.clone().into()
}
}
impl<Handler: BackendHandler> From<DomainSchema> for Schema<Handler> {
fn from(value: DomainSchema) -> Self {
Self {
schema: value,
_phantom: std::marker::PhantomData,
}
}
}
trait SchemaAttributeExtractor: std::marker::Send {
fn get_attributes(schema: &DomainSchema) -> &DomainAttributeList;
}
struct SchemaUserAttributeExtractor;
impl SchemaAttributeExtractor for SchemaUserAttributeExtractor {
fn get_attributes(schema: &DomainSchema) -> &DomainAttributeList {
&schema.get_schema().user_attributes
}
}
struct SchemaGroupAttributeExtractor;
impl SchemaAttributeExtractor for SchemaGroupAttributeExtractor {
fn get_attributes(schema: &DomainSchema) -> &DomainAttributeList {
&schema.get_schema().group_attributes
}
}
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
pub struct AttributeValue<Handler: BackendHandler, Extractor> {
attribute: DomainAttributeValue,
_phantom: std::marker::PhantomData<Box<Handler>>,
_phantom_extractor: std::marker::PhantomData<Extractor>,
}
#[graphql_object(context = Context<Handler>)]
impl<Handler: BackendHandler, Extractor: SchemaAttributeExtractor>
AttributeValue<Handler, Extractor>
{
fn name(&self) -> &str {
&self.attribute.name
}
async fn value(&self, context: &Context<Handler>) -> FieldResult<Vec<String>> {
let handler = context
.handler
.get_user_restricted_lister_handler(&context.validation_result);
serialize_attribute(
&self.attribute,
Extractor::get_attributes(&PublicSchema::from(handler.get_schema().await?)),
)
}
}
pub fn serialize_attribute(
attribute: &DomainAttributeValue,
attributes: &DomainAttributeList,
) -> FieldResult<Vec<String>> {
let convert_date = |date| chrono::Utc.from_utc_datetime(&date).to_rfc3339();
attributes
.get_attribute_type(&attribute.name)
.map(|attribute_type| {
match attribute_type {
(AttributeType::String, false) => {
vec![attribute.value.unwrap::<String>()]
}
(AttributeType::Integer, false) => {
// LDAP integers are encoded as strings.
vec![attribute.value.unwrap::<i64>().to_string()]
}
(AttributeType::JpegPhoto, false) => {
vec![String::from(&attribute.value.unwrap::<JpegPhoto>())]
}
(AttributeType::DateTime, false) => {
vec![convert_date(attribute.value.unwrap::<NaiveDateTime>())]
}
(AttributeType::String, true) => attribute
.value
.unwrap::<Vec<String>>()
.into_iter()
.collect(),
(AttributeType::Integer, true) => attribute
.value
.unwrap::<Vec<i64>>()
.into_iter()
.map(|i| i.to_string())
.collect(),
(AttributeType::JpegPhoto, true) => attribute
.value
.unwrap::<Vec<JpegPhoto>>()
.iter()
.map(String::from)
.collect(),
(AttributeType::DateTime, true) => attribute
.value
.unwrap::<Vec<NaiveDateTime>>()
.into_iter()
.map(convert_date)
.collect(),
}
})
.ok_or_else(|| FieldError::from(anyhow::anyhow!("Unknown attribute: {}", &attribute.name)))
}
impl<Handler: BackendHandler, Extractor> From<DomainAttributeValue>
for AttributeValue<Handler, Extractor>
{
fn from(value: DomainAttributeValue) -> Self {
Self {
attribute: value,
_phantom: std::marker::PhantomData,
_phantom_extractor: std::marker::PhantomData,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
domain::{
handler::AttributeList,
types::{AttributeType, Serialized},
},
infra::{
access_control::{Permission, ValidationResults},
test_utils::{setup_default_schema, MockTestBackendHandler},
},
domain::handler::MockTestBackendHandler, infra::access_control::ValidationResults,
};
use chrono::TimeZone;
use juniper::{
@@ -641,7 +369,6 @@ mod tests {
RootNode, Variables,
};
use mockall::predicate::eq;
use pretty_assertions::assert_eq;
use std::collections::HashSet;
fn schema<'q, C, Q>(query_root: Q) -> RootNode<'q, Q, EmptyMutation<C>, EmptySubscription<C>>
@@ -663,58 +390,15 @@ mod tests {
email
creationDate
uuid
attributes {
name
value
}
groups {
id
displayName
creationDate
uuid
attributes {
name
value
}
}
}
}"#;
let mut mock = MockTestBackendHandler::new();
mock.expect_get_schema().returning(|| {
Ok(crate::domain::handler::Schema {
user_attributes: DomainAttributeList {
attributes: vec![
DomainAttributeSchema {
name: "first_name".to_owned(),
attribute_type: AttributeType::String,
is_list: false,
is_visible: true,
is_editable: true,
is_hardcoded: true,
},
DomainAttributeSchema {
name: "last_name".to_owned(),
attribute_type: AttributeType::String,
is_list: false,
is_visible: true,
is_editable: true,
is_hardcoded: true,
},
],
},
group_attributes: DomainAttributeList {
attributes: vec![DomainAttributeSchema {
name: "club_name".to_owned(),
attribute_type: AttributeType::String,
is_list: false,
is_visible: true,
is_editable: true,
is_hardcoded: false,
}],
},
})
});
mock.expect_get_user_details()
.with(eq(UserId::new("bob")))
.return_once(|_| {
@@ -723,16 +407,6 @@ mod tests {
email: "bob@bobbers.on".to_string(),
creation_date: chrono::Utc.timestamp_millis_opt(42).unwrap().naive_utc(),
uuid: crate::uuid!("b1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
attributes: vec![
DomainAttributeValue {
name: "first_name".to_owned(),
value: Serialized::from("Bob"),
},
DomainAttributeValue {
name: "last_name".to_owned(),
value: Serialized::from("Bobberson"),
},
],
..Default::default()
})
});
@@ -742,17 +416,6 @@ mod tests {
display_name: "Bobbersons".to_string(),
creation_date: chrono::Utc.timestamp_nanos(42).naive_utc(),
uuid: crate::uuid!("a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
attributes: vec![DomainAttributeValue {
name: "club_name".to_owned(),
value: Serialized::from("Gang of Four"),
}],
});
groups.insert(GroupDetails {
group_id: GroupId(7),
display_name: "Jefferees".to_string(),
creation_date: chrono::Utc.timestamp_nanos(12).naive_utc(),
uuid: crate::uuid!("b1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
attributes: Vec::new(),
});
mock.expect_get_user_groups()
.with(eq(UserId::new("bob")))
@@ -772,31 +435,10 @@ mod tests {
"email": "bob@bobbers.on",
"creationDate": "1970-01-01T00:00:00.042+00:00",
"uuid": "b1a2a3a4-b1b2-c1c2-d1d2-d3d4d5d6d7d8",
"attributes": [{
"name": "first_name",
"value": ["Bob"],
},
{
"name": "last_name",
"value": ["Bobberson"],
}],
"groups": [{
"id": 3,
"displayName": "Bobbersons",
"creationDate": "1970-01-01T00:00:00.000000042+00:00",
"uuid": "a1a2a3a4-b1b2-c1c2-d1d2-d3d4d5d6d7d8",
"attributes": [{
"name": "club_name",
"value": ["Gang of Four"],
},
],
},
{
"id": 7,
"displayName": "Jefferees",
"creationDate": "1970-01-01T00:00:00.000000012+00:00",
"uuid": "b1a2a3a4-b1b2-c1c2-d1d2-d3d4d5d6d7d8",
"attributes": [],
"uuid": "a1a2a3a4-b1b2-c1c2-d1d2-d3d4d5d6d7d8"
}]
}
}),
@@ -817,10 +459,6 @@ mod tests {
{eq: {
field: "email"
value: "robert@bobbers.on"
}},
{eq: {
field: "firstName"
value: "robert"
}}
]}) {
id
@@ -835,11 +473,7 @@ mod tests {
DomainRequestFilter::UserId(UserId::new("bob")),
DomainRequestFilter::Equality(
UserColumn::Email,
"robert@bobbers.on".to_owned(),
),
DomainRequestFilter::AttributeEquality(
"first_name".to_owned(),
"robert".to_owned(),
"robert@bobbers.on".to_string(),
),
]))),
eq(false),
@@ -849,7 +483,7 @@ mod tests {
DomainUserAndGroups {
user: DomainUser {
user_id: UserId::new("bob"),
email: "bob@bobbers.on".to_owned(),
email: "bob@bobbers.on".to_string(),
..Default::default()
},
groups: None,
@@ -857,7 +491,7 @@ mod tests {
DomainUserAndGroups {
user: DomainUser {
user_id: UserId::new("robert"),
email: "robert@bobbers.on".to_owned(),
email: "robert@bobbers.on".to_string(),
..Default::default()
},
groups: None,
@@ -889,219 +523,4 @@ mod tests {
))
);
}
#[tokio::test]
async fn get_schema() {
const QUERY: &str = r#"{
schema {
userSchema {
attributes {
name
attributeType
isList
isVisible
isEditable
isHardcoded
}
}
groupSchema {
attributes {
name
attributeType
isList
isVisible
isEditable
isHardcoded
}
}
}
}"#;
let mut mock = MockTestBackendHandler::new();
setup_default_schema(&mut mock);
let context =
Context::<MockTestBackendHandler>::new_for_tests(mock, ValidationResults::admin());
let schema = schema(Query::<MockTestBackendHandler>::new());
assert_eq!(
execute(QUERY, None, &schema, &Variables::new(), &context).await,
Ok((
graphql_value!(
{
"schema": {
"userSchema": {
"attributes": [
{
"name": "avatar",
"attributeType": "JpegPhoto",
"isList": false,
"isVisible": true,
"isEditable": true,
"isHardcoded": true,
},
{
"name": "creation_date",
"attributeType": "DateTime",
"isList": false,
"isVisible": true,
"isEditable": false,
"isHardcoded": true,
},
{
"name": "display_name",
"attributeType": "String",
"isList": false,
"isVisible": true,
"isEditable": true,
"isHardcoded": true,
},
{
"name": "first_name",
"attributeType": "String",
"isList": false,
"isVisible": true,
"isEditable": true,
"isHardcoded": true,
},
{
"name": "last_name",
"attributeType": "String",
"isList": false,
"isVisible": true,
"isEditable": true,
"isHardcoded": true,
},
{
"name": "mail",
"attributeType": "String",
"isList": false,
"isVisible": true,
"isEditable": true,
"isHardcoded": true,
},
{
"name": "user_id",
"attributeType": "String",
"isList": false,
"isVisible": true,
"isEditable": false,
"isHardcoded": true,
},
{
"name": "uuid",
"attributeType": "String",
"isList": false,
"isVisible": true,
"isEditable": false,
"isHardcoded": true,
},
]
},
"groupSchema": {
"attributes": [
{
"name": "creation_date",
"attributeType": "DateTime",
"isList": false,
"isVisible": true,
"isEditable": false,
"isHardcoded": true,
},
{
"name": "display_name",
"attributeType": "String",
"isList": false,
"isVisible": true,
"isEditable": true,
"isHardcoded": true,
},
{
"name": "group_id",
"attributeType": "Integer",
"isList": false,
"isVisible": true,
"isEditable": false,
"isHardcoded": true,
},
{
"name": "uuid",
"attributeType": "String",
"isList": false,
"isVisible": true,
"isEditable": false,
"isHardcoded": true,
},
]
}
}
}),
vec![]
))
);
}
#[tokio::test]
async fn regular_user_doesnt_see_non_visible_attributes() {
const QUERY: &str = r#"{
schema {
userSchema {
attributes {
name
}
}
}
}"#;
let mut mock = MockTestBackendHandler::new();
mock.expect_get_schema().times(1).return_once(|| {
Ok(crate::domain::handler::Schema {
user_attributes: AttributeList {
attributes: vec![crate::domain::handler::AttributeSchema {
name: "invisible".to_owned(),
attribute_type: AttributeType::JpegPhoto,
is_list: false,
is_visible: false,
is_editable: true,
is_hardcoded: true,
}],
},
group_attributes: AttributeList {
attributes: Vec::new(),
},
})
});
let context = Context::<MockTestBackendHandler>::new_for_tests(
mock,
ValidationResults {
user: UserId::new("bob"),
permission: Permission::Regular,
},
);
let schema = schema(Query::<MockTestBackendHandler>::new());
assert_eq!(
execute(QUERY, None, &schema, &Variables::new(), &context).await,
Ok((
graphql_value!(
{
"schema": {
"userSchema": {
"attributes": [
{"name": "creation_date"},
{"name": "display_name"},
{"name": "mail"},
{"name": "user_id"},
{"name": "uuid"},
]
}
}
} ),
vec![]
))
);
}
}

View File

@@ -1,4 +1,4 @@
use crate::infra::{configuration::LdapsOptions, ldap_server::read_certificates};
use crate::infra::configuration::LdapsOptions;
use anyhow::{anyhow, bail, ensure, Context, Result};
use futures_util::SinkExt;
use ldap3_proto::{
@@ -19,8 +19,8 @@ where
{
use tokio_stream::StreamExt;
let (r, w) = tokio::io::split(stream);
let mut requests = FramedRead::new(r, LdapCodec::default());
let mut resp = FramedWrite::new(w, LdapCodec::default());
let mut requests = FramedRead::new(r, LdapCodec);
let mut resp = FramedWrite::new(w, LdapCodec);
resp.send(LdapMsg {
msgid: 0,
@@ -65,11 +65,10 @@ where
invalid_answer
);
info!("Success");
resp.close().await?;
Ok(())
}
#[instrument(level = "info", err)]
#[instrument(skip_all, level = "info", err)]
pub async fn check_ldap(port: u16) -> Result<()> {
check_ldap_endpoint(TcpStream::connect(format!("localhost:{}", port)).await?).await
}
@@ -86,71 +85,37 @@ fn get_root_certificates() -> rustls::RootCertStore {
root_store
}
fn get_tls_connector(ldaps_options: &LdapsOptions) -> Result<RustlsTlsConnector> {
let mut client_config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(get_root_certificates())
.with_no_client_auth();
let (certs, _private_key) = read_certificates(ldaps_options)?;
// Check that the server cert is the one in the config file.
struct CertificateVerifier {
certificate: rustls::Certificate,
certificate_path: String,
}
impl rustls::client::ServerCertVerifier for CertificateVerifier {
fn verify_server_cert(
&self,
end_entity: &rustls::Certificate,
_intermediates: &[rustls::Certificate],
_server_name: &rustls::ServerName,
_scts: &mut dyn Iterator<Item = &[u8]>,
_ocsp_response: &[u8],
_now: std::time::SystemTime,
) -> std::result::Result<rustls::client::ServerCertVerified, rustls::Error> {
if end_entity != &self.certificate {
return Err(rustls::Error::InvalidCertificateData(format!(
"Server certificate doesn't match the one in the config file {}",
&self.certificate_path
)));
}
Ok(rustls::client::ServerCertVerified::assertion())
}
}
let mut dangerous_config = rustls::client::DangerousClientConfig {
cfg: &mut client_config,
};
dangerous_config.set_certificate_verifier(std::sync::Arc::new(CertificateVerifier {
certificate: certs.first().expect("empty certificate chain").clone(),
certificate_path: ldaps_options.cert_file.clone(),
}));
Ok(std::sync::Arc::new(client_config).into())
fn get_tls_connector() -> Result<RustlsTlsConnector> {
use rustls::ClientConfig;
let client_config = std::sync::Arc::new(
ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(get_root_certificates())
.with_no_client_auth(),
);
Ok(client_config.into())
}
#[instrument(skip_all, level = "info", err, fields(port = %ldaps_options.port))]
#[instrument(skip_all, level = "info", err)]
pub async fn check_ldaps(ldaps_options: &LdapsOptions) -> Result<()> {
if !ldaps_options.enabled {
info!("LDAPS not enabled");
return Ok(());
};
let tls_connector =
get_tls_connector(ldaps_options).context("while preparing the tls connection")?;
let tls_connector = get_tls_connector()?;
let url = format!("localhost:{}", ldaps_options.port);
check_ldap_endpoint(
tls_connector
.connect(
rustls::ServerName::try_from("localhost")
.context("while parsing the server name")?,
TcpStream::connect(&url)
.await
.context("while connecting TCP")?,
rustls::ServerName::try_from(url.as_str())?,
TcpStream::connect(&url).await?,
)
.await
.context("while connecting TLS")?,
.await?,
)
.await
}
#[instrument(level = "info", err)]
#[instrument(skip_all, level = "info", err)]
pub async fn check_api(port: u16) -> Result<()> {
reqwest::get(format!("http://localhost:{}/health", port))
.await?

View File

@@ -1,12 +1,12 @@
use sea_orm::{
sea_query::{ColumnDef, ForeignKey, ForeignKeyAction, Table},
ConnectionTrait, DeriveIden,
sea_query::{self, ColumnDef, ForeignKey, ForeignKeyAction, Iden, Table},
ConnectionTrait,
};
pub use crate::domain::{sql_migrations::Users, sql_tables::DbConnection};
/// Contains the refresh tokens for a given user.
#[derive(DeriveIden)]
#[derive(Iden)]
pub enum JwtRefreshStorage {
Table,
RefreshTokenHash,
@@ -15,7 +15,7 @@ pub enum JwtRefreshStorage {
}
/// Contains the blacklisted JWT that haven't expired yet.
#[derive(DeriveIden)]
#[derive(Iden)]
pub enum JwtStorage {
Table,
JwtHash,
@@ -25,7 +25,7 @@ pub enum JwtStorage {
}
/// Contains the temporary tokens to reset the password, sent by email.
#[derive(DeriveIden)]
#[derive(Iden)]
pub enum PasswordResetTokens {
Table,
Token,

View File

@@ -1,8 +1,6 @@
use crate::{
domain::{
handler::{
BackendHandler, BindRequest, CreateUserRequest, LoginHandler, SchemaBackendHandler,
},
handler::{BackendHandler, BindRequest, CreateUserRequest, LoginHandler},
ldap::{
error::{LdapError, LdapResult},
group::{convert_groups_to_ldap_op, get_groups_list},
@@ -22,10 +20,9 @@ use crate::{
use anyhow::Result;
use ldap3_proto::proto::{
LdapAddRequest, LdapBindCred, LdapBindRequest, LdapBindResponse, LdapCompareRequest,
LdapDerefAliases, LdapExtendedRequest, LdapExtendedResponse, LdapFilter, LdapModify,
LdapModifyRequest, LdapModifyType, LdapOp, LdapPartialAttribute, LdapPasswordModifyRequest,
LdapResult as LdapResultOp, LdapResultCode, LdapSearchRequest, LdapSearchResultEntry,
LdapSearchScope,
LdapDerefAliases, LdapExtendedRequest, LdapExtendedResponse, LdapFilter, LdapOp,
LdapPartialAttribute, LdapPasswordModifyRequest, LdapResult as LdapResultOp, LdapResultCode,
LdapSearchRequest, LdapSearchResultEntry, LdapSearchScope,
};
use std::collections::HashMap;
use tracing::{debug, instrument, warn};
@@ -40,23 +37,11 @@ enum SearchScope {
Groups,
User(LdapFilter),
Group(LdapFilter),
UserOuOnly,
GroupOuOnly,
Unknown,
Invalid,
}
enum InternalSearchResults {
UsersAndGroups(Vec<UserAndGroups>, Vec<Group>),
Raw(Vec<LdapOp>),
Empty,
}
fn get_search_scope(
base_dn: &[(String, String)],
dn_parts: &[(String, String)],
ldap_scope: &LdapSearchScope,
) -> SearchScope {
fn get_search_scope(base_dn: &[(String, String)], dn_parts: &[(String, String)]) -> SearchScope {
let base_dn_len = base_dn.len();
if !is_subtree(dn_parts, base_dn) {
SearchScope::Invalid
@@ -65,19 +50,11 @@ fn get_search_scope(
} else if dn_parts.len() == base_dn_len + 1
&& dn_parts[0] == ("ou".to_string(), "people".to_string())
{
if matches!(ldap_scope, LdapSearchScope::Base) {
SearchScope::UserOuOnly
} else {
SearchScope::Users
}
SearchScope::Users
} else if dn_parts.len() == base_dn_len + 1
&& dn_parts[0] == ("ou".to_string(), "groups".to_string())
{
if matches!(ldap_scope, LdapSearchScope::Base) {
SearchScope::GroupOuOnly
} else {
SearchScope::Groups
}
SearchScope::Groups
} else if dn_parts.len() == base_dn_len + 2
&& dn_parts[1] == ("ou".to_string(), "people".to_string())
{
@@ -104,7 +81,7 @@ fn make_search_request<S: Into<String>>(
) -> LdapSearchRequest {
LdapSearchRequest {
base: base.to_string(),
scope: LdapSearchScope::Subtree,
scope: LdapSearchScope::Base,
aliases: LdapDerefAliases::Never,
sizelimit: 0,
timelimit: 0,
@@ -149,15 +126,6 @@ fn make_extended_response(code: LdapResultCode, message: String) -> LdapOp {
})
}
fn make_modify_response(code: LdapResultCode, message: String) -> LdapOp {
LdapOp::ModifyResponse(LdapResultOp {
code,
matcheddn: "".to_string(),
message,
referral: vec![],
})
}
fn root_dse_response(base_dn: &str) -> LdapOp {
LdapOp::SearchResultEntry(LdapSearchResultEntry {
dn: "".to_string(),
@@ -263,8 +231,9 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
)
}
#[instrument(skip_all, level = "debug", fields(dn = %request.dn))]
#[instrument(skip_all, level = "debug")]
pub async fn do_bind(&mut self, request: &LdapBindRequest) -> (LdapResultCode, String) {
debug!("DN: {}", &request.dn);
let user_id = match get_user_id_from_distinguished_name(
&request.dn.to_ascii_lowercase(),
&self.ldap_info.base_dn,
@@ -299,7 +268,7 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
&self,
backend_handler: &B,
user: &UserId,
password: &[u8],
password: &str,
) -> Result<()> {
use lldap_auth::*;
let mut rng = rand::rngs::OsRng;
@@ -363,7 +332,7 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
),
})
} else if let Err(e) = self
.change_password(self.get_opaque_handler(), &uid, password.as_bytes())
.change_password(self.get_opaque_handler(), &uid, password)
.await
{
Err(LdapError {
@@ -403,104 +372,6 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
}
}
async fn handle_modify_change(
&mut self,
user_id: &UserId,
credentials: &ValidationResults,
user_is_admin: bool,
change: &LdapModify,
) -> LdapResult<()> {
if change.modification.atype.to_ascii_lowercase() != "userpassword"
|| change.operation != LdapModifyType::Replace
{
return Err(LdapError {
code: LdapResultCode::UnwillingToPerform,
message: format!(
r#"Unsupported operation: `{:?}` for `{}`"#,
change.operation, change.modification.atype
),
});
}
if !credentials.can_change_password(user_id, user_is_admin) {
return Err(LdapError {
code: LdapResultCode::InsufficentAccessRights,
message: format!(
r#"User `{}` cannot modify the password of user `{}`"#,
&credentials.user, &user_id
),
});
}
if let [value] = &change.modification.vals.as_slice() {
self.change_password(self.get_opaque_handler(), user_id, value)
.await
.map_err(|e| LdapError {
code: LdapResultCode::Other,
message: format!("Error while changing the password: {:#?}", e),
})?;
} else {
return Err(LdapError {
code: LdapResultCode::InvalidAttributeSyntax,
message: format!(
r#"Wrong number of values for password attribute: {}"#,
change.modification.vals.len()
),
});
}
Ok(())
}
/// Handles an LDAP modify request end-to-end.
///
/// Requires a bound user (otherwise `InsufficentAccessRights`), resolves the
/// target user id from the request DN, determines whether the target is an
/// admin (member of `lldap_admin`), then applies each change in order via
/// `handle_modify_change`. On success returns a single successful modify
/// response; the first failing change aborts the whole request.
async fn handle_modify_request(
    &mut self,
    request: &LdapModifyRequest,
) -> LdapResult<Vec<LdapOp>> {
    // A modify is only meaningful for an authenticated session.
    let credentials = self
        .user_info
        .as_ref()
        .ok_or_else(|| LdapError {
            code: LdapResultCode::InsufficentAccessRights,
            message: "No user currently bound".to_string(),
        })?
        .clone();
    match get_user_id_from_distinguished_name(
        &request.dn,
        &self.ldap_info.base_dn,
        &self.ldap_info.base_dn_str,
    ) {
        Ok(uid) => {
            // The target's admin status feeds the password-change permission
            // check (admins' passwords have stricter rules).
            let user_is_admin = self
                .backend_handler
                .get_readable_handler(&credentials, &uid)
                .expect("Unexpected permission error")
                .get_user_groups(&uid)
                .await
                .map_err(|e| LdapError {
                    code: LdapResultCode::OperationsError,
                    message: format!("Internal error while requesting user's groups: {:#?}", e),
                })?
                .iter()
                .any(|g| g.display_name == "lldap_admin");
            // Apply changes sequentially; `?` aborts on the first failure.
            for change in &request.changes {
                self.handle_modify_change(&uid, &credentials, user_is_admin, change)
                    .await?
            }
            Ok(vec![make_modify_response(
                LdapResultCode::Success,
                String::new(),
            )])
        }
        Err(e) => Err(LdapError {
            code: LdapResultCode::InvalidDNSyntax,
            message: format!("Invalid username: {}", e),
        }),
    }
}
async fn do_modify_request(&mut self, request: &LdapModifyRequest) -> Vec<LdapOp> {
self.handle_modify_request(request)
.await
.unwrap_or_else(|e: LdapError| vec![make_modify_response(e.code, e.message)])
}
pub async fn do_search_or_dse(
&mut self,
request: &LdapSearchRequest,
@@ -523,9 +394,9 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
&self,
backend_handler: &impl UserAndGroupListerBackendHandler,
request: &LdapSearchRequest,
) -> LdapResult<InternalSearchResults> {
) -> LdapResult<(Option<Vec<UserAndGroups>>, Option<Vec<Group>>)> {
let dn_parts = parse_distinguished_name(&request.base.to_ascii_lowercase())?;
let scope = get_search_scope(&self.ldap_info.base_dn, &dn_parts, &request.scope);
let scope = get_search_scope(&self.ldap_info.base_dn, &dn_parts);
debug!(?request.base, ?scope);
// Disambiguate the lifetimes.
fn cast<'a, T, R>(x: T) -> T
@@ -553,41 +424,26 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
get_groups_list(&self.ldap_info, filter, &request.base, backend_handler).await
});
Ok(match scope {
SearchScope::Global => InternalSearchResults::UsersAndGroups(
get_user_list(&request.filter).await?,
get_group_list(&request.filter).await?,
),
SearchScope::Users => InternalSearchResults::UsersAndGroups(
get_user_list(&request.filter).await?,
Vec::new(),
),
SearchScope::Groups => InternalSearchResults::UsersAndGroups(
Vec::new(),
get_group_list(&request.filter).await?,
SearchScope::Global => (
Some(get_user_list(&request.filter).await?),
Some(get_group_list(&request.filter).await?),
),
SearchScope::Users => (Some(get_user_list(&request.filter).await?), None),
SearchScope::Groups => (None, Some(get_group_list(&request.filter).await?)),
SearchScope::User(filter) => {
let filter = LdapFilter::And(vec![request.filter.clone(), filter]);
InternalSearchResults::UsersAndGroups(get_user_list(&filter).await?, Vec::new())
(Some(get_user_list(&filter).await?), None)
}
SearchScope::Group(filter) => {
let filter = LdapFilter::And(vec![request.filter.clone(), filter]);
InternalSearchResults::UsersAndGroups(Vec::new(), get_group_list(&filter).await?)
}
SearchScope::UserOuOnly | SearchScope::GroupOuOnly => {
InternalSearchResults::Raw(vec![LdapOp::SearchResultEntry(LdapSearchResultEntry {
dn: request.base.clone(),
attributes: vec![LdapPartialAttribute {
atype: "objectClass".to_owned(),
vals: vec![b"top".to_vec(), b"organizationalUnit".to_vec()],
}],
})])
(None, Some(get_group_list(&filter).await?))
}
SearchScope::Unknown => {
warn!(
r#"The requested search tree "{}" matches neither the user subtree "ou=people,{}" nor the group subtree "ou=groups,{}""#,
&request.base, &self.ldap_info.base_dn_str, &self.ldap_info.base_dn_str
);
InternalSearchResults::Empty
(None, None)
}
SearchScope::Invalid => {
// Search path is not in our tree, just return an empty success.
@@ -595,7 +451,7 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
"The specified search tree {:?} is not under the common subtree {:?}",
&dn_parts, &self.ldap_info.base_dn
);
InternalSearchResults::Empty
(None, None)
}
})
}
@@ -609,27 +465,26 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
let backend_handler = self
.backend_handler
.get_user_restricted_lister_handler(user_info);
let search_results = self.do_search_internal(&backend_handler, request).await?;
let (users, groups) = self.do_search_internal(&backend_handler, request).await?;
let schema = backend_handler.get_schema().await.map_err(|e| LdapError {
code: LdapResultCode::OperationsError,
message: format!("Unable to get schema: {:#}", e),
})?;
let mut results = match search_results {
InternalSearchResults::UsersAndGroups(users, groups) => {
convert_users_to_ldap_op(users, &request.attrs, &self.ldap_info, &schema)
.chain(convert_groups_to_ldap_op(
groups,
&request.attrs,
&self.ldap_info,
&backend_handler.user_filter,
))
.collect()
}
InternalSearchResults::Raw(raw_results) => raw_results,
InternalSearchResults::Empty => Vec::new(),
};
if !matches!(results.last(), Some(LdapOp::SearchResultDone(_))) {
let mut results = Vec::new();
if let Some(users) = users {
results.extend(convert_users_to_ldap_op(
users,
&request.attrs,
&self.ldap_info,
));
}
if let Some(groups) = groups {
results.extend(convert_groups_to_ldap_op(
groups,
&request.attrs,
&self.ldap_info,
&backend_handler.user_filter,
));
}
if results.is_empty() || matches!(results[results.len() - 1], LdapOp::SearchResultEntry(_))
{
results.push(make_search_success());
}
Ok(results)
@@ -669,7 +524,6 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
let attributes: HashMap<String, Vec<u8>> = request
.attributes
.into_iter()
.filter(|a| !a.atype.eq_ignore_ascii_case("objectclass"))
.map(parse_attribute)
.collect::<LdapResult<_>>()?;
fn decode_attribute_value(val: &[u8]) -> LdapResult<String> {
@@ -789,7 +643,6 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
// No need to notify on unbind (per rfc4511)
return None;
}
LdapOp::ModifyRequest(request) => self.do_modify_request(&request).await,
LdapOp::ExtendedRequest(request) => self.do_extended_request(&request).await,
LdapOp::AddRequest(request) => self
.do_create_user(request)
@@ -811,17 +664,70 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
mod tests {
use super::*;
use crate::{
domain::{handler::*, types::*},
infra::test_utils::{setup_default_schema, MockTestBackendHandler},
domain::{error::Result, handler::*, opaque_handler::*, types::*},
uuid,
};
use async_trait::async_trait;
use chrono::TimeZone;
use ldap3_proto::proto::{LdapDerefAliases, LdapSearchScope, LdapSubstringFilter};
use mockall::predicate::eq;
use pretty_assertions::assert_eq;
use std::collections::HashSet;
use tokio;
// Mock backend used by the unit tests below, generated by mockall's `mock!`
// macro. It implements every backend trait the LdapHandler needs (login,
// user/group listing and mutation, and the OPAQUE password-auth handler) so
// tests can set per-call expectations with `expect_*()`.
// NOTE: only plain `//` comments are used here; `///` doc comments would be
// passed as tokens into the macro.
mockall::mock! {
    pub TestBackendHandler{}
    impl Clone for TestBackendHandler {
        fn clone(&self) -> Self;
    }
    #[async_trait]
    impl LoginHandler for TestBackendHandler {
        async fn bind(&self, request: BindRequest) -> Result<()>;
    }
    #[async_trait]
    impl GroupListerBackendHandler for TestBackendHandler {
        async fn list_groups(&self, filters: Option<GroupRequestFilter>) -> Result<Vec<Group>>;
    }
    #[async_trait]
    impl GroupBackendHandler for TestBackendHandler {
        async fn get_group_details(&self, group_id: GroupId) -> Result<GroupDetails>;
        async fn update_group(&self, request: UpdateGroupRequest) -> Result<()>;
        async fn create_group(&self, group_name: &str) -> Result<GroupId>;
        async fn delete_group(&self, group_id: GroupId) -> Result<()>;
    }
    #[async_trait]
    impl UserListerBackendHandler for TestBackendHandler {
        async fn list_users(&self, filters: Option<UserRequestFilter>, get_groups: bool) -> Result<Vec<UserAndGroups>>;
    }
    #[async_trait]
    impl UserBackendHandler for TestBackendHandler {
        async fn get_user_details(&self, user_id: &UserId) -> Result<User>;
        async fn create_user(&self, request: CreateUserRequest) -> Result<()>;
        async fn update_user(&self, request: UpdateUserRequest) -> Result<()>;
        async fn delete_user(&self, user_id: &UserId) -> Result<()>;
        async fn get_user_groups(&self, user_id: &UserId) -> Result<HashSet<GroupDetails>>;
        async fn add_user_to_group(&self, user_id: &UserId, group_id: GroupId) -> Result<()>;
        async fn remove_user_from_group(&self, user_id: &UserId, group_id: GroupId) -> Result<()>;
    }
    #[async_trait]
    impl BackendHandler for TestBackendHandler {}
    // OPAQUE protocol endpoints used by the password-change tests.
    #[async_trait]
    impl OpaqueHandler for TestBackendHandler {
        async fn login_start(
            &self,
            request: login::ClientLoginStartRequest
        ) -> Result<login::ServerLoginStartResponse>;
        async fn login_finish(&self, request: login::ClientLoginFinishRequest) -> Result<UserId>;
        async fn registration_start(
            &self,
            request: registration::ClientRegistrationStartRequest
        ) -> Result<registration::ServerRegistrationStartResponse>;
        async fn registration_finish(
            &self,
            request: registration::ClientRegistrationFinishRequest
        ) -> Result<()>;
    }
}
fn make_user_search_request<S: Into<String>>(
filter: LdapFilter,
attrs: Vec<S>,
@@ -856,11 +762,9 @@ mod tests {
display_name: group,
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
uuid: uuid!("a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
attributes: Vec::new(),
});
Ok(set)
});
setup_default_schema(&mut mock);
let mut ldap_handler = LdapHandler::new_for_tests(mock, "dc=Example,dc=com");
let request = LdapBindRequest {
dn: "uid=test,ou=people,dc=example,dc=coM".to_string(),
@@ -943,7 +847,6 @@ mod tests {
display_name: "lldap_admin".to_string(),
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
uuid: uuid!("a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
attributes: Vec::new(),
});
Ok(set)
});
@@ -1030,7 +933,6 @@ mod tests {
display_name: "rockstars".to_string(),
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
uuid: uuid!("a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
attributes: Vec::new(),
}]),
}])
});
@@ -1177,17 +1079,9 @@ mod tests {
user_id: UserId::new("bob_1"),
email: "bob@bobmail.bob".to_string(),
display_name: Some("Bôb Böbberson".to_string()),
first_name: Some("Bôb".to_string()),
last_name: Some("Böbberson".to_string()),
uuid: uuid!("698e1d5f-7a40-3151-8745-b9b8a37839da"),
attributes: vec![
AttributeValue {
name: "first_name".to_owned(),
value: Serialized::from("Bôb"),
},
AttributeValue {
name: "last_name".to_owned(),
value: Serialized::from("Böbberson"),
},
],
..Default::default()
},
groups: None,
@@ -1197,20 +1091,9 @@ mod tests {
user_id: UserId::new("jim"),
email: "jim@cricket.jim".to_string(),
display_name: Some("Jimminy Cricket".to_string()),
attributes: vec![
AttributeValue {
name: "avatar".to_owned(),
value: Serialized::from(&JpegPhoto::for_tests()),
},
AttributeValue {
name: "first_name".to_owned(),
value: Serialized::from("Jim"),
},
AttributeValue {
name: "last_name".to_owned(),
value: Serialized::from("Cricket"),
},
],
first_name: Some("Jim".to_string()),
last_name: Some("Cricket".to_string()),
avatar: Some(JpegPhoto::for_tests()),
uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
creation_date: Utc
.with_ymd_and_hms(2014, 7, 8, 9, 10, 11)
@@ -1347,7 +1230,6 @@ mod tests {
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
users: vec![UserId::new("bob"), UserId::new("john")],
uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
attributes: Vec::new(),
},
Group {
id: GroupId(3),
@@ -1355,7 +1237,6 @@ mod tests {
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
users: vec![UserId::new("john")],
uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
attributes: Vec::new(),
},
])
});
@@ -1426,7 +1307,6 @@ mod tests {
GroupRequestFilter::Member(UserId::new("bob")),
GroupRequestFilter::DisplayName("rockstars".to_string()),
false.into(),
GroupRequestFilter::Uuid(uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc")),
true.into(),
true.into(),
true.into(),
@@ -1447,7 +1327,6 @@ mod tests {
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
users: vec![],
uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
attributes: Vec::new(),
}])
});
let mut ldap_handler = setup_bound_admin_handler(mock).await;
@@ -1466,10 +1345,6 @@ mod tests {
"dn".to_string(),
"uid=rockstars,ou=people,dc=example,dc=com".to_string(),
),
LdapFilter::Equality(
"uuid".to_string(),
"04ac75e0-2900-3e21-926c-2f732c26b3fc".to_string(),
),
LdapFilter::Equality("obJEctclass".to_string(), "groupofUniqueNames".to_string()),
LdapFilter::Equality("objectclass".to_string(), "groupOfNames".to_string()),
LdapFilter::Present("objectclass".to_string()),
@@ -1518,7 +1393,6 @@ mod tests {
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
users: vec![],
uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
attributes: Vec::new(),
}])
});
let mut ldap_handler = setup_bound_admin_handler(mock).await;
@@ -1656,10 +1530,6 @@ mod tests {
true.into(),
true.into(),
false.into(),
UserRequestFilter::AttributeEquality(
"first_name".to_owned(),
"firstname".to_owned(),
),
false.into(),
UserRequestFilter::UserIdSubString(SubStringFilter {
initial: Some("iNIt".to_owned()),
@@ -1667,7 +1537,7 @@ mod tests {
final_: Some("finAl".to_owned()),
}),
UserRequestFilter::SubString(
UserColumn::DisplayName,
UserColumn::FirstName,
SubStringFilter {
initial: Some("iNIt".to_owned()),
any: vec!["1".to_owned(), "2aA".to_owned()],
@@ -1700,7 +1570,6 @@ mod tests {
LdapFilter::Present("objectClass".to_string()),
LdapFilter::Present("uid".to_string()),
LdapFilter::Present("unknown".to_string()),
LdapFilter::Equality("givenname".to_string(), "firstname".to_string()),
LdapFilter::Equality("unknown_attribute".to_string(), "randomValue".to_string()),
LdapFilter::Substring(
"uid".to_owned(),
@@ -1711,7 +1580,7 @@ mod tests {
},
),
LdapFilter::Substring(
"displayName".to_owned(),
"firstName".to_owned(),
LdapSubstringFilter {
initial: Some("iNIt".to_owned()),
any: vec!["1".to_owned(), "2aA".to_owned()],
@@ -1727,35 +1596,6 @@ mod tests {
);
}
#[tokio::test]
async fn test_search_unsupported_substring_filter() {
let mut ldap_handler = setup_bound_admin_handler(MockTestBackendHandler::new()).await;
let request = make_user_search_request(
LdapFilter::Substring(
"uuid".to_owned(),
LdapSubstringFilter {
initial: Some("iNIt".to_owned()),
any: vec!["1".to_owned(), "2aA".to_owned()],
final_: Some("finAl".to_owned()),
},
),
vec!["objectClass"],
);
ldap_handler.do_search_or_dse(&request).await.unwrap_err();
let request = make_user_search_request(
LdapFilter::Substring(
"givenname".to_owned(),
LdapSubstringFilter {
initial: Some("iNIt".to_owned()),
any: vec!["1".to_owned(), "2aA".to_owned()],
final_: Some("finAl".to_owned()),
},
),
vec!["objectClass"],
);
ldap_handler.do_search_or_dse(&request).await.unwrap_err();
}
#[tokio::test]
async fn test_search_member_of_filter() {
let mut mock = MockTestBackendHandler::new();
@@ -1812,7 +1652,7 @@ mod tests {
.with(
eq(Some(UserRequestFilter::And(vec![UserRequestFilter::Or(
vec![UserRequestFilter::Not(Box::new(
UserRequestFilter::Equality(UserColumn::DisplayName, "bob".to_string()),
UserRequestFilter::Equality(UserColumn::FirstName, "bob".to_string()),
))],
)]))),
eq(false),
@@ -1830,7 +1670,7 @@ mod tests {
let mut ldap_handler = setup_bound_admin_handler(mock).await;
let request = make_user_search_request(
LdapFilter::And(vec![LdapFilter::Or(vec![LdapFilter::Not(Box::new(
LdapFilter::Equality("displayname".to_string(), "bob".to_string()),
LdapFilter::Equality("givenname".to_string(), "bob".to_string()),
))])]),
vec!["objectclass"],
);
@@ -1863,16 +1703,8 @@ mod tests {
user_id: UserId::new("bob_1"),
email: "bob@bobmail.bob".to_string(),
display_name: Some("Bôb Böbberson".to_string()),
attributes: vec![
AttributeValue {
name: "first_name".to_owned(),
value: Serialized::from("Bôb"),
},
AttributeValue {
name: "last_name".to_owned(),
value: Serialized::from("Böbberson"),
},
],
first_name: Some("Bôb".to_string()),
last_name: Some("Böbberson".to_string()),
..Default::default()
},
groups: None,
@@ -1888,7 +1720,6 @@ mod tests {
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
users: vec![UserId::new("bob"), UserId::new("john")],
uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
attributes: Vec::new(),
}])
});
let mut ldap_handler = setup_bound_admin_handler(mock).await;
@@ -1946,16 +1777,8 @@ mod tests {
user_id: UserId::new("bob_1"),
email: "bob@bobmail.bob".to_string(),
display_name: Some("Bôb Böbberson".to_string()),
attributes: vec![
AttributeValue {
name: "avatar".to_owned(),
value: Serialized::from(&JpegPhoto::for_tests()),
},
AttributeValue {
name: "last_name".to_owned(),
value: Serialized::from("Böbberson"),
},
],
last_name: Some("Böbberson".to_string()),
avatar: Some(JpegPhoto::for_tests()),
uuid: uuid!("b4ac75e0-2900-3e21-926c-2f732c26b3fc"),
..Default::default()
},
@@ -1971,7 +1794,6 @@ mod tests {
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
users: vec![UserId::new("bob"), UserId::new("john")],
uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
attributes: Vec::new(),
}])
});
let mut ldap_handler = setup_bound_admin_handler(mock).await;
@@ -2157,8 +1979,7 @@ mod tests {
use lldap_auth::*;
let mut rng = rand::rngs::OsRng;
let registration_start_request =
opaque::client::registration::start_registration("password".as_bytes(), &mut rng)
.unwrap();
opaque::client::registration::start_registration("password", &mut rng).unwrap();
let request = registration::ClientRegistrationStartRequest {
username: "bob".to_string(),
registration_start_request: registration_start_request.message,
@@ -2196,56 +2017,6 @@ mod tests {
);
}
#[tokio::test]
// End-to-end test of a password change driven through an LDAP modify request:
// a `Replace` of `userPassword` on `uid=bob` should run the OPAQUE
// registration flow against the backend and return a successful modify
// response.
async fn test_password_change_modify_request() {
    let mut mock = MockTestBackendHandler::new();
    // Bob is not in any group, so the non-admin password-change path is taken.
    mock.expect_get_user_groups()
        .with(eq(UserId::new("bob")))
        .returning(|_| Ok(HashSet::new()));
    use lldap_auth::*;
    let mut rng = rand::rngs::OsRng;
    // Pre-compute a valid OPAQUE server response for the registration start,
    // so the mock can hand back a well-formed message.
    let registration_start_request =
        opaque::client::registration::start_registration("password".as_bytes(), &mut rng)
            .unwrap();
    let request = registration::ClientRegistrationStartRequest {
        username: "bob".to_string(),
        registration_start_request: registration_start_request.message,
    };
    let start_response = opaque::server::registration::start_registration(
        &opaque::server::ServerSetup::new(&mut rng),
        request.registration_start_request,
        &request.username,
    )
    .unwrap();
    // Both OPAQUE registration steps must be invoked exactly once.
    mock.expect_registration_start().times(1).return_once(|_| {
        Ok(registration::ServerRegistrationStartResponse {
            server_data: "".to_string(),
            registration_response: start_response.message,
        })
    });
    mock.expect_registration_finish()
        .times(1)
        .return_once(|_| Ok(()));
    let mut ldap_handler = setup_bound_admin_handler(mock).await;
    // The actual LDAP modify: replace bob's userPassword with one value.
    let request = LdapOp::ModifyRequest(LdapModifyRequest {
        dn: "uid=bob,ou=people,dc=example,dc=com".to_string(),
        changes: vec![LdapModify {
            operation: LdapModifyType::Replace,
            modification: LdapPartialAttribute {
                atype: "userPassword".to_owned(),
                vals: vec!["password".as_bytes().to_vec()],
            },
        }],
    });
    assert_eq!(
        ldap_handler.handle_ldap_message(request).await,
        Some(vec![make_modify_response(
            LdapResultCode::Success,
            "".to_string(),
        )])
    );
}
#[tokio::test]
async fn test_password_change_password_manager() {
let mut mock = MockTestBackendHandler::new();
@@ -2255,8 +2026,7 @@ mod tests {
use lldap_auth::*;
let mut rng = rand::rngs::OsRng;
let registration_start_request =
opaque::client::registration::start_registration("password".as_bytes(), &mut rng)
.unwrap();
opaque::client::registration::start_registration("password", &mut rng).unwrap();
let request = registration::ClientRegistrationStartRequest {
username: "bob".to_string(),
registration_start_request: registration_start_request.message,
@@ -2353,7 +2123,6 @@ mod tests {
display_name: "lldap_admin".to_string(),
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
uuid: uuid!("a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
attributes: Vec::new(),
});
mock.expect_get_user_groups()
.with(eq(UserId::new("bob")))
@@ -2466,42 +2235,6 @@ mod tests {
);
}
#[tokio::test]
async fn test_create_user_multiple_object_class() {
let mut mock = MockTestBackendHandler::new();
mock.expect_create_user()
.with(eq(CreateUserRequest {
user_id: UserId::new("bob"),
email: "".to_owned(),
display_name: Some("Bob".to_string()),
..Default::default()
}))
.times(1)
.return_once(|_| Ok(()));
let ldap_handler = setup_bound_admin_handler(mock).await;
let request = LdapAddRequest {
dn: "uid=bob,ou=people,dc=example,dc=com".to_owned(),
attributes: vec![
LdapPartialAttribute {
atype: "cn".to_owned(),
vals: vec![b"Bob".to_vec()],
},
LdapPartialAttribute {
atype: "objectClass".to_owned(),
vals: vec![
b"top".to_vec(),
b"person".to_vec(),
b"inetOrgPerson".to_vec(),
],
},
],
};
assert_eq!(
ldap_handler.do_create_user(request).await,
Ok(vec![make_add_error(LdapResultCode::Success, String::new())])
);
}
#[tokio::test]
async fn test_search_filter_non_attribute() {
let mut mock = MockTestBackendHandler::new();
@@ -2581,7 +2314,6 @@ mod tests {
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
users: vec![UserId::new("bob")],
uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
attributes: Vec::new(),
}])
});
let mut ldap_handler = setup_bound_admin_handler(mock).await;
@@ -2675,7 +2407,6 @@ mod tests {
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
users: vec![UserId::new("bob")],
uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
attributes: Vec::new(),
}])
});
let mut ldap_handler = setup_bound_admin_handler(mock).await;
@@ -2695,32 +2426,4 @@ mod tests {
})])
);
}
#[tokio::test]
// A base-scope search on the user organizational unit itself
// (`ou=people,...`) should return a single synthetic organizationalUnit
// entry plus a success marker, without hitting the backend at all.
async fn test_user_ou_search() {
    let mut ldap_handler = setup_bound_readonly_handler(MockTestBackendHandler::new()).await;
    let request = LdapSearchRequest {
        base: "ou=people,dc=example,dc=com".to_owned(),
        // Base scope: only the entry named by `base`, not its children.
        scope: LdapSearchScope::Base,
        aliases: LdapDerefAliases::Never,
        sizelimit: 0,
        timelimit: 0,
        typesonly: false,
        filter: LdapFilter::And(vec![]),
        attrs: Vec::new(),
    };
    assert_eq!(
        ldap_handler.do_search_or_dse(&request).await,
        Ok(vec![
            LdapOp::SearchResultEntry(LdapSearchResultEntry {
                dn: "ou=people,dc=example,dc=com".to_owned(),
                attributes: vec![LdapPartialAttribute {
                    atype: "objectClass".to_owned(),
                    vals: vec![b"top".to_vec(), b"organizationalUnit".to_vec()]
                }]
            }),
            make_search_success()
        ])
    );
}
}

View File

@@ -4,8 +4,7 @@ use crate::{
opaque_handler::OpaqueHandler,
},
infra::{
access_control::AccessControlledBackendHandler,
configuration::{Configuration, LdapsOptions},
access_control::AccessControlledBackendHandler, configuration::Configuration,
ldap_handler::LdapHandler,
},
};
@@ -73,8 +72,8 @@ where
use tokio_stream::StreamExt;
let (r, w) = tokio::io::split(stream);
// Configure the codec etc.
let mut requests = FramedRead::new(r, LdapCodec::default());
let mut resp = FramedWrite::new(w, LdapCodec::default());
let mut requests = FramedRead::new(r, LdapCodec);
let mut resp = FramedWrite::new(w, LdapCodec);
let mut session = LdapHandler::new(
AccessControlledBackendHandler::new(backend_handler),
@@ -95,7 +94,7 @@ where
}
fn read_private_key(key_file: &str) -> Result<PrivateKey> {
use rustls_pemfile::{ec_private_keys, pkcs8_private_keys, rsa_private_keys};
use rustls_pemfile::{pkcs8_private_keys, rsa_private_keys};
use std::{fs::File, io::BufReader};
pkcs8_private_keys(&mut BufReader::new(File::open(key_file)?))
.map_err(anyhow::Error::from)
@@ -113,36 +112,29 @@ fn read_private_key(key_file: &str) -> Result<PrivateKey> {
.ok_or_else(|| anyhow!("No PKCS1 key"))
})
})
.or_else(|_| {
ec_private_keys(&mut BufReader::new(File::open(key_file)?))
.map_err(anyhow::Error::from)
.and_then(|keys| keys.into_iter().next().ok_or_else(|| anyhow!("No EC key")))
})
.with_context(|| {
format!(
"Cannot read either PKCS1, PKCS8 or EC private key from {}",
"Cannot read either PKCS1 or PKCS8 private key from {}",
key_file
)
})
.map(rustls::PrivateKey)
}
pub fn read_certificates(
ldaps_options: &LdapsOptions,
) -> Result<(Vec<rustls::Certificate>, rustls::PrivateKey)> {
fn get_tls_acceptor(config: &Configuration) -> Result<RustlsTlsAcceptor> {
use rustls::{Certificate, ServerConfig};
use rustls_pemfile::certs;
use std::{fs::File, io::BufReader};
let certs = rustls_pemfile::certs(&mut BufReader::new(File::open(&ldaps_options.cert_file)?))?
.into_iter()
.map(rustls::Certificate)
.collect::<Vec<_>>();
let private_key = read_private_key(&ldaps_options.key_file)?;
Ok((certs, private_key))
}
fn get_tls_acceptor(ldaps_options: &LdapsOptions) -> Result<RustlsTlsAcceptor> {
let (certs, private_key) = read_certificates(ldaps_options)?;
// Load TLS key and cert files
let certs = certs(&mut BufReader::new(File::open(
&config.ldaps_options.cert_file,
)?))?
.into_iter()
.map(Certificate)
.collect::<Vec<_>>();
let private_key = read_private_key(&config.ldaps_options.key_file)?;
let server_config = std::sync::Arc::new(
rustls::ServerConfig::builder()
ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(certs, private_key)?,
@@ -193,8 +185,7 @@ where
if config.ldaps_options.enabled {
let tls_context = (
context_for_tls,
get_tls_acceptor(&config.ldaps_options)
.context("while setting up the SSL certificate")?,
get_tls_acceptor(config).context("while setting up the SSL certificate")?,
);
let tls_binder = move || {
let tls_context = tls_context.clone();

View File

@@ -3,8 +3,8 @@ use actix_web::{
dev::{ServiceRequest, ServiceResponse},
Error,
};
use tracing::{debug, error, Span};
use tracing_actix_web::RootSpanBuilder;
use tracing::{error, info, Span};
use tracing_actix_web::{root_span, RootSpanBuilder};
use tracing_subscriber::{filter::EnvFilter, layer::SubscriberExt, util::SubscriberInitExt};
/// We will define a custom root span builder to capture additional fields, specific
@@ -13,11 +13,11 @@ pub struct CustomRootSpanBuilder;
impl RootSpanBuilder for CustomRootSpanBuilder {
fn on_request_start(request: &ServiceRequest) -> Span {
tracing::debug_span!(
"HTTP request",
method = request.method().to_string(),
uri = request.uri().to_string()
)
let span = root_span!(request);
span.in_scope(|| {
info!(uri = %request.uri());
});
span
}
fn on_request_end<B>(_: Span, outcome: &Result<ServiceResponse<B>, Error>) {
@@ -26,7 +26,7 @@ impl RootSpanBuilder for CustomRootSpanBuilder {
if let Some(error) = response.response().error() {
error!(?error);
} else {
debug!(status_code = &response.response().status().as_u16());
info!(status_code = &response.response().status().as_u16());
}
}
Err(error) => error!(?error),

View File

@@ -1,18 +1,12 @@
use crate::infra::{cli::SmtpEncryption, configuration::MailOptions};
use anyhow::{anyhow, Ok, Result};
use anyhow::{Ok, Result};
use lettre::{
message::Mailbox, transport::smtp::authentication::Credentials, AsyncSmtpTransport,
AsyncTransport, Message, Tokio1Executor,
};
use tracing::debug;
async fn send_email(
to: Mailbox,
subject: &str,
body: String,
options: &MailOptions,
server_url: &url::Url,
) -> Result<()> {
async fn send_email(to: Mailbox, subject: &str, body: String, options: &MailOptions) -> Result<()> {
let from = options
.from
.clone()
@@ -23,14 +17,6 @@ async fn send_email(
&to, &from, &options.user, &options.server, options.port
);
let email = Message::builder()
.message_id(Some(format!(
"<{}@{}>",
uuid::Uuid::new_v1(
uuid::Timestamp::now(uuid::NoContext),
"lldap!".as_bytes().try_into().unwrap()
),
server_url.domain().unwrap_or_default()
)))
.from(from)
.reply_to(reply_to)
.to(to)
@@ -57,49 +43,32 @@ async fn send_email(
mailer = mailer.credentials(creds)
}
if let Err(e) = mailer.port(options.port).build().send(email).await {
if e.to_string().contains("CorruptMessage") {
Err(anyhow!("CorruptMessage returned by lettre, this usually means the SMTP encryption setting is wrong.").context(e))
} else {
Err(e.into())
}
} else {
Ok(())
}
mailer.port(options.port).build().send(email).await?;
Ok(())
}
pub async fn send_password_reset_email(
username: &str,
to: &str,
token: &str,
server_url: &url::Url,
domain: &str,
options: &MailOptions,
) -> Result<()> {
let to = to.parse()?;
let mut reset_url = server_url.clone();
reset_url
.path_segments_mut()
.unwrap()
.extend(["reset-password", "step2", token]);
let body = format!(
"Hello {},
This email has been sent to you in order to validate your identity.
If you did not initiate the process your credentials might have been
compromised. You should reset your password and contact an administrator.
To reset your password please visit the following URL: {}
To reset your password please visit the following URL: {}/reset-password/step2/{}
Please contact an administrator if you did not initiate the process.",
username, reset_url
username,
domain.trim_end_matches('/'),
token
);
send_email(
to,
"[LLDAP] Password reset requested",
body,
options,
server_url,
)
.await
send_email(to, "[LLDAP] Password reset requested", body, options).await
}
pub async fn send_test_email(to: Mailbox, options: &MailOptions) -> Result<()> {
@@ -108,7 +77,6 @@ pub async fn send_test_email(to: Mailbox, options: &MailOptions) -> Result<()> {
"LLDAP test email",
"The test is successful! You can send emails from LLDAP".to_string(),
options,
&url::Url::parse("http://localhost").unwrap(),
)
.await
}

View File

@@ -10,10 +10,6 @@ pub mod ldap_handler;
pub mod ldap_server;
pub mod logging;
pub mod mail;
pub mod schema;
pub mod sql_backend_handler;
pub mod tcp_backend_handler;
pub mod tcp_server;
#[cfg(test)]
pub mod test_utils;

View File

@@ -1,104 +0,0 @@
use crate::domain::{
handler::{AttributeSchema, Schema},
types::AttributeType,
};
use serde::{Deserialize, Serialize};
/// Schema as exposed to API consumers: the raw `Schema` augmented with the
/// hardcoded (built-in) attributes, with attribute lists kept sorted by name.
/// Construct it via `From<Schema>`.
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
pub struct PublicSchema(Schema);
impl PublicSchema {
    /// Borrows the underlying (augmented) schema.
    pub fn get_schema(&self) -> &Schema {
        &self.0
    }
}
impl From<Schema> for PublicSchema {
fn from(mut schema: Schema) -> Self {
schema.user_attributes.attributes.extend_from_slice(&[
AttributeSchema {
name: "user_id".to_owned(),
attribute_type: AttributeType::String,
is_list: false,
is_visible: true,
is_editable: false,
is_hardcoded: true,
},
AttributeSchema {
name: "creation_date".to_owned(),
attribute_type: AttributeType::DateTime,
is_list: false,
is_visible: true,
is_editable: false,
is_hardcoded: true,
},
AttributeSchema {
name: "mail".to_owned(),
attribute_type: AttributeType::String,
is_list: false,
is_visible: true,
is_editable: true,
is_hardcoded: true,
},
AttributeSchema {
name: "uuid".to_owned(),
attribute_type: AttributeType::String,
is_list: false,
is_visible: true,
is_editable: false,
is_hardcoded: true,
},
AttributeSchema {
name: "display_name".to_owned(),
attribute_type: AttributeType::String,
is_list: false,
is_visible: true,
is_editable: true,
is_hardcoded: true,
},
]);
schema
.user_attributes
.attributes
.sort_by(|a, b| a.name.cmp(&b.name));
schema.group_attributes.attributes.extend_from_slice(&[
AttributeSchema {
name: "group_id".to_owned(),
attribute_type: AttributeType::Integer,
is_list: false,
is_visible: true,
is_editable: false,
is_hardcoded: true,
},
AttributeSchema {
name: "creation_date".to_owned(),
attribute_type: AttributeType::DateTime,
is_list: false,
is_visible: true,
is_editable: false,
is_hardcoded: true,
},
AttributeSchema {
name: "uuid".to_owned(),
attribute_type: AttributeType::String,
is_list: false,
is_visible: true,
is_editable: false,
is_hardcoded: true,
},
AttributeSchema {
name: "display_name".to_owned(),
attribute_type: AttributeType::String,
is_list: false,
is_visible: true,
is_editable: true,
is_hardcoded: true,
},
]);
schema
.group_attributes
.attributes
.sort_by(|a, b| a.name.cmp(&b.name));
PublicSchema(schema)
}
}

View File

@@ -19,6 +19,7 @@ use actix_service::map_config;
use actix_web::{dev::AppConfig, guard, web, App, HttpResponse, Responder};
use anyhow::{Context, Result};
use hmac::Hmac;
use secstr::SecUtf8;
use sha2::Sha512;
use std::collections::HashSet;
use std::path::PathBuf;
@@ -38,10 +39,10 @@ pub enum TcpError {
BadRequest(String),
#[error("Internal server error: `{0}`")]
InternalServerError(String),
#[error("Not found: `{0}`")]
NotFoundError(String),
#[error("Unauthorized: `{0}`")]
UnauthorizedError(String),
#[error("Not implemented: `{0}`")]
NotImplemented(String),
}
pub type TcpResult<T> = std::result::Result<T, TcpError>;
@@ -53,7 +54,6 @@ pub(crate) fn error_to_http_response(error: TcpError) -> HttpResponse {
HttpResponse::Unauthorized()
}
DomainError::DatabaseError(_)
| DomainError::DatabaseTransactionError(_)
| DomainError::InternalError(_)
| DomainError::UnknownCryptoError(_) => HttpResponse::InternalServerError(),
DomainError::Base64DecodeError(_)
@@ -61,9 +61,9 @@ pub(crate) fn error_to_http_response(error: TcpError) -> HttpResponse {
| DomainError::EntityNotFound(_) => HttpResponse::BadRequest(),
},
TcpError::BadRequest(_) => HttpResponse::BadRequest(),
TcpError::NotFoundError(_) => HttpResponse::NotFound(),
TcpError::InternalServerError(_) => HttpResponse::InternalServerError(),
TcpError::UnauthorizedError(_) => HttpResponse::Unauthorized(),
TcpError::NotImplemented(_) => HttpResponse::NotImplemented(),
}
.body(error.to_string())
}
@@ -87,8 +87,9 @@ fn http_config<Backend>(
backend_handler: Backend,
jwt_secret: secstr::SecUtf8,
jwt_blacklist: HashSet<u64>,
server_url: url::Url,
server_url: String,
mail_options: MailOptions,
hibp_api_key: SecUtf8,
) where
Backend: TcpBackendHandler + BackendHandler + LoginHandler + OpaqueHandler + Clone + 'static,
{
@@ -99,6 +100,7 @@ fn http_config<Backend>(
jwt_blacklist: RwLock::new(jwt_blacklist),
server_url,
mail_options,
hibp_api_key,
}))
.route(
"/health",
@@ -132,8 +134,9 @@ pub(crate) struct AppState<Backend> {
pub backend_handler: AccessControlledBackendHandler<Backend>,
pub jwt_key: Hmac<Sha512>,
pub jwt_blacklist: RwLock<HashSet<u64>>,
pub server_url: url::Url,
pub server_url: String,
pub mail_options: MailOptions,
pub hibp_api_key: SecUtf8,
}
impl<Backend: BackendHandler> AppState<Backend> {
@@ -174,6 +177,7 @@ where
let mail_options = config.smtp_options.clone();
let verbose = config.verbose;
info!("Starting the API/web server on port {}", config.http_port);
let hibp_api_key = config.hibp_api_key.clone();
server_builder
.bind(
"http",
@@ -184,6 +188,7 @@ where
let jwt_blacklist = jwt_blacklist.clone();
let server_url = server_url.clone();
let mail_options = mail_options.clone();
let hibp_api_key = hibp_api_key.clone();
HttpServiceBuilder::default()
.finish(map_config(
App::new()
@@ -199,6 +204,7 @@ where
jwt_blacklist,
server_url,
mail_options,
hibp_api_key,
)
}),
|_| AppConfig::default(),

View File

@@ -1,100 +0,0 @@
use crate::domain::{error::Result, handler::*, opaque_handler::*, types::*};
use async_trait::async_trait;
use std::collections::HashSet;
// Generates `MockTestBackendHandler`, a mockall-backed test double that
// implements every backend-facing trait the HTTP/LDAP layers depend on.
// Tests configure expectations per method (e.g. `expect_list_users`) and
// inject the mock wherever a `BackendHandler` is required.
// NOTE: only `//` line comments are used here — they are discarded before
// macro expansion, so they cannot affect the generated mock.
mockall::mock! {
    pub TestBackendHandler{}
    // Clone must be mocked explicitly: mockall cannot derive it, and the
    // server clones its handler per worker/connection.
    impl Clone for TestBackendHandler {
        fn clone(&self) -> Self;
    }
    // LDAP-style credential check (simple bind).
    #[async_trait]
    impl LoginHandler for TestBackendHandler {
        async fn bind(&self, request: BindRequest) -> Result<()>;
    }
    // Read-only group listing, with optional filtering.
    #[async_trait]
    impl GroupListerBackendHandler for TestBackendHandler {
        async fn list_groups(&self, filters: Option<GroupRequestFilter>) -> Result<Vec<Group>>;
    }
    // Group CRUD operations.
    #[async_trait]
    impl GroupBackendHandler for TestBackendHandler {
        async fn get_group_details(&self, group_id: GroupId) -> Result<GroupDetails>;
        async fn update_group(&self, request: UpdateGroupRequest) -> Result<()>;
        async fn create_group(&self, group_name: &str) -> Result<GroupId>;
        async fn delete_group(&self, group_id: GroupId) -> Result<()>;
    }
    // Read-only user listing; `get_groups` controls whether each user's
    // group memberships are fetched alongside.
    #[async_trait]
    impl UserListerBackendHandler for TestBackendHandler {
        async fn list_users(&self, filters: Option<UserRequestFilter>, get_groups: bool) -> Result<Vec<UserAndGroups>>;
    }
    // User CRUD plus group-membership management.
    #[async_trait]
    impl UserBackendHandler for TestBackendHandler {
        async fn get_user_details(&self, user_id: &UserId) -> Result<User>;
        async fn create_user(&self, request: CreateUserRequest) -> Result<()>;
        async fn update_user(&self, request: UpdateUserRequest) -> Result<()>;
        async fn delete_user(&self, user_id: &UserId) -> Result<()>;
        async fn get_user_groups(&self, user_id: &UserId) -> Result<HashSet<GroupDetails>>;
        async fn add_user_to_group(&self, user_id: &UserId, group_id: GroupId) -> Result<()>;
        async fn remove_user_from_group(&self, user_id: &UserId, group_id: GroupId) -> Result<()>;
    }
    // Exposes the (user/group) attribute schema; see `setup_default_schema`
    // for a canned response.
    #[async_trait]
    impl SchemaBackendHandler for TestBackendHandler {
        async fn get_schema(&self) -> Result<Schema>;
    }
    // Marker trait tying the sub-handlers together; no methods of its own.
    #[async_trait]
    impl BackendHandler for TestBackendHandler {}
    // OPAQUE password-authenticated key exchange: two-step login and
    // two-step registration flows.
    #[async_trait]
    impl OpaqueHandler for TestBackendHandler {
        async fn login_start(
            &self,
            request: login::ClientLoginStartRequest
        ) -> Result<login::ServerLoginStartResponse>;
        async fn login_finish(&self, request: login::ClientLoginFinishRequest) -> Result<UserId>;
        async fn registration_start(
            &self,
            request: registration::ClientRegistrationStartRequest
        ) -> Result<registration::ServerRegistrationStartResponse>;
        async fn registration_finish(
            &self,
            request: registration::ClientRegistrationFinishRequest
        ) -> Result<()>;
    }
}
/// Installs a canned `get_schema` expectation on the mock backend.
///
/// The returned schema mirrors the server's hardcoded user attributes
/// (`avatar`, `first_name`, `last_name`) and has no group attributes,
/// so tests that only need "some valid schema" don't have to build one.
pub fn setup_default_schema(mock: &mut MockTestBackendHandler) {
    mock.expect_get_schema().returning(|| {
        // All three default user attributes share every flag; only the
        // name and value type differ, so build them through one closure.
        let user_attr = |name: &str, attribute_type| AttributeSchema {
            name: name.to_owned(),
            attribute_type,
            is_list: false,
            is_visible: true,
            is_editable: true,
            is_hardcoded: true,
        };
        Ok(Schema {
            user_attributes: AttributeList {
                attributes: vec![
                    user_attr("avatar", AttributeType::JpegPhoto),
                    user_attr("first_name", AttributeType::String),
                    user_attr("last_name", AttributeType::String),
                ],
            },
            // No custom or hardcoded group attributes in the default schema.
            group_attributes: AttributeList {
                attributes: Vec::new(),
            },
        })
    });
}

View File

@@ -9,7 +9,7 @@ use crate::{
domain::{
handler::{
CreateUserRequest, GroupBackendHandler, GroupListerBackendHandler, GroupRequestFilter,
UserBackendHandler, UserListerBackendHandler, UserRequestFilter,
UserBackendHandler,
},
sql_backend_handler::SqlBackendHandler,
sql_opaque_handler::register_password,
@@ -89,19 +89,8 @@ async fn set_up_server(config: Configuration) -> Result<ServerBuilder> {
ensure_group_exists(&backend_handler, "lldap_admin").await?;
ensure_group_exists(&backend_handler, "lldap_password_manager").await?;
ensure_group_exists(&backend_handler, "lldap_strict_readonly").await?;
let admin_present = if let Ok(admins) = backend_handler
.list_users(
Some(UserRequestFilter::MemberOf("lldap_admin".to_owned())),
false,
)
.await
{
!admins.is_empty()
} else {
false
};
if !admin_present {
warn!("Could not find an admin user, trying to create the user \"admin\" with the config-provided password");
if let Err(e) = backend_handler.get_user_details(&config.ldap_user_dn).await {
warn!("Could not get admin user, trying to create it: {:#}", e);
create_admin_user(&backend_handler, &config)
.await
.map_err(|e| anyhow!("Error setting up admin login/account: {:#}", e))

Some files were not shown because too many files have changed in this diff Show More